From: Federico Caselli Date: Tue, 27 Sep 2022 21:29:57 +0000 (+0200) Subject: Format code in the rst docs file X-Git-Tag: rel_2_0_0b1~30^2 X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=23dbf572cec3802d9d54d2f5a52eeaeb18d1c26f;p=thirdparty%2Fsqlalchemy%2Fsqlalchemy.git Format code in the rst docs file Added script to format code in the rst documentation using black. This is also added to the lint tox job to ensure that the code in the docs is properly formatted. Change-Id: I799444f22da153484ca5f095d57755762348da40 --- diff --git a/doc/build/changelog/changelog_07.rst b/doc/build/changelog/changelog_07.rst index a9ae15c3bf..77757317f2 100644 --- a/doc/build/changelog/changelog_07.rst +++ b/doc/build/changelog/changelog_07.rst @@ -2,6 +2,7 @@ 0.7 Changelog ============= + .. changelog:: :version: 0.7.11 :released: diff --git a/doc/build/changelog/migration_04.rst b/doc/build/changelog/migration_04.rst index b503134079..93a2b654fb 100644 --- a/doc/build/changelog/migration_04.rst +++ b/doc/build/changelog/migration_04.rst @@ -27,7 +27,7 @@ Secondly, anywhere you used to say ``engine=``, :: - myengine = create_engine('sqlite://') + myengine = create_engine("sqlite://") meta = MetaData(myengine) @@ -56,6 +56,7 @@ In 0.3, this code worked: from sqlalchemy import * + class UTCDateTime(types.TypeDecorator): pass @@ -66,6 +67,7 @@ In 0.4, one must do: from sqlalchemy import * from sqlalchemy import types + class UTCDateTime(types.TypeDecorator): pass @@ -119,7 +121,7 @@ when working with mapped classes: :: - session.query(User).filter(and_(User.name == 'fred', User.id > 17)) + session.query(User).filter(and_(User.name == "fred", User.id > 17)) While simple column-based comparisons are no big deal, the class attributes have some new "higher level" constructs @@ -139,18 +141,18 @@ available, including what was previously only available in # return all users who contain a particular address with # the email_address like '%foo%' - 
filter(User.addresses.any(Address.email_address.like('%foo%'))) + filter(User.addresses.any(Address.email_address.like("%foo%"))) # same, email address equals 'foo@bar.com'. can fall back to keyword # args for simple comparisons - filter(User.addresses.any(email_address = 'foo@bar.com')) + filter(User.addresses.any(email_address="foo@bar.com")) # return all Addresses whose user attribute has the username 'ed' - filter(Address.user.has(name='ed')) + filter(Address.user.has(name="ed")) # return all Addresses whose user attribute has the username 'ed' # and an id > 5 (mixing clauses with kwargs) - filter(Address.user.has(User.id > 5, name='ed')) + filter(Address.user.has(User.id > 5, name="ed")) The ``Column`` collection remains available on mapped classes in the ``.c`` attribute. Note that property-based @@ -199,12 +201,20 @@ any ``Alias`` objects: :: # standard self-referential TreeNode mapper with backref - mapper(TreeNode, tree_nodes, properties={ - 'children':relation(TreeNode, backref=backref('parent', remote_side=tree_nodes.id)) - }) + mapper( + TreeNode, + tree_nodes, + properties={ + "children": relation( + TreeNode, backref=backref("parent", remote_side=tree_nodes.id) + ) + }, + ) # query for node with child containing "bar" two levels deep - session.query(TreeNode).join(["children", "children"], aliased=True).filter_by(name='bar') + session.query(TreeNode).join(["children", "children"], aliased=True).filter_by( + name="bar" + ) To add criterion for each table along the way in an aliased join, you can use ``from_joinpoint`` to keep joining against @@ -215,15 +225,15 @@ the same line of aliases: # search for the treenode along the path "n1/n12/n122" # first find a Node with name="n122" - q = sess.query(Node).filter_by(name='n122') + q = sess.query(Node).filter_by(name="n122") # then join to parent with "n12" - q = q.join('parent', aliased=True).filter_by(name='n12') + q = q.join("parent", aliased=True).filter_by(name="n12") # join again to the next parent 
with 'n1'. use 'from_joinpoint' # so we join from the previous point, instead of joining off the # root table - q = q.join('parent', aliased=True, from_joinpoint=True).filter_by(name='n1') + q = q.join("parent", aliased=True, from_joinpoint=True).filter_by(name="n1") node = q.first() @@ -271,17 +281,24 @@ deep you want to go. Lets show the self-referential :: - nodes = Table('nodes', metadata, - Column('id', Integer, primary_key=True), - Column('parent_id', Integer, ForeignKey('nodes.id')), - Column('name', String(30))) + nodes = Table( + "nodes", + metadata, + Column("id", Integer, primary_key=True), + Column("parent_id", Integer, ForeignKey("nodes.id")), + Column("name", String(30)), + ) + class TreeNode(object): pass - mapper(TreeNode, nodes, properties={ - 'children':relation(TreeNode, lazy=False, join_depth=3) - }) + + mapper( + TreeNode, + nodes, + properties={"children": relation(TreeNode, lazy=False, join_depth=3)}, + ) So what happens when we say: @@ -324,10 +341,13 @@ new type, ``Point``. Stores an x/y coordinate: def __init__(self, x, y): self.x = x self.y = y + def __composite_values__(self): return self.x, self.y + def __eq__(self, other): return other.x == self.x and other.y == self.y + def __ne__(self, other): return not self.__eq__(other) @@ -341,13 +361,15 @@ Let's create a table of vertices storing two points per row: :: - vertices = Table('vertices', metadata, - Column('id', Integer, primary_key=True), - Column('x1', Integer), - Column('y1', Integer), - Column('x2', Integer), - Column('y2', Integer), - ) + vertices = Table( + "vertices", + metadata, + Column("id", Integer, primary_key=True), + Column("x1", Integer), + Column("y1", Integer), + Column("x2", Integer), + Column("y2", Integer), + ) Then, map it ! 
We'll create a ``Vertex`` object which stores two ``Point`` objects: @@ -359,10 +381,15 @@ stores two ``Point`` objects: self.start = start self.end = end - mapper(Vertex, vertices, properties={ - 'start':composite(Point, vertices.c.x1, vertices.c.y1), - 'end':composite(Point, vertices.c.x2, vertices.c.y2) - }) + + mapper( + Vertex, + vertices, + properties={ + "start": composite(Point, vertices.c.x1, vertices.c.y1), + "end": composite(Point, vertices.c.x2, vertices.c.y2), + }, + ) Once you've set up your composite type, it's usable just like any other type: @@ -370,7 +397,7 @@ like any other type: :: - v = Vertex(Point(3, 4), Point(26,15)) + v = Vertex(Point(3, 4), Point(26, 15)) session.save(v) session.flush() @@ -388,7 +415,7 @@ work as primary keys too, and are usable in ``query.get()``: # a Document class which uses a composite Version # object as primary key - document = query.get(Version(1, 'a')) + document = query.get(Version(1, "a")) ``dynamic_loader()`` relations ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -438,16 +465,12 @@ eager in one pass: :: - mapper(Foo, foo_table, properties={ - 'bar':relation(Bar) - }) - mapper(Bar, bar_table, properties={ - 'bat':relation(Bat) - }) + mapper(Foo, foo_table, properties={"bar": relation(Bar)}) + mapper(Bar, bar_table, properties={"bat": relation(Bat)}) mapper(Bat, bat_table) # eager load bar and bat - session.query(Foo).options(eagerload_all('bar.bat')).filter(...).all() + session.query(Foo).options(eagerload_all("bar.bat")).filter(...).all() New Collection API ^^^^^^^^^^^^^^^^^^ @@ -471,7 +494,7 @@ many needs: # use a dictionary relation keyed by a column relation(Item, collection_class=column_mapped_collection(items.c.keyword)) # or named attribute - relation(Item, collection_class=attribute_mapped_collection('keyword')) + relation(Item, collection_class=attribute_mapped_collection("keyword")) # or any function you like relation(Item, collection_class=mapped_collection(lambda entity: entity.a + entity.b)) @@ -493,12 +516,20 
@@ columns or subqueries: :: - mapper(User, users, properties={ - 'fullname': column_property((users.c.firstname + users.c.lastname).label('fullname')), - 'numposts': column_property( - select([func.count(1)], users.c.id==posts.c.user_id).correlate(users).label('posts') - ) - }) + mapper( + User, + users, + properties={ + "fullname": column_property( + (users.c.firstname + users.c.lastname).label("fullname") + ), + "numposts": column_property( + select([func.count(1)], users.c.id == posts.c.user_id) + .correlate(users) + .label("posts") + ), + }, + ) a typical query looks like: @@ -534,7 +565,7 @@ your ``engine`` (or anywhere): from sqlalchemy import create_engine from sqlalchemy.orm import sessionmaker - engine = create_engine('myengine://') + engine = create_engine("myengine://") Session = sessionmaker(bind=engine, autoflush=True, transactional=True) # use the new Session() freely @@ -542,7 +573,6 @@ your ``engine`` (or anywhere): sess.save(someobject) sess.flush() - If you need to post-configure your Session, say with an engine, add it later with ``configure()``: @@ -562,7 +592,7 @@ with both ``sessionmaker`` as well as ``create_session()``: Session = scoped_session(sessionmaker(autoflush=True, transactional=True)) Session.configure(bind=engine) - u = User(name='wendy') + u = User(name="wendy") sess = Session() sess.save(u) @@ -573,7 +603,6 @@ with both ``sessionmaker`` as well as ``create_session()``: sess2 = Session() assert sess is sess2 - When using a thread-local ``Session``, the returned class has all of ``Session's`` interface implemented as classmethods, and "assignmapper"'s functionality is @@ -586,11 +615,10 @@ old ``objectstore`` days.... 
# "assignmapper"-like functionality available via ScopedSession.mapper Session.mapper(User, users_table) - u = User(name='wendy') + u = User(name="wendy") Session.commit() - Sessions are again Weak Referencing By Default ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -624,13 +652,13 @@ Also, ``autoflush=True`` means the ``Session`` will Session = sessionmaker(bind=engine, autoflush=True, transactional=True) - u = User(name='wendy') + u = User(name="wendy") sess = Session() sess.save(u) # wendy is flushed, comes right back from a query - wendy = sess.query(User).filter_by(name='wendy').one() + wendy = sess.query(User).filter_by(name="wendy").one() Transactional methods moved onto sessions ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -649,7 +677,7 @@ background). # use the session - sess.commit() # commit transaction + sess.commit() # commit transaction Sharing a ``Session`` with an enclosing engine-level (i.e. non-ORM) transaction is easy: @@ -745,7 +773,7 @@ Just like it says: :: - b = bindparam('foo', type_=String) + b = bindparam("foo", type_=String) in\_ Function Changed to Accept Sequence or Selectable ------------------------------------------------------ @@ -847,8 +875,18 @@ Out Parameters for Oracle :: - result = engine.execute(text("begin foo(:x, :y, :z); end;", bindparams=[bindparam('x', Numeric), outparam('y', Numeric), outparam('z', Numeric)]), x=5) - assert result.out_parameters == {'y':10, 'z':75} + result = engine.execute( + text( + "begin foo(:x, :y, :z); end;", + bindparams=[ + bindparam("x", Numeric), + outparam("y", Numeric), + outparam("z", Numeric), + ], + ), + x=5, + ) + assert result.out_parameters == {"y": 10, "z": 75} Connection-bound ``MetaData``, ``Sessions`` ------------------------------------------- diff --git a/doc/build/changelog/migration_05.rst b/doc/build/changelog/migration_05.rst index 64b69e1523..39bb9cb053 100644 --- a/doc/build/changelog/migration_05.rst +++ b/doc/build/changelog/migration_05.rst @@ -64,15 +64,21 @@ Object 
Relational Mapping :: - session.query(User.name, func.count(Address.id).label("numaddresses")).join(Address).group_by(User.name) + session.query(User.name, func.count(Address.id).label("numaddresses")).join( + Address + ).group_by(User.name) The tuples returned by any multi-column/entity query are *named*' tuples: :: - for row in session.query(User.name, func.count(Address.id).label('numaddresses')).join(Address).group_by(User.name): - print("name", row.name, "number", row.numaddresses) + for row in ( + session.query(User.name, func.count(Address.id).label("numaddresses")) + .join(Address) + .group_by(User.name) + ): + print("name", row.name, "number", row.numaddresses) ``Query`` has a ``statement`` accessor, as well as a ``subquery()`` method which allow ``Query`` to be used to @@ -223,17 +229,24 @@ Object Relational Mapping :: - mapper(User, users, properties={ - 'addresses':relation(Address, order_by=addresses.c.id) - }, order_by=users.c.id) + mapper( + User, + users, + properties={"addresses": relation(Address, order_by=addresses.c.id)}, + order_by=users.c.id, + ) To set ordering on a backref, use the ``backref()`` function: :: - 'keywords':relation(Keyword, secondary=item_keywords, - order_by=keywords.c.name, backref=backref('items', order_by=items.c.id)) + "keywords": relation( + Keyword, + secondary=item_keywords, + order_by=keywords.c.name, + backref=backref("items", order_by=items.c.id), + ) Using declarative ? To help with the new ``order_by`` requirement, ``order_by`` and friends can now be set using @@ -244,7 +257,7 @@ Object Relational Mapping class MyClass(MyDeclarativeBase): ... 
- 'addresses':relation("Address", order_by="Address.id") + "addresses": relation("Address", order_by="Address.id") It's generally a good idea to set ``order_by`` on ``relation()s`` which load list-based collections of @@ -402,14 +415,17 @@ Schema/Types convert_result_value methods """ + def bind_processor(self, dialect): def convert(value): return self.convert_bind_param(value, dialect) + return convert def result_processor(self, dialect): def convert(value): return self.convert_result_value(value, dialect) + return convert def convert_result_value(self, value, dialect): diff --git a/doc/build/changelog/migration_06.rst b/doc/build/changelog/migration_06.rst index 0867fefe02..73c57bd931 100644 --- a/doc/build/changelog/migration_06.rst +++ b/doc/build/changelog/migration_06.rst @@ -73,7 +73,7 @@ will use psycopg2: :: - create_engine('postgresql://scott:tiger@localhost/test') + create_engine("postgresql://scott:tiger@localhost/test") However to specify a specific DBAPI backend such as pg8000, add it to the "protocol" section of the URL using a plus @@ -81,7 +81,7 @@ sign "+": :: - create_engine('postgresql+pg8000://scott:tiger@localhost/test') + create_engine("postgresql+pg8000://scott:tiger@localhost/test") Important Dialect Links: @@ -138,8 +138,15 @@ set of PG types: :: - from sqlalchemy.dialects.postgresql import INTEGER, BIGINT, SMALLINT,\ - VARCHAR, MACADDR, DATE, BYTEA + from sqlalchemy.dialects.postgresql import ( + INTEGER, + BIGINT, + SMALLINT, + VARCHAR, + MACADDR, + DATE, + BYTEA, + ) Above, ``INTEGER`` is actually the plain ``INTEGER`` type from ``sqlalchemy.types``, but the PG dialect makes it @@ -164,7 +171,7 @@ object returns another ``ClauseElement``: :: >>> from sqlalchemy.sql import column - >>> column('foo') == 5 + >>> column("foo") == 5 This so that Python expressions produce SQL expressions when @@ -172,16 +179,15 @@ converted to strings: :: - >>> str(column('foo') == 5) + >>> str(column("foo") == 5) 'foo = :foo_1' But what happens if we say 
this? :: - >>> if column('foo') == 5: + >>> if column("foo") == 5: ... print("yes") - ... In previous versions of SQLAlchemy, the returned ``_BinaryExpression`` was a plain Python object which @@ -191,11 +197,11 @@ as to that being compared. Meaning: :: - >>> bool(column('foo') == 5) + >>> bool(column("foo") == 5) False - >>> bool(column('foo') == column('foo')) + >>> bool(column("foo") == column("foo")) False - >>> c = column('foo') + >>> c = column("foo") >>> bool(c == c) True >>> @@ -252,7 +258,7 @@ sets: :: - connection.execute(table.insert(), {'data':'row1'}, {'data':'row2'}, {'data':'row3'}) + connection.execute(table.insert(), {"data": "row1"}, {"data": "row2"}, {"data": "row3"}) When the ``Connection`` object sends off the given ``insert()`` construct for compilation, it passes to the @@ -268,10 +274,12 @@ works: :: - connection.execute(table.insert(), - {'timestamp':today, 'data':'row1'}, - {'timestamp':today, 'data':'row2'}, - {'data':'row3'}) + connection.execute( + table.insert(), + {"timestamp": today, "data": "row1"}, + {"timestamp": today, "data": "row2"}, + {"data": "row3"}, + ) Because the third row does not specify the 'timestamp' column. 
Previous versions of SQLAlchemy would simply insert @@ -392,7 +400,7 @@ with tables or metadata objects: from sqlalchemy.schema import DDL - DDL('CREATE TRIGGER users_trigger ...').execute_at('after-create', metadata) + DDL("CREATE TRIGGER users_trigger ...").execute_at("after-create", metadata) Now the full suite of DDL constructs are available under the same system, including those for CREATE TABLE, ADD @@ -402,7 +410,7 @@ CONSTRAINT, etc.: from sqlalchemy.schema import Constraint, AddConstraint - AddContraint(CheckConstraint("value > 5")).execute_at('after-create', mytable) + AddContraint(CheckConstraint("value > 5")).execute_at("after-create", mytable) Additionally, all the DDL objects are now regular ``ClauseElement`` objects just like any other SQLAlchemy @@ -428,20 +436,22 @@ make your own: from sqlalchemy.schema import DDLElement from sqlalchemy.ext.compiler import compiles - class AlterColumn(DDLElement): + class AlterColumn(DDLElement): def __init__(self, column, cmd): self.column = column self.cmd = cmd + @compiles(AlterColumn) def visit_alter_column(element, compiler, **kw): return "ALTER TABLE %s ALTER COLUMN %s %s ..." 
% ( element.column.table.name, element.column.name, - element.cmd + element.cmd, ) + engine.execute(AlterColumn(table.c.mycolumn, "SET DEFAULT 'test'")) Deprecated/Removed Schema Elements @@ -566,6 +576,7 @@ To use an inspector: :: from sqlalchemy.engine.reflection import Inspector + insp = Inspector.from_engine(my_engine) print(insp.get_schema_names()) @@ -578,10 +589,10 @@ such as that of PostgreSQL which provides a :: - my_engine = create_engine('postgresql://...') + my_engine = create_engine("postgresql://...") pg_insp = Inspector.from_engine(my_engine) - print(pg_insp.get_table_oid('my_table')) + print(pg_insp.get_table_oid("my_table")) RETURNING Support ================= @@ -600,10 +611,10 @@ columns will be returned as a regular result set: result = connection.execute( - table.insert().values(data='some data').returning(table.c.id, table.c.timestamp) - ) + table.insert().values(data="some data").returning(table.c.id, table.c.timestamp) + ) row = result.first() - print("ID:", row['id'], "Timestamp:", row['timestamp']) + print("ID:", row["id"], "Timestamp:", row["timestamp"]) The implementation of RETURNING across the four supported backends varies wildly, in the case of Oracle requiring an @@ -740,7 +751,7 @@ that converts unicode back to utf-8, or whatever is desired: def process_result_value(self, value, dialect): if isinstance(value, unicode): - value = value.encode('utf-8') + value = value.encode("utf-8") return value Note that the ``assert_unicode`` flag is now deprecated. 
@@ -968,9 +979,11 @@ At mapper level: :: mapper(Child, child) - mapper(Parent, parent, properties={ - 'child':relationship(Child, lazy='joined', innerjoin=True) - }) + mapper( + Parent, + parent, + properties={"child": relationship(Child, lazy="joined", innerjoin=True)}, + ) At query time level: diff --git a/doc/build/changelog/migration_07.rst b/doc/build/changelog/migration_07.rst index a222f5380b..4763b9134c 100644 --- a/doc/build/changelog/migration_07.rst +++ b/doc/build/changelog/migration_07.rst @@ -244,7 +244,7 @@ with an explicit onclause is now: :: - query.join(SomeClass, SomeClass.id==ParentClass.some_id) + query.join(SomeClass, SomeClass.id == ParentClass.some_id) In 0.6, this usage was considered to be an error, because ``join()`` accepts multiple arguments corresponding to @@ -336,10 +336,12 @@ to the creation of the index outside of the Table. That is: :: - Table('mytable', metadata, - Column('id',Integer, primary_key=True), - Column('name', String(50), nullable=False), - Index('idx_name', 'name') + Table( + "mytable", + metadata, + Column("id", Integer, primary_key=True), + Column("name", String(50), nullable=False), + Index("idx_name", "name"), ) The primary rationale here is for the benefit of declarative @@ -348,14 +350,16 @@ The primary rationale here is for the benefit of declarative :: class HasNameMixin(object): - name = Column('name', String(50), nullable=False) + name = Column("name", String(50), nullable=False) + @declared_attr def __table_args__(cls): - return (Index('name'), {}) + return (Index("name"), {}) + class User(HasNameMixin, Base): - __tablename__ = 'user' - id = Column('id', Integer, primary_key=True) + __tablename__ = "user" + id = Column("id", Integer, primary_key=True) `Indexes `_ @@ -385,17 +389,16 @@ tutorial: from sqlalchemy.sql import table, column, select, func - empsalary = table('empsalary', - column('depname'), - column('empno'), - column('salary')) + empsalary = table("empsalary", column("depname"), column("empno"), 
column("salary")) - s = select([ + s = select( + [ empsalary, - func.avg(empsalary.c.salary). - over(partition_by=empsalary.c.depname). - label('avg') - ]) + func.avg(empsalary.c.salary) + .over(partition_by=empsalary.c.depname) + .label("avg"), + ] + ) print(s) @@ -495,7 +498,7 @@ equivalent to: :: - query.from_self(func.count(literal_column('1'))).scalar() + query.from_self(func.count(literal_column("1"))).scalar() Previously, internal logic attempted to rewrite the columns clause of the query itself, and upon detection of a @@ -534,6 +537,7 @@ be used: :: from sqlalchemy import func + session.query(func.count(MyClass.id)).scalar() or for ``count(*)``: @@ -541,7 +545,8 @@ or for ``count(*)``: :: from sqlalchemy import func, literal_column - session.query(func.count(literal_column('*'))).select_from(MyClass).scalar() + + session.query(func.count(literal_column("*"))).select_from(MyClass).scalar() LIMIT/OFFSET clauses now use bind parameters -------------------------------------------- @@ -690,8 +695,11 @@ function, can be mapped. from sqlalchemy import select, func from sqlalchemy.orm import mapper + class Subset(object): pass + + selectable = select(["x", "y", "z"]).select_from(func.some_db_function()).alias() mapper(Subset, selectable, primary_key=[selectable.c.x]) @@ -773,10 +781,11 @@ mutations, the type object must be constructed with :: - Table('mytable', metadata, + Table( + "mytable", + metadata, # .... 
- - Column('pickled_data', PickleType(mutable=True)) + Column("pickled_data", PickleType(mutable=True)), ) The ``mutable=True`` flag is being phased out, in favor of @@ -1036,7 +1045,7 @@ key column ``id``, the following now produces an error: :: - foobar = foo.join(bar, foo.c.id==bar.c.foo_id) + foobar = foo.join(bar, foo.c.id == bar.c.foo_id) mapper(FooBar, foobar) This because the ``mapper()`` refuses to guess what column @@ -1047,10 +1056,8 @@ explicit: :: - foobar = foo.join(bar, foo.c.id==bar.c.foo_id) - mapper(FooBar, foobar, properties={ - 'id':[foo.c.id, bar.c.id] - }) + foobar = foo.join(bar, foo.c.id == bar.c.foo_id) + mapper(FooBar, foobar, properties={"id": [foo.c.id, bar.c.id]}) :ticket:`1896` @@ -1231,14 +1238,14 @@ backend: :: - select([mytable], distinct='ALL', prefixes=['HIGH_PRIORITY']) + select([mytable], distinct="ALL", prefixes=["HIGH_PRIORITY"]) The ``prefixes`` keyword or ``prefix_with()`` method should be used for non-standard or unusual prefixes: :: - select([mytable]).prefix_with('HIGH_PRIORITY', 'ALL') + select([mytable]).prefix_with("HIGH_PRIORITY", "ALL") ``useexisting`` superseded by ``extend_existing`` and ``keep_existing`` ----------------------------------------------------------------------- diff --git a/doc/build/changelog/migration_08.rst b/doc/build/changelog/migration_08.rst index a4dc58549f..9c5b381ee9 100644 --- a/doc/build/changelog/migration_08.rst +++ b/doc/build/changelog/migration_08.rst @@ -90,11 +90,11 @@ entities. The new system includes these features: :: class Folder(Base): - __tablename__ = 'folder' + __tablename__ = "folder" __table_args__ = ( - ForeignKeyConstraint( - ['account_id', 'parent_id'], - ['folder.account_id', 'folder.folder_id']), + ForeignKeyConstraint( + ["account_id", "parent_id"], ["folder.account_id", "folder.folder_id"] + ), ) account_id = Column(Integer, primary_key=True) @@ -102,10 +102,9 @@ entities. 
The new system includes these features: parent_id = Column(Integer) name = Column(String) - parent_folder = relationship("Folder", - backref="child_folders", - remote_side=[account_id, folder_id] - ) + parent_folder = relationship( + "Folder", backref="child_folders", remote_side=[account_id, folder_id] + ) Above, the ``Folder`` refers to its parent ``Folder`` joining from ``account_id`` to itself, and ``parent_id`` @@ -144,18 +143,19 @@ entities. The new system includes these features: expected in most cases:: class HostEntry(Base): - __tablename__ = 'host_entry' + __tablename__ = "host_entry" id = Column(Integer, primary_key=True) ip_address = Column(INET) content = Column(String(50)) # relationship() using explicit foreign_keys, remote_side - parent_host = relationship("HostEntry", - primaryjoin=ip_address == cast(content, INET), - foreign_keys=content, - remote_side=ip_address - ) + parent_host = relationship( + "HostEntry", + primaryjoin=ip_address == cast(content, INET), + foreign_keys=content, + remote_side=ip_address, + ) The new :func:`_orm.relationship` mechanics make use of a SQLAlchemy concept known as :term:`annotations`. These annotations @@ -167,8 +167,9 @@ entities. The new system includes these features: from sqlalchemy.orm import foreign, remote + class HostEntry(Base): - __tablename__ = 'host_entry' + __tablename__ = "host_entry" id = Column(Integer, primary_key=True) ip_address = Column(INET) @@ -176,11 +177,10 @@ entities. The new system includes these features: # relationship() using explicit foreign() and remote() annotations # in lieu of separate arguments - parent_host = relationship("HostEntry", - primaryjoin=remote(ip_address) == \ - cast(foreign(content), INET), - ) - + parent_host = relationship( + "HostEntry", + primaryjoin=remote(ip_address) == cast(foreign(content), INET), + ) .. 
seealso:: @@ -226,12 +226,11 @@ certain contexts, such as :class:`.AliasedInsp` and A walkthrough of some key capabilities follows:: >>> class User(Base): - ... __tablename__ = 'user' + ... __tablename__ = "user" ... id = Column(Integer, primary_key=True) ... name = Column(String) ... name_syn = synonym(name) ... addresses = relationship("Address") - ... >>> # universal entry point is inspect() >>> b = inspect(User) @@ -285,7 +284,7 @@ A walkthrough of some key capabilities follows:: "user".id = address.user_id >>> # inspect works on instances - >>> u1 = User(id=3, name='x') + >>> u1 = User(id=3, name="x") >>> b = inspect(u1) >>> # it returns the InstanceState @@ -354,10 +353,11 @@ usable anywhere: :: from sqlalchemy.orm import with_polymorphic + palias = with_polymorphic(Person, [Engineer, Manager]) - session.query(Company).\ - join(palias, Company.employees).\ - filter(or_(Engineer.language=='java', Manager.hair=='pointy')) + session.query(Company).join(palias, Company.employees).filter( + or_(Engineer.language == "java", Manager.hair == "pointy") + ) .. 
seealso:: @@ -377,9 +377,11 @@ by combining it with the new :func:`.with_polymorphic` function:: # use eager loading in conjunction with with_polymorphic targets Job_P = with_polymorphic(Job, [SubJob, ExtraJob], aliased=True) - q = s.query(DataContainer).\ - join(DataContainer.jobs.of_type(Job_P)).\ - options(contains_eager(DataContainer.jobs.of_type(Job_P))) + q = ( + s.query(DataContainer) + .join(DataContainer.jobs.of_type(Job_P)) + .options(contains_eager(DataContainer.jobs.of_type(Job_P))) + ) The method now works equally well in most places a regular relationship attribute is accepted, including with loader functions like @@ -389,26 +391,28 @@ and :meth:`.PropComparator.has`:: # use eager loading in conjunction with with_polymorphic targets Job_P = with_polymorphic(Job, [SubJob, ExtraJob], aliased=True) - q = s.query(DataContainer).\ - join(DataContainer.jobs.of_type(Job_P)).\ - options(contains_eager(DataContainer.jobs.of_type(Job_P))) + q = ( + s.query(DataContainer) + .join(DataContainer.jobs.of_type(Job_P)) + .options(contains_eager(DataContainer.jobs.of_type(Job_P))) + ) # pass subclasses to eager loads (implicitly applies with_polymorphic) - q = s.query(ParentThing).\ - options( - joinedload_all( - ParentThing.container, - DataContainer.jobs.of_type(SubJob) - )) + q = s.query(ParentThing).options( + joinedload_all(ParentThing.container, DataContainer.jobs.of_type(SubJob)) + ) # control self-referential aliasing with any()/has() Job_A = aliased(Job) - q = s.query(Job).join(DataContainer.jobs).\ - filter( - DataContainer.jobs.of_type(Job_A).\ - any(and_(Job_A.id < Job.id, Job_A.type=='fred') - ) - ) + q = ( + s.query(Job) + .join(DataContainer.jobs) + .filter( + DataContainer.jobs.of_type(Job_A).any( + and_(Job_A.id < Job.id, Job_A.type == "fred") + ) + ) + ) .. 
seealso:: @@ -429,13 +433,15 @@ with a declarative base class:: Base = declarative_base() + @event.listens_for("load", Base, propagate=True) def on_load(target, context): print("New instance loaded:", target) + # on_load() will be applied to SomeClass class SomeClass(Base): - __tablename__ = 'sometable' + __tablename__ = "sometable" # ... @@ -453,8 +459,9 @@ can be referred to via dotted name in expressions:: class Snack(Base): # ... - peanuts = relationship("nuts.Peanut", - primaryjoin="nuts.Peanut.snack_id == Snack.id") + peanuts = relationship( + "nuts.Peanut", primaryjoin="nuts.Peanut.snack_id == Snack.id" + ) The resolution allows that any full or partial disambiguating package name can be used. If the @@ -484,17 +491,22 @@ in one step: class ReflectedOne(DeferredReflection, Base): __abstract__ = True + class ReflectedTwo(DeferredReflection, Base): __abstract__ = True + class MyClass(ReflectedOne): - __tablename__ = 'mytable' + __tablename__ = "mytable" + class MyOtherClass(ReflectedOne): - __tablename__ = 'myothertable' + __tablename__ = "myothertable" + class YetAnotherClass(ReflectedTwo): - __tablename__ = 'yetanothertable' + __tablename__ = "yetanothertable" + ReflectedOne.prepare(engine_one) ReflectedTwo.prepare(engine_two) @@ -535,10 +547,9 @@ Below, we emit an UPDATE against ``SomeEntity``, adding a FROM clause (or equivalent, depending on backend) against ``SomeOtherEntity``:: - query(SomeEntity).\ - filter(SomeEntity.id==SomeOtherEntity.id).\ - filter(SomeOtherEntity.foo=='bar').\ - update({"data":"x"}) + query(SomeEntity).filter(SomeEntity.id == SomeOtherEntity.id).filter( + SomeOtherEntity.foo == "bar" + ).update({"data": "x"}) In particular, updates to joined-inheritance entities are supported, provided the target of the UPDATE is local to the @@ -548,10 +559,9 @@ given ``Engineer`` as a joined subclass of ``Person``: :: - query(Engineer).\ - filter(Person.id==Engineer.id).\ - filter(Person.name=='dilbert').\ - update({"engineer_data":"java"}) + 
query(Engineer).filter(Person.id == Engineer.id).filter( + Person.name == "dilbert" + ).update({"engineer_data": "java"}) would produce: @@ -649,6 +659,7 @@ For example, to add logarithm support to :class:`.Numeric` types: from sqlalchemy.types import Numeric from sqlalchemy.sql import func + class CustomNumeric(Numeric): class comparator_factory(Numeric.Comparator): def log(self, other): @@ -659,16 +670,17 @@ The new type is usable like any other type: :: - data = Table('data', metadata, - Column('id', Integer, primary_key=True), - Column('x', CustomNumeric(10, 5)), - Column('y', CustomNumeric(10, 5)) - ) + data = Table( + "data", + metadata, + Column("id", Integer, primary_key=True), + Column("x", CustomNumeric(10, 5)), + Column("y", CustomNumeric(10, 5)), + ) stmt = select([data.c.x.log(data.c.y)]).where(data.c.x.log(2) < value) print(conn.execute(stmt).fetchall()) - New features which have come from this immediately include support for PostgreSQL's HSTORE type, as well as new operations associated with PostgreSQL's ARRAY @@ -696,11 +708,13 @@ support this syntax, including PostgreSQL, SQLite, and MySQL. It is not the same thing as the usual ``executemany()`` style of INSERT which remains unchanged:: - users.insert().values([ - {"name": "some name"}, - {"name": "some other name"}, - {"name": "yet another name"}, - ]) + users.insert().values( + [ + {"name": "some name"}, + {"name": "some other name"}, + {"name": "yet another name"}, + ] + ) .. 
seealso:: @@ -721,6 +735,7 @@ functionality, except on the database side:: from sqlalchemy.types import String from sqlalchemy import func, Table, Column, MetaData + class LowerString(String): def bind_expression(self, bindvalue): return func.lower(bindvalue) @@ -728,18 +743,15 @@ functionality, except on the database side:: def column_expression(self, col): return func.lower(col) + metadata = MetaData() - test_table = Table( - 'test_table', - metadata, - Column('data', LowerString) - ) + test_table = Table("test_table", metadata, Column("data", LowerString)) Above, the ``LowerString`` type defines a SQL expression that will be emitted whenever the ``test_table.c.data`` column is rendered in the columns clause of a SELECT statement:: - >>> print(select([test_table]).where(test_table.c.data == 'HI')) + >>> print(select([test_table]).where(test_table.c.data == "HI")) SELECT lower(test_table.data) AS data FROM test_table WHERE test_table.data = lower(:data_1) @@ -789,16 +801,17 @@ against a particular target selectable:: signatures = relationship("Signature", lazy=False) + class Signature(Base): __tablename__ = "signature" id = Column(Integer, primary_key=True) sig_count = column_property( - select([func.count('*')]).\ - where(SnortEvent.signature == id). - correlate_except(SnortEvent) - ) + select([func.count("*")]) + .where(SnortEvent.signature == id) + .correlate_except(SnortEvent) + ) .. 
seealso:: @@ -818,19 +831,16 @@ and containment methods such as from sqlalchemy.dialects.postgresql import HSTORE - data = Table('data_table', metadata, - Column('id', Integer, primary_key=True), - Column('hstore_data', HSTORE) - ) - - engine.execute( - select([data.c.hstore_data['some_key']]) - ).scalar() + data = Table( + "data_table", + metadata, + Column("id", Integer, primary_key=True), + Column("hstore_data", HSTORE), + ) - engine.execute( - select([data.c.hstore_data.matrix()]) - ).scalar() + engine.execute(select([data.c.hstore_data["some_key"]])).scalar() + engine.execute(select([data.c.hstore_data.matrix()])).scalar() .. seealso:: @@ -861,30 +871,20 @@ results: The type also introduces new operators, using the new type-specific operator framework. New operations include indexed access:: - result = conn.execute( - select([mytable.c.arraycol[2]]) - ) + result = conn.execute(select([mytable.c.arraycol[2]])) slice access in SELECT:: - result = conn.execute( - select([mytable.c.arraycol[2:4]]) - ) + result = conn.execute(select([mytable.c.arraycol[2:4]])) slice updates in UPDATE:: - conn.execute( - mytable.update().values({mytable.c.arraycol[2:3]: [7, 8]}) - ) + conn.execute(mytable.update().values({mytable.c.arraycol[2:3]: [7, 8]})) freestanding array literals:: >>> from sqlalchemy.dialects import postgresql - >>> conn.scalar( - ... select([ - ... postgresql.array([1, 2]) + postgresql.array([3, 4, 5]) - ... ]) - ... ) + >>> conn.scalar(select([postgresql.array([1, 2]) + postgresql.array([3, 4, 5])])) [1, 2, 3, 4, 5] array concatenation, where below, the right side ``[4, 5, 6]`` is coerced into an array literal:: @@ -912,20 +912,24 @@ everything else. 
:: - Column('sometimestamp', sqlite.DATETIME(truncate_microseconds=True)) - Column('sometimestamp', sqlite.DATETIME( - storage_format=( - "%(year)04d%(month)02d%(day)02d" - "%(hour)02d%(minute)02d%(second)02d%(microsecond)06d" - ), - regexp="(\d{4})(\d{2})(\d{2})(\d{2})(\d{2})(\d{2})(\d{6})" - ) - ) - Column('somedate', sqlite.DATE( - storage_format="%(month)02d/%(day)02d/%(year)04d", - regexp="(?P\d+)/(?P\d+)/(?P\d+)", - ) - ) + Column("sometimestamp", sqlite.DATETIME(truncate_microseconds=True)) + Column( + "sometimestamp", + sqlite.DATETIME( + storage_format=( + "%(year)04d%(month)02d%(day)02d" + "%(hour)02d%(minute)02d%(second)02d%(microsecond)06d" + ), + regexp="(\d{4})(\d{2})(\d{2})(\d{2})(\d{2})(\d{2})(\d{6})", + ), + ) + Column( + "somedate", + sqlite.DATE( + storage_format="%(month)02d/%(day)02d/%(year)04d", + regexp="(?P\d+)/(?P\d+)/(?P\d+)", + ), + ) Huge thanks to Nate Dub for the sprinting on this at Pycon 2012. @@ -946,7 +950,7 @@ The "collate" keyword, long accepted by the MySQL dialect, is now established on all :class:`.String` types and will render on any backend, including when features such as :meth:`_schema.MetaData.create_all` and :func:`.cast` is used:: - >>> stmt = select([cast(sometable.c.somechar, String(20, collation='utf8'))]) + >>> stmt = select([cast(sometable.c.somechar, String(20, collation="utf8"))]) >>> print(stmt) SELECT CAST(sometable.somechar AS VARCHAR(20) COLLATE "utf8") AS anon_1 FROM sometable @@ -1047,33 +1051,35 @@ The new behavior allows the following test case to work:: Base = declarative_base() + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) name = Column(String(64)) + class UserKeyword(Base): - __tablename__ = 'user_keyword' - user_id = Column(Integer, ForeignKey('user.id'), primary_key=True) - keyword_id = Column(Integer, ForeignKey('keyword.id'), primary_key=True) + __tablename__ = "user_keyword" + user_id = Column(Integer, ForeignKey("user.id"), 
primary_key=True) + keyword_id = Column(Integer, ForeignKey("keyword.id"), primary_key=True) - user = relationship(User, - backref=backref("user_keywords", - cascade="all, delete-orphan") - ) + user = relationship( + User, backref=backref("user_keywords", cascade="all, delete-orphan") + ) - keyword = relationship("Keyword", - backref=backref("user_keywords", - cascade="all, delete-orphan") - ) + keyword = relationship( + "Keyword", backref=backref("user_keywords", cascade="all, delete-orphan") + ) # uncomment this to enable the old behavior # __mapper_args__ = {"legacy_is_orphan": True} + class Keyword(Base): - __tablename__ = 'keyword' + __tablename__ = "keyword" id = Column(Integer, primary_key=True) - keyword = Column('keyword', String(64)) + keyword = Column("keyword", String(64)) + from sqlalchemy import create_engine from sqlalchemy.orm import Session @@ -1103,7 +1109,6 @@ The new behavior allows the following test case to work:: session.commit() - :ticket:`2655` The after_attach event fires after the item is associated with the Session instead of before; before_attach added @@ -1129,9 +1134,9 @@ use cases should use the new "before_attach" event: @event.listens_for(Session, "before_attach") def before_attach(session, instance): - instance.some_necessary_attribute = session.query(Widget).\ - filter_by(instance.widget_name).\ - first() + instance.some_necessary_attribute = ( + session.query(Widget).filter_by(instance.widget_name).first() + ) :ticket:`2464` @@ -1146,11 +1151,13 @@ parent: :: - subq = session.query(Entity.value).\ - filter(Entity.id==Parent.entity_id).\ - correlate(Parent).\ - as_scalar() - session.query(Parent).filter(subq=="some value") + subq = ( + session.query(Entity.value) + .filter(Entity.id == Parent.entity_id) + .correlate(Parent) + .as_scalar() + ) + session.query(Parent).filter(subq == "some value") This was the opposite behavior of a plain ``select()`` construct which would assume auto-correlation by default. 
@@ -1158,10 +1165,8 @@ The above statement in 0.8 will correlate automatically: :: - subq = session.query(Entity.value).\ - filter(Entity.id==Parent.entity_id).\ - as_scalar() - session.query(Parent).filter(subq=="some value") + subq = session.query(Entity.value).filter(Entity.id == Parent.entity_id).as_scalar() + session.query(Parent).filter(subq == "some value") like in ``select()``, correlation can be disabled by calling ``query.correlate(None)`` or manually set by passing an @@ -1187,8 +1192,8 @@ objects relative to what's being selected:: from sqlalchemy.sql import table, column, select - t1 = table('t1', column('x')) - t2 = table('t2', column('y')) + t1 = table("t1", column("x")) + t2 = table("t2", column("y")) s = select([t1, t2]).correlate(t1) print(s) @@ -1263,8 +1268,8 @@ doing something like this: :: - scalar_subq = select([someothertable.c.id]).where(someothertable.c.data=='foo') - select([sometable]).where(sometable.c.id==scalar_subq) + scalar_subq = select([someothertable.c.id]).where(someothertable.c.data == "foo") + select([sometable]).where(sometable.c.id == scalar_subq) SQL Server doesn't allow an equality comparison to a scalar SELECT, that is, "x = (SELECT something)". 
The MSSQL dialect @@ -1313,32 +1318,28 @@ key would be ignored, inconsistently versus when :: # before 0.8 - table1 = Table('t1', metadata, - Column('col1', Integer, key='column_one') - ) + table1 = Table("t1", metadata, Column("col1", Integer, key="column_one")) s = select([table1]) - s.c.column_one # would be accessible like this - s.c.col1 # would raise AttributeError + s.c.column_one # would be accessible like this + s.c.col1 # would raise AttributeError s = select([table1]).apply_labels() - s.c.table1_column_one # would raise AttributeError - s.c.table1_col1 # would be accessible like this + s.c.table1_column_one # would raise AttributeError + s.c.table1_col1 # would be accessible like this In 0.8, :attr:`_schema.Column.key` is honored in both cases: :: # with 0.8 - table1 = Table('t1', metadata, - Column('col1', Integer, key='column_one') - ) + table1 = Table("t1", metadata, Column("col1", Integer, key="column_one")) s = select([table1]) - s.c.column_one # works - s.c.col1 # AttributeError + s.c.column_one # works + s.c.col1 # AttributeError s = select([table1]).apply_labels() - s.c.table1_column_one # works - s.c.table1_col1 # AttributeError + s.c.table1_column_one # works + s.c.table1_col1 # AttributeError All other behavior regarding "name" and "key" are the same, including that the rendered SQL will still use the form @@ -1408,8 +1409,8 @@ warning: :: - t1 = table('t1', column('x')) - t1.insert().values(x=5, z=5) # raises "Unconsumed column names: z" + t1 = table("t1", column("x")) + t1.insert().values(x=5, z=5) # raises "Unconsumed column names: z" :ticket:`2415` @@ -1439,7 +1440,7 @@ always compared case-insensitively: :: >>> row = result.fetchone() - >>> row['foo'] == row['FOO'] == row['Foo'] + >>> row["foo"] == row["FOO"] == row["Foo"] True This was for the benefit of a few dialects which in the diff --git a/doc/build/changelog/migration_09.rst b/doc/build/changelog/migration_09.rst index 70fa49e343..c5f4a31532 100644 --- 
a/doc/build/changelog/migration_09.rst +++ b/doc/build/changelog/migration_09.rst @@ -60,8 +60,7 @@ Using a :class:`_query.Query` in conjunction with a composite attribute now retu type maintained by that composite, rather than being broken out into individual columns. Using the mapping setup at :ref:`mapper_composite`:: - >>> session.query(Vertex.start, Vertex.end).\ - ... filter(Vertex.start == Point(3, 4)).all() + >>> session.query(Vertex.start, Vertex.end).filter(Vertex.start == Point(3, 4)).all() [(Point(x=3, y=4), Point(x=5, y=6))] This change is backwards-incompatible with code that expects the individual attribute @@ -69,8 +68,9 @@ to be expanded into individual columns. To get that behavior, use the ``.clause accessor:: - >>> session.query(Vertex.start.clauses, Vertex.end.clauses).\ - ... filter(Vertex.start == Point(3, 4)).all() + >>> session.query(Vertex.start.clauses, Vertex.end.clauses).filter( + ... Vertex.start == Point(3, 4) + ... ).all() [(3, 4, 5, 6)] .. seealso:: @@ -93,9 +93,11 @@ Consider the following example against the usual ``User`` mapping:: select_stmt = select([User]).where(User.id == 7).alias() - q = session.query(User).\ - join(select_stmt, User.id == select_stmt.c.id).\ - filter(User.name == 'ed') + q = ( + session.query(User) + .join(select_stmt, User.id == select_stmt.c.id) + .filter(User.name == "ed") + ) The above statement predictably renders SQL like the following:: @@ -109,10 +111,12 @@ If we wanted to reverse the order of the left and right elements of the JOIN, the documentation would lead us to believe we could use :meth:`_query.Query.select_from` to do so:: - q = session.query(User).\ - select_from(select_stmt).\ - join(User, User.id == select_stmt.c.id).\ - filter(User.name == 'ed') + q = ( + session.query(User) + .select_from(select_stmt) + .join(User, User.id == select_stmt.c.id) + .filter(User.name == "ed") + ) However, in version 0.8 and earlier, the above use of :meth:`_query.Query.select_from` would apply the 
``select_stmt`` to **replace** the ``User`` entity, as it @@ -137,7 +141,7 @@ to selecting from a customized :func:`.aliased` construct:: select_stmt = select([User]).where(User.id == 7) user_from_stmt = aliased(User, select_stmt.alias()) - q = session.query(user_from_stmt).filter(user_from_stmt.name == 'ed') + q = session.query(user_from_stmt).filter(user_from_stmt.name == "ed") So with SQLAlchemy 0.9, our query that selects from ``select_stmt`` produces the SQL we expect:: @@ -180,17 +184,20 @@ The change is illustrated as follows:: Base = declarative_base() + class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) + class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) - a_id = Column(Integer, ForeignKey('a.id')) + a_id = Column(Integer, ForeignKey("a.id")) a = relationship("A", backref=backref("bs", viewonly=True)) + e = create_engine("sqlite://") Base.metadata.create_all(e) @@ -229,16 +236,17 @@ the "association" row being present or not when the comparison is against Consider this mapping:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) - b_id = Column(Integer, ForeignKey('b.id'), primary_key=True) + b_id = Column(Integer, ForeignKey("b.id"), primary_key=True) b = relationship("B") b_value = association_proxy("b", "value") + class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) value = Column(String) @@ -323,21 +331,24 @@ proxied value. 
E.g.:: Base = declarative_base() + class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) b = relationship("B", uselist=False) bname = association_proxy("b", "name") + class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) - a_id = Column(Integer, ForeignKey('a.id')) + a_id = Column(Integer, ForeignKey("a.id")) name = Column(String) + a1 = A() # this is how m2o's always have worked @@ -370,17 +381,19 @@ This is a small change demonstrated as follows:: Base = declarative_base() + class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) data = Column(String) + e = create_engine("sqlite://", echo=True) Base.metadata.create_all(e) sess = Session(e) - a1 = A(data='a1') + a1 = A(data="a1") sess.add(a1) sess.commit() # a1 is now expired @@ -388,11 +401,23 @@ This is a small change demonstrated as follows:: assert inspect(a1).attrs.data.history == (None, None, None) # in 0.8, this would fail to load the unloaded state. - assert attributes.get_history(a1, 'data') == ((), ['a1',], ()) + assert attributes.get_history(a1, "data") == ( + (), + [ + "a1", + ], + (), + ) # load_history() is now equivalent to get_history() with # passive=PASSIVE_OFF ^ INIT_OK - assert inspect(a1).attrs.data.load_history() == ((), ['a1',], ()) + assert inspect(a1).attrs.data.load_history() == ( + (), + [ + "a1", + ], + (), + ) :ticket:`2787` @@ -452,14 +477,10 @@ use the :meth:`.TypeEngine.with_variant` method:: from sqlalchemy.dialects.mysql import INTEGER d = Date().with_variant( - DATE(storage_format="%(day)02d.%(month)02d.%(year)04d"), - "sqlite" - ) + DATE(storage_format="%(day)02d.%(month)02d.%(year)04d"), "sqlite" + ) - i = Integer().with_variant( - INTEGER(display_width=5), - "mysql" - ) + i = Integer().with_variant(INTEGER(display_width=5), "mysql") :meth:`.TypeEngine.with_variant` isn't new, it was added in SQLAlchemy 0.7.2. 
So code that is running on the 0.8 series can be corrected to use @@ -549,7 +570,7 @@ The precedence rules for COLLATE have been changed Previously, an expression like the following:: - print((column('x') == 'somevalue').collate("en_EN")) + print((column("x") == "somevalue").collate("en_EN")) would produce an expression like this:: @@ -567,7 +588,7 @@ The potentially backwards incompatible change arises if the :meth:`.ColumnOperators.collate` operator is being applied to the right-hand column, as follows:: - print(column('x') == literal('somevalue').collate("en_EN")) + print(column("x") == literal("somevalue").collate("en_EN")) In 0.8, this produces:: @@ -584,11 +605,11 @@ The :meth:`.ColumnOperators.collate` operator now works more appropriately withi generated:: >>> # 0.8 - >>> print(column('x').collate('en_EN').desc()) + >>> print(column("x").collate("en_EN").desc()) (x COLLATE en_EN) DESC >>> # 0.9 - >>> print(column('x').collate('en_EN').desc()) + >>> print(column("x").collate("en_EN").desc()) x COLLATE en_EN DESC :ticket:`2879` @@ -604,7 +625,7 @@ The :class:`_postgresql.ENUM` type will now apply escaping to single quote signs within the enumerated values:: >>> from sqlalchemy.dialects import postgresql - >>> type = postgresql.ENUM('one', 'two', "three's", name="myenum") + >>> type = postgresql.ENUM("one", "two", "three's", name="myenum") >>> from sqlalchemy.dialects.postgresql import base >>> print(base.CreateEnumType(type).compile(dialect=postgresql.dialect())) CREATE TYPE myenum AS ENUM ('one','two','three''s') @@ -633,6 +654,7 @@ from all locations in which it had been established:: """listen for before_insert""" # ... + event.remove(MyClass, "before_insert", my_before_insert) In the example above, the ``propagate=True`` flag is set. 
This @@ -689,13 +711,9 @@ Setting an option on path that is based on a subclass requires that all links in the path be spelled out as class bound attributes, since the :meth:`.PropComparator.of_type` method needs to be called:: - session.query(Company).\ - options( - subqueryload_all( - Company.employees.of_type(Engineer), - Engineer.machines - ) - ) + session.query(Company).options( + subqueryload_all(Company.employees.of_type(Engineer), Engineer.machines) + ) **New Way** @@ -726,7 +744,6 @@ but the intent is clearer:: query(User).options(defaultload("orders").defaultload("items").subqueryload("keywords")) - The dotted style can still be taken advantage of, particularly in the case of skipping over several path elements:: @@ -791,7 +808,6 @@ others:: # undefer all Address columns query(User).options(defaultload(User.addresses).undefer("*")) - :ticket:`1418` @@ -850,9 +866,9 @@ compatible construct can be passed to the new method :meth:`_expression.Insert.f where it will be used to render an ``INSERT .. SELECT`` construct:: >>> from sqlalchemy.sql import table, column - >>> t1 = table('t1', column('a'), column('b')) - >>> t2 = table('t2', column('x'), column('y')) - >>> print(t1.insert().from_select(['a', 'b'], t2.select().where(t2.c.y == 5))) + >>> t1 = table("t1", column("a"), column("b")) + >>> t2 = table("t2", column("x"), column("y")) + >>> print(t1.insert().from_select(["a", "b"], t2.select().where(t2.c.y == 5))) INSERT INTO t1 (a, b) SELECT t2.x, t2.y FROM t2 WHERE t2.y = :y_1 @@ -861,7 +877,7 @@ The construct is smart enough to also accommodate ORM objects such as classes and :class:`_query.Query` objects:: s = Session() - q = s.query(User.id, User.name).filter_by(name='ed') + q = s.query(User.id, User.name).filter_by(name="ed") ins = insert(Address).from_select((Address.id, Address.email_address), q) rendering:: @@ -920,9 +936,10 @@ for ``.decimal_return_scale`` if it is not otherwise specified. 
If both from sqlalchemy.dialects.mysql import DOUBLE import decimal - data = Table('data', metadata, - Column('double_value', - mysql.DOUBLE(decimal_return_scale=12, asdecimal=True)) + data = Table( + "data", + metadata, + Column("double_value", mysql.DOUBLE(decimal_return_scale=12, asdecimal=True)), ) conn.execute( @@ -938,7 +955,6 @@ for ``.decimal_return_scale`` if it is not otherwise specified. If both # much precision for DOUBLE assert result == decimal.Decimal("45.768392065789") - :ticket:`2867` @@ -1004,8 +1020,9 @@ from a backref:: Base = declarative_base() + class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) bs = relationship("B", backref="a") @@ -1015,21 +1032,22 @@ from a backref:: print("A.bs validator") return item + class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) - a_id = Column(Integer, ForeignKey('a.id')) + a_id = Column(Integer, ForeignKey("a.id")) @validates("a", include_backrefs=False) def validate_a(self, key, item): print("B.a validator") return item + a1 = A() a1.bs.append(B()) # prints only "A.bs validator" - :ticket:`1535` @@ -1262,14 +1280,9 @@ without any subqueries generated:: employee_alias = with_polymorphic(Person, [Engineer, Manager], flat=True) - session.query(Company).join( - Company.employees.of_type(employee_alias) - ).filter( - or_( - Engineer.primary_language == 'python', - Manager.manager_name == 'dilbert' - ) - ) + session.query(Company).join(Company.employees.of_type(employee_alias)).filter( + or_(Engineer.primary_language == "python", Manager.manager_name == "dilbert") + ) Generates (everywhere except SQLite):: @@ -1295,7 +1308,9 @@ on the right side. 
Normally, a joined eager load chain like the following:: - query(User).options(joinedload("orders", innerjoin=False).joinedload("items", innerjoin=True)) + query(User).options( + joinedload("orders", innerjoin=False).joinedload("items", innerjoin=True) + ) Would not produce an inner join; because of the LEFT OUTER JOIN from user->order, joined eager loading could not use an INNER join from order->items without changing @@ -1311,7 +1326,9 @@ the new "right-nested joins are OK" logic would kick in, and we'd get:: Since we missed the boat on that, to avoid further regressions we've added the above functionality by specifying the string ``"nested"`` to :paramref:`_orm.joinedload.innerjoin`:: - query(User).options(joinedload("orders", innerjoin=False).joinedload("items", innerjoin="nested")) + query(User).options( + joinedload("orders", innerjoin=False).joinedload("items", innerjoin="nested") + ) This feature is new in 0.9.4. @@ -1406,16 +1423,18 @@ replacement operation, which in turn should cause the item to be removed from a previous collection:: class Parent(Base): - __tablename__ = 'parent' + __tablename__ = "parent" id = Column(Integer, primary_key=True) children = relationship("Child", backref="parent") + class Child(Base): - __tablename__ = 'child' + __tablename__ = "child" id = Column(Integer, primary_key=True) - parent_id = Column(ForeignKey('parent.id')) + parent_id = Column(ForeignKey("parent.id")) + p1 = Parent() p2 = Parent() @@ -1520,7 +1539,7 @@ Starting with a table such as this:: from sqlalchemy import Table, Boolean, Integer, Column, MetaData - t1 = Table('t', MetaData(), Column('x', Boolean()), Column('y', Integer)) + t1 = Table("t", MetaData(), Column("x", Boolean()), Column("y", Integer)) A select construct will now render the boolean column as a binary expression on backends that don't feature ``true``/``false`` constant behavior:: @@ -1535,8 +1554,9 @@ The :func:`.and_` and :func:`.or_` constructs will now exhibit quasi "short circuit" behavior, 
that is truncating a rendered expression, when a :func:`.true` or :func:`.false` constant is present:: - >>> print(select([t1]).where(and_(t1.c.y > 5, false())).compile( - ... dialect=postgresql.dialect())) + >>> print( + ... select([t1]).where(and_(t1.c.y > 5, false())).compile(dialect=postgresql.dialect()) + ... ) SELECT t.x, t.y FROM t WHERE false :func:`.true` can be used as the base to build up an expression:: @@ -1549,8 +1569,7 @@ The :func:`.and_` and :func:`.or_` constructs will now exhibit quasi The boolean constants :func:`.true` and :func:`.false` themselves render as ``0 = 1`` and ``1 = 1`` for a backend with no boolean constants:: - >>> print(select([t1]).where(and_(t1.c.y > 5, false())).compile( - ... dialect=mysql.dialect())) + >>> print(select([t1]).where(and_(t1.c.y > 5, false())).compile(dialect=mysql.dialect())) SELECT t.x, t.y FROM t WHERE 0 = 1 Interpretation of ``None``, while not particularly valid SQL, is at least @@ -1581,7 +1600,7 @@ E.g. an example like:: from sqlalchemy.sql import table, column, select, func - t = table('t', column('c1'), column('c2')) + t = table("t", column("c1"), column("c2")) expr = (func.foo(t.c.c1) + t.c.c2).label("expr") stmt = select([expr]).order_by(expr) @@ -1620,16 +1639,16 @@ The ``__eq__()`` method now compares both sides as a tuple and also an ``__lt__()`` method has been added:: users.insert().execute( - dict(user_id=1, user_name='foo'), - dict(user_id=2, user_name='bar'), - dict(user_id=3, user_name='def'), - ) + dict(user_id=1, user_name="foo"), + dict(user_id=2, user_name="bar"), + dict(user_id=3, user_name="def"), + ) rows = users.select().order_by(users.c.user_name).execute().fetchall() - eq_(rows, [(2, 'bar'), (3, 'def'), (1, 'foo')]) + eq_(rows, [(2, "bar"), (3, "def"), (1, "foo")]) - eq_(sorted(rows), [(1, 'foo'), (2, 'bar'), (3, 'def')]) + eq_(sorted(rows), [(1, "foo"), (2, "bar"), (3, "def")]) :ticket:`2848` @@ -1667,7 +1686,7 @@ Above, ``bp`` remains unchanged, but the ``String`` type will be 
used when the statement is executed, which we can see by examining the ``binds`` dictionary:: >>> compiled = stmt.compile() - >>> compiled.binds['some_col'].type + >>> compiled.binds["some_col"].type String The feature allows custom types to take their expected effect within INSERT/UPDATE @@ -1727,10 +1746,10 @@ Scenarios which now work correctly include: >>> from sqlalchemy import Table, MetaData, Column, Integer, ForeignKey >>> metadata = MetaData() - >>> t2 = Table('t2', metadata, Column('t1id', ForeignKey('t1.id'))) + >>> t2 = Table("t2", metadata, Column("t1id", ForeignKey("t1.id"))) >>> t2.c.t1id.type NullType() - >>> t1 = Table('t1', metadata, Column('id', Integer, primary_key=True)) + >>> t1 = Table("t1", metadata, Column("id", Integer, primary_key=True)) >>> t2.c.t1id.type Integer() @@ -1738,16 +1757,23 @@ Scenarios which now work correctly include: >>> from sqlalchemy import Table, MetaData, Column, Integer, ForeignKeyConstraint >>> metadata = MetaData() - >>> t2 = Table('t2', metadata, - ... Column('t1a'), Column('t1b'), - ... ForeignKeyConstraint(['t1a', 't1b'], ['t1.a', 't1.b'])) + >>> t2 = Table( + ... "t2", + ... metadata, + ... Column("t1a"), + ... Column("t1b"), + ... ForeignKeyConstraint(["t1a", "t1b"], ["t1.a", "t1.b"]), + ... ) >>> t2.c.t1a.type NullType() >>> t2.c.t1b.type NullType() - >>> t1 = Table('t1', metadata, - ... Column('a', Integer, primary_key=True), - ... Column('b', Integer, primary_key=True)) + >>> t1 = Table( + ... "t1", + ... metadata, + ... Column("a", Integer, primary_key=True), + ... Column("b", Integer, primary_key=True), + ... 
) >>> t2.c.t1a.type Integer() >>> t2.c.t1b.type @@ -1758,13 +1784,13 @@ Scenarios which now work correctly include: >>> from sqlalchemy import Table, MetaData, Column, Integer, ForeignKey >>> metadata = MetaData() - >>> t2 = Table('t2', metadata, Column('t1id', ForeignKey('t1.id'))) - >>> t3 = Table('t3', metadata, Column('t2t1id', ForeignKey('t2.t1id'))) + >>> t2 = Table("t2", metadata, Column("t1id", ForeignKey("t1.id"))) + >>> t3 = Table("t3", metadata, Column("t2t1id", ForeignKey("t2.t1id"))) >>> t2.c.t1id.type NullType() >>> t3.c.t2t1id.type NullType() - >>> t1 = Table('t1', metadata, Column('id', Integer, primary_key=True)) + >>> t1 = Table("t1", metadata, Column("id", Integer, primary_key=True)) >>> t2.c.t1id.type Integer() >>> t3.c.t2t1id.type diff --git a/doc/build/changelog/migration_10.rst b/doc/build/changelog/migration_10.rst index 68fb0bd777..6501911111 100644 --- a/doc/build/changelog/migration_10.rst +++ b/doc/build/changelog/migration_10.rst @@ -71,15 +71,16 @@ once, a query as a pre-compiled unit begins to be feasible:: bakery = baked.bakery() + def search_for_user(session, username, email=None): baked_query = bakery(lambda session: session.query(User)) - baked_query += lambda q: q.filter(User.name == bindparam('username')) + baked_query += lambda q: q.filter(User.name == bindparam("username")) baked_query += lambda q: q.order_by(User.id) if email: - baked_query += lambda q: q.filter(User.email == bindparam('email')) + baked_query += lambda q: q.filter(User.email == bindparam("email")) result = baked_query(session).params(username=username, email=email).all() @@ -109,10 +110,11 @@ call upon mixin-established columns and will receive a reference to the correct @declared_attr def foobar_prop(cls): - return column_property('foobar: ' + cls.foobar) + return column_property("foobar: " + cls.foobar) + class SomeClass(HasFooBar, Base): - __tablename__ = 'some_table' + __tablename__ = "some_table" id = Column(Integer, primary_key=True) Above, 
``SomeClass.foobar_prop`` will be invoked against ``SomeClass``, @@ -132,10 +134,11 @@ this:: @declared_attr def foobar_prop(cls): - return column_property('foobar: ' + cls.foobar) + return column_property("foobar: " + cls.foobar) + class SomeClass(HasFooBar, Base): - __tablename__ = 'some_table' + __tablename__ = "some_table" id = Column(Integer, primary_key=True) Previously, ``SomeClass`` would be mapped with one particular copy of @@ -167,16 +170,19 @@ applied:: @declared_attr.cascading def id(cls): if has_inherited_table(cls): - return Column(ForeignKey('myclass.id'), primary_key=True) + return Column(ForeignKey("myclass.id"), primary_key=True) else: return Column(Integer, primary_key=True) + class MyClass(HasIdMixin, Base): - __tablename__ = 'myclass' + __tablename__ = "myclass" # ... + class MySubClass(MyClass): - "" + """ """ + # ... .. seealso:: @@ -189,13 +195,17 @@ on the abstract base:: from sqlalchemy import Column, Integer, ForeignKey from sqlalchemy.orm import relationship - from sqlalchemy.ext.declarative import (declarative_base, declared_attr, - AbstractConcreteBase) + from sqlalchemy.ext.declarative import ( + declarative_base, + declared_attr, + AbstractConcreteBase, + ) Base = declarative_base() + class Something(Base): - __tablename__ = u'something' + __tablename__ = "something" id = Column(Integer, primary_key=True) @@ -212,9 +222,8 @@ on the abstract base:: class Concrete(Abstract): - __tablename__ = u'cca' - __mapper_args__ = {'polymorphic_identity': 'cca', 'concrete': True} - + __tablename__ = "cca" + __mapper_args__ = {"polymorphic_identity": "cca", "concrete": True} The above mapping will set up a table ``cca`` with both an ``id`` and a ``something_id`` column, and ``Concrete`` will also have a relationship @@ -240,17 +249,19 @@ of load that's improved the most:: Base = declarative_base() + class Foo(Base): __table__ = Table( - 'foo', Base.metadata, - Column('id', Integer, primary_key=True), - Column('a', Integer(), nullable=False), - 
Column('b', Integer(), nullable=False), - Column('c', Integer(), nullable=False), + "foo", + Base.metadata, + Column("id", Integer, primary_key=True), + Column("a", Integer(), nullable=False), + Column("b", Integer(), nullable=False), + Column("c", Integer(), nullable=False), ) - engine = create_engine( - 'mysql+mysqldb://scott:tiger@localhost/test', echo=True) + + engine = create_engine("mysql+mysqldb://scott:tiger@localhost/test", echo=True) sess = Session(engine) @@ -488,7 +499,7 @@ at the attribute. Below this is illustrated using the return self.value + 5 - inspect(SomeObject).all_orm_descriptors.some_prop.info['foo'] = 'bar' + inspect(SomeObject).all_orm_descriptors.some_prop.info["foo"] = "bar" It is also available as a constructor argument for all :class:`.SchemaItem` objects (e.g. :class:`_schema.ForeignKey`, :class:`.UniqueConstraint` etc.) as well @@ -510,20 +521,19 @@ as the "order by label" logic introduced in 0.9 (see :ref:`migration_1068`). Given a mapping like the following:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) + class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) - a_id = Column(ForeignKey('a.id')) + a_id = Column(ForeignKey("a.id")) - A.b = column_property( - select([func.max(B.id)]).where(B.a_id == A.id).correlate(A) - ) + A.b = column_property(select([func.max(B.id)]).where(B.a_id == A.id).correlate(A)) A simple scenario that included "A.b" twice would fail to render correctly:: @@ -550,12 +560,12 @@ There were also many scenarios where the "order by" logic would fail to order by label, for example if the mapping were "polymorphic":: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) type = Column(String) - __mapper_args__ = {'polymorphic_on': type, 'with_polymorphic': '*'} + __mapper_args__ = {"polymorphic_on": type, "with_polymorphic": "*"} The order_by would fail to use the label, as it 
would be anonymized due to the polymorphic loading:: @@ -592,7 +602,7 @@ any SQL expression, in addition to integer values, as arguments. The ORM this is used to allow a bound parameter to be passed, which can be substituted with a value later:: - sel = select([table]).limit(bindparam('mylimit')).offset(bindparam('myoffset')) + sel = select([table]).limit(bindparam("mylimit")).offset(bindparam("myoffset")) Dialects which don't support non-integer LIMIT or OFFSET expressions may continue to not support this behavior; third party dialects may also need modification @@ -702,13 +712,9 @@ CHECK Constraints now support the ``%(column_0_name)s`` token in naming conventi The ``%(column_0_name)s`` will derive from the first column found in the expression of a :class:`.CheckConstraint`:: - metadata = MetaData( - naming_convention={"ck": "ck_%(table_name)s_%(column_0_name)s"} - ) + metadata = MetaData(naming_convention={"ck": "ck_%(table_name)s_%(column_0_name)s"}) - foo = Table('foo', metadata, - Column('value', Integer), - ) + foo = Table("foo", metadata, Column("value", Integer)) CheckConstraint(foo.c.value > 5) @@ -743,10 +749,7 @@ Since at least version 0.8, a :class:`.Constraint` has had the ability to m = MetaData() - t = Table('t', m, - Column('a', Integer), - Column('b', Integer) - ) + t = Table("t", m, Column("a", Integer), Column("b", Integer)) uq = UniqueConstraint(t.c.a, t.c.b) # will auto-attach to Table @@ -762,12 +765,12 @@ the :class:`.Constraint` is also added:: m = MetaData() - a = Column('a', Integer) - b = Column('b', Integer) + a = Column("a", Integer) + b = Column("b", Integer) uq = UniqueConstraint(a, b) - t = Table('t', m, a, b) + t = Table("t", m, a, b) assert uq in t.constraints # constraint auto-attached @@ -781,12 +784,12 @@ tracking for the addition of names to a :class:`_schema.Table`:: m = MetaData() - a = Column('a', Integer) - b = Column('b', Integer) + a = Column("a", Integer) + b = Column("b", Integer) - uq = UniqueConstraint(a, 'b') + uq = 
UniqueConstraint(a, "b") - t = Table('t', m, a, b) + t = Table("t", m, a, b) # constraint *not* auto-attached, as we do not have tracking # to locate when a name 'b' becomes available on the table @@ -806,18 +809,17 @@ the :class:`.Constraint` is constructed:: m = MetaData() - a = Column('a', Integer) - b = Column('b', Integer) + a = Column("a", Integer) + b = Column("b", Integer) - t = Table('t', m, a, b) + t = Table("t", m, a, b) - uq = UniqueConstraint(a, 'b') + uq = UniqueConstraint(a, "b") # constraint auto-attached normally as in older versions assert uq in t.constraints - :ticket:`3341` :ticket:`3411` @@ -838,12 +840,11 @@ expressions are rendered as constants into the SELECT statement:: m = MetaData() t = Table( - 't', m, - Column('x', Integer), - Column('y', Integer, default=func.somefunction())) + "t", m, Column("x", Integer), Column("y", Integer, default=func.somefunction()) + ) stmt = select([t.c.x]) - print(t.insert().from_select(['x'], stmt)) + print(t.insert().from_select(["x"], stmt)) Will render:: @@ -870,9 +871,10 @@ embedded in SQL to render correctly, such as:: metadata = MetaData() - tbl = Table("derp", metadata, - Column("arr", ARRAY(Text), - server_default=array(["foo", "bar", "baz"])), + tbl = Table( + "derp", + metadata, + Column("arr", ARRAY(Text), server_default=array(["foo", "bar", "baz"])), ) print(CreateTable(tbl).compile(dialect=postgresql.dialect())) @@ -981,8 +983,9 @@ emitted for ten of the parameter sets, out of a total of 1000:: warnings.filterwarnings("once") for i in range(1000): - e.execute(select([cast( - ('foo_%d' % random.randint(0, 1000000)).encode('ascii'), Unicode)])) + e.execute( + select([cast(("foo_%d" % random.randint(0, 1000000)).encode("ascii"), Unicode)]) + ) The format of the warning here is:: @@ -1015,40 +1018,41 @@ onto the class. 
The string names are now resolved as attribute names in earnest:: class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) - name = Column('user_name', String(50)) + name = Column("user_name", String(50)) Above, the column ``user_name`` is mapped as ``name``. Previously, a call to :meth:`_query.Query.update` that was passed strings would have to have been called as follows:: - session.query(User).update({'user_name': 'moonbeam'}) + session.query(User).update({"user_name": "moonbeam"}) The given string is now resolved against the entity:: - session.query(User).update({'name': 'moonbeam'}) + session.query(User).update({"name": "moonbeam"}) It is typically preferable to use the attribute directly, to avoid any ambiguity:: - session.query(User).update({User.name: 'moonbeam'}) + session.query(User).update({User.name: "moonbeam"}) The change also indicates that synonyms and hybrid attributes can be referred to by string name as well:: class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) - name = Column('user_name', String(50)) + name = Column("user_name", String(50)) @hybrid_property def fullname(self): return self.name - session.query(User).update({'fullname': 'moonbeam'}) + + session.query(User).update({"fullname": "moonbeam"}) :ticket:`3228` @@ -1108,13 +1112,14 @@ it only became apparent as a result of :ticket:`3371`. Given a mapping:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) + class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) - a_id = Column(ForeignKey('a.id')) + a_id = Column(ForeignKey("a.id")) a = relationship("A") Given ``A``, with primary key of 7, but which we changed to be 10 @@ -1254,15 +1259,16 @@ attributes, a change in behavior can be seen here when assigning None. 
Given a mapping:: class A(Base): - __tablename__ = 'table_a' + __tablename__ = "table_a" id = Column(Integer, primary_key=True) + class B(Base): - __tablename__ = 'table_b' + __tablename__ = "table_b" id = Column(Integer, primary_key=True) - a_id = Column(ForeignKey('table_a.id')) + a_id = Column(ForeignKey("table_a.id")) a = relationship(A) In 1.0, the relationship-bound attribute takes precedence over the FK-bound @@ -1277,7 +1283,7 @@ only takes effect if a value is assigned; the None is not considered:: session.flush() b1 = B() - b1.a = a1 # we expect a_id to be '1'; takes precedence in 0.9 and 1.0 + b1.a = a1 # we expect a_id to be '1'; takes precedence in 0.9 and 1.0 b2 = B() b2.a = None # we expect a_id to be None; takes precedence only in 1.0 @@ -1339,7 +1345,7 @@ with yield-per (subquery loading could be in theory, however). When this error is raised, the :func:`.lazyload` option can be sent with an asterisk:: - q = sess.query(Object).options(lazyload('*')).yield_per(100) + q = sess.query(Object).options(lazyload("*")).yield_per(100) or use :meth:`_query.Query.enable_eagerloads`:: @@ -1348,8 +1354,11 @@ or use :meth:`_query.Query.enable_eagerloads`:: The :func:`.lazyload` option has the advantage that additional many-to-one joined loader options can still be used:: - q = sess.query(Object).options( - lazyload('*'), joinedload("some_manytoone")).yield_per(100) + q = ( + sess.query(Object) + .options(lazyload("*"), joinedload("some_manytoone")) + .yield_per(100) + ) .. 
_bug_3233: @@ -1370,15 +1379,17 @@ Starting with a mapping as:: Base = declarative_base() + class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) bs = relationship("B") + class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) - a_id = Column(ForeignKey('a.id')) + a_id = Column(ForeignKey("a.id")) A query that joins to ``A.bs`` twice:: @@ -1392,9 +1403,9 @@ Will render:: The query deduplicates the redundant ``A.bs`` because it is attempting to support a case like the following:: - s.query(A).join(A.bs).\ - filter(B.foo == 'bar').\ - reset_joinpoint().join(A.bs, B.cs).filter(C.bar == 'bat') + s.query(A).join(A.bs).filter(B.foo == "bar").reset_joinpoint().join(A.bs, B.cs).filter( + C.bar == "bat" + ) That is, the ``A.bs`` is part of a "path". As part of :ticket:`3367`, arriving at the same endpoint twice without it being part of a @@ -1437,31 +1448,33 @@ a mapping as follows:: Base = declarative_base() + class A(Base): __tablename__ = "a" id = Column(Integer, primary_key=True) type = Column(String) - __mapper_args__ = {'polymorphic_on': type, 'polymorphic_identity': 'a'} + __mapper_args__ = {"polymorphic_on": type, "polymorphic_identity": "a"} class ASub1(A): - __mapper_args__ = {'polymorphic_identity': 'asub1'} + __mapper_args__ = {"polymorphic_identity": "asub1"} class ASub2(A): - __mapper_args__ = {'polymorphic_identity': 'asub2'} + __mapper_args__ = {"polymorphic_identity": "asub2"} class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) a_id = Column(Integer, ForeignKey("a.id")) - a = relationship("A", primaryjoin="B.a_id == A.id", backref='b') + a = relationship("A", primaryjoin="B.a_id == A.id", backref="b") + s = Session() @@ -1543,26 +1556,28 @@ Previously, the sample code looked like:: from sqlalchemy.orm import Bundle + class DictBundle(Bundle): def create_row_processor(self, query, procs, labels): """Override create_row_processor 
to return values as dictionaries""" + def proc(row, result): - return dict( - zip(labels, (proc(row, result) for proc in procs)) - ) + return dict(zip(labels, (proc(row, result) for proc in procs))) + return proc The unused ``result`` member is now removed:: from sqlalchemy.orm import Bundle + class DictBundle(Bundle): def create_row_processor(self, query, procs, labels): """Override create_row_processor to return values as dictionaries""" + def proc(row): - return dict( - zip(labels, (proc(row) for proc in procs)) - ) + return dict(zip(labels, (proc(row) for proc in procs))) + return proc .. seealso:: @@ -1587,7 +1602,8 @@ join eager load will use a right-nested join. ``"nested"`` is now implied when using ``innerjoin=True``:: query(User).options( - joinedload("orders", innerjoin=False).joinedload("items", innerjoin=True)) + joinedload("orders", innerjoin=False).joinedload("items", innerjoin=True) + ) With the new default, this will render the FROM clause in the form:: @@ -1601,7 +1617,8 @@ optimization parameter to take effect in all cases. 
To get the older behavior, use ``innerjoin="unnested"``:: query(User).options( - joinedload("orders", innerjoin=False).joinedload("items", innerjoin="unnested")) + joinedload("orders", innerjoin=False).joinedload("items", innerjoin="unnested") + ) This will avoid right-nested joins and chain the joins together using all OUTER joins despite the innerjoin directive:: @@ -1626,15 +1643,16 @@ Subqueries no longer applied to uselist=False joined eager loads Given a joined eager load like the following:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) b = relationship("B", uselist=False) class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) - a_id = Column(ForeignKey('a.id')) + a_id = Column(ForeignKey("a.id")) + s = Session() print(s.query(A).options(joinedload(A.b)).limit(5)) @@ -1709,7 +1727,8 @@ Change to single-table-inheritance criteria when using from_self(), count() Given a single-table inheritance mapping, such as:: class Widget(Base): - __table__ = 'widget_table' + __table__ = "widget_table" + class FooWidget(Widget): pass @@ -1769,20 +1788,20 @@ the "single table criteria" when joining on a relationship. 
Given a mapping as:: class Widget(Base): - __tablename__ = 'widget' + __tablename__ = "widget" id = Column(Integer, primary_key=True) type = Column(String) - related_id = Column(ForeignKey('related.id')) + related_id = Column(ForeignKey("related.id")) related = relationship("Related", backref="widget") - __mapper_args__ = {'polymorphic_on': type} + __mapper_args__ = {"polymorphic_on": type} class FooWidget(Widget): - __mapper_args__ = {'polymorphic_identity': 'foo'} + __mapper_args__ = {"polymorphic_identity": "foo"} class Related(Base): - __tablename__ = 'related' + __tablename__ = "related" id = Column(Integer, primary_key=True) It's been the behavior for quite some time that a JOIN on the relationship @@ -1850,7 +1869,7 @@ behavior of passing string values that become parameterized:: # This is a normal Core expression with a string argument - # we aren't talking about this!! - stmt = select([sometable]).where(sometable.c.somecolumn == 'value') + stmt = select([sometable]).where(sometable.c.somecolumn == "value") The Core tutorial has long featured an example of the use of this technique, using a :func:`_expression.select` construct where virtually all components of it @@ -1893,24 +1912,28 @@ one wishes the warnings to be exceptions, the should be used:: import warnings - warnings.simplefilter("error") # all warnings raise an exception + + warnings.simplefilter("error") # all warnings raise an exception Given the above warnings, our statement works just fine, but to get rid of the warnings we would rewrite our statement as follows:: from sqlalchemy import select, text - stmt = select([ - text("a"), - text("b") - ]).where(text("a = b")).select_from(text("sometable")) + + stmt = ( + select([text("a"), text("b")]).where(text("a = b")).select_from(text("sometable")) + ) and as the warnings suggest, we can give our statement more specificity about the text if we use :func:`_expression.column` and :func:`.table`:: from sqlalchemy import select, text, column, table - 
stmt = select([column("a"), column("b")]).\ - where(text("a = b")).select_from(table("sometable")) + stmt = ( + select([column("a"), column("b")]) + .where(text("a = b")) + .select_from(table("sometable")) + ) Where note also that :func:`.table` and :func:`_expression.column` can now be imported from "sqlalchemy" without the "sql" part. @@ -1927,10 +1950,11 @@ of this change we have enhanced its functionality. When we have a :func:`_expression.select` or :class:`_query.Query` that refers to some column name or named label, we might want to GROUP BY and/or ORDER BY known columns or labels:: - stmt = select([ - user.c.name, - func.count(user.c.id).label("id_count") - ]).group_by("name").order_by("id_count") + stmt = ( + select([user.c.name, func.count(user.c.id).label("id_count")]) + .group_by("name") + .order_by("id_count") + ) In the above statement we expect to see "ORDER BY id_count", as opposed to a re-statement of the function. The string argument given is actively @@ -1944,10 +1968,9 @@ the ``"name"`` expression has been resolved to ``users.name``!):: However, if we refer to a name that cannot be located, then we get the warning again, as below:: - stmt = select([ - user.c.name, - func.count(user.c.id).label("id_count") - ]).order_by("some_label") + stmt = select([user.c.name, func.count(user.c.id).label("id_count")]).order_by( + "some_label" + ) The output does what we say, but again it warns us:: @@ -1995,16 +2018,21 @@ that of an "executemany" style of invocation:: counter = itertools.count(1) t = Table( - 'my_table', metadata, - Column('id', Integer, default=lambda: next(counter)), - Column('data', String) + "my_table", + metadata, + Column("id", Integer, default=lambda: next(counter)), + Column("data", String), ) - conn.execute(t.insert().values([ - {"data": "d1"}, - {"data": "d2"}, - {"data": "d3"}, - ])) + conn.execute( + t.insert().values( + [ + {"data": "d1"}, + {"data": "d2"}, + {"data": "d3"}, + ] + ) + ) The above example will invoke 
``next(counter)`` for each row individually as would be expected:: @@ -2034,16 +2062,21 @@ value is required; if an omitted value only refers to a server-side default, an exception is raised:: t = Table( - 'my_table', metadata, - Column('id', Integer, primary_key=True), - Column('data', String, server_default='some default') + "my_table", + metadata, + Column("id", Integer, primary_key=True), + Column("data", String, server_default="some default"), ) - conn.execute(t.insert().values([ - {"data": "d1"}, - {"data": "d2"}, - {}, - ])) + conn.execute( + t.insert().values( + [ + {"data": "d1"}, + {"data": "d2"}, + {}, + ] + ) + ) will raise:: @@ -2109,7 +2142,7 @@ data is needed. A :class:`_schema.Table` can be set up for reflection by passing :paramref:`_schema.Table.autoload_with` alone:: - my_table = Table('my_table', metadata, autoload_with=some_engine) + my_table = Table("my_table", metadata, autoload_with=some_engine) :ticket:`3027` @@ -2224,8 +2257,8 @@ An :class:`_postgresql.ENUM` that is created **without** being explicitly associated with a :class:`_schema.MetaData` object will be created *and* dropped corresponding to :meth:`_schema.Table.create` and :meth:`_schema.Table.drop`:: - table = Table('sometable', metadata, - Column('some_enum', ENUM('a', 'b', 'c', name='myenum')) + table = Table( + "sometable", metadata, Column("some_enum", ENUM("a", "b", "c", name="myenum")) ) table.create(engine) # will emit CREATE TYPE and CREATE TABLE @@ -2242,11 +2275,9 @@ corresponding to :meth:`_schema.Table.create` and :meth:`_schema.Table.drop`, wi the exception of :meth:`_schema.Table.create` called with the ``checkfirst=True`` flag:: - my_enum = ENUM('a', 'b', 'c', name='myenum', metadata=metadata) + my_enum = ENUM("a", "b", "c", name="myenum", metadata=metadata) - table = Table('sometable', metadata, - Column('some_enum', my_enum) - ) + table = Table("sometable", metadata, Column("some_enum", my_enum)) # will fail: ENUM 'my_enum' does not exist table.create(engine) @@ 
-2256,10 +2287,9 @@ flag:: table.drop(engine) # will emit DROP TABLE, *not* DROP TYPE - metadata.drop_all(engine) # will emit DROP TYPE - - metadata.create_all(engine) # will emit CREATE TYPE + metadata.drop_all(engine) # will emit DROP TYPE + metadata.create_all(engine) # will emit CREATE TYPE :ticket:`3319` @@ -2334,13 +2364,14 @@ so that code like the following may proceed:: metadata = MetaData() user_tmp = Table( - "user_tmp", metadata, + "user_tmp", + metadata, Column("id", INT, primary_key=True), - Column('name', VARCHAR(50)), - prefixes=['TEMPORARY'] + Column("name", VARCHAR(50)), + prefixes=["TEMPORARY"], ) - e = create_engine("postgresql://scott:tiger@localhost/test", echo='debug') + e = create_engine("postgresql://scott:tiger@localhost/test", echo="debug") with e.begin() as conn: user_tmp.create(conn, checkfirst=True) @@ -2357,21 +2388,23 @@ the temporary table:: metadata = MetaData() user_tmp = Table( - "user_tmp", metadata, + "user_tmp", + metadata, Column("id", INT, primary_key=True), - Column('name', VARCHAR(50)), - prefixes=['TEMPORARY'] + Column("name", VARCHAR(50)), + prefixes=["TEMPORARY"], ) - e = create_engine("postgresql://scott:tiger@localhost/test", echo='debug') + e = create_engine("postgresql://scott:tiger@localhost/test", echo="debug") with e.begin() as conn: user_tmp.create(conn, checkfirst=True) m2 = MetaData() user = Table( - "user_tmp", m2, + "user_tmp", + m2, Column("id", INT, primary_key=True), - Column('name', VARCHAR(50)), + Column("name", VARCHAR(50)), ) # in 0.9, *will create* the new table, overwriting the old one. @@ -2548,11 +2581,13 @@ Code like the following will now function correctly and return floating points on MySQL:: >>> connection.execute( - ... select([ - ... matchtable.c.title.match('Agile Ruby Programming').label('ruby'), - ... matchtable.c.title.match('Dive Python').label('python'), - ... matchtable.c.title - ... ]).order_by(matchtable.c.id) + ... select( + ... [ + ... 
matchtable.c.title.match("Agile Ruby Programming").label("ruby"), + ... matchtable.c.title.match("Dive Python").label("python"), + ... matchtable.c.title, + ... ] + ... ).order_by(matchtable.c.id) ... ) [ (2.0, 0.0, 'Agile Web Development with Ruby On Rails'), @@ -2614,7 +2649,9 @@ Connecting to SQL Server with PyODBC using a DSN-less connection, e.g. with an explicit hostname, now requires a driver name - SQLAlchemy will no longer attempt to guess a default:: - engine = create_engine("mssql+pyodbc://scott:tiger@myhost:port/databasename?driver=SQL+Server+Native+Client+10.0") + engine = create_engine( + "mssql+pyodbc://scott:tiger@myhost:port/databasename?driver=SQL+Server+Native+Client+10.0" + ) SQLAlchemy's previously hardcoded default of "SQL Server" is obsolete on Windows, and SQLAlchemy cannot be tasked with guessing the best driver @@ -2642,13 +2679,16 @@ Improved support for CTEs in Oracle CTE support has been fixed up for Oracle, and there is also a new feature :meth:`_expression.CTE.with_suffixes` that can assist with Oracle's special directives:: - included_parts = select([ - part.c.sub_part, part.c.part, part.c.quantity - ]).where(part.c.part == "p1").\ - cte(name="included_parts", recursive=True).\ - suffix_with( + included_parts = ( + select([part.c.sub_part, part.c.part, part.c.quantity]) + .where(part.c.part == "p1") + .cte(name="included_parts", recursive=True) + .suffix_with( "search depth first by part set ord1", - "cycle part set y_cycle to 1 default 0", dialect='oracle') + "cycle part set y_cycle to 1 default 0", + dialect="oracle", + ) + ) :ticket:`3220` diff --git a/doc/build/changelog/migration_11.rst b/doc/build/changelog/migration_11.rst index 5c1b842b61..a9ede42231 100644 --- a/doc/build/changelog/migration_11.rst +++ b/doc/build/changelog/migration_11.rst @@ -207,29 +207,35 @@ expression, and ``func.date()`` applied to a datetime expression; both examples will return duplicate rows due to the joined eager load unless explicit typing is 
applied:: - result = session.query( - func.substr(A.some_thing, 0, 4), A - ).options(joinedload(A.bs)).all() + result = ( + session.query(func.substr(A.some_thing, 0, 4), A).options(joinedload(A.bs)).all() + ) - users = session.query( - func.date( - User.date_created, 'start of month' - ).label('month'), - User, - ).options(joinedload(User.orders)).all() + users = ( + session.query( + func.date(User.date_created, "start of month").label("month"), + User, + ) + .options(joinedload(User.orders)) + .all() + ) The above examples, in order to retain deduping, should be specified as:: - result = session.query( - func.substr(A.some_thing, 0, 4, type_=String), A - ).options(joinedload(A.bs)).all() + result = ( + session.query(func.substr(A.some_thing, 0, 4, type_=String), A) + .options(joinedload(A.bs)) + .all() + ) - users = session.query( - func.date( - User.date_created, 'start of month', type_=DateTime - ).label('month'), - User, - ).options(joinedload(User.orders)).all() + users = ( + session.query( + func.date(User.date_created, "start of month", type_=DateTime).label("month"), + User, + ) + .options(joinedload(User.orders)) + .all() + ) Additionally, the treatment of a so-called "unhashable" type is slightly different than its been in previous releases; internally we are using @@ -259,7 +265,6 @@ string value:: >>> some_user = User() >>> q = s.query(User).filter(User.name == some_user) - ... 
sqlalchemy.exc.ArgumentError: Object <__main__.User object at 0x103167e90> is not legal as a SQL literal value The exception is now immediate when the comparison is made between @@ -292,18 +297,18 @@ refer to specific elements of an "indexable" data type, such as an array or JSON field:: class Person(Base): - __tablename__ = 'person' + __tablename__ = "person" id = Column(Integer, primary_key=True) data = Column(JSON) - name = index_property('data', 'name') + name = index_property("data", "name") Above, the ``name`` attribute will read/write the field ``"name"`` from the JSON column ``data``, after initializing it to an empty dictionary:: - >>> person = Person(name='foobar') + >>> person = Person(name="foobar") >>> person.name foobar @@ -346,17 +351,18 @@ no longer inappropriately add the "single inheritance" criteria when the query is against a subquery expression such as an exists:: class Widget(Base): - __tablename__ = 'widget' + __tablename__ = "widget" id = Column(Integer, primary_key=True) type = Column(String) data = Column(String) - __mapper_args__ = {'polymorphic_on': type} + __mapper_args__ = {"polymorphic_on": type} class FooWidget(Widget): - __mapper_args__ = {'polymorphic_identity': 'foo'} + __mapper_args__ = {"polymorphic_identity": "foo"} - q = session.query(FooWidget).filter(FooWidget.data == 'bar').exists() + + q = session.query(FooWidget).filter(FooWidget.data == "bar").exists() session.query(q).all() @@ -433,10 +439,12 @@ removed would be lost, and the flush would incorrectly raise an error:: Base = declarative_base() + class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) + e = create_engine("sqlite://", echo=True) Base.metadata.create_all(e) @@ -522,25 +530,23 @@ the :paramref:`.orm.mapper.passive_deletes` option:: class A(Base): __tablename__ = "a" - id = Column('id', Integer, primary_key=True) + id = Column("id", Integer, primary_key=True) type = Column(String) __mapper_args__ = { - 
'polymorphic_on': type, - 'polymorphic_identity': 'a', - 'passive_deletes': True + "polymorphic_on": type, + "polymorphic_identity": "a", + "passive_deletes": True, } class B(A): - __tablename__ = 'b' - b_table_id = Column('b_table_id', Integer, primary_key=True) - bid = Column('bid', Integer, ForeignKey('a.id', ondelete="CASCADE")) - data = Column('data', String) + __tablename__ = "b" + b_table_id = Column("b_table_id", Integer, primary_key=True) + bid = Column("bid", Integer, ForeignKey("a.id", ondelete="CASCADE")) + data = Column("data", String) - __mapper_args__ = { - 'polymorphic_identity': 'b' - } + __mapper_args__ = {"polymorphic_identity": "b"} With the above mapping, the :paramref:`.orm.mapper.passive_deletes` option is configured on the base mapper; it takes effect for all non-base mappers @@ -571,22 +577,24 @@ Same-named backrefs will not raise an error when applied to concrete inheritance The following mapping has always been possible without issue:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) b = relationship("B", foreign_keys="B.a_id", backref="a") + class A1(A): - __tablename__ = 'a1' + __tablename__ = "a1" id = Column(Integer, primary_key=True) b = relationship("B", foreign_keys="B.a1_id", backref="a1") - __mapper_args__ = {'concrete': True} + __mapper_args__ = {"concrete": True} + class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) - a_id = Column(ForeignKey('a.id')) - a1_id = Column(ForeignKey('a1.id')) + a_id = Column(ForeignKey("a.id")) + a1_id = Column(ForeignKey("a1.id")) Above, even though class ``A`` and class ``A1`` have a relationship named ``b``, no conflict warning or error occurs because class ``A1`` is @@ -596,22 +604,22 @@ However, if the relationships were configured the other way, an error would occur:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) class A1(A): - __tablename__ = 'a1' 
+ __tablename__ = "a1" id = Column(Integer, primary_key=True) - __mapper_args__ = {'concrete': True} + __mapper_args__ = {"concrete": True} class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) - a_id = Column(ForeignKey('a.id')) - a1_id = Column(ForeignKey('a1.id')) + a_id = Column(ForeignKey("a.id")) + a1_id = Column(ForeignKey("a1.id")) a = relationship("A", backref="b") a1 = relationship("A1", backref="b") @@ -634,22 +642,21 @@ on inherited mapper ''; this can cause dependency issues during flush". An example is as follows:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) bs = relationship("B") class ASub(A): - __tablename__ = 'a_sub' - id = Column(Integer, ForeignKey('a.id'), primary_key=True) + __tablename__ = "a_sub" + id = Column(Integer, ForeignKey("a.id"), primary_key=True) bs = relationship("B") class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) - a_id = Column(ForeignKey('a.id')) - + a_id = Column(ForeignKey("a.id")) This warning dates back to the 0.4 series in 2007 and is based on a version of the unit of work code that has since been entirely rewritten. Currently, there @@ -672,7 +679,7 @@ A hybrid method or property will now reflect the ``__doc__`` value present in the original docstring:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) name = Column(String) @@ -710,9 +717,9 @@ also propagated from the hybrid descriptor itself, rather than from the underlyi expression. 
That is, accessing ``A.some_name.info`` now returns the same dictionary that you'd get from ``inspect(A).all_orm_descriptors['some_name'].info``:: - >>> A.some_name.info['foo'] = 'bar' + >>> A.some_name.info["foo"] = "bar" >>> from sqlalchemy import inspect - >>> inspect(A).all_orm_descriptors['some_name'].info + >>> inspect(A).all_orm_descriptors["some_name"].info {'foo': 'bar'} Note that this ``.info`` dictionary is **separate** from that of a mapped attribute @@ -739,11 +746,11 @@ consistent. Given:: - u1 = User(id=7, name='x') + u1 = User(id=7, name="x") u1.orders = [ - Order(description='o1', address=Address(id=1, email_address='a')), - Order(description='o2', address=Address(id=1, email_address='b')), - Order(description='o3', address=Address(id=1, email_address='c')) + Order(description="o1", address=Address(id=1, email_address="a")), + Order(description="o2", address=Address(id=1, email_address="b")), + Order(description="o3", address=Address(id=1, email_address="c")), ] sess = Session() @@ -925,32 +932,32 @@ row on a different "path" that doesn't include the attribute. 
This is a deep use case that's hard to reproduce, but the general idea is as follows:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) - b_id = Column(ForeignKey('b.id')) - c_id = Column(ForeignKey('c.id')) + b_id = Column(ForeignKey("b.id")) + c_id = Column(ForeignKey("c.id")) b = relationship("B") c = relationship("C") class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) - c_id = Column(ForeignKey('c.id')) + c_id = Column(ForeignKey("c.id")) c = relationship("C") class C(Base): - __tablename__ = 'c' + __tablename__ = "c" id = Column(Integer, primary_key=True) - d_id = Column(ForeignKey('d.id')) + d_id = Column(ForeignKey("d.id")) d = relationship("D") class D(Base): - __tablename__ = 'd' + __tablename__ = "d" id = Column(Integer, primary_key=True) @@ -959,7 +966,9 @@ deep use case that's hard to reproduce, but the general idea is as follows:: q = s.query(A) q = q.join(A.b).join(c_alias_1, B.c).join(c_alias_1.d) - q = q.options(contains_eager(A.b).contains_eager(B.c, alias=c_alias_1).contains_eager(C.d)) + q = q.options( + contains_eager(A.b).contains_eager(B.c, alias=c_alias_1).contains_eager(C.d) + ) q = q.join(c_alias_2, A.c) q = q.options(contains_eager(A.c, alias=c_alias_2)) @@ -1149,25 +1158,26 @@ statement:: >>> from sqlalchemy import table, column, select, literal, exists >>> orders = table( - ... 'orders', - ... column('region'), - ... column('amount'), - ... column('product'), - ... column('quantity') + ... "orders", + ... column("region"), + ... column("amount"), + ... column("product"), + ... column("quantity"), ... ) >>> >>> upsert = ( ... orders.update() - ... .where(orders.c.region == 'Region1') - ... .values(amount=1.0, product='Product1', quantity=1) - ... .returning(*(orders.c._all_columns)).cte('upsert')) + ... .where(orders.c.region == "Region1") + ... .values(amount=1.0, product="Product1", quantity=1) + ... .returning(*(orders.c._all_columns)) + ... 
.cte("upsert") + ... ) >>> >>> insert = orders.insert().from_select( ... orders.c.keys(), - ... select([ - ... literal('Region1'), literal(1.0), - ... literal('Product1'), literal(1) - ... ]).where(~exists(upsert.select())) + ... select([literal("Region1"), literal(1.0), literal("Product1"), literal(1)]).where( + ... ~exists(upsert.select()) + ... ), ... ) >>> >>> print(insert) # note formatting added for clarity @@ -1198,13 +1208,13 @@ RANGE and ROWS expressions for window functions:: >>> from sqlalchemy import func - >>> print(func.row_number().over(order_by='x', range_=(-5, 10))) + >>> print(func.row_number().over(order_by="x", range_=(-5, 10))) row_number() OVER (ORDER BY x RANGE BETWEEN :param_1 PRECEDING AND :param_2 FOLLOWING) - >>> print(func.row_number().over(order_by='x', rows=(None, 0))) + >>> print(func.row_number().over(order_by="x", rows=(None, 0))) row_number() OVER (ORDER BY x ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) - >>> print(func.row_number().over(order_by='x', range_=(-2, None))) + >>> print(func.row_number().over(order_by="x", range_=(-2, None))) row_number() OVER (ORDER BY x RANGE BETWEEN :param_1 PRECEDING AND UNBOUNDED FOLLOWING) :paramref:`.expression.over.range_` and :paramref:`.expression.over.rows` are specified as @@ -1230,10 +1240,13 @@ correlation of tables that are derived from the same FROM clause as the selectable, e.g. lateral correlation:: >>> from sqlalchemy import table, column, select, true - >>> people = table('people', column('people_id'), column('age'), column('name')) - >>> books = table('books', column('book_id'), column('owner_id')) - >>> subq = select([books.c.book_id]).\ - ... where(books.c.owner_id == people.c.people_id).lateral("book_subq") + >>> people = table("people", column("people_id"), column("age"), column("name")) + >>> books = table("books", column("book_id"), column("owner_id")) + >>> subq = ( + ... select([books.c.book_id]) + ... .where(books.c.owner_id == people.c.people_id) + ... 
.lateral("book_subq") + ... ) >>> print(select([people]).select_from(people.join(subq, true()))) SELECT people.people_id, people.age, people.name FROM people JOIN LATERAL (SELECT books.book_id AS book_id @@ -1262,10 +1275,7 @@ construct similar to an alias:: from sqlalchemy import func - selectable = people.tablesample( - func.bernoulli(1), - name='alias', - seed=func.random()) + selectable = people.tablesample(func.bernoulli(1), name="alias", seed=func.random()) stmt = select([selectable.c.people_id]) Assuming ``people`` with a column ``people_id``, the above @@ -1295,9 +1305,10 @@ What's changed is that this feature no longer turns on automatically for a *composite* primary key; previously, a table definition such as:: Table( - 'some_table', metadata, - Column('x', Integer, primary_key=True), - Column('y', Integer, primary_key=True) + "some_table", + metadata, + Column("x", Integer, primary_key=True), + Column("y", Integer, primary_key=True), ) Would have "autoincrement" semantics applied to the ``'x'`` column, only @@ -1306,9 +1317,10 @@ disable this, one would have to turn off ``autoincrement`` on all columns:: # old way Table( - 'some_table', metadata, - Column('x', Integer, primary_key=True, autoincrement=False), - Column('y', Integer, primary_key=True, autoincrement=False) + "some_table", + metadata, + Column("x", Integer, primary_key=True, autoincrement=False), + Column("y", Integer, primary_key=True, autoincrement=False), ) With the new behavior, the composite primary key will not have autoincrement @@ -1316,9 +1328,10 @@ semantics unless a column is marked explicitly with ``autoincrement=True``:: # column 'y' will be SERIAL/AUTO_INCREMENT/ auto-generating Table( - 'some_table', metadata, - Column('x', Integer, primary_key=True), - Column('y', Integer, primary_key=True, autoincrement=True) + "some_table", + metadata, + Column("x", Integer, primary_key=True), + Column("y", Integer, primary_key=True, autoincrement=True), ) In order to anticipate some 
potential backwards-incompatible scenarios, @@ -1327,9 +1340,10 @@ for missing primary key values on composite primary key columns that don't have autoincrement set up; given a table such as:: Table( - 'b', metadata, - Column('x', Integer, primary_key=True), - Column('y', Integer, primary_key=True) + "b", + metadata, + Column("x", Integer, primary_key=True), + Column("y", Integer, primary_key=True), ) An INSERT emitted with no values for this table will produce this warning:: @@ -1349,9 +1363,10 @@ default or something less common such as a trigger, the presence of a value generator can be indicated using :class:`.FetchedValue`:: Table( - 'b', metadata, - Column('x', Integer, primary_key=True, server_default=FetchedValue()), - Column('y', Integer, primary_key=True, server_default=FetchedValue()) + "b", + metadata, + Column("x", Integer, primary_key=True, server_default=FetchedValue()), + Column("y", Integer, primary_key=True, server_default=FetchedValue()), ) For the very unlikely case where a composite primary key is actually intended @@ -1359,9 +1374,10 @@ to store NULL in one or more of its columns (only supported on SQLite and MySQL) specify the column with ``nullable=True``:: Table( - 'b', metadata, - Column('x', Integer, primary_key=True), - Column('y', Integer, primary_key=True, nullable=True) + "b", + metadata, + Column("x", Integer, primary_key=True), + Column("y", Integer, primary_key=True, nullable=True), ) In a related change, the ``autoincrement`` flag may be set to True @@ -1384,19 +1400,19 @@ New operators :meth:`.ColumnOperators.is_distinct_from` and :meth:`.ColumnOperators.isnot_distinct_from` allow the IS DISTINCT FROM and IS NOT DISTINCT FROM sql operation:: - >>> print(column('x').is_distinct_from(None)) + >>> print(column("x").is_distinct_from(None)) x IS DISTINCT FROM NULL Handling is provided for NULL, True and False:: - >>> print(column('x').isnot_distinct_from(False)) + >>> print(column("x").isnot_distinct_from(False)) x IS NOT DISTINCT 
FROM false For SQLite, which doesn't have this operator, "IS" / "IS NOT" is rendered, which on SQLite works for NULL unlike other backends:: >>> from sqlalchemy.dialects import sqlite - >>> print(column('x').is_distinct_from(None).compile(dialect=sqlite.dialect())) + >>> print(column("x").is_distinct_from(None).compile(dialect=sqlite.dialect())) x IS NOT NULL .. _change_1957: @@ -1445,19 +1461,15 @@ and the column arguments passed to :meth:`_expression.TextClause.columns`:: from sqlalchemy import text - stmt = text("SELECT users.id, addresses.id, users.id, " - "users.name, addresses.email_address AS email " - "FROM users JOIN addresses ON users.id=addresses.user_id " - "WHERE users.id = 1").columns( - User.id, - Address.id, - Address.user_id, - User.name, - Address.email_address - ) - - query = session.query(User).from_statement(stmt).\ - options(contains_eager(User.addresses)) + + stmt = text( + "SELECT users.id, addresses.id, users.id, " + "users.name, addresses.email_address AS email " + "FROM users JOIN addresses ON users.id=addresses.user_id " + "WHERE users.id = 1" + ).columns(User.id, Address.id, Address.user_id, User.name, Address.email_address) + + query = session.query(User).from_statement(stmt).options(contains_eager(User.addresses)) result = query.all() Above, the textual SQL contains the column "id" three times, which would @@ -1489,7 +1501,7 @@ Another aspect of this change is that the rules for matching columns have also b to rely upon "positional" matching more fully for compiled SQL constructs as well. 
Given a statement like the following:: - ua = users.alias('ua') + ua = users.alias("ua") stmt = select([users.c.user_id, ua.c.user_id]) The above statement will compile to:: @@ -1512,7 +1524,7 @@ fetch columns:: ua_id = row[ua.c.user_id] # this still raises, however - user_id = row['user_id'] + user_id = row["user_id"] Much less likely to get an "ambiguous column" error message ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -1550,10 +1562,7 @@ string/integer/etc values:: three = 3 - t = Table( - 'data', MetaData(), - Column('value', Enum(MyEnum)) - ) + t = Table("data", MetaData(), Column("value", Enum(MyEnum))) e = create_engine("sqlite://") t.create(e) @@ -1600,8 +1609,9 @@ flag is used (1.1.0b2):: >>> from sqlalchemy import Table, MetaData, Column, Enum, create_engine >>> t = Table( - ... 'data', MetaData(), - ... Column('value', Enum("one", "two", "three", validate_strings=True)) + ... "data", + ... MetaData(), + ... Column("value", Enum("one", "two", "three", validate_strings=True)), ... ) >>> e = create_engine("sqlite://") >>> t.create(e) @@ -1674,10 +1684,10 @@ within logging, exception reporting, as well as ``repr()`` of the row itself:: >>> from sqlalchemy import create_engine >>> import random - >>> e = create_engine("sqlite://", echo='debug') - >>> some_value = ''.join(chr(random.randint(52, 85)) for i in range(5000)) + >>> e = create_engine("sqlite://", echo="debug") + >>> some_value = "".join(chr(random.randint(52, 85)) for i in range(5000)) >>> row = e.execute("select ?", [some_value]).first() - ... (lines are wrapped for clarity) ... + ... # (lines are wrapped for clarity) ... 2016-02-17 13:23:03,027 INFO sqlalchemy.engine.base.Engine select ? 
2016-02-17 13:23:03,027 INFO sqlalchemy.engine.base.Engine ('E6@?>9HPOJB<:=TSTLA;9K;9FPM4M8M@;NM6GU @@ -1752,6 +1762,7 @@ replacing the ``None`` value:: json_value = Column(JSON(none_as_null=False), default="some default") + # would insert "some default" instead of "'null'", # now will insert "'null'" obj = MyObject(json_value=None) @@ -1769,6 +1780,7 @@ inconsistently vs. all other datatypes:: some_other_value = Column(String(50)) json_value = Column(JSON(none_as_null=False)) + # would result in NULL for some_other_value, # but json "'null'" for json_value. Now results in NULL for both # (the json_value is omitted from the INSERT) @@ -1786,9 +1798,7 @@ would be ignored in all cases:: # would insert SQL NULL and/or trigger defaults, # now inserts "'null'" - session.bulk_insert_mappings( - MyObject, - [{"json_value": None}]) + session.bulk_insert_mappings(MyObject, [{"json_value": None}]) The :class:`_types.JSON` type now implements the :attr:`.TypeEngine.should_evaluate_none` flag, @@ -1847,9 +1857,7 @@ is now in Core. The :class:`_types.ARRAY` type still **only works on PostgreSQL**, however it can be used directly, supporting special array use cases such as indexed access, as well as support for the ANY and ALL:: - mytable = Table("mytable", metadata, - Column("data", ARRAY(Integer, dimensions=2)) - ) + mytable = Table("mytable", metadata, Column("data", ARRAY(Integer, dimensions=2))) expr = mytable.c.data[5][6] @@ -1884,7 +1892,6 @@ such as:: subq = select([mytable.c.value]) select([mytable]).where(12 > any_(subq)) - :ticket:`3516` .. 
_change_3132: @@ -1897,12 +1904,14 @@ function for the ``array_agg()`` SQL function that returns an array, which is now available using :class:`_functions.array_agg`:: from sqlalchemy import func + stmt = select([func.array_agg(table.c.value)]) A PostgreSQL element for an aggregate ORDER BY is also added via :class:`_postgresql.aggregate_order_by`:: from sqlalchemy.dialects.postgresql import aggregate_order_by + expr = func.array_agg(aggregate_order_by(table.c.a, table.c.b.desc())) stmt = select([expr]) @@ -1914,8 +1923,8 @@ The PG dialect itself also provides an :func:`_postgresql.array_agg` wrapper to ensure the :class:`_postgresql.ARRAY` type:: from sqlalchemy.dialects.postgresql import array_agg - stmt = select([array_agg(table.c.value).contains('foo')]) + stmt = select([array_agg(table.c.value).contains("foo")]) Additionally, functions like ``percentile_cont()``, ``percentile_disc()``, ``rank()``, ``dense_rank()`` and others that require an ordering via @@ -1923,12 +1932,13 @@ Additionally, functions like ``percentile_cont()``, ``percentile_disc()``, :meth:`.FunctionElement.within_group` modifier:: from sqlalchemy import func - stmt = select([ - department.c.id, - func.percentile_cont(0.5).within_group( - department.c.salary.desc() - ) - ]) + + stmt = select( + [ + department.c.id, + func.percentile_cont(0.5).within_group(department.c.salary.desc()), + ] + ) The above statement would produce SQL similar to:: @@ -1956,7 +1966,7 @@ an :class:`_postgresql.ENUM` had to look like this:: # old way class MyEnum(TypeDecorator, SchemaType): - impl = postgresql.ENUM('one', 'two', 'three', name='myenum') + impl = postgresql.ENUM("one", "two", "three", name="myenum") def _set_table(self, table): self.impl._set_table(table) @@ -1966,8 +1976,7 @@ can be done like any other type:: # new way class MyEnum(TypeDecorator): - impl = postgresql.ENUM('one', 'two', 'three', name='myenum') - + impl = postgresql.ENUM("one", "two", "three", name="myenum") :ticket:`2919` @@ -1987,17 
+1996,18 @@ translation works for DDL and SQL generation, as well as with the ORM. For example, if the ``User`` class were assigned the schema "per_user":: class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) - __table_args__ = {'schema': 'per_user'} + __table_args__ = {"schema": "per_user"} On each request, the :class:`.Session` can be set up to refer to a different schema each time:: session = Session() - session.connection(execution_options={ - "schema_translate_map": {"per_user": "account_one"}}) + session.connection( + execution_options={"schema_translate_map": {"per_user": "account_one"}} + ) # will query from the ``account_one.user`` table session.query(User).get(5) @@ -2072,21 +2082,21 @@ Then, a mapping where we are equating a string "id" column on one table to an integer "id" column on the other:: class Person(Base): - __tablename__ = 'person' + __tablename__ = "person" id = Column(StringAsInt, primary_key=True) pets = relationship( - 'Pets', + "Pets", primaryjoin=( - 'foreign(Pets.person_id)' - '==cast(type_coerce(Person.id, Integer), Integer)' - ) + "foreign(Pets.person_id)" "==cast(type_coerce(Person.id, Integer), Integer)" + ), ) + class Pets(Base): - __tablename__ = 'pets' - id = Column('id', Integer, primary_key=True) - person_id = Column('person_id', Integer) + __tablename__ = "pets" + id = Column("id", Integer, primary_key=True) + person_id = Column("person_id", Integer) Above, in the :paramref:`_orm.relationship.primaryjoin` expression, we are using :func:`.type_coerce` to handle bound parameters passed via @@ -2166,8 +2176,7 @@ Column:: class MyObject(Base): # ... 
- json_value = Column( - JSON(none_as_null=False), nullable=False, default=JSON.NULL) + json_value = Column(JSON(none_as_null=False), nullable=False, default=JSON.NULL) Or, ensure the value is present on the object:: @@ -2182,7 +2191,6 @@ passed to :paramref:`_schema.Column.default` or :paramref:`_schema.Column.server # default=None is the same as omitting it entirely, does not apply JSON NULL json_value = Column(JSON(none_as_null=False), nullable=False, default=None) - .. seealso:: :ref:`change_3514` @@ -2195,9 +2203,11 @@ Columns no longer added redundantly with DISTINCT + ORDER BY A query such as the following will now augment only those columns that are missing from the SELECT list, without duplicates:: - q = session.query(User.id, User.name.label('name')).\ - distinct().\ - order_by(User.id, User.name, User.fullname) + q = ( + session.query(User.id, User.name.label("name")) + .distinct() + .order_by(User.id, User.name, User.fullname) + ) Produces:: @@ -2237,7 +2247,7 @@ now raises an error, whereas previously it would silently pick only the last defined validator:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) data = Column(String) @@ -2250,6 +2260,7 @@ last defined validator:: def _validate_data_two(self): assert "y" in data + configure_mappers() Will raise:: @@ -2321,7 +2332,7 @@ passed through the literal quoting system:: >>> from sqlalchemy.schema import MetaData, Table, Column, CreateTable >>> from sqlalchemy.types import String - >>> t = Table('t', MetaData(), Column('x', String(), server_default="hi ' there")) + >>> t = Table("t", MetaData(), Column("x", String(), server_default="hi ' there")) >>> print(CreateTable(t)) CREATE TABLE t ( @@ -2473,7 +2484,7 @@ This includes: one less dimension. 
Given a column with type ``ARRAY(Integer, dimensions=3)``, we can now perform this expression:: - int_expr = col[5][6][7] # returns an Integer expression object + int_expr = col[5][6][7] # returns an Integer expression object Previously, the indexed access to ``col[5]`` would return an expression of type :class:`.Integer` where we could no longer perform indexed access @@ -2490,7 +2501,7 @@ This includes: the :class:`_postgresql.ARRAY` type, this means that it is now straightforward to produce JSON expressions with multiple levels of indexed access:: - json_expr = json_col['key1']['attr1'][5] + json_expr = json_col["key1"]["attr1"][5] * The "textual" type that is returned by indexed access of :class:`.HSTORE` as well as the "textual" type that is returned by indexed access of @@ -2520,12 +2531,11 @@ support CAST operations to each other without the "astext" aspect. This means that in most cases, an application that was doing this:: - expr = json_col['somekey'].cast(Integer) + expr = json_col["somekey"].cast(Integer) Will now need to change to this:: - expr = json_col['somekey'].astext.cast(Integer) - + expr = json_col["somekey"].astext.cast(Integer) .. 
_change_2729: @@ -2536,12 +2546,21 @@ A table definition like the following will now emit CREATE TYPE as expected:: enum = Enum( - 'manager', 'place_admin', 'carwash_admin', - 'parking_admin', 'service_admin', 'tire_admin', - 'mechanic', 'carwasher', 'tire_mechanic', name="work_place_roles") + "manager", + "place_admin", + "carwash_admin", + "parking_admin", + "service_admin", + "tire_admin", + "mechanic", + "carwasher", + "tire_mechanic", + name="work_place_roles", + ) + class WorkPlacement(Base): - __tablename__ = 'work_placement' + __tablename__ = "work_placement" id = Column(Integer, primary_key=True) roles = Column(ARRAY(enum)) @@ -2580,10 +2599,11 @@ The new argument :paramref:`.PGInspector.get_view_names.include` allows specification of which sub-types of views should be returned:: from sqlalchemy import inspect + insp = inspect(engine) - plain_views = insp.get_view_names(include='plain') - all_views = insp.get_view_names(include=('plain', 'materialized')) + plain_views = insp.get_view_names(include="plain") + all_views = insp.get_view_names(include=("plain", "materialized")) :ticket:`3588` @@ -2668,9 +2688,7 @@ The MySQL dialect now accepts the value "AUTOCOMMIT" for the parameters:: connection = engine.connect() - connection = connection.execution_options( - isolation_level="AUTOCOMMIT" - ) + connection = connection.execution_options(isolation_level="AUTOCOMMIT") The isolation level makes use of the various "autocommit" attributes provided by most MySQL DBAPIs. 
@@ -2687,10 +2705,11 @@ on an InnoDB table featured AUTO_INCREMENT on one of its columns which was not the first column, e.g.:: t = Table( - 'some_table', metadata, - Column('x', Integer, primary_key=True, autoincrement=False), - Column('y', Integer, primary_key=True, autoincrement=True), - mysql_engine='InnoDB' + "some_table", + metadata, + Column("x", Integer, primary_key=True, autoincrement=False), + Column("y", Integer, primary_key=True, autoincrement=True), + mysql_engine="InnoDB", ) DDL such as the following would be generated:: @@ -2720,12 +2739,13 @@ use the :class:`.PrimaryKeyConstraint` construct explicitly (1.1.0b2) (along with a KEY for the autoincrement column as required by MySQL), e.g.:: t = Table( - 'some_table', metadata, - Column('x', Integer, primary_key=True), - Column('y', Integer, primary_key=True, autoincrement=True), - PrimaryKeyConstraint('x', 'y'), - UniqueConstraint('y'), - mysql_engine='InnoDB' + "some_table", + metadata, + Column("x", Integer, primary_key=True), + Column("y", Integer, primary_key=True, autoincrement=True), + PrimaryKeyConstraint("x", "y"), + UniqueConstraint("y"), + mysql_engine="InnoDB", ) Along with the change :ref:`change_3216`, composite primary keys with @@ -2735,14 +2755,13 @@ now defaults to the value ``"auto"`` and the ``autoincrement=False`` directives are no longer needed:: t = Table( - 'some_table', metadata, - Column('x', Integer, primary_key=True), - Column('y', Integer, primary_key=True, autoincrement=True), - mysql_engine='InnoDB' + "some_table", + metadata, + Column("x", Integer, primary_key=True), + Column("y", Integer, primary_key=True, autoincrement=True), + mysql_engine="InnoDB", ) - - Dialect Improvements and Changes - SQLite ========================================= @@ -2849,8 +2868,7 @@ parameters. 
The four standard levels are supported as well as ``SNAPSHOT``:: engine = create_engine( - "mssql+pyodbc://scott:tiger@ms_2008", - isolation_level="REPEATABLE READ" + "mssql+pyodbc://scott:tiger@ms_2008", isolation_level="REPEATABLE READ" ) .. seealso:: @@ -2869,12 +2887,11 @@ which includes a length, an "un-lengthed" type under SQL Server would copy the "length" parameter as the value ``"max"``:: >>> from sqlalchemy import create_engine, inspect - >>> engine = create_engine('mssql+pyodbc://scott:tiger@ms_2008', echo=True) + >>> engine = create_engine("mssql+pyodbc://scott:tiger@ms_2008", echo=True) >>> engine.execute("create table s (x varchar(max), y varbinary(max))") >>> insp = inspect(engine) >>> for col in insp.get_columns("s"): - ... print(col['type'].__class__, col['type'].length) - ... + ... print(col["type"].__class__, col["type"].length) max max @@ -2884,8 +2901,7 @@ interprets as "max". The fix then is so that these lengths come out as None, so that the type objects work in non-SQL Server contexts:: >>> for col in insp.get_columns("s"): - ... print(col['type'].__class__, col['type'].length) - ... + ... print(col["type"].__class__, col["type"].length) None None @@ -2918,10 +2934,11 @@ This aliasing attempts to turn schema-qualified tables into aliases; given a table such as:: account_table = Table( - 'account', metadata, - Column('id', Integer, primary_key=True), - Column('info', String(100)), - schema="customer_schema" + "account", + metadata, + Column("id", Integer, primary_key=True), + Column("info", String(100)), + schema="customer_schema", ) The legacy mode of behavior will attempt to turn a schema-qualified table diff --git a/doc/build/changelog/migration_12.rst b/doc/build/changelog/migration_12.rst index 7073660f78..eb4f076d13 100644 --- a/doc/build/changelog/migration_12.rst +++ b/doc/build/changelog/migration_12.rst @@ -80,9 +80,11 @@ that is cacheable as well as more efficient. 
Given a query as below:: - q = session.query(User).\ - filter(User.name.like('%ed%')).\ - options(subqueryload(User.addresses)) + q = ( + session.query(User) + .filter(User.name.like("%ed%")) + .options(subqueryload(User.addresses)) + ) The SQL produced would be the query against ``User`` followed by the subqueryload for ``User.addresses`` (note the parameters are also listed):: @@ -106,9 +108,11 @@ subqueryload for ``User.addresses`` (note the parameters are also listed):: With "selectin" loading, we instead get a SELECT that refers to the actual primary key values loaded in the parent query:: - q = session.query(User).\ - filter(User.name.like('%ed%')).\ - options(selectinload(User.addresses)) + q = ( + session.query(User) + .filter(User.name.like("%ed%")) + .options(selectinload(User.addresses)) + ) Produces:: @@ -225,8 +229,9 @@ if not specified, the attribute defaults to ``None``:: from sqlalchemy.orm import query_expression from sqlalchemy.orm import with_expression + class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) x = Column(Integer) y = Column(Integer) @@ -234,9 +239,9 @@ if not specified, the attribute defaults to ``None``:: # will be None normally... expr = query_expression() + # but let's give it x + y - a1 = session.query(A).options( - with_expression(A.expr, A.x + A.y)).first() + a1 = session.query(A).options(with_expression(A.expr, A.x + A.y)).first() print(a1.expr) .. seealso:: @@ -259,10 +264,9 @@ Below, we emit a DELETE against ``SomeEntity``, adding a FROM clause (or equivalent, depending on backend) against ``SomeOtherEntity``:: - query(SomeEntity).\ - filter(SomeEntity.id==SomeOtherEntity.id).\ - filter(SomeOtherEntity.foo=='bar').\ - delete() + query(SomeEntity).filter(SomeEntity.id == SomeOtherEntity.id).filter( + SomeOtherEntity.foo == "bar" + ).delete() .. 
seealso:: @@ -291,28 +295,26 @@ into multiple columns/expressions:: @hybrid.hybrid_property def name(self): - return self.first_name + ' ' + self.last_name + return self.first_name + " " + self.last_name @name.expression def name(cls): - return func.concat(cls.first_name, ' ', cls.last_name) + return func.concat(cls.first_name, " ", cls.last_name) @name.update_expression def name(cls, value): - f, l = value.split(' ', 1) + f, l = value.split(" ", 1) return [(cls.first_name, f), (cls.last_name, l)] Above, an UPDATE can be rendered using:: - session.query(Person).filter(Person.id == 5).update( - {Person.name: "Dr. No"}) + session.query(Person).filter(Person.id == 5).update({Person.name: "Dr. No"}) Similar functionality is available for composites, where composite values will be broken out into their individual columns for bulk UPDATE:: session.query(Vertex).update({Edge.start: Point(3, 4)}) - .. seealso:: :ref:`hybrid_bulk_update` @@ -342,6 +344,7 @@ Python:: def name(self, value): self.first_name = value + class FirstNameLastName(FirstNameOnly): # ... 
@@ -349,15 +352,15 @@ Python:: @FirstNameOnly.name.getter def name(self): - return self.first_name + ' ' + self.last_name + return self.first_name + " " + self.last_name @name.setter def name(self, value): - self.first_name, self.last_name = value.split(' ', maxsplit=1) + self.first_name, self.last_name = value.split(" ", maxsplit=1) @name.expression def name(cls): - return func.concat(cls.first_name, ' ', cls.last_name) + return func.concat(cls.first_name, " ", cls.last_name) Above, the ``FirstNameOnly.name`` hybrid is referenced by the ``FirstNameLastName`` subclass in order to repurpose it specifically to the @@ -426,10 +429,12 @@ if this "append" event is the second part of a bulk replace:: from sqlalchemy.orm.attributes import OP_BULK_REPLACE + @event.listens_for(SomeObject.collection, "bulk_replace") def process_collection(target, values, initiator): values[:] = [_make_value(value) for value in values] + @event.listens_for(SomeObject.collection, "append", retval=True) def process_collection(target, value, initiator): # make sure bulk_replace didn't already do it @@ -438,7 +443,6 @@ if this "append" event is the second part of a bulk replace:: else: return value - :ticket:`3896` .. _change_3303: @@ -457,11 +461,13 @@ extension:: Base = declarative_base() + class MyDataClass(Base): - __tablename__ = 'my_data' + __tablename__ = "my_data" id = Column(Integer, primary_key=True) data = Column(MutableDict.as_mutable(JSONEncodedDict)) + @event.listens_for(MyDataClass.data, "modified") def modified_json(instance): print("json value modified:", instance.data) @@ -511,7 +517,6 @@ becomes part of the next flush process:: model = session.query(MyModel).first() model.json_set &= {1, 3} - :ticket:`3853` .. 
_change_3769: @@ -527,7 +532,7 @@ is an association proxy that links to ``AtoB.bvalue``, which is itself an association proxy onto ``B``:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) b_values = association_proxy("atob", "b_value") @@ -535,26 +540,26 @@ itself an association proxy onto ``B``:: class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) - a_id = Column(ForeignKey('a.id')) + a_id = Column(ForeignKey("a.id")) value = Column(String) c = relationship("C") class C(Base): - __tablename__ = 'c' + __tablename__ = "c" id = Column(Integer, primary_key=True) - b_id = Column(ForeignKey('b.id')) + b_id = Column(ForeignKey("b.id")) value = Column(String) class AtoB(Base): - __tablename__ = 'atob' + __tablename__ = "atob" - a_id = Column(ForeignKey('a.id'), primary_key=True) - b_id = Column(ForeignKey('b.id'), primary_key=True) + a_id = Column(ForeignKey("a.id"), primary_key=True) + b_id = Column(ForeignKey("b.id"), primary_key=True) a = relationship("A", backref="atob") b = relationship("B", backref="atob") @@ -567,7 +572,7 @@ query across the two proxies ``A.b_values``, ``AtoB.b_value``: .. sourcecode:: pycon+sql - >>> s.query(A).filter(A.b_values.contains('hi')).all() + >>> s.query(A).filter(A.b_values.contains("hi")).all() {opensql}SELECT a.id AS a_id FROM a WHERE EXISTS (SELECT 1 @@ -581,7 +586,7 @@ to query across the two proxies ``A.c_values``, ``AtoB.c_value``: .. sourcecode:: pycon+sql - >>> s.query(A).filter(A.c_values.any(value='x')).all() + >>> s.query(A).filter(A.c_values.any(value="x")).all() {opensql}SELECT a.id AS a_id FROM a WHERE EXISTS (SELECT 1 @@ -612,8 +617,8 @@ primary key value. 
The example now illustrates that a new ``identity_token`` field tracks this difference so that the two objects can co-exist in the same identity map:: - tokyo = WeatherLocation('Asia', 'Tokyo') - newyork = WeatherLocation('North America', 'New York') + tokyo = WeatherLocation("Asia", "Tokyo") + newyork = WeatherLocation("North America", "New York") tokyo.reports.append(Report(80.0)) newyork.reports.append(Report(75)) @@ -632,15 +637,14 @@ same identity map:: newyork_report = newyork.reports[0] tokyo_report = tokyo.reports[0] - assert inspect(newyork_report).identity_key == (Report, (1, ), "north_america") - assert inspect(tokyo_report).identity_key == (Report, (1, ), "asia") + assert inspect(newyork_report).identity_key == (Report, (1,), "north_america") + assert inspect(tokyo_report).identity_key == (Report, (1,), "asia") # the token representing the originating shard is also available directly assert inspect(newyork_report).identity_token == "north_america" assert inspect(tokyo_report).identity_token == "asia" - :ticket:`4137` New Features and Improvements - Core @@ -673,6 +677,7 @@ illustrates a recipe that will allow for the "liberal" behavior of the pre-1.1 from sqlalchemy import Boolean from sqlalchemy import TypeDecorator + class LiberalBoolean(TypeDecorator): impl = Boolean @@ -681,7 +686,6 @@ illustrates a recipe that will allow for the "liberal" behavior of the pre-1.1 value = bool(int(value)) return value - :ticket:`4102` .. _change_3919: @@ -844,7 +848,7 @@ other comparison operators has been flattened into one level. This will have the effect of more parenthesization being generated when comparison operators are combined together, such as:: - (column('q') == null()) != (column('y') == null()) + (column("q") == null()) != (column("y") == null()) Will now generate ``(q IS NULL) != (y IS NULL)`` rather than ``q IS NULL != y IS NULL``. @@ -862,9 +866,10 @@ and columns. 
These are specified via the :paramref:`_schema.Table.comment` and :paramref:`_schema.Column.comment` arguments:: Table( - 'my_table', metadata, - Column('q', Integer, comment="the Q value"), - comment="my Q table" + "my_table", + metadata, + Column("q", Integer, comment="the Q value"), + comment="my Q table", ) Above, DDL will be rendered appropriately upon table create to associate @@ -891,9 +896,11 @@ the 0.7 and 0.8 series. Given a statement as:: - stmt = users.delete().\ - where(users.c.id == addresses.c.id).\ - where(addresses.c.email_address.startswith('ed%')) + stmt = ( + users.delete() + .where(users.c.id == addresses.c.id) + .where(addresses.c.email_address.startswith("ed%")) + ) conn.execute(stmt) The resulting SQL from the above statement on a PostgreSQL backend @@ -930,7 +937,7 @@ can now be used to change the autoescape character, if desired. An expression such as:: - >>> column('x').startswith('total%score', autoescape=True) + >>> column("x").startswith("total%score", autoescape=True) Renders as:: @@ -940,7 +947,7 @@ Where the value of the parameter "x_1" is ``'total/%score'``. Similarly, an expression that has backslashes:: - >>> column('x').startswith('total/score', autoescape=True) + >>> column("x").startswith("total/score", autoescape=True) Will render the same way, with the value of the parameter "x_1" as ``'total//score'``. @@ -968,8 +975,8 @@ if the application is working with plain floats. float_value = connection.scalar( - select([literal(4.56)]) # the "BindParameter" will now be - # Float, not Numeric(asdecimal=True) + select([literal(4.56)]) # the "BindParameter" will now be + # Float, not Numeric(asdecimal=True) ) * Math operations between :class:`.Numeric`, :class:`.Float`, and @@ -978,11 +985,11 @@ if the application is working with plain floats. 
as well as if the type should be :class:`.Float`:: # asdecimal flag is maintained - expr = column('a', Integer) * column('b', Numeric(asdecimal=False)) + expr = column("a", Integer) * column("b", Numeric(asdecimal=False)) assert expr.type.asdecimal == False # Float subclass of Numeric is maintained - expr = column('a', Integer) * column('b', Float()) + expr = column("a", Integer) * column("b", Float()) assert isinstance(expr.type, Float) * The :class:`.Float` datatype will apply the ``float()`` processor to @@ -1009,9 +1016,7 @@ is added to the compiler to allow for the space. All three functions are named in the documentation now:: >>> from sqlalchemy import select, table, column, func, tuple_ - >>> t = table('t', - ... column('value'), column('x'), - ... column('y'), column('z'), column('q')) + >>> t = table("t", column("value"), column("x"), column("y"), column("z"), column("q")) >>> stmt = select([func.sum(t.c.value)]).group_by( ... func.grouping_sets( ... tuple_(t.c.x, t.c.y), @@ -1046,16 +1051,17 @@ localized to the current VALUES clause being processed:: def mydefault(context): - return context.get_current_parameters()['counter'] + 12 + return context.get_current_parameters()["counter"] + 12 - mytable = Table('mytable', metadata_obj, - Column('counter', Integer), - Column('counter_plus_twelve', - Integer, default=mydefault, onupdate=mydefault) + + mytable = Table( + "mytable", + metadata_obj, + Column("counter", Integer), + Column("counter_plus_twelve", Integer, default=mydefault, onupdate=mydefault), ) - stmt = mytable.insert().values( - [{"counter": 5}, {"counter": 18}, {"counter": 20}]) + stmt = mytable.insert().values([{"counter": 5}, {"counter": 18}, {"counter": 20}]) conn.execute(stmt) @@ -1077,7 +1083,8 @@ of the :meth:`.SessionEvents.after_commit` event which also emits before the sess = Session() - user = sess.query(User).filter_by(name='x').first() + user = sess.query(User).filter_by(name="x").first() + @event.listens_for(sess, "after_rollback") 
def after_rollback(session): @@ -1086,12 +1093,14 @@ of the :meth:`.SessionEvents.after_commit` event which also emits before the # to emit a lazy load. print("user name: %s" % user.name) + @event.listens_for(sess, "after_commit") def after_commit(session): # 'user.name' is present, assuming it was already # loaded. this is the existing behavior. print("user name: %s" % user.name) + if should_rollback: sess.rollback() else: @@ -1148,7 +1157,7 @@ In the case of assigning a collection to an attribute that would replace the previous collection, a side effect of this was that the collection being replaced would also be mutated, which is misleading and unnecessary:: - >>> a1, a2, a3 = Address('a1'), Address('a2'), Address('a3') + >>> a1, a2, a3 = Address("a1"), Address("a2"), Address("a3") >>> user.addresses = [a1, a2] >>> previous_collection = user.addresses @@ -1177,18 +1186,19 @@ existing collection. Given a mapping as:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) bs = relationship("B") - @validates('bs') + @validates("bs") def convert_dict_to_b(self, key, value): - return B(data=value['data']) + return B(data=value["data"]) + class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) - a_id = Column(ForeignKey('a.id')) + a_id = Column(ForeignKey("a.id")) data = Column(String) Above, we could use the validator as follows, to convert from an incoming @@ -1217,7 +1227,7 @@ are new. Supposing a simple validator such as:: class A(Base): # ... 
- @validates('bs') + @validates("bs") def validate_b(self, key, value): assert value.data is not None return value @@ -1255,16 +1265,16 @@ Use flag_dirty() to mark an object as "dirty" without any attribute changing An exception is now raised if the :func:`.attributes.flag_modified` function is used to mark an attribute as modified that isn't actually loaded:: - a1 = A(data='adf') + a1 = A(data="adf") s.add(a1) s.flush() # expire, similarly as though we said s.commit() - s.expire(a1, 'data') + s.expire(a1, "data") # will raise InvalidRequestError - attributes.flag_modified(a1, 'data') + attributes.flag_modified(a1, "data") This because the flush process will most likely fail in any case if the attribute remains un-present by the time flush occurs. To mark an object @@ -1287,6 +1297,7 @@ such as :meth:`.SessionEvents.before_flush`, use the new A very old and undocumented keyword argument ``scope`` has been removed:: from sqlalchemy.orm import scoped_session + Session = scoped_session(sessionmaker()) session = Session(scope=None) @@ -1312,18 +1323,21 @@ it is re-stated during the UPDATE so that the "onupdate" rule does not overwrite it:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) - favorite_b_id = Column(ForeignKey('b.id', name="favorite_b_fk")) + favorite_b_id = Column(ForeignKey("b.id", name="favorite_b_fk")) bs = relationship("B", primaryjoin="A.id == B.a_id") favorite_b = relationship( - "B", primaryjoin="A.favorite_b_id == B.id", post_update=True) + "B", primaryjoin="A.favorite_b_id == B.id", post_update=True + ) updated = Column(Integer, onupdate=my_onupdate_function) + class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) - a_id = Column(ForeignKey('a.id', name="a_fk")) + a_id = Column(ForeignKey("a.id", name="a_fk")) + a1 = A() b1 = B() @@ -1371,21 +1385,18 @@ now participates in the versioning feature, documented at Given a mapping:: class Node(Base): - 
__tablename__ = 'node' + __tablename__ = "node" id = Column(Integer, primary_key=True) version_id = Column(Integer, default=0) - parent_id = Column(ForeignKey('node.id')) - favorite_node_id = Column(ForeignKey('node.id')) + parent_id = Column(ForeignKey("node.id")) + favorite_node_id = Column(ForeignKey("node.id")) nodes = relationship("Node", primaryjoin=remote(parent_id) == id) favorite_node = relationship( - "Node", primaryjoin=favorite_node_id == remote(id), - post_update=True + "Node", primaryjoin=favorite_node_id == remote(id), post_update=True ) - __mapper_args__ = { - 'version_id_col': version_id - } + __mapper_args__ = {"version_id_col": version_id} An UPDATE of a node that associates another node as "favorite" will now increment the version counter as well as match the current version:: @@ -1435,20 +1446,20 @@ Whereas in 1.1, an expression such as the following would produce a result with no return type (assume ``-%>`` is some special operator supported by the database):: - >>> column('x', types.DateTime).op('-%>')(None).type + >>> column("x", types.DateTime).op("-%>")(None).type NullType() Other types would use the default behavior of using the left-hand type as the return type:: - >>> column('x', types.String(50)).op('-%>')(None).type + >>> column("x", types.String(50)).op("-%>")(None).type String(length=50) These behaviors were mostly by accident, so the behavior has been made consistent with the second form, that is the default return type is the same as the left-hand expression:: - >>> column('x', types.DateTime).op('-%>')(None).type + >>> column("x", types.DateTime).op("-%>")(None).type DateTime() As most user-defined operators tend to be "comparison" operators, often @@ -1457,18 +1468,18 @@ one of the many special operators defined by PostgreSQL, the its documented behavior of allowing the return type to be :class:`.Boolean` in all cases, including for :class:`_types.ARRAY` and :class:`_types.JSON`:: - >>> column('x', types.String(50)).op('-%>', 
is_comparison=True)(None).type + >>> column("x", types.String(50)).op("-%>", is_comparison=True)(None).type Boolean() - >>> column('x', types.ARRAY(types.Integer)).op('-%>', is_comparison=True)(None).type + >>> column("x", types.ARRAY(types.Integer)).op("-%>", is_comparison=True)(None).type Boolean() - >>> column('x', types.JSON()).op('-%>', is_comparison=True)(None).type + >>> column("x", types.JSON()).op("-%>", is_comparison=True)(None).type Boolean() To assist with boolean comparison operators, a new shorthand method :meth:`.Operators.bool_op` has been added. This method should be preferred for on-the-fly boolean operators:: - >>> print(column('x', types.Integer).bool_op('-%>')(5)) + >>> print(column("x", types.Integer).bool_op("-%>")(5)) x -%> :x_1 @@ -1485,7 +1496,7 @@ Previously, it was not possible to produce a :obj:`_expression.literal_column` construct that stated a single percent sign:: >>> from sqlalchemy import literal_column - >>> print(literal_column('some%symbol')) + >>> print(literal_column("some%symbol")) some%%symbol The percent sign is now unaffected for dialects that are not set to @@ -1494,10 +1505,10 @@ dialects which do state one of these paramstyles will continue to escape as is appropriate:: >>> from sqlalchemy import literal_column - >>> print(literal_column('some%symbol')) + >>> print(literal_column("some%symbol")) some%symbol >>> from sqlalchemy.dialects import mysql - >>> print(literal_column('some%symbol').compile(dialect=mysql.dialect())) + >>> print(literal_column("some%symbol").compile(dialect=mysql.dialect())) some%%symbol As part of this change, the doubling that has been present when using @@ -1517,8 +1528,9 @@ A bug in the :func:`_expression.collate` and :meth:`.ColumnOperators.collate` functions, used to supply ad-hoc column collations at the statement level, is fixed, where a case sensitive name would not be quoted:: - stmt = select([mytable.c.x, mytable.c.y]).\ - order_by(mytable.c.somecolumn.collate("fr_FR")) + stmt = 
select([mytable.c.x, mytable.c.y]).order_by( + mytable.c.somecolumn.collate("fr_FR") + ) now renders:: @@ -1553,8 +1565,8 @@ sets. The feature is off by default and can be enabled using the ``use_batch_mode`` argument on :func:`_sa.create_engine`:: engine = create_engine( - "postgresql+psycopg2://scott:tiger@host/dbname", - use_batch_mode=True) + "postgresql+psycopg2://scott:tiger@host/dbname", use_batch_mode=True + ) The feature is considered to be experimental for the moment but may become on by default in a future release. @@ -1577,10 +1589,7 @@ now allows these values to be specified:: from sqlalchemy.dialects.postgresql import INTERVAL - Table( - 'my_table', metadata, - Column("some_interval", INTERVAL(fields="DAY TO SECOND")) - ) + Table("my_table", metadata, Column("some_interval", INTERVAL(fields="DAY TO SECOND"))) Additionally, all INTERVAL datatypes can now be reflected independently of the "fields" specifier present; the "fields" parameter in the datatype @@ -1610,12 +1619,10 @@ This :class:`_expression.Insert` subclass adds a new method from sqlalchemy.dialects.mysql import insert - insert_stmt = insert(my_table). \ - values(id='some_id', data='some data to insert') + insert_stmt = insert(my_table).values(id="some_id", data="some data to insert") on_conflict_stmt = insert_stmt.on_duplicate_key_update( - data=insert_stmt.inserted.data, - status='U' + data=insert_stmt.inserted.data, status="U" ) conn.execute(on_conflict_stmt) @@ -1748,9 +1755,15 @@ name, rather than the raw UPPERCASE format that Oracle uses:: Previously, the foreign keys result would look like:: - [{'referred_table': u'users', 'referred_columns': [u'id'], - 'referred_schema': None, 'name': 'USER_ID_FK', - 'constrained_columns': [u'user_id']}] + [ + { + "referred_table": "users", + "referred_columns": ["id"], + "referred_schema": None, + "name": "USER_ID_FK", + "constrained_columns": ["user_id"], + } + ] Where the above could create problems particularly with Alembic autogenerate. 
@@ -1774,20 +1787,17 @@ now be passed using brackets to manually specify where this split occurs, allowing database and/or owner names that themselves contain one or more dots:: - Table( - "some_table", metadata, - Column("q", String(50)), - schema="[MyDataBase.dbo]" - ) + Table("some_table", metadata, Column("q", String(50)), schema="[MyDataBase.dbo]") The above table will consider the "owner" to be ``MyDataBase.dbo``, which will also be quoted upon render, and the "database" as None. To individually refer to database name and owner, use two pairs of brackets:: Table( - "some_table", metadata, + "some_table", + metadata, Column("q", String(50)), - schema="[MyDataBase.SomeDB].[MyDB.owner]" + schema="[MyDataBase.SomeDB].[MyDB.owner]", ) Additionally, the :class:`.quoted_name` construct is now honored when diff --git a/doc/build/changelog/migration_13.rst b/doc/build/changelog/migration_13.rst index 169f67df5b..c3093c674b 100644 --- a/doc/build/changelog/migration_13.rst +++ b/doc/build/changelog/migration_13.rst @@ -130,14 +130,17 @@ like:: j = join(B, D, D.b_id == B.id).join(C, C.id == D.c_id) B_viacd = mapper( - B, j, non_primary=True, primary_key=[j.c.b_id], + B, + j, + non_primary=True, + primary_key=[j.c.b_id], properties={ "id": j.c.b_id, # so that 'id' looks the same as before - "c_id": j.c.c_id, # needed for disambiguation + "c_id": j.c.c_id, # needed for disambiguation "d_c_id": j.c.d_c_id, # needed for disambiguation "b_id": [j.c.b_id, j.c.d_b_id], "d_id": j.c.d_id, - } + }, ) A.b = relationship(B_viacd, primaryjoin=A.b_id == B_viacd.c.b_id) @@ -185,14 +188,14 @@ of collections all in one query without using JOIN or subqueries at all. 
Given a mapping:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) bs = relationship("B", lazy="selectin") class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) a_id = Column(ForeignKey("a.id")) @@ -349,7 +352,7 @@ where the ``del`` operation is roughly equivalent to setting the attribute to th some_object = session.query(SomeObject).get(5) - del some_object.some_attribute # from a SQL perspective, works like "= None" + del some_object.some_attribute # from a SQL perspective, works like "= None" :ticket:`4354` @@ -366,10 +369,9 @@ along with that object's full lifecycle in memory:: from sqlalchemy import inspect - u1 = User(id=7, name='ed') - - inspect(u1).info['user_info'] = '7|ed' + u1 = User(id=7, name="ed") + inspect(u1).info["user_info"] = "7|ed" :ticket:`4257` @@ -399,23 +401,22 @@ Association proxy has new cascade_scalar_deletes flag Given a mapping as:: class A(Base): - __tablename__ = 'test_a' + __tablename__ = "test_a" id = Column(Integer, primary_key=True) - ab = relationship( - 'AB', backref='a', uselist=False) + ab = relationship("AB", backref="a", uselist=False) b = association_proxy( - 'ab', 'b', creator=lambda b: AB(b=b), - cascade_scalar_deletes=True) + "ab", "b", creator=lambda b: AB(b=b), cascade_scalar_deletes=True + ) class B(Base): - __tablename__ = 'test_b' + __tablename__ = "test_b" id = Column(Integer, primary_key=True) - ab = relationship('AB', backref='b', cascade='all, delete-orphan') + ab = relationship("AB", backref="b", cascade="all, delete-orphan") class AB(Base): - __tablename__ = 'test_ab' + __tablename__ = "test_ab" a_id = Column(Integer, ForeignKey(A.id), primary_key=True) b_id = Column(Integer, ForeignKey(B.id), primary_key=True) @@ -490,7 +491,7 @@ to a class-specific :class:`.AssociationProxyInstance`, demonstrated as:: class User(Base): # ... 
- keywords = association_proxy('kws', 'keyword') + keywords = association_proxy("kws", "keyword") proxy_state = inspect(User).all_orm_descriptors["keywords"].for_class(User) @@ -522,6 +523,7 @@ and is **not** an object reference or another association proxy:: # column-based association proxy values = association_proxy("elements", "value") + class Element(Base): # ... @@ -530,7 +532,7 @@ and is **not** an object reference or another association proxy:: The ``User.values`` association proxy refers to the ``Element.value`` column. Standard column operations are now available, such as ``like``:: - >>> print(s.query(User).filter(User.values.like('%foo%'))) + >>> print(s.query(User).filter(User.values.like("%foo%"))) SELECT "user".id AS user_id FROM "user" WHERE EXISTS (SELECT 1 @@ -539,7 +541,7 @@ Standard column operations are now available, such as ``like``:: ``equals``:: - >>> print(s.query(User).filter(User.values == 'foo')) + >>> print(s.query(User).filter(User.values == "foo")) SELECT "user".id AS user_id FROM "user" WHERE EXISTS (SELECT 1 @@ -564,7 +566,7 @@ comparison operator; **this is a change in behavior** in that previously, the association proxy used ``.contains`` as a list containment operator only. With a column-oriented comparison, it now behaves like a "like":: - >>> print(s.query(User).filter(User.values.contains('foo'))) + >>> print(s.query(User).filter(User.values.contains("foo"))) SELECT "user".id AS user_id FROM "user" WHERE EXISTS (SELECT 1 @@ -579,7 +581,7 @@ When using an object-based association proxy with a collection, the behavior is as before, that of testing for collection membership, e.g. given a mapping:: class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) user_elements = relationship("UserElement") @@ -589,7 +591,7 @@ as before, that of testing for collection membership, e.g. 
given a mapping:: class UserElement(Base): - __tablename__ = 'user_element' + __tablename__ = "user_element" id = Column(Integer, primary_key=True) user_id = Column(ForeignKey("user.id")) @@ -598,7 +600,7 @@ as before, that of testing for collection membership, e.g. given a mapping:: class Element(Base): - __tablename__ = 'element' + __tablename__ = "element" id = Column(Integer, primary_key=True) value = Column(String) @@ -633,21 +635,21 @@ any use cases arise where it causes side effects. As an example, given a mapping with association proxy:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) bs = relationship("B") - b_data = association_proxy('bs', 'data') + b_data = association_proxy("bs", "data") class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) a_id = Column(ForeignKey("a.id")) data = Column(String) - a1 = A(bs=[B(data='b1'), B(data='b2')]) + a1 = A(bs=[B(data="b1"), B(data="b2")]) b_data = a1.b_data @@ -671,7 +673,7 @@ Above, because the ``A`` object would be garbage collected before the The change is that the ``b_data`` collection is now maintaining a strong reference to the ``a1`` object, so that it remains present:: - assert b_data == ['b1', 'b2'] + assert b_data == ["b1", "b2"] This change introduces the side effect that if an application is passing around the collection as above, **the parent object won't be garbage collected** until @@ -699,7 +701,9 @@ new association objects where appropriate:: id = Column(Integer, primary_key=True) b_rel = relationship( - "B", collection_class=set, cascade="all, delete-orphan", + "B", + collection_class=set, + cascade="all, delete-orphan", ) b = association_proxy("b_rel", "value", creator=lambda x: B(value=x)) @@ -712,6 +716,7 @@ new association objects where appropriate:: a_id = Column(Integer, ForeignKey("test_a.id"), nullable=False) value = Column(String) + # ... 
s = Session(e) @@ -728,7 +733,6 @@ new association objects where appropriate:: # against the deleted ones. assert len(s.new) == 1 - :ticket:`2642` .. _change_1103: @@ -749,14 +753,14 @@ having a duplicate temporarily present in the list is intrinsic to a Python "swap" operation. Given a standard one-to-many/many-to-one setup:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) bs = relationship("B", backref="a") class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) a_id = Column(ForeignKey("a.id")) @@ -780,7 +784,7 @@ during the flush. The same issue can be demonstrated using plain duplicates:: >>> del a1.bs[1] >>> a1.bs # collection is unaffected so far... [<__main__.B object at 0x7f047af5fb70>] - >>> b1.a # however b1.a is None + >>> b1.a # however b1.a is None >>> >>> session.add(a1) >>> session.commit() # so upon flush + expire.... @@ -955,21 +959,21 @@ been removed. Previously, this did not take place for one-to-many, or one-to-one relationships, in the following situation:: class User(Base): - __tablename__ = 'users' + __tablename__ = "users" id = Column(Integer, primary_key=True) - addresses = relationship( - "Address", - passive_deletes="all") + addresses = relationship("Address", passive_deletes="all") + class Address(Base): - __tablename__ = 'addresses' + __tablename__ = "addresses" id = Column(Integer, primary_key=True) email = Column(String) - user_id = Column(Integer, ForeignKey('users.id')) + user_id = Column(Integer, ForeignKey("users.id")) user = relationship("User") + u1 = session.query(User).first() address = u1.addresses[0] u1.addresses.remove(address) @@ -1006,16 +1010,17 @@ joined together either with no separator or with an underscore separator. 
Below we define a convention that will name :class:`.UniqueConstraint` constraints with a name that joins together the names of all columns:: - metadata_obj = MetaData(naming_convention={ - "uq": "uq_%(table_name)s_%(column_0_N_name)s" - }) + metadata_obj = MetaData( + naming_convention={"uq": "uq_%(table_name)s_%(column_0_N_name)s"} + ) table = Table( - 'info', metadata_obj, - Column('a', Integer), - Column('b', Integer), - Column('c', Integer), - UniqueConstraint('a', 'b', 'c') + "info", + metadata_obj, + Column("a", Integer), + Column("b", Integer), + Column("c", Integer), + UniqueConstraint("a", "b", "c"), ) The CREATE TABLE for the above table will render as:: @@ -1037,11 +1042,12 @@ PostgreSQL where identifiers cannot be longer than 63 characters, a long constraint name would normally be generated from the table definition below:: long_names = Table( - 'long_names', metadata_obj, - Column('information_channel_code', Integer, key='a'), - Column('billing_convention_name', Integer, key='b'), - Column('product_identifier', Integer, key='c'), - UniqueConstraint('a', 'b', 'c') + "long_names", + metadata_obj, + Column("information_channel_code", Integer, key="a"), + Column("billing_convention_name", Integer, key="b"), + Column("product_identifier", Integer, key="c"), + UniqueConstraint("a", "b", "c"), ) The truncation logic will ensure a too-long name isn't generated for the @@ -1137,17 +1143,16 @@ modifier to produce a :class:`.BinaryExpression` that has a "left" and a "right" side:: class Venue(Base): - __tablename__ = 'venue' + __tablename__ = "venue" id = Column(Integer, primary_key=True) name = Column(String) descendants = relationship( "Venue", - primaryjoin=func.instr( - remote(foreign(name)), name + "/" - ).as_comparison(1, 2) == 1, + primaryjoin=func.instr(remote(foreign(name)), name + "/").as_comparison(1, 2) + == 1, viewonly=True, - order_by=name + order_by=name, ) Above, the :paramref:`_orm.relationship.primaryjoin` of the "descendants" relationship @@ 
-1162,8 +1167,12 @@ lazyload to produce SQL like:: and a joinedload, such as:: - v1 = s.query(Venue).filter_by(name="parent1").options( - joinedload(Venue.descendants)).one() + v1 = ( + s.query(Venue) + .filter_by(name="parent1") + .options(joinedload(Venue.descendants)) + .one() + ) to work as:: @@ -1195,12 +1204,12 @@ backend, such as "SELECT CAST(NULL AS INTEGER) WHERE 1!=1" for PostgreSQL, >>> from sqlalchemy import select, literal_column, bindparam >>> e = create_engine("postgresql://scott:tiger@localhost/test", echo=True) >>> with e.connect() as conn: - ... conn.execute( - ... select([literal_column('1')]). - ... where(literal_column('1').in_(bindparam('q', expanding=True))), - ... q=[] - ... ) - ... + ... conn.execute( + ... select([literal_column("1")]).where( + ... literal_column("1").in_(bindparam("q", expanding=True)) + ... ), + ... q=[], + ... ) SELECT 1 WHERE 1 IN (SELECT CAST(NULL AS INTEGER) WHERE 1!=1) The feature also works for tuple-oriented IN statements, where the "empty IN" @@ -1211,12 +1220,12 @@ such as on PostgreSQL:: >>> from sqlalchemy import select, literal_column, tuple_, bindparam >>> e = create_engine("postgresql://scott:tiger@localhost/test", echo=True) >>> with e.connect() as conn: - ... conn.execute( - ... select([literal_column('1')]). - ... where(tuple_(50, "somestring").in_(bindparam('q', expanding=True))), - ... q=[] - ... ) - ... + ... conn.execute( + ... select([literal_column("1")]).where( + ... tuple_(50, "somestring").in_(bindparam("q", expanding=True)) + ... ), + ... q=[], + ... 
) SELECT 1 WHERE (%(param_1)s, %(param_2)s) IN (SELECT CAST(NULL AS INTEGER), CAST(NULL AS VARCHAR) WHERE 1!=1) @@ -1239,6 +1248,7 @@ variant expression in order to locate these methods:: from sqlalchemy import TypeDecorator, LargeBinary, func + class CompressedLargeBinary(TypeDecorator): impl = LargeBinary @@ -1248,13 +1258,15 @@ variant expression in order to locate these methods:: def column_expression(self, col): return func.uncompress(col, type_=self) + MyLargeBinary = LargeBinary().with_variant(CompressedLargeBinary(), "sqlite") The above expression will render a function within SQL when used on SQLite only:: from sqlalchemy import select, column from sqlalchemy.dialects import sqlite - print(select([column('x', CompressedLargeBinary)]).compile(dialect=sqlite.dialect())) + + print(select([column("x", CompressedLargeBinary)]).compile(dialect=sqlite.dialect())) will render:: @@ -1445,17 +1457,20 @@ queries used until now. Given a schema such as:: dv = Table( - 'data_values', metadata_obj, - Column('modulus', Integer, nullable=False), - Column('data', String(30)), - postgresql_partition_by='range(modulus)') + "data_values", + metadata_obj, + Column("modulus", Integer, nullable=False), + Column("data", String(30)), + postgresql_partition_by="range(modulus)", + ) sa.event.listen( dv, "after_create", sa.DDL( "CREATE TABLE data_values_4_10 PARTITION OF data_values " - "FOR VALUES FROM (4) TO (10)") + "FOR VALUES FROM (4) TO (10)" + ), ) The two table names ``'data_values'`` and ``'data_values_4_10'`` will come @@ -1492,9 +1507,7 @@ can now be explicitly ordered by passing a list of 2-tuples:: from sqlalchemy.dialects.mysql import insert - insert_stmt = insert(my_table).values( - id='some_existing_id', - data='inserted value') + insert_stmt = insert(my_table).values(id="some_existing_id", data="inserted value") on_duplicate_key_stmt = insert_stmt.on_duplicate_key_update( [ @@ -1542,10 +1555,11 @@ keyword added to objects like :class:`.UniqueConstraint` as well as 
several :class:`_schema.Column` -specific variants:: some_table = Table( - 'some_table', metadata_obj, - Column('id', Integer, primary_key=True, sqlite_on_conflict_primary_key='FAIL'), - Column('data', Integer), - UniqueConstraint('id', 'data', sqlite_on_conflict='IGNORE') + "some_table", + metadata_obj, + Column("id", Integer, primary_key=True, sqlite_on_conflict_primary_key="FAIL"), + Column("data", Integer), + UniqueConstraint("id", "data", sqlite_on_conflict="IGNORE"), ) The above table would render in a CREATE TABLE statement as:: @@ -1651,7 +1665,8 @@ Pass it via :func:`_sa.create_engine`:: engine = create_engine( "mssql+pyodbc://scott:tiger@mssql2017:1433/test?driver=ODBC+Driver+13+for+SQL+Server", - fast_executemany=True) + fast_executemany=True, + ) .. seealso:: @@ -1678,12 +1693,16 @@ new ``mssql_identity_start`` and ``mssql_identity_increment`` parameters on :class:`_schema.Column`:: test = Table( - 'test', metadata_obj, + "test", + metadata_obj, Column( - 'id', Integer, primary_key=True, mssql_identity_start=100, - mssql_identity_increment=10 + "id", + Integer, + primary_key=True, + mssql_identity_start=100, + mssql_identity_increment=10, ), - Column('name', String(20)) + Column("name", String(20)), ) In order to emit ``IDENTITY`` on a non-primary key column, which is a little-used @@ -1693,9 +1712,10 @@ primary key column:: test = Table( - 'test', metadata_obj, - Column('id', Integer, primary_key=True, autoincrement=False), - Column('number', Integer, autoincrement=True) + "test", + metadata_obj, + Column("id", Integer, primary_key=True, autoincrement=False), + Column("number", Integer, autoincrement=True), ) .. 
seealso:: diff --git a/doc/build/changelog/migration_14.rst b/doc/build/changelog/migration_14.rst index 82d7acf1ec..d1933566b4 100644 --- a/doc/build/changelog/migration_14.rst +++ b/doc/build/changelog/migration_14.rst @@ -70,9 +70,12 @@ to be used freely against ORM entities:: with Session(engine, future=True) as sess: - stmt = select(User).where( - User.name == 'sandy' - ).join(User.addresses).where(Address.email_address.like("%gmail%")) + stmt = ( + select(User) + .where(User.name == "sandy") + .join(User.addresses) + .where(Address.email_address.like("%gmail%")) + ) result = sess.execute(stmt) @@ -121,16 +124,19 @@ Similar adjustments have been made to "bulk updates and deletes" such that Core :func:`_sql.update` and :func:`_sql.delete` can be used for bulk operations. A bulk update like the following:: - session.query(User).filter(User.name == 'sandy').update({"password": "foobar"}, synchronize_session="fetch") + session.query(User).filter(User.name == "sandy").update( + {"password": "foobar"}, synchronize_session="fetch" + ) can now be achieved in :term:`2.0 style` (and indeed the above runs internally in this way) as follows:: with Session(engine, future=True) as sess: - stmt = update(User).where( - User.name == 'sandy' - ).values(password="foobar").execution_options( - synchronize_session="fetch" + stmt = ( + update(User) + .where(User.name == "sandy") + .values(password="foobar") + .execution_options(synchronize_session="fetch") ) sess.execute(stmt) @@ -676,7 +682,7 @@ that are in the columns clause of the SELECT statement. 
A common beginner mistake is code such as the following::

     stmt = select(users)
-    stmt = stmt.where(stmt.c.name == 'foo')
+    stmt = stmt.where(stmt.c.name == "foo")

 The above code appears intuitive and that it would generate
 "SELECT * FROM users WHERE name='foo'", however veteran SQLAlchemy users will
@@ -688,8 +694,7 @@ the use case above, as in a case like the above it links directly to the
 columns present in the ``users.c`` collection::

     stmt = select(users)
-    stmt = stmt.where(stmt.selected_columns.name == 'foo')
-
+    stmt = stmt.where(stmt.selected_columns.name == "foo")

 :ticket:`4617`

@@ -745,7 +750,9 @@ With the new implementation, :meth:`_sql.Select.join` and
 :meth:`_orm.Query.join`, adding JOIN criteria to the existing statement by
 matching to the left entity::

-    stmt = select(user_table).join(addresses_table, user_table.c.id == addresses_table.c.user_id)
+    stmt = select(user_table).join(
+        addresses_table, user_table.c.id == addresses_table.c.user_id
+    )

 producing::

@@ -839,7 +846,7 @@ returns a new :class:`_engine.URL` object with changes applied::
 To alter the contents of the :attr:`_engine.URL.query` dictionary, methods
 such as :meth:`_engine.URL.update_query_dict` may be used::

-    >>> url.update_query_dict({"sslcert": '/path/to/crt'})
+    >>> url.update_query_dict({"sslcert": "/path/to/crt"})
     postgresql://user:***@host/dbname?sslcert=%2Fpath%2Fto%2Fcrt

 To upgrade code that is mutating these fields directly, a **backwards and
@@ -855,6 +862,7 @@ style::

         some_url.drivername = some_drivername
         return some_url

+
 def set_ssl_cert(some_url, ssl_cert):
     # check for 1.4
     if hasattr(some_url, "update_query_dict"):
@@ -869,7 +877,9 @@ to strings, using sequences of strings to represent multiple parameters. For
 example::

     >>> from sqlalchemy.engine import make_url
-    >>> url = make_url("postgresql://user:pass@host/dbname?alt_host=host1&alt_host=host2&sslcert=%2Fpath%2Fto%2Fcrt")
+    >>> url = make_url(
+    ... 
"postgresql://user:pass@host/dbname?alt_host=host1&alt_host=host2&sslcert=%2Fpath%2Fto%2Fcrt" + ... ) >>> url.query immutabledict({'alt_host': ('host1', 'host2'), 'sslcert': '/path/to/crt'}) @@ -901,25 +911,24 @@ method. A backwards compatible approach would look like:: from sqlalchemy.engine import CreateEnginePlugin + class MyPlugin(CreateEnginePlugin): def __init__(self, url, kwargs): # check for 1.4 style if hasattr(CreateEnginePlugin, "update_url"): - self.my_argument_one = url.query['my_argument_one'] - self.my_argument_two = url.query['my_argument_two'] + self.my_argument_one = url.query["my_argument_one"] + self.my_argument_two = url.query["my_argument_two"] else: # legacy - self.my_argument_one = url.query.pop('my_argument_one') - self.my_argument_two = url.query.pop('my_argument_two') + self.my_argument_one = url.query.pop("my_argument_one") + self.my_argument_two = url.query.pop("my_argument_two") - self.my_argument_three = kwargs.pop('my_argument_three', None) + self.my_argument_three = kwargs.pop("my_argument_three", None) def update_url(self, url): # this method runs in 1.4 only and should be used to consume # plugin-specific arguments - return url.difference_update_query( - ["my_argument_one", "my_argument_two"] - ) + return url.difference_update_query(["my_argument_one", "my_argument_two"]) See the docstring at :class:`_engine.CreateEnginePlugin` for complete details on how this class is used. 
@@ -974,9 +983,9 @@ track for the old calling style:: stmt = select(users_table).where( case( - (users_table.c.name == 'wendy', 'W'), - (users_table.c.name == 'jack', 'J'), - else_='E' + (users_table.c.name == "wendy", "W"), + (users_table.c.name == "jack", "J"), + else_="E", ) ) @@ -1128,9 +1137,11 @@ not line up with these two tables will create an additional FROM entry:: address_alias = aliased(Address) - q = session.query(User).\ - join(address_alias, User.addresses).\ - filter(Address.email_address == 'foo') + q = ( + session.query(User) + .join(address_alias, User.addresses) + .filter(Address.email_address == "foo") + ) The above query selects from a JOIN of ``User`` and ``address_alias``, the latter of which is an alias of the ``Address`` entity. However, the @@ -1189,11 +1200,13 @@ JOIN clauses but also through the WHERE clause Above, we can add a WHERE clause to link the new ``Address`` entity with the previous ``address_alias`` entity and that will remove the warning:: - q = session.query(User).\ - join(address_alias, User.addresses).\ - filter(Address.email_address == 'foo').\ - filter(Address.id == address_alias.id) # resolve cartesian products, - # will no longer warn + q = ( + session.query(User) + .join(address_alias, User.addresses) + .filter(Address.email_address == "foo") + .filter(Address.id == address_alias.id) + ) # resolve cartesian products, + # will no longer warn The cartesian product warning considers **any** kind of link between two FROM clauses to be a resolution, even if the end result set is still @@ -1201,11 +1214,13 @@ wasteful, as the linter is intended only to detect the common case of a FROM clause that is completely unexpected. 
If the FROM clause is referred to explicitly elsewhere and linked to the other FROMs, no warning is emitted:: - q = session.query(User).\ - join(address_alias, User.addresses).\ - filter(Address.email_address == 'foo').\ - filter(Address.id > address_alias.id) # will generate a lot of rows, - # but no warning + q = ( + session.query(User) + .join(address_alias, User.addresses) + .filter(Address.email_address == "foo") + .filter(Address.id > address_alias.id) + ) # will generate a lot of rows, + # but no warning Full cartesian products are also allowed if they are explicitly stated; if we wanted for example the cartesian product of ``User`` and ``Address``, we can @@ -1256,7 +1271,6 @@ including methods such as: with engine.connect() as conn: row = conn.execute(table.select().where(table.c.id == 5)).one() - :meth:`_engine.Result.one_or_none` - same, but also returns None for no rows :meth:`_engine.Result.all` - returns all rows @@ -1278,12 +1292,12 @@ including methods such as: .. sourcecode:: with engine.connect() as conn: - # requests x, y, z - result = conn.execute(select(table.c.x, table.c.y, table.c.z)) + # requests x, y, z + result = conn.execute(select(table.c.x, table.c.y, table.c.z)) - # iterate rows as y, x - for y, x in result.columns("y", "x"): - print("Y: %s X: %s" % (y, x)) + # iterate rows as y, x + for y, x in result.columns("y", "x"): + print("Y: %s X: %s" % (y, x)) :meth:`_engine.Result.scalars` - returns lists of scalar objects, from the first column by default but can also be selected: @@ -1300,10 +1314,10 @@ dictionaries: .. 
sourcecode:: with engine.connect() as conn: - result = conn.execute(select(table.c.x, table.c.y, table.c.z)) + result = conn.execute(select(table.c.x, table.c.y, table.c.z)) - for map_ in result.mappings(): - print("Y: %(y)s X: %(x)s" % map_) + for map_ in result.mappings(): + print("Y: %(y)s X: %(x)s" % map_) When using Core, the object returned by :meth:`_engine.Connection.execute` is an instance of :class:`.CursorResult`, which continues to feature the same API @@ -1374,8 +1388,8 @@ can be summarized. Given a "named tuple" in pseudo code as:: The biggest cross-incompatible difference is the behavior of ``__contains__``:: - "id" in row # True for a mapping, False for a named tuple - "some name" in row # False for a mapping, True for a named tuple + "id" in row # True for a mapping, False for a named tuple + "some name" in row # False for a mapping, True for a named tuple In 1.4, when a ``LegacyRow`` is returned by a Core result set, the above ``"id" in row`` comparison will continue to succeed, however a deprecation @@ -1402,7 +1416,7 @@ when the row was first fetched. 
This means for example when retrieving a datetime value from SQLite, the data for the row as present in the :class:`.RowProxy` object would previously have looked like:: - row_proxy = (1, '2019-12-31 19:56:58.272106') + row_proxy = (1, "2019-12-31 19:56:58.272106") and then upon access via ``__getitem__``, the ``datetime.strptime()`` function would be used on the fly to convert the above string date into a ``datetime`` @@ -1478,8 +1492,8 @@ allows for greater cross-compatibility between the two, which is a key goal of the 2.0 transition:: >>> from sqlalchemy import column, select - >>> c1, c2, c3, c4 = column('c1'), column('c2'), column('c3'), column('c4') - >>> stmt = select(c1, c2, c3.label('c2'), c2, c4) + >>> c1, c2, c3, c4 = column("c1"), column("c2"), column("c3"), column("c4") + >>> stmt = select(c1, c2, c3.label("c2"), c2, c4) >>> print(stmt) SELECT c1, c2, c3 AS c2, c2, c4 @@ -1522,7 +1536,7 @@ does not imply deduplication of column objects, although it does imply deduplication of implicitly generated labels:: >>> from sqlalchemy import table - >>> user = table('user', column('id'), column('name')) + >>> user = table("user", column("id"), column("name")) >>> stmt = select(user.c.id, user.c.name, user.c.id).apply_labels() >>> print(stmt) SELECT "user".id AS user_id, "user".name AS user_name, "user".id AS id_1 @@ -1606,7 +1620,7 @@ prominently with CAST:: For CAST against expressions that don't have a name, the previous logic is used to generate the usual "anonymous" labels:: - >>> print(select(cast('hi there,' + foo.c.data, String))) + >>> print(select(cast("hi there," + foo.c.data, String))) SELECT CAST(:data_1 + foo.data AS VARCHAR) AS anon_1 FROM foo @@ -1614,14 +1628,14 @@ A :func:`.cast` against a :class:`.Label`, despite having to omit the label expression as these don't render inside of a CAST, will nonetheless make use of the given name:: - >>> print(select(cast(('hi there,' + foo.c.data).label('hello_data'), String))) + >>> print(select(cast(("hi 
there," + foo.c.data).label("hello_data"), String))) SELECT CAST(:data_1 + foo.data AS VARCHAR) AS hello_data FROM foo And of course as was always the case, :class:`.Label` can be applied to the expression on the outside to apply an "AS " label directly:: - >>> print(select(cast(('hi there,' + foo.c.data), String).label('hello_data'))) + >>> print(select(cast(("hi there," + foo.c.data), String).label("hello_data"))) SELECT CAST(:data_1 + foo.data AS VARCHAR) AS hello_data FROM foo @@ -1762,12 +1776,11 @@ should be opted in to, rather than being turned on by default. To ensure that a CREATE CONSTRAINT is emitted for these types, set these flags to ``True``:: - class Spam(Base): - __tablename__ = "spam" - id = Column(Integer, primary_key=True) - boolean = Column(Boolean(create_constraint=True)) - enum = Column(Enum("a", "b", "c", create_constraint=True)) - + class Spam(Base): + __tablename__ = "spam" + id = Column(Integer, primary_key=True) + boolean = Column(Boolean(create_constraint=True)) + enum = Column(Enum("a", "b", "c", create_constraint=True)) :ticket:`5367` @@ -1796,13 +1809,14 @@ To configure column-level raiseload on a mapping, the the attribute:: class Book(Base): - __tablename__ = 'book' + __tablename__ = "book" book_id = Column(Integer, primary_key=True) title = Column(String(200), nullable=False) summary = deferred(Column(String(2000)), raiseload=True) excerpt = deferred(Column(Text), raiseload=True) + book_w_excerpt = session.query(Book).options(undefer(Book.excerpt)).first() It was originally considered that the existing :func:`.raiseload` option that @@ -1810,8 +1824,7 @@ works for :func:`_orm.relationship` attributes be expanded to also support colum attributes. 
However, this would break the "wildcard" behavior of :func:`.raiseload`, which is documented as allowing one to prevent all relationships from loading:: - session.query(Order).options( - joinedload(Order.items), raiseload('*')) + session.query(Order).options(joinedload(Order.items), raiseload("*")) Above, if we had expanded :func:`.raiseload` to accommodate for columns as well, the wildcard would also prevent columns from loading and thus be a @@ -2003,11 +2016,7 @@ as entity / column should work:: row._mapping[u1] # same as row[0] - row = ( - s.query(User.id, Address.email_address) - .join(User.addresses) - .first() - ) + row = s.query(User.id, Address.email_address).join(User.addresses).first() row._mapping[User.id] # same as row[0] row._mapping["id"] # same as row[0] @@ -2202,13 +2211,11 @@ use of the :paramref:`_orm.Session.future` flag to :term:`2.0-style` mode:: Session = sessionmaker(engine, future=True) with Session() as session: - u1 = User() - session.add(u1) - - a1 = Address() - a1.user = u1 # <--- will not add "a1" to the Session - + u1 = User() + session.add(u1) + a1 = Address() + a1.user = u1 # <--- will not add "a1" to the Session :ticket:`5150` @@ -2225,7 +2232,7 @@ selectin/subquery loaders will run an "immediateload" operation for a given relationship, when an expired object is unexpired or an object is refreshed:: >>> a1 = session.query(A).options(joinedload(A.bs)).first() - >>> a1.data = 'new data' + >>> a1.data = "new data" >>> session.commit() Above, the ``A`` object was loaded with a ``joinedload()`` option associated @@ -2251,7 +2258,7 @@ a refresh scenario, which resembles the query emitted by "lazyload", emitted as an additional query:: >>> a1 = session.query(A).options(selectinload(A.bs)).first() - >>> a1.data = 'new data' + >>> a1.data = "new data" >>> session.commit() >>> a1.data SELECT a.id AS a_id, a.data AS a_data @@ -2333,9 +2340,11 @@ eventually identified in :ticket:`4519` where this empty collection could be harmful, which is 
when the object is merged into a session:: >>> u1 = User(id=1) # create an empty User to merge with id=1 in the database - >>> merged1 = session.merge(u1) # value of merged1.addresses is unchanged from that of the DB + >>> merged1 = session.merge( + ... u1 + ... ) # value of merged1.addresses is unchanged from that of the DB - >>> u2 = User(id=2) # create an empty User to merge with id=2 in the database + >>> u2 = User(id=2) # create an empty User to merge with id=2 in the database >>> u2.addresses [] >>> merged2 = session.merge(u2) # value of merged2.addresses has been emptied in the DB @@ -2364,7 +2373,9 @@ however is not added to ``__dict__`` until it is actually mutated:: >>> u1 = User() >>> l1 = u1.addresses # new list is created, associated with the state >>> assert u1.addresses is l1 # you get the same list each time you access it - >>> assert "addresses" not in u1.__dict__ # but it won't go into __dict__ until it's mutated + >>> assert ( + ... "addresses" not in u1.__dict__ + ... ) # but it won't go into __dict__ until it's mutated >>> from sqlalchemy import inspect >>> inspect(u1).attrs.addresses.history History(added=None, unchanged=None, deleted=None) @@ -2386,7 +2397,9 @@ the object contains certain values based on its ``__dict__``:: >>> u1.addresses [] # this will now fail, would pass before - >>> assert {k: v for k, v in u1.__dict__.items() if not k.startswith("_")} == {"addresses": []} + >>> assert {k: v for k, v in u1.__dict__.items() if not k.startswith("_")} == { + ... "addresses": [] + ... 
} or to ensure that the collection won't require a lazy load to proceed, the (admittedly awkward) code below will now also fail:: @@ -2415,10 +2428,11 @@ SQLAlchemy has always had logic to detect when an object in the :class:`.Session to be inserted has the same primary key as an object that is already present:: class Product(Base): - __tablename__ = 'product' + __tablename__ = "product" id = Column(Integer, primary_key=True) + session = Session(engine) # add Product with primary key 1 @@ -2500,8 +2514,7 @@ disallowed:: # ... # this is now an error - addresses = relationship( - "Address", viewonly=True, cascade="all, delete-orphan") + addresses = relationship("Address", viewonly=True, cascade="all, delete-orphan") The above will raise:: @@ -2542,10 +2555,7 @@ inheritance mapping:: s.commit() - print( - s.query(Manager).select_entity_from(s.query(Employee).subquery()).all() - ) - + print(s.query(Manager).select_entity_from(s.query(Employee).subquery()).all()) The subquery selects both the ``Engineer`` and the ``Manager`` rows, and even though the outer query is against ``Manager``, we get a non ``Manager`` @@ -2818,8 +2828,9 @@ effect. 
When "optional" is used on a :class:`.Sequence` that is present in the integer primary key column of a table:: Table( - "some_table", metadata, - Column("id", Integer, Sequence("some_seq", optional=True), primary_key=True) + "some_table", + metadata, + Column("id", Integer, Sequence("some_seq", optional=True), primary_key=True), ) The above :class:`.Sequence` is only used for DDL and INSERT statements if the diff --git a/doc/build/changelog/migration_20.rst b/doc/build/changelog/migration_20.rst index dd3ce59f9e..781d530ad2 100644 --- a/doc/build/changelog/migration_20.rst +++ b/doc/build/changelog/migration_20.rst @@ -206,22 +206,22 @@ deprecation class is emitted only when an environment variable Given the example program below:: - from sqlalchemy import column - from sqlalchemy import create_engine - from sqlalchemy import select - from sqlalchemy import table + from sqlalchemy import column + from sqlalchemy import create_engine + from sqlalchemy import select + from sqlalchemy import table - engine = create_engine("sqlite://") + engine = create_engine("sqlite://") - engine.execute("CREATE TABLE foo (id integer)") - engine.execute("INSERT INTO foo (id) VALUES (1)") + engine.execute("CREATE TABLE foo (id integer)") + engine.execute("INSERT INTO foo (id) VALUES (1)") - foo = table("foo", column("id")) - result = engine.execute(select([foo.c.id])) + foo = table("foo", column("id")) + result = engine.execute(select([foo.c.id])) - print(result.fetchall()) + print(result.fetchall()) The above program uses several patterns that many users will already identify as "legacy", namely the use of the :meth:`_engine.Engine.execute` method @@ -268,32 +268,31 @@ With warnings turned on, our program now has a lot to say:: With the above guidance, we can migrate our program to use 2.0 styles, and as a bonus our program is much clearer:: - from sqlalchemy import column - from sqlalchemy import create_engine - from sqlalchemy import select - from sqlalchemy import table - from 
sqlalchemy import text - + from sqlalchemy import column + from sqlalchemy import create_engine + from sqlalchemy import select + from sqlalchemy import table + from sqlalchemy import text - engine = create_engine("sqlite://") - # don't rely on autocommit for DML and DDL - with engine.begin() as connection: - # use connection.execute(), not engine.execute() - # use the text() construct to execute textual SQL - connection.execute(text("CREATE TABLE foo (id integer)")) - connection.execute(text("INSERT INTO foo (id) VALUES (1)")) + engine = create_engine("sqlite://") + # don't rely on autocommit for DML and DDL + with engine.begin() as connection: + # use connection.execute(), not engine.execute() + # use the text() construct to execute textual SQL + connection.execute(text("CREATE TABLE foo (id integer)")) + connection.execute(text("INSERT INTO foo (id) VALUES (1)")) - foo = table("foo", column("id")) - with engine.connect() as connection: - # use connection.execute(), not engine.execute() - # select() now accepts column / table expressions positionally - result = connection.execute(select(foo.c.id)) + foo = table("foo", column("id")) - print(result.fetchall()) + with engine.connect() as connection: + # use connection.execute(), not engine.execute() + # select() now accepts column / table expressions positionally + result = connection.execute(select(foo.c.id)) + print(result.fetchall()) The goal of "2.0 deprecations mode" is that a program which runs with no :class:`_exc.RemovedIn20Warning` warnings with "2.0 deprecations mode" turned @@ -385,8 +384,6 @@ The new engine is described at :class:`_future.Engine` which delivers a new conn.commit() # commit as you go - - Migration to 2.0 Step Five - Use the ``future`` flag on Session ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -420,6 +417,7 @@ in 1.4 which are now closely matched to the patterns used by the :class:`_orm.Session` may be used as a context manager:: from sqlalchemy.orm import Session + 
with Session(engine) as session: session.add(MyObject()) session.commit() @@ -465,7 +463,7 @@ the underlying DBAPI transaction, but in SQLAlchemy conn = engine.connect() # won't autocommit in 2.0 - conn.execute(some_table.insert().values(foo='bar')) + conn.execute(some_table.insert().values(foo="bar")) Nor will this autocommit:: @@ -481,10 +479,7 @@ execution option, will be removed:: conn = engine.connect() # won't autocommit in 2.0 - conn.execute( - text("EXEC my_procedural_thing()").execution_options(autocommit=True) - ) - + conn.execute(text("EXEC my_procedural_thing()").execution_options(autocommit=True)) **Migration to 2.0** @@ -493,13 +488,13 @@ style` execution is to make use of the :meth:`_engine.Connection.begin` method, or the :meth:`_engine.Engine.begin` context manager:: with engine.begin() as conn: - conn.execute(some_table.insert().values(foo='bar')) - conn.execute(some_other_table.insert().values(bat='hoho')) + conn.execute(some_table.insert().values(foo="bar")) + conn.execute(some_other_table.insert().values(bat="hoho")) with engine.connect() as conn: with conn.begin(): - conn.execute(some_table.insert().values(foo='bar')) - conn.execute(some_other_table.insert().values(bat='hoho')) + conn.execute(some_table.insert().values(foo="bar")) + conn.execute(some_other_table.insert().values(bat="hoho")) with engine.begin() as conn: conn.execute(text("EXEC my_procedural_thing()")) @@ -511,8 +506,8 @@ when a statement is first invoked in the absence of an explicit call to :meth:`_future.Connection.begin`:: with engine.connect() as conn: - conn.execute(some_table.insert().values(foo='bar')) - conn.execute(some_other_table.insert().values(bat='hoho')) + conn.execute(some_table.insert().values(foo="bar")) + conn.execute(some_other_table.insert().values(bat="hoho")) conn.commit() @@ -550,7 +545,7 @@ explicit as to how the transaction should be used. 
For the vast majority of Core use cases, it's the pattern that is already recommended:: with engine.begin() as conn: - conn.execute(some_table.insert().values(foo='bar')) + conn.execute(some_table.insert().values(foo="bar")) For "commit as you go, or rollback instead" usage, which resembles how the :class:`_orm.Session` is normally used today, the "future" version of @@ -568,7 +563,7 @@ a statement is first invoked:: engine = create_engine(..., future=True) with engine.connect() as conn: - conn.execute(some_table.insert().values(foo='bar')) + conn.execute(some_table.insert().values(foo="bar")) conn.commit() conn.execute(text("some other SQL")) @@ -618,11 +613,11 @@ execution patterns, is removed:: metadata_obj = MetaData(bind=engine) # no longer supported - metadata_obj.create_all() # requires Engine or Connection + metadata_obj.create_all() # requires Engine or Connection metadata_obj.reflect() # requires Engine or Connection - t = Table('t', metadata_obj, autoload=True) # use autoload_with=engine + t = Table("t", metadata_obj, autoload=True) # use autoload_with=engine result = engine.execute(t.select()) # no longer supported @@ -652,7 +647,7 @@ the ORM-level :meth:`_orm.Session.execute` method):: metadata_obj.reflect(engine) # reflect individual table - t = Table('t', metadata_obj, autoload_with=engine) + t = Table("t", metadata_obj, autoload_with=engine) # connection level: @@ -667,12 +662,11 @@ the ORM-level :meth:`_orm.Session.execute` method):: metadata_obj.reflect(connection) # reflect individual table - t = Table('t', metadata_obj, autoload_with=connection) + t = Table("t", metadata_obj, autoload_with=connection) # execute SQL statements result = conn.execute(t.select()) - **Discussion** @@ -742,36 +736,36 @@ execution and "bound metadata" are no longer as widely used so in 2.0 we seek to finally reduce the number of choices for how to execute a statement in Core from "many choices":: - # many choices + # many choices - # bound metadata? 
- metadata_obj = MetaData(engine) + # bound metadata? + metadata_obj = MetaData(engine) - # or not? - metadata_obj = MetaData() + # or not? + metadata_obj = MetaData() - # execute from engine? - result = engine.execute(stmt) + # execute from engine? + result = engine.execute(stmt) - # or execute the statement itself (but only if you did - # "bound metadata" above, which means you can't get rid of "bound" if any - # part of your program uses this form) - result = stmt.execute() + # or execute the statement itself (but only if you did + # "bound metadata" above, which means you can't get rid of "bound" if any + # part of your program uses this form) + result = stmt.execute() - # execute from connection, but it autocommits? - conn = engine.connect() - conn.execute(stmt) + # execute from connection, but it autocommits? + conn = engine.connect() + conn.execute(stmt) - # execute from connection, but autocommit isn't working, so use the special - # option? - conn.execution_options(autocommit=True).execute(stmt) + # execute from connection, but autocommit isn't working, so use the special + # option? + conn.execution_options(autocommit=True).execute(stmt) - # or on the statement ?! - conn.execute(stmt.execution_options(autocommit=True)) + # or on the statement ?! + conn.execute(stmt.execution_options(autocommit=True)) - # or execute from connection, and we use explicit transaction? - with conn.begin(): - conn.execute(stmt) + # or execute from connection, and we use explicit transaction? + with conn.begin(): + conn.execute(stmt) to "one choice", where by "one choice" we mean "explicit connection with explicit transaction"; there are still a few ways to demarcate @@ -779,23 +773,22 @@ transaction blocks depending on need. 
The "one choice" is to procure a :class:`_engine.Connection` and then to explicitly demarcate the transaction, in the case that the operation is a write operation:: - # one choice - work with explicit connection, explicit transaction - # (there remain a few variants on how to demarcate the transaction) - - # "begin once" - one transaction only per checkout - with engine.begin() as conn: - result = conn.execute(stmt) + # one choice - work with explicit connection, explicit transaction + # (there remain a few variants on how to demarcate the transaction) - # "commit as you go" - zero or more commits per checkout - with engine.connect() as conn: - result = conn.execute(stmt) - conn.commit() + # "begin once" - one transaction only per checkout + with engine.begin() as conn: + result = conn.execute(stmt) - # "commit as you go" but with a transaction block instead of autobegin - with engine.connect() as conn: - with conn.begin(): - result = conn.execute(stmt) + # "commit as you go" - zero or more commits per checkout + with engine.connect() as conn: + result = conn.execute(stmt) + conn.commit() + # "commit as you go" but with a transaction block instead of autobegin + with engine.connect() as conn: + with conn.begin(): + result = conn.execute(stmt) execute() method more strict, execution options are more prominent ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -809,24 +802,22 @@ available argument patterns. 
The new API in the 1.4 series is described at require modification:: - connection = engine.connect() - - # direct string SQL not supported; use text() or exec_driver_sql() method - result = connection.execute("select * from table") + connection = engine.connect() - # positional parameters no longer supported, only named - # unless using exec_driver_sql() - result = connection.execute(table.insert(), ('x', 'y', 'z')) + # direct string SQL not supported; use text() or exec_driver_sql() method + result = connection.execute("select * from table") - # **kwargs no longer accepted, pass a single dictionary - result = connection.execute(table.insert(), x=10, y=5) + # positional parameters no longer supported, only named + # unless using exec_driver_sql() + result = connection.execute(table.insert(), ("x", "y", "z")) - # multiple *args no longer accepted, pass a list - result = connection.execute( - table.insert(), - {"x": 10, "y": 5}, {"x": 15, "y": 12}, {"x": 9, "y": 8} - ) + # **kwargs no longer accepted, pass a single dictionary + result = connection.execute(table.insert(), x=10, y=5) + # multiple *args no longer accepted, pass a list + result = connection.execute( + table.insert(), {"x": 10, "y": 5}, {"x": 15, "y": 12}, {"x": 9, "y": 8} + ) **Migration to 2.0** @@ -835,21 +826,19 @@ argument styles that are accepted by the 1.x :meth:`_engine.Connection.execute` method, so the following code is cross-compatible between 1.x and 2.0:: - connection = engine.connect() - - from sqlalchemy import text - result = connection.execute(text("select * from table")) + connection = engine.connect() - # pass a single dictionary for single statement execution - result = connection.execute(table.insert(), {"x": 10, "y": 5}) + from sqlalchemy import text - # pass a list of dictionaries for executemany - result = connection.execute( - table.insert(), - [{"x": 10, "y": 5}, {"x": 15, "y": 12}, {"x": 9, "y": 8}] - ) + result = connection.execute(text("select * from table")) + # pass a single 
dictionary for single statement execution + result = connection.execute(table.insert(), {"x": 10, "y": 5}) + # pass a list of dictionaries for executemany + result = connection.execute( + table.insert(), [{"x": 10, "y": 5}, {"x": 15, "y": 12}, {"x": 9, "y": 8}] + ) **Discussion** @@ -892,11 +881,10 @@ tuples when using "future" mode:: row = result.first() # suppose the row is (1, 2) - "x" in row # evaluates to False, in 1.x / future=False, this would be True + "x" in row # evaluates to False, in 1.x / future=False, this would be True 1 in row # evaluates to True, in 1.x / future=False, this would be False - **Migration to 2.0** Application code or test suites that are testing for a particular key @@ -941,10 +929,7 @@ or attribute:: stmt = select(User, Address).join(User.addresses) for row in session.execute(stmt).mappings(): - print("the user is: %s the address is: %s" % ( - row[User], - row[Address] - )) + print("the user is: %s the address is: %s" % (row[User], row[Address])) .. seealso:: @@ -981,14 +966,10 @@ now accepts its WHEN criteria positionally, rather than as a list:: # list emits a deprecation warning case_clause = case( - [ - (table.c.x == 5, "five"), - (table.c.x == 7, "seven") - ], - else_="neither five nor seven" + [(table.c.x == 5, "five"), (table.c.x == 7, "seven")], + else_="neither five nor seven", ) - **Migration to 2.0** Only the "generative" style of :func:`_sql.select` will be supported. The list @@ -1011,9 +992,7 @@ is cross-compatible with 1.4 and 2.0:: # case conditions passed positionally case_clause = case( - (table.c.x == 5, "five"), - (table.c.x == 7, "seven"), - else_="neither five nor seven" + (table.c.x == 5, "five"), (table.c.x == 7, "seven"), else_="neither five nor seven" ) **Discussion** @@ -1032,14 +1011,14 @@ documented style in the Core tutorial. Examples of "structural" vs. 
"data" elements are as follows:: - # table columns for CREATE TABLE - structural - table = Table("table", metadata_obj, Column('x', Integer), Column('y', Integer)) + # table columns for CREATE TABLE - structural + table = Table("table", metadata_obj, Column("x", Integer), Column("y", Integer)) - # columns in a SELECT statement - structural - stmt = select(table.c.x, table.c.y) + # columns in a SELECT statement - structural + stmt = select(table.c.x, table.c.y) - # literal elements in an IN clause - data - stmt = stmt.where(table.c.y.in_([1, 2, 3])) + # literal elements in an IN clause - data + stmt = stmt.where(table.c.y.in_([1, 2, 3])) .. seealso:: @@ -1066,10 +1045,7 @@ constructor arguments to :func:`_sql.insert`, :func:`_sql.update` and stmt = table.delete(table.c.x > 15) # no longer supported - stmt = table.update( - table.c.x < 15, - preserve_parameter_order=True - ).values( + stmt = table.update(table.c.x < 15, preserve_parameter_order=True).values( [(table.c.y, 20), (table.c.x, table.c.y + 10)] ) @@ -1088,10 +1064,12 @@ examples:: stmt = table.delete().where(table.c.x > 15) # use generative methods, ordered_values() replaces preserve_parameter_order - stmt = table.update().where( - table.c.x < 15, - ).ordered_values( - (table.c.y, 20), (table.c.x, table.c.y + 10) + stmt = ( + table.update() + .where( + table.c.x < 15, + ) + .ordered_values((table.c.y, 20), (table.c.x, table.c.y + 10)) ) **Discussion** @@ -1162,9 +1140,7 @@ Code that works with classical mappings should change imports and code from:: from sqlalchemy.orm import mapper - mapper(SomeClass, some_table, properties={ - "related": relationship(SomeRelatedClass) - }) + mapper(SomeClass, some_table, properties={"related": relationship(SomeRelatedClass)}) To work from a central :class:`_orm.registry` object:: @@ -1172,9 +1148,9 @@ To work from a central :class:`_orm.registry` object:: mapper_reg = registry() - mapper_reg.map_imperatively(SomeClass, some_table, properties={ - "related": 
relationship(SomeRelatedClass) - }) + mapper_reg.map_imperatively( + SomeClass, some_table, properties={"related": relationship(SomeRelatedClass)} + ) The above :class:`_orm.registry` is also the source for declarative mappings, and classical mappings now have access to this registry including string-based @@ -1186,19 +1162,23 @@ configuration on :func:`_orm.relationship`:: Base = mapper_reg.generate_base() + class SomeRelatedClass(Base): - __tablename__ = 'related' + __tablename__ = "related" # ... - mapper_reg.map_imperatively(SomeClass, some_table, properties={ - "related": relationship( - "SomeRelatedClass", - primaryjoin="SomeRelatedClass.related_id == SomeClass.id" - ) - }) - + mapper_reg.map_imperatively( + SomeClass, + some_table, + properties={ + "related": relationship( + "SomeRelatedClass", + primaryjoin="SomeRelatedClass.related_id == SomeClass.id", + ) + }, + ) **Discussion** @@ -1637,7 +1617,6 @@ will all be removed in 2.0:: # string use removed q = session.query(Address).filter(with_parent(u1, "addresses")) - **Migration to 2.0** Modern SQLAlchemy 1.x versions support the recommended technique which @@ -1684,7 +1663,6 @@ attributes in a list will be removed:: # chaining removed q = session.query(User).join("orders", "items", "keywords") - **Migration to 2.0** Use individual calls to :meth:`_orm.Query.join` for 1.x /2.0 cross compatible @@ -1720,24 +1698,29 @@ ORM Query - join(..., aliased=True), from_joinpoint removed The ``aliased=True`` option on :meth:`_query.Query.join` is removed, as is the ``from_joinpoint`` flag:: - # no longer supported - q = session.query(Node).\ - join("children", aliased=True).filter(Node.name == "some sub child"). 
- join("children", from_joinpoint=True, aliased=True).\ - filter(Node.name == 'some sub sub child') + # no longer supported + q = ( + session.query(Node) + .join("children", aliased=True) + .filter(Node.name == "some sub child") + .join("children", from_joinpoint=True, aliased=True) + .filter(Node.name == "some sub sub child") + ) **Migration to 2.0** Use explicit aliases instead:: - n1 = aliased(Node) - n2 = aliased(Node) - - q = select(Node).join(Node.children.of_type(n1)).\ - where(n1.name == "some sub child").\ - join(n1.children.of_type(n2)).\ - where(n2.name == "some sub child") + n1 = aliased(Node) + n2 = aliased(Node) + q = ( + select(Node) + .join(Node.children.of_type(n1)) + .where(n1.name == "some sub child") + .join(n1.children.of_type(n2)) + .where(n2.name == "some sub child") + ) **Discussion** @@ -1776,8 +1759,13 @@ as well as "address.email_address" but only return User objects:: # 1.xx code - result = session.query(User).join(User.addresses).\ - distinct().order_by(Address.email_address).all() + result = ( + session.query(User) + .join(User.addresses) + .distinct() + .order_by(Address.email_address) + .all() + ) In version 2.0, the "email_address" column will not be automatically added to the columns clause, and the above query will fail, since relational @@ -1792,8 +1780,12 @@ returning the main entity object, and not the extra column, use the # 1.4 / 2.0 code - stmt = select(User, Address.email_address).join(User.addresses).\ - distinct().order_by(Address.email_address) + stmt = ( + select(User, Address.email_address) + .join(User.addresses) + .distinct() + .order_by(Address.email_address) + ) result = session.execute(stmt).columns(User).all() @@ -1820,10 +1812,12 @@ Selecting from the query itself as a subquery, e.g. 
"from_self()" The :meth:`_orm.Query.from_self` method will be removed from :class:`_orm.Query`:: # from_self is removed - q = session.query(User, Address.email_address).\ - join(User.addresses).\ - from_self(User).order_by(Address.email_address) - + q = ( + session.query(User, Address.email_address) + .join(User.addresses) + .from_self(User) + .order_by(Address.email_address) + ) **Migration to 2.0** @@ -1837,8 +1831,7 @@ since the final query wants to query in terms of both the ``User`` and from sqlalchemy.orm import aliased - subq = session.query(User, Address.email_address).\ - join(User.addresses).subquery() + subq = session.query(User, Address.email_address).join(User.addresses).subquery() ua = aliased(User, subq) @@ -1850,8 +1843,7 @@ The same form may be used in :term:`2.0 style`:: from sqlalchemy.orm import aliased - subq = select(User, Address.email_address).\ - join(User.addresses).subquery() + subq = select(User, Address.email_address).join(User.addresses).subquery() ua = aliased(User, subq) @@ -1861,7 +1853,6 @@ The same form may be used in :term:`2.0 style`:: result = session.execute(stmt) - **Discussion** The :meth:`_query.Query.from_self` method is a very complicated method that is rarely @@ -1892,16 +1883,15 @@ our ``User`` and ``Address`` entities have overlapping column names, we can select from both entities at once without having to specify any particular labeling:: - # 1.4 / 2.0 code + # 1.4 / 2.0 code - subq = select(User, Address).\ - join(User.addresses).subquery() + subq = select(User, Address).join(User.addresses).subquery() - ua = aliased(User, subq) - aa = aliased(Address, subq) + ua = aliased(User, subq) + aa = aliased(Address, subq) - stmt = select(ua, aa).order_by(aa.email_address) - result = session.execute(stmt) + stmt = select(ua, aa).order_by(aa.email_address) + result = session.execute(stmt) The above query will disambiguate the ``.id`` column of ``User`` and ``Address``, where ``Address.id`` is rendered and tracked as ``id_1``:: 
@@ -1985,9 +1975,7 @@ where the "joined eager loading" loader strategy is used with collections:: # In the new API, uniquing is available but not implicitly # enabled - result = session.execute( - select(User).options(joinedload(User.addresses)) - ) + result = session.execute(select(User).options(joinedload(User.addresses))) # this actually will raise an error to let the user know that # uniquing should be applied @@ -2056,16 +2044,15 @@ to achieve 2.0 style querying that's in terms of a specific relationship: class User(Base): - __tablename__ = 'user' + __tablename__ = "user" posts = relationship(Post, lazy="dynamic") + jack = session.get(User, 5) # filter Jack's blog posts - posts = session.scalars( - jack.posts.statement.where(Post.headline == "this is a post") - ) + posts = session.scalars(jack.posts.statement.where(Post.headline == "this is a post")) * Use the :func:`_orm.with_parent` function to construct a :func:`_sql.select` construct directly:: @@ -2075,9 +2062,9 @@ to achieve 2.0 style querying that's in terms of a specific relationship: jack = session.get(User, 5) posts = session.scalars( - select(Post). - where(with_parent(jack, User.posts)). 
- where(Post.headline == "this is a post") + select(Post) + .where(with_parent(jack, User.posts)) + .where(Post.headline == "this is a post") ) **Discussion** @@ -2112,7 +2099,6 @@ is, this pattern:: # commits, won't be supported sess.flush() - **Migration to 2.0** The main reason a :class:`_orm.Session` is used in "autocommit" mode @@ -2128,7 +2114,7 @@ be called:: sess = Session(engine) sess.begin() # begin explicitly; if not called, will autobegin - # when database access is needed + # when database access is needed sess.add(obj) @@ -2166,6 +2152,7 @@ a decorator may be used:: import contextlib + @contextlib.contextmanager def transaction(session): if not session.in_transaction(): @@ -2174,7 +2161,6 @@ a decorator may be used:: else: yield - The above context manager may be used in the same way the "subtransaction" flag works, such as in the following example:: @@ -2184,12 +2170,14 @@ The above context manager may be used in the same way the with transaction(session): method_b(session) + # method_b also starts a transaction, but when # called from method_a participates in the ongoing # transaction. def method_b(session): with transaction(session): - session.add(SomeObject('bat', 'lala')) + session.add(SomeObject("bat", "lala")) + Session = sessionmaker(engine) @@ -2204,8 +2192,10 @@ or methods to be concerned with the details of transaction demarcation:: def method_a(session): method_b(session) + def method_b(session): - session.add(SomeObject('bat', 'lala')) + session.add(SomeObject("bat", "lala")) + Session = sessionmaker(engine) diff --git a/doc/build/changelog/unreleased_14/8525.rst b/doc/build/changelog/unreleased_14/8525.rst index 3031ec378c..8508e396b4 100644 --- a/doc/build/changelog/unreleased_14/8525.rst +++ b/doc/build/changelog/unreleased_14/8525.rst @@ -7,4 +7,4 @@ Database via Azure Active Directory", which apparently lacks the ``system_views`` view entirely. 
Error catching has been extended that under no circumstances will this method ever fail, provided database connectivity - is present. \ No newline at end of file + is present. diff --git a/doc/build/changelog/unreleased_14/8569.rst b/doc/build/changelog/unreleased_14/8569.rst index fc3b3f7398..5ae6fce091 100644 --- a/doc/build/changelog/unreleased_14/8569.rst +++ b/doc/build/changelog/unreleased_14/8569.rst @@ -10,4 +10,4 @@ combinations of SQL label names and aliasing. This "wrapping" is not appropriate for :func:`_orm.contains_eager` which has always had the contract that the user-defined SQL statement is unmodified with the - exception of adding the appropriate columns to be fetched. \ No newline at end of file + exception of adding the appropriate columns to be fetched. diff --git a/doc/build/changelog/unreleased_20/3482.rst b/doc/build/changelog/unreleased_20/3482.rst index c18606caa3..e82f33b794 100644 --- a/doc/build/changelog/unreleased_20/3482.rst +++ b/doc/build/changelog/unreleased_20/3482.rst @@ -11,4 +11,4 @@ :meth:`_sql.ColumnOperators.contains`, :meth:`_sql.ColumnOperators.startswith`, etc. Huge thanks to Matias Martinez Rebori for their meticulous and complete efforts in implementing - these new methods. \ No newline at end of file + these new methods. diff --git a/doc/build/changelog/unreleased_20/4926.rst b/doc/build/changelog/unreleased_20/4926.rst index cfd3461ea5..954c488b15 100644 --- a/doc/build/changelog/unreleased_20/4926.rst +++ b/doc/build/changelog/unreleased_20/4926.rst @@ -20,4 +20,4 @@ .. seealso:: - :ref:`change_4926` \ No newline at end of file + :ref:`change_4926` diff --git a/doc/build/changelog/unreleased_20/5465.rst b/doc/build/changelog/unreleased_20/5465.rst index 2bf9f01a9f..824bfb4763 100644 --- a/doc/build/changelog/unreleased_20/5465.rst +++ b/doc/build/changelog/unreleased_20/5465.rst @@ -35,4 +35,4 @@ .. 
seealso:: - :ref:`change_5465_oracle` \ No newline at end of file + :ref:`change_5465_oracle` diff --git a/doc/build/changelog/unreleased_20/7156.rst b/doc/build/changelog/unreleased_20/7156.rst index cd81c9a6c1..4912a44c89 100644 --- a/doc/build/changelog/unreleased_20/7156.rst +++ b/doc/build/changelog/unreleased_20/7156.rst @@ -20,4 +20,4 @@ .. seealso:: - :ref:`postgresql_ranges` \ No newline at end of file + :ref:`postgresql_ranges` diff --git a/doc/build/changelog/unreleased_20/7161.rst b/doc/build/changelog/unreleased_20/7161.rst index ec584b3a97..50e7870157 100644 --- a/doc/build/changelog/unreleased_20/7161.rst +++ b/doc/build/changelog/unreleased_20/7161.rst @@ -7,4 +7,4 @@ dialect dependent, with PostgreSQL, MySQL/MariaDB and SQLite supporting it, and Oracle and SQL Server not supporting it. Third party dialects should also seek to ensure their :meth:`_engine.Inspector.has_table` method - searches for views as well as tables for the given name. \ No newline at end of file + searches for views as well as tables for the given name. diff --git a/doc/build/changelog/unreleased_20/7257.rst b/doc/build/changelog/unreleased_20/7257.rst index 6dd9a47609..1258712493 100644 --- a/doc/build/changelog/unreleased_20/7257.rst +++ b/doc/build/changelog/unreleased_20/7257.rst @@ -65,4 +65,4 @@ * Removed the very old "dbapi_proxy" module, which in very early SQLAlchemy releases was used to provide a transparent connection pool - over a raw DBAPI connection. \ No newline at end of file + over a raw DBAPI connection. 
diff --git a/doc/build/changelog/unreleased_20/7299.rst b/doc/build/changelog/unreleased_20/7299.rst index af2955e218..3aa4f4261d 100644 --- a/doc/build/changelog/unreleased_20/7299.rst +++ b/doc/build/changelog/unreleased_20/7299.rst @@ -14,4 +14,4 @@ as configured with the :class:`_types.Numeric`, :class:`_types.Float` , and related datatypes, just without the ability to maintain precision beyond 15 significant digits when using SQLite, unless alternate representations such - as strings are used. \ No newline at end of file + as strings are used. diff --git a/doc/build/changelog/unreleased_20/7316.rst b/doc/build/changelog/unreleased_20/7316.rst index 817d994de5..c361301457 100644 --- a/doc/build/changelog/unreleased_20/7316.rst +++ b/doc/build/changelog/unreleased_20/7316.rst @@ -18,4 +18,4 @@ :class:`_postgresql.ENUM` datatype is now a required keyword argument. The "name" is necessary in any case in order for the :class:`_postgresql.ENUM` to be usable as an error would be raised at SQL/DDL render time if "name" - were not present. \ No newline at end of file + were not present. diff --git a/doc/build/changelog/unreleased_20/7433.rst b/doc/build/changelog/unreleased_20/7433.rst index 5de470e594..2e17d92cbd 100644 --- a/doc/build/changelog/unreleased_20/7433.rst +++ b/doc/build/changelog/unreleased_20/7433.rst @@ -15,4 +15,4 @@ .. seealso:: - :ref:`change_7433` \ No newline at end of file + :ref:`change_7433` diff --git a/doc/build/changelog/unreleased_20/7631.rst b/doc/build/changelog/unreleased_20/7631.rst index d2e0992ab2..ce4b89ac0d 100644 --- a/doc/build/changelog/unreleased_20/7631.rst +++ b/doc/build/changelog/unreleased_20/7631.rst @@ -15,4 +15,4 @@ .. 
seealso:: - :ref:`ticket_7631` \ No newline at end of file + :ref:`ticket_7631` diff --git a/doc/build/changelog/unreleased_20/7759.rst b/doc/build/changelog/unreleased_20/7759.rst index b7f3bff8d0..0ba5ae6234 100644 --- a/doc/build/changelog/unreleased_20/7759.rst +++ b/doc/build/changelog/unreleased_20/7759.rst @@ -9,4 +9,4 @@ some scenarios as it allows the nesting attribute to be set simultaneously along with the explicit level of the CTE. - The :meth:`.HasCTE.add_cte` method also accepts multiple CTE objects. \ No newline at end of file + The :meth:`.HasCTE.add_cte` method also accepts multiple CTE objects. diff --git a/doc/build/changelog/unreleased_20/8141.rst b/doc/build/changelog/unreleased_20/8141.rst index 8c70c754cc..cf05636060 100644 --- a/doc/build/changelog/unreleased_20/8141.rst +++ b/doc/build/changelog/unreleased_20/8141.rst @@ -8,4 +8,4 @@ This phrase is not accepted by all databases and the operation will fail on a database that does not support it as there is no similarly compatible fallback within the scope of a single DDL statement. - Pull request courtesy Mike Fiedler. \ No newline at end of file + Pull request courtesy Mike Fiedler. diff --git a/doc/build/changelog/unreleased_20/8216.rst b/doc/build/changelog/unreleased_20/8216.rst index c213e37cd5..18b54fb5cb 100644 --- a/doc/build/changelog/unreleased_20/8216.rst +++ b/doc/build/changelog/unreleased_20/8216.rst @@ -9,4 +9,4 @@ .. seealso:: - :ref:`postgresql_json_types` - PostgreSQL JSON types. \ No newline at end of file + :ref:`postgresql_json_types` - PostgreSQL JSON types. diff --git a/doc/build/changelog/unreleased_20/8403.rst b/doc/build/changelog/unreleased_20/8403.rst index f7d2b6550a..ce2873d6d2 100644 --- a/doc/build/changelog/unreleased_20/8403.rst +++ b/doc/build/changelog/unreleased_20/8403.rst @@ -13,4 +13,4 @@ False, which leaves the previous behavior unchanged; this is to support existing code that makes explicit use of these attributes in queries. 
To migrate to the newer approach, apply explicit attributes to the abstract - base class as needed. \ No newline at end of file + base class as needed. diff --git a/doc/build/changelog/unreleased_20/8491.rst b/doc/build/changelog/unreleased_20/8491.rst index 155515700b..02661b14c6 100644 --- a/doc/build/changelog/unreleased_20/8491.rst +++ b/doc/build/changelog/unreleased_20/8491.rst @@ -9,4 +9,4 @@ unnecessary use of a prepared statement for this query has been fixed. Rationale is to eliminate the need for PostgreSQL to produce a query plan when the ping is emitted. The operation is not currently supported by the - ``psycopg2`` driver which continues to use ``SELECT 1``. \ No newline at end of file + ``psycopg2`` driver which continues to use ``SELECT 1``. diff --git a/doc/build/changelog/unreleased_20/composite_dataclass.rst b/doc/build/changelog/unreleased_20/composite_dataclass.rst index d5cd70574e..810d605a76 100644 --- a/doc/build/changelog/unreleased_20/composite_dataclass.rst +++ b/doc/build/changelog/unreleased_20/composite_dataclass.rst @@ -9,4 +9,4 @@ Additionally, classes mapped by :class:`_orm.composite` now support ordering comparison operations, e.g. ``<``, ``>=``, etc. - See the new documentation at :ref:`mapper_composite` for examples. \ No newline at end of file + See the new documentation at :ref:`mapper_composite` for examples. diff --git a/doc/build/changelog/unreleased_20/decl_fks.rst b/doc/build/changelog/unreleased_20/decl_fks.rst index 94de46eac6..d8f5597a5b 100644 --- a/doc/build/changelog/unreleased_20/decl_fks.rst +++ b/doc/build/changelog/unreleased_20/decl_fks.rst @@ -6,4 +6,4 @@ :func:`_orm.declared_attr` to achieve this mapping; the :class:`_schema.ForeignKey` object is copied along with the :class:`_schema.Column` itself when the column is applied to the declared - mapping. \ No newline at end of file + mapping. 
diff --git a/doc/build/changelog/unreleased_20/exec_default.rst b/doc/build/changelog/unreleased_20/exec_default.rst index 05ff5862b0..1a8edfac72 100644 --- a/doc/build/changelog/unreleased_20/exec_default.rst +++ b/doc/build/changelog/unreleased_20/exec_default.rst @@ -7,4 +7,4 @@ value. The :meth:`.Connection.scalar` method should be used instead, which has been reworked with new internal codepaths to suit invoking a SELECT for default generation objects without going through the - :meth:`.Connection.execute` method. \ No newline at end of file + :meth:`.Connection.execute` method. diff --git a/doc/build/changelog/unreleased_20/other_deprecations.rst b/doc/build/changelog/unreleased_20/other_deprecations.rst index 7e4f2a0dbe..0758dc9078 100644 --- a/doc/build/changelog/unreleased_20/other_deprecations.rst +++ b/doc/build/changelog/unreleased_20/other_deprecations.rst @@ -5,4 +5,4 @@ contract of this method, which is that it can iterate objects through arbitrary result sets, is long obsolete and no longer tested. Arbitrary statements can return objects by using constructs such - as :meth`.Select.from_statement` or :func:`_orm.aliased`. \ No newline at end of file + as :meth:`.Select.from_statement` or :func:`_orm.aliased`.
diff --git a/doc/build/changelog/unreleased_20/prop_name.rst b/doc/build/changelog/unreleased_20/prop_name.rst index d085d0ddce..7abe646ab1 100644 --- a/doc/build/changelog/unreleased_20/prop_name.rst +++ b/doc/build/changelog/unreleased_20/prop_name.rst @@ -16,4 +16,4 @@ :func:`_orm.synonym` function * :class:`_orm.CompositeProperty` becomes an alias for the primary name :class:`_orm.Composite`, constructed as always from the - :func:`_orm.composite` function \ No newline at end of file + :func:`_orm.composite` function diff --git a/doc/build/changelog/unreleased_20/sybase.rst b/doc/build/changelog/unreleased_20/sybase.rst index 8a98f57ae2..39f8c12c84 100644 --- a/doc/build/changelog/unreleased_20/sybase.rst +++ b/doc/build/changelog/unreleased_20/sybase.rst @@ -7,4 +7,4 @@ .. seealso:: - :ref:`external_toplevel` \ No newline at end of file + :ref:`external_toplevel` diff --git a/doc/build/changelog/whatsnew_20.rst b/doc/build/changelog/whatsnew_20.rst index 2858476114..c5ec3887cc 100644 --- a/doc/build/changelog/whatsnew_20.rst +++ b/doc/build/changelog/whatsnew_20.rst @@ -181,7 +181,7 @@ helper): stmt = select(str_col, int_col) # (variable) stmt: ReturningInsert[Tuple[str, int]] - ins_stmt = insert(table('t')).returning(str_col, int_col) + ins_stmt = insert(table("t")).returning(str_col, int_col) * The ``Tuple[]`` type from any row returning construct, when invoked with an ``.execute()`` method, carries through to :class:`_engine.Result` @@ -204,15 +204,15 @@ helper): row = result.first() if row is not None: - # for typed tuple unpacking or indexed access, - # use row.tuple() or row.t (this is the small typing-oriented accessor) - strval, intval = row.t + # for typed tuple unpacking or indexed access, + # use row.tuple() or row.t (this is the small typing-oriented accessor) + strval, intval = row.t - # (variable) strval: str - strval + # (variable) strval: str + strval - # (variable) intval: int - intval + # (variable) intval: int + intval * Scalar values for 
single-column statements do the right thing with methods like :meth:`_engine.Connection.scalar`, :meth:`_engine.Result.scalars`, @@ -429,6 +429,7 @@ with using the :class:`_orm.DeclarativeBase` class, which produces the same from sqlalchemy.orm import DeclarativeBase + class Base(DeclarativeBase): pass @@ -439,54 +440,60 @@ The :func:`_orm.mapped_column` is an ORM-typing aware construct that can be swapped directly for the use of :class:`_schema.Column`. Given a 1.x style mapping as:: - from sqlalchemy import Column - from sqlalchemy.orm import relationship - from sqlalchemy.orm import DeclarativeBase + from sqlalchemy import Column + from sqlalchemy.orm import relationship + from sqlalchemy.orm import DeclarativeBase - class Base(DeclarativeBase): - pass - class User(Base): - __tablename__ = 'user_account' + class Base(DeclarativeBase): + pass + - id = Column(Integer, primary_key=True) - name = Column(String(30), nullable=False) - fullname = Column(String) - addresses = relationship("Address", back_populates="user") + class User(Base): + __tablename__ = "user_account" - class Address(Base): - __tablename__ = "address" + id = Column(Integer, primary_key=True) + name = Column(String(30), nullable=False) + fullname = Column(String) + addresses = relationship("Address", back_populates="user") - id = Column(Integer, primary_key=True) - email_address = Column(String, nullable=False) - user_id = Column(ForeignKey("user_account.id"), nullable=False) - user = relationship("User", back_populates="addresses") + + class Address(Base): + __tablename__ = "address" + + id = Column(Integer, primary_key=True) + email_address = Column(String, nullable=False) + user_id = Column(ForeignKey("user_account.id"), nullable=False) + user = relationship("User", back_populates="addresses") We replace :class:`_schema.Column` with :func:`_orm.mapped_column`; no arguments need to change:: - from sqlalchemy.orm import DeclarativeBase - from sqlalchemy.orm import mapped_column - from 
sqlalchemy.orm import relationship + from sqlalchemy.orm import DeclarativeBase + from sqlalchemy.orm import mapped_column + from sqlalchemy.orm import relationship + + + class Base(DeclarativeBase): + pass - class Base(DeclarativeBase): - pass - class User(Base): - __tablename__ = 'user_account' + class User(Base): + __tablename__ = "user_account" - id = mapped_column(Integer, primary_key=True) - name = mapped_column(String(30), nullable=False) - fullname = mapped_column(String) - addresses = relationship("Address", back_populates="user") + id = mapped_column(Integer, primary_key=True) + name = mapped_column(String(30), nullable=False) + fullname = mapped_column(String) + addresses = relationship("Address", back_populates="user") - class Address(Base): - __tablename__ = "address" - id = mapped_column(Integer, primary_key=True) - email_address = mapped_column(String, nullable=False) - user_id = mapped_column(ForeignKey("user_account.id"), nullable=False) - user = relationship("User", back_populates="addresses") + class Address(Base): + __tablename__ = "address" + + id = mapped_column(Integer, primary_key=True) + email_address = mapped_column(String, nullable=False) + user_id = mapped_column(ForeignKey("user_account.id"), nullable=False) + user = relationship("User", back_populates="addresses") The individual columns above are **not yet typed with Python types**, and are instead typed as ``Mapped[Any]``; this is because we can declare any @@ -510,31 +517,34 @@ The mapping within this interim step will be more verbose, however with proficiency, this step can be combined with subsequent steps to update mappings more directly:: - from typing import List - from typing import Optional - from sqlalchemy.orm import DeclarativeBase - from sqlalchemy.orm import Mapped - from sqlalchemy.orm import mapped_column - from sqlalchemy.orm import relationship + from typing import List + from typing import Optional + from sqlalchemy.orm import DeclarativeBase + from sqlalchemy.orm 
import Mapped + from sqlalchemy.orm import mapped_column + from sqlalchemy.orm import relationship - class Base(DeclarativeBase): - pass - class User(Base): - __tablename__ = 'user_account' + class Base(DeclarativeBase): + pass + + + class User(Base): + __tablename__ = "user_account" - id: Mapped[int] = mapped_column(Integer, primary_key=True) - name: Mapped[str] = mapped_column(String(30), nullable=False) - fullname: Mapped[Optional[str]] = mapped_column(String) - addresses: Mapped[List["Address"]] = relationship("Address", back_populates="user") + id: Mapped[int] = mapped_column(Integer, primary_key=True) + name: Mapped[str] = mapped_column(String(30), nullable=False) + fullname: Mapped[Optional[str]] = mapped_column(String) + addresses: Mapped[List["Address"]] = relationship("Address", back_populates="user") - class Address(Base): - __tablename__ = "address" - id: Mapped[int] = mapped_column(Integer, primary_key=True) - email_address: Mapped[str] = mapped_column(String, nullable=False) - user_id: Mapped[int] = mapped_column(ForeignKey("user_account.id"), nullable=False) - user: Mapped["User"] = relationship("User", back_populates="addresses") + class Address(Base): + __tablename__ = "address" + + id: Mapped[int] = mapped_column(Integer, primary_key=True) + email_address: Mapped[str] = mapped_column(String, nullable=False) + user_id: Mapped[int] = mapped_column(ForeignKey("user_account.id"), nullable=False) + user: Mapped["User"] = relationship("User", back_populates="addresses") At this point, our ORM mapping is fully typed and will produce exact-typed :func:`_sql.select`, :class:`_orm.Query` and :class:`_engine.Result` @@ -553,32 +563,34 @@ class from the left hand annotation, supporting forward references as well (as :func:`_orm.relationship` has supported string-based forward references for ten years already ;) ):: - from typing import List - from typing import Optional - from sqlalchemy.orm import DeclarativeBase - from sqlalchemy.orm import Mapped - from 
sqlalchemy.orm import mapped_column - from sqlalchemy.orm import relationship + from typing import List + from typing import Optional + from sqlalchemy.orm import DeclarativeBase + from sqlalchemy.orm import Mapped + from sqlalchemy.orm import mapped_column + from sqlalchemy.orm import relationship - class Base(DeclarativeBase): - pass - class User(Base): - __tablename__ = 'user_account' + class Base(DeclarativeBase): + pass - id: Mapped[int] = mapped_column(primary_key=True) - name: Mapped[str] = mapped_column(String(30)) - fullname: Mapped[Optional[str]] - addresses: Mapped[List["Address"]] = relationship(back_populates="user") - class Address(Base): - __tablename__ = "address" + class User(Base): + __tablename__ = "user_account" + + id: Mapped[int] = mapped_column(primary_key=True) + name: Mapped[str] = mapped_column(String(30)) + fullname: Mapped[Optional[str]] + addresses: Mapped[List["Address"]] = relationship(back_populates="user") - id: Mapped[int] = mapped_column(primary_key=True) - email_address: Mapped[str] - user_id: Mapped[int] = mapped_column(ForeignKey("user_account.id")) - user: Mapped["User"] = relationship(back_populates="addresses") + class Address(Base): + __tablename__ = "address" + + id: Mapped[int] = mapped_column(primary_key=True) + email_address: Mapped[str] + user_id: Mapped[int] = mapped_column(ForeignKey("user_account.id")) + user: Mapped["User"] = relationship(back_populates="addresses") Step five - make use of pep-593 ``Annotated`` to package common directives into types ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ @@ -620,45 +632,50 @@ constructs, but currently is limited to :func:`_orm.mapped_column`. 
The example below adds additional ``Annotated`` types in addition to our ``str50`` example to illustrate this feature:: - from typing_extensions import Annotated - from typing import List - from typing import Optional - from sqlalchemy import ForeignKey - from sqlalchemy import String - from sqlalchemy.orm import DeclarativeBase - from sqlalchemy.orm import Mapped - from sqlalchemy.orm import mapped_column - from sqlalchemy.orm import relationship + from typing_extensions import Annotated + from typing import List + from typing import Optional + from sqlalchemy import ForeignKey + from sqlalchemy import String + from sqlalchemy.orm import DeclarativeBase + from sqlalchemy.orm import Mapped + from sqlalchemy.orm import mapped_column + from sqlalchemy.orm import relationship - # declarative base from previous example - str50 = Annotated[str, 50] + # declarative base from previous example + str50 = Annotated[str, 50] - class Base(DeclarativeBase): - registry = registry(type_annotation_map={ - str50: String(50), - }) - # set up mapped_column() overrides, using whole column styles that are - # expected to be used in multiple places - intpk = Annotated[int, mapped_column(primary_key=True)] - user_fk = Annotated[int, mapped_column(ForeignKey('user_account.id'))] + class Base(DeclarativeBase): + registry = registry( + type_annotation_map={ + str50: String(50), + } + ) + + # set up mapped_column() overrides, using whole column styles that are + # expected to be used in multiple places + intpk = Annotated[int, mapped_column(primary_key=True)] + user_fk = Annotated[int, mapped_column(ForeignKey("user_account.id"))] - class User(Base): - __tablename__ = 'user_account' - id: Mapped[intpk] - name: Mapped[str50] - fullname: Mapped[Optional[str]] - addresses: Mapped[List["Address"]] = relationship(back_populates="user") + class User(Base): + __tablename__ = "user_account" - class Address(Base): - __tablename__ = "address" + id: Mapped[intpk] + name: Mapped[str50] + fullname: 
Mapped[Optional[str]] + addresses: Mapped[List["Address"]] = relationship(back_populates="user") - id: Mapped[intpk] - email_address: Mapped[str50] - user_id: Mapped[user_fk] - user: Mapped["User"] = relationship(back_populates="addresses") + + class Address(Base): + __tablename__ = "address" + + id: Mapped[intpk] + email_address: Mapped[str50] + user_id: Mapped[user_fk] + user: Mapped["User"] = relationship(back_populates="addresses") Above, columns that are mapped with ``Mapped[str50]``, ``Mapped[intpk]``, or ``Mapped[user_fk]`` draw from both the @@ -770,6 +787,7 @@ example mapping from "Step 5" of :ref:`whatsnew_20_orm_declarative_typing`:: class Base(MappedAsDataclass, DeclarativeBase): """subclasses will be converted to dataclasses""" + intpk = Annotated[int, mapped_column(primary_key=True)] str30 = Annotated[str, mapped_column(String(30))] user_fk = Annotated[int, mapped_column(ForeignKey("user_account.id"))] @@ -792,9 +810,7 @@ example mapping from "Step 5" of :ref:`whatsnew_20_orm_declarative_typing`:: id: Mapped[intpk] = mapped_column(init=False) email_address: Mapped[str] user_id: Mapped[user_fk] = mapped_column(init=False) - user: Mapped["User"] = relationship( - back_populates="addresses", default=None - ) + user: Mapped["User"] = relationship(back_populates="addresses", default=None) The above mapping has used the ``@dataclasses.dataclass`` decorator directly on each mapped class at the same time that the declarative mapping was @@ -1000,12 +1016,12 @@ inheritance:: >>> users = session.scalars( ... insert(User).returning(User), ... [ - ... {"name": "spongebob", "fullname": "Spongebob Squarepants"}, - ... {"name": "sandy", "fullname": "Sandy Cheeks"}, - ... {"name": "patrick", "fullname": "Patrick Star"}, - ... {"name": "squidward", "fullname": "Squidward Tentacles"}, - ... {"name": "ehkrabs", "fullname": "Eugene H. Krabs"}, - ... ] + ... {"name": "spongebob", "fullname": "Spongebob Squarepants"}, + ... 
{"name": "sandy", "fullname": "Sandy Cheeks"}, + ... {"name": "patrick", "fullname": "Patrick Star"}, + ... {"name": "squidward", "fullname": "Squidward Tentacles"}, + ... {"name": "ehkrabs", "fullname": "Eugene H. Krabs"}, + ... ], ... ) >>> print(users.all()) [User(name='spongebob', fullname='Spongebob Squarepants'), @@ -1035,9 +1051,9 @@ a SQL UPDATE statement that is invoked using DBAPI :term:`executemany`:: >>> session.execute( ... update(User), ... [ - ... {"id": 1, "fullname": "Spongebob Squarepants"}, - ... {"id": 3, "fullname": "Patrick Star"}, - ... ] + ... {"id": 1, "fullname": "Spongebob Squarepants"}, + ... {"id": 3, "fullname": "Patrick Star"}, + ... ], ... ) .. seealso:: @@ -1056,16 +1072,15 @@ with column expressions or full ORM entities:: >>> from sqlalchemy.dialects.sqlite import insert as sqlite_upsert >>> stmt = sqlite_upsert(User).values( ... [ - ... {"name": "spongebob", "fullname": "Spongebob Squarepants"}, - ... {"name": "sandy", "fullname": "Sandy Cheeks"}, - ... {"name": "patrick", "fullname": "Patrick Star"}, - ... {"name": "squidward", "fullname": "Squidward Tentacles"}, - ... {"name": "ehkrabs", "fullname": "Eugene H. Krabs"}, - ... ] + ... {"name": "spongebob", "fullname": "Spongebob Squarepants"}, + ... {"name": "sandy", "fullname": "Sandy Cheeks"}, + ... {"name": "patrick", "fullname": "Patrick Star"}, + ... {"name": "squidward", "fullname": "Squidward Tentacles"}, + ... {"name": "ehkrabs", "fullname": "Eugene H. Krabs"}, + ... ] ... ) >>> stmt = stmt.on_conflict_do_update( - ... index_elements=[User.name], - ... set_=dict(fullname=stmt.excluded.fullname) + ... index_elements=[User.name], set_=dict(fullname=stmt.excluded.fullname) ... ) >>> result = session.scalars(stmt.returning(User)) >>> print(result.all()) @@ -1092,10 +1107,10 @@ may also proceed whether or not explicit use of RETURNING is present:: >>> from sqlalchemy import update >>> stmt = ( - ... update(User). - ... where(User.name == "squidward"). - ... 
values(name="spongebob"). - ... returning(User) + ... update(User) + ... .where(User.name == "squidward") + ... .values(name="spongebob") + ... .returning(User) ... ) >>> result = session.scalars(stmt, execution_options={"synchronize_session": "fetch"}) >>> print(result.all()) @@ -1432,7 +1447,7 @@ and pylance. Given a program as below:: from sqlalchemy.dialects.mysql import VARCHAR - type_ = String(255).with_variant(VARCHAR(255, charset='utf8mb4'), "mysql", "mariadb") + type_ = String(255).with_variant(VARCHAR(255, charset="utf8mb4"), "mysql", "mariadb") if typing.TYPE_CHECKING: reveal_type(type_) @@ -1604,10 +1619,7 @@ standard "precision" value divided by 0.3103:: from sqlalchemy.dialects import oracle - Table( - "some_table", metadata, - Column("value", oracle.FLOAT(126)) - ) + Table("some_table", metadata, Column("value", oracle.FLOAT(126))) A binary precision value of 126 is synonymous with using the :class:`_sqltypes.DOUBLE_PRECISION` datatype, and a value of 63 is equivalent @@ -1627,11 +1639,11 @@ the :meth:`_types.TypeEngine.with_variant` method as follows:: from sqlalchemy.dialects import oracle Table( - "some_table", metadata, - Column("value", Float(5).with_variant(oracle.FLOAT(16), "oracle")) + "some_table", + metadata, + Column("value", Float(5).with_variant(oracle.FLOAT(16), "oracle")), ) - .. 
_change_7086: ``match()`` operator on PostgreSQL uses ``plainto_tsquery()`` rather than ``to_tsquery()`` diff --git a/doc/build/core/connections.rst b/doc/build/core/connections.rst index 6dd8deef2c..4f77e1ada9 100644 --- a/doc/build/core/connections.rst +++ b/doc/build/core/connections.rst @@ -21,7 +21,7 @@ Basic Usage Recall from :doc:`/core/engines` that an :class:`_engine.Engine` is created via the :func:`_sa.create_engine` call:: - engine = create_engine('mysql+mysqldb://scott:tiger@localhost/test') + engine = create_engine("mysql+mysqldb://scott:tiger@localhost/test") The typical usage of :func:`_sa.create_engine` is once per particular database URL, held globally for the lifetime of a single application process. A single @@ -48,7 +48,7 @@ a textual statement to the database looks like:: with engine.connect() as connection: result = connection.execute(text("select username from users")) for row in result: - print("username:", row['username']) + print("username:", row["username"]) Above, the :meth:`_engine.Engine.connect` method returns a :class:`_engine.Connection` object, and by using it in a Python context manager (e.g. 
the ``with:`` @@ -112,7 +112,9 @@ illustrated in the example below:: with engine.connect() as connection: connection.execute(some_table.insert(), {"x": 7, "y": "this is some data"}) - connection.execute(some_other_table.insert(), {"q": 8, "p": "this is some more data"}) + connection.execute( + some_other_table.insert(), {"q": 8, "p": "this is some more data"} + ) connection.commit() # commit the transaction @@ -175,7 +177,9 @@ once" block:: with engine.connect() as connection: with connection.begin(): connection.execute(some_table.insert(), {"x": 7, "y": "this is some data"}) - connection.execute(some_other_table.insert(), {"q": 8, "p": "this is some more data"}) + connection.execute( + some_other_table.insert(), {"q": 8, "p": "this is some more data"} + ) # transaction is committed @@ -193,7 +197,9 @@ returned by the :meth:`_engine.Connection.begin` method:: with engine.begin() as connection: connection.execute(some_table.insert(), {"x": 7, "y": "this is some data"}) - connection.execute(some_other_table.insert(), {"q": 8, "p": "this is some more data"}) + connection.execute( + some_other_table.insert(), {"q": 8, "p": "this is some more data"} + ) # transaction is committed, and Connection is released to the connection # pool @@ -211,7 +217,6 @@ returned by the :meth:`_engine.Connection.begin` method:: >>> with e.begin() as conn: ... conn.commit() ... conn.begin() - ... 2021-11-08 09:49:07,517 INFO sqlalchemy.engine.Engine BEGIN (implicit) 2021-11-08 09:49:07,517 INFO sqlalchemy.engine.Engine COMMIT Traceback (most recent call last): @@ -239,7 +244,9 @@ after a previous call to :meth:`_engine.Connection.commit` or :meth:`_engine.Con # run a new statement outside of a block. 
The connection # autobegins - connection.execute(some_other_table.insert(), {"q": 8, "p": "this is some more data"}) + connection.execute( + some_other_table.insert(), {"q": 8, "p": "this is some more data"} + ) # commit explicitly connection.commit() @@ -310,9 +317,11 @@ certain backend, an error is raised. For example, to force REPEATABLE READ on a specific connection, then begin a transaction:: - with engine.connect().execution_options(isolation_level="REPEATABLE READ") as connection: - with connection.begin(): - connection.execute() + with engine.connect().execution_options( + isolation_level="REPEATABLE READ" + ) as connection: + with connection.begin(): + connection.execute("") .. tip:: The return value of the :meth:`_engine.Connection.execution_options` method is the same @@ -342,8 +351,7 @@ parameter to :func:`.sa.create_engine`:: from sqlalchemy import create_engine eng = create_engine( - "postgresql://scott:tiger@localhost/test", - isolation_level="REPEATABLE READ" + "postgresql://scott:tiger@localhost/test", isolation_level="REPEATABLE READ" ) With the above setting, each new DBAPI connection the moment it's created will @@ -367,9 +375,7 @@ per-connection isolation level setting:: eng = create_engine( "postgresql+psycopg2://scott:tiger@localhost/test", - execution_options={ - "isolation_level": "REPEATABLE READ" - } + execution_options={"isolation_level": "REPEATABLE READ"}, ) With the above setting, the DBAPI connection will be set to use a @@ -435,10 +441,10 @@ of this approach. If we wanted to check out a :class:`_engine.Connection` object and use it "autocommit" mode, we would proceed as follows:: - with engine.connect() as connection: - connection.execution_options(isolation_level="AUTOCOMMIT") - connection.execute() - connection.execute() + with engine.connect() as connection: + connection.execution_options(isolation_level="AUTOCOMMIT") + connection.execute("") + connection.execute("") Above illustrates normal usage of "DBAPI autocommit" mode. 
There is no need to make use of methods such as :meth:`_engine.Connection.begin` @@ -461,13 +467,13 @@ itself just like any other isolation level. In the example below, statements remain **autocommitting** regardless of SQLAlchemy-level transaction blocks:: - with engine.connect() as connection: - connection = connection.execution_options(isolation_level="AUTOCOMMIT") + with engine.connect() as connection: + connection = connection.execution_options(isolation_level="AUTOCOMMIT") - # this begin() does not affect the DBAPI connection, isolation stays at AUTOCOMMIT - with connection.begin() as trans: - connection.execute() - connection.execute() + # this begin() does not affect the DBAPI connection, isolation stays at AUTOCOMMIT + with connection.begin() as trans: + connection.execute("") + connection.execute("") When we run a block like the above with logging turned on, the logging will attempt to indicate that while a DBAPI level ``.commit()`` is called, @@ -484,15 +490,15 @@ don't impact the DBAPI connection itself**. 
To illustrate, the code below will raise an error, as :meth:`_engine.Connection.begin` is being called after autobegin has already occurred:: - with engine.connect() as connection: - connection = connection.execution_options(isolation_level="AUTOCOMMIT") + with engine.connect() as connection: + connection = connection.execution_options(isolation_level="AUTOCOMMIT") - # "transaction" is autobegin (but has no effect due to autocommit) - connection.execute() + # "transaction" is autobegin (but has no effect due to autocommit) + connection.execute("") - # this will raise; "transaction" is already begun - with connection.begin() as trans: - connection.execute() + # this will raise; "transaction" is already begun + with connection.begin() as trans: + connection.execute("") The above example also demonstrates the same theme that the "autocommit" isolation level is a configurational detail of the underlying database @@ -538,7 +544,7 @@ before we call upon :meth:`_engine.Connection.begin`:: connection.execution_options(isolation_level="AUTOCOMMIT") # run statement(s) in autocommit mode - connection.execute() + connection.execute("") # "commit" the autobegun "transaction" connection.commit() @@ -548,7 +554,7 @@ before we call upon :meth:`_engine.Connection.begin`:: # use a begin block with connection.begin() as trans: - connection.execute() + connection.execute("") Above, to manually revert the isolation level we made use of :attr:`_engine.Connection.default_isolation_level` to restore the default @@ -562,11 +568,11 @@ use two blocks :: with engine.connect().execution_options(isolation_level="AUTOCOMMIT") as connection: # run statement in autocommit mode - connection.execute() + connection.execute("") # use a regular block with engine.begin() as connection: - connection.execute() + connection.execute("") To sum up: @@ -675,11 +681,7 @@ combination has includes: These three behaviors are illustrated in the example below:: with engine.connect() as conn: - result = ( - conn. 
- execution_options(yield_per=100). - execute(text("select * from table")) - ) + result = conn.execution_options(yield_per=100).execute(text("select * from table")) for partition in result.partitions(): # partition is an iterable that will be at most 100 items @@ -764,9 +766,10 @@ to render under different schema names without any changes. Given a table:: user_table = Table( - 'user', metadata_obj, - Column('id', Integer, primary_key=True), - Column('name', String(50)) + "user", + metadata_obj, + Column("id", Integer, primary_key=True), + Column("name", String(50)), ) The "schema" of this :class:`_schema.Table` as defined by the @@ -776,7 +779,8 @@ that all :class:`_schema.Table` objects with a schema of ``None`` would instead render the schema as ``user_schema_one``:: connection = engine.connect().execution_options( - schema_translate_map={None: "user_schema_one"}) + schema_translate_map={None: "user_schema_one"} + ) result = connection.execute(user_table.select()) @@ -790,10 +794,11 @@ map can specify any number of target->destination schemas:: connection = engine.connect().execution_options( schema_translate_map={ - None: "user_schema_one", # no schema name -> "user_schema_one" - "special": "special_schema", # schema="special" becomes "special_schema" - "public": None # Table objects with schema="public" will render with no schema - }) + None: "user_schema_one", # no schema name -> "user_schema_one" + "special": "special_schema", # schema="special" becomes "special_schema" + "public": None, # Table objects with schema="public" will render with no schema + } + ) The :paramref:`.Connection.execution_options.schema_translate_map` parameter affects all DDL and SQL constructs generated from the SQL expression language, @@ -903,7 +908,9 @@ used items when the size of the cache reaches a certain threshold. 
The size of this cache defaults to 500 and may be configured using the :paramref:`_sa.create_engine.query_cache_size` parameter:: - engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/test", query_cache_size=1200) + engine = create_engine( + "postgresql+psycopg2://scott:tiger@localhost/test", query_cache_size=1200 + ) The size of the cache can grow to be a factor of 150% of the size given, before it's pruned back down to the target size. A cache of size 1200 above can therefore @@ -932,45 +939,43 @@ section :ref:`dbengine_logging` for background on logging configuration. As an example, we will examine the logging produced by the following program:: - from sqlalchemy import Column - from sqlalchemy import create_engine - from sqlalchemy import ForeignKey - from sqlalchemy import Integer - from sqlalchemy import String - from sqlalchemy.ext.declarative import declarative_base - from sqlalchemy.orm import relationship - from sqlalchemy.orm import Session + from sqlalchemy import Column + from sqlalchemy import create_engine + from sqlalchemy import ForeignKey + from sqlalchemy import Integer + from sqlalchemy import String + from sqlalchemy.ext.declarative import declarative_base + from sqlalchemy.orm import relationship + from sqlalchemy.orm import Session - Base = declarative_base() + Base = declarative_base() - class A(Base): - __tablename__ = "a" + class A(Base): + __tablename__ = "a" - id = Column(Integer, primary_key=True) - data = Column(String) - bs = relationship("B") + id = Column(Integer, primary_key=True) + data = Column(String) + bs = relationship("B") - class B(Base): - __tablename__ = "b" - id = Column(Integer, primary_key=True) - a_id = Column(ForeignKey("a.id")) - data = Column(String) + class B(Base): + __tablename__ = "b" + id = Column(Integer, primary_key=True) + a_id = Column(ForeignKey("a.id")) + data = Column(String) - e = create_engine("sqlite://", echo=True) - Base.metadata.create_all(e) + e = create_engine("sqlite://", 
echo=True) + Base.metadata.create_all(e) - s = Session(e) + s = Session(e) - s.add_all( - [A(bs=[B(), B(), B()]), A(bs=[B(), B(), B()]), A(bs=[B(), B(), B()])] - ) - s.commit() + s.add_all([A(bs=[B(), B(), B()]), A(bs=[B(), B(), B()]), A(bs=[B(), B(), B()])]) + s.commit() - for a_rec in s.query(A): - print(a_rec.bs) + for a_rec in s.query(A): + print(a_rec.bs) When run, each SQL statement that's logged will include a bracketed cache statistics badge to the left of the parameters passed. The four @@ -1164,9 +1169,9 @@ when using the ORM :meth:`_orm.Session.execute` method for SQLAlchemy-2.0 style invocations. For example, to run a series of SQL statements and have them cached in a particular dictionary:: - my_cache = {} - with engine.connect().execution_options(compiled_cache=my_cache) as conn: - conn.execute(table.select()) + my_cache = {} + with engine.connect().execution_options(compiled_cache=my_cache) as conn: + conn.execute(table.select()) The SQLAlchemy ORM uses the above technique to hold onto per-mapper caches within the unit of work "flush" process that are separate from the default @@ -1176,9 +1181,9 @@ relationship loader queries. The cache can also be disabled with this argument by sending a value of ``None``:: - # disable caching for this connection - with engine.connect().execution_options(compiled_cache=None) as conn: - conn.execute(table.select()) + # disable caching for this connection + with engine.connect().execution_options(compiled_cache=None) as conn: + conn.execute(table.select()) .. 
_engine_thirdparty_caching: @@ -1217,6 +1222,7 @@ a SQL string directly, dialect authors can apply the attribute as follows:: from sqlalchemy.engine.default import DefaultDialect + class MyDialect(DefaultDialect): supports_statement_cache = True @@ -1242,9 +1248,9 @@ like this:: def limit_clause(self, select, **kw): text = "" if select._limit is not None: - text += " \n LIMIT %d" % (select._limit, ) + text += " \n LIMIT %d" % (select._limit,) if select._offset is not None: - text += " \n OFFSET %d" % (select._offset, ) + text += " \n OFFSET %d" % (select._offset,) return text The above routine renders the :attr:`.Select._limit` and @@ -1363,6 +1369,7 @@ approach:: from sqlalchemy import lambda_stmt + def run_my_statement(connection, parameter): stmt = lambda_stmt(lambda: select(table)) stmt += lambda s: s.where(table.c.col == parameter) @@ -1370,6 +1377,7 @@ approach:: return connection.execute(stmt) + with engine.connect() as conn: result = run_my_statement(some_connection, "some parameter") @@ -1405,9 +1413,10 @@ Basic guidelines include: def upd(id_, newname): stmt = lambda_stmt(lambda: users.update()) stmt += lambda s: s.values(name=newname) - stmt += lambda s: s.where(users.c.id==id_) + stmt += lambda s: s.where(users.c.id == id_) return stmt + with engine.begin() as conn: conn.execute(upd(7, "foo")) @@ -1438,12 +1447,10 @@ Basic guidelines include: >>> def my_stmt(x, y): ... stmt = lambda_stmt(lambda: select(func.max(x, y))) ... return stmt - ... >>> engine = create_engine("sqlite://", echo=True) >>> with engine.connect() as conn: ... print(conn.scalar(my_stmt(5, 10))) ... print(conn.scalar(my_stmt(12, 8))) - ... {opensql}SELECT max(?, ?) AS max_1 [generated in 0.00057s] (5, 10){stop} 10 @@ -1494,15 +1501,14 @@ Basic guidelines include: >>> def my_stmt(x, y): ... def get_x(): ... return x + ... ... def get_y(): ... return y - ... + ... ... stmt = lambda_stmt(lambda: select(func.max(get_x(), get_y()))) ... return stmt - ... 
>>> with engine.connect() as conn: ... print(conn.scalar(my_stmt(5, 10))) - ... Traceback (most recent call last): # ... sqlalchemy.exc.InvalidRequestError: Can't invoke Python callable get_x() @@ -1518,9 +1524,10 @@ Basic guidelines include: >>> def my_stmt(x, y): ... def get_x(): ... return x + ... ... def get_y(): ... return y - ... + ... ... x_param, y_param = get_x(), get_y() ... stmt = lambda_stmt(lambda: select(func.max(x_param, y_param))) ... return stmt @@ -1539,14 +1546,11 @@ Basic guidelines include: ... def __init__(self, x, y): ... self.x = x ... self.y = y - ... >>> def my_stmt(foo): ... stmt = lambda_stmt(lambda: select(func.max(foo.x, foo.y))) ... return stmt - ... >>> with engine.connect() as conn: - ... print(conn.scalar(my_stmt(Foo(5, 10)))) - ... + ... print(conn.scalar(my_stmt(Foo(5, 10)))) Traceback (most recent call last): # ... sqlalchemy.exc.InvalidRequestError: Closure variable named 'foo' inside of @@ -1583,8 +1587,7 @@ Basic guidelines include: >>> def my_stmt(foo): ... stmt = lambda_stmt( - ... lambda: select(func.max(foo.x, foo.y)), - ... track_closure_variables=False + ... lambda: select(func.max(foo.x, foo.y)), track_closure_variables=False ... ) ... return stmt @@ -1600,13 +1603,9 @@ Basic guidelines include: >>> def my_stmt(self, foo): ... stmt = lambda_stmt( - ... lambda: select(*self.column_expressions), - ... track_closure_variables=False - ... ) - ... stmt = stmt.add_criteria( - ... lambda: self.where_criteria, - ... track_on=[self] + ... lambda: select(*self.column_expressions), track_closure_variables=False ... ) + ... stmt = stmt.add_criteria(lambda: self.where_criteria, track_on=[self]) ... 
return stmt Using ``track_on`` means the given objects will be stored long term in the @@ -1629,7 +1628,7 @@ SQL expression construct by producing a structure that represents all the state within the construct:: >>> from sqlalchemy import select, column - >>> stmt = select(column('q')) + >>> stmt = select(column("q")) >>> cache_key = stmt._generate_cache_key() >>> print(cache_key) # somewhat paraphrased CacheKey(key=( @@ -1843,8 +1842,7 @@ To disable the "insertmanyvalues" feature for a given backend for an :func:`_sa.create_engine`:: engine = create_engine( - "mariadb+mariadbconnector://scott:tiger@host/db", - use_insertmanyvalues=False + "mariadb+mariadbconnector://scott:tiger@host/db", use_insertmanyvalues=False ) The feature can also be disabled from being used implicitly for a particular @@ -1852,11 +1850,11 @@ The feature can also be disabled from being used implicitly for a particular :paramref:`_schema.Table.implicit_returning` parameter as ``False``:: t = Table( - 't', + "t", metadata, - Column('id', Integer, primary_key=True), - Column('x', Integer), - implicit_returning=False + Column("id", Integer, primary_key=True), + Column("x", Integer), + implicit_returning=False, ) The reason one might want to disable RETURNING for a specific table is to @@ -1921,13 +1919,15 @@ execution option, such as per execution:: result = conn.execute( table.insert().returning(table.c.id), parameterlist, - execution_options={"insertmanyvalues_page_size": 100} + execution_options={"insertmanyvalues_page_size": 100}, ) Or configured on the statement itself:: - stmt = table.insert().returning(table.c.id).execution_options( - insertmanyvalues_page_size=100 + stmt = ( + table.insert() + .returning(table.c.id) + .execution_options(insertmanyvalues_page_size=100) ) with e.begin() as conn: result = conn.execute(stmt, parameterlist) @@ -2071,7 +2071,6 @@ method may be used:: with engine.connect() as conn: conn.exec_driver_sql("SET param='bar'") - .. 
versionadded:: 1.4 Added the :meth:`_engine.Connection.exec_driver_sql` method. .. _dbapi_connections_cursor: @@ -2148,7 +2147,7 @@ may potentially be used with your DBAPI. An example of this pattern is:: connection = engine.raw_connection() try: cursor_obj = connection.cursor() - cursor_obj.callproc("my_procedure", ['x', 'y', 'z']) + cursor_obj.callproc("my_procedure", ["x", "y", "z"]) results = list(cursor_obj.fetchall()) cursor_obj.close() connection.commit() @@ -2194,11 +2193,12 @@ Multiple result set support is available from a raw DBAPI cursor using the finally: connection.close() - - Registering New Dialects ======================== +.. highlight:: ini +.. format:off + The :func:`_sa.create_engine` function call locates the given dialect using setuptools entrypoints. These entry points can be established for third party dialects within the setup.py script. For example, @@ -2228,6 +2228,9 @@ were in fact a MySQL dialect, the entry point could be established like this:: The above entrypoint would then be accessed as ``create_engine("mysql+foodialect://")``. +.. format:on +.. highlight:: python + Registering Dialects In-Process ------------------------------- @@ -2235,6 +2238,7 @@ SQLAlchemy also allows a dialect to be registered within the current process, by the need for separate installation. Use the ``register()`` function as follows:: from sqlalchemy.dialects import registry + registry.register("mysql.foodialect", "myapp.dialect", "MyMySQLDialect") The above will respond to ``create_engine("mysql+foodialect://")`` and load the diff --git a/doc/build/core/constraints.rst b/doc/build/core/constraints.rst index 6815da4dc1..a100acc162 100644 --- a/doc/build/core/constraints.rst +++ b/doc/build/core/constraints.rst @@ -33,11 +33,13 @@ column. 
The single column foreign key is more common, and at the column level is specified by constructing a :class:`~sqlalchemy.schema.ForeignKey` object as an argument to a :class:`~sqlalchemy.schema.Column` object:: - user_preference = Table('user_preference', metadata_obj, - Column('pref_id', Integer, primary_key=True), - Column('user_id', Integer, ForeignKey("user.user_id"), nullable=False), - Column('pref_name', String(40), nullable=False), - Column('pref_value', String(100)) + user_preference = Table( + "user_preference", + metadata_obj, + Column("pref_id", Integer, primary_key=True), + Column("user_id", Integer, ForeignKey("user.user_id"), nullable=False), + Column("pref_name", String(40), nullable=False), + Column("pref_value", String(100)), ) Above, we define a new table ``user_preference`` for which each row must @@ -64,21 +66,27 @@ known as a *composite* foreign key, and almost always references a table that has a composite primary key. Below we define a table ``invoice`` which has a composite primary key:: - invoice = Table('invoice', metadata_obj, - Column('invoice_id', Integer, primary_key=True), - Column('ref_num', Integer, primary_key=True), - Column('description', String(60), nullable=False) + invoice = Table( + "invoice", + metadata_obj, + Column("invoice_id", Integer, primary_key=True), + Column("ref_num", Integer, primary_key=True), + Column("description", String(60), nullable=False), ) And then a table ``invoice_item`` with a composite foreign key referencing ``invoice``:: - invoice_item = Table('invoice_item', metadata_obj, - Column('item_id', Integer, primary_key=True), - Column('item_name', String(60), nullable=False), - Column('invoice_id', Integer, nullable=False), - Column('ref_num', Integer, nullable=False), - ForeignKeyConstraint(['invoice_id', 'ref_num'], ['invoice.invoice_id', 'invoice.ref_num']) + invoice_item = Table( + "invoice_item", + metadata_obj, + Column("item_id", Integer, primary_key=True), + Column("item_name", String(60), 
nullable=False), + Column("invoice_id", Integer, nullable=False), + Column("ref_num", Integer, nullable=False), + ForeignKeyConstraint( + ["invoice_id", "ref_num"], ["invoice.invoice_id", "invoice.ref_num"] + ), ) It's important to note that the @@ -126,22 +134,20 @@ statements, on all backends other than SQLite which does not support most forms of ALTER. Given a schema like:: node = Table( - 'node', metadata_obj, - Column('node_id', Integer, primary_key=True), - Column( - 'primary_element', Integer, - ForeignKey('element.element_id') - ) + "node", + metadata_obj, + Column("node_id", Integer, primary_key=True), + Column("primary_element", Integer, ForeignKey("element.element_id")), ) element = Table( - 'element', metadata_obj, - Column('element_id', Integer, primary_key=True), - Column('parent_node_id', Integer), + "element", + metadata_obj, + Column("element_id", Integer, primary_key=True), + Column("parent_node_id", Integer), ForeignKeyConstraint( - ['parent_node_id'], ['node.node_id'], - name='fk_element_parent_node_id' - ) + ["parent_node_id"], ["node.node_id"], name="fk_element_parent_node_id" + ), ) When we call upon :meth:`_schema.MetaData.create_all` on a backend such as the @@ -151,7 +157,7 @@ constraints are created separately: .. sourcecode:: pycon+sql >>> with engine.connect() as conn: - ... metadata_obj.create_all(conn, checkfirst=False) + ... metadata_obj.create_all(conn, checkfirst=False) {opensql}CREATE TABLE element ( element_id SERIAL NOT NULL, parent_node_id INTEGER, @@ -179,7 +185,7 @@ those constraints that are named: .. sourcecode:: pycon+sql >>> with engine.connect() as conn: - ... metadata_obj.drop_all(conn, checkfirst=False) + ... metadata_obj.drop_all(conn, checkfirst=False) {opensql}ALTER TABLE element DROP CONSTRAINT fk_element_parent_node_id DROP TABLE node DROP TABLE element @@ -205,13 +211,16 @@ to manually resolve dependency cycles. 
We can add this flag only to the ``'element'`` table as follows:: element = Table( - 'element', metadata_obj, - Column('element_id', Integer, primary_key=True), - Column('parent_node_id', Integer), + "element", + metadata_obj, + Column("element_id", Integer, primary_key=True), + Column("parent_node_id", Integer), ForeignKeyConstraint( - ['parent_node_id'], ['node.node_id'], - use_alter=True, name='fk_element_parent_node_id' - ) + ["parent_node_id"], + ["node.node_id"], + use_alter=True, + name="fk_element_parent_node_id", + ), ) in our CREATE DDL we will see the ALTER statement only for this constraint, @@ -220,7 +229,7 @@ and not the other one: .. sourcecode:: pycon+sql >>> with engine.connect() as conn: - ... metadata_obj.create_all(conn, checkfirst=False) + ... metadata_obj.create_all(conn, checkfirst=False) {opensql}CREATE TABLE element ( element_id SERIAL NOT NULL, parent_node_id INTEGER, @@ -282,22 +291,29 @@ generation of this clause via the ``onupdate`` and ``ondelete`` keyword arguments. 
The value is any string which will be output after the appropriate "ON UPDATE" or "ON DELETE" phrase:: - child = Table('child', metadata_obj, - Column('id', Integer, - ForeignKey('parent.id', onupdate="CASCADE", ondelete="CASCADE"), - primary_key=True - ) - ) - - composite = Table('composite', metadata_obj, - Column('id', Integer, primary_key=True), - Column('rev_id', Integer), - Column('note_id', Integer), + child = Table( + "child", + metadata_obj, + Column( + "id", + Integer, + ForeignKey("parent.id", onupdate="CASCADE", ondelete="CASCADE"), + primary_key=True, + ), + ) + + composite = Table( + "composite", + metadata_obj, + Column("id", Integer, primary_key=True), + Column("rev_id", Integer), + Column("note_id", Integer), ForeignKeyConstraint( - ['rev_id', 'note_id'], - ['revisions.id', 'revisions.note_id'], - onupdate="CASCADE", ondelete="SET NULL" - ) + ["rev_id", "note_id"], + ["revisions.id", "revisions.note_id"], + onupdate="CASCADE", + ondelete="SET NULL", + ), ) Note that these clauses require ``InnoDB`` tables when used with MySQL. @@ -327,17 +343,16 @@ unique constraints and/or those with multiple columns are created via the from sqlalchemy import UniqueConstraint metadata_obj = MetaData() - mytable = Table('mytable', metadata_obj, - + mytable = Table( + "mytable", + metadata_obj, # per-column anonymous unique constraint - Column('col1', Integer, unique=True), - - Column('col2', Integer), - Column('col3', Integer), - + Column("col1", Integer, unique=True), + Column("col2", Integer), + Column("col3", Integer), # explicit/composite unique constraint. 'name' is optional. 
- UniqueConstraint('col2', 'col3', name='uix_1') - ) + UniqueConstraint("col2", "col3", name="uix_1"), + ) CHECK Constraint ---------------- @@ -388,12 +403,14 @@ option of being configured directly:: from sqlalchemy import PrimaryKeyConstraint - my_table = Table('mytable', metadata_obj, - Column('id', Integer), - Column('version_id', Integer), - Column('data', String(50)), - PrimaryKeyConstraint('id', 'version_id', name='mytable_pk') - ) + my_table = Table( + "mytable", + metadata_obj, + Column("id", Integer), + Column("version_id", Integer), + Column("data", String(50)), + PrimaryKeyConstraint("id", "version_id", name="mytable_pk"), + ) .. seealso:: @@ -468,11 +485,11 @@ one exception case where an existing name can be further embellished). An example naming convention that suits basic cases is as follows:: convention = { - "ix": 'ix_%(column_0_label)s', - "uq": "uq_%(table_name)s_%(column_0_name)s", - "ck": "ck_%(table_name)s_%(constraint_name)s", - "fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s", - "pk": "pk_%(table_name)s" + "ix": "ix_%(column_0_label)s", + "uq": "uq_%(table_name)s_%(column_0_name)s", + "ck": "ck_%(table_name)s_%(constraint_name)s", + "fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s", + "pk": "pk_%(table_name)s", } metadata_obj = MetaData(naming_convention=convention) @@ -482,10 +499,12 @@ the target :class:`_schema.MetaData` collection. For example, we can observe the name produced when we create an unnamed :class:`.UniqueConstraint`:: - >>> user_table = Table('user', metadata_obj, - ... Column('id', Integer, primary_key=True), - ... Column('name', String(30), nullable=False), - ... UniqueConstraint('name') + >>> user_table = Table( + ... "user", + ... metadata_obj, + ... Column("id", Integer, primary_key=True), + ... Column("name", String(30), nullable=False), + ... UniqueConstraint("name"), ... 
) >>> list(user_table.constraints)[1].name 'uq_user_name' @@ -493,10 +512,12 @@ For example, we can observe the name produced when we create an unnamed This same feature takes effect even if we just use the :paramref:`_schema.Column.unique` flag:: - >>> user_table = Table('user', metadata_obj, - ... Column('id', Integer, primary_key=True), - ... Column('name', String(30), nullable=False, unique=True) - ... ) + >>> user_table = Table( + ... "user", + ... metadata_obj, + ... Column("id", Integer, primary_key=True), + ... Column("name", String(30), nullable=False, unique=True), + ... ) >>> list(user_table.constraints)[1].name 'uq_user_name' @@ -543,16 +564,17 @@ deterministically truncated using a 4-character suffix based on the md5 hash of the long name. For example, the naming convention below will generate very long names given the column names in use:: - metadata_obj = MetaData(naming_convention={ - "uq": "uq_%(table_name)s_%(column_0_N_name)s" - }) + metadata_obj = MetaData( + naming_convention={"uq": "uq_%(table_name)s_%(column_0_N_name)s"} + ) long_names = Table( - 'long_names', metadata_obj, - Column('information_channel_code', Integer, key='a'), - Column('billing_convention_name', Integer, key='b'), - Column('product_identifier', Integer, key='c'), - UniqueConstraint('a', 'b', 'c') + "long_names", + metadata_obj, + Column("information_channel_code", Integer, key="a"), + Column("billing_convention_name", Integer, key="b"), + Column("product_identifier", Integer, key="c"), + UniqueConstraint("a", "b", "c"), ) On the PostgreSQL dialect, names longer than 63 characters will be truncated @@ -580,20 +602,22 @@ that as follows:: import uuid + def fk_guid(constraint, table): - str_tokens = [ - table.name, - ] + [ - element.parent.name for element in constraint.elements - ] + [ - element.target_fullname for element in constraint.elements - ] - guid = uuid.uuid5(uuid.NAMESPACE_OID, "_".join(str_tokens).encode('ascii')) + str_tokens = ( + [ + table.name, + ] + + 
[element.parent.name for element in constraint.elements] + + [element.target_fullname for element in constraint.elements] + ) + guid = uuid.uuid5(uuid.NAMESPACE_OID, "_".join(str_tokens).encode("ascii")) return str(guid) + convention = { "fk_guid": fk_guid, - "ix": 'ix_%(column_0_label)s', + "ix": "ix_%(column_0_label)s", "fk": "fk_%(fk_guid)s", } @@ -602,18 +626,21 @@ name as follows:: >>> metadata_obj = MetaData(naming_convention=convention) - >>> user_table = Table('user', metadata_obj, - ... Column('id', Integer, primary_key=True), - ... Column('version', Integer, primary_key=True), - ... Column('data', String(30)) - ... ) - >>> address_table = Table('address', metadata_obj, - ... Column('id', Integer, primary_key=True), - ... Column('user_id', Integer), - ... Column('user_version_id', Integer) - ... ) - >>> fk = ForeignKeyConstraint(['user_id', 'user_version_id'], - ... ['user.id', 'user.version']) + >>> user_table = Table( + ... "user", + ... metadata_obj, + ... Column("id", Integer, primary_key=True), + ... Column("version", Integer, primary_key=True), + ... Column("data", String(30)), + ... ) + >>> address_table = Table( + ... "address", + ... metadata_obj, + ... Column("id", Integer, primary_key=True), + ... Column("user_id", Integer), + ... Column("user_version_id", Integer), + ... 
) + >>> fk = ForeignKeyConstraint(["user_id", "user_version_id"], ["user.id", "user.version"]) >>> address_table.append_constraint(fk) >>> fk.name fk_0cd51ab5-8d70-56e8-a83c-86661737766d @@ -646,9 +673,11 @@ A typical convention is ``"ck_%(table_name)s_%(constraint_name)s"``:: naming_convention={"ck": "ck_%(table_name)s_%(constraint_name)s"} ) - Table('foo', metadata_obj, - Column('value', Integer), - CheckConstraint('value > 5', name='value_gt_5') + Table( + "foo", + metadata_obj, + Column("value", Integer), + CheckConstraint("value > 5", name="value_gt_5"), ) The above table will produce the name ``ck_foo_value_gt_5``:: @@ -663,13 +692,9 @@ token; we can make use of this by ensuring we use a :class:`_schema.Column` or :func:`_expression.column` element within the constraint's expression, either by declaring the constraint separate from the table:: - metadata_obj = MetaData( - naming_convention={"ck": "ck_%(table_name)s_%(column_0_name)s"} - ) + metadata_obj = MetaData(naming_convention={"ck": "ck_%(table_name)s_%(column_0_name)s"}) - foo = Table('foo', metadata_obj, - Column('value', Integer) - ) + foo = Table("foo", metadata_obj, Column("value", Integer)) CheckConstraint(foo.c.value > 5) @@ -677,13 +702,10 @@ or by using a :func:`_expression.column` inline:: from sqlalchemy import column - metadata_obj = MetaData( - naming_convention={"ck": "ck_%(table_name)s_%(column_0_name)s"} - ) + metadata_obj = MetaData(naming_convention={"ck": "ck_%(table_name)s_%(column_0_name)s"}) - foo = Table('foo', metadata_obj, - Column('value', Integer), - CheckConstraint(column('value') > 5) + foo = Table( + "foo", metadata_obj, Column("value", Integer), CheckConstraint(column("value") > 5) ) Both will produce the name ``ck_foo_value``:: @@ -712,9 +734,7 @@ and :class:`.Enum` which generate a CHECK constraint accompanying the type. The name for the constraint here is most directly set up by sending the "name" parameter, e.g. 
:paramref:`.Boolean.name`:: - Table('foo', metadata_obj, - Column('flag', Boolean(name='ck_foo_flag')) - ) + Table("foo", metadata_obj, Column("flag", Boolean(name="ck_foo_flag"))) The naming convention feature may be combined with these types as well, normally by using a convention which includes ``%(constraint_name)s`` @@ -724,9 +744,7 @@ and then applying a name to the type:: naming_convention={"ck": "ck_%(table_name)s_%(constraint_name)s"} ) - Table('foo', metadata_obj, - Column('flag', Boolean(name='flag_bool')) - ) + Table("foo", metadata_obj, Column("flag", Boolean(name="flag_bool"))) The above table will produce the constraint name ``ck_foo_flag_bool``:: @@ -748,13 +766,9 @@ The CHECK constraint may also make use of the ``column_0_name`` token, which works nicely with :class:`.SchemaType` since these constraints have only one column:: - metadata_obj = MetaData( - naming_convention={"ck": "ck_%(table_name)s_%(column_0_name)s"} - ) + metadata_obj = MetaData(naming_convention={"ck": "ck_%(table_name)s_%(column_0_name)s"}) - Table('foo', metadata_obj, - Column('flag', Boolean()) - ) + Table("foo", metadata_obj, Column("flag", Boolean())) The above schema will produce:: @@ -869,19 +883,17 @@ objects directly. 
:class:`.Index` also supports identify columns:: metadata_obj = MetaData() - mytable = Table('mytable', metadata_obj, - Column('col1', Integer), - - Column('col2', Integer), - - Column('col3', Integer), - Column('col4', Integer), - + mytable = Table( + "mytable", + metadata_obj, + Column("col1", Integer), + Column("col2", Integer), + Column("col3", Integer), + Column("col4", Integer), # place an index on col1, col2 - Index('idx_col12', 'col1', 'col2'), - + Index("idx_col12", "col1", "col2"), # place a unique index on col3, col4 - Index('idx_col34', 'col3', 'col4', unique=True) + Index("idx_col34", "col3", "col4", unique=True), ) The :class:`~sqlalchemy.schema.Index` object also supports its own ``create()`` method: @@ -903,14 +915,14 @@ value, the :meth:`_expression.ColumnElement.desc` modifier may be used:: from sqlalchemy import Index - Index('someindex', mytable.c.somecol.desc()) + Index("someindex", mytable.c.somecol.desc()) Or with a backend that supports functional indexes such as PostgreSQL, a "case insensitive" index can be created using the ``lower()`` function:: from sqlalchemy import func, Index - Index('someindex', func.lower(mytable.c.somecol)) + Index("someindex", func.lower(mytable.c.somecol)) Index API --------- diff --git a/doc/build/core/custom_types.rst b/doc/build/core/custom_types.rst index 7f628f373d..6f29adc14d 100644 --- a/doc/build/core/custom_types.rst +++ b/doc/build/core/custom_types.rst @@ -24,6 +24,7 @@ can be associated with any type:: from sqlalchemy.ext.compiler import compiles from sqlalchemy.types import BINARY + @compiles(BINARY, "sqlite") def compile_binary_sqlite(type_, compiler, **kw): return "BLOB" @@ -93,6 +94,7 @@ which coerces as needed:: from sqlalchemy.types import TypeDecorator, Unicode + class CoerceUTF8(TypeDecorator): """Safely coerce Python bytestrings to Unicode before passing off to the database.""" @@ -101,7 +103,7 @@ which coerces as needed:: def process_bind_param(self, value, dialect): if isinstance(value, 
str): - value = value.decode('utf-8') + value = value.decode("utf-8") return value Rounding Numerics @@ -113,6 +115,7 @@ many decimal places. Here's a recipe that rounds them down:: from sqlalchemy.types import TypeDecorator, Numeric from decimal import Decimal + class SafeNumeric(TypeDecorator): """Adds quantization to Numeric.""" @@ -120,12 +123,11 @@ many decimal places. Here's a recipe that rounds them down:: def __init__(self, *arg, **kw): TypeDecorator.__init__(self, *arg, **kw) - self.quantize_int = - self.impl.scale + self.quantize_int = -self.impl.scale self.quantize = Decimal(10) ** self.quantize_int def process_bind_param(self, value, dialect): - if isinstance(value, Decimal) and \ - value.as_tuple()[2] < self.quantize_int: + if isinstance(value, Decimal) and value.as_tuple()[2] < self.quantize_int: value = value.quantize(self.quantize) return value @@ -147,6 +149,7 @@ denormalize:: import datetime + class TZDateTime(TypeDecorator): impl = DateTime cache_ok = True @@ -155,9 +158,7 @@ denormalize:: if value is not None: if not value.tzinfo: raise TypeError("tzinfo is required") - value = value.astimezone(datetime.timezone.utc).replace( - tzinfo=None - ) + value = value.astimezone(datetime.timezone.utc).replace(tzinfo=None) return value def process_result_value(self, value, dialect): @@ -165,7 +166,6 @@ denormalize:: value = value.replace(tzinfo=datetime.timezone.utc) return value - .. _custom_guid_type: Backend-agnostic GUID Type @@ -180,6 +180,7 @@ binary in CHAR(16) if desired:: from sqlalchemy.dialects.postgresql import UUID import uuid + class GUID(TypeDecorator): """Platform-independent GUID type. @@ -187,11 +188,12 @@ binary in CHAR(16) if desired:: CHAR(32), storing as stringified hex values. 
""" + impl = CHAR cache_ok = True def load_dialect_impl(self, dialect): - if dialect.name == 'postgresql': + if dialect.name == "postgresql": return dialect.type_descriptor(UUID()) else: return dialect.type_descriptor(CHAR(32)) @@ -199,7 +201,7 @@ binary in CHAR(16) if desired:: def process_bind_param(self, value, dialect): if value is None: return value - elif dialect.name == 'postgresql': + elif dialect.name == "postgresql": return str(value) else: if not isinstance(value, uuid.UUID): @@ -225,6 +227,7 @@ to/from JSON. Can be modified to use Python's builtin json encoder:: from sqlalchemy.types import TypeDecorator, VARCHAR import json + class JSONEncodedDict(TypeDecorator): """Represents an immutable structure as a json-encoded string. @@ -269,12 +272,12 @@ dictionary-oriented JSON structure, we can apply this as:: json_type = MutableDict.as_mutable(JSONEncodedDict) + class MyClass(Base): # ... json_data = Column(json_type) - .. seealso:: :ref:`mutable_toplevel` @@ -295,8 +298,7 @@ get at this with a type like ``JSONEncodedDict``, we need to from sqlalchemy import type_coerce, String - stmt = select(my_table).where( - type_coerce(my_table.c.json_data, String).like('%foo%')) + stmt = select(my_table).where(type_coerce(my_table.c.json_data, String).like("%foo%")) :class:`.TypeDecorator` provides a built-in system for working up type translations like these based on operators. 
If we wanted to frequently use the @@ -307,6 +309,7 @@ method:: from sqlalchemy.sql import operators from sqlalchemy import String + class JSONEncodedDict(TypeDecorator): impl = VARCHAR @@ -365,6 +368,7 @@ in conjunction with :data:`~.sqlalchemy.sql.expression.func`:: from sqlalchemy import func from sqlalchemy.types import UserDefinedType + class Geometry(UserDefinedType): def get_col_spec(self): return "GEOMETRY" @@ -378,13 +382,18 @@ in conjunction with :data:`~.sqlalchemy.sql.expression.func`:: We can apply the ``Geometry`` type into :class:`_schema.Table` metadata and use it in a :func:`_expression.select` construct:: - geometry = Table('geometry', metadata, - Column('geom_id', Integer, primary_key=True), - Column('geom_data', Geometry) - ) + geometry = Table( + "geometry", + metadata, + Column("geom_id", Integer, primary_key=True), + Column("geom_data", Geometry), + ) - print(select(geometry).where( - geometry.c.geom_data == 'LINESTRING(189412 252431,189631 259122)')) + print( + select(geometry).where( + geometry.c.geom_data == "LINESTRING(189412 252431,189631 259122)" + ) + ) The resulting SQL embeds both functions as appropriate. ``ST_AsText`` is applied to the columns clause so that the return value is run through @@ -401,7 +410,7 @@ with the labeling of the wrapped expression. 
Such as, if we rendered a :func:`_expression.select` against a :func:`.label` of our expression, the string label is moved to the outside of the wrapped expression:: - print(select(geometry.c.geom_data.label('my_data'))) + print(select(geometry.c.geom_data.label("my_data"))) Output:: @@ -413,11 +422,21 @@ Another example is we decorate PostgreSQL ``pgcrypto`` extension to encrypt/decrypt values transparently:: - from sqlalchemy import create_engine, String, select, func, \ - MetaData, Table, Column, type_coerce, TypeDecorator + from sqlalchemy import ( + create_engine, + String, + select, + func, + MetaData, + Table, + Column, + type_coerce, + TypeDecorator, + ) from sqlalchemy.dialects.postgresql import BYTEA + class PGPString(TypeDecorator): impl = BYTEA @@ -438,24 +457,24 @@ transparently:: def column_expression(self, col): return func.pgp_sym_decrypt(col, self.passphrase) + metadata_obj = MetaData() - message = Table('message', metadata_obj, - Column('username', String(50)), - Column('message', - PGPString("this is my passphrase")), - ) + message = Table( + "message", + metadata_obj, + Column("username", String(50)), + Column("message", PGPString("this is my passphrase")), + ) engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/test", echo=True) with engine.begin() as conn: metadata_obj.create_all(conn) - conn.execute(message.insert(), username="some user", - message="this is my message") + conn.execute(message.insert(), username="some user", message="this is my message") - print(conn.scalar( - select(message.c.message).\ - where(message.c.username == "some user") - )) + print( + conn.scalar(select(message.c.message).where(message.c.username == "some user")) + ) The ``pgp_sym_encrypt`` and ``pgp_sym_decrypt`` functions are applied to the INSERT and SELECT statements:: @@ -494,7 +513,7 @@ is given a string representing the SQL operator to render, and the return value is a Python callable that accepts any arbitrary right-hand side expression:: >>> 
from sqlalchemy import column - >>> expr = column('x').op('>>')(column('y')) + >>> expr = column("x").op(">>")(column("y")) >>> print(expr) x >> y @@ -519,6 +538,7 @@ SQL itself:: from sqlalchemy import Integer + class MyInt(Integer): class comparator_factory(Integer.Comparator): def __add__(self, other): @@ -543,6 +563,7 @@ object directly:: from sqlalchemy import Integer + class MyInt(Integer): class comparator_factory(Integer.Comparator): def __add__(self, other): @@ -556,6 +577,7 @@ to integers:: from sqlalchemy import Integer, func + class MyInt(Integer): class comparator_factory(Integer.Comparator): def log(self, other): @@ -584,17 +606,18 @@ along with a :class:`.custom_op` to produce the factorial expression:: from sqlalchemy.sql.expression import UnaryExpression from sqlalchemy.sql import operators + class MyInteger(Integer): class comparator_factory(Integer.Comparator): def factorial(self): - return UnaryExpression(self.expr, - modifier=operators.custom_op("!"), - type_=MyInteger) + return UnaryExpression( + self.expr, modifier=operators.custom_op("!"), type_=MyInteger + ) Using the above type:: >>> from sqlalchemy.sql import column - >>> print(column('x', MyInteger).factorial()) + >>> print(column("x", MyInteger).factorial()) x ! .. seealso:: @@ -644,10 +667,19 @@ create a new :class:`_schema.Table` object against a new :class:`_schema.MetaDat for this database table elsewhere using reflection, it will not have this datatype. For example:: - >>> from sqlalchemy import Table, Column, MetaData, create_engine, PickleType, Integer + >>> from sqlalchemy import ( + ... Table, + ... Column, + ... MetaData, + ... create_engine, + ... PickleType, + ... Integer, + ... ) >>> metadata = MetaData() - >>> my_table = Table("my_table", metadata, Column('id', Integer), Column("data", PickleType)) - >>> engine = create_engine("sqlite://", echo='debug') + >>> my_table = Table( + ... "my_table", metadata, Column("id", Integer), Column("data", PickleType) + ... 
) + >>> engine = create_engine("sqlite://", echo="debug") >>> my_table.create(engine) INFO sqlalchemy.engine.base.Engine CREATE TABLE my_table ( @@ -698,7 +730,12 @@ use reflection in combination with explicit :class:`_schema.Column` objects for columns for which we want to use a custom or decorated datatype:: >>> metadata_three = MetaData() - >>> my_reflected_table = Table("my_table", metadata_three, Column("data", PickleType), autoload_with=engine) + >>> my_reflected_table = Table( + ... "my_table", + ... metadata_three, + ... Column("data", PickleType), + ... autoload_with=engine, + ... ) The ``my_reflected_table`` object above is reflected, and will load the definition of the "id" column from the SQLite database. But for the "data" @@ -721,6 +758,7 @@ for example we knew that we wanted all :class:`.BLOB` datatypes to in fact be from sqlalchemy import PickleType from sqlalchemy import Table + @event.listens_for(Table, "column_reflect") def _setup_pickletype(inspector, table, column_info): if isinstance(column_info["type"], BLOB): @@ -736,4 +774,4 @@ In practice, the above event-based approach would likely have additional rules in order to affect only those columns where the datatype is important, such as a lookup table of table names and possibly column names, or other heuristics in order to accurately determine which columns should be established with an -in Python datatype. \ No newline at end of file +in Python datatype. 
diff --git a/doc/build/core/ddl.rst b/doc/build/core/ddl.rst index bccfb76ccf..0e6d641c6a 100644 --- a/doc/build/core/ddl.rst +++ b/doc/build/core/ddl.rst @@ -32,9 +32,11 @@ other DDL elements except it accepts a string which is the text to be emitted: event.listen( metadata, "after_create", - DDL("ALTER TABLE users ADD CONSTRAINT " + DDL( + "ALTER TABLE users ADD CONSTRAINT " "cst_user_name_length " - " CHECK (length(user_name) >= 8)") + " CHECK (length(user_name) >= 8)" + ), ) A more comprehensive method of creating libraries of DDL constructs is to use @@ -54,9 +56,10 @@ method. For example, if we wanted to create a trigger but only on the PostgreSQL backend, we could invoke this as:: mytable = Table( - 'mytable', metadata, - Column('id', Integer, primary_key=True), - Column('data', String(50)) + "mytable", + metadata, + Column("id", Integer, primary_key=True), + Column("data", String(50)), ) func = DDL( @@ -73,30 +76,18 @@ the PostgreSQL backend, we could invoke this as:: "FOR EACH ROW EXECUTE PROCEDURE my_func();" ) - event.listen( - mytable, - 'after_create', - func.execute_if(dialect='postgresql') - ) + event.listen(mytable, "after_create", func.execute_if(dialect="postgresql")) - event.listen( - mytable, - 'after_create', - trigger.execute_if(dialect='postgresql') - ) + event.listen(mytable, "after_create", trigger.execute_if(dialect="postgresql")) The :paramref:`.ExecutableDDLElement.execute_if.dialect` keyword also accepts a tuple of string dialect names:: event.listen( - mytable, - "after_create", - trigger.execute_if(dialect=('postgresql', 'mysql')) + mytable, "after_create", trigger.execute_if(dialect=("postgresql", "mysql")) ) event.listen( - mytable, - "before_drop", - trigger.execute_if(dialect=('postgresql', 'mysql')) + mytable, "before_drop", trigger.execute_if(dialect=("postgresql", "mysql")) ) The :meth:`.ExecutableDDLElement.execute_if` method can also work against a callable @@ -251,7 +242,9 @@ statement emitted for the index: .. 
sourcecode:: python+sql >>> from sqlalchemy import create_engine - >>> postgresql_engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/test", echo=True) + >>> postgresql_engine = create_engine( + ... "postgresql+psycopg2://scott:tiger@localhost/test", echo=True + ... ) >>> meta.create_all(postgresql_engine) {opensql}BEGIN (implicit) select relname from pg_class c join pg_namespace n on n.oid=c.relnamespace where pg_catalog.pg_table_is_visible(c.oid) and relname=%(name)s @@ -286,10 +279,8 @@ to inspect the database versioning information would best use the given .. sourcecode:: python+sql def only_pg_14(ddl_element, target, bind, dialect, **kw): - return ( - dialect.name == "postgresql" and - dialect.server_version_info >= (14,) - ) + return dialect.name == "postgresql" and dialect.server_version_info >= (14,) + my_table = Table( "my_table", diff --git a/doc/build/core/defaults.rst b/doc/build/core/defaults.rst index bccc8375c1..60e7e2bc57 100644 --- a/doc/build/core/defaults.rst +++ b/doc/build/core/defaults.rst @@ -59,9 +59,7 @@ Scalar Defaults The simplest kind of default is a scalar value used as the default value of a column:: - Table("mytable", metadata_obj, - Column("somecolumn", Integer, default=12) - ) + Table("mytable", metadata_obj, Column("somecolumn", Integer, default=12)) Above, the value "12" will be bound as the column value during an INSERT if no other value is supplied. 
@@ -70,10 +68,7 @@ A scalar value may also be associated with an UPDATE statement, though this is not very common (as UPDATE statements are usually looking for dynamic defaults):: - Table("mytable", metadata_obj, - Column("somecolumn", Integer, onupdate=25) - ) - + Table("mytable", metadata_obj, Column("somecolumn", Integer, onupdate=25)) Python-Executed Functions ------------------------- @@ -86,13 +81,18 @@ incrementing counter to a primary key column:: # a function which counts upwards i = 0 + + def mydefault(): global i i += 1 return i - t = Table("mytable", metadata_obj, - Column('id', Integer, primary_key=True, default=mydefault), + + t = Table( + "mytable", + metadata_obj, + Column("id", Integer, primary_key=True, default=mydefault), ) It should be noted that for real "incrementing sequence" behavior, the @@ -109,11 +109,12 @@ the :paramref:`_schema.Column.onupdate` attribute:: import datetime - t = Table("mytable", metadata_obj, - Column('id', Integer, primary_key=True), - + t = Table( + "mytable", + metadata_obj, + Column("id", Integer, primary_key=True), # define 'last_updated' to be populated with datetime.now() - Column('last_updated', DateTime, onupdate=datetime.datetime.now), + Column("last_updated", DateTime, onupdate=datetime.datetime.now), ) When an update statement executes and no value is passed for ``last_updated``, @@ -139,11 +140,14 @@ updated on the row. 
To access the context, provide a function that accepts a single ``context`` argument:: def mydefault(context): - return context.get_current_parameters()['counter'] + 12 + return context.get_current_parameters()["counter"] + 12 - t = Table('mytable', metadata_obj, - Column('counter', Integer), - Column('counter_plus_twelve', Integer, default=mydefault, onupdate=mydefault) + + t = Table( + "mytable", + metadata_obj, + Column("counter", Integer), + Column("counter_plus_twelve", Integer, default=mydefault, onupdate=mydefault), ) The above default generation function is applied so that it will execute for @@ -184,18 +188,21 @@ The :paramref:`_schema.Column.default` and :paramref:`_schema.Column.onupdate` k also be passed SQL expressions, which are in most cases rendered inline within the INSERT or UPDATE statement:: - t = Table("mytable", metadata_obj, - Column('id', Integer, primary_key=True), - + t = Table( + "mytable", + metadata_obj, + Column("id", Integer, primary_key=True), # define 'create_date' to default to now() - Column('create_date', DateTime, default=func.now()), - + Column("create_date", DateTime, default=func.now()), # define 'key' to pull its default from the 'keyvalues' table - Column('key', String(20), default=select(keyvalues.c.key).where(keyvalues.c.type='type1')), - + Column( + "key", + String(20), + default=select(keyvalues.c.key).where(keyvalues.c.type == "type1"), + ), # define 'last_modified' to use the current_timestamp SQL function on update - Column('last_modified', DateTime, onupdate=func.utc_timestamp()) - ) + Column("last_modified", DateTime, onupdate=func.utc_timestamp()), + ) Above, the ``create_date`` column will be populated with the result of the ``now()`` SQL function (which, depending on backend, compiles into ``NOW()`` @@ -257,10 +264,12 @@ placed in the CREATE TABLE statement during a :meth:`_schema.Table.create` opera .. 
sourcecode:: python+sql - t = Table('test', metadata_obj, - Column('abc', String(20), server_default='abc'), - Column('created_at', DateTime, server_default=func.sysdate()), - Column('index_value', Integer, server_default=text("0")) + t = Table( + "test", + metadata_obj, + Column("abc", String(20), server_default="abc"), + Column("created_at", DateTime, server_default=func.sysdate()), + Column("index_value", Integer, server_default=text("0")), ) A create call for the above table will produce:: @@ -296,10 +305,12 @@ may be called out using :class:`.FetchedValue` as a marker:: from sqlalchemy.schema import FetchedValue - t = Table('test', metadata_obj, - Column('id', Integer, primary_key=True), - Column('abc', TIMESTAMP, server_default=FetchedValue()), - Column('def', String(20), server_onupdate=FetchedValue()) + t = Table( + "test", + metadata_obj, + Column("id", Integer, primary_key=True), + Column("abc", TIMESTAMP, server_default=FetchedValue()), + Column("def", String(20), server_onupdate=FetchedValue()), ) The :class:`.FetchedValue` indicator does not affect the rendered DDL for the @@ -344,13 +355,17 @@ The :class:`~sqlalchemy.schema.Sequence` may be placed on any column as a configured to fire off during UPDATE operations if desired. 
It is most commonly used in conjunction with a single integer primary key column:: - table = Table("cartitems", metadata_obj, + table = Table( + "cartitems", + metadata_obj, Column( "cart_id", Integer, - Sequence('cart_id_seq', metadata=metadata_obj), primary_key=True), + Sequence("cart_id_seq", metadata=metadata_obj), + primary_key=True, + ), Column("description", String(40)), - Column("createdate", DateTime()) + Column("createdate", DateTime()), ) Where above, the table "cartitems" is associated with a sequence named @@ -397,7 +412,7 @@ object, it can be invoked with its "next value" instruction by passing it directly to a SQL execution method:: with my_engine.connect() as conn: - seq = Sequence('some_sequence') + seq = Sequence("some_sequence") nextid = conn.execute(seq) In order to embed the "next value" function of a :class:`.Sequence` @@ -405,7 +420,7 @@ inside of a SQL statement like a SELECT or INSERT, use the :meth:`.Sequence.next method, which will render at statement compilation time a SQL function that is appropriate for the target backend:: - >>> my_seq = Sequence('some_sequence') + >>> my_seq = Sequence("some_sequence") >>> stmt = select(my_seq.next_value()) >>> print(stmt.compile(dialect=postgresql.dialect())) SELECT nextval('some_sequence') AS next_value_1 @@ -418,24 +433,29 @@ Associating a Sequence with the MetaData For many years, the SQLAlchemy documentation referred to the example of associating a :class:`.Sequence` with a table as follows:: - table = Table("cartitems", metadata_obj, - Column("cart_id", Integer, Sequence('cart_id_seq'), - primary_key=True), + table = Table( + "cartitems", + metadata_obj, + Column("cart_id", Integer, Sequence("cart_id_seq"), primary_key=True), Column("description", String(40)), - Column("createdate", DateTime()) + Column("createdate", DateTime()), ) While the above is a prominent idiomatic pattern, it is recommended that the :class:`.Sequence` in most cases be explicitly associated with the 
:class:`_schema.MetaData`, using the :paramref:`.Sequence.metadata` parameter:: - table = Table("cartitems", metadata_obj, + table = Table( + "cartitems", + metadata_obj, Column( "cart_id", Integer, - Sequence('cart_id_seq', metadata=metadata_obj), primary_key=True), + Sequence("cart_id_seq", metadata=metadata_obj), + primary_key=True, + ), Column("description", String(40)), - Column("createdate", DateTime()) + Column("createdate", DateTime()), ) The :class:`.Sequence` object is a first class @@ -480,8 +500,8 @@ The preceding sections illustrate how to associate a :class:`.Sequence` with a :class:`_schema.Column` as the **Python side default generator**:: Column( - "cart_id", Integer, Sequence('cart_id_seq', metadata=metadata_obj), - primary_key=True) + "cart_id", Integer, Sequence("cart_id_seq", metadata=metadata_obj), primary_key=True + ) In the above case, the :class:`.Sequence` will automatically be subject to CREATE SEQUENCE / DROP SEQUENCE DDL when the related :class:`_schema.Table` @@ -497,24 +517,30 @@ we illustrate the same :class:`.Sequence` being associated with the :class:`_schema.Column` both as the Python-side default generator as well as the server-side default generator:: - cart_id_seq = Sequence('cart_id_seq', metadata=metadata_obj) - table = Table("cartitems", metadata_obj, + cart_id_seq = Sequence("cart_id_seq", metadata=metadata_obj) + table = Table( + "cartitems", + metadata_obj, Column( - "cart_id", Integer, cart_id_seq, - server_default=cart_id_seq.next_value(), primary_key=True), + "cart_id", + Integer, + cart_id_seq, + server_default=cart_id_seq.next_value(), + primary_key=True, + ), Column("description", String(40)), - Column("createdate", DateTime()) + Column("createdate", DateTime()), ) or with the ORM:: class CartItem(Base): - __tablename__ = 'cartitems' + __tablename__ = "cartitems" - cart_id_seq = Sequence('cart_id_seq', metadata=Base.metadata) + cart_id_seq = Sequence("cart_id_seq", metadata=Base.metadata) cart_id = Column( - 
Integer, cart_id_seq, - server_default=cart_id_seq.next_value(), primary_key=True) + Integer, cart_id_seq, server_default=cart_id_seq.next_value(), primary_key=True + ) description = Column(String(40)) createdate = Column(DateTime) @@ -665,8 +691,8 @@ Example:: data = Table( "data", metadata_obj, - Column('id', Integer, Identity(start=42, cycle=True), primary_key=True), - Column('data', String) + Column("id", Integer, Identity(start=42, cycle=True), primary_key=True), + Column("data", String), ) The DDL for the ``data`` table when run on a PostgreSQL 12 backend will look diff --git a/doc/build/core/engines.rst b/doc/build/core/engines.rst index 60895ba966..7b6e7b7474 100644 --- a/doc/build/core/engines.rst +++ b/doc/build/core/engines.rst @@ -22,7 +22,8 @@ Creating an engine is just a matter of issuing a single call, :func:`_sa.create_engine()`:: from sqlalchemy import create_engine - engine = create_engine('postgresql+psycopg2://scott:tiger@localhost:5432/mydatabase') + + engine = create_engine("postgresql+psycopg2://scott:tiger@localhost:5432/mydatabase") The above engine creates a :class:`.Dialect` object tailored towards PostgreSQL, as well as a :class:`_pool.Pool` object which will establish a DBAPI @@ -118,13 +119,13 @@ The PostgreSQL dialect uses psycopg2 as the default DBAPI. Other PostgreSQL DBAPIs include pg8000 and asyncpg:: # default - engine = create_engine('postgresql://scott:tiger@localhost/mydatabase') + engine = create_engine("postgresql://scott:tiger@localhost/mydatabase") # psycopg2 - engine = create_engine('postgresql+psycopg2://scott:tiger@localhost/mydatabase') + engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/mydatabase") # pg8000 - engine = create_engine('postgresql+pg8000://scott:tiger@localhost/mydatabase') + engine = create_engine("postgresql+pg8000://scott:tiger@localhost/mydatabase") More notes on connecting to PostgreSQL at :ref:`postgresql_toplevel`. 
@@ -135,13 +136,13 @@ The MySQL dialect uses mysqlclient as the default DBAPI. There are other MySQL DBAPIs available, including PyMySQL:: # default - engine = create_engine('mysql://scott:tiger@localhost/foo') + engine = create_engine("mysql://scott:tiger@localhost/foo") # mysqlclient (a maintained fork of MySQL-Python) - engine = create_engine('mysql+mysqldb://scott:tiger@localhost/foo') + engine = create_engine("mysql+mysqldb://scott:tiger@localhost/foo") # PyMySQL - engine = create_engine('mysql+pymysql://scott:tiger@localhost/foo') + engine = create_engine("mysql+pymysql://scott:tiger@localhost/foo") More notes on connecting to MySQL at :ref:`mysql_toplevel`. @@ -150,9 +151,9 @@ Oracle The Oracle dialect uses cx_oracle as the default DBAPI:: - engine = create_engine('oracle://scott:tiger@127.0.0.1:1521/sidname') + engine = create_engine("oracle://scott:tiger@127.0.0.1:1521/sidname") - engine = create_engine('oracle+cx_oracle://scott:tiger@tnsname') + engine = create_engine("oracle+cx_oracle://scott:tiger@tnsname") More notes on connecting to Oracle at :ref:`oracle_toplevel`. @@ -163,10 +164,10 @@ The SQL Server dialect uses pyodbc as the default DBAPI. pymssql is also available:: # pyodbc - engine = create_engine('mssql+pyodbc://scott:tiger@mydsn') + engine = create_engine("mssql+pyodbc://scott:tiger@mydsn") # pymssql - engine = create_engine('mssql+pymssql://scott:tiger@hostname:port/dbname') + engine = create_engine("mssql+pymssql://scott:tiger@hostname:port/dbname") More notes on connecting to SQL Server at :ref:`mssql_toplevel`. 
@@ -182,22 +183,22 @@ For a relative file path, this requires three slashes:: # sqlite:/// # where is relative: - engine = create_engine('sqlite:///foo.db') + engine = create_engine("sqlite:///foo.db") And for an absolute file path, the three slashes are followed by the absolute path:: # Unix/Mac - 4 initial slashes in total - engine = create_engine('sqlite:////absolute/path/to/foo.db') + engine = create_engine("sqlite:////absolute/path/to/foo.db") # Windows - engine = create_engine('sqlite:///C:\\path\\to\\foo.db') + engine = create_engine("sqlite:///C:\\path\\to\\foo.db") # Windows alternative using raw string - engine = create_engine(r'sqlite:///C:\path\to\foo.db') + engine = create_engine(r"sqlite:///C:\path\to\foo.db") To use a SQLite ``:memory:`` database, specify an empty URL:: - engine = create_engine('sqlite://') + engine = create_engine("sqlite://") More notes on connecting to SQLite at :ref:`sqlite_toplevel`. @@ -273,9 +274,7 @@ often specified in the query string of the URL directly. A common example of this is DBAPIs that accept an argument ``encoding`` for character encodings, such as most MySQL DBAPIs:: - engine = create_engine( - "mysql+pymysql://user:pass@host/test?charset=utf8mb4" - ) + engine = create_engine("mysql+pymysql://user:pass@host/test?charset=utf8mb4") The advantage of using the query string is that additional DBAPI options may be specified in configuration files in a manner that's portable to the DBAPI @@ -294,7 +293,9 @@ supported at this level. method directly as follows:: >>> from sqlalchemy import create_engine - >>> engine = create_engine("mysql+pymysql://some_user:some_pass@some_host/test?charset=utf8mb4") + >>> engine = create_engine( + ... "mysql+pymysql://some_user:some_pass@some_host/test?charset=utf8mb4" + ... 
) >>> args, kwargs = engine.dialect.create_connect_args(engine.url) >>> args, kwargs ([], {'host': 'some_host', 'database': 'test', 'user': 'some_user', 'password': 'some_pass', 'charset': 'utf8mb4', 'client_flag': 2}) @@ -319,14 +320,14 @@ underlying implementation the connection:: engine = create_engine( "postgresql+psycopg2://user:pass@hostname/dbname", - connect_args={"connection_factory": MyConnectionFactory} + connect_args={"connection_factory": MyConnectionFactory}, ) Another example is the pyodbc "timeout" parameter:: engine = create_engine( - "mssql+pyodbc://user:pass@sqlsrvr?driver=ODBC+Driver+13+for+SQL+Server", - connect_args={"timeout": 30} + "mssql+pyodbc://user:pass@sqlsrvr?driver=ODBC+Driver+13+for+SQL+Server", + connect_args={"timeout": 30}, ) The above example also illustrates that both URL "query string" parameters as @@ -347,9 +348,10 @@ collections can then be modified in place to alter how they are used:: engine = create_engine("postgresql+psycopg2://user:pass@hostname/dbname") + @event.listens_for(engine, "do_connect") def receive_do_connect(dialect, conn_rec, cargs, cparams): - cparams['connection_factory'] = MyConnectionFactory + cparams["connection_factory"] = MyConnectionFactory .. _engines_dynamic_tokens: @@ -366,9 +368,10 @@ parameter, this could be implemented as:: engine = create_engine("postgresql+psycopg2://user@hostname/dbname") + @event.listens_for(engine, "do_connect") def provide_token(dialect, conn_rec, cargs, cparams): - cparams['token'] = get_authentication_token() + cparams["token"] = get_authentication_token() .. 
seealso:: @@ -387,9 +390,8 @@ SQLAlchemy:: from sqlalchemy import event - engine = create_engine( - "postgresql+psycopg2://user:pass@hostname/dbname" - ) + engine = create_engine("postgresql+psycopg2://user:pass@hostname/dbname") + @event.listens_for(engine, "connect") def connect(dbapi_connection, connection_record): @@ -397,7 +399,6 @@ SQLAlchemy:: cursor_obj.execute("SET some session variables") cursor_obj.close() - Fully Replacing the DBAPI ``connect()`` function ------------------------------------------------ @@ -407,9 +408,8 @@ and returning it:: from sqlalchemy import event - engine = create_engine( - "postgresql+psycopg2://user:pass@hostname/dbname" - ) + engine = create_engine("postgresql+psycopg2://user:pass@hostname/dbname") + @event.listens_for(engine, "do_connect") def receive_do_connect(dialect, conn_rec, cargs, cparams): @@ -469,7 +469,7 @@ For example, to log SQL queries using Python logging instead of the import logging logging.basicConfig() - logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO) + logging.getLogger("sqlalchemy.engine").setLevel(logging.INFO) By default, the log level is set to ``logging.WARN`` within the entire ``sqlalchemy`` namespace so that no log operations occur, even within an @@ -497,10 +497,9 @@ parameters are a shortcut to immediate logging to ``sys.stdout``:: >>> from sqlalchemy import create_engine, text - >>> e = create_engine("sqlite://", echo=True, echo_pool='debug') + >>> e = create_engine("sqlite://", echo=True, echo_pool="debug") >>> with e.connect() as conn: - ... print(conn.scalar(text("select 'hi'"))) - ... + ... 
print(conn.scalar(text("select 'hi'"))) 2020-10-24 12:54:57,701 DEBUG sqlalchemy.pool.impl.SingletonThreadPool Created new connection 2020-10-24 12:54:57,701 DEBUG sqlalchemy.pool.impl.SingletonThreadPool Connection checked out from pool 2020-10-24 12:54:57,702 INFO sqlalchemy.engine.Engine select 'hi' @@ -512,6 +511,7 @@ parameters are a shortcut to immediate logging to ``sys.stdout``:: Use of these flags is roughly equivalent to:: import logging + logging.basicConfig() logging.getLogger("sqlalchemy.engine").setLevel(logging.INFO) logging.getLogger("sqlalchemy.pool").setLevel(logging.DEBUG) @@ -535,10 +535,9 @@ string. To set this to a specific name, use the >>> from sqlalchemy import create_engine >>> from sqlalchemy import text - >>> e = create_engine("sqlite://", echo=True, logging_name='myengine') + >>> e = create_engine("sqlite://", echo=True, logging_name="myengine") >>> with e.connect() as conn: ... conn.execute(text("select 'hi'")) - ... 2020-10-24 12:47:04,291 INFO sqlalchemy.engine.Engine.myengine select 'hi' 2020-10-24 12:47:04,292 INFO sqlalchemy.engine.Engine.myengine () @@ -607,7 +606,6 @@ these parameters from being logged for privacy purposes, enable the >>> e = create_engine("sqlite://", echo=True, hide_parameters=True) >>> with e.connect() as conn: ... conn.execute(text("select :some_private_name"), {"some_private_name": "pii"}) - ... 2020-10-24 12:48:32,808 INFO sqlalchemy.engine.Engine select ? 
2020-10-24 12:48:32,808 INFO sqlalchemy.engine.Engine [SQL parameters hidden due to hide_parameters=True] diff --git a/doc/build/core/event.rst b/doc/build/core/event.rst index 0f1f87cb24..3347b1336c 100644 --- a/doc/build/core/event.rst +++ b/doc/build/core/event.rst @@ -25,16 +25,19 @@ and that a user-defined listener function should receive two positional argument from sqlalchemy.event import listen from sqlalchemy.pool import Pool + def my_on_connect(dbapi_con, connection_record): print("New DBAPI connection:", dbapi_con) - listen(Pool, 'connect', my_on_connect) + + listen(Pool, "connect", my_on_connect) To listen with the :func:`.listens_for` decorator looks like:: from sqlalchemy.event import listens_for from sqlalchemy.pool import Pool + @listens_for(Pool, "connect") def my_on_connect(dbapi_con, connection_record): print("New DBAPI connection:", dbapi_con) @@ -54,9 +57,10 @@ that accepts ``**keyword`` arguments, by passing ``named=True`` to either from sqlalchemy.event import listens_for from sqlalchemy.pool import Pool + @listens_for(Pool, "connect", named=True) def my_on_connect(**kw): - print("New DBAPI connection:", kw['dbapi_connection']) + print("New DBAPI connection:", kw["dbapi_connection"]) When using named argument passing, the names listed in the function argument specification will be used as keys in the dictionary. @@ -68,10 +72,11 @@ as long as the names match up:: from sqlalchemy.event import listens_for from sqlalchemy.pool import Pool + @listens_for(Pool, "connect", named=True) def my_on_connect(dbapi_connection, **kw): print("New DBAPI connection:", dbapi_connection) - print("Connection record:", kw['connection_record']) + print("Connection record:", kw["connection_record"]) Above, the presence of ``**kw`` tells :func:`.listens_for` that arguments should be passed to the function by name, rather than positionally. 
@@ -95,25 +100,26 @@ and objects:: from sqlalchemy.engine import Engine import psycopg2 + def connect(): - return psycopg2.connect(user='ed', host='127.0.0.1', dbname='test') + return psycopg2.connect(user="ed", host="127.0.0.1", dbname="test") + my_pool = QueuePool(connect) - my_engine = create_engine('postgresql+psycopg2://ed@localhost/test') + my_engine = create_engine("postgresql+psycopg2://ed@localhost/test") # associate listener with all instances of Pool - listen(Pool, 'connect', my_on_connect) + listen(Pool, "connect", my_on_connect) # associate listener with all instances of Pool # via the Engine class - listen(Engine, 'connect', my_on_connect) + listen(Engine, "connect", my_on_connect) # associate listener with my_pool - listen(my_pool, 'connect', my_on_connect) + listen(my_pool, "connect", my_on_connect) # associate listener with my_engine.pool - listen(my_engine, 'connect', my_on_connect) - + listen(my_engine, "connect", my_on_connect) .. _event_modifiers: @@ -130,11 +136,12 @@ this value can be supported:: def validate_phone(target, value, oldvalue, initiator): """Strip non-numeric characters from a phone number""" - return re.sub(r'\D', '', value) + return re.sub(r"\D", "", value) + # setup listener on UserContact.phone attribute, instructing # it to use the return value - listen(UserContact.phone, 'set', validate_phone, retval=True) + listen(UserContact.phone, "set", validate_phone, retval=True) Event Reference --------------- diff --git a/doc/build/core/functions.rst b/doc/build/core/functions.rst index efa7c78d33..6fcee6edaa 100644 --- a/doc/build/core/functions.rst +++ b/doc/build/core/functions.rst @@ -44,7 +44,7 @@ common SQL functions that set up the expected return type for each function automatically. 
The are invoked in the same way as any other member of the :data:`_sql.func` namespace:: - select(func.count('*')).select_from(some_table) + select(func.count("*")).select_from(some_table) Note that any name not known to :data:`_sql.func` generates the function name as is - there is no restriction on what SQL functions can be called, known or diff --git a/doc/build/core/index.rst b/doc/build/core/index.rst index 0c99aa5723..08e4a5e061 100644 --- a/doc/build/core/index.rst +++ b/doc/build/core/index.rst @@ -21,4 +21,4 @@ Language provides a schema-centric usage paradigm. .. toctree:: :hidden: - tutorial \ No newline at end of file + tutorial diff --git a/doc/build/core/metadata.rst b/doc/build/core/metadata.rst index 393d110846..04fcdb0b9e 100644 --- a/doc/build/core/metadata.rst +++ b/doc/build/core/metadata.rst @@ -37,12 +37,12 @@ The remaining positional arguments are mostly from sqlalchemy import Table, Column, Integer, String user = Table( - 'user', + "user", metadata_obj, - Column('user_id', Integer, primary_key=True), - Column('user_name', String(16), nullable=False), - Column('email_address', String(60)), - Column('nickname', String(50), nullable=False) + Column("user_id", Integer, primary_key=True), + Column("user_name", String(16), nullable=False), + Column("email_address", String(60)), + Column("nickname", String(50), nullable=False), ) Above, a table called ``user`` is described, which contains four columns. The @@ -69,7 +69,7 @@ dependency (that is, each table is preceded by all tables which it references):: >>> for t in metadata_obj.sorted_tables: - ... print(t.name) + ... print(t.name) user user_preference invoice @@ -82,10 +82,12 @@ module-level variables in an application. Once a accessors which allow inspection of its properties. 
Given the following :class:`~sqlalchemy.schema.Table` definition:: - employees = Table('employees', metadata_obj, - Column('employee_id', Integer, primary_key=True), - Column('employee_name', String(60), nullable=False), - Column('employee_dept', Integer, ForeignKey("departments.department_id")) + employees = Table( + "employees", + metadata_obj, + Column("employee_id", Integer, primary_key=True), + Column("employee_name", String(60), nullable=False), + Column("employee_dept", Integer, ForeignKey("departments.department_id")), ) Note the :class:`~sqlalchemy.schema.ForeignKey` object used in this table - @@ -100,11 +102,11 @@ table include:: employees.c.employee_id # via string - employees.c['employee_id'] + employees.c["employee_id"] # a tuple of columns may be returned using multiple strings # (new in 2.0) - emp_id, name, type = employees.c['employee_id', "name", "type"] + emp_id, name, type = employees.c["employee_id", "name", "type"] # iterate through all columns for c in employees.c: @@ -341,11 +343,11 @@ using a Core :class:`_schema.Table` object as follows:: metadata_obj = MetaData() financial_info = Table( - 'financial_info', + "financial_info", metadata_obj, - Column('id', Integer, primary_key=True), - Column('value', String(100), nullable=False), - schema='remote_banks' + Column("id", Integer, primary_key=True), + Column("value", String(100), nullable=False), + schema="remote_banks", ) SQL that is rendered using this :class:`_schema.Table`, such as the SELECT @@ -362,7 +364,7 @@ using the combination of the schema and table name. 
We can view this in the :attr:`_schema.MetaData.tables` collection by searching for the key ``'remote_banks.financial_info'``:: - >>> metadata_obj.tables['remote_banks.financial_info'] + >>> metadata_obj.tables["remote_banks.financial_info"] Table('financial_info', MetaData(), Column('id', Integer(), table=, primary_key=True, nullable=False), Column('value', String(length=100), table=, nullable=False), @@ -375,9 +377,9 @@ objects, even if the referring table is also in that same schema:: customer = Table( "customer", metadata_obj, - Column('id', Integer, primary_key=True), - Column('financial_info_id', ForeignKey("remote_banks.financial_info.id")), - schema='remote_banks' + Column("id", Integer, primary_key=True), + Column("financial_info_id", ForeignKey("remote_banks.financial_info.id")), + schema="remote_banks", ) The :paramref:`_schema.Table.schema` argument may also be used with certain @@ -387,7 +389,7 @@ important on a database such as Microsoft SQL Server where there are often dotted "database/owner" tokens. The tokens may be placed directly in the name at once, such as:: - schema="dbo.scott" + schema = "dbo.scott" .. seealso:: @@ -410,10 +412,10 @@ construct:: metadata_obj = MetaData(schema="remote_banks") financial_info = Table( - 'financial_info', + "financial_info", metadata_obj, - Column('id', Integer, primary_key=True), - Column('value', String(100), nullable=False), + Column("id", Integer, primary_key=True), + Column("value", String(100), nullable=False), ) Above, for any :class:`_schema.Table` object (or :class:`_schema.Sequence` object @@ -423,7 +425,7 @@ act as though the parameter were set to the value ``"remote_banks"``. 
This includes that the :class:`_schema.Table` is cataloged in the :class:`_schema.MetaData` using the schema-qualified name, that is:: - metadata_obj.tables['remote_banks.financial_info'] + metadata_obj.tables["remote_banks.financial_info"] When using the :class:`_schema.ForeignKey` or :class:`_schema.ForeignKeyConstraint` objects to refer to this table, either the schema-qualified name or the @@ -433,20 +435,20 @@ table:: # either will work: refers_to_financial_info = Table( - 'refers_to_financial_info', + "refers_to_financial_info", metadata_obj, - Column('id', Integer, primary_key=True), - Column('fiid', ForeignKey('financial_info.id')), + Column("id", Integer, primary_key=True), + Column("fiid", ForeignKey("financial_info.id")), ) # or refers_to_financial_info = Table( - 'refers_to_financial_info', + "refers_to_financial_info", metadata_obj, - Column('id', Integer, primary_key=True), - Column('fiid', ForeignKey('remote_banks.financial_info.id')), + Column("id", Integer, primary_key=True), + Column("fiid", ForeignKey("remote_banks.financial_info.id")), ) When using a :class:`_schema.MetaData` object that sets @@ -459,11 +461,11 @@ to specify that it should not be schema qualified may use the special symbol metadata_obj = MetaData(schema="remote_banks") financial_info = Table( - 'financial_info', + "financial_info", metadata_obj, - Column('id', Integer, primary_key=True), - Column('value', String(100), nullable=False), - schema=BLANK_SCHEMA # will not use "remote_banks" + Column("id", Integer, primary_key=True), + Column("value", String(100), nullable=False), + schema=BLANK_SCHEMA, # will not use "remote_banks" ) .. 
seealso:: @@ -512,6 +514,7 @@ Oracle CURRENT_SCHEMA variable to an alternate name:: engine = create_engine("oracle+cx_oracle://scott:tiger@tsn_name") + @event.listens_for(engine, "connect", insert=True) def set_current_schema(dbapi_connection, connection_record): cursor_obj = dbapi_connection.cursor() @@ -553,11 +556,13 @@ example, MySQL has different table backend types, including "MyISAM" and "InnoDB". This can be expressed with :class:`~sqlalchemy.schema.Table` using ``mysql_engine``:: - addresses = Table('engine_email_addresses', metadata_obj, - Column('address_id', Integer, primary_key=True), - Column('remote_user_id', Integer, ForeignKey(users.c.user_id)), - Column('email_address', String(20)), - mysql_engine='InnoDB' + addresses = Table( + "engine_email_addresses", + metadata_obj, + Column("address_id", Integer, primary_key=True), + Column("remote_user_id", Integer, ForeignKey(users.c.user_id)), + Column("email_address", String(20)), + mysql_engine="InnoDB", ) Other backends may support table-level options as well - these would be diff --git a/doc/build/core/operators.rst b/doc/build/core/operators.rst index 38ff28dcca..4f0c2a5af2 100644 --- a/doc/build/core/operators.rst +++ b/doc/build/core/operators.rst @@ -11,17 +11,17 @@ Operator Reference >>> user_table = Table( ... "user_account", ... metadata_obj, - ... Column('id', Integer, primary_key=True), - ... Column('name', String(30)), - ... Column('fullname', String) + ... Column("id", Integer, primary_key=True), + ... Column("name", String(30)), + ... Column("fullname", String), ... ) >>> from sqlalchemy import ForeignKey >>> address_table = Table( ... "address", ... metadata_obj, - ... Column('id', Integer, primary_key=True), - ... Column('user_id', None, ForeignKey('user_account.id')), - ... Column('email_address', String, nullable=False) + ... Column("id", Integer, primary_key=True), + ... Column("user_id", None, ForeignKey("user_account.id")), + ... Column("email_address", String, nullable=False), ... 
) >>> metadata_obj.create_all(engine) BEGIN (implicit) @@ -30,47 +30,59 @@ Operator Reference >>> Base = declarative_base() >>> from sqlalchemy.orm import relationship >>> class User(Base): - ... __tablename__ = 'user_account' - ... + ... __tablename__ = "user_account" + ... ... id = Column(Integer, primary_key=True) ... name = Column(String(30)) ... fullname = Column(String) - ... + ... ... addresses = relationship("Address", back_populates="user") - ... + ... ... def __repr__(self): - ... return f"User(id={self.id!r}, name={self.name!r}, fullname={self.fullname!r})" + ... return f"User(id={self.id!r}, name={self.name!r}, fullname={self.fullname!r})" >>> class Address(Base): - ... __tablename__ = 'address' - ... + ... __tablename__ = "address" + ... ... id = Column(Integer, primary_key=True) ... email_address = Column(String, nullable=False) - ... user_id = Column(Integer, ForeignKey('user_account.id')) - ... + ... user_id = Column(Integer, ForeignKey("user_account.id")) + ... ... user = relationship("User", back_populates="addresses") - ... + ... ... def __repr__(self): ... return f"Address(id={self.id!r}, email_address={self.email_address!r})" >>> conn = engine.connect() >>> from sqlalchemy.orm import Session >>> session = Session(conn) - >>> session.add_all([ - ... User(name="spongebob", fullname="Spongebob Squarepants", addresses=[ - ... Address(email_address="spongebob@sqlalchemy.org") - ... ]), - ... User(name="sandy", fullname="Sandy Cheeks", addresses=[ - ... Address(email_address="sandy@sqlalchemy.org"), - ... Address(email_address="squirrel@squirrelpower.org") - ... ]), - ... User(name="patrick", fullname="Patrick Star", addresses=[ - ... Address(email_address="pat999@aol.com") - ... ]), - ... User(name="squidward", fullname="Squidward Tentacles", addresses=[ - ... Address(email_address="stentcl@sqlalchemy.org") - ... ]), - ... User(name="ehkrabs", fullname="Eugene H. Krabs"), - ... ]) + >>> session.add_all( + ... [ + ... User( + ... 
name="spongebob", + ... fullname="Spongebob Squarepants", + ... addresses=[Address(email_address="spongebob@sqlalchemy.org")], + ... ), + ... User( + ... name="sandy", + ... fullname="Sandy Cheeks", + ... addresses=[ + ... Address(email_address="sandy@sqlalchemy.org"), + ... Address(email_address="squirrel@squirrelpower.org"), + ... ], + ... ), + ... User( + ... name="patrick", + ... fullname="Patrick Star", + ... addresses=[Address(email_address="pat999@aol.com")], + ... ), + ... User( + ... name="squidward", + ... fullname="Squidward Tentacles", + ... addresses=[Address(email_address="stentcl@sqlalchemy.org")], + ... ), + ... User(name="ehkrabs", fullname="Eugene H. Krabs"), + ... ] + ... ) >>> session.commit() BEGIN ... >>> conn.begin() @@ -108,49 +120,49 @@ strings, dates, and many others: * :meth:`_sql.ColumnOperators.__eq__` (Python "``==``" operator):: - >>> print(column('x') == 5) + >>> print(column("x") == 5) x = :x_1 .. * :meth:`_sql.ColumnOperators.__ne__` (Python "``!=``" operator):: - >>> print(column('x') != 5) + >>> print(column("x") != 5) x != :x_1 .. * :meth:`_sql.ColumnOperators.__gt__` (Python "``>``" operator):: - >>> print(column('x') > 5) + >>> print(column("x") > 5) x > :x_1 .. * :meth:`_sql.ColumnOperators.__lt__` (Python "``<``" operator):: - >>> print(column('x') < 5) + >>> print(column("x") < 5) x < :x_1 .. * :meth:`_sql.ColumnOperators.__ge__` (Python "``>=``" operator):: - >>> print(column('x') >= 5) + >>> print(column("x") >= 5) x >= :x_1 .. * :meth:`_sql.ColumnOperators.__le__` (Python "``<=``" operator):: - >>> print(column('x') <= 5) + >>> print(column("x") <= 5) x <= :x_1 .. * :meth:`_sql.ColumnOperators.between`:: - >>> print(column('x').between(5, 10)) + >>> print(column("x").between(5, 10)) x BETWEEN :x_1 AND :x_2 .. 
@@ -171,7 +183,7 @@ IN is available most typically by passing a list of values to the :meth:`_sql.ColumnOperators.in_` method:: - >>> print(column('x').in_([1, 2, 3])) + >>> print(column("x").in_([1, 2, 3])) x IN (__[POSTCOMPILE_x_1]) The special bound form ``__[POSTCOMPILE`` is rendered into individual parameters @@ -211,12 +223,12 @@ NOT IN "NOT IN" is available via the :meth:`_sql.ColumnOperators.not_in` operator:: - >>> print(column('x').not_in([1, 2, 3])) + >>> print(column("x").not_in([1, 2, 3])) (x NOT IN (__[POSTCOMPILE_x_1])) This is typically more easily available by negating with the ``~`` operator:: - >>> print(~column('x').in_([1, 2, 3])) + >>> print(~column("x").in_([1, 2, 3])) (x NOT IN (__[POSTCOMPILE_x_1])) Tuple IN Expressions @@ -229,7 +241,7 @@ building block for tuple comparisons. The :meth:`_sql.Tuple.in_` operator then receives a list of tuples:: >>> from sqlalchemy import tuple_ - >>> tup = tuple_(column('x', Integer), column('y', Integer)) + >>> tup = tuple_(column("x", Integer), column("y", Integer)) >>> expr = tup.in_([(1, 2), (3, 4)]) >>> print(expr) (x, y) IN (__[POSTCOMPILE_param_1]) @@ -256,14 +268,14 @@ operators work with subqueries. The form provides that a :class:`_sql.Select` construct is passed in directly, without any explicit conversion to a named subquery:: - >>> print(column('x').in_(select(user_table.c.id))) + >>> print(column("x").in_(select(user_table.c.id))) x IN (SELECT user_account.id FROM user_account) Tuples work as expected:: >>> print( - ... tuple_(column('x'), column('y')).in_( + ... tuple_(column("x"), column("y")).in_( ... select(user_table.c.id, address_table.c.id).join(address_table) ... ) ... ) @@ -283,14 +295,14 @@ databases support: as " IS NULL". 
The ``NULL`` constant is most easily acquired using regular Python ``None``:: - >>> print(column('x').is_(None)) + >>> print(column("x").is_(None)) x IS NULL SQL NULL is also explicitly available, if needed, using the :func:`_sql.null` construct:: >>> from sqlalchemy import null - >>> print(column('x').is_(null())) + >>> print(column("x").is_(null())) x IS NULL The :meth:`_sql.ColumnOperators.is_` operator is automatically invoked when @@ -300,7 +312,7 @@ databases support: explicitly, paricularly when used with a dynamic value:: >>> a = None - >>> print(column('x') == a) + >>> print(column("x") == a) x IS NULL Note that the Python ``is`` operator is **not overloaded**. Even though @@ -311,26 +323,26 @@ databases support: Similar to :meth:`_sql.ColumnOperators.is_`, produces "IS NOT":: - >>> print(column('x').is_not(None)) + >>> print(column("x").is_not(None)) x IS NOT NULL Is similarly equivalent to ``!= None``:: - >>> print(column('x') != None) + >>> print(column("x") != None) x IS NOT NULL * :meth:`_sql.ColumnOperators.is_distinct_from`: Produces SQL IS DISTINCT FROM:: - >>> print(column('x').is_distinct_from('some value')) + >>> print(column("x").is_distinct_from("some value")) x IS DISTINCT FROM :x_1 * :meth:`_sql.ColumnOperators.isnot_distinct_from`: Produces SQL IS NOT DISTINCT FROM:: - >>> print(column('x').isnot_distinct_from('some value')) + >>> print(column("x").isnot_distinct_from("some value")) x IS NOT DISTINCT FROM :x_1 String Comparisons @@ -338,7 +350,7 @@ String Comparisons * :meth:`_sql.ColumnOperators.like`:: - >>> print(column('x').like('word')) + >>> print(column("x").like("word")) x LIKE :x_1 .. @@ -348,14 +360,14 @@ String Comparisons Case insensitive LIKE makes use of the SQL ``lower()`` function on a generic backend. On the PostgreSQL backend it will use ``ILIKE``:: - >>> print(column('x').ilike('word')) + >>> print(column("x").ilike("word")) lower(x) LIKE lower(:x_1) .. 
* :meth:`_sql.ColumnOperators.notlike`:: - >>> print(column('x').notlike('word')) + >>> print(column("x").notlike("word")) x NOT LIKE :x_1 .. @@ -363,7 +375,7 @@ String Comparisons * :meth:`_sql.ColumnOperators.notilike`:: - >>> print(column('x').notilike('word')) + >>> print(column("x").notilike("word")) lower(x) NOT LIKE lower(:x_1) .. @@ -378,21 +390,21 @@ backends or sometimes a function like ``concat()``: * :meth:`_sql.ColumnOperators.startswith`:: The string containment operators - >>> print(column('x').startswith('word')) + >>> print(column("x").startswith("word")) x LIKE :x_1 || '%' .. * :meth:`_sql.ColumnOperators.endswith`:: - >>> print(column('x').endswith('word')) + >>> print(column("x").endswith("word")) x LIKE '%' || :x_1 .. * :meth:`_sql.ColumnOperators.contains`:: - >>> print(column('x').contains('word')) + >>> print(column("x").contains("word")) x LIKE '%' || :x_1 || '%' .. @@ -408,7 +420,7 @@ behaviors and results on different databases: This is a dialect-specific operator that makes use of the MATCH feature of the underlying database, if available:: - >>> print(column('x').match('word')) + >>> print(column("x").match("word")) x MATCH :x_1 .. @@ -419,13 +431,13 @@ behaviors and results on different databases: for example the PostgreSQL dialect:: >>> from sqlalchemy.dialects import postgresql - >>> print(column('x').regexp_match('word').compile(dialect=postgresql.dialect())) + >>> print(column("x").regexp_match("word").compile(dialect=postgresql.dialect())) x ~ %(x_1)s Or MySQL:: >>> from sqlalchemy.dialects import mysql - >>> print(column('x').regexp_match('word').compile(dialect=mysql.dialect())) + >>> print(column("x").regexp_match("word").compile(dialect=mysql.dialect())) x REGEXP %s .. 
@@ -440,20 +452,20 @@ String Alteration String concatenation:: - >>> print(column('x').concat("some string")) + >>> print(column("x").concat("some string")) x || :x_1 This operator is available via :meth:`_sql.ColumnOperators.__add__`, that is, the Python ``+`` operator, when working with a column expression that derives from :class:`_types.String`:: - >>> print(column('x', String) + "some string") + >>> print(column("x", String) + "some string") x || :x_1 The operator will produce the appropriate database-specific construct, such as on MySQL it's historically been the ``concat()`` SQL function:: - >>> print((column('x', String) + "some string").compile(dialect=mysql.dialect())) + >>> print((column("x", String) + "some string").compile(dialect=mysql.dialect())) concat(x, %s) .. @@ -463,7 +475,7 @@ String Alteration Complementary to :meth:`_sql.ColumnOperators.regexp` this produces REGEXP REPLACE equivalent for the backends which support it:: - >>> print(column('x').regexp_replace('foo', 'bar').compile(dialect=postgresql.dialect())) + >>> print(column("x").regexp_replace("foo", "bar").compile(dialect=postgresql.dialect())) REGEXP_REPLACE(x, %(x_1)s, %(x_2)s) .. @@ -473,7 +485,11 @@ String Alteration Produces the COLLATE SQL operator which provides for specific collations at expression time:: - >>> print((column('x').collate('latin1_german2_ci') == 'Müller').compile(dialect=mysql.dialect())) + >>> print( + ... (column("x").collate("latin1_german2_ci") == "Müller").compile( + ... dialect=mysql.dialect() + ... ) + ... ) (x COLLATE latin1_german2_ci) = %s @@ -481,7 +497,11 @@ String Alteration >>> from sqlalchemy import literal - >>> print((literal('Müller').collate('latin1_german2_ci') == column('x')).compile(dialect=mysql.dialect())) + >>> print( + ... (literal("Müller").collate("latin1_german2_ci") == column("x")).compile( + ... dialect=mysql.dialect() + ... ) + ... ) (%s COLLATE latin1_german2_ci) = x .. 
@@ -491,10 +511,10 @@ Arithmetic Operators * :meth:`_sql.ColumnOperators.__add__`, :meth:`_sql.ColumnOperators.__radd__` (Python "``+``" operator):: - >>> print(column('x') + 5) + >>> print(column("x") + 5) x + :x_1 - >>> print(5 + column('x')) + >>> print(5 + column("x")) :x_1 + x .. @@ -507,10 +527,10 @@ Arithmetic Operators * :meth:`_sql.ColumnOperators.__sub__`, :meth:`_sql.ColumnOperators.__rsub__` (Python "``-``" operator):: - >>> print(column('x') - 5) + >>> print(column("x") - 5) x - :x_1 - >>> print(5 - column('x')) + >>> print(5 - column("x")) :x_1 - x .. @@ -518,10 +538,10 @@ Arithmetic Operators * :meth:`_sql.ColumnOperators.__mul__`, :meth:`_sql.ColumnOperators.__rmul__` (Python "``*``" operator):: - >>> print(column('x') * 5) + >>> print(column("x") * 5) x * :x_1 - >>> print(5 * column('x')) + >>> print(5 * column("x")) :x_1 * x .. @@ -529,9 +549,9 @@ Arithmetic Operators * :meth:`_sql.ColumnOperators.__truediv__`, :meth:`_sql.ColumnOperators.__rtruediv__` (Python "``/``" operator). This is the Python ``truediv`` operator, which will ensure integer true division occurs:: - >>> print(column('x') / 5) + >>> print(column("x") / 5) x / CAST(:x_1 AS NUMERIC) - >>> print(5 / column('x')) + >>> print(5 / column("x")) :x_1 / CAST(x AS NUMERIC) .. 
versionchanged:: 2.0 The Python ``/`` operator now ensures integer true division takes place @@ -543,17 +563,17 @@ Arithmetic Operators For the default backend as well as backends such as PostgreSQL, the SQL ``/`` operator normally behaves this way for integer values:: - >>> print(column('x') // 5) + >>> print(column("x") // 5) x / :x_1 - >>> print(5 // column('x', Integer)) + >>> print(5 // column("x", Integer)) :x_1 / x For backends that don't use floor division by default, or when used with numeric values, the FLOOR() function is used to ensure floor division:: - >>> print(column('x') // 5.5) + >>> print(column("x") // 5.5) FLOOR(x / :x_1) - >>> print(5 // column('x', Numeric)) + >>> print(5 // column("x", Numeric)) FLOOR(:x_1 / x) .. versionadded:: 2.0 Support for FLOOR division @@ -563,9 +583,9 @@ Arithmetic Operators * :meth:`_sql.ColumnOperators.__mod__`, :meth:`_sql.ColumnOperators.__rmod__` (Python "``%``" operator):: - >>> print(column('x') % 5) + >>> print(column("x") % 5) x % :x_1 - >>> print(5 % column('x')) + >>> print(5 % column("x")) :x_1 % x .. @@ -578,10 +598,10 @@ The most common conjunction, "AND", is automatically applied if we make repeated :meth:`_sql.Update.where` and :meth:`_sql.Delete.where`:: >>> print( - ... select(address_table.c.email_address). - ... where(user_table.c.name == 'squidward'). - ... where(address_table.c.user_id == user_table.c.id) - ... ) + ... select(address_table.c.email_address) + ... .where(user_table.c.name == "squidward") + ... .where(address_table.c.user_id == user_table.c.id) + ... ) SELECT address.email_address FROM address, user_account WHERE user_account.name = :name_1 AND address.user_id = user_account.id @@ -589,12 +609,11 @@ The most common conjunction, "AND", is automatically applied if we make repeated :meth:`_sql.Select.where`, :meth:`_sql.Update.where` and :meth:`_sql.Delete.where` also accept multiple expressions with the same effect:: >>> print( - ... select(address_table.c.email_address). - ... 
where( - ... user_table.c.name == 'squidward', - ... address_table.c.user_id == user_table.c.id - ... ) - ... ) + ... select(address_table.c.email_address).where( + ... user_table.c.name == "squidward", + ... address_table.c.user_id == user_table.c.id, + ... ) + ... ) SELECT address.email_address FROM address, user_account WHERE user_account.name = :name_1 AND address.user_id = user_account.id @@ -604,11 +623,10 @@ The "AND" conjunction, as well as its partner "OR", are both available directly >>> from sqlalchemy import and_, or_ >>> print( - ... select(address_table.c.email_address). - ... where( + ... select(address_table.c.email_address).where( ... and_( - ... or_(user_table.c.name == 'squidward', user_table.c.name == 'sandy'), - ... address_table.c.user_id == user_table.c.id + ... or_(user_table.c.name == "squidward", user_table.c.name == "sandy"), + ... address_table.c.user_id == user_table.c.id, ... ) ... ) ... ) @@ -621,13 +639,13 @@ A negation is available using the :func:`_sql.not_` function. This will typically invert the operator in a boolean expression:: >>> from sqlalchemy import not_ - >>> print(not_(column('x') == 5)) + >>> print(not_(column("x") == 5)) x != :x_1 It also may apply a keyword such as ``NOT`` when appropriate:: >>> from sqlalchemy import Boolean - >>> print(not_(column('x', Boolean))) + >>> print(not_(column("x", Boolean))) NOT x @@ -647,7 +665,7 @@ The above conjunction functions :func:`_sql.and_`, :func:`_sql.or_`, The Python binary ``&`` operator is overloaded to behave the same as :func:`_sql.and_` (note parenthesis around the two operands):: - >>> print((column('x') == 5) & (column('y') == 10)) + >>> print((column("x") == 5) & (column("y") == 10)) x = :x_1 AND y = :y_1 .. 
@@ -658,7 +676,7 @@ The above conjunction functions :func:`_sql.and_`, :func:`_sql.or_`, The Python binary ``|`` operator is overloaded to behave the same as :func:`_sql.or_` (note parenthesis around the two operands):: - >>> print((column('x') == 5) | (column('y') == 10)) + >>> print((column("x") == 5) | (column("y") == 10)) x = :x_1 OR y = :y_1 .. @@ -670,11 +688,11 @@ The above conjunction functions :func:`_sql.and_`, :func:`_sql.or_`, as :func:`_sql.not_`, either inverting the existing operator, or applying the ``NOT`` keyword to the expression as a whole:: - >>> print(~(column('x') == 5)) + >>> print(~(column("x") == 5)) x != :x_1 >>> from sqlalchemy import Boolean - >>> print(~column('x', Boolean)) + >>> print(~column("x", Boolean)) NOT x .. @@ -690,4 +708,4 @@ TODO .. Setup code, not for display >>> conn.close() - ROLLBACK \ No newline at end of file + ROLLBACK diff --git a/doc/build/core/pooling.rst b/doc/build/core/pooling.rst index a0e8157048..7f6523bffd 100644 --- a/doc/build/core/pooling.rst +++ b/doc/build/core/pooling.rst @@ -35,8 +35,9 @@ directly to :func:`~sqlalchemy.create_engine` as keyword arguments: ``pool_size``, ``max_overflow``, ``pool_recycle`` and ``pool_timeout``. 
For example:: - engine = create_engine('postgresql+psycopg2://me@localhost/mydb', - pool_size=20, max_overflow=0) + engine = create_engine( + "postgresql+psycopg2://me@localhost/mydb", pool_size=20, max_overflow=0 + ) All SQLAlchemy pool implementations have in common that none of them "pre create" connections - all implementations wait @@ -62,9 +63,10 @@ connection pooling is to be disabled, which can be achieved by using the :class:`.NullPool` implementation:: from sqlalchemy.pool import NullPool + engine = create_engine( - 'postgresql+psycopg2://scott:tiger@localhost/test', - poolclass=NullPool) + "postgresql+psycopg2://scott:tiger@localhost/test", poolclass=NullPool + ) Using a Custom Connection Function ---------------------------------- @@ -84,10 +86,12 @@ by any additional options:: import sqlalchemy.pool as pool import psycopg2 + def getconn(): - c = psycopg2.connect(user='ed', host='127.0.0.1', dbname='test') + c = psycopg2.connect(user="ed", host="127.0.0.1", dbname="test") return c + mypool = pool.QueuePool(getconn, max_overflow=10, pool_size=5) DBAPI connections can then be procured from the pool using the @@ -249,6 +253,7 @@ behaviors are needed:: some_engine = create_engine(...) + @event.listens_for(some_engine, "engine_connect") def ping_connection(connection, branch): if branch: @@ -306,6 +311,7 @@ that they are replaced with new ones upon next checkout. This flow is illustrated by the code example below:: from sqlalchemy import create_engine, exc + e = create_engine(...) c = e.connect() @@ -345,6 +351,7 @@ such as MySQL that automatically close connections that have been stale after a period of time:: from sqlalchemy import create_engine + e = create_engine("mysql+mysqldb://scott:tiger@localhost/test", pool_recycle=3600) Above, any DBAPI connection that has been open for more than one hour will be invalidated and replaced, @@ -461,8 +468,7 @@ close these connections out. 
The difference between FIFO and LIFO is basically whether or not its desirable for the pool to keep a full set of connections ready to go even during idle periods:: - engine = create_engine( - "postgreql://", pool_use_lifo=True, pool_pre_ping=True) + engine = create_engine("postgreql://", pool_use_lifo=True, pool_pre_ping=True) Above, we also make use of the :paramref:`_sa.create_engine.pool_pre_ping` flag so that connections which are closed from the server side are gracefully @@ -504,8 +510,8 @@ are three general approaches to this: more than once:: from sqlalchemy.pool import NullPool - engine = create_engine("mysql+mysqldb://user:pass@host/dbname", poolclass=NullPool) + engine = create_engine("mysql+mysqldb://user:pass@host/dbname", poolclass=NullPool) 2. Call :meth:`_engine.Engine.dispose` on any given :class:`_engine.Engine`, passing the :paramref:`.Engine.dispose.close` parameter with a value of @@ -561,19 +567,20 @@ are three general approaches to this: engine = create_engine("...") + @event.listens_for(engine, "connect") def connect(dbapi_connection, connection_record): - connection_record.info['pid'] = os.getpid() + connection_record.info["pid"] = os.getpid() + @event.listens_for(engine, "checkout") def checkout(dbapi_connection, connection_record, connection_proxy): pid = os.getpid() - if connection_record.info['pid'] != pid: + if connection_record.info["pid"] != pid: connection_record.dbapi_connection = connection_proxy.dbapi_connection = None raise exc.DisconnectionError( - "Connection record belongs to pid %s, " - "attempting to check out in pid %s" % - (connection_record.info['pid'], pid) + "Connection record belongs to pid %s, " + "attempting to check out in pid %s" % (connection_record.info["pid"], pid) ) Above, we use an approach similar to that described in diff --git a/doc/build/core/reflection.rst b/doc/build/core/reflection.rst index 7002722c6d..c2c636d85f 100644 --- a/doc/build/core/reflection.rst +++ b/doc/build/core/reflection.rst @@ -13,7 
+13,7 @@ existing within the database. This process is called *reflection*. In the most simple case you need only specify the table name, a :class:`~sqlalchemy.schema.MetaData` object, and the ``autoload_with`` argument:: - >>> messages = Table('messages', metadata_obj, autoload_with=engine) + >>> messages = Table("messages", metadata_obj, autoload_with=engine) >>> [c.name for c in messages.columns] ['message_id', 'message_name', 'date'] @@ -30,8 +30,8 @@ Below, assume the table ``shopping_cart_items`` references a table named ``shopping_carts``. Reflecting the ``shopping_cart_items`` table has the effect such that the ``shopping_carts`` table will also be loaded:: - >>> shopping_cart_items = Table('shopping_cart_items', metadata_obj, autoload_with=engine) - >>> 'shopping_carts' in metadata_obj.tables: + >>> shopping_cart_items = Table("shopping_cart_items", metadata_obj, autoload_with=engine) + >>> "shopping_carts" in metadata_obj.tables True The :class:`~sqlalchemy.schema.MetaData` has an interesting "singleton-like" @@ -43,7 +43,7 @@ you the already-existing :class:`~sqlalchemy.schema.Table` object if one already exists with the given name. Such as below, we can access the already generated ``shopping_carts`` table just by naming it:: - shopping_carts = Table('shopping_carts', metadata_obj) + shopping_carts = Table("shopping_carts", metadata_obj) Of course, it's a good idea to use ``autoload_with=engine`` with the above table regardless. This is so that the table's attributes will be loaded if they have @@ -61,11 +61,16 @@ Individual columns can be overridden with explicit values when reflecting tables; this is handy for specifying custom datatypes, constraints such as primary keys that may not be configured within the database, etc.:: - >>> mytable = Table('mytable', metadata_obj, - ... Column('id', Integer, primary_key=True), # override reflected 'id' to have primary key - ... Column('mydata', Unicode(50)), # override reflected 'mydata' to be Unicode - ... 
# additional Column objects which require no change are reflected normally - ... autoload_with=some_engine) + >>> mytable = Table( + ... "mytable", + ... metadata_obj, + ... Column( + ... "id", Integer, primary_key=True + ... ), # override reflected 'id' to have primary key + ... Column("mydata", Unicode(50)), # override reflected 'mydata' to be Unicode + ... # additional Column objects which require no change are reflected normally + ... autoload_with=some_engine, + ... ) .. seealso:: @@ -92,10 +97,12 @@ extrapolate these constraints. Use the "override" technique for this, specifying explicitly those columns which are part of the primary key or have foreign key constraints:: - my_view = Table("some_view", metadata, - Column("view_id", Integer, primary_key=True), - Column("related_thing", Integer, ForeignKey("othertable.thing_id")), - autoload_with=engine + my_view = Table( + "some_view", + metadata, + Column("view_id", Integer, primary_key=True), + Column("related_thing", Integer, ForeignKey("othertable.thing_id")), + autoload_with=engine, ) Reflecting All Tables at Once @@ -109,8 +116,8 @@ object's dictionary of tables:: metadata_obj = MetaData() metadata_obj.reflect(bind=someengine) - users_table = metadata_obj.tables['users'] - addresses_table = metadata_obj.tables['addresses'] + users_table = metadata_obj.tables["users"] + addresses_table = metadata_obj.tables["addresses"] ``metadata.reflect()`` also provides a handy way to clear or delete all the rows in a database:: @@ -149,7 +156,7 @@ The end result is that :class:`_schema.Table` objects from the "project" schema will be reflected, and they will be populated as schema-qualified with that name:: - >>> metadata_obj.tables['project.messages'] + >>> metadata_obj.tables["project.messages"] Table('messages', MetaData(), Column('message_id', INTEGER(), table=), schema='project') Similarly, an individual :class:`_schema.Table` object that includes the @@ -157,7 +164,7 @@ Similarly, an individual 
:class:`_schema.Table` object that includes the database schema, overriding any default schema that may have been configured on the owning :class:`_schema.MetaData` collection:: - >>> messages = Table('messages', metadata_obj, schema="project", autoload_with=someengine) + >>> messages = Table("messages", metadata_obj, schema="project", autoload_with=someengine) >>> messages Table('messages', MetaData(), Column('message_id', INTEGER(), table=), schema='project') @@ -246,7 +253,9 @@ semantically equivalent:: >>> # reflect in non-schema qualified fashion >>> messages_table_1 = Table("messages", metadata_obj, autoload_with=someengine) >>> # reflect in schema qualified fashion - >>> messages_table_2 = Table("messages", metadata_obj, schema="project", autoload_with=someengine) + >>> messages_table_2 = Table( + ... "messages", metadata_obj, schema="project", autoload_with=someengine + ... ) >>> # two different objects >>> messages_table_1 is messages_table_2 False @@ -280,7 +289,9 @@ fashion then loads a related table that will also be performed in a schema qualified fashion:: >>> # reflect "messages" in a schema qualified fashion - >>> messages_table_1 = Table("messages", metadata_obj, schema="project", autoload_with=someengine) + >>> messages_table_1 = Table( + ... "messages", metadata_obj, schema="project", autoload_with=someengine + ... ) The above ``messages_table_1`` will refer to ``projects`` also in a schema qualified fashion. 
This "projects" table will be reflected automatically by @@ -299,12 +310,12 @@ fashion, there are now two projects tables that are not the same: >>> messages_table_1.c.project_id.references(projects_table_1.c.project_id) False - >>> it refers to this one + >>> # it refers to this one >>> projects_table_2 = metadata_obj.tables["project.projects"] >>> messages_table_1.c.project_id.references(projects_table_2.c.project_id) True - >>> they're different, as one non-schema qualified and the other one is + >>> # they're different, as one non-schema qualified and the other one is >>> projects_table_1 is projects_table_2 False @@ -343,7 +354,8 @@ database is also available. This is known as the "Inspector":: from sqlalchemy import create_engine from sqlalchemy import inspect - engine = create_engine('...') + + engine = create_engine("...") insp = inspect(engine) print(insp.get_table_names()) @@ -472,7 +484,7 @@ The format of this dictionary is described at :meth:`_reflection.Inspector.get_c >>> metadata_obj = MetaData() >>> @event.listens_for(metadata_obj, "column_reflect") - >>> def genericize_datatypes(inspector, tablename, column_dict): + ... def genericize_datatypes(inspector, tablename, column_dict): ... 
column_dict["type"] = column_dict["type"].as_generic() >>> my_generic_table = Table("my_table", metadata_obj, autoload_with=mysql_engine) diff --git a/doc/build/core/type_basics.rst b/doc/build/core/type_basics.rst index e6925c4029..74e210164b 100644 --- a/doc/build/core/type_basics.rst +++ b/doc/build/core/type_basics.rst @@ -43,10 +43,10 @@ values to and from the database, as in the example below:: metadata_obj = MetaData() user = Table( - 'user', + "user", metadata_obj, - Column('user_name', String, primary_key=True), - Column('email_address', String(60)), + Column("user_name", String, primary_key=True), + Column("email_address", String(60)), ) When using a particular :class:`_types.TypeEngine` class in a diff --git a/doc/build/dialects/mssql.rst b/doc/build/dialects/mssql.rst index c107e4ca6d..3aff100873 100644 --- a/doc/build/dialects/mssql.rst +++ b/doc/build/dialects/mssql.rst @@ -19,12 +19,38 @@ As with all SQLAlchemy dialects, all UPPERCASE types that are known to be valid with SQL server are importable from the top level dialect, whether they originate from :mod:`sqlalchemy.types` or from the local dialect:: - from sqlalchemy.dialects.mssql import \ - BIGINT, BINARY, BIT, CHAR, DATE, DATETIME, DATETIME2, \ - DATETIMEOFFSET, DECIMAL, FLOAT, IMAGE, INTEGER, JSON, MONEY, \ - NCHAR, NTEXT, NUMERIC, NVARCHAR, REAL, SMALLDATETIME, \ - SMALLINT, SMALLMONEY, SQL_VARIANT, TEXT, TIME, \ - TIMESTAMP, TINYINT, UNIQUEIDENTIFIER, VARBINARY, VARCHAR + from sqlalchemy.dialects.mssql import ( + BIGINT, + BINARY, + BIT, + CHAR, + DATE, + DATETIME, + DATETIME2, + DATETIMEOFFSET, + DECIMAL, + FLOAT, + IMAGE, + INTEGER, + JSON, + MONEY, + NCHAR, + NTEXT, + NUMERIC, + NVARCHAR, + REAL, + SMALLDATETIME, + SMALLINT, + SMALLMONEY, + SQL_VARIANT, + TEXT, + TIME, + TIMESTAMP, + TINYINT, + UNIQUEIDENTIFIER, + VARBINARY, + VARCHAR, + ) Types which are specific to SQL Server, or have SQL Server-specific construction arguments, are as follows: diff --git a/doc/build/dialects/mysql.rst 
b/doc/build/dialects/mysql.rst index 87d8f7224e..a46bf721e2 100644 --- a/doc/build/dialects/mysql.rst +++ b/doc/build/dialects/mysql.rst @@ -19,12 +19,42 @@ MySQL Data Types As with all SQLAlchemy dialects, all UPPERCASE types that are known to be valid with MySQL are importable from the top level dialect:: - from sqlalchemy.dialects.mysql import \ - BIGINT, BINARY, BIT, BLOB, BOOLEAN, CHAR, DATE, \ - DATETIME, DECIMAL, DECIMAL, DOUBLE, ENUM, FLOAT, INTEGER, \ - LONGBLOB, LONGTEXT, MEDIUMBLOB, MEDIUMINT, MEDIUMTEXT, NCHAR, \ - NUMERIC, NVARCHAR, REAL, SET, SMALLINT, TEXT, TIME, TIMESTAMP, \ - TINYBLOB, TINYINT, TINYTEXT, VARBINARY, VARCHAR, YEAR + from sqlalchemy.dialects.mysql import ( + BIGINT, + BINARY, + BIT, + BLOB, + BOOLEAN, + CHAR, + DATE, + DATETIME, + DECIMAL, + DECIMAL, + DOUBLE, + ENUM, + FLOAT, + INTEGER, + LONGBLOB, + LONGTEXT, + MEDIUMBLOB, + MEDIUMINT, + MEDIUMTEXT, + NCHAR, + NUMERIC, + NVARCHAR, + REAL, + SET, + SMALLINT, + TEXT, + TIME, + TIMESTAMP, + TINYBLOB, + TINYINT, + TINYTEXT, + VARBINARY, + VARCHAR, + YEAR, + ) Types which are specific to MySQL, or have MySQL-specific construction arguments, are as follows: diff --git a/doc/build/dialects/oracle.rst b/doc/build/dialects/oracle.rst index 62e72eb829..d676f633b9 100644 --- a/doc/build/dialects/oracle.rst +++ b/doc/build/dialects/oracle.rst @@ -12,11 +12,26 @@ As with all SQLAlchemy dialects, all UPPERCASE types that are known to be valid with Oracle are importable from the top level dialect, whether they originate from :mod:`sqlalchemy.types` or from the local dialect:: - from sqlalchemy.dialects.oracle import \ - BFILE, BLOB, CHAR, CLOB, DATE, \ - DOUBLE_PRECISION, FLOAT, INTERVAL, LONG, NCLOB, NCHAR, \ - NUMBER, NVARCHAR, NVARCHAR2, RAW, TIMESTAMP, VARCHAR, \ - VARCHAR2 + from sqlalchemy.dialects.oracle import ( + BFILE, + BLOB, + CHAR, + CLOB, + DATE, + DOUBLE_PRECISION, + FLOAT, + INTERVAL, + LONG, + NCLOB, + NCHAR, + NUMBER, + NVARCHAR, + NVARCHAR2, + RAW, + TIMESTAMP, + VARCHAR, + 
VARCHAR2, + ) .. versionadded:: 1.2.19 Added :class:`_types.NCHAR` to the list of datatypes exported by the Oracle dialect. diff --git a/doc/build/dialects/postgresql.rst b/doc/build/dialects/postgresql.rst index f6b0073802..0cb3984dde 100644 --- a/doc/build/dialects/postgresql.rst +++ b/doc/build/dialects/postgresql.rst @@ -77,6 +77,7 @@ was needed in order to allow this combination to work, described below. from sqlalchemy import TypeDecorator from sqlalchemy.dialects.postgresql import ARRAY + class ArrayOfEnum(TypeDecorator): impl = ARRAY @@ -84,8 +85,7 @@ was needed in order to allow this combination to work, described below. return sa.cast(bindvalue, self) def result_processor(self, dialect, coltype): - super_rp = super(ArrayOfEnum, self).result_processor( - dialect, coltype) + super_rp = super(ArrayOfEnum, self).result_processor(dialect, coltype) def handle_raw_string(value): inner = re.match(r"^{(.*)}$", value).group(1) @@ -95,6 +95,7 @@ was needed in order to allow this combination to work, described below. if value is None: return None return super_rp(handle_raw_string(value)) + return process E.g.:: @@ -132,9 +133,10 @@ the result set correctly without any special steps. E.g.:: Table( - 'mydata', metadata, - Column('id', Integer, primary_key=True), - Column('data', CastingArray(JSONB)) + "mydata", + metadata, + Column("id", Integer, primary_key=True), + Column("data", CastingArray(JSONB)), ) .. _postgresql_ranges: @@ -156,52 +158,53 @@ values or by using the :class:`_postgresql.Range` data object. E.g. 
an example of a fully typed model using the :class:`_postgresql.TSRANGE` datatype:: - from datetime import datetime + from datetime import datetime - from sqlalchemy.dialects.postgresql import Range - from sqlalchemy.dialects.postgresql import TSRANGE - from sqlalchemy.orm import DeclarativeBase - from sqlalchemy.orm import Mapped - from sqlalchemy.orm import mapped_column + from sqlalchemy.dialects.postgresql import Range + from sqlalchemy.dialects.postgresql import TSRANGE + from sqlalchemy.orm import DeclarativeBase + from sqlalchemy.orm import Mapped + from sqlalchemy.orm import mapped_column + + + class Base(DeclarativeBase): + pass - class Base(DeclarativeBase): - pass - class RoomBooking(Base): + class RoomBooking(Base): - __tablename__ = 'room_booking' + __tablename__ = "room_booking" - id: Mapped[int] = mapped_column(primary_key=True) - room: Mapped[str] - during: Mapped[Range[datetime]] = mapped_column(TSRANGE) + id: Mapped[int] = mapped_column(primary_key=True) + room: Mapped[str] + during: Mapped[Range[datetime]] = mapped_column(TSRANGE) To represent data for the ``during`` column above, the :class:`_postgresql.Range` type is a simple dataclass that will represent the bounds of the range. 
Below illustrates an INSERT of a row into the above ``room_booking`` table:: - from sqlalchemy import create_engine - from sqlalchemy.orm import Session + from sqlalchemy import create_engine + from sqlalchemy.orm import Session - engine = create_engine("postgresql+psycopg://scott:tiger@pg14/dbname") + engine = create_engine("postgresql+psycopg://scott:tiger@pg14/dbname") - Base.metadata.create_all(engine) + Base.metadata.create_all(engine) - with Session(engine) as session: - booking = RoomBooking( - room="101", - during=Range(datetime(2013, 3, 23), datetime(2013, 3, 25)) - ) - session.add(booking) - session.commit() + with Session(engine) as session: + booking = RoomBooking( + room="101", during=Range(datetime(2013, 3, 23), datetime(2013, 3, 25)) + ) + session.add(booking) + session.commit() Selecting from any range column will also return :class:`_postgresql.Range` objects as indicated:: - from sqlalchemy import select + from sqlalchemy import select - with Session(engine) as session: - for row in session.execute(select(RoomBooking.during)): - print(row) + with Session(engine) as session: + for row in session.execute(select(RoomBooking.during)): + print(row) The available range datatypes are as follows: @@ -236,12 +239,14 @@ datatype:: from sqlalchemy.orm import Mapped from sqlalchemy.orm import mapped_column + class Base(DeclarativeBase): pass + class EventCalendar(Base): - __tablename__ = 'event_calendar' + __tablename__ = "event_calendar" id: Mapped[int] = mapped_column(primary_key=True) event_name: Mapped[str] @@ -260,11 +265,11 @@ Illustrating insertion and selecting of a record:: with Session(engine) as session: calendar = EventCalendar( event_name="SQLAlchemy Tutorial Sessions", - in_session_periods= [ + in_session_periods=[ Range(datetime(2013, 3, 23), datetime(2013, 3, 25)), Range(datetime(2013, 4, 12), datetime(2013, 4, 15)), Range(datetime(2013, 5, 9), datetime(2013, 5, 12)), - ] + ], ) session.add(calendar) session.commit() @@ -298,12 +303,43 @@ As 
with all SQLAlchemy dialects, all UPPERCASE types that are known to be valid with PostgreSQL are importable from the top level dialect, whether they originate from :mod:`sqlalchemy.types` or from the local dialect:: - from sqlalchemy.dialects.postgresql import \ - ARRAY, BIGINT, BIT, BOOLEAN, BYTEA, CHAR, CIDR, DATE, \ - DOUBLE_PRECISION, ENUM, FLOAT, HSTORE, INET, INTEGER, \ - INTERVAL, JSON, JSONB, MACADDR, MONEY, NUMERIC, OID, REAL, SMALLINT, TEXT, \ - TIME, TIMESTAMP, UUID, VARCHAR, INT4RANGE, INT8RANGE, NUMRANGE, \ - DATERANGE, TSRANGE, TSTZRANGE, TSVECTOR + from sqlalchemy.dialects.postgresql import ( + ARRAY, + BIGINT, + BIT, + BOOLEAN, + BYTEA, + CHAR, + CIDR, + DATE, + DOUBLE_PRECISION, + ENUM, + FLOAT, + HSTORE, + INET, + INTEGER, + INTERVAL, + JSON, + JSONB, + MACADDR, + MONEY, + NUMERIC, + OID, + REAL, + SMALLINT, + TEXT, + TIME, + TIMESTAMP, + UUID, + VARCHAR, + INT4RANGE, + INT8RANGE, + NUMRANGE, + DATERANGE, + TSRANGE, + TSTZRANGE, + TSVECTOR, + ) Types which are specific to PostgreSQL, or have PostgreSQL-specific construction arguments, are as follows: @@ -441,18 +477,17 @@ SQLAlchemy supports PostgreSQL EXCLUDE constraints via the For example:: - from sqlalchemy.dialects.postgresql import ExcludeConstraint, TSRANGE + from sqlalchemy.dialects.postgresql import ExcludeConstraint, TSRANGE + - class RoomBooking(Base): + class RoomBooking(Base): - __tablename__ = 'room_booking' + __tablename__ = "room_booking" - room = Column(Integer(), primary_key=True) - during = Column(TSRANGE()) + room = Column(Integer(), primary_key=True) + during = Column(TSRANGE()) - __table_args__ = ( - ExcludeConstraint(('room', '='), ('during', '&&')), - ) + __table_args__ = (ExcludeConstraint(("room", "="), ("during", "&&")),) PostgreSQL DML Constructs ------------------------- diff --git a/doc/build/dialects/sqlite.rst b/doc/build/dialects/sqlite.rst index 6d40daf5fe..d25301fa53 100644 --- a/doc/build/dialects/sqlite.rst +++ b/doc/build/dialects/sqlite.rst @@ -12,10 +12,23 
@@ As with all SQLAlchemy dialects, all UPPERCASE types that are known to be valid with SQLite are importable from the top level dialect, whether they originate from :mod:`sqlalchemy.types` or from the local dialect:: - from sqlalchemy.dialects.sqlite import \ - BLOB, BOOLEAN, CHAR, DATE, DATETIME, DECIMAL, FLOAT, \ - INTEGER, NUMERIC, JSON, SMALLINT, TEXT, TIME, TIMESTAMP, \ - VARCHAR + from sqlalchemy.dialects.sqlite import ( + BLOB, + BOOLEAN, + CHAR, + DATE, + DATETIME, + DECIMAL, + FLOAT, + INTEGER, + NUMERIC, + JSON, + SMALLINT, + TEXT, + TIME, + TIMESTAMP, + VARCHAR, + ) .. module:: sqlalchemy.dialects.sqlite diff --git a/doc/build/errors.rst b/doc/build/errors.rst index 64e30bf593..bb49138dbd 100644 --- a/doc/build/errors.rst +++ b/doc/build/errors.rst @@ -441,7 +441,7 @@ Normally, a Core SQL construct or ORM :class:`_query.Query` object can be string directly, such as when we use ``print()``:: >>> from sqlalchemy import column - >>> print(column('x') == 5) + >>> print(column("x") == 5) x = :x_1 When the above SQL expression is stringified, the :class:`.StrSQLCompiler` @@ -455,11 +455,9 @@ to turn into a string, such as the PostgreSQL >>> from sqlalchemy.dialects.postgresql import insert >>> from sqlalchemy import table, column - >>> my_table = table('my_table', column('x'), column('y')) - >>> insert_stmt = insert(my_table).values(x='foo') - >>> insert_stmt = insert_stmt.on_conflict_do_nothing( - ... index_elements=['y'] - ... 
) + >>> my_table = table("my_table", column("x"), column("y")) + >>> insert_stmt = insert(my_table).values(x="foo") + >>> insert_stmt = insert_stmt.on_conflict_do_nothing(index_elements=["y"]) >>> print(insert_stmt) Traceback (most recent call last): @@ -501,14 +499,12 @@ This often occurs when attempting to use a :func:`.column_property` or declarative such as:: class Bar(Base): - __tablename__ = 'bar' + __tablename__ = "bar" id = Column(Integer, primary_key=True) cprop = deferred(Column(Integer)) - __table_args__ = ( - CheckConstraint(cprop > 5), - ) + __table_args__ = (CheckConstraint(cprop > 5),) Above, the ``cprop`` attribute is used inline before it has been mapped, however this ``cprop`` attribute is not a :class:`_schema.Column`, @@ -527,16 +523,12 @@ The solution is to access the :class:`_schema.Column` directly using the :attr:`.ColumnProperty.expression` attribute:: class Bar(Base): - __tablename__ = 'bar' + __tablename__ = "bar" id = Column(Integer, primary_key=True) cprop = deferred(Column(Integer)) - __table_args__ = ( - CheckConstraint(cprop.expression > 5), - ) - - + __table_args__ = (CheckConstraint(cprop.expression > 5),) .. _error_cd3x: @@ -547,14 +539,14 @@ This error occurs when a statement makes use of :func:`.bindparam` either implicitly or explicitly and does not provide a value when the statement is executed:: - stmt = select(table.c.column).where(table.c.id == bindparam('my_param')) + stmt = select(table.c.column).where(table.c.id == bindparam("my_param")) - result = conn.execute(stmt) + result = conn.execute(stmt) Above, no value has been provided for the parameter "my_param". 
The correct approach is to provide a value:: - result = conn.execute(stmt, my_param=12) + result = conn.execute(stmt, my_param=12) When the message takes the form "a value is required for bind parameter in parameter group ", the message is referring to the "executemany" style @@ -570,21 +562,19 @@ the final string format of the statement which will be used for each set of parameters in the list. As the second entry does not contain "b", this error is generated:: - m = MetaData() - t = Table( - 't', m, - Column('a', Integer), - Column('b', Integer), - Column('c', Integer) - ) - - e.execute( - t.insert(), [ - {"a": 1, "b": 2, "c": 3}, - {"a": 2, "c": 4}, - {"a": 3, "b": 4, "c": 5}, - ] - ) + m = MetaData() + t = Table("t", m, Column("a", Integer), Column("b", Integer), Column("c", Integer)) + + e.execute( + t.insert(), + [ + {"a": 1, "b": 2, "c": 3}, + {"a": 2, "c": 4}, + {"a": 3, "b": 4, "c": 5}, + ], + ) + +.. code-block:: sqlalchemy.exc.StatementError: (sqlalchemy.exc.InvalidRequestError) A value is required for bind parameter 'b', in parameter group 1 @@ -593,13 +583,14 @@ this error is generated:: Since "b" is required, pass it as ``None`` so that the INSERT may proceed:: - e.execute( - t.insert(), [ - {"a": 1, "b": 2, "c": 3}, - {"a": 2, "b": None, "c": 4}, - {"a": 3, "b": 4, "c": 5}, - ] - ) + e.execute( + t.insert(), + [ + {"a": 1, "b": 2, "c": 3}, + {"a": 2, "b": None, "c": 4}, + {"a": 3, "b": 4, "c": 5}, + ], + ) .. seealso:: @@ -620,12 +611,7 @@ Core and the full rationale is discussed at :ref:`change_4617`. Given an example as:: m = MetaData() - t = Table( - 't', m, - Column('a', Integer), - Column('b', Integer), - Column('c', Integer) - ) + t = Table("t", m, Column("a", Integer), Column("b", Integer), Column("c", Integer)) stmt = select(t) Above, ``stmt`` represents a SELECT statement. 
The error is produced when we want @@ -678,10 +664,12 @@ construct:: a1 = Address.__table__ - q = s.query(User).\ - join(a1, User.addresses).\ - filter(Address.email_address == 'ed@foo.com').all() - + q = ( + s.query(User) + .join(a1, User.addresses) + .filter(Address.email_address == "ed@foo.com") + .all() + ) The above pattern also allows an arbitrary selectable, such as a Core :class:`_sql.Join` or :class:`_sql.Alias` object, @@ -690,23 +678,26 @@ Core element would need to be referred towards directly:: a1 = Address.__table__.alias() - q = s.query(User).\ - join(a1, User.addresses).\ - filter(a1.c.email_address == 'ed@foo.com').all() + q = ( + s.query(User) + .join(a1, User.addresses) + .filter(a1.c.email_address == "ed@foo.com") + .all() + ) The correct way to specify a join target is always by using the mapped class itself or an :class:`_orm.aliased` object, in the latter case using the :meth:`_orm.PropComparator.of_type` modifier to set up an alias:: # normal join to relationship entity - q = s.query(User).\ - join(User.addresses).\ - filter(Address.email_address == 'ed@foo.com') + q = s.query(User).join(User.addresses).filter(Address.email_address == "ed@foo.com") # name Address target explicitly, not necessary but legal - q = s.query(User).\ - join(Address, User.addresses).\ - filter(Address.email_address == 'ed@foo.com') + q = ( + s.query(User) + .join(Address, User.addresses) + .filter(Address.email_address == "ed@foo.com") + ) Join to an alias:: @@ -715,15 +706,14 @@ Join to an alias:: a1 = aliased(Address) # of_type() form; recommended - q = s.query(User).\ - join(User.addresses.of_type(a1)).\ - filter(a1.email_address == 'ed@foo.com') + q = ( + s.query(User) + .join(User.addresses.of_type(a1)) + .filter(a1.email_address == "ed@foo.com") + ) # target, onclause form - q = s.query(User).\ - join(a1, User.addresses).\ - filter(a1.email_address == 'ed@foo.com') - + q = s.query(User).join(a1, User.addresses).filter(a1.email_address == "ed@foo.com") .. 
_error_xaj2: @@ -741,7 +731,7 @@ alias to one side or the other; SQLAlchemy applies an alias to the right side of the join. For example given a joined inheritance mapping as:: class Employee(Base): - __tablename__ = 'employee' + __tablename__ = "employee" id = Column(Integer, primary_key=True) manager_id = Column(ForeignKey("manager.id")) name = Column(String(50)) @@ -750,17 +740,18 @@ of the join. For example given a joined inheritance mapping as:: reports_to = relationship("Manager", foreign_keys=manager_id) __mapper_args__ = { - 'polymorphic_identity':'employee', - 'polymorphic_on':type, + "polymorphic_identity": "employee", + "polymorphic_on": type, } + class Manager(Employee): - __tablename__ = 'manager' - id = Column(Integer, ForeignKey('employee.id'), primary_key=True) + __tablename__ = "manager" + id = Column(Integer, ForeignKey("employee.id"), primary_key=True) __mapper_args__ = { - 'polymorphic_identity':'manager', - 'inherit_condition': id == Employee.id + "polymorphic_identity": "manager", + "inherit_condition": id == Employee.id, } The above mapping includes a relationship between the ``Employee`` and @@ -824,10 +815,10 @@ embedding the join into a new subquery: If we then wanted to use :func:`_orm.contains_eager` to populate the ``reports_to`` attribute, we refer to the alias:: - >>> stmt =select(Employee).join( - ... Employee.reports_to.of_type(manager_alias) - ... ).options( - ... contains_eager(Employee.reports_to.of_type(manager_alias)) + >>> stmt = ( + ... select(Employee) + ... .join(Employee.reports_to.of_type(manager_alias)) + ... .options(contains_eager(Employee.reports_to.of_type(manager_alias))) ... 
) Without using the explicit :func:`_orm.aliased` object, in some more nested @@ -960,6 +951,7 @@ is set on a many-to-one or many-to-many relationship, such as:: # configuration step occurs a = relationship("A", back_populates="bs", cascade="all, delete-orphan") + configure_mappers() Above, the "delete-orphan" setting on ``B.a`` indicates the intent that @@ -1175,17 +1167,17 @@ silence each warning. For the typical example that's missing :paramref:`_orm.relationship.back_populates`, given the following mapping:: - class Parent(Base): - __tablename__ = "parent" - id = Column(Integer, primary_key=True) - children = relationship("Child") + class Parent(Base): + __tablename__ = "parent" + id = Column(Integer, primary_key=True) + children = relationship("Child") - class Child(Base): - __tablename__ = "child" - id = Column(Integer, primary_key=True) - parent_id = Column(ForeignKey("parent.id")) - parent = relationship("Parent") + class Child(Base): + __tablename__ = "child" + id = Column(Integer, primary_key=True) + parent_id = Column(ForeignKey("parent.id")) + parent = relationship("Parent") The above mapping will generate warnings:: @@ -1196,16 +1188,16 @@ The relationships ``Child.parent`` and ``Parent.children`` appear to be in confl The solution is to apply :paramref:`_orm.relationship.back_populates`:: class Parent(Base): - __tablename__ = "parent" - id = Column(Integer, primary_key=True) - children = relationship("Child", back_populates="parent") + __tablename__ = "parent" + id = Column(Integer, primary_key=True) + children = relationship("Child", back_populates="parent") - class Child(Base): - __tablename__ = "child" - id = Column(Integer, primary_key=True) - parent_id = Column(ForeignKey("parent.id")) - parent = relationship("Parent", back_populates="children") + class Child(Base): + __tablename__ = "child" + id = Column(Integer, primary_key=True) + parent_id = Column(ForeignKey("parent.id")) + parent = relationship("Parent", back_populates="children") For more 
customized relationships where an "overlap" situation may be intentional and cannot be resolved, the :paramref:`_orm.relationship.overlaps` @@ -1215,29 +1207,28 @@ same underlying table that include custom :paramref:`_orm.relationship.primaryjoin` conditions that limit the related items in each case:: - class Parent(Base): - __tablename__ = "parent" - id = Column(Integer, primary_key=True) - c1 = relationship( - "Child", - primaryjoin="and_(Parent.id == Child.parent_id, Child.flag == 0)", - backref="parent", - overlaps="c2, parent" - ) - c2 = relationship( - "Child", - primaryjoin="and_(Parent.id == Child.parent_id, Child.flag == 1)", - overlaps="c1, parent" - ) - + class Parent(Base): + __tablename__ = "parent" + id = Column(Integer, primary_key=True) + c1 = relationship( + "Child", + primaryjoin="and_(Parent.id == Child.parent_id, Child.flag == 0)", + backref="parent", + overlaps="c2, parent", + ) + c2 = relationship( + "Child", + primaryjoin="and_(Parent.id == Child.parent_id, Child.flag == 1)", + overlaps="c1, parent", + ) - class Child(Base): - __tablename__ = "child" - id = Column(Integer, primary_key=True) - parent_id = Column(ForeignKey("parent.id")) - flag = Column(Integer) + class Child(Base): + __tablename__ = "child" + id = Column(Integer, primary_key=True) + parent_id = Column(ForeignKey("parent.id")) + flag = Column(Integer) Above, the ORM will know that the overlap between ``Parent.c1``, ``Parent.c2`` and ``Child.parent`` is intentional. @@ -1289,8 +1280,7 @@ the ``prebuffer_rows`` execution option may be used as follows:: # result internally pre-fetches all objects result = sess.execute( - select(User).where(User.id == 7), - execution_options={"prebuffer_rows": True} + select(User).where(User.id == 7), execution_options={"prebuffer_rows": True} ) # context manager is closed, so session_obj above is closed, identity @@ -1576,17 +1566,17 @@ SQLAlchemy pattern present only in 1.x versions. 
The issue occurs when one invok the :meth:`.Executable.execute` method directly off of a Core expression object that is not associated with any :class:`_engine.Engine`:: - metadata_obj = MetaData() - table = Table('t', metadata_obj, Column('q', Integer)) + metadata_obj = MetaData() + table = Table("t", metadata_obj, Column("q", Integer)) - stmt = select(table) - result = stmt.execute() # <--- raises + stmt = select(table) + result = stmt.execute() # <--- raises What the logic is expecting is that the :class:`_schema.MetaData` object has been **bound** to a :class:`_engine.Engine`:: - engine = create_engine("mysql+pymysql://user:pass@host/db") - metadata_obj = MetaData(bind=engine) + engine = create_engine("mysql+pymysql://user:pass@host/db") + metadata_obj = MetaData(bind=engine) Where above, any statement that derives from a :class:`_schema.Table` which in turn derives from that :class:`_schema.MetaData` will implicitly make use of @@ -1596,12 +1586,12 @@ Note that the concept of bound metadata is **not present in SQLAlchemy 2.0**. The correct way to invoke statements is via the :meth:`_engine.Connection.execute` method of a :class:`_engine.Connection`:: - with engine.connect() as conn: - result = conn.execute(stmt) + with engine.connect() as conn: + result = conn.execute(stmt) When using the ORM, a similar facility is available via the :class:`.Session`:: - result = session.execute(stmt) + result = session.execute(stmt) .. seealso:: diff --git a/doc/build/faq/connections.rst b/doc/build/faq/connections.rst index 4a21318f40..946a71018b 100644 --- a/doc/build/faq/connections.rst +++ b/doc/build/faq/connections.rst @@ -27,8 +27,9 @@ How do I pass custom connect arguments to my database API? 
The :func:`_sa.create_engine` call accepts additional arguments either directly via the ``connect_args`` keyword argument:: - e = create_engine("mysql+mysqldb://scott:tiger@localhost/test", - connect_args={"encoding": "utf8"}) + e = create_engine( + "mysql+mysqldb://scott:tiger@localhost/test", connect_args={"encoding": "utf8"} + ) Or for basic string and integer arguments, they can usually be specified in the query string of the URL:: @@ -244,59 +245,57 @@ A connection will transparently reconnect for single-parameter and no-parameter statement executions:: - import time - - from sqlalchemy import event - - - def reconnecting_engine(engine, num_retries, retry_interval): - def _run_with_retries(fn, context, cursor_obj, statement, *arg, **kw): - for retry in range(num_retries + 1): - try: - fn(cursor_obj, statement, context=context, *arg) - except engine.dialect.dbapi.Error as raw_dbapi_err: - connection = context.root_connection - if engine.dialect.is_disconnect( - raw_dbapi_err, connection, cursor_obj - ): - if retry > num_retries: - raise - engine.logger.error( - "disconnection error, retrying operation", - exc_info=True, - ) - connection.invalidate() - - # use SQLAlchemy 2.0 API if available - if hasattr(connection, "rollback"): - connection.rollback() - else: - trans = connection.get_transaction() - if trans: - trans.rollback() - - time.sleep(retry_interval) - context.cursor = cursor_obj = connection.connection.cursor() - else: - raise - else: - return True - - e = engine.execution_options(isolation_level="AUTOCOMMIT") - - @event.listens_for(e, "do_execute_no_params") - def do_execute_no_params(cursor_obj, statement, context): - return _run_with_retries( - context.dialect.do_execute_no_params, context, cursor_obj, statement - ) - - @event.listens_for(e, "do_execute") - def do_execute(cursor_obj, statement, parameters, context): - return _run_with_retries( - context.dialect.do_execute, context, cursor_obj, statement, parameters - ) - - return e + import time + + 
from sqlalchemy import event + + + def reconnecting_engine(engine, num_retries, retry_interval): + def _run_with_retries(fn, context, cursor_obj, statement, *arg, **kw): + for retry in range(num_retries + 1): + try: + fn(cursor_obj, statement, context=context, *arg) + except engine.dialect.dbapi.Error as raw_dbapi_err: + connection = context.root_connection + if engine.dialect.is_disconnect(raw_dbapi_err, connection, cursor_obj): + if retry > num_retries: + raise + engine.logger.error( + "disconnection error, retrying operation", + exc_info=True, + ) + connection.invalidate() + + # use SQLAlchemy 2.0 API if available + if hasattr(connection, "rollback"): + connection.rollback() + else: + trans = connection.get_transaction() + if trans: + trans.rollback() + + time.sleep(retry_interval) + context.cursor = cursor_obj = connection.connection.cursor() + else: + raise + else: + return True + + e = engine.execution_options(isolation_level="AUTOCOMMIT") + + @event.listens_for(e, "do_execute_no_params") + def do_execute_no_params(cursor_obj, statement, context): + return _run_with_retries( + context.dialect.do_execute_no_params, context, cursor_obj, statement + ) + + @event.listens_for(e, "do_execute") + def do_execute(cursor_obj, statement, parameters, context): + return _run_with_retries( + context.dialect.do_execute, context, cursor_obj, statement, parameters + ) + + return e Given the above recipe, a reconnection mid-transaction may be demonstrated using the following proof of concept script. Once run, it will emit a @@ -316,9 +315,7 @@ using the following proof of concept script. 
Once run, it will emit a time.sleep(5) e = reconnecting_engine( - create_engine( - "mysql+mysqldb://scott:tiger@localhost/test", echo_pool=True - ), + create_engine("mysql+mysqldb://scott:tiger@localhost/test", echo_pool=True), num_retries=5, retry_interval=2, ) @@ -374,7 +371,10 @@ configured using ``reset_on_return``:: from sqlalchemy import create_engine from sqlalchemy.pool import QueuePool - engine = create_engine('mysql+mysqldb://scott:tiger@localhost/myisam_database', pool=QueuePool(reset_on_return=False)) + engine = create_engine( + "mysql+mysqldb://scott:tiger@localhost/myisam_database", + pool=QueuePool(reset_on_return=False), + ) I'm on SQL Server - how do I turn those ROLLBACKs into COMMITs? ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -383,8 +383,9 @@ I'm on SQL Server - how do I turn those ROLLBACKs into COMMITs? to ``True``, ``False``, and ``None``. Setting to ``commit`` will cause a COMMIT as any connection is returned to the pool:: - engine = create_engine('mssql+pyodbc://scott:tiger@mydsn', pool=QueuePool(reset_on_return='commit')) - + engine = create_engine( + "mssql+pyodbc://scott:tiger@mydsn", pool=QueuePool(reset_on_return="commit") + ) I am using multiple connections with a SQLite database (typically to test transaction operation), and my test program is not working! 
---------------------------------------------------------------------------------------------------------------------------------------------------------- diff --git a/doc/build/faq/metadata_schema.rst b/doc/build/faq/metadata_schema.rst index ab879b0aec..7dcb8fa417 100644 --- a/doc/build/faq/metadata_schema.rst +++ b/doc/build/faq/metadata_schema.rst @@ -88,9 +88,12 @@ metadata creation sequence as a string, using this recipe:: from sqlalchemy import create_mock_engine + def dump(sql, *multiparams, **params): print(sql.compile(dialect=engine.dialect)) - engine = create_mock_engine('postgresql+psycopg2://', dump) + + + engine = create_mock_engine("postgresql+psycopg2://", dump) metadata_obj.create_all(engine, checkfirst=False) The `Alembic `_ tool also supports diff --git a/doc/build/faq/ormconfiguration.rst b/doc/build/faq/ormconfiguration.rst index d33046685f..9962b25970 100644 --- a/doc/build/faq/ormconfiguration.rst +++ b/doc/build/faq/ormconfiguration.rst @@ -48,7 +48,7 @@ applied directly to the mapper:: class SomeClass(Base): __table__ = some_table_with_no_pk __mapper_args__ = { - 'primary_key':[some_table_with_no_pk.c.uid, some_table_with_no_pk.c.bar] + "primary_key": [some_table_with_no_pk.c.uid, some_table_with_no_pk.c.bar] } Better yet is when using fully declared table metadata, use the ``primary_key=True`` @@ -142,16 +142,18 @@ Given the example as follows:: Base = declarative_base() + class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) + class B(A): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) - a_id = Column(Integer, ForeignKey('a.id')) + a_id = Column(Integer, ForeignKey("a.id")) As of SQLAlchemy version 0.9.5, the above condition is detected, and will warn that the ``id`` column of ``A`` and ``B`` is being combined under @@ -161,33 +163,33 @@ that a ``B`` object's primary key will always mirror that of its ``A``. 
A mapping which resolves this is as follows:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) + class B(A): - __tablename__ = 'b' + __tablename__ = "b" - b_id = Column('id', Integer, primary_key=True) - a_id = Column(Integer, ForeignKey('a.id')) + b_id = Column("id", Integer, primary_key=True) + a_id = Column(Integer, ForeignKey("a.id")) Suppose we did want ``A.id`` and ``B.id`` to be mirrors of each other, despite the fact that ``B.a_id`` is where ``A.id`` is related. We could combine them together using :func:`.column_property`:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) + class B(A): - __tablename__ = 'b' + __tablename__ = "b" # probably not what you want, but this is a demonstration id = column_property(Column(Integer, primary_key=True), A.id) - a_id = Column(Integer, ForeignKey('a.id')) - - + a_id = Column(Integer, ForeignKey("a.id")) I'm using Declarative and setting primaryjoin/secondaryjoin using an ``and_()`` or ``or_()``, and I am getting an error message about foreign keys. ------------------------------------------------------------------------------------------------------------------------------------------------------------------ @@ -197,21 +199,27 @@ Are you doing this?:: class MyClass(Base): # .... - foo = relationship("Dest", primaryjoin=and_("MyClass.id==Dest.foo_id", "MyClass.foo==Dest.bar")) + foo = relationship( + "Dest", primaryjoin=and_("MyClass.id==Dest.foo_id", "MyClass.foo==Dest.bar") + ) That's an ``and_()`` of two string expressions, which SQLAlchemy cannot apply any mapping towards. Declarative allows :func:`_orm.relationship` arguments to be specified as strings, which are converted into expression objects using ``eval()``. 
But this doesn't occur inside of an ``and_()`` expression - it's a special operation declarative applies only to the *entirety* of what's passed to primaryjoin or other arguments as a string:: class MyClass(Base): # .... - foo = relationship("Dest", primaryjoin="and_(MyClass.id==Dest.foo_id, MyClass.foo==Dest.bar)") + foo = relationship( + "Dest", primaryjoin="and_(MyClass.id==Dest.foo_id, MyClass.foo==Dest.bar)" + ) Or if the objects you need are already available, skip the strings:: class MyClass(Base): # .... - foo = relationship(Dest, primaryjoin=and_(MyClass.id==Dest.foo_id, MyClass.foo==Dest.bar)) + foo = relationship( + Dest, primaryjoin=and_(MyClass.id == Dest.foo_id, MyClass.foo == Dest.bar) + ) The same idea applies to all the other arguments, such as ``foreign_keys``:: @@ -276,7 +284,9 @@ the two queries may not see the same results: .. sourcecode:: python+sql - >>> user = session.scalars(select(User).options(subqueryload(User.addresses)).limit(1)).first() + >>> user = session.scalars( + ... select(User).options(subqueryload(User.addresses)).limit(1) + ... ).first() {opensql}-- the "main" query SELECT users.id AS users_id FROM users @@ -321,7 +331,9 @@ so that the main query always returns the same set of rows. This generally means that you should :meth:`_sql.Select.order_by` on a unique column on the table. 
The primary key is a good choice for this:: - session.scalars(select(User).options(subqueryload(User.addresses)).order_by(User.id).limit(1)).first() + session.scalars( + select(User).options(subqueryload(User.addresses)).order_by(User.id).limit(1) + ).first() Note that the :func:`_orm.joinedload` eager loader strategy does not suffer from the same problem because only one query is ever issued, so the load query diff --git a/doc/build/faq/performance.rst b/doc/build/faq/performance.rst index 5c0e399d30..1aa94e1c7f 100644 --- a/doc/build/faq/performance.rst +++ b/doc/build/faq/performance.rst @@ -215,16 +215,16 @@ using a recipe like the following:: logger = logging.getLogger("myapp.sqltime") logger.setLevel(logging.DEBUG) + @event.listens_for(Engine, "before_cursor_execute") - def before_cursor_execute(conn, cursor, statement, - parameters, context, executemany): - conn.info.setdefault('query_start_time', []).append(time.time()) + def before_cursor_execute(conn, cursor, statement, parameters, context, executemany): + conn.info.setdefault("query_start_time", []).append(time.time()) logger.debug("Start Query: %s", statement) + @event.listens_for(Engine, "after_cursor_execute") - def after_cursor_execute(conn, cursor, statement, - parameters, context, executemany): - total = time.time() - conn.info['query_start_time'].pop(-1) + def after_cursor_execute(conn, cursor, statement, parameters, context, executemany): + total = time.time() - conn.info["query_start_time"].pop(-1) logger.debug("Query Complete!") logger.debug("Total Time: %f", total) @@ -255,6 +255,7 @@ Below is a simple recipe which works profiling into a context manager:: import pstats import contextlib + @contextlib.contextmanager def profiled(): pr = cProfile.Profile() @@ -262,7 +263,7 @@ Below is a simple recipe which works profiling into a context manager:: yield pr.disable() s = io.StringIO() - ps = pstats.Stats(pr, stream=s).sort_stats('cumulative') + ps = pstats.Stats(pr, 
stream=s).sort_stats("cumulative") ps.print_stats() # uncomment this to see who's calling what # ps.print_callers() @@ -271,7 +272,7 @@ Below is a simple recipe which works profiling into a context manager:: To profile a section of code:: with profiled(): - session.scalars(select(FooClass).where(FooClass.somevalue==8)).all() + session.scalars(select(FooClass).where(FooClass.somevalue == 8)).all() The output of profiling can be used to give an idea where time is being spent. A section of profiling output looks like this:: @@ -357,12 +358,13 @@ this:: from sqlalchemy import TypeDecorator import time + class Foo(TypeDecorator): impl = String def process_result_value(self, value, thing): # intentionally add slowness for illustration purposes - time.sleep(.001) + time.sleep(0.001) return value the profiling output of this intentionally slow operation can be seen like this:: diff --git a/doc/build/faq/sessions.rst b/doc/build/faq/sessions.rst index 0f9f7575b8..2642ed5a8e 100644 --- a/doc/build/faq/sessions.rst +++ b/doc/build/faq/sessions.rst @@ -91,12 +91,14 @@ does not properly handle the exception. For example:: from sqlalchemy.orm import sessionmaker from sqlalchemy.ext.declarative import declarative_base - Base = declarative_base(create_engine('sqlite://')) + Base = declarative_base(create_engine("sqlite://")) + class Foo(Base): - __tablename__ = 'foo' + __tablename__ = "foo" id = Column(Integer, primary_key=True) + Base.metadata.create_all() session = sessionmaker()() @@ -113,7 +115,6 @@ does not properly handle the exception. For example:: # continue using session without rolling back session.commit() - The usage of the :class:`.Session` should fit within a structure similar to this:: try: @@ -186,13 +187,13 @@ point of view there is still a transaction that is now in an inactive state. 
Given a block such as:: - sess = Session() # begins a logical transaction - try: - sess.flush() + sess = Session() # begins a logical transaction + try: + sess.flush() - sess.commit() - except: - sess.rollback() + sess.commit() + except: + sess.rollback() Above, when a :class:`.Session` is first created, assuming "autocommit mode" isn't used, a logical transaction is established within the :class:`.Session`. @@ -237,7 +238,7 @@ will **deduplicate the objects based on primary key**. That is, if we for example use the ``User`` mapping described at :ref:`ormtutorial_toplevel`, and we had a SQL query like the following:: - q = session.query(User).outerjoin(User.addresses).filter(User.name == 'jack') + q = session.query(User).outerjoin(User.addresses).filter(User.name == "jack") Above, the sample data used in the tutorial has two rows in the ``addresses`` table for the ``users`` row with the name ``'jack'``, primary key value 5. @@ -257,7 +258,9 @@ This is because when the :class:`_query.Query` object returns full entities, the are **deduplicated**. This does not occur if we instead request individual columns back:: - >>> session.query(User.id, User.name).outerjoin(User.addresses).filter(User.name == 'jack').all() + >>> session.query(User.id, User.name).outerjoin(User.addresses).filter( + ... User.name == "jack" + ... 
).all() [(5, 'jack'), (5, 'jack')] There are two main reasons the :class:`_query.Query` will deduplicate: @@ -338,6 +341,7 @@ one:: print("ITER!") return iter([1, 2, 3, 4, 5]) + list(Iterates()) output:: @@ -422,7 +426,7 @@ be performed for any :term:`persistent` object using :meth:`.Session.expire`:: o = session.scalars(select(SomeClass).limit(1)).first() o.foo_id = 7 - Session.expire(o, ['foo']) # object must be persistent for this + Session.expire(o, ["foo"]) # object must be persistent for this foo_7 = session.get(Foo, 7) @@ -444,11 +448,10 @@ have meaning until the row is inserted; otherwise there is no row yet:: Session.flush() # emits INSERT # expire this because we already set .foo to None - Session.expire(o, ['foo']) + Session.expire(o, ["foo"]) assert new_obj.foo is foo_7 # now it loads - .. topic:: Attribute loading for non-persistent objects One variant on the "pending" behavior above is if we use the flag @@ -504,21 +507,21 @@ The function can be demonstrated as follows:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) bs = relationship("B", backref="a") class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) - a_id = Column(ForeignKey('a.id')) - c_id = Column(ForeignKey('c.id')) + a_id = Column(ForeignKey("a.id")) + c_id = Column(ForeignKey("c.id")) c = relationship("C", backref="bs") class C(Base): - __tablename__ = 'c' + __tablename__ = "c" id = Column(Integer, primary_key=True) diff --git a/doc/build/faq/sqlexpressions.rst b/doc/build/faq/sqlexpressions.rst index 5dcf3e96ad..eeb5be3f37 100644 --- a/doc/build/faq/sqlexpressions.rst +++ b/doc/build/faq/sqlexpressions.rst @@ -19,7 +19,7 @@ function (note the Python ``print`` function also calls ``str()`` automatically if we don't use it explicitly):: >>> from sqlalchemy import table, column, select - >>> t = table('my_table', column('x')) + >>> t = table("my_table", column("x")) >>> statement = select(t) >>> 
print(str(statement)) SELECT my_table.x @@ -31,7 +31,7 @@ The ``str()`` builtin, or an equivalent, can be invoked on ORM as:: >>> from sqlalchemy import column - >>> print(column('x') == 'some value') + >>> print(column("x") == "some value") x = :x_1 Stringifying for Specific Databases @@ -59,6 +59,7 @@ instantiate a :class:`.Dialect` object directly, as below where we use a PostgreSQL dialect:: from sqlalchemy.dialects import postgresql + print(statement.compile(dialect=postgresql.dialect())) Note that any dialect can be assembled using :func:`_sa.create_engine` itself @@ -98,7 +99,7 @@ flag, passed to ``compile_kwargs``:: from sqlalchemy.sql import table, column, select - t = table('t', column('x')) + t = table("t", column("x")) s = select(t).where(t.c.x == 5) @@ -159,12 +160,14 @@ datatype:: Base = declarative_base() + class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) data = Column(UUID) + stmt = select(A).where(A.data == uuid.uuid4()) Given the above model and statement which will compare a column to a single @@ -216,6 +219,7 @@ include: their positional order for the statement as compiled:: import re + e = create_engine("sqlite+pysqlite://") # will use qmark style, i.e. ? 
for param @@ -224,7 +228,7 @@ include: # params in positional order params = (repr(compiled.params[name]) for name in compiled.positiontup) - print(re.sub(r'\?', lambda m: next(params), str(compiled))) + print(re.sub(r"\?", lambda m: next(params), str(compiled))) The above snippet prints:: @@ -240,6 +244,7 @@ include: from sqlalchemy.ext.compiler import compiles from sqlalchemy.sql.expression import BindParameter + @compiles(BindParameter) def _render_literal_bindparam(element, compiler, use_my_literal_recipe=False, **kw): if not use_my_literal_recipe: @@ -250,6 +255,7 @@ include: # render the value directly return repr(element.value) + e = create_engine("postgresql+psycopg2://") print(stmt.compile(e, compile_kwargs={"use_my_literal_recipe": True})) @@ -265,6 +271,7 @@ include: from sqlalchemy import TypeDecorator + class UUIDStringify(TypeDecorator): impl = UUID @@ -275,6 +282,7 @@ include: or locally within the statement using :func:`_sql.type_coerce`, such as :: from sqlalchemy import type_coerce + stmt = select(A).where(type_coerce(A.data, UUIDStringify) == uuid.uuid4()) print(stmt.compile(e, compile_kwargs={"literal_binds": True})) @@ -331,7 +339,7 @@ in the same way, such as SQLite's positional form:: >>> e = create_engine("sqlite+pysqlite://") >>> compiled = stmt.compile(e, compile_kwargs={"render_postcompile": True}) >>> params = (repr(compiled.params[name]) for name in compiled.positiontup) - >>> print(re.sub(r'\?', lambda m: next(params), str(compiled))) + >>> print(re.sub(r"\?", lambda m: next(params), str(compiled))) SELECT a.id, a.data FROM a WHERE a.data IN (UUID('aa1944d6-9a5a-45d5-b8da-0ba1ef0a4f38'), UUID('a81920e6-15e2-4392-8a3c-d775ffa9ccd2'), UUID('b5574cdb-ff9b-49a3-be52-dbc89f087bfa')) @@ -414,13 +422,13 @@ I'm using op() to generate a custom operator and my parenthesis are not coming o The :meth:`.Operators.op` method allows one to create a custom database operator otherwise not known by SQLAlchemy:: - >>> 
print(column('q').op('->')(column('p'))) + >>> print(column("q").op("->")(column("p"))) q -> p However, when using it on the right side of a compound expression, it doesn't generate parenthesis as we expect:: - >>> print((column('q1') + column('q2')).op('->')(column('p'))) + >>> print((column("q1") + column("q2")).op("->")(column("p"))) q1 + q2 -> p Where above, we probably want ``(q1 + q2) -> p``. @@ -430,14 +438,14 @@ the :paramref:`.Operators.op.precedence` parameter, to a high number, where 100 is the maximum value, and the highest number used by any SQLAlchemy operator is currently 15:: - >>> print((column('q1') + column('q2')).op('->', precedence=100)(column('p'))) + >>> print((column("q1") + column("q2")).op("->", precedence=100)(column("p"))) (q1 + q2) -> p We can also usually force parenthesization around a binary expression (e.g. an expression that has left/right operands and an operator) using the :meth:`_expression.ColumnElement.self_group` method:: - >>> print((column('q1') + column('q2')).self_group().op('->')(column('p'))) + >>> print((column("q1") + column("q2")).self_group().op("->")(column("p"))) (q1 + q2) -> p Why are the parentheses rules like this? @@ -449,7 +457,7 @@ generate parenthesis based on groupings, it uses operator precedence and if the operator is known to be associative, so that parenthesis are generated minimally. Otherwise, an expression like:: - column('a') & column('b') & column('c') & column('d') + column("a") & column("b") & column("c") & column("d") would produce:: @@ -459,7 +467,7 @@ which is fine but would probably annoy people (and be reported as a bug). In other cases, it leads to things that are more likely to confuse databases or at the very least readability, such as:: - column('q', ARRAY(Integer, dimensions=2))[5][6] + column("q", ARRAY(Integer, dimensions=2))[5][6] would produce:: @@ -476,16 +484,16 @@ What if we defaulted the value of :paramref:`.Operators.op.precedence` to 100, e.g. the highest? 
Then this expression makes more parenthesis, but is otherwise OK, that is, these two are equivalent:: - >>> print((column('q') - column('y')).op('+', precedence=100)(column('z'))) + >>> print((column("q") - column("y")).op("+", precedence=100)(column("z"))) (q - y) + z - >>> print((column('q') - column('y')).op('+')(column('z'))) + >>> print((column("q") - column("y")).op("+")(column("z"))) q - y + z but these two are not:: - >>> print(column('q') - column('y').op('+', precedence=100)(column('z'))) + >>> print(column("q") - column("y").op("+", precedence=100)(column("z"))) q - y + z - >>> print(column('q') - column('y').op('+')(column('z'))) + >>> print(column("q") - column("y").op("+")(column("z"))) q - (y + z) For now, it's not clear that as long as we are doing parenthesization based on diff --git a/doc/build/faq/thirdparty.rst b/doc/build/faq/thirdparty.rst index 27c8fbf743..4b8bb7c556 100644 --- a/doc/build/faq/thirdparty.rst +++ b/doc/build/faq/thirdparty.rst @@ -28,17 +28,18 @@ by queries. This may be illustrated from code based on the following:: import numpy + class A(Base): __tablename__ = "a" id = Column(Integer, primary_key=True) data = Column(Integer) + # .. 
later session.add(A(data=numpy.int64(10))) session.commit() - In the latter case, the issue is due to the ``numpy.int64`` datatype overriding the ``__eq__()`` method and enforcing that the return type of an expression is ``numpy.True`` or ``numpy.False``, which breaks SQLAlchemy's expression @@ -47,9 +48,9 @@ expressions from Python equality comparisons:: >>> import numpy >>> from sqlalchemy import column, Integer - >>> print(column('x', Integer) == numpy.int64(10)) # works + >>> print(column("x", Integer) == numpy.int64(10)) # works x = :x_1 - >>> print(numpy.int64(10) == column('x', Integer)) # breaks + >>> print(numpy.int64(10) == column("x", Integer)) # breaks False These errors are both solved in the same way, which is that special numpy @@ -61,9 +62,7 @@ applying the Python ``int()`` function to types like ``numpy.int32`` and session.add(A(data=int(data))) - result = session.execute( - select(A.data).where(int(data) == A.data) - ) + result = session.execute(select(A.data).where(int(data) == A.data)) session.commit() @@ -72,4 +71,4 @@ applying the Python ``int()`` function to types like ``numpy.int32`` and SQL expression for WHERE/HAVING role expected, got True ------------------------------------------------------- -See :ref:`numpy_int64`. \ No newline at end of file +See :ref:`numpy_int64`. diff --git a/doc/build/intro.rst b/doc/build/intro.rst index dcc672a9d4..a93dc767ed 100644 --- a/doc/build/intro.rst +++ b/doc/build/intro.rst @@ -240,7 +240,7 @@ Python prompt like this: .. 
sourcecode:: python+sql >>> import sqlalchemy - >>> sqlalchemy.__version__ # doctest: +SKIP + >>> sqlalchemy.__version__ # doctest: +SKIP 2.0.0 Next Steps diff --git a/doc/build/orm/backref.rst b/doc/build/orm/backref.rst index 382f82241c..01f4c90736 100644 --- a/doc/build/orm/backref.rst +++ b/doc/build/orm/backref.rst @@ -42,6 +42,7 @@ Starting with the following example:: from sqlalchemy import Column, ForeignKey, Integer, String from sqlalchemy.orm import DeclarativeBase, relationship + class Base(DeclarativeBase): pass @@ -68,6 +69,7 @@ it's equivalent to the following:: from sqlalchemy import Column, ForeignKey, Integer, String from sqlalchemy.orm import DeclarativeBase, relationship + class Base(DeclarativeBase): pass @@ -109,6 +111,7 @@ which also includes the :paramref:`_orm.relationship.backref` keyword:: from sqlalchemy import Column, ForeignKey, Integer, String from sqlalchemy.orm import DeclarativeBase, relationship + class Base(DeclarativeBase): pass diff --git a/doc/build/orm/basic_relationships.rst b/doc/build/orm/basic_relationships.rst index 7589539dd3..4d0c56e593 100644 --- a/doc/build/orm/basic_relationships.rst +++ b/doc/build/orm/basic_relationships.rst @@ -18,10 +18,10 @@ The setup for each of the following sections is as follows:: from sqlalchemy.orm import DeclarativeBase from sqlalchemy.orm import relationship + class Base(DeclarativeBase): pass - Declarative vs. 
Imperative Forms ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -74,17 +74,15 @@ nonetheless remains preferred by a vocal minority of users), the above configuration looks like:: registry.map_imperatively( - Parent, parent_table, properties={ - "children": relationship( - "Child", back_populates="parent" - ) - } + Parent, + parent_table, + properties={"children": relationship("Child", back_populates="parent")}, ) registry.map_imperatively( - Child, child_table, properties={ - "parent": relationship("Parent", back_populates="children") - } + Child, + child_table, + properties={"parent": relationship("Parent", back_populates="children")}, ) Additionally, the default collection style for non-annotated mappings is @@ -156,6 +154,7 @@ relationship is generated implicitly:: id: Mapped[int] = mapped_column(primary_key=True) children: Mapped[list["Child"]] = relationship(backref="parent") + class Child(Base): __tablename__ = "child" @@ -345,7 +344,6 @@ Declarative configuration below:: parent_id = mapped_column(ForeignKey("parent.id")) parent = relationship("Parent", back_populates="child") - .. 
_relationships_many_to_many: Many To Many @@ -371,9 +369,11 @@ with which to link:: from sqlalchemy.orm import DeclarativeBase from sqlalchemy.orm import relationship + class Base(DeclarativeBase): pass + # note for a Core table, we use the sqlalchemy.Column construct, # not sqlalchemy.orm.mapped_column association_table = Table( @@ -433,9 +433,11 @@ for each :func:`_orm.relationship` specify the common association table:: from sqlalchemy.orm import DeclarativeBase from sqlalchemy.orm import relationship + class Base(DeclarativeBase): pass + association_table = Table( "association", Base.metadata, @@ -461,7 +463,6 @@ for each :func:`_orm.relationship` specify the common association table:: secondary=association_table, back_populates="children" ) - When using the :paramref:`_orm.relationship.backref` parameter instead of :paramref:`_orm.relationship.back_populates`, the backref will automatically use the same :paramref:`_orm.relationship.secondary` argument for the @@ -478,9 +479,11 @@ reverse relationship:: from sqlalchemy.orm import DeclarativeBase from sqlalchemy.orm import relationship + class Base(DeclarativeBase): pass + association_table = Table( "association", Base.metadata, @@ -636,9 +639,11 @@ from ``Parent`` to ``Child`` makes explicit use of ``Association``:: from sqlalchemy.orm import DeclarativeBase from sqlalchemy.orm import relationship + class Base(DeclarativeBase): pass + class Association(Base): __tablename__ = "association" left_id: Mapped[int] = mapped_column(ForeignKey("left.id"), primary_key=True) @@ -669,9 +674,11 @@ constructs, linked to the existing ones using :paramref:`_orm.relationship.back_ from sqlalchemy.orm import DeclarativeBase from sqlalchemy.orm import relationship + class Base(DeclarativeBase): pass + class Association(Base): __tablename__ = "association" left_id: Mapped[int] = mapped_column(ForeignKey("left.id"), primary_key=True) @@ -764,9 +771,11 @@ and ``Child.parent_associations -> Association.parent``:: from sqlalchemy.orm 
import DeclarativeBase from sqlalchemy.orm import relationship + class Base(DeclarativeBase): pass + class Association(Base): __tablename__ = "association" @@ -780,16 +789,22 @@ and ``Child.parent_associations -> Association.parent``:: # association between Assocation -> Parent parent: Mapped["Parent"] = relationship(back_populates="child_associations") + class Parent(Base): __tablename__ = "left" id: Mapped[int] = mapped_column(primary_key=True) # many-to-many relationship to Child, bypassing the `Association` class - children: Mapped[list["Child"]] = relationship(secondary="association", back_populates="parents") + children: Mapped[list["Child"]] = relationship( + secondary="association", back_populates="parents" + ) # association between Parent -> Association -> Child - child_associations: Mapped[list["Association"]] = relationship(back_populates="parent") + child_associations: Mapped[list["Association"]] = relationship( + back_populates="parent" + ) + class Child(Base): __tablename__ = "right" @@ -797,10 +812,14 @@ and ``Child.parent_associations -> Association.parent``:: id: Mapped[int] = mapped_column(primary_key=True) # many-to-many relationship to Parent, bypassing the `Association` class - parents: Mapped[list["Parent"]] = relationship(secondary="association", back_populates="children") + parents: Mapped[list["Parent"]] = relationship( + secondary="association", back_populates="children" + ) # association between Child -> Association -> Parent - parent_associations: Mapped[list["Association"]] = relationship(back_populates="child") + parent_associations: Mapped[list["Association"]] = relationship( + back_populates="child" + ) When using this ORM model to make changes, changes made to ``Parent.children`` will not be coordinated with changes made to @@ -851,7 +870,10 @@ additional association columns, as below:: ) # association between Parent -> Association -> Child - child_associations: Mapped[list["Association"]] = relationship(back_populates="parent") + 
child_associations: Mapped[list["Association"]] = relationship( + back_populates="parent" + ) + class Child(Base): __tablename__ = "right" @@ -864,7 +886,9 @@ additional association columns, as below:: ) # association between Child -> Association -> Parent - parent_associations: Mapped[list["Association"]] = relationship(back_populates="child") + parent_associations: Mapped[list["Association"]] = relationship( + back_populates="child" + ) The above mapping will not write any changes to ``Parent.children`` or ``Child.parents`` to the database, preventing conflicting writes. However, reads @@ -915,15 +939,15 @@ or Imperative mappings, a string name is also supported directly by the :func:`_orm.relationship` construct:: registry.map_imperatively( - Parent, parent_table, properties={ - "children": relationship("Child", back_populates="parent") - } + Parent, + parent_table, + properties={"children": relationship("Child", back_populates="parent")}, ) registry.map_imperatively( - Child, child_table, properties={ - "parent": relationship("Parent", back_populates="children") - } + Child, + child_table, + properties={"parent": relationship("Parent", back_populates="children")}, ) These string names are resolved into classes in the mapper resolution stage, @@ -993,7 +1017,6 @@ name within the :class:`_orm.registry`:: primaryjoin="myapp.mymodel.Parent.id == myapp.mymodel.Child.parent_id", ) - The qualified path can be any partial path that removes ambiguity between the names. 
For example, to disambiguate between ``myapp.model1.Child`` and ``myapp.model2.Child``, @@ -1136,8 +1159,7 @@ using a lambda as:: id: Mapped[int] = mapped_column(primary_key=True) children: Mapped[list["Child"]] = relationship( - "Child", - secondary=lambda: association_table + "Child", secondary=lambda: association_table ) Or to illustrate locating the same :class:`.Table` object by name, @@ -1152,7 +1174,6 @@ the :class:`.MetaData` collection:: id: Mapped[int] = mapped_column(primary_key=True) children: Mapped[list["Child"]] = relationship(secondary="association") - .. warning:: When passed as a string, :paramref:`_orm.relationship.secondary` argument is interpreted using Python's ``eval()`` function, even though it's typically the name of a table. diff --git a/doc/build/orm/cascades.rst b/doc/build/orm/cascades.rst index 48109f4651..2a66ebfc0e 100644 --- a/doc/build/orm/cascades.rst +++ b/doc/build/orm/cascades.rst @@ -109,10 +109,10 @@ and added to another:: >>> user1 = sess1.scalars(select(User).filter_by(id=1)).first() >>> address1 = user1.addresses[0] - >>> sess1.close() # user1, address1 no longer associated with sess1 + >>> sess1.close() # user1, address1 no longer associated with sess1 >>> user1.addresses.remove(address1) # address1 no longer associated with user1 >>> sess2 = Session() - >>> sess2.add(user1) # ... but it still gets added to the new session, + >>> sess2.add(user1) # ... but it still gets added to the new session, >>> address1 in sess2 # because it's still "pending" for flush True @@ -735,6 +735,7 @@ illustrated in the example below:: addresses = relationship("Address", cascade="all, delete-orphan") + # ... 
del user.addresses[1] diff --git a/doc/build/orm/collections.rst b/doc/build/orm/collections.rst index 06de6f6203..8f4c095ccf 100644 --- a/doc/build/orm/collections.rst +++ b/doc/build/orm/collections.rst @@ -175,9 +175,11 @@ below where ``list`` is used:: from sqlalchemy.orm import mapped_column from sqlalchemy.orm import relationship + class Base(DeclarativeBase): pass + class Parent(Base): __tablename__ = "parent" @@ -186,6 +188,7 @@ below where ``list`` is used:: # use a list children: Mapped[list["Child"]] = relationship() + class Child(Base): __tablename__ = "child" @@ -202,9 +205,11 @@ Or for a ``set``, illustrated in the same from sqlalchemy.orm import mapped_column from sqlalchemy.orm import relationship + class Base(DeclarativeBase): pass + class Parent(Base): __tablename__ = "parent" @@ -213,6 +218,7 @@ Or for a ``set``, illustrated in the same # use a set children: Mapped[set["Child"]] = relationship() + class Child(Base): __tablename__ = "child" @@ -244,6 +250,7 @@ Python code, as well as in a few special cases, the collection class for a # non-annotated mapping + class Parent(Base): __tablename__ = "parent" @@ -251,6 +258,7 @@ Python code, as well as in a few special cases, the collection class for a children = relationship("Child", collection_class=set) + class Child(Base): __tablename__ = "child" @@ -358,6 +366,7 @@ of the ``Note.text`` field:: cascade="all, delete-orphan", ) + class Note(Base): __tablename__ = "note" @@ -568,16 +577,21 @@ interface are detected and instrumented via duck-typing: class ListLike: def __init__(self): self.data = [] + def append(self, item): self.data.append(item) + def remove(self, item): self.data.remove(item) + def extend(self, items): self.data.extend(items) + def __iter__(self): return iter(self.data) + def foo(self): - return 'foo' + return "foo" ``append``, ``remove``, and ``extend`` are known list-like methods, and will be instrumented automatically. 
``__iter__`` is not a mutator method and won't @@ -592,10 +606,13 @@ explicit about the interface you are implementing by providing an def __init__(self): self.data = set() + def append(self, item): self.data.add(item) + def remove(self, item): self.data.remove(item) + def __iter__(self): return iter(self.data) @@ -623,6 +640,7 @@ get the job done. from sqlalchemy.orm.collections import collection + class SetLike: __emulates__ = set @@ -681,6 +699,7 @@ collection support to other classes. It uses a keying function to delegate to from sqlalchemy.util import OrderedDict from sqlalchemy.orm.collections import MappedCollection + class NodeMap(OrderedDict, MappedCollection): """Holds 'Node' objects, keyed by the 'name' attribute with insert order maintained.""" @@ -744,6 +763,7 @@ to restrict the decorations to just your usage in relationships. For example: class MyAwesomeList(some.great.library.AwesomeList): pass + # ... relationship(..., collection_class=MyAwesomeList) The ORM uses this approach for built-ins, quietly substituting a trivial diff --git a/doc/build/orm/composites.rst b/doc/build/orm/composites.rst index 5285ee845d..d2f494401f 100644 --- a/doc/build/orm/composites.rst +++ b/doc/build/orm/composites.rst @@ -17,6 +17,7 @@ Python type:: import dataclasses + @dataclasses.dataclass class Point: x: int @@ -46,6 +47,7 @@ of the columns to be generated, in this case the names; the from sqlalchemy.orm import DeclarativeBase, Mapped from sqlalchemy.orm import composite, mapped_column + class Base(DeclarativeBase): pass @@ -191,6 +193,7 @@ illustrate an equvalent mapping as that of the main section above. from sqlalchemy import Integer from sqlalchemy.orm import mapped_column, composite + class Vertex(Base): __tablename__ = "vertices" @@ -211,6 +214,7 @@ illustrate an equvalent mapping as that of the main section above. 
from sqlalchemy.orm import mapped_column, composite, Mapped + class Vertex(Base): __tablename__ = "vertices" @@ -239,7 +243,6 @@ illustrate an equvalent mapping as that of the main section above. }, ) - .. _composite_legacy_no_dataclass: Using Legacy Non-Dataclasses @@ -269,11 +272,7 @@ not using a dataclass:: return f"Point(x={self.x!r}, y={self.y!r})" def __eq__(self, other): - return ( - isinstance(other, Point) - and other.x == self.x - and other.y == self.y - ) + return isinstance(other, Point) and other.x == self.x and other.y == self.y def __ne__(self, other): return not self.__eq__(other) @@ -315,6 +314,7 @@ the same expression that the base "greater than" does:: from sqlalchemy.orm import mapped_column from sqlalchemy.sql import and_ + @dataclasses.dataclass class Point: x: int @@ -335,23 +335,21 @@ the same expression that the base "greater than" does:: ] ) + class Base(DeclarativeBase): pass + class Vertex(Base): __tablename__ = "vertices" id: Mapped[int] = mapped_column(primary_key=True) start: Mapped[Point] = composite( - mapped_column("x1"), - mapped_column("y1"), - comparator_factory=PointComparator + mapped_column("x1"), mapped_column("y1"), comparator_factory=PointComparator ) end: Mapped[Point] = composite( - mapped_column("x2"), - mapped_column("y2"), - comparator_factory=PointComparator + mapped_column("x2"), mapped_column("y2"), comparator_factory=PointComparator ) Since ``Point`` is a dataclass, we may make use of @@ -405,6 +403,7 @@ four source columns ultimately resides:: from sqlalchemy.orm import Mapped from sqlalchemy.orm import mapped_column + @dataclasses.dataclass class Point: x: int @@ -423,9 +422,8 @@ four source columns ultimately resides:: def __composite_values__(self): """generate a row from a Vertex""" - return ( - dataclasses.astuple(self.start) + dataclasses.astuple(self.end) - ) + return dataclasses.astuple(self.start) + dataclasses.astuple(self.end) + class Base(DeclarativeBase): pass @@ -449,9 +447,7 @@ The above mapping 
can then be used in terms of ``HasVertex``, ``Vertex``, and session.add(hv) session.commit() - stmt = select(HasVertex).where( - HasVertex.vertex == Vertex(Point(1, 2), Point(3, 4)) - ) + stmt = select(HasVertex).where(HasVertex.vertex == Vertex(Point(1, 2), Point(3, 4))) hv = session.scalars(stmt).first() print(hv.vertex.start) diff --git a/doc/build/orm/contextual.rst b/doc/build/orm/contextual.rst index f821b271f0..8908f07857 100644 --- a/doc/build/orm/contextual.rst +++ b/doc/build/orm/contextual.rst @@ -253,6 +253,7 @@ this in conjunction with a hypothetical event marker provided by the web framewo Session = scoped_session(sessionmaker(bind=some_engine), scopefunc=get_current_request) + @on_request_end def remove_session(req): Session.remove() diff --git a/doc/build/orm/dataclasses.rst b/doc/build/orm/dataclasses.rst index 18817d2fe4..f0b6984382 100644 --- a/doc/build/orm/dataclasses.rst +++ b/doc/build/orm/dataclasses.rst @@ -62,6 +62,7 @@ below:: class Base(MappedAsDataclass, DeclarativeBase): """subclasses will be converted to dataclasses""" + class User(Base): __tablename__ = "user_account" @@ -79,6 +80,7 @@ Or may be applied directly to classes that extend from the Declarative base:: class Base(DeclarativeBase): pass + class User(MappedAsDataclass, Base): """User class will be converted to a dataclass""" @@ -97,6 +99,7 @@ decorator is supported:: reg = registry() + @reg.mapped_as_dataclass class User: __tablename__ = "user_account" @@ -124,6 +127,7 @@ class configuration arguments are passed as class-level parameters:: class Base(DeclarativeBase): pass + class User(MappedAsDataclass, Base, repr=False, unsafe_hash=True): """User class will be converted to a dataclass""" @@ -203,6 +207,7 @@ database-generated, is not part of the constructor at all:: reg = registry() + @reg.mapped_as_dataclass class User: __tablename__ = "user_account" @@ -211,8 +216,9 @@ database-generated, is not part of the constructor at all:: name: Mapped[str] fullname: Mapped[str] = 
mapped_column(default=None) + # 'fullname' is optional keyword argument - u1 = User('name') + u1 = User("name") Column Defaults ~~~~~~~~~~~~~~~ @@ -238,14 +244,14 @@ but where the parameter is optional in the constructor:: reg = registry() + @reg.mapped_as_dataclass class User: __tablename__ = "user_account" id: Mapped[int] = mapped_column(init=False, primary_key=True) created_at: Mapped[datetime] = mapped_column( - insert_default=func.utc_timestamp(), - default=None + insert_default=func.utc_timestamp(), default=None ) With the above mapping, an ``INSERT`` for a new ``User`` object where no @@ -254,7 +260,7 @@ parameter for ``created_at`` were passed proceeds as: .. sourcecode:: pycon+sql >>> with Session(e) as session: - ... session.add(User()) + ... session.add(User()) {sql}... session.commit() BEGIN (implicit) INSERT INTO user_account (created_at) VALUES (utc_timestamp()) @@ -289,11 +295,13 @@ invalid, as they do not see the ``init=False`` parameter present:: reg = registry() + @reg.mapped_as_dataclass class User: __tablename__ = "user_account" id: Mapped[intpk] + # typing error: Argument missing for parameter "id" u1 = User() @@ -311,6 +319,7 @@ the other arguments can remain within the ``Annotated`` construct:: reg = registry() + @reg.mapped_as_dataclass class User: __tablename__ = "user_account" @@ -343,11 +352,14 @@ scalar object references may make use of reg = registry() + @reg.mapped_as_dataclass class Parent: __tablename__ = "parent" id: Mapped[int] = mapped_column(primary_key=True) - children: Mapped[List["Child"]] = relationship(default_factory=list, back_populates='parent') + children: Mapped[List["Child"]] = relationship( + default_factory=list, back_populates="parent" + ) @reg.mapped_as_dataclass @@ -560,9 +572,7 @@ association:: user_id: int = field( init=False, metadata={"sa": mapped_column(ForeignKey("user.id"))} ) - email_address: str = field( - default=None, metadata={"sa": mapped_column(String(50))} - ) + email_address: str = 
field(default=None, metadata={"sa": mapped_column(String(50))}) .. _orm_declarative_dataclasses_mixin: @@ -615,9 +625,7 @@ came from a mixin that is itself a dataclass, the form would be:: user_id: int = field( init=False, metadata={"sa": lambda: mapped_column(ForeignKey("user.id"))} ) - email_address: str = field( - default=None, metadata={"sa": mapped_column(String(50))} - ) + email_address: str = field(default=None, metadata={"sa": mapped_column(String(50))}) @mapper_registry.mapped @@ -668,6 +676,7 @@ variables:: mapper_registry = registry() + @dataclass class User: id: int = field(init=False) @@ -676,38 +685,43 @@ variables:: nickname: str = None addresses: List[Address] = field(default_factory=list) + @dataclass class Address: id: int = field(init=False) user_id: int = field(init=False) email_address: str = None + metadata_obj = MetaData() user = Table( - 'user', + "user", metadata_obj, - Column('id', Integer, primary_key=True), - Column('name', String(50)), - Column('fullname', String(50)), - Column('nickname', String(12)), + Column("id", Integer, primary_key=True), + Column("name", String(50)), + Column("fullname", String(50)), + Column("nickname", String(12)), ) address = Table( - 'address', + "address", metadata_obj, - Column('id', Integer, primary_key=True), - Column('user_id', Integer, ForeignKey('user.id')), - Column('email_address', String(50)), + Column("id", Integer, primary_key=True), + Column("user_id", Integer, ForeignKey("user.id")), + Column("email_address", String(50)), ) - mapper_registry.map_imperatively(User, user, properties={ - 'addresses': relationship(Address, backref='user', order_by=address.c.id), - }) + mapper_registry.map_imperatively( + User, + user, + properties={ + "addresses": relationship(Address, backref="user", order_by=address.c.id), + }, + ) mapper_registry.map_imperatively(Address, address) - .. 
_orm_declarative_attrs_imperative_table: Applying ORM mappings to an existing attrs class @@ -788,6 +802,7 @@ object is declared inline with the declarative class. The } } + @mapper_registry.mapped @define(slots=False) class Address: @@ -802,7 +817,6 @@ object is declared inline with the declarative class. The user_id: int email_address: Optional[str] - .. note:: The ``attrs`` ``slots=True`` option, which enables ``__slots__`` on a mapped class, cannot be used with SQLAlchemy mappings without fully implementing alternative @@ -835,6 +849,7 @@ as well:: mapper_registry = registry() + @define(slots=False) class User: id: int @@ -843,34 +858,40 @@ as well:: nickname: str addresses: List[Address] + @define(slots=False) class Address: id: int user_id: int email_address: Optional[str] + metadata_obj = MetaData() user = Table( - 'user', + "user", metadata_obj, - Column('id', Integer, primary_key=True), - Column('name', String(50)), - Column('fullname', String(50)), - Column('nickname', String(12)), + Column("id", Integer, primary_key=True), + Column("name", String(50)), + Column("fullname", String(50)), + Column("nickname", String(12)), ) address = Table( - 'address', + "address", metadata_obj, - Column('id', Integer, primary_key=True), - Column('user_id', Integer, ForeignKey('user.id')), - Column('email_address', String(50)), + Column("id", Integer, primary_key=True), + Column("user_id", Integer, ForeignKey("user.id")), + Column("email_address", String(50)), ) - mapper_registry.map_imperatively(User, user, properties={ - 'addresses': relationship(Address, backref='user', order_by=address.c.id), - }) + mapper_registry.map_imperatively( + User, + user, + properties={ + "addresses": relationship(Address, backref="user", order_by=address.c.id), + }, + ) mapper_registry.map_imperatively(Address, address) diff --git a/doc/build/orm/declarative_config.rst b/doc/build/orm/declarative_config.rst index 7215bc9b0f..7653f9192a 100644 --- a/doc/build/orm/declarative_config.rst +++ 
b/doc/build/orm/declarative_config.rst @@ -73,9 +73,7 @@ objects but also relationships and SQL expressions:: id: Mapped[int] = mapped_column(primary_key=True) user_id: Mapped[int] = mapped_column(ForeignKey("user.id")) email_address: Mapped[str] - address_statistics: Mapped[Optional[str]] = mapped_column( - Text, deferred=True - ) + address_statistics: Mapped[Optional[str]] = mapped_column(Text, deferred=True) user: Mapped["User"] = relationship(back_populates="addresses") @@ -106,6 +104,7 @@ hybrid table style:: from sqlalchemy.orm import deferred from sqlalchemy.orm import relationship + class Base(DeclarativeBase): pass @@ -120,9 +119,7 @@ hybrid table style:: Column("lastname", String(50)), ) - fullname = column_property( - __table__.c.firstname + " " + __table__.c.lastname - ) + fullname = column_property(__table__.c.firstname + " " + __table__.c.lastname) addresses = relationship("Address", back_populates="user") @@ -185,14 +182,12 @@ particular columns as part of what the ORM should consider to be a primary key for the class, independently of schema-level primary key constraints:: class GroupUsers(Base): - __tablename__ = 'group_users' + __tablename__ = "group_users" user_id = mapped_column(String(40)) group_id = mapped_column(String(40)) - __mapper_args__ = { - "primary_key": [user_id, group_id] - } + __mapper_args__ = {"primary_key": [user_id, group_id]} .. seealso:: @@ -249,7 +244,6 @@ configuring a single-table inheritance mapping:: polymorphic_identity="employee", ) - .. 
seealso:: :ref:`single_inheritance` - background on the ORM single table inheritance @@ -285,22 +279,24 @@ collection:: def __mapper_args__(cls): return { "exclude_properties": [ - column.key for column in cls.__table__.c if - column.info.get("exclude", False) + column.key + for column in cls.__table__.c + if column.info.get("exclude", False) ] } + class Base(DeclarativeBase): pass + class SomeClass(ExcludeColsWFlag, Base): - __tablename__ = 'some_table' + __tablename__ = "some_table" id = mapped_column(Integer, primary_key=True) data = mapped_column(String) not_needed = mapped_column(String, info={"exclude": True}) - Above, the ``ExcludeColsWFlag`` mixin provides a per-class ``__mapper_args__`` hook that will scan for :class:`.Column` objects that include the key/value ``'exclude': True`` passed to the :paramref:`.Column.info` parameter, and then @@ -327,7 +323,7 @@ assumed to be completed and the 'configure' step has finished:: class MyClass(Base): @classmethod def __declare_last__(cls): - "" + """ """ # do something with mappings ``__declare_first__()`` @@ -339,7 +335,7 @@ configuration via the :meth:`.MapperEvents.before_configured` event:: class MyClass(Base): @classmethod def __declare_first__(cls): - "" + """ """ # do something before mappings are configured .. versionadded:: 0.9.3 @@ -402,7 +398,6 @@ be illustrated using :meth:`_orm.registry.mapped` as follows:: id = mapped_column(Integer, primary_key=True) - .. 
seealso:: :ref:`declarative_abstract` @@ -421,7 +416,7 @@ subclasses to extend just from the special class:: __abstract__ = True def some_helpful_method(self): - """""" + """ """ @declared_attr def __mapper_args__(cls): diff --git a/doc/build/orm/declarative_mixins.rst b/doc/build/orm/declarative_mixins.rst index 12caf889f5..8927e710dc 100644 --- a/doc/build/orm/declarative_mixins.rst +++ b/doc/build/orm/declarative_mixins.rst @@ -25,9 +25,11 @@ An example of some commonly mixed-in idioms is below:: from sqlalchemy.orm import mapped_column from sqlalchemy.orm import relationship + class Base(DeclarativeBase): pass + class CommonMixin: """define a series of common elements that may be applied to mapped classes using this class as a mixin class.""" @@ -41,6 +43,7 @@ An example of some commonly mixed-in idioms is below:: id: Mapped[int] = mapped_column(primary_key=True) + class HasLogRecord: """mark classes that have a many-to-one relationship to the ``LogRecord`` class.""" @@ -51,9 +54,11 @@ An example of some commonly mixed-in idioms is below:: def log_record(self) -> Mapped["LogRecord"]: return relationship("LogRecord") + class LogRecord(CommonMixin, Base): log_info: Mapped[str] + class MyModel(CommonMixin, HasLogRecord, Base): name: Mapped[str] @@ -155,6 +160,7 @@ below illustrates some of the the previous section's example in terms of the from sqlalchemy.orm import mapped_column from sqlalchemy.orm import relationship + class Base(DeclarativeBase): """define a series of common elements that may be applied to mapped classes using this class as a base class.""" @@ -168,6 +174,7 @@ below illustrates some of the the previous section's example in terms of the id: Mapped[int] = mapped_column(primary_key=True) + class HasLogRecord: """mark classes that have a many-to-one relationship to the ``LogRecord`` class.""" @@ -178,9 +185,11 @@ below illustrates some of the the previous section's example in terms of the def log_record(self) -> Mapped["LogRecord"]: return 
relationship("LogRecord") + class LogRecord(Base): log_info: Mapped[str] + class MyModel(HasLogRecord, Base): name: Mapped[str] @@ -203,6 +212,7 @@ example below:: from sqlalchemy.orm import mapped_column from sqlalchemy.orm import relationship + class Base: """define a series of common elements that may be applied to mapped classes using this class as a base class.""" @@ -216,8 +226,10 @@ example below:: id = mapped_column(Integer, primary_key=True) + Base = declarative_base(cls=Base) + class HasLogRecord: """mark classes that have a many-to-one relationship to the ``LogRecord`` class.""" @@ -228,9 +240,11 @@ example below:: def log_record(self): return relationship("LogRecord") + class LogRecord(Base): log_info = mapped_column(String) + class MyModel(HasLogRecord, Base): name = mapped_column(String) @@ -320,9 +334,11 @@ reference a common target class via many-to-one:: from sqlalchemy.orm import mapped_column from sqlalchemy.orm import relationship + class Base(DeclarativeBase): pass + class RefTargetMixin: target_id: Mapped[int] = mapped_column(ForeignKey("target.id")) @@ -330,6 +346,7 @@ reference a common target class via many-to-one:: def target(cls) -> Mapped["Target"]: return relationship("Target") + class Foo(RefTargetMixin, Base): __tablename__ = "foo" id: Mapped[int] = mapped_column(primary_key=True) @@ -369,6 +386,7 @@ explicit primaryjoin which refers to pending mapped columns on both __tablename__ = "target" id: Mapped[int] = mapped_column(primary_key=True) + class RefTargetMixin: target_id: Mapped[int] = mapped_column(ForeignKey("target.id")) @@ -398,9 +416,11 @@ columns together:: from sqlalchemy.orm import Mapped from sqlalchemy.orm import mapped_column + class Base(DeclarativeBase): pass + class SomethingMixin: x: Mapped[int] y: Mapped[int] @@ -409,6 +429,7 @@ columns together:: def x_plus_y(cls) -> Mapped[int]: return column_property(cls.x + cls.y) + class Something(SomethingMixin, Base): __tablename__ = "something" @@ -495,6 +516,7 @@ of 
``Person``, but not for the ``Manager`` subclass of ``Person``:: from sqlalchemy.orm import Mapped from sqlalchemy.orm import mapped_column + class Base(DeclarativeBase): pass @@ -512,7 +534,7 @@ of ``Person``, but not for the ``Manager`` subclass of ``Person``:: class Engineer(Person): - id: Mapped[int] = mapped_column(ForeignKey('person.id'), primary_key=True) + id: Mapped[int] = mapped_column(ForeignKey("person.id"), primary_key=True) primary_language: Mapped[str] @@ -572,6 +594,7 @@ for inheriting subclasses by default:: from sqlalchemy.orm import Mapped from sqlalchemy.orm import mapped_column + class Base(DeclarativeBase): pass @@ -597,7 +620,7 @@ for inheriting subclasses by default:: return cls.__name__.lower() - id: Mapped[int] = mapped_column(ForeignKey('person.id'), primary_key=True) + id: Mapped[int] = mapped_column(ForeignKey("person.id"), primary_key=True) primary_language: Mapped[str] @@ -608,7 +631,6 @@ for inheriting subclasses by default:: __mapper_args__ = {"polymorphic_identity": "manager"} - .. 
_mixin_inheritance_columns: Using :func:`_orm.declared_attr` to generate table-specific inheriting columns @@ -627,12 +649,14 @@ a primary key:: class HasId: id: Mapped[int] = mapped_column(primary_key=True) + class Person(HasId, Base): __tablename__ = "person" discriminator: Mapped[str] __mapper_args__ = {"polymorphic_on": "discriminator"} + # this mapping will fail, as there's no primary key class Engineer(Person): __tablename__ = "engineer" @@ -731,9 +755,7 @@ establish it as part of ``__table_args__``:: @declared_attr def __table_args__(cls): - return ( - Index(f"test_idx_{cls.__tablename__}", "a", "b"), - ) + return (Index(f"test_idx_{cls.__tablename__}", "a", "b"),) class MyModel(MyMixin, Base): diff --git a/doc/build/orm/declarative_styles.rst b/doc/build/orm/declarative_styles.rst index ee1cedc8c6..10d14e4ebc 100644 --- a/doc/build/orm/declarative_styles.rst +++ b/doc/build/orm/declarative_styles.rst @@ -59,9 +59,11 @@ of the base:: from sqlalchemy.orm import Mapped from sqlalchemy.orm import mapped_column + class Base(DeclarativeBase): pass + class User(Base): __tablename__ = "user" @@ -73,6 +75,7 @@ of the base:: addresses: Mapped[List["Address"]] = relationship(back_populates="user") + class Address(Base): __tablename__ = "address" diff --git a/doc/build/orm/declarative_tables.rst b/doc/build/orm/declarative_tables.rst index b4c5240883..9faba2f3a0 100644 --- a/doc/build/orm/declarative_tables.rst +++ b/doc/build/orm/declarative_tables.rst @@ -14,10 +14,10 @@ The following examples assume a declarative base class as:: from sqlalchemy.orm import DeclarativeBase + class Base(DeclarativeBase): pass - All of the examples that follow illustrate a class inheriting from the above ``Base``. 
The decorator style introduced at :ref:`orm_declarative_decorator` is fully supported with all the following examples as well, as are legacy @@ -44,6 +44,7 @@ Declarative mapping:: from sqlalchemy.orm import DeclarativeBase from sqlalchemy.orm import mapped_column + class Base(DeclarativeBase): pass @@ -154,6 +155,7 @@ Below illustrates the mapping from the previous section, adding the use of from sqlalchemy.orm import Mapped from sqlalchemy.orm import mapped_column + class Base(DeclarativeBase): pass @@ -316,16 +318,19 @@ the registry and Declarative base could be configured as:: from sqlalchemy.orm import DeclarativeBase from sqlalchemy.orm import Mapped, mapped_column, registry + class Base(DeclarativeBase): - registry = registry(type_annotation_map={ - int: BIGINT, - datetime.datetime: TIMESTAMP(timezone=True), - str: String().with_variant(NVARCHAR, "mssql"), - }) + registry = registry( + type_annotation_map={ + int: BIGINT, + datetime.datetime: TIMESTAMP(timezone=True), + str: String().with_variant(NVARCHAR, "mssql"), + } + ) class SomeClass(Base): - __tablename__ = 'some_table' + __tablename__ = "some_table" id: Mapped[int] = mapped_column(primary_key=True) date: Mapped[datetime.datetime] @@ -401,13 +406,16 @@ declare two variants of :class:`.String` and :class:`.Numeric`:: num_12_4 = Annotated[Decimal, 12] num_6_2 = Annotated[Decimal, 6] + class Base(DeclarativeBase): - registry = registry(type_annotation_map={ - str_30: String(30), - str_50: String(50), - num_12_4: Numeric(12, 4), - num_6_2: Numeric(6, 2) - }) + registry = registry( + type_annotation_map={ + str_30: String(30), + str_50: String(50), + num_12_4: Numeric(12, 4), + num_6_2: Numeric(6, 2), + } + ) The Python type passed to the ``Annotated`` container, in the above example the ``str`` and ``Decimal`` types, is important only for the benefit of typing @@ -421,13 +429,13 @@ must be present for the ``Annotated`` construct to be valid. 
We can then use these augmented types directly in our mapping where they will be matched to the more specific type constructions, as in the following example:: - class SomeClass(Base): - __tablename__ = 'some_table' + class SomeClass(Base): + __tablename__ = "some_table" - short_name: Mapped[str_30] = mapped_column(primary_key=True) - long_name: Mapped[str_50] - num_value: Mapped[num_12_4] - short_num_value: Mapped[num_6_2] + short_name: Mapped[str_30] = mapped_column(primary_key=True) + long_name: Mapped[str_50] + num_value: Mapped[num_12_4] + short_num_value: Mapped[num_6_2] a CREATE TABLE for the above mapping will illustrate the different variants of ``VARCHAR`` and ``NUMERIC`` we've configured, and looks like:: @@ -543,9 +551,11 @@ of the ``NULL`` / ``NOT NULL`` setting that takes place in the database:: mapped_column(nullable=False), ] + class Base(DeclarativeBase): pass + class SomeClass(Base): # ... @@ -577,19 +587,22 @@ default for the ``created_at`` column:: mapped_column(nullable=False, server_default=func.CURRENT_TIMESTAMP()), ] + class Base(DeclarativeBase): pass + class Parent(Base): - __tablename__ = 'parent' + __tablename__ = "parent" id: Mapped[intpk] + class SomeClass(Base): - __tablename__ = 'some_table' + __tablename__ = "some_table" # add ForeignKey to mapped_column(Integer, primary_key=True) - id: Mapped[intpk] = mapped_column(ForeignKey('parent.id')) + id: Mapped[intpk] = mapped_column(ForeignKey("parent.id")) # change server default from CURRENT_TIMESTAMP to UTC_TIMESTAMP created_at: Mapped[timestamp] = mapped_column(server_default=func.UTC_TIMESTAMP()) @@ -716,9 +729,11 @@ dictionary:: from sqlalchemy.orm import DeclarativeBase + class Base(DeclarativeBase): pass + class MyClass(Base): __tablename__ = "sometable" __table_args__ = {"schema": "some_schema"} @@ -734,6 +749,7 @@ subclass by assigning to the ``metadata`` attribute directly:: metadata_obj = MetaData(schema="some_schema") + class Base(DeclarativeBase): metadata = metadata_obj @@ 
-780,10 +796,10 @@ that are commonly used include: additional SQL statements:: class User(Base): - __tablename__ = "user" + __tablename__ = "user" - id: Mapped[int] = mapped_column(primary_key=True) - important_identifier: Mapped[str] = mapped_column(active_history=True) + id: Mapped[int] = mapped_column(primary_key=True) + important_identifier: Mapped[str] = mapped_column(active_history=True) See the docstring for :func:`_orm.mapped_column` for a list of supported parameters. @@ -811,10 +827,10 @@ In the example below, the ``User`` class is mapped with alternate names given to the columns themselves:: class User(Base): - __tablename__ = 'user' + __tablename__ = "user" - id: Mapped[int] = mapped_column('user_id', primary_key=True) - name: Mapped[str] = mapped_column('user_name') + id: Mapped[int] = mapped_column("user_id", primary_key=True) + name: Mapped[str] = mapped_column("user_name") Where above ``User.id`` resolves to a column named ``user_id`` and ``User.name`` resolves to a column named ``user_name``. We @@ -822,7 +838,7 @@ may write a :func:`_sql.select` statement using our Python attribute names and will see the SQL names generated:: >>> from sqlalchemy import select - >>> print(select(User.id, User.name).where(User.name == 'x')) + >>> print(select(User.id, User.name).where(User.name == "x")) SELECT "user".user_id, "user".user_name FROM "user" WHERE "user".user_name = :user_name_1 @@ -901,9 +917,11 @@ directly:: from sqlalchemy import Column, ForeignKey, Integer, String from sqlalchemy.orm import DeclarativeBase + class Base(DeclarativeBase): pass + # construct a Table directly. The Base.metadata collection is # usually a good choice for MetaData but any MetaData # collection may be used. 
@@ -1019,6 +1037,7 @@ directly:: Column("user_name", String), ) + class User(Base): __table__ = user_table @@ -1041,6 +1060,7 @@ declaration, typing tools will be able to match the attribute to the from sqlalchemy.orm import column_property from sqlalchemy.orm import Mapped + class User(Base): __table__ = user_table @@ -1085,12 +1105,12 @@ associate additional parameters with the column. Options include: Column("bio", Text), ) + class User(Base): __table__ = user_table bio = deferred(user_table.c.bio) - .. seealso:: :ref:`orm_queryguide_column_deferral` - full description of deferred column loading @@ -1101,20 +1121,22 @@ associate additional parameters with the column. Options include: collection when inspecting the history of the attribute. This may incur additional SQL statements:: - from sqlalchemy.orm import deferred + from sqlalchemy.orm import deferred - user_table = Table( - "user", - Base.metadata, - Column("id", Integer, primary_key=True), - Column("important_identifier", String) - ) + user_table = Table( + "user", + Base.metadata, + Column("id", Integer, primary_key=True), + Column("important_identifier", String), + ) - class User(Base): - __table__ = user_table - important_identifier = column_property(user_table.c.important_identifier, active_history=True) + class User(Base): + __table__ = user_table + important_identifier = column_property( + user_table.c.important_identifier, active_history=True + ) .. 
seealso:: @@ -1151,13 +1173,13 @@ use a declarative hybrid mapping, passing the from sqlalchemy import Table from sqlalchemy.orm import DeclarativeBase - engine = create_engine( - "postgresql+psycopg2://user:pass@hostname/my_existing_database" - ) + engine = create_engine("postgresql+psycopg2://user:pass@hostname/my_existing_database") + class Base(DeclarativeBase): pass + class MyClass(Base): __table__ = Table( "mytable", @@ -1174,17 +1196,18 @@ objects at once, then refer to them from the :class:`.MetaData`:: from sqlalchemy import Table from sqlalchemy.orm import DeclarativeBase - engine = create_engine( - "postgresql+psycopg2://user:pass@hostname/my_existing_database" - ) + engine = create_engine("postgresql+psycopg2://user:pass@hostname/my_existing_database") + class Base(DeclarativeBase): pass + Base.metadata.reflect(engine) + class MyClass(Base): - __table__ = Base.metadata.tables['mytable'] + __table__ = Base.metadata.tables["mytable"] One caveat to the approach of using ``__table__`` is that the mapped classes cannot be declared until the tables have been reflected, which requires the database @@ -1212,9 +1235,11 @@ use the ``__tablename__`` attribute:: from sqlalchemy.ext.declarative import DeferredReflection from sqlalchemy.orm import DeclarativeBase + class Base(DeclarativeBase): pass + class Reflected(DeferredReflection): __abstract__ = True @@ -1235,9 +1260,7 @@ the ``Reflected.prepare`` method is called. 
The above mapping is not complete until we do so, given an :class:`_engine.Engine`:: - engine = create_engine( - "postgresql+psycopg2://user:pass@hostname/my_existing_database" - ) + engine = create_engine("postgresql+psycopg2://user:pass@hostname/my_existing_database") Reflected.prepare(engine) The purpose of the ``Reflected`` class is to define the scope at which @@ -1295,6 +1318,7 @@ as illustrated below:: from sqlalchemy import event from sqlalchemy.orm import DeclarativeBase + class Base(DeclarativeBase): pass @@ -1302,14 +1326,13 @@ as illustrated below:: @event.listens_for(Base.metadata, "column_reflect") def column_reflect(inspector, table, column_info): # set column.key = "attr_" - column_info['key'] = "attr_%s" % column_info['name'].lower() + column_info["key"] = "attr_%s" % column_info["name"].lower() With the above event, the reflection of :class:`_schema.Column` objects will be intercepted with our event that adds a new ".key" element, such as in a mapping as below:: class MyClass(Base): - __table__ = Table("some_table", Base.metadata, - autoload_with=some_engine) + __table__ = Table("some_table", Base.metadata, autoload_with=some_engine) The approach also works with both the :class:`.DeferredReflection` base class as well as with the :ref:`automap_toplevel` extension. 
For automap @@ -1363,7 +1386,7 @@ map such a table as in the following example:: metadata, Column("user_id", String(40), nullable=False), Column("group_id", String(40), nullable=False), - UniqueConstraint("user_id", "group_id") + UniqueConstraint("user_id", "group_id"), ) @@ -1373,9 +1396,7 @@ map such a table as in the following example:: class GroupUsers(Base): __table__ = group_users - __mapper_args__ = { - "primary_key": [group_users.c.user_id, group_users.c.group_id] - } + __mapper_args__ = {"primary_key": [group_users.c.user_id, group_users.c.group_id]} Above, the ``group_users`` table is an association table of some kind with string columns ``user_id`` and ``group_id``, but no primary key is set up; @@ -1404,9 +1425,7 @@ way. Example:: class User(Base): __table__ = user_table - __mapper_args__ = { - 'include_properties': ['user_id', 'user_name'] - } + __mapper_args__ = {"include_properties": ["user_id", "user_name"]} In the above example, the ``User`` class will map to the ``user_table`` table, only including the ``user_id`` and ``user_name`` columns - the rest are not referenced. @@ -1415,9 +1434,7 @@ Similarly:: class Address(Base): __table__ = address_table - __mapper_args__ = { - 'exclude_properties': ["street", "city", "state", "zip"] - } + __mapper_args__ = {"exclude_properties": ["street", "city", "state", "zip"]} will map the ``Address`` class to the ``address_table`` table, including all columns present except ``street``, ``city``, ``state``, and ``zip``. 
@@ -1431,7 +1448,7 @@ mapping to multi-table constructs that might have repeated names:: class User(Base): __table__ = user_table __mapper_args__ = { - 'include_properties': [user_table.c.user_id, user_table.c.user_name] + "include_properties": [user_table.c.user_id, user_table.c.user_name] } When columns are not included in a mapping, these columns will not be diff --git a/doc/build/orm/extensions/associationproxy.rst b/doc/build/orm/extensions/associationproxy.rst index c5541f39a9..184074e9e6 100644 --- a/doc/build/orm/extensions/associationproxy.rst +++ b/doc/build/orm/extensions/associationproxy.rst @@ -27,9 +27,11 @@ Each ``User`` can have any number of ``Keyword`` objects, and vice-versa from sqlalchemy import Column, ForeignKey, Integer, String, Table from sqlalchemy.orm import DeclarativeBase, relationship + class Base(DeclarativeBase): pass + class User(Base): __tablename__ = "user" id = mapped_column(Integer, primary_key=True) @@ -175,6 +177,7 @@ collection of ``User`` to the ``.keyword`` attribute present on each from sqlalchemy.ext.associationproxy import association_proxy from sqlalchemy.orm import DeclarativeBase, relationship + class Base(DeclarativeBase): pass @@ -234,7 +237,6 @@ objects that are obtained from the underlying ``UserKeywordAssociation`` element >>> user = User("log") >>> for kw in (Keyword("new_from_blammo"), Keyword("its_big")): ... user.keywords.append(kw) - ... 
>>> print(user.keywords) [Keyword('new_from_blammo'), Keyword('its_big')] @@ -301,6 +303,7 @@ when new elements are added to the dictionary:: from sqlalchemy.orm import DeclarativeBase, relationship from sqlalchemy.orm.collections import attribute_mapped_collection + class Base(DeclarativeBase): pass @@ -385,6 +388,7 @@ present on ``UserKeywordAssociation``:: from sqlalchemy.orm import DeclarativeBase, relationship from sqlalchemy.orm.collections import attribute_mapped_collection + class Base(DeclarativeBase): pass @@ -445,21 +449,18 @@ transparently using the association proxy. In the example below, we illustrate usage of the assignment operator, also appropriately handled by the association proxy, to apply a dictionary value to the collection at once:: - >>> user = User('log') - >>> user.keywords = { - ... 'sk1':'kw1', - ... 'sk2':'kw2' - ... } + >>> user = User("log") + >>> user.keywords = {"sk1": "kw1", "sk2": "kw2"} >>> print(user.keywords) {'sk1': 'kw1', 'sk2': 'kw2'} - >>> user.keywords['sk3'] = 'kw3' - >>> del user.keywords['sk2'] + >>> user.keywords["sk3"] = "kw3" + >>> del user.keywords["sk2"] >>> print(user.keywords) {'sk1': 'kw1', 'sk3': 'kw3'} >>> # illustrate un-proxied usage - ... print(user.user_keyword_associations['sk3'].kw) + ... 
print(user.user_keyword_associations["sk3"].kw) <__main__.Keyword object at 0x12ceb90> One caveat with our example above is that because ``Keyword`` objects are created @@ -497,6 +498,7 @@ to a related object, as in the example mapping below:: from sqlalchemy.orm import DeclarativeBase, relationship from sqlalchemy.orm.collections import attribute_mapped_collection + class Base(DeclarativeBase): pass @@ -518,9 +520,7 @@ to a related object, as in the example mapping below:: ) # column-targeted association proxy - special_keys = association_proxy( - "user_keyword_associations", "special_key" - ) + special_keys = association_proxy("user_keyword_associations", "special_key") class UserKeywordAssociation(Base): @@ -536,7 +536,6 @@ to a related object, as in the example mapping below:: id = Column(Integer, primary_key=True) keyword = Column("keyword", String(64)) - The SQL generated takes the form of a correlated subquery against the EXISTS SQL operator so that it can be used in a WHERE clause without the need for additional modifications to the enclosing query. If the @@ -663,4 +662,4 @@ API Documentation :inherited-members: .. 
autoclass:: AssociationProxyExtensionType - :members: \ No newline at end of file + :members: diff --git a/doc/build/orm/extensions/asyncio.rst b/doc/build/orm/extensions/asyncio.rst index 3b87981c32..3a81bf9bb3 100644 --- a/doc/build/orm/extensions/asyncio.rst +++ b/doc/build/orm/extensions/asyncio.rst @@ -154,6 +154,7 @@ illustrates a complete example including mapper and session configuration:: from sqlalchemy.ext.asyncio import async_sessionmaker, create_async_engine from sqlalchemy.orm import DeclarativeBase, relationship, selectinload + class Base(DeclarativeBase): pass @@ -590,7 +591,6 @@ constructs are illustrated below:: asyncio.run(go()) - The above example prints something along the lines of:: New DBAPI connection: > @@ -780,14 +780,14 @@ the usual ``await`` keywords are necessary, including for the :meth:`_asyncio.async_scoped_session.remove` method:: async def some_function(some_async_session, some_object): - # use the AsyncSession directly - some_async_session.add(some_object) + # use the AsyncSession directly + some_async_session.add(some_object) - # use the AsyncSession via the context-local proxy - await AsyncScopedSession.commit() + # use the AsyncSession via the context-local proxy + await AsyncScopedSession.commit() - # "remove" the current proxied AsyncSession for the local context - await AsyncScopedSession.remove() + # "remove" the current proxied AsyncSession for the local context + await AsyncScopedSession.remove() .. 
versionadded:: 1.4.19 diff --git a/doc/build/orm/extensions/baked.rst b/doc/build/orm/extensions/baked.rst index b3c21716a2..60bf06b2a1 100644 --- a/doc/build/orm/extensions/baked.rst +++ b/doc/build/orm/extensions/baked.rst @@ -213,6 +213,7 @@ Our example becomes:: my_simple_cache = {} + def lookup(session, id_argument): if "my_key" not in my_simple_cache: query = session.query(Model).filter(Model.id == bindparam("id")) @@ -294,6 +295,7 @@ into a direct use of "bakery" as follows:: parameterized_query = bakery.bake(create_model_query) if include_frobnizzle: + def include_frobnizzle_in_query(query): return query.filter(Model.frobnizzle == True) @@ -362,9 +364,7 @@ statement compilation time:: bakery = baked.bakery() baked_query = bakery(lambda session: session.query(User)) - baked_query += lambda q: q.filter( - User.name.in_(bindparam("username", expanding=True)) - ) + baked_query += lambda q: q.filter(User.name.in_(bindparam("username", expanding=True))) result = baked_query.with_session(session).params(username=["ed", "fred"]).all() diff --git a/doc/build/orm/extensions/declarative/mixins.rst b/doc/build/orm/extensions/declarative/mixins.rst index cde4c12bd1..7a18f07a7f 100644 --- a/doc/build/orm/extensions/declarative/mixins.rst +++ b/doc/build/orm/extensions/declarative/mixins.rst @@ -5,4 +5,4 @@ Mixin and Custom Base Classes ============================= -See :ref:`orm_mixins_toplevel` for this section. \ No newline at end of file +See :ref:`orm_mixins_toplevel` for this section. diff --git a/doc/build/orm/extensions/hybrid.rst b/doc/build/orm/extensions/hybrid.rst index d403c196ff..9773316d49 100644 --- a/doc/build/orm/extensions/hybrid.rst +++ b/doc/build/orm/extensions/hybrid.rst @@ -18,4 +18,4 @@ API Reference .. 
autoclass:: HybridExtensionType - :members: \ No newline at end of file + :members: diff --git a/doc/build/orm/extensions/mypy.rst b/doc/build/orm/extensions/mypy.rst index d82b302f27..9ebff9fb16 100644 --- a/doc/build/orm/extensions/mypy.rst +++ b/doc/build/orm/extensions/mypy.rst @@ -124,9 +124,7 @@ mapping, using the typical example of the ``User`` class:: # a select() construct makes use of SQL expressions derived from the # User class itself - select_stmt = ( - select(User).where(User.id.in_([3, 4, 5])).where(User.name.contains("s")) - ) + select_stmt = select(User).where(User.id.in_([3, 4, 5])).where(User.name.contains("s")) Above, the steps that the Mypy extension can take include: @@ -169,9 +167,7 @@ following:: ) name: Mapped[Optional[str]] = Mapped._special_method(Column(String)) - def __init__( - self, id: Optional[int] = ..., name: Optional[str] = ... - ) -> None: + def __init__(self, id: Optional[int] = ..., name: Optional[str] = ...) -> None: ... @@ -179,10 +175,7 @@ following:: print(f"Username: {some_user.name}") - select_stmt = ( - select(User).where(User.id.in_([3, 4, 5])).where(User.name.contains("s")) - ) - + select_stmt = select(User).where(User.id.in_([3, 4, 5])).where(User.name.contains("s")) The key steps which have been taken above include: @@ -448,9 +441,7 @@ applied explicitly:: id = Column(Integer, primary_key=True) name = Column(String) - addresses: Mapped[List["Address"]] = relationship( - "Address", back_populates="user" - ) + addresses: Mapped[List["Address"]] = relationship("Address", back_populates="user") class Address(Base): diff --git a/doc/build/orm/inheritance.rst b/doc/build/orm/inheritance.rst index b95adec7ba..6f27deb047 100644 --- a/doc/build/orm/inheritance.rst +++ b/doc/build/orm/inheritance.rst @@ -50,9 +50,11 @@ column, and optionally a polymorphic identifier for the base class itself:: from sqlalchemy.orm import Mapped from sqlalchemy.orm import mapped_column + class Base(DeclarativeBase): pass + class Employee(Base): 
__tablename__ = "employee" id: Mapped[int] = mapped_column(primary_key=True) @@ -179,6 +181,7 @@ and ``Employee``:: from sqlalchemy.orm import relationship + class Company(Base): __tablename__ = "company" id: Mapped[int] = mapped_column(primary_key=True) @@ -332,6 +335,7 @@ comes up when two subclasses want to specify *the same* column, as below:: from datetime import datetime + class Employee(Base): __tablename__ = "employee" id: Mapped[int] = mapped_column(primary_key=True) @@ -674,9 +678,11 @@ almost the same way as we do other forms of inheritance mappings:: from sqlalchemy.ext.declarative import ConcreteBase from sqlalchemy.orm import DeclarativeBase + class Base(DeclarativeBase): pass + class Employee(ConcreteBase, Base): __tablename__ = "employee" id = mapped_column(Integer, primary_key=True) @@ -783,9 +789,11 @@ base class with the ``__abstract__`` indicator:: from sqlalchemy.orm import DeclarativeBase + class Base(DeclarativeBase): pass + class Employee(Base): __abstract__ = True @@ -824,6 +832,7 @@ class called :class:`.AbstractConcreteBase` which achieves this automatically:: from sqlalchemy.ext.declarative import AbstractConcreteBase from sqlalchemy.orm import DeclarativeBase + class Base(DeclarativeBase): pass @@ -857,6 +866,7 @@ class called :class:`.AbstractConcreteBase` which achieves this automatically:: "concrete": True, } + Base.registry.configure() Above, the :meth:`_orm.registry.configure` method is invoked, which will @@ -873,7 +883,7 @@ Using the above mapping, queries can be produced in terms of the ``Employee`` class and any attributes that are locally declared upon it, such as the ``Employee.name``:: - >>> stmt = select(Employee).where(Employee.name == 'n1') + >>> stmt = select(Employee).where(Employee.name == "n1") >>> print(stmt) SELECT pjoin.id, pjoin.name, pjoin.type, pjoin.manager_data, pjoin.engineer_info FROM ( @@ -1077,7 +1087,6 @@ mapping is illustrated below:: "concrete": True, } - Above, we use :func:`.polymorphic_union` in the 
same manner as before, except that we omit the ``employee`` table. diff --git a/doc/build/orm/join_conditions.rst b/doc/build/orm/join_conditions.rst index b6df355f8c..78837775be 100644 --- a/doc/build/orm/join_conditions.rst +++ b/doc/build/orm/join_conditions.rst @@ -23,11 +23,13 @@ class:: from sqlalchemy.orm import DeclarativeBase from sqlalchemy.orm import relationship + class Base(DeclarativeBase): pass + class Customer(Base): - __tablename__ = 'customer' + __tablename__ = "customer" id = mapped_column(Integer, primary_key=True) name = mapped_column(String) @@ -37,8 +39,9 @@ class:: billing_address = relationship("Address") shipping_address = relationship("Address") + class Address(Base): - __tablename__ = 'address' + __tablename__ = "address" id = mapped_column(Integer, primary_key=True) street = mapped_column(String) city = mapped_column(String) @@ -65,7 +68,7 @@ by instructing for each one which foreign key column should be considered, and the appropriate form is as follows:: class Customer(Base): - __tablename__ = 'customer' + __tablename__ = "customer" id = mapped_column(Integer, primary_key=True) name = mapped_column(String) @@ -126,21 +129,25 @@ load those ``Address`` objects which specify a city of "Boston":: from sqlalchemy.orm import DeclarativeBase from sqlalchemy.orm import relationship + class Base(DeclarativeBase): pass + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = mapped_column(Integer, primary_key=True) name = mapped_column(String) - boston_addresses = relationship("Address", - primaryjoin="and_(User.id==Address.user_id, " - "Address.city=='Boston')") + boston_addresses = relationship( + "Address", + primaryjoin="and_(User.id==Address.user_id, " "Address.city=='Boston')", + ) + class Address(Base): - __tablename__ = 'address' + __tablename__ = "address" id = mapped_column(Integer, primary_key=True) - user_id = mapped_column(Integer, ForeignKey('user.id')) + user_id = mapped_column(Integer, ForeignKey("user.id")) 
street = mapped_column(String) city = mapped_column(String) @@ -208,22 +215,25 @@ type of the other:: from sqlalchemy.orm import DeclarativeBase + class Base(DeclarativeBase): pass + class HostEntry(Base): - __tablename__ = 'host_entry' + __tablename__ = "host_entry" id = mapped_column(Integer, primary_key=True) ip_address = mapped_column(INET) content = mapped_column(String(50)) # relationship() using explicit foreign_keys, remote_side - parent_host = relationship("HostEntry", - primaryjoin=ip_address == cast(content, INET), - foreign_keys=content, - remote_side=ip_address - ) + parent_host = relationship( + "HostEntry", + primaryjoin=ip_address == cast(content, INET), + foreign_keys=content, + remote_side=ip_address, + ) The above relationship will produce a join like:: @@ -244,8 +254,9 @@ SQL expressions:: from sqlalchemy.orm import foreign, remote + class HostEntry(Base): - __tablename__ = 'host_entry' + __tablename__ = "host_entry" id = mapped_column(Integer, primary_key=True) ip_address = mapped_column(INET) @@ -253,11 +264,10 @@ SQL expressions:: # relationship() using explicit foreign() and remote() annotations # in lieu of separate arguments - parent_host = relationship("HostEntry", - primaryjoin=remote(ip_address) == \ - cast(foreign(content), INET), - ) - + parent_host = relationship( + "HostEntry", + primaryjoin=remote(ip_address) == cast(foreign(content), INET), + ) .. 
_relationship_custom_operator: @@ -276,18 +286,20 @@ A comparison like the above may be used directly with a :func:`_orm.relationship`:: class IPA(Base): - __tablename__ = 'ip_address' + __tablename__ = "ip_address" id = mapped_column(Integer, primary_key=True) v4address = mapped_column(INET) - network = relationship("Network", - primaryjoin="IPA.v4address.bool_op('<<')" - "(foreign(Network.v4representation))", - viewonly=True - ) + network = relationship( + "Network", + primaryjoin="IPA.v4address.bool_op('<<')" "(foreign(Network.v4representation))", + viewonly=True, + ) + + class Network(Base): - __tablename__ = 'network' + __tablename__ = "network" id = mapped_column(Integer, primary_key=True) v4representation = mapped_column(CIDR) @@ -320,6 +332,7 @@ two expressions. The below example illustrates this with the from sqlalchemy import Column, Integer, func from sqlalchemy.orm import relationship, foreign + class Polygon(Base): __tablename__ = "polygon" id = mapped_column(Integer, primary_key=True) @@ -330,6 +343,7 @@ two expressions. 
The below example illustrates this with the viewonly=True, ) + class Point(Base): __tablename__ = "point" id = mapped_column(Integer, primary_key=True) @@ -359,35 +373,34 @@ for both; then to make ``Article`` refer to ``Writer`` as well, ``Article.magazine`` and ``Article.writer``:: class Magazine(Base): - __tablename__ = 'magazine' + __tablename__ = "magazine" id = mapped_column(Integer, primary_key=True) class Article(Base): - __tablename__ = 'article' + __tablename__ = "article" article_id = mapped_column(Integer) - magazine_id = mapped_column(ForeignKey('magazine.id')) + magazine_id = mapped_column(ForeignKey("magazine.id")) writer_id = mapped_column() magazine = relationship("Magazine") writer = relationship("Writer") __table_args__ = ( - PrimaryKeyConstraint('article_id', 'magazine_id'), + PrimaryKeyConstraint("article_id", "magazine_id"), ForeignKeyConstraint( - ['writer_id', 'magazine_id'], - ['writer.id', 'writer.magazine_id'] + ["writer_id", "magazine_id"], ["writer.id", "writer.magazine_id"] ), ) class Writer(Base): - __tablename__ = 'writer' + __tablename__ = "writer" id = mapped_column(Integer, primary_key=True) - magazine_id = mapped_column(ForeignKey('magazine.id'), primary_key=True) + magazine_id = mapped_column(ForeignKey("magazine.id"), primary_key=True) magazine = relationship("Magazine") When the above mapping is configured, we will see this warning emitted:: @@ -434,7 +447,7 @@ To get just #1 and #2, we could specify only ``Article.writer_id`` as the class Article(Base): # ... 
- writer = relationship("Writer", foreign_keys='Article.writer_id') + writer = relationship("Writer", foreign_keys="Article.writer_id") However, this has the effect of ``Article.writer`` not taking ``Article.magazine_id`` into account when querying against ``Writer``: @@ -459,7 +472,8 @@ annotating with :func:`_orm.foreign`:: writer = relationship( "Writer", primaryjoin="and_(Writer.id == foreign(Article.writer_id), " - "Writer.magazine_id == Article.magazine_id)") + "Writer.magazine_id == Article.magazine_id)", + ) .. versionchanged:: 1.0.0 the ORM will attempt to warn when a column is used as the synchronization target from more than one relationship @@ -485,16 +499,16 @@ is considered to be "many to one". For the comparison we'll use here, we'll be dealing with collections so we keep things configured as "one to many":: class Element(Base): - __tablename__ = 'element' + __tablename__ = "element" path = mapped_column(String, primary_key=True) - descendants = relationship('Element', - primaryjoin= - remote(foreign(path)).like( - path.concat('/%')), - viewonly=True, - order_by=path) + descendants = relationship( + "Element", + primaryjoin=remote(foreign(path)).like(path.concat("/%")), + viewonly=True, + order_by=path, + ) Above, if given an ``Element`` object with a path attribute of ``"/foo/bar2"``, we seek for a load of ``Element.descendants`` to look like:: @@ -531,23 +545,29 @@ is when establishing a many-to-many relationship from a class to itself, as show from sqlalchemy.orm import DeclarativeBase from sqlalchemy.orm import relationship + class Base(DeclarativeBase): pass - node_to_node = Table("node_to_node", Base.metadata, + + node_to_node = Table( + "node_to_node", + Base.metadata, Column("left_node_id", Integer, ForeignKey("node.id"), primary_key=True), - Column("right_node_id", Integer, ForeignKey("node.id"), primary_key=True) + Column("right_node_id", Integer, ForeignKey("node.id"), primary_key=True), ) + class Node(Base): - __tablename__ = 'node' + 
__tablename__ = "node" id = mapped_column(Integer, primary_key=True) label = mapped_column(String) - right_nodes = relationship("Node", - secondary=node_to_node, - primaryjoin=id==node_to_node.c.left_node_id, - secondaryjoin=id==node_to_node.c.right_node_id, - backref="left_nodes" + right_nodes = relationship( + "Node", + secondary=node_to_node, + primaryjoin=id == node_to_node.c.left_node_id, + secondaryjoin=id == node_to_node.c.right_node_id, + backref="left_nodes", ) Where above, SQLAlchemy can't know automatically which columns should connect @@ -565,14 +585,15 @@ When referring to a plain :class:`_schema.Table` object in a declarative string, use the string name of the table as it is present in the :class:`_schema.MetaData`:: class Node(Base): - __tablename__ = 'node' + __tablename__ = "node" id = mapped_column(Integer, primary_key=True) label = mapped_column(String) - right_nodes = relationship("Node", - secondary="node_to_node", - primaryjoin="Node.id==node_to_node.c.left_node_id", - secondaryjoin="Node.id==node_to_node.c.right_node_id", - backref="left_nodes" + right_nodes = relationship( + "Node", + secondary="node_to_node", + primaryjoin="Node.id==node_to_node.c.left_node_id", + secondaryjoin="Node.id==node_to_node.c.right_node_id", + backref="left_nodes", ) .. 
warning:: When passed as a Python-evaluable string, the @@ -592,26 +613,38 @@ to ``node.c.id``:: metadata_obj = MetaData() mapper_registry = registry() - node_to_node = Table("node_to_node", metadata_obj, + node_to_node = Table( + "node_to_node", + metadata_obj, Column("left_node_id", Integer, ForeignKey("node.id"), primary_key=True), - Column("right_node_id", Integer, ForeignKey("node.id"), primary_key=True) + Column("right_node_id", Integer, ForeignKey("node.id"), primary_key=True), ) - node = Table("node", metadata_obj, - Column('id', Integer, primary_key=True), - Column('label', String) + node = Table( + "node", + metadata_obj, + Column("id", Integer, primary_key=True), + Column("label", String), ) + + class Node: pass - mapper_registry.map_imperatively(Node, node, properties={ - 'right_nodes':relationship(Node, - secondary=node_to_node, - primaryjoin=node.c.id==node_to_node.c.left_node_id, - secondaryjoin=node.c.id==node_to_node.c.right_node_id, - backref="left_nodes" - )}) + mapper_registry.map_imperatively( + Node, + node, + properties={ + "right_nodes": relationship( + Node, + secondary=node_to_node, + primaryjoin=node.c.id == node_to_node.c.left_node_id, + secondaryjoin=node.c.id == node_to_node.c.right_node_id, + backref="left_nodes", + ) + }, + ) Note that in both examples, the :paramref:`_orm.relationship.backref` keyword specifies a ``left_nodes`` backref - when @@ -653,35 +686,38 @@ target consisting of multiple tables. Below is an example of such a join condition (requires version 0.9.2 at least to function as is):: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = mapped_column(Integer, primary_key=True) - b_id = mapped_column(ForeignKey('b.id')) + b_id = mapped_column(ForeignKey("b.id")) + + d = relationship( + "D", + secondary="join(B, D, B.d_id == D.id)." 
"join(C, C.d_id == D.id)", + primaryjoin="and_(A.b_id == B.id, A.id == C.a_id)", + secondaryjoin="D.id == B.d_id", + uselist=False, + viewonly=True, + ) - d = relationship("D", - secondary="join(B, D, B.d_id == D.id)." - "join(C, C.d_id == D.id)", - primaryjoin="and_(A.b_id == B.id, A.id == C.a_id)", - secondaryjoin="D.id == B.d_id", - uselist=False, - viewonly=True - ) class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = mapped_column(Integer, primary_key=True) - d_id = mapped_column(ForeignKey('d.id')) + d_id = mapped_column(ForeignKey("d.id")) + class C(Base): - __tablename__ = 'c' + __tablename__ = "c" id = mapped_column(Integer, primary_key=True) - a_id = mapped_column(ForeignKey('a.id')) - d_id = mapped_column(ForeignKey('d.id')) + a_id = mapped_column(ForeignKey("a.id")) + d_id = mapped_column(ForeignKey("d.id")) + class D(Base): - __tablename__ = 'd' + __tablename__ = "d" id = mapped_column(Integer, primary_key=True) @@ -753,33 +789,37 @@ entities ``C`` and ``D``, which also must have rows that line up with the rows in both ``A`` and ``B`` simultaneously:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = mapped_column(Integer, primary_key=True) - b_id = mapped_column(ForeignKey('b.id')) + b_id = mapped_column(ForeignKey("b.id")) + class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = mapped_column(Integer, primary_key=True) + class C(Base): - __tablename__ = 'c' + __tablename__ = "c" id = mapped_column(Integer, primary_key=True) - a_id = mapped_column(ForeignKey('a.id')) + a_id = mapped_column(ForeignKey("a.id")) some_c_value = mapped_column(String) + class D(Base): - __tablename__ = 'd' + __tablename__ = "d" id = mapped_column(Integer, primary_key=True) - c_id = mapped_column(ForeignKey('c.id')) - b_id = mapped_column(ForeignKey('b.id')) + c_id = mapped_column(ForeignKey("c.id")) + b_id = mapped_column(ForeignKey("b.id")) some_d_value = mapped_column(String) + # 1. 
set up the join() as a variable, so we can refer # to it in the mapping multiple times. j = join(B, D, D.b_id == B.id).join(C, C.id == D.c_id) @@ -856,28 +896,25 @@ illustrates a non-primary mapper relationship that will load the first ten items for each collection:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = mapped_column(Integer, primary_key=True) class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = mapped_column(Integer, primary_key=True) a_id = mapped_column(ForeignKey("a.id")) + partition = select( - B, - func.row_number().over( - order_by=B.id, partition_by=B.a_id - ).label('index') + B, func.row_number().over(order_by=B.id, partition_by=B.a_id).label("index") ).alias() partitioned_b = aliased(B, partition) A.partitioned_bs = relationship( - partitioned_b, - primaryjoin=and_(partitioned_b.a_id == A.id, partition.c.index < 10) + partitioned_b, primaryjoin=and_(partitioned_b.a_id == A.id, partition.c.index < 10) ) We can use the above ``partitioned_bs`` relationship with most of the loader @@ -926,7 +963,7 @@ conjunction with :class:`_query.Query` as follows: .. sourcecode:: python class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = mapped_column(Integer, primary_key=True) @property @@ -939,4 +976,4 @@ of special Python attributes. .. seealso:: - :ref:`mapper_hybrids` \ No newline at end of file + :ref:`mapper_hybrids` diff --git a/doc/build/orm/loading.rst b/doc/build/orm/loading.rst index 0aca6cd0c9..fdb27806f4 100644 --- a/doc/build/orm/loading.rst +++ b/doc/build/orm/loading.rst @@ -1,3 +1,3 @@ :orphan: -Moved! :doc:`/orm/loading_relationships` \ No newline at end of file +Moved! 
:doc:`/orm/loading_relationships` diff --git a/doc/build/orm/mapped_attributes.rst b/doc/build/orm/mapped_attributes.rst index f2bfd18ffa..1a8305b481 100644 --- a/doc/build/orm/mapped_attributes.rst +++ b/doc/build/orm/mapped_attributes.rst @@ -23,36 +23,36 @@ issued when the ORM is populating the object:: from sqlalchemy.orm import validates + class EmailAddress(Base): - __tablename__ = 'address' + __tablename__ = "address" id = mapped_column(Integer, primary_key=True) email = mapped_column(String) - @validates('email') + @validates("email") def validate_email(self, key, address): - if '@' not in address: + if "@" not in address: raise ValueError("failed simple email validation") return address - Validators also receive collection append events, when items are added to a collection:: from sqlalchemy.orm import validates + class User(Base): # ... addresses = relationship("Address") - @validates('addresses') + @validates("addresses") def validate_address(self, key, address): - if '@' not in address.email: + if "@" not in address.email: raise ValueError("failed simplified email validation") return address - The validation function by default does not get emitted for collection remove events, as the typical expectation is that a value being discarded doesn't require validation. However, :func:`.validates` supports reception @@ -62,18 +62,18 @@ argument which if ``True`` indicates that the operation is a removal:: from sqlalchemy.orm import validates + class User(Base): # ... 
addresses = relationship("Address") - @validates('addresses', include_removes=True) + @validates("addresses", include_removes=True) def validate_address(self, key, address, is_remove): if is_remove: - raise ValueError( - "not allowed to remove items from the collection") + raise ValueError("not allowed to remove items from the collection") else: - if '@' not in address.email: + if "@" not in address.email: raise ValueError("failed simplified email validation") return address @@ -84,14 +84,15 @@ event occurs as a result of a backref:: from sqlalchemy.orm import validates + class User(Base): # ... - addresses = relationship("Address", backref='user') + addresses = relationship("Address", backref="user") - @validates('addresses', include_backrefs=False) + @validates("addresses", include_backrefs=False) def validate_address(self, key, address): - if '@' not in address: + if "@" not in address: raise ValueError("failed simplified email validation") return address @@ -130,7 +131,7 @@ plain descriptor, and to have it read/write from a mapped attribute with a different name. Below we illustrate this using Python 2.6-style properties:: class EmailAddress(Base): - __tablename__ = 'email_address' + __tablename__ = "email_address" id = mapped_column(Integer, primary_key=True) @@ -157,8 +158,9 @@ usable with :class:`_sql.Select`. 
To provide these, we instead use the from sqlalchemy.ext.hybrid import hybrid_property + class EmailAddress(Base): - __tablename__ = 'email_address' + __tablename__ = "email_address" id = mapped_column(Integer, primary_key=True) @@ -206,7 +208,7 @@ host name automatically, we might define two sets of string manipulation logic:: class EmailAddress(Base): - __tablename__ = 'email_address' + __tablename__ = "email_address" id = mapped_column(Integer, primary_key=True) @@ -262,8 +264,9 @@ attribute available by an additional name:: from sqlalchemy.orm import synonym + class MyClass(Base): - __tablename__ = 'my_table' + __tablename__ = "my_table" id = mapped_column(Integer, primary_key=True) job_status = mapped_column(String(50)) @@ -274,19 +277,19 @@ The above class ``MyClass`` has two attributes, ``.job_status`` and ``.status`` that will behave as one attribute, both at the expression level:: - >>> print(MyClass.job_status == 'some_status') + >>> print(MyClass.job_status == "some_status") my_table.job_status = :job_status_1 - >>> print(MyClass.status == 'some_status') + >>> print(MyClass.status == "some_status") my_table.job_status = :job_status_1 and at the instance level:: - >>> m1 = MyClass(status='x') + >>> m1 = MyClass(status="x") >>> m1.status, m1.job_status ('x', 'x') - >>> m1.job_status = 'y' + >>> m1.job_status = "y" >>> m1.status, m1.job_status ('y', 'y') @@ -299,7 +302,7 @@ a user-defined :term:`descriptor`. 
We can supply our ``status`` synonym with a ``@property``:: class MyClass(Base): - __tablename__ = 'my_table' + __tablename__ = "my_table" id = mapped_column(Integer, primary_key=True) status = mapped_column(String(50)) @@ -315,8 +318,9 @@ using the :func:`.synonym_for` decorator:: from sqlalchemy.ext.declarative import synonym_for + class MyClass(Base): - __tablename__ = 'my_table' + __tablename__ = "my_table" id = mapped_column(Integer, primary_key=True) status = mapped_column(String(50)) diff --git a/doc/build/orm/mapped_sql_expr.rst b/doc/build/orm/mapped_sql_expr.rst index 4d797aee9c..4751148133 100644 --- a/doc/build/orm/mapped_sql_expr.rst +++ b/doc/build/orm/mapped_sql_expr.rst @@ -21,8 +21,9 @@ will provide for us the ``fullname``, which is the string concatenation of the t from sqlalchemy.ext.hybrid import hybrid_property + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = mapped_column(Integer, primary_key=True) firstname = mapped_column(String(50)) lastname = mapped_column(String(50)) @@ -39,8 +40,9 @@ class level, so that it is available from an instance:: as well as usable within queries:: - some_user = session.scalars(select(User).where(User.fullname == "John Smith").limit(1)).first() - + some_user = session.scalars( + select(User).where(User.fullname == "John Smith").limit(1) + ).first() The string concatenation example is a simple one, where the Python expression can be dual purposed at the instance and class level. 
Often, the SQL expression @@ -52,8 +54,9 @@ needs to be present inside the hybrid, using the ``if`` statement in Python and from sqlalchemy.ext.hybrid import hybrid_property from sqlalchemy.sql import case + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = mapped_column(Integer, primary_key=True) firstname = mapped_column(String(50)) lastname = mapped_column(String(50)) @@ -67,9 +70,12 @@ needs to be present inside the hybrid, using the ``if`` statement in Python and @fullname.expression def fullname(cls): - return case([ - (cls.firstname != None, cls.firstname + " " + cls.lastname), - ], else_ = cls.lastname) + return case( + [ + (cls.firstname != None, cls.firstname + " " + cls.lastname), + ], + else_=cls.lastname, + ) .. _mapper_column_property_sql_expressions: @@ -96,8 +102,9 @@ follows:: from sqlalchemy.orm import column_property + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = mapped_column(Integer, primary_key=True) firstname = mapped_column(String(50)) lastname = mapped_column(String(50)) @@ -114,31 +121,34 @@ of ``Address`` objects available for a particular ``User``:: from sqlalchemy.orm import DeclarativeBase + class Base(DeclarativeBase): pass + class Address(Base): - __tablename__ = 'address' + __tablename__ = "address" id = mapped_column(Integer, primary_key=True) - user_id = mapped_column(Integer, ForeignKey('user.id')) + user_id = mapped_column(Integer, ForeignKey("user.id")) + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = mapped_column(Integer, primary_key=True) address_count = column_property( - select(func.count(Address.id)). - where(Address.user_id==id). - correlate_except(Address). - scalar_subquery() + select(func.count(Address.id)) + .where(Address.user_id == id) + .correlate_except(Address) + .scalar_subquery() ) In the above example, we define a :func:`_expression.ScalarSelect` construct like the following:: stmt = ( - select(func.count(Address.id)). 
- where(Address.user_id==id). - correlate_except(Address). - scalar_subquery() + select(func.count(Address.id)) + .where(Address.user_id == id) + .correlate_except(Address) + .scalar_subquery() ) Above, we first use :func:`_sql.select` to create a :class:`_sql.Select` @@ -166,20 +176,21 @@ association table to both tables in a relationship:: from sqlalchemy import and_ + class Author(Base): # ... book_count = column_property( - select(func.count(books.c.id) - ).where( + select(func.count(books.c.id)) + .where( and_( - book_authors.c.author_id==authors.c.id, - book_authors.c.book_id==books.c.id + book_authors.c.author_id == authors.c.id, + book_authors.c.book_id == books.c.id, ) - ).scalar_subquery() + ) + .scalar_subquery() ) - Adding column_property() to an existing Declarative mapped class ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -193,9 +204,7 @@ to add an additional property after the fact:: # only works if a declarative base class is in use User.address_count = column_property( - select(func.count(Address.id)). - where(Address.user_id==User.id). - scalar_subquery() + select(func.count(Address.id)).where(Address.user_id == User.id).scalar_subquery() ) When using mapping styles that don't use Declarative base classes @@ -207,9 +216,10 @@ which can be obtained using :func:`_sa.inspect`:: reg = registry() + @reg.mapped class User: - __tablename__ = 'user' + __tablename__ = "user" # ... additional mapping directives @@ -218,11 +228,12 @@ which can be obtained using :func:`_sa.inspect`:: # works for any kind of mapping from sqlalchemy import inspect + inspect(User).add_property( column_property( - select(func.count(Address.id)). - where(Address.user_id==User.id). 
- scalar_subquery() + select(func.count(Address.id)) + .where(Address.user_id == User.id) + .scalar_subquery() ) ) @@ -251,20 +262,20 @@ attribute, which is itself a :class:`.ColumnProperty`:: class File(Base): - __tablename__ = 'file' + __tablename__ = "file" id = mapped_column(Integer, primary_key=True) name = mapped_column(String(64)) extension = mapped_column(String(8)) - filename = column_property(name + '.' + extension) - path = column_property('C:/' + filename.expression) + filename = column_property(name + "." + extension) + path = column_property("C:/" + filename.expression) When the ``File`` class is used in expressions normally, the attributes assigned to ``filename`` and ``path`` are usable directly. The use of the :attr:`.ColumnProperty.expression` attribute is only necessary when using the :class:`.ColumnProperty` directly within the mapping definition:: - stmt = select(File.path).where(File.filename == 'foo.txt') + stmt = select(File.path).where(File.filename == "foo.txt") Using Column Deferral with ``column_property()`` ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -276,8 +287,9 @@ to a SQL expression mapped by :func:`_orm.column_property` by using the from sqlalchemy.orm import deferred + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id: Mapped[int] = mapped_column(primary_key=True) firstname: Mapped[str] = mapped_column() @@ -305,19 +317,18 @@ which is then used to emit a query:: from sqlalchemy.orm import object_session from sqlalchemy import select, func + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = mapped_column(Integer, primary_key=True) firstname = mapped_column(String(50)) lastname = mapped_column(String(50)) @property def address_count(self): - return object_session(self).\ - scalar( - select(func.count(Address.id)).\ - where(Address.user_id==self.id) - ) + return object_session(self).scalar( + select(func.count(Address.id)).where(Address.user_id == self.id) + ) The plain descriptor 
approach is useful as a last resort, but is less performant in the usual case than both the hybrid and column property approaches, in that diff --git a/doc/build/orm/mapper_config.rst b/doc/build/orm/mapper_config.rst index 5059a12af9..68218491d5 100644 --- a/doc/build/orm/mapper_config.rst +++ b/doc/build/orm/mapper_config.rst @@ -34,4 +34,4 @@ in SQLAlchemy, it's first introduced in the :ref:`unified_tutorial` at .. toctree:: :hidden: - scalar_mapping \ No newline at end of file + scalar_mapping diff --git a/doc/build/orm/mapping_styles.rst b/doc/build/orm/mapping_styles.rst index 2edc74d6ab..b263993934 100644 --- a/doc/build/orm/mapping_styles.rst +++ b/doc/build/orm/mapping_styles.rst @@ -75,9 +75,10 @@ the use of a declarative base which is then used in a declarative table mapping: class Base(DeclarativeBase): pass + # an example mapping using the base class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id: Mapped[int] = mapped_column(primary_key=True) name: Mapped[str] @@ -171,33 +172,40 @@ shared for all mapped classes that are related to each other:: mapper_registry = registry() user_table = Table( - 'user', + "user", mapper_registry.metadata, - Column('id', Integer, primary_key=True), - Column('name', String(50)), - Column('fullname', String(50)), - Column('nickname', String(12)) + Column("id", Integer, primary_key=True), + Column("name", String(50)), + Column("fullname", String(50)), + Column("nickname", String(12)), ) + class User: pass - mapper_registry.map_imperatively(User, user_table) + mapper_registry.map_imperatively(User, user_table) Information about mapped attributes, such as relationships to other classes, are provided via the ``properties`` dictionary. 
The example below illustrates a second :class:`_schema.Table` object, mapped to a class called ``Address``, then linked to ``User`` via :func:`_orm.relationship`:: - address = Table('address', metadata_obj, - Column('id', Integer, primary_key=True), - Column('user_id', Integer, ForeignKey('user.id')), - Column('email_address', String(50)) - ) + address = Table( + "address", + metadata_obj, + Column("id", Integer, primary_key=True), + Column("user_id", Integer, ForeignKey("user.id")), + Column("email_address", String(50)), + ) - mapper_registry.map_imperatively(User, user, properties={ - 'addresses' : relationship(Address, backref='user', order_by=address.c.id) - }) + mapper_registry.map_imperatively( + User, + user, + properties={ + "addresses": relationship(Address, backref="user", order_by=address.c.id) + }, + ) mapper_registry.map_imperatively(Address, address) @@ -335,11 +343,13 @@ all the attributes that are named. E.g.:: from sqlalchemy.orm import Mapped from sqlalchemy.orm import mapped_column + class Base(DeclarativeBase): pass + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id: Mapped[int] = mapped_column(primary_key=True) name: Mapped[str] @@ -348,7 +358,7 @@ all the attributes that are named. E.g.:: An object of type ``User`` above will have a constructor which allows ``User`` objects to be created as:: - u1 = User(name='some name', fullname='some fullname') + u1 = User(name="some name", fullname="some fullname") .. 
tip:: @@ -371,15 +381,17 @@ The constructor also applies to imperative mappings:: mapper_registry = registry() user_table = Table( - 'user', + "user", mapper_registry.metadata, - Column('id', Integer, primary_key=True), - Column('name', String(50)) + Column("id", Integer, primary_key=True), + Column("name", String(50)), ) + class User: pass + mapper_registry.map_imperatively(User, user_table) The above class, mapped imperatively as described at :ref:`orm_imperative_mapping`, @@ -539,7 +551,7 @@ as well as specific history on modifications to attributes since the last flush: >>> insp.attrs.nickname.value 'nickname' - >>> u1.nickname = 'new nickname' + >>> u1.nickname = "new nickname" >>> insp.attrs.nickname.history History(added=['new nickname'], unchanged=(), deleted=['nickname']) diff --git a/doc/build/orm/nonstandard_mappings.rst b/doc/build/orm/nonstandard_mappings.rst index 23356d0e30..d461b63c64 100644 --- a/doc/build/orm/nonstandard_mappings.rst +++ b/doc/build/orm/nonstandard_mappings.rst @@ -15,33 +15,38 @@ function creates a selectable unit comprised of multiple tables, complete with its own composite primary key, which can be mapped in the same way as a :class:`_schema.Table`:: - from sqlalchemy import Table, Column, Integer, \ - String, MetaData, join, ForeignKey + from sqlalchemy import Table, Column, Integer, String, MetaData, join, ForeignKey from sqlalchemy.orm import DeclarativeBase from sqlalchemy.orm import column_property metadata_obj = MetaData() # define two Table objects - user_table = Table('user', metadata_obj, - Column('id', Integer, primary_key=True), - Column('name', String), - ) - - address_table = Table('address', metadata_obj, - Column('id', Integer, primary_key=True), - Column('user_id', Integer, ForeignKey('user.id')), - Column('email_address', String) - ) + user_table = Table( + "user", + metadata_obj, + Column("id", Integer, primary_key=True), + Column("name", String), + ) + + address_table = Table( + "address", + metadata_obj, + 
Column("id", Integer, primary_key=True), + Column("user_id", Integer, ForeignKey("user.id")), + Column("email_address", String), + ) # define a join between them. This # takes place across the user.id and address.user_id # columns. user_address_join = join(user_table, address_table) + class Base(DeclarativeBase): metadata = metadata_obj + # map to it class AddressUser(Base): __table__ = user_address_join @@ -129,15 +134,22 @@ includes a join to a subquery:: from sqlalchemy import select, func - subq = select( - func.count(orders.c.id).label('order_count'), - func.max(orders.c.price).label('highest_order'), - orders.c.customer_id - ).group_by(orders.c.customer_id).subquery() + subq = ( + select( + func.count(orders.c.id).label("order_count"), + func.max(orders.c.price).label("highest_order"), + orders.c.customer_id, + ) + .group_by(orders.c.customer_id) + .subquery() + ) + + customer_select = ( + select(customers, subq) + .join_from(customers, subq, customers.c.id == subq.c.customer_id) + .subquery() + ) - customer_select = select(customers, subq).join_from( - customers, subq, customers.c.id == subq.c.customer_id - ).subquery() class Customer(Base): __table__ = customer_select diff --git a/doc/build/orm/persistence_techniques.rst b/doc/build/orm/persistence_techniques.rst index 7f0361e975..789cd739c8 100644 --- a/doc/build/orm/persistence_techniques.rst +++ b/doc/build/orm/persistence_techniques.rst @@ -21,6 +21,7 @@ an attribute:: value = mapped_column(Integer) + someobject = session.get(SomeClass, 5) # set 'value' attribute to a SQL expression adding one @@ -88,10 +89,10 @@ This is most easily accomplished using the session = Session() # execute a string statement - result = session.execute("select * from table where id=:id", {'id':7}) + result = session.execute("select * from table where id=:id", {"id": 7}) # execute a SQL expression construct - result = session.execute(select(mytable).where(mytable.c.id==7)) + result = 
session.execute(select(mytable).where(mytable.c.id == 7)) The current :class:`~sqlalchemy.engine.Connection` held by the :class:`~sqlalchemy.orm.session.Session` is accessible using the @@ -117,13 +118,12 @@ proper context for the desired engine:: # need to specify mapper or class when executing result = session.execute( text("select * from table where id=:id"), - {'id':7}, - bind_arguments={'mapper': MyMappedClass} + {"id": 7}, + bind_arguments={"mapper": MyMappedClass}, ) result = session.execute( - select(mytable).where(mytable.c.id==7), - bind_arguments={'mapper': MyMappedClass} + select(mytable).where(mytable.c.id == 7), bind_arguments={"mapper": MyMappedClass} ) connection = session.connection(MyMappedClass) @@ -143,14 +143,15 @@ The ORM considers any attribute that was never set on an object as a "default" case; the attribute will be omitted from the INSERT statement:: class MyObject(Base): - __tablename__ = 'my_table' + __tablename__ = "my_table" id = mapped_column(Integer, primary_key=True) data = mapped_column(String(50), nullable=True) + obj = MyObject(id=1) session.add(obj) session.commit() # INSERT with the 'data' column omitted; the database - # itself will persist this as the NULL value + # itself will persist this as the NULL value Omitting a column from the INSERT means that the column will have the NULL value set, *unless* the column has a default set up, @@ -160,29 +161,31 @@ behavior of SQLAlchemy's insert behavior with both client-side and server-side defaults:: class MyObject(Base): - __tablename__ = 'my_table' + __tablename__ = "my_table" id = mapped_column(Integer, primary_key=True) data = mapped_column(String(50), nullable=True, server_default="default") + obj = MyObject(id=1) session.add(obj) session.commit() # INSERT with the 'data' column omitted; the database - # itself will persist this as the value 'default' + # itself will persist this as the value 'default' However, in the ORM, even if one assigns the Python value ``None`` 
explicitly to the object, this is treated the **same** as though the value were never assigned:: class MyObject(Base): - __tablename__ = 'my_table' + __tablename__ = "my_table" id = mapped_column(Integer, primary_key=True) data = mapped_column(String(50), nullable=True, server_default="default") + obj = MyObject(id=1, data=None) session.add(obj) session.commit() # INSERT with the 'data' column explicitly set to None; - # the ORM still omits it from the statement and the - # database will still persist this as the value 'default' + # the ORM still omits it from the statement and the + # database will still persist this as the value 'default' The above operation will persist into the ``data`` column the server default value of ``"default"`` and not SQL NULL, even though ``None`` @@ -199,9 +202,9 @@ on a per-instance level, we assign the attribute using the obj = MyObject(id=1, data=null()) session.add(obj) session.commit() # INSERT with the 'data' column explicitly set as null(); - # the ORM uses this directly, bypassing all client- - # and server-side defaults, and the database will - # persist this as the NULL value + # the ORM uses this directly, bypassing all client- + # and server-side defaults, and the database will + # persist this as the NULL value The :obj:`_expression.null` SQL construct always translates into the SQL NULL value being directly present in the target INSERT statement. 
@@ -214,18 +217,21 @@ a type where the ORM should treat the value ``None`` the same as any other value and pass it through, rather than omitting it as a "missing" value:: class MyObject(Base): - __tablename__ = 'my_table' + __tablename__ = "my_table" id = mapped_column(Integer, primary_key=True) data = mapped_column( - String(50).evaluates_none(), # indicate that None should always be passed - nullable=True, server_default="default") + String(50).evaluates_none(), # indicate that None should always be passed + nullable=True, + server_default="default", + ) + obj = MyObject(id=1, data=None) session.add(obj) session.commit() # INSERT with the 'data' column explicitly set to None; - # the ORM uses this directly, bypassing all client- - # and server-side defaults, and the database will - # persist this as the NULL value + # the ORM uses this directly, bypassing all client- + # and server-side defaults, and the database will + # persist this as the NULL value .. topic:: Evaluating None @@ -284,7 +290,7 @@ columns should be fetched immediately upon INSERT and sometimes UPDATE:: class MyModel(Base): - __tablename__ = 'my_table' + __tablename__ = "my_table" id = mapped_column(Integer, primary_key=True) timestamp = mapped_column(DateTime(), server_default=func.now()) @@ -313,7 +319,7 @@ This case is the same as case 1 above, except we don't specify :paramref:`.orm.mapper.eager_defaults`:: class MyModel(Base): - __tablename__ = 'my_table' + __tablename__ = "my_table" id = mapped_column(Integer, primary_key=True) timestamp = mapped_column(DateTime(), server_default=func.now()) @@ -364,7 +370,7 @@ For an explicit sequence as we use with Oracle, this just means we are using the :class:`.Sequence` construct:: class MyOracleModel(Base): - __tablename__ = 'my_table' + __tablename__ = "my_table" id = mapped_column(Integer, Sequence("my_sequence"), primary_key=True) data = mapped_column(String(50)) @@ -383,9 +389,11 @@ by a trigger, we use :class:`.FetchedValue`. 
Below is a model that uses a SQL Server TIMESTAMP column as the primary key, which generates values automatically:: class MyModel(Base): - __tablename__ = 'my_table' + __tablename__ = "my_table" - timestamp = mapped_column(TIMESTAMP(), server_default=FetchedValue(), primary_key=True) + timestamp = mapped_column( + TIMESTAMP(), server_default=FetchedValue(), primary_key=True + ) An INSERT for the above table on SQL Server looks like: @@ -417,7 +425,7 @@ Using the example of a :class:`.DateTime` column for MySQL, we add an explicit pre-execute-supported default using the "NOW()" SQL function:: class MyModel(Base): - __tablename__ = 'my_table' + __tablename__ = "my_table" timestamp = mapped_column(DateTime(), default=func.now(), primary_key=True) @@ -443,13 +451,13 @@ into the column:: from sqlalchemy import cast, Binary + class MyModel(Base): - __tablename__ = 'my_table' + __tablename__ = "my_table" timestamp = mapped_column( - TIMESTAMP(), - default=cast(func.now(), Binary), - primary_key=True) + TIMESTAMP(), default=cast(func.now(), Binary), primary_key=True + ) Above, in addition to selecting the "NOW()" function, we additionally make use of the :class:`.Binary` datatype in conjunction with :func:`.cast` so that @@ -476,12 +484,13 @@ We therefore must also specify that we'd like to coerce the return value to by passing this as the ``type_`` parameter:: class MyModel(Base): - __tablename__ = 'my_table' + __tablename__ = "my_table" timestamp = mapped_column( DateTime, - default=func.datetime('now', 'localtime', type_=DateTime), - primary_key=True) + default=func.datetime("now", "localtime", type_=DateTime), + primary_key=True, + ) The above mapping upon INSERT will look like: @@ -531,12 +540,19 @@ values using RETURNING when available, :paramref:`_schema.Column.server_default` to ensure that the fetch occurs:: class MyModel(Base): - __tablename__ = 'my_table' + __tablename__ = "my_table" id = mapped_column(Integer, primary_key=True) - created = 
mapped_column(DateTime(), default=func.now(), server_default=FetchedValue()) - updated = mapped_column(DateTime(), onupdate=func.now(), server_default=FetchedValue(), server_onupdate=FetchedValue()) + created = mapped_column( + DateTime(), default=func.now(), server_default=FetchedValue() + ) + updated = mapped_column( + DateTime(), + onupdate=func.now(), + server_default=FetchedValue(), + server_onupdate=FetchedValue(), + ) __mapper_args__ = {"eager_defaults": True} @@ -587,13 +603,13 @@ The dictionary is consulted whenever the :class:`.Session` needs to emit SQL on behalf of a particular kind of mapped class in order to locate the appropriate source of database connectivity:: - engine1 = create_engine('postgresql+psycopg2://db1') - engine2 = create_engine('postgresql+psycopg2://db2') + engine1 = create_engine("postgresql+psycopg2://db1") + engine2 = create_engine("postgresql+psycopg2://db2") Session = sessionmaker() # bind User operations to engine 1, Account operations to engine 2 - Session.configure(binds={User:engine1, Account:engine2}) + Session.configure(binds={User: engine1, Account: engine2}) session = Session() @@ -693,26 +709,25 @@ a custom :class:`.Session` which delivers the following rules: :: engines = { - 'leader':create_engine("sqlite:///leader.db"), - 'other':create_engine("sqlite:///other.db"), - 'follower1':create_engine("sqlite:///follower1.db"), - 'follower2':create_engine("sqlite:///follower2.db"), + "leader": create_engine("sqlite:///leader.db"), + "other": create_engine("sqlite:///other.db"), + "follower1": create_engine("sqlite:///follower1.db"), + "follower2": create_engine("sqlite:///follower2.db"), } from sqlalchemy.sql import Update, Delete from sqlalchemy.orm import Session, sessionmaker import random + class RoutingSession(Session): def get_bind(self, mapper=None, clause=None): if mapper and issubclass(mapper.class_, MyOtherClass): - return engines['other'] + return engines["other"] elif self._flushing or isinstance(clause, (Update, 
Delete)): - return engines['leader'] + return engines["leader"] else: - return engines[ - random.choice(['follower1','follower2']) - ] + return engines[random.choice(["follower1", "follower2"])] The above :class:`.Session` class is plugged in using the ``class_`` argument to :class:`.sessionmaker`:: @@ -749,4 +764,4 @@ Bulk Operations method, making direct use of :class:`_dml.Insert` and :class:`_dml.Update` constructs. See the document at :doc:`queryguide/dml` for documentation, including :ref:`orm_queryguide_legacy_bulk` which illustrates migration - from the older methods to the new methods. \ No newline at end of file + from the older methods to the new methods. diff --git a/doc/build/orm/queryguide/_deferred_setup.rst b/doc/build/orm/queryguide/_deferred_setup.rst index 0dbd086815..a9bbaac8b8 100644 --- a/doc/build/orm/queryguide/_deferred_setup.rst +++ b/doc/build/orm/queryguide/_deferred_setup.rst @@ -27,16 +27,15 @@ This page illustrates the mappings and fixture data used by the >>> >>> class Base(DeclarativeBase): ... pass - ... >>> class User(Base): ... __tablename__ = "user_account" ... id: Mapped[int] = mapped_column(primary_key=True) ... name: Mapped[str] ... fullname: Mapped[Optional[str]] ... books: Mapped[List["Book"]] = relationship(back_populates="owner") + ... ... def __repr__(self) -> str: ... return f"User(id={self.id!r}, name={self.name!r}, fullname={self.fullname!r})" - ... >>> class Book(Base): ... __tablename__ = "book" ... id: Mapped[int] = mapped_column(primary_key=True) @@ -45,10 +44,9 @@ This page illustrates the mappings and fixture data used by the ... summary: Mapped[str] = mapped_column(Text) ... cover_photo: Mapped[bytes] = mapped_column(LargeBinary) ... owner: Mapped["User"] = relationship(back_populates="books") + ... ... def __repr__(self) -> str: ... return f"Book(id={self.id!r}, title={self.title!r})" - ... - ... >>> engine = create_engine("sqlite+pysqlite:///:memory:", echo=True) >>> Base.metadata.create_all(engine) BEGIN ... 
@@ -60,18 +58,42 @@ This page illustrates the mappings and fixture data used by the ... name="spongebob", ... fullname="Spongebob Squarepants", ... books=[ - ... Book(title="100 Years of Krabby Patties", summary="some long summary", cover_photo=b'binary_image_data'), - ... Book(title="Sea Catch 22", summary="another long summary", cover_photo=b'binary_image_data'), - ... Book(title="The Sea Grapes of Wrath", summary="yet another summary", cover_photo=b'binary_image_data'), + ... Book( + ... title="100 Years of Krabby Patties", + ... summary="some long summary", + ... cover_photo=b"binary_image_data", + ... ), + ... Book( + ... title="Sea Catch 22", + ... summary="another long summary", + ... cover_photo=b"binary_image_data", + ... ), + ... Book( + ... title="The Sea Grapes of Wrath", + ... summary="yet another summary", + ... cover_photo=b"binary_image_data", + ... ), ... ], ... ), ... User( ... name="sandy", ... fullname="Sandy Cheeks", ... books=[ - ... Book(title="A Nut Like No Other", summary="some long summary", cover_photo=b'binary_image_data'), - ... Book(title="Geodesic Domes: A Retrospective", summary="another long summary", cover_photo=b'binary_image_data'), - ... Book(title="Rocketry for Squirrels", summary="yet another summary", cover_photo=b'binary_image_data'), + ... Book( + ... title="A Nut Like No Other", + ... summary="some long summary", + ... cover_photo=b"binary_image_data", + ... ), + ... Book( + ... title="Geodesic Domes: A Retrospective", + ... summary="another long summary", + ... cover_photo=b"binary_image_data", + ... ), + ... Book( + ... title="Rocketry for Squirrels", + ... summary="yet another summary", + ... cover_photo=b"binary_image_data", + ... ), ... ], ... ), ... 
] diff --git a/doc/build/orm/queryguide/_dml_setup.rst b/doc/build/orm/queryguide/_dml_setup.rst index 6fcb32b87e..bae0cce3dc 100644 --- a/doc/build/orm/queryguide/_dml_setup.rst +++ b/doc/build/orm/queryguide/_dml_setup.rst @@ -26,7 +26,6 @@ This page illustrates the mappings and fixture data used by the >>> >>> class Base(DeclarativeBase): ... pass - ... >>> class User(Base): ... __tablename__ = "user_account" ... id: Mapped[int] = mapped_column(primary_key=True) @@ -34,24 +33,25 @@ This page illustrates the mappings and fixture data used by the ... fullname: Mapped[Optional[str]] ... species: Mapped[Optional[str]] ... addresses: Mapped[List["Address"]] = relationship(back_populates="user") + ... ... def __repr__(self) -> str: ... return f"User(name={self.name!r}, fullname={self.fullname!r})" - ... >>> class Address(Base): ... __tablename__ = "address" ... id: Mapped[int] = mapped_column(primary_key=True) ... user_id: Mapped[int] = mapped_column(ForeignKey("user_account.id")) ... email_address: Mapped[str] ... user: Mapped[User] = relationship(back_populates="addresses") + ... ... def __repr__(self) -> str: ... return f"Address(email_address={self.email_address!r})" - ... >>> class LogRecord(Base): ... __tablename__ = "log_record" ... id: Mapped[int] = mapped_column(primary_key=True) ... message: Mapped[str] ... code: Mapped[str] ... timestamp: Mapped[datetime.datetime] + ... ... def __repr__(self): ... return f"LogRecord({self.message!r}, {self.code!r}, {self.timestamp!r})" @@ -60,37 +60,36 @@ This page illustrates the mappings and fixture data used by the ... id: Mapped[int] = mapped_column(primary_key=True) ... name: Mapped[str] ... type: Mapped[str] + ... ... def __repr__(self): ... return f"{self.__class__.__name__}({self.name!r})" + ... ... __mapper_args__ = { ... "polymorphic_identity": "employee", ... "polymorphic_on": "type", ... } - ... >>> class Manager(Employee): ... __tablename__ = "manager" - ... id: Mapped[int] = mapped_column( - ... 
ForeignKey("employee.id"), primary_key=True - ... ) + ... id: Mapped[int] = mapped_column(ForeignKey("employee.id"), primary_key=True) ... manager_name: Mapped[str] + ... ... def __repr__(self): ... return f"{self.__class__.__name__}({self.name!r}, manager_name={self.manager_name!r})" + ... ... __mapper_args__ = { ... "polymorphic_identity": "manager", ... } - ... >>> class Engineer(Employee): ... __tablename__ = "engineer" - ... id: Mapped[int] = mapped_column( - ... ForeignKey("employee.id"), primary_key=True - ... ) + ... id: Mapped[int] = mapped_column(ForeignKey("employee.id"), primary_key=True) ... engineer_info: Mapped[str] + ... ... def __repr__(self): ... return f"{self.__class__.__name__}({self.name!r}, engineer_info={self.engineer_info!r})" + ... ... __mapper_args__ = { ... "polymorphic_identity": "engineer", ... } - ... >>> engine = create_engine("sqlite+pysqlite:///:memory:", echo=True) >>> Base.metadata.create_all(engine) diff --git a/doc/build/orm/queryguide/_end_doctest.rst b/doc/build/orm/queryguide/_end_doctest.rst index 2667ebe89b..126f95289e 100644 --- a/doc/build/orm/queryguide/_end_doctest.rst +++ b/doc/build/orm/queryguide/_end_doctest.rst @@ -4,4 +4,3 @@ >>> session.close() >>> conn.close() - ... diff --git a/doc/build/orm/queryguide/_inheritance_setup.rst b/doc/build/orm/queryguide/_inheritance_setup.rst index 0b41033bbc..addddda4e2 100644 --- a/doc/build/orm/queryguide/_inheritance_setup.rst +++ b/doc/build/orm/queryguide/_inheritance_setup.rst @@ -22,13 +22,11 @@ the :ref:`queryguide_toplevel`. >>> >>> class Base(DeclarativeBase): ... pass - ... >>> class Company(Base): ... __tablename__ = "company" ... id: Mapped[int] = mapped_column(primary_key=True) ... name: Mapped[str] ... employees: Mapped[list["Employee"]] = relationship(back_populates="company") - ... >>> >>> class Employee(Base): ... __tablename__ = "employee" @@ -37,46 +35,39 @@ the :ref:`queryguide_toplevel`. ... type: Mapped[str] ... 
company_id: Mapped[int] = mapped_column(ForeignKey("company.id")) ... company: Mapped[Company] = relationship(back_populates="employees") + ... ... def __repr__(self): ... return f"{self.__class__.__name__}({self.name!r})" + ... ... __mapper_args__ = { ... "polymorphic_identity": "employee", ... "polymorphic_on": "type", ... } - ... >>> >>> class Manager(Employee): ... __tablename__ = "manager" - ... id: Mapped[int] = mapped_column( - ... ForeignKey("employee.id"), primary_key=True - ... ) + ... id: Mapped[int] = mapped_column(ForeignKey("employee.id"), primary_key=True) ... manager_name: Mapped[str] ... paperwork: Mapped[list["Paperwork"]] = relationship() ... __mapper_args__ = { ... "polymorphic_identity": "manager", ... } - ... >>> class Paperwork(Base): ... __tablename__ = "paperwork" - ... id: Mapped[int] = mapped_column( - ... primary_key=True - ... ) - ... manager_id: Mapped[int] = mapped_column(ForeignKey('manager.id')) + ... id: Mapped[int] = mapped_column(primary_key=True) + ... manager_id: Mapped[int] = mapped_column(ForeignKey("manager.id")) ... document_name: Mapped[str] + ... ... def __repr__(self): ... return f"Paperwork({self.document_name!r})" - ... >>> >>> class Engineer(Employee): ... __tablename__ = "engineer" - ... id: Mapped[int] = mapped_column( - ... ForeignKey("employee.id"), primary_key=True - ... ) + ... id: Mapped[int] = mapped_column(ForeignKey("employee.id"), primary_key=True) ... engineer_info: Mapped[str] ... __mapper_args__ = { ... "polymorphic_identity": "engineer", ... } - ... >>> >>> engine = create_engine("sqlite://", echo=True) >>> @@ -87,24 +78,25 @@ the :ref:`queryguide_toplevel`. >>> from sqlalchemy.orm import Session >>> session = Session(conn) >>> session.add( - ... Company( - ... name="Krusty Krab", - ... employees=[ - ... Manager( - ... name="Mr. Krabs", manager_name="Eugene H. Krabs", - ... paperwork=[ - ... Paperwork(document_name="Secret Recipes"), - ... Paperwork(document_name="Krabby Patty Orders"), - ... ] - ... 
), - ... Engineer( - ... name="SpongeBob", engineer_info="Krabby Patty Master" - ... ), - ... Engineer(name="Squidward", engineer_info="Senior Customer Engagement Engineer"), - ... ], - ... ) + ... Company( + ... name="Krusty Krab", + ... employees=[ + ... Manager( + ... name="Mr. Krabs", + ... manager_name="Eugene H. Krabs", + ... paperwork=[ + ... Paperwork(document_name="Secret Recipes"), + ... Paperwork(document_name="Krabby Patty Orders"), + ... ], + ... ), + ... Engineer(name="SpongeBob", engineer_info="Krabby Patty Master"), + ... Engineer( + ... name="Squidward", + ... engineer_info="Senior Customer Engagement Engineer", + ... ), + ... ], ... ) + ... ) >>> session.commit() - ... BEGIN ... diff --git a/doc/build/orm/queryguide/_plain_setup.rst b/doc/build/orm/queryguide/_plain_setup.rst index fd558dc37d..7166fe9b8a 100644 --- a/doc/build/orm/queryguide/_plain_setup.rst +++ b/doc/build/orm/queryguide/_plain_setup.rst @@ -25,7 +25,6 @@ This page illustrates the mappings and fixture data used by the >>> >>> class Base(DeclarativeBase): ... pass - ... >>> class User(Base): ... __tablename__ = "user_account" ... id: Mapped[int] = mapped_column(primary_key=True) @@ -33,18 +32,18 @@ This page illustrates the mappings and fixture data used by the ... fullname: Mapped[Optional[str]] ... addresses: Mapped[List["Address"]] = relationship(back_populates="user") ... orders: Mapped[List["Order"]] = relationship() + ... ... def __repr__(self) -> str: ... return f"User(id={self.id!r}, name={self.name!r}, fullname={self.fullname!r})" - ... >>> class Address(Base): ... __tablename__ = "address" ... id: Mapped[int] = mapped_column(primary_key=True) ... user_id: Mapped[int] = mapped_column(ForeignKey("user_account.id")) ... email_address: Mapped[str] ... user: Mapped[User] = relationship(back_populates="addresses") + ... ... def __repr__(self) -> str: ... return f"Address(id={self.id!r}, email_address={self.email_address!r})" - ... >>> order_items_table = Table( ... 
"order_items", ... Base.metadata, @@ -57,13 +56,11 @@ This page illustrates the mappings and fixture data used by the ... id: Mapped[int] = mapped_column(primary_key=True) ... user_id: Mapped[int] = mapped_column(ForeignKey("user_account.id")) ... items: Mapped[List["Item"]] = relationship(secondary=order_items_table) - ... >>> class Item(Base): ... __tablename__ = "item" ... id: Mapped[int] = mapped_column(primary_key=True) ... name: Mapped[str] ... description: Mapped[str] - ... >>> engine = create_engine("sqlite+pysqlite:///:memory:", echo=True) >>> Base.metadata.create_all(engine) BEGIN ... diff --git a/doc/build/orm/queryguide/_single_inheritance.rst b/doc/build/orm/queryguide/_single_inheritance.rst index c5d2b7cdee..546f87a193 100644 --- a/doc/build/orm/queryguide/_single_inheritance.rst +++ b/doc/build/orm/queryguide/_single_inheritance.rst @@ -22,31 +22,29 @@ the :ref:`queryguide_toplevel`. >>> >>> class Base(DeclarativeBase): ... pass - ... >>> class Employee(Base): ... __tablename__ = "employee" ... id: Mapped[int] = mapped_column(primary_key=True) ... name: Mapped[str] ... type: Mapped[str] + ... ... def __repr__(self): ... return f"{self.__class__.__name__}({self.name!r})" + ... ... __mapper_args__ = { ... "polymorphic_identity": "employee", ... "polymorphic_on": "type", ... } - ... >>> class Manager(Employee): ... manager_name: Mapped[str] = mapped_column(nullable=True) ... __mapper_args__ = { ... "polymorphic_identity": "manager", ... } - ... >>> class Engineer(Employee): ... engineer_info: Mapped[str] = mapped_column(nullable=True) ... __mapper_args__ = { ... "polymorphic_identity": "engineer", ... } - ... >>> >>> engine = create_engine("sqlite://", echo=True) >>> @@ -57,17 +55,18 @@ the :ref:`queryguide_toplevel`. >>> from sqlalchemy.orm import Session >>> session = Session(conn) >>> session.add_all( - ... [ - ... Manager( - ... name="Mr. Krabs", manager_name="Eugene H. Krabs", - ... ), - ... Engineer( - ... 
name="SpongeBob", engineer_info="Krabby Patty Master" - ... ), - ... Engineer(name="Squidward", engineer_info="Senior Customer Engagement Engineer"), - ... ], - ... ) + ... [ + ... Manager( + ... name="Mr. Krabs", + ... manager_name="Eugene H. Krabs", + ... ), + ... Engineer(name="SpongeBob", engineer_info="Krabby Patty Master"), + ... Engineer( + ... name="Squidward", + ... engineer_info="Senior Customer Engagement Engineer", + ... ), + ... ], + ... ) >>> session.commit() - ... BEGIN ... diff --git a/doc/build/orm/queryguide/api.rst b/doc/build/orm/queryguide/api.rst index 86e76ce894..f9a92e3316 100644 --- a/doc/build/orm/queryguide/api.rst +++ b/doc/build/orm/queryguide/api.rst @@ -297,7 +297,7 @@ same way as the legacy :attr:`.Query.column_descriptions` attribute. The format returned is a list of dictionaries:: >>> from pprint import pprint - >>> user_alias = aliased(User, name='user2') + >>> user_alias = aliased(User, name="user2") >>> stmt = select(User, User.id, user_alias) >>> pprint(stmt.column_descriptions) [{'aliased': False, diff --git a/doc/build/orm/queryguide/columns.rst b/doc/build/orm/queryguide/columns.rst index 2dc9e7c6be..19538c698e 100644 --- a/doc/build/orm/queryguide/columns.rst +++ b/doc/build/orm/queryguide/columns.rst @@ -122,9 +122,9 @@ If we wanted to apply :func:`_orm.load_only` options to both ``User`` and ``Book``, we would make use of two separate options:: >>> stmt = ( - ... select(User, Book). - ... join_from(User, Book). - ... options(load_only(User.name), load_only(Book.title)) + ... select(User, Book) + ... .join_from(User, Book) + ... .options(load_only(User.name), load_only(Book.title)) ... ) >>> print(stmt) {opensql}SELECT user_account.id, user_account.name, book.id AS id_1, book.title @@ -148,7 +148,7 @@ in addition to primary key column:: >>> from sqlalchemy.orm import selectinload >>> stmt = select(User).options(selectinload(User.books).load_only(Book.title)) >>> for user in session.scalars(stmt): - ... 
print(f"{user.fullname} {[b.title for b in user.books]}") + ... print(f"{user.fullname} {[b.title for b in user.books]}") {opensql}SELECT user_account.id, user_account.name, user_account.fullname FROM user_account [...] () @@ -175,7 +175,7 @@ the SELECT statement emitted for each ``User.books`` collection:: >>> from sqlalchemy.orm import defaultload >>> stmt = select(User).options(defaultload(User.books).load_only(Book.title)) >>> for user in session.scalars(stmt): - ... print(f"{user.fullname} {[b.title for b in user.books]}") + ... print(f"{user.fullname} {[b.title for b in user.books]}") {opensql}SELECT user_account.id, user_account.name, user_account.fullname FROM user_account [...] () @@ -260,9 +260,7 @@ example below, the deferred column ``.cover_photo`` will disallow attribute access:: >>> book = session.scalar( - ... select(Book). - ... options(defer(Book.cover_photo, raiseload=True)). - ... where(Book.id == 4) + ... select(Book).options(defer(Book.cover_photo, raiseload=True)).where(Book.id == 4) ... ) {opensql}SELECT book.id, book.owner_id, book.title, book.summary FROM book @@ -280,9 +278,7 @@ to all deferred attributes:: >>> session.expunge_all() >>> book = session.scalar( - ... select(Book). - ... options(load_only(Book.title, raiseload=True)). - ... where(Book.id == 5) + ... select(Book).options(load_only(Book.title, raiseload=True)).where(Book.id == 5) ... ) {opensql}SELECT book.id, book.title FROM book @@ -319,7 +315,6 @@ Configuring Column Deferral on Mappings >>> class Base(DeclarativeBase): ... pass - ... The functionality of :func:`_orm.defer` is available as a default behavior for mapped columns, as may be appropriate for columns that should not be loaded @@ -336,15 +331,14 @@ unconditionally on every query. To configure, use the ... title: Mapped[str] ... summary: Mapped[str] = mapped_column(Text, deferred=True) ... cover_photo: Mapped[bytes] = mapped_column(LargeBinary, deferred=True) + ... ... def __repr__(self) -> str: ... 
return f"Book(id={self.id!r}, title={self.title!r})" Using the above mapping, queries against ``Book`` will automatically not include the ``summary`` and ``cover_photo`` columns:: - >>> book = session.scalar( - ... select(Book).where(Book.id == 2) - ... ) + >>> book = session.scalar(select(Book).where(Book.id == 2)) {opensql}SELECT book.id, book.owner_id, book.title FROM book WHERE book.id = ? @@ -398,24 +392,26 @@ to the :paramref:`_orm.registry.map_imperatively.properties` dictionary: mapper_registry = registry() book_table = Table( - 'book', + "book", mapper_registry.metadata, - Column('id', Integer, primary_key=True), - Column('title', String(50)), - Column('summary', Text), - Column('cover_image', Blob) + Column("id", Integer, primary_key=True), + Column("title", String(50)), + Column("summary", Text), + Column("cover_image", Blob), ) + class Book: pass + mapper_registry.map_imperatively( Book, book_table, properties={ "summary": deferred(book_table.c.summary), "cover_image": deferred(book_table.c.cover_image), - } + }, ) :func:`_orm.deferred` may also be used in place of :func:`_orm.column_property` @@ -425,8 +421,9 @@ when mapped SQL expressions should be loaded on a deferred basis: from sqlalchemy.orm import deferred + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id: Mapped[int] = mapped_column(primary_key=True) firstname: Mapped[str] = mapped_column() @@ -452,9 +449,7 @@ of the mapping. For example we may apply :func:`_orm.undefer` to the as deferred:: >>> from sqlalchemy.orm import undefer - >>> book = session.scalar( - ... select(Book).where(Book.id == 2).options(undefer(Book.summary)) - ... ) + >>> book = session.scalar(select(Book).where(Book.id == 2).options(undefer(Book.summary))) {opensql}SELECT book.summary, book.id, book.owner_id, book.title FROM book WHERE book.id = ? @@ -475,7 +470,6 @@ Loading deferred columns in groups >>> class Base(DeclarativeBase): ... pass - ... 
Normally when a column is mapped with ``mapped_column(deferred=True)``, when the deferred attribute is accessed on an object, SQL will be emitted to load @@ -492,17 +486,20 @@ undeferred:: ... id: Mapped[int] = mapped_column(primary_key=True) ... owner_id: Mapped[int] = mapped_column(ForeignKey("user_account.id")) ... title: Mapped[str] - ... summary: Mapped[str] = mapped_column(Text, deferred=True, deferred_group="book_attrs") - ... cover_photo: Mapped[bytes] = mapped_column(LargeBinary, deferred=True, deferred_group="book_attrs") + ... summary: Mapped[str] = mapped_column( + ... Text, deferred=True, deferred_group="book_attrs" + ... ) + ... cover_photo: Mapped[bytes] = mapped_column( + ... LargeBinary, deferred=True, deferred_group="book_attrs" + ... ) + ... ... def __repr__(self) -> str: ... return f"Book(id={self.id!r}, title={self.title!r})" Using the above mapping, accessing either ``summary`` or ``cover_photo`` will load both columns at once using just one SELECT statement:: - >>> book = session.scalar( - ... select(Book).where(Book.id == 2) - ... ) + >>> book = session.scalar(select(Book).where(Book.id == 2)) {opensql}SELECT book.id, book.owner_id, book.title FROM book WHERE book.id = ? @@ -524,7 +521,7 @@ option, passing the string name of the group to be eagerly loaded:: >>> from sqlalchemy.orm import undefer_group >>> book = session.scalar( - ... select(Book).where(Book.id == 2).options(undefer_group("book_attrs")) + ... select(Book).where(Book.id == 2).options(undefer_group("book_attrs")) ... ) {opensql}SELECT book.summary, book.cover_photo, book.id, book.owner_id, book.title FROM book @@ -544,9 +541,7 @@ attributes. If a mapping has a series of deferred columns, all such columns can be undeferred at once, without using a group name, by indicating a wildcard:: - >>> book = session.scalar( - ... select(Book).where(Book.id == 3).options(undefer("*")) - ... 
) + >>> book = session.scalar(select(Book).where(Book.id == 3).options(undefer("*"))) {opensql}SELECT book.summary, book.cover_photo, book.id, book.owner_id, book.title FROM book WHERE book.id = ? @@ -561,7 +556,6 @@ Configuring mapper-level "raiseload" behavior >>> class Base(DeclarativeBase): ... pass - ... The "raiseload" behavior first introduced at :ref:`orm_queryguide_deferred_raiseload` may also be applied as a default mapper-level behavior, using the @@ -576,16 +570,17 @@ will raise on access in all cases unless explicitly "undeferred" using ... owner_id: Mapped[int] = mapped_column(ForeignKey("user_account.id")) ... title: Mapped[str] ... summary: Mapped[str] = mapped_column(Text, deferred=True, deferred_raiseload=True) - ... cover_photo: Mapped[bytes] = mapped_column(LargeBinary, deferred=True, deferred_raiseload=True) + ... cover_photo: Mapped[bytes] = mapped_column( + ... LargeBinary, deferred=True, deferred_raiseload=True + ... ) + ... ... def __repr__(self) -> str: ... return f"Book(id={self.id!r}, title={self.title!r})" Using the above mapping, the ``.summary`` and ``.cover_photo`` columns are by default not loadable:: - >>> book = session.scalar( - ... select(Book).where(Book.id == 2) - ... ) + >>> book = session.scalar(select(Book).where(Book.id == 2)) {opensql}SELECT book.id, book.owner_id, book.title FROM book WHERE book.id = ? @@ -602,10 +597,10 @@ Only by overridding their behavior at query time, typically using :ref:`orm_queryguide_populate_existing` to refresh the already-loaded object's loader options:: >>> book = session.scalar( - ... select(Book). - ... where(Book.id == 2). - ... options(undefer('*')). - ... execution_options(populate_existing=True) + ... select(Book) + ... .where(Book.id == 2) + ... .options(undefer("*")) + ... .execution_options(populate_existing=True) ... 
) {opensql}SELECT book.summary, book.cover_photo, book.id, book.owner_id, book.title FROM book @@ -625,16 +620,15 @@ Loading Arbitrary SQL Expressions onto Objects >>> class Base(DeclarativeBase): ... pass - ... >>> class User(Base): ... __tablename__ = "user_account" ... id: Mapped[int] = mapped_column(primary_key=True) ... name: Mapped[str] ... fullname: Mapped[Optional[str]] ... books: Mapped[List["Book"]] = relationship(back_populates="owner") + ... ... def __repr__(self) -> str: ... return f"User(id={self.id!r}, name={self.name!r}, fullname={self.fullname!r})" - ... >>> class Book(Base): ... __tablename__ = "book" ... id: Mapped[int] = mapped_column(primary_key=True) @@ -643,6 +637,7 @@ Loading Arbitrary SQL Expressions onto Objects ... summary: Mapped[str] = mapped_column(Text) ... cover_photo: Mapped[bytes] = mapped_column(LargeBinary) ... owner: Mapped["User"] = relationship(back_populates="books") + ... ... def __repr__(self) -> str: ... return f"Book(id={self.id!r}, title={self.title!r})" @@ -657,13 +652,9 @@ owner id. This will yield :class:`.Row` objects that each contain two entries, one for ``User`` and one for ``func.count(Book.id)``:: >>> from sqlalchemy import func - >>> stmt = ( - ... select(User, func.count(Book.id)). - ... join_from(User, Book). - ... group_by(Book.owner_id) - ... ) + >>> stmt = select(User, func.count(Book.id)).join_from(User, Book).group_by(Book.owner_id) >>> for user, book_count in session.execute(stmt): - ... print(f"Username: {user.name} Number of books: {book_count}") + ... print(f"Username: {user.name} Number of books: {book_count}") {opensql}SELECT user_account.id, user_account.name, user_account.fullname, count(book.id) AS count_1 FROM user_account JOIN book ON user_account.id = book.owner_id @@ -687,7 +678,6 @@ level :func:`_orm.query_expression` directive may produce this result. >>> class Base(DeclarativeBase): ... pass - ... >>> class Book(Base): ... __tablename__ = "book" ... 
id: Mapped[int] = mapped_column(primary_key=True) @@ -695,6 +685,7 @@ level :func:`_orm.query_expression` directive may produce this result. ... title: Mapped[str] ... summary: Mapped[str] = mapped_column(Text) ... cover_photo: Mapped[bytes] = mapped_column(LargeBinary) + ... ... def __repr__(self) -> str: ... return f"Book(id={self.id!r}, title={self.title!r})" @@ -714,9 +705,9 @@ normally produce ``None``:: ... name: Mapped[str] ... fullname: Mapped[Optional[str]] ... book_count: Mapped[int] = query_expression() + ... ... def __repr__(self) -> str: ... return f"User(id={self.id!r}, name={self.name!r}, fullname={self.fullname!r})" - ... With the ``User.book_count`` attribute configured in our mapping, we may populate it with data from a SQL expression using the @@ -726,13 +717,13 @@ to each ``User`` object as it's loaded:: >>> from sqlalchemy.orm import with_expression >>> stmt = ( - ... select(User). - ... join_from(User, Book). - ... group_by(Book.owner_id). - ... options(with_expression(User.book_count, func.count(Book.id))) + ... select(User) + ... .join_from(User, Book) + ... .group_by(Book.owner_id) + ... .options(with_expression(User.book_count, func.count(Book.id))) ... ) >>> for user in session.scalars(stmt): - ... print(f"Username: {user.name} Number of books: {user.book_count}") + ... 
print(f"Username: {user.name} Number of books: {user.book_count}") {opensql}SELECT count(book.id) AS count_1, user_account.id, user_account.name, user_account.fullname FROM user_account JOIN book ON user_account.id = book.owner_id @@ -765,9 +756,7 @@ The :func:`.query_expression` mapping has these caveats: # load the same A with an option; expression will **not** be applied # to the already-loaded object - obj = session.scalars( - select(A).options(with_expression(A.expr, some_expr)) - ).first() + obj = session.scalars(select(A).options(with_expression(A.expr, some_expr))).first() To ensure the attribute is re-loaded on an existing object, use the :ref:`orm_queryguide_populate_existing` execution option to ensure @@ -776,9 +765,9 @@ The :func:`.query_expression` mapping has these caveats: .. sourcecode:: python obj = session.scalars( - select(A). - options(with_expression(A.expr, some_expr)). - execution_options(populate_existing=True) + select(A) + .options(with_expression(A.expr, some_expr)) + .execution_options(populate_existing=True) ).first() * The :func:`_orm.with_expression` SQL expression **is lost when when the object is @@ -794,9 +783,12 @@ The :func:`.query_expression` mapping has these caveats: .. sourcecode:: python # can't refer to A.expr elsewhere in the query - stmt = select(A).options( - with_expression(A.expr, A.x + A.y) - ).filter(A.expr > 5).order_by(A.expr) + stmt = ( + select(A) + .options(with_expression(A.expr, A.x + A.y)) + .filter(A.expr > 5) + .order_by(A.expr) + ) The ``A.expr`` expression will resolve to NULL in the above WHERE clause and ORDER BY clause. 
To use the expression throughout the query, assign to a @@ -807,9 +799,12 @@ The :func:`.query_expression` mapping has these caveats: # assign desired expression up front, then refer to that in # the query a_expr = A.x + A.y - stmt = select(A).options( - with_expression(A.expr, a_expr) - ).filter(a_expr > 5).order_by(a_expr) + stmt = ( + select(A) + .options(with_expression(A.expr, a_expr)) + .filter(a_expr > 5) + .order_by(a_expr) + ) .. seealso:: @@ -839,4 +834,4 @@ Column Loading API >>> session.close() >>> conn.close() - ROLLBACK... \ No newline at end of file + ROLLBACK... diff --git a/doc/build/orm/queryguide/dml.rst b/doc/build/orm/queryguide/dml.rst index 2f39498bf6..480604e478 100644 --- a/doc/build/orm/queryguide/dml.rst +++ b/doc/build/orm/queryguide/dml.rst @@ -60,12 +60,12 @@ as much as possible for many rows:: >>> session.execute( ... insert(User), ... [ - ... {"name": "spongebob", "fullname": "Spongebob Squarepants"}, - ... {"name": "sandy", "fullname": "Sandy Cheeks"}, - ... {"name": "patrick", "fullname": "Patrick Star"}, - ... {"name": "squidward", "fullname": "Squidward Tentacles"}, - ... {"name": "ehkrabs", "fullname": "Eugene H. Krabs"}, - ... ] + ... {"name": "spongebob", "fullname": "Spongebob Squarepants"}, + ... {"name": "sandy", "fullname": "Sandy Cheeks"}, + ... {"name": "patrick", "fullname": "Patrick Star"}, + ... {"name": "squidward", "fullname": "Squidward Tentacles"}, + ... {"name": "ehkrabs", "fullname": "Eugene H. Krabs"}, + ... ], ... ) {opensql}INSERT INTO user_account (name, fullname) VALUES (?, ?) [...] [('spongebob', 'Spongebob Squarepants'), ('sandy', 'Sandy Cheeks'), ('patrick', 'Patrick Star'), @@ -97,8 +97,10 @@ Getting new objects with RETURNING .. Setup code, not for display - >>> session.rollback(); session.connection() + >>> session.rollback() ROLLBACK... + >>> session.connection() + BEGIN (implicit)... 
The bulk ORM insert feature supports INSERT..RETURNING for selected backends, which can return a :class:`.Result` object that may yield individual @@ -122,12 +124,12 @@ directly without packaging them into :class:`.Row` objects:: >>> users = session.scalars( ... insert(User).returning(User), ... [ - ... {"name": "spongebob", "fullname": "Spongebob Squarepants"}, - ... {"name": "sandy", "fullname": "Sandy Cheeks"}, - ... {"name": "patrick", "fullname": "Patrick Star"}, - ... {"name": "squidward", "fullname": "Squidward Tentacles"}, - ... {"name": "ehkrabs", "fullname": "Eugene H. Krabs"}, - ... ] + ... {"name": "spongebob", "fullname": "Spongebob Squarepants"}, + ... {"name": "sandy", "fullname": "Sandy Cheeks"}, + ... {"name": "patrick", "fullname": "Patrick Star"}, + ... {"name": "squidward", "fullname": "Squidward Tentacles"}, + ... {"name": "ehkrabs", "fullname": "Eugene H. Krabs"}, + ... ], ... ) {opensql}INSERT INTO user_account (name, fullname) VALUES (?, ?), (?, ?), (?, ?), (?, ?), (?, ?) RETURNING id, name, fullname, species @@ -164,8 +166,10 @@ Using Heterogenous Parameter Dictionaries .. Setup code, not for display - >>> session.rollback(); session.connection() + >>> session.rollback() ROLLBACK... + >>> session.connection() + BEGIN (implicit)... The ORM bulk insert feature supports lists of parameter dictionaries that are "heterogenous", which basically means "individual dictionaries can have different @@ -176,12 +180,20 @@ to each set of keys and batch accordingly into separate INSERT statements:: >>> users = session.scalars( ... insert(User).returning(User), ... [ - ... {"name": "spongebob", "fullname": "Spongebob Squarepants", "species": "Sea Sponge"}, - ... {"name": "sandy", "fullname": "Sandy Cheeks", "species": "Squirrel"}, - ... {"name": "patrick", "species": "Starfish"}, - ... {"name": "squidward", "fullname": "Squidward Tentacles", "species": "Squid"}, - ... {"name": "ehkrabs", "fullname": "Eugene H. Krabs", "species": "Crab"}, - ... ] + ... 
{ + ... "name": "spongebob", + ... "fullname": "Spongebob Squarepants", + ... "species": "Sea Sponge", + ... }, + ... {"name": "sandy", "fullname": "Sandy Cheeks", "species": "Squirrel"}, + ... {"name": "patrick", "species": "Starfish"}, + ... { + ... "name": "squidward", + ... "fullname": "Squidward Tentacles", + ... "species": "Squid", + ... }, + ... {"name": "ehkrabs", "fullname": "Eugene H. Krabs", "species": "Crab"}, + ... ], ... ) {opensql}INSERT INTO user_account (name, fullname, species) VALUES (?, ?, ?), (?, ?, ?) RETURNING id, name, fullname, species [... (insertmanyvalues)] ('spongebob', 'Spongebob Squarepants', 'Sea Sponge', 'sandy', 'Sandy Cheeks', 'Squirrel') @@ -202,8 +214,9 @@ Bulk INSERT for Joined Table Inheritance .. Setup code, not for display - >>> session.rollback(); session.connection() + >>> session.rollback() ROLLBACK + >>> session.connection() BEGIN... ORM bulk insert builds upon the internal system that is used by the @@ -220,9 +233,9 @@ the returned rows include values for all columns inserted:: >>> managers = session.scalars( ... insert(Manager).returning(Manager), ... [ - ... {"name": "sandy", "manager_name": "Sandy Cheeks"}, - ... {"name": "ehkrabs", "manager_name": "Eugene H. Krabs"}, - ... ] + ... {"name": "sandy", "manager_name": "Sandy Cheeks"}, + ... {"name": "ehkrabs", "manager_name": "Eugene H. Krabs"}, + ... ], ... ) {opensql}INSERT INTO employee (name, type) VALUES (?, ?), (?, ?) RETURNING id, name, type [... (insertmanyvalues)] ('sandy', 'manager', 'ehkrabs', 'manager') @@ -249,6 +262,7 @@ As an example, given an ORM mapping that includes a "timestamp" column: import datetime + class LogRecord(Base): __tablename__ = "log_record" id: Mapped[int] = mapped_column(primary_key=True) @@ -264,12 +278,12 @@ and then pass the additional records using "bulk" mode:: >>> from sqlalchemy import func >>> log_record_result = session.scalars( ... insert(LogRecord).values(code="SQLA", timestamp=func.now()).returning(LogRecord), - ... [ - ... 
{"message": "log message #1"}, - ... {"message": "log message #2"}, - ... {"message": "log message #3"}, - ... {"message": "log message #4"}, - ... ] + ... [ + ... {"message": "log message #1"}, + ... {"message": "log message #2"}, + ... {"message": "log message #3"}, + ... {"message": "log message #4"}, + ... ], ... ) {opensql}INSERT INTO log_record (message, code, timestamp) VALUES (?, ?, CURRENT_TIMESTAMP), (?, ?, CURRENT_TIMESTAMP), (?, ?, CURRENT_TIMESTAMP), @@ -298,12 +312,20 @@ ORM Bulk Insert with Per Row SQL Expressions >>> session.execute( ... insert(User), ... [ - ... {"name": "spongebob", "fullname": "Spongebob Squarepants", "species": "Sea Sponge"}, - ... {"name": "sandy", "fullname": "Sandy Cheeks", "species": "Squirrel"}, - ... {"name": "patrick", "species": "Starfish"}, - ... {"name": "squidward", "fullname": "Squidward Tentacles", "species": "Squid"}, - ... {"name": "ehkrabs", "fullname": "Eugene H. Krabs", "species": "Crab"}, - ... ] + ... { + ... "name": "spongebob", + ... "fullname": "Spongebob Squarepants", + ... "species": "Sea Sponge", + ... }, + ... {"name": "sandy", "fullname": "Sandy Cheeks", "species": "Squirrel"}, + ... {"name": "patrick", "species": "Starfish"}, + ... { + ... "name": "squidward", + ... "fullname": "Squidward Tentacles", + ... "species": "Squid", + ... }, + ... {"name": "ehkrabs", "fullname": "Eugene H. Krabs", "species": "Crab"}, + ... ], ... ) BEGIN... @@ -323,22 +345,24 @@ and also demonstrates :meth:`_dml.Insert.returning` in this form, is below:: >>> from sqlalchemy import select >>> address_result = session.scalars( - ... insert(Address).values( - ... [ - ... { - ... "user_id": select(User.id).where(User.name == 'sandy'), - ... "email_address": "sandy@company.com" - ... }, - ... { - ... "user_id": select(User.id).where(User.name == 'spongebob'), - ... "email_address": "spongebob@company.com" - ... }, - ... { - ... "user_id": select(User.id).where(User.name == 'patrick'), - ... 
"email_address": "patrick@company.com" - ... }, - ... ] - ... ).returning(Address), + ... insert(Address) + ... .values( + ... [ + ... { + ... "user_id": select(User.id).where(User.name == "sandy"), + ... "email_address": "sandy@company.com", + ... }, + ... { + ... "user_id": select(User.id).where(User.name == "spongebob"), + ... "email_address": "spongebob@company.com", + ... }, + ... { + ... "user_id": select(User.id).where(User.name == "patrick"), + ... "email_address": "patrick@company.com", + ... }, + ... ] + ... ) + ... .returning(Address), ... ) {opensql}INSERT INTO address (user_id, email_address) VALUES ((SELECT user_account.id @@ -396,18 +420,13 @@ for session-synchronization. Code which makes use of :meth:`.Session.bulk_insert_mappings` for example can port code as follows, starting with this mappings example:: - session.bulk_insert_mappings( - User, - [{"name": "u1"}, {"name": "u2"}, {"name": "u3"}] - ) + session.bulk_insert_mappings(User, [{"name": "u1"}, {"name": "u2"}, {"name": "u3"}]) The above is expressed using the new API as:: from sqlalchemy import insert - session.execute( - insert(User), - [{"name": "u1"}, {"name": "u2"}, {"name": "u3"}] - ) + + session.execute(insert(User), [{"name": "u1"}, {"name": "u2"}, {"name": "u3"}]) .. seealso:: @@ -456,14 +475,16 @@ as ORM mapped attribute keys, rather than column names: .. Setup code, not for display - >>> session.rollback(); + >>> session.rollback() ROLLBACK - >>> session.execute(insert(User).values( - ... [ - ... dict(name="sandy"), - ... dict(name="spongebob", fullname="Spongebob Squarepants"), - ... ] - ... )) + >>> session.execute( + ... insert(User).values( + ... [ + ... dict(name="sandy"), + ... dict(name="spongebob", fullname="Spongebob Squarepants"), + ... ] + ... ) + ... ) BEGIN... :: @@ -471,16 +492,15 @@ as ORM mapped attribute keys, rather than column names: >>> from sqlalchemy.dialects.sqlite import insert as sqlite_upsert >>> stmt = sqlite_upsert(User).values( ... [ - ... 
{"name": "spongebob", "fullname": "Spongebob Squarepants"}, - ... {"name": "sandy", "fullname": "Sandy Cheeks"}, - ... {"name": "patrick", "fullname": "Patrick Star"}, - ... {"name": "squidward", "fullname": "Squidward Tentacles"}, - ... {"name": "ehkrabs", "fullname": "Eugene H. Krabs"}, - ... ] + ... {"name": "spongebob", "fullname": "Spongebob Squarepants"}, + ... {"name": "sandy", "fullname": "Sandy Cheeks"}, + ... {"name": "patrick", "fullname": "Patrick Star"}, + ... {"name": "squidward", "fullname": "Squidward Tentacles"}, + ... {"name": "ehkrabs", "fullname": "Eugene H. Krabs"}, + ... ] ... ) >>> stmt = stmt.on_conflict_do_update( - ... index_elements=[User.name], - ... set_=dict(fullname=stmt.excluded.fullname) + ... index_elements=[User.name], set_=dict(fullname=stmt.excluded.fullname) ... ) >>> session.execute(stmt) {opensql}INSERT INTO user_account (name, fullname) @@ -503,7 +523,9 @@ works with upsert statements in the same way as was demonstrated at relevant ORM entity class may be passed. Continuing from the example in the previous section:: - >>> result = session.scalars(stmt.returning(User), execution_options={"populate_existing": True}) + >>> result = session.scalars( + ... stmt.returning(User), execution_options={"populate_existing": True} + ... ) {opensql}INSERT INTO user_account (name, fullname) VALUES (?, ?), (?, ?), (?, ?), (?, ?), (?, ?) ON CONFLICT (name) DO UPDATE SET fullname = excluded.fullname @@ -543,21 +565,23 @@ ORM Bulk UPDATE by Primary Key .. Setup code, not for display - >>> session.rollback(); + >>> session.rollback() ROLLBACK >>> session.execute( ... insert(User), ... [ - ... {"name": "spongebob", "fullname": "Spongebob Squarepants"}, - ... {"name": "sandy", "fullname": "Sandy Cheeks"}, - ... {"name": "patrick", "fullname": "Patrick Star"}, - ... {"name": "squidward", "fullname": "Squidward Tentacles"}, - ... {"name": "ehkrabs", "fullname": "Eugene H. Krabs"}, - ... ] + ... 
{"name": "spongebob", "fullname": "Spongebob Squarepants"}, + ... {"name": "sandy", "fullname": "Sandy Cheeks"}, + ... {"name": "patrick", "fullname": "Patrick Star"}, + ... {"name": "squidward", "fullname": "Squidward Tentacles"}, + ... {"name": "ehkrabs", "fullname": "Eugene H. Krabs"}, + ... ], ... ) BEGIN ... - >>> session.commit(); session.connection() + >>> session.commit() COMMIT... + >>> session.connection() + BEGIN ... The :class:`_dml.Update` construct may be used with :meth:`_orm.Session.execute` in a similar way as the :class:`_dml.Insert` @@ -582,10 +606,10 @@ appropriate WHERE criteria to match each row by primary key, and using >>> session.execute( ... update(User), ... [ - ... {"id": 1, "fullname": "Spongebob Squarepants"}, - ... {"id": 3, "fullname": "Patrick Star"}, - ... {"id": 5, "fullname": "Eugene H. Krabs"}, - ... ] + ... {"id": 1, "fullname": "Spongebob Squarepants"}, + ... {"id": 3, "fullname": "Patrick Star"}, + ... {"id": 5, "fullname": "Eugene H. Krabs"}, + ... ], ... ) {opensql}UPDATE user_account SET fullname=? WHERE user_account.id = ? [...] [('Spongebob Squarepants', 1), ('Patrick Star', 3), ('Eugene H. Krabs', 5)] @@ -619,11 +643,15 @@ Bulk UPDATE by Primary Key for Joined Table Inheritance >>> session.execute( ... insert(Manager).returning(Manager), ... [ - ... {"name": "sandy", "manager_name": "Sandy Cheeks"}, - ... {"name": "ehkrabs", "manager_name": "Eugene H. Krabs"}, - ... ] - ... ); session.commit(); session.connection() + ... {"name": "sandy", "manager_name": "Sandy Cheeks"}, + ... {"name": "ehkrabs", "manager_name": "Eugene H. Krabs"}, + ... ], + ... ) INSERT... + >>> session.commit() + COMMIT... + >>> session.connection() + BEGIN (implicit)... ORM bulk update has similar behavior to ORM bulk insert when using mappings with joined table inheritance; as described at @@ -637,9 +665,17 @@ Example:: >>> session.execute( ... update(Manager), ... [ - ... 
{"id": 1, "name": "scheeks", "manager_name": "Sandy Cheeks, President"}, - ... {"id": 2, "name": "eugene", "manager_name": "Eugene H. Krabs, VP Marketing"}, - ... ] + ... { + ... "id": 1, + ... "name": "scheeks", + ... "manager_name": "Sandy Cheeks, President", + ... }, + ... { + ... "id": 2, + ... "name": "eugene", + ... "manager_name": "Eugene H. Krabs, VP Marketing", + ... }, + ... ], ... ) {opensql}UPDATE employee SET name=? WHERE employee.id = ? [...] [('scheeks', 1), ('eugene', 2)] @@ -662,22 +698,23 @@ session-synchronization are not included. The example below:: session.bulk_update_mappings( - User, - [ - {"id": 1, "name": "scheeks", "manager_name": "Sandy Cheeks, President"}, - {"id": 2, "name": "eugene", "manager_name": "Eugene H. Krabs, VP Marketing"}, - ] + User, + [ + {"id": 1, "name": "scheeks", "manager_name": "Sandy Cheeks, President"}, + {"id": 2, "name": "eugene", "manager_name": "Eugene H. Krabs, VP Marketing"}, + ], ) Is expressed using the new API as:: from sqlalchemy import update + session.execute( update(User), [ {"id": 1, "name": "scheeks", "manager_name": "Sandy Cheeks, President"}, {"id": 2, "name": "eugene", "manager_name": "Eugene H. Krabs, VP Marketing"}, - ] + ], ) .. seealso:: @@ -693,8 +730,10 @@ ORM UPDATE and DELETE with Custom WHERE Criteria .. Setup code, not for display - >>> session.rollback(); session.connection() + >>> session.rollback() ROLLBACK... + >>> session.connection() + BEGIN (implicit)... The :class:`_dml.Update` and :class:`_dml.Delete` constructs, when constructed with custom WHERE criteria (that is, using the :meth:`_dml.Update.where` and @@ -714,7 +753,11 @@ field of multiple rows :: >>> from sqlalchemy import update - >>> stmt = update(User).where(User.name.in_(["squidward", "sandy"])).values(fullname="Name starts with S") + >>> stmt = ( + ... update(User) + ... .where(User.name.in_(["squidward", "sandy"])) + ... .values(fullname="Name starts with S") + ... 
) >>> session.execute(stmt) {opensql}UPDATE user_account SET fullname=? WHERE user_account.name IN (?, ?) [...] ('Name starts with S', 'squidward', 'sandy') @@ -732,8 +775,10 @@ For a DELETE, an example of deleting rows based on criteria:: .. Setup code, not for display - >>> session.rollback(); session.connection() + >>> session.rollback() ROLLBACK... + >>> session.connection() + BEGIN (implicit)... .. _orm_queryguide_update_delete_sync: @@ -757,9 +802,7 @@ which is passed as an string ORM execution option, typically by using the >>> from sqlalchemy import update >>> stmt = ( - ... update(User). - ... where(User.name == "squidward"). - ... values(fullname="Squidward Tentacles") + ... update(User).where(User.name == "squidward").values(fullname="Squidward Tentacles") ... ) >>> session.execute(stmt, execution_options={"synchronize_session": False}) {opensql}UPDATE user_account SET fullname=? WHERE user_account.name = ? @@ -771,10 +814,10 @@ The execution option may also be bundled with the statement itself using the >>> from sqlalchemy import update >>> stmt = ( - ... update(User). - ... where(User.name == "squidward"). - ... values(fullname="Squidward Tentacles"). - ... execution_options(synchronize_session=False) + ... update(User) + ... .where(User.name == "squidward") + ... .values(fullname="Squidward Tentacles") + ... .execution_options(synchronize_session=False) ... ) >>> session.execute(stmt) {opensql}UPDATE user_account SET fullname=? WHERE user_account.name = ? @@ -850,10 +893,10 @@ and/or columns may be indicated for RETURNING:: >>> from sqlalchemy import update >>> stmt = ( - ... update(User). - ... where(User.name == "squidward"). - ... values(fullname="Squidward Tentacles"). - ... returning(User) + ... update(User) + ... .where(User.name == "squidward") + ... .values(fullname="Squidward Tentacles") + ... .returning(User) ... ) >>> result = session.scalars(stmt) {opensql}UPDATE user_account SET fullname=? WHERE user_account.name = ? 
@@ -879,8 +922,10 @@ UPDATE/DELETE with Custom WHERE Criteria for Joined Table Inheritance .. Setup code, not for display - >>> session.rollback(); session.connection() + >>> session.rollback() ROLLBACK... + >>> session.connection() + BEGIN (implicit)... The UPDATE/DELETE with WHERE criteria feature, unlike the :ref:`orm_queryguide_bulk_update`, only emits a single UPDATE or DELETE @@ -901,9 +946,9 @@ that are local to the subclass table, as in the example below:: >>> stmt = ( - ... update(Manager). - ... where(Manager.id == 1). - ... values(manager_name="Sandy Cheeks, President") + ... update(Manager) + ... .where(Manager.id == 1) + ... .values(manager_name="Sandy Cheeks, President") ... ) >>> session.execute(stmt) UPDATE manager SET manager_name=? WHERE manager.id = ? @@ -914,13 +959,12 @@ With the above form, a rudimentary way to refer to the base table in order to locate rows which will work on any SQL backend is so use a subquery:: >>> stmt = ( - ... update(Manager). - ... where( - ... Manager.id == - ... select(Employee.id). - ... where(Employee.name == "sandy").scalar_subquery() - ... ). - ... values(manager_name="Sandy Cheeks, President") + ... update(Manager) + ... .where( + ... Manager.id + ... == select(Employee.id).where(Employee.name == "sandy").scalar_subquery() + ... ) + ... .values(manager_name="Sandy Cheeks, President") ... ) >>> session.execute(stmt) {opensql}UPDATE manager SET manager_name=? WHERE manager.id = (SELECT employee.id @@ -934,12 +978,9 @@ as additional plain WHERE criteria, however the criteria between the two tables must be stated explicitly in some way:: >>> stmt = ( - ... update(Manager). - ... where( - ... Manager.id == Employee.id, - ... Employee.name == "sandy" - ... ). - ... values(manager_name="Sandy Cheeks, President") + ... update(Manager) + ... .where(Manager.id == Employee.id, Employee.name == "sandy") + ... .values(manager_name="Sandy Cheeks, President") ... 
) >>> session.execute(stmt) {opensql}UPDATE manager SET manager_name=? FROM employee @@ -987,5 +1028,6 @@ the legacy methods don't provide for explicit RETURNING support. .. Setup code, not for display - >>> session.close(); conn.close() - ROLLBACK + >>> session.close() + ROLLBACK... + >>> conn.close() diff --git a/doc/build/orm/queryguide/inheritance.rst b/doc/build/orm/queryguide/inheritance.rst index 7d489c48c0..4506f4ffc7 100644 --- a/doc/build/orm/queryguide/inheritance.rst +++ b/doc/build/orm/queryguide/inheritance.rst @@ -201,9 +201,13 @@ this collection on all ``Manager`` objects, where the sub-attributes of >>> from sqlalchemy.orm import selectinload >>> from sqlalchemy.orm import selectin_polymorphic - >>> stmt = select(Employee).order_by(Employee.id).options( - ... selectin_polymorphic(Employee, [Manager, Engineer]), - ... selectinload(Manager.paperwork) + >>> stmt = ( + ... select(Employee) + ... .order_by(Employee.id) + ... .options( + ... selectin_polymorphic(Employee, [Manager, Engineer]), + ... selectinload(Manager.paperwork), + ... ) ... ) {opensql}>>> objects = session.scalars(stmt).all() BEGIN (implicit) @@ -244,13 +248,12 @@ as a chained loader option; in this form, the first argument is implicit from the previous loader option (in this case :func:`_orm.selectinload`), so we only indicate the additional target subclasses we wish to load:: - >>> stmt = ( - ... select(Company). - ... options(selectinload(Company.employees).selectin_polymorphic([Manager, Engineer])) + >>> stmt = select(Company).options( + ... selectinload(Company.employees).selectin_polymorphic([Manager, Engineer]) ... ) >>> for company in session.scalars(stmt): - ... print(f"company: {company.name}") - ... print(f"employees: {company.employees}") + ... print(f"company: {company.name}") + ... print(f"employees: {company.employees}") {opensql}SELECT company.id, company.name FROM company [...] () @@ -292,34 +295,33 @@ parameter within ``Engineer`` and ``Manager`` subclasses: .. 
sourcecode:: python class Employee(Base): - __tablename__ = 'employee' + __tablename__ = "employee" id = mapped_column(Integer, primary_key=True) name = mapped_column(String(50)) type = mapped_column(String(50)) - __mapper_args__ = { - 'polymorphic_identity': 'employee', - 'polymorphic_on': type - } + __mapper_args__ = {"polymorphic_identity": "employee", "polymorphic_on": type} + class Engineer(Employee): - __tablename__ = 'engineer' - id = mapped_column(Integer, ForeignKey('employee.id'), primary_key=True) + __tablename__ = "engineer" + id = mapped_column(Integer, ForeignKey("employee.id"), primary_key=True) engineer_info = mapped_column(String(30)) __mapper_args__ = { - 'polymorphic_load': 'selectin', - 'polymorphic_identity': 'engineer', + "polymorphic_load": "selectin", + "polymorphic_identity": "engineer", } + class Manager(Employee): - __tablename__ = 'manager' - id = mapped_column(Integer, ForeignKey('employee.id'), primary_key=True) + __tablename__ = "manager" + id = mapped_column(Integer, ForeignKey("employee.id"), primary_key=True) manager_name = mapped_column(String(30)) __mapper_args__ = { - 'polymorphic_load': 'selectin', - 'polymorphic_identity': 'manager', + "polymorphic_load": "selectin", + "polymorphic_identity": "manager", } With the above mapping, SELECT statements against the ``Employee`` class will @@ -406,14 +408,15 @@ construct to create criteria against both classes at once:: >>> from sqlalchemy import or_ >>> employee_poly = with_polymorphic(Employee, [Engineer, Manager]) >>> stmt = ( - ... select(employee_poly). - ... where( - ... or_( - ... employee_poly.Manager.manager_name == "Eugene H. Krabs", - ... employee_poly.Engineer.engineer_info == "Senior Customer Engagement Engineer" - ... ) - ... ). - ... order_by(employee_poly.id) + ... select(employee_poly) + ... .where( + ... or_( + ... employee_poly.Manager.manager_name == "Eugene H. Krabs", + ... employee_poly.Engineer.engineer_info + ... == "Senior Customer Engagement Engineer", + ... 
) + ... ) + ... .order_by(employee_poly.id) ... ) >>> objects = session.scalars(stmt).all() {opensql}SELECT employee.id, employee.name, employee.type, employee.company_id, manager.id AS id_1, @@ -462,18 +465,18 @@ column along with some additional limiting criteria against the >>> manager_employee = with_polymorphic(Employee, [Manager], aliased=True, flat=True) >>> engineer_employee = with_polymorphic(Employee, [Engineer], aliased=True, flat=True) >>> stmt = ( - ... select(manager_employee, engineer_employee). - ... join( + ... select(manager_employee, engineer_employee) + ... .join( ... engineer_employee, ... engineer_employee.company_id == manager_employee.company_id, - ... ). - ... where( + ... ) + ... .where( ... or_( ... manager_employee.name == "Mr. Krabs", - ... manager_employee.Manager.manager_name == "Eugene H. Krabs" + ... manager_employee.Manager.manager_name == "Eugene H. Krabs", ... ) - ... ). - ... order_by(engineer_employee.name, manager_employee.name) + ... ) + ... .order_by(engineer_employee.name, manager_employee.name) ... ) >>> for manager, engineer in session.execute(stmt): ... print(f"{manager} {engineer}") @@ -506,18 +509,18 @@ subquery, producing a more verbose form:: >>> manager_employee = with_polymorphic(Employee, [Manager], aliased=True) >>> engineer_employee = with_polymorphic(Employee, [Engineer], aliased=True) >>> stmt = ( - ... select(manager_employee, engineer_employee). - ... join( + ... select(manager_employee, engineer_employee) + ... .join( ... engineer_employee, ... engineer_employee.company_id == manager_employee.company_id, - ... ). - ... where( + ... ) + ... .where( ... or_( ... manager_employee.name == "Mr. Krabs", - ... manager_employee.Manager.manager_name == "Eugene H. Krabs" + ... manager_employee.Manager.manager_name == "Eugene H. Krabs", ... ) - ... ). - ... order_by(engineer_employee.name, manager_employee.name) + ... ) + ... .order_by(engineer_employee.name, manager_employee.name) ... 
) >>> print(stmt) {opensql}SELECT anon_1.employee_id, anon_1.employee_name, anon_1.employee_type, @@ -575,34 +578,33 @@ For example, we may state our ``Employee`` mapping using .. sourcecode:: python class Employee(Base): - __tablename__ = 'employee' + __tablename__ = "employee" id = mapped_column(Integer, primary_key=True) name = mapped_column(String(50)) type = mapped_column(String(50)) - __mapper_args__ = { - 'polymorphic_identity': 'employee', - 'polymorphic_on': type - } + __mapper_args__ = {"polymorphic_identity": "employee", "polymorphic_on": type} + class Engineer(Employee): - __tablename__ = 'engineer' - id = mapped_column(Integer, ForeignKey('employee.id'), primary_key=True) + __tablename__ = "engineer" + id = mapped_column(Integer, ForeignKey("employee.id"), primary_key=True) engineer_info = mapped_column(String(30)) __mapper_args__ = { - 'polymorphic_load': 'inline', - 'polymorphic_identity': 'engineer', + "polymorphic_load": "inline", + "polymorphic_identity": "engineer", } + class Manager(Employee): - __tablename__ = 'manager' - id = mapped_column(Integer, ForeignKey('employee.id'), primary_key=True) + __tablename__ = "manager" + id = mapped_column(Integer, ForeignKey("employee.id"), primary_key=True) manager_name = mapped_column(String(30)) __mapper_args__ = { - 'polymorphic_load': 'inline', - 'polymorphic_identity': 'manager', + "polymorphic_load": "inline", + "polymorphic_identity": "manager", } With the above mapping, SELECT statements against the ``Employee`` class will @@ -652,33 +654,35 @@ LEFT OUTER JOINED, as in: .. 
sourcecode:: python class Employee(Base): - __tablename__ = 'employee' + __tablename__ = "employee" id = mapped_column(Integer, primary_key=True) name = mapped_column(String(50)) type = mapped_column(String(50)) __mapper_args__ = { - 'polymorphic_identity': 'employee', - 'with_polymorphic': '*', - 'polymorphic_on': type + "polymorphic_identity": "employee", + "with_polymorphic": "*", + "polymorphic_on": type, } + class Engineer(Employee): - __tablename__ = 'engineer' - id = mapped_column(Integer, ForeignKey('employee.id'), primary_key=True) + __tablename__ = "engineer" + id = mapped_column(Integer, ForeignKey("employee.id"), primary_key=True) engineer_info = mapped_column(String(30)) __mapper_args__ = { - 'polymorphic_identity': 'engineer', + "polymorphic_identity": "engineer", } + class Manager(Employee): - __tablename__ = 'manager' - id = mapped_column(Integer, ForeignKey('employee.id'), primary_key=True) + __tablename__ = "manager" + id = mapped_column(Integer, ForeignKey("employee.id"), primary_key=True) manager_name = mapped_column(String(30)) __mapper_args__ = { - 'polymorphic_identity': 'manager', + "polymorphic_identity": "manager", } Overall, the LEFT OUTER JOIN format used by :func:`_orm.with_polymorphic` and @@ -708,12 +712,13 @@ using a :func:`_orm.with_polymorphic` entity as the target:: >>> employee_plus_engineer = with_polymorphic(Employee, [Engineer]) >>> stmt = ( - ... select(Company.name, employee_plus_engineer.name). - ... join(Company.employees.of_type(employee_plus_engineer)). - ... where( + ... select(Company.name, employee_plus_engineer.name) + ... .join(Company.employees.of_type(employee_plus_engineer)) + ... .where( ... or_( ... employee_plus_engineer.name == "SpongeBob", - ... employee_plus_engineer.Engineer.engineer_info == "Senior Customer Engagement Engineer" + ... employee_plus_engineer.Engineer.engineer_info + ... == "Senior Customer Engagement Engineer", ... ) ... ) ... 
) @@ -732,12 +737,12 @@ particular sub-type of the :func:`_orm.relationship`'s target. The above query could be written strictly in terms of ``Engineer`` targets as follows:: >>> stmt = ( - ... select(Company.name, Engineer.name). - ... join(Company.employees.of_type(Engineer)). - ... where( + ... select(Company.name, Engineer.name) + ... .join(Company.employees.of_type(Engineer)) + ... .where( ... or_( ... Engineer.name == "SpongeBob", - ... Engineer.engineer_info == "Senior Customer Engagement Engineer" + ... Engineer.engineer_info == "Senior Customer Engagement Engineer", ... ) ... ) ... ) @@ -770,14 +775,11 @@ As a basic example, if we wished to load ``Company`` objects, and additionally eagerly load all elements of ``Company.employees`` using the :func:`_orm.with_polymorphic` construct against the full hierarchy, we may write:: - >>> all_employees = with_polymorphic(Employee, '*') - >>> stmt = ( - ... select(Company). - ... options(selectinload(Company.employees.of_type(all_employees))) - ... ) + >>> all_employees = with_polymorphic(Employee, "*") + >>> stmt = select(Company).options(selectinload(Company.employees.of_type(all_employees))) >>> for company in session.scalars(stmt): - ... print(f"company: {company.name}") - ... print(f"employees: {company.employees}") + ... print(f"company: {company.name}") + ... print(f"employees: {company.employees}") {opensql}SELECT company.id, company.name FROM company [...] () @@ -839,7 +841,7 @@ As an example, a query for the single-inheritance example mapping of >>> stmt = select(Employee).order_by(Employee.id) >>> for obj in session.scalars(stmt): - ... print(f"{obj}") + ... 
print(f"{obj}") {opensql}BEGIN (implicit) SELECT employee.id, employee.name, employee.type FROM employee ORDER BY employee.id @@ -906,7 +908,7 @@ option as well as the :func:`_orm.with_polymorphic` option, the latter of which simply includes the additional columns and from a SQL perspective is more efficient for single-inheritance mappers:: - >>> employees = with_polymorphic(Employee, '*') + >>> employees = with_polymorphic(Employee, "*") >>> stmt = select(employees).order_by(employees.id) >>> objects = session.scalars(stmt).all() {opensql}BEGIN (implicit) @@ -932,31 +934,30 @@ is below:: >>> class Base(DeclarativeBase): ... pass - ... >>> class Employee(Base): ... __tablename__ = "employee" ... id: Mapped[int] = mapped_column(primary_key=True) ... name: Mapped[str] ... type: Mapped[str] + ... ... def __repr__(self): ... return f"{self.__class__.__name__}({self.name!r})" + ... ... __mapper_args__ = { ... "polymorphic_identity": "employee", ... "polymorphic_on": "type", ... } - ... >>> class Manager(Employee): ... manager_name: Mapped[str] = mapped_column(nullable=True) ... __mapper_args__ = { ... "polymorphic_identity": "manager", - ... "polymorphic_load": "inline" + ... "polymorphic_load": "inline", ... } - ... >>> class Engineer(Employee): ... engineer_info: Mapped[str] = mapped_column(nullable=True) ... __mapper_args__ = { ... "polymorphic_identity": "engineer", - ... "polymorphic_load": "inline" + ... "polymorphic_load": "inline", ... 
} @@ -986,4 +987,4 @@ Inheritance Loading API >>> session.close() ROLLBACK - >>> conn.close() \ No newline at end of file + >>> conn.close() diff --git a/doc/build/orm/queryguide/relationships.rst b/doc/build/orm/queryguide/relationships.rst index aabfd48ffa..4c8125f5a4 100644 --- a/doc/build/orm/queryguide/relationships.rst +++ b/doc/build/orm/queryguide/relationships.rst @@ -113,12 +113,14 @@ statement for ``Parent`` objects is emitted:: class Base(DeclarativeBase): pass + class Parent(Base): __tablename__ = "parent" id: Mapped[int] = mapped_column(primary_key=True) children: Mapped[list["Child"]] = relationship(lazy="selectin") + class Child(Base): __tablename__ = "child" @@ -166,8 +168,7 @@ to specify how loading should occur further levels deep:: from sqlalchemy.orm import joinedload stmt = select(Parent).options( - joinedload(Parent.children). - subqueryload(Child.subelements) + joinedload(Parent.children).subqueryload(Child.subelements) ) Chained loader options can be applied against a "lazy" loaded collection. @@ -177,10 +178,7 @@ access, the specified option will then take effect:: from sqlalchemy import select from sqlalchemy.orm import lazyload - stmt = select(Parent).options( - lazyload(Parent.children). - subqueryload(Child.subelements) - ) + stmt = select(Parent).options(lazyload(Parent.children).subqueryload(Child.subelements)) Above, the query will return ``Parent`` objects without the ``children`` collections loaded. When the ``children`` collection on a particular @@ -214,9 +212,9 @@ the :ref:`orm_queryguide_populate_existing` execution option:: from sqlalchemy.orm import lazyload stmt = ( - select(A). - options(lazyload(A.bs.and_(B.id > 5))). 
- execution_options(populate_existing=True) + select(A) + .options(lazyload(A.bs.and_(B.id > 5))) + .execution_options(populate_existing=True) ) In order to add filtering criteria to all occurrences of an entity throughout @@ -236,10 +234,7 @@ of a particular attribute, the :func:`.defaultload` method/function may be used: from sqlalchemy import select from sqlalchemy.orm import defaultload - stmt = select(A).options( - defaultload(A.atob). - joinedload(B.btoc) - ) + stmt = select(A).options(defaultload(A.atob).joinedload(B.btoc)) A similar approach can be used to specify multiple sub-options at once, using the :meth:`_orm.Load.options` method:: @@ -249,13 +244,9 @@ the :meth:`_orm.Load.options` method:: from sqlalchemy.orm import joinedload stmt = select(A).options( - defaultload(A.atob).options( - joinedload(B.btoc), - joinedload(B.btod) - ) + defaultload(A.atob).options(joinedload(B.btoc), joinedload(B.btod)) ) - .. seealso:: :ref:`orm_queryguide_load_only_related` - illustrates examples of combining @@ -267,10 +258,7 @@ the :meth:`_orm.Load.options` method:: upon collections loaded by that specific object for as long as it exists in memory. For example, given the previous example:: - stmt = select(Parent).options( - lazyload(Parent.children). 
- subqueryload(Child.subelements) - ) + stmt = select(Parent).options(lazyload(Parent.children).subqueryload(Child.subelements)) if the ``children`` collection on a particular ``Parent`` object loaded by the above query is expired (such as when a :class:`.Session` object's @@ -377,9 +365,7 @@ to set up only one attribute as eager loading, and all the rest as raise:: from sqlalchemy.orm import joinedload from sqlalchemy.orm import raiseload - stmt = select(Order).options( - joinedload(Order.items), raiseload('*') - ) + stmt = select(Order).options(joinedload(Order.items), raiseload("*")) The above wildcard will apply to **all** relationships not just on ``Order`` besides ``items``, but all those on the ``Item`` objects as well. To set up @@ -390,16 +376,11 @@ path with :class:`_orm.Load`:: from sqlalchemy.orm import joinedload from sqlalchemy.orm import Load - stmt = select(Order).options( - joinedload(Order.items), Load(Order).raiseload('*') - ) + stmt = select(Order).options(joinedload(Order.items), Load(Order).raiseload("*")) Conversely, to set up the raise for just the ``Item`` objects:: - stmt = select(Order).options( - joinedload(Order.items).raiseload('*') - ) - + stmt = select(Order).options(joinedload(Order.items).raiseload("*")) The :func:`.raiseload` option applies only to relationship attributes. For column-oriented attributes, the :func:`.defer` option supports the @@ -444,11 +425,7 @@ using the :func:`_orm.joinedload` loader option: >>> from sqlalchemy import select >>> from sqlalchemy.orm import joinedload - >>> stmt = ( - ... select(User). - ... options(joinedload(User.addresses)).\ - ... filter_by(name='spongebob') - ... ) + >>> stmt = select(User).options(joinedload(User.addresses)).filter_by(name="spongebob") >>> spongebob = session.scalars(stmt).unique().all() {opensql}SELECT addresses_1.id AS addresses_1_id, @@ -488,7 +465,7 @@ at the mapping level via the :paramref:`_orm.relationship.innerjoin` flag:: class Address(Base): # ... 
- user_id: Mapped[int] = mapped_column(ForeignKey('users.id')) + user_id: Mapped[int] = mapped_column(ForeignKey("users.id")) user: Mapped[User] = relationship(lazy="joined", innerjoin=True) At the query option level, via the :paramref:`_orm.joinedload.innerjoin` flag:: @@ -496,9 +473,7 @@ At the query option level, via the :paramref:`_orm.joinedload.innerjoin` flag:: from sqlalchemy import select from sqlalchemy.orm import joinedload - stmt = select(Address).options( - joinedload(Address.user, innerjoin=True) - ) + stmt = select(Address).options(joinedload(Address.user, innerjoin=True)) The JOIN will right-nest itself when applied in a chain that includes an OUTER JOIN: @@ -508,8 +483,7 @@ an OUTER JOIN: >>> from sqlalchemy import select >>> from sqlalchemy.orm import joinedload >>> stmt = select(User).options( - ... joinedload(User.addresses). - ... joinedload(Address.widgets, innerjoin=True) + ... joinedload(User.addresses).joinedload(Address.widgets, innerjoin=True) ... ) >>> results = session.scalars(stmt).unique().all() {opensql}SELECT @@ -578,10 +552,10 @@ named in the query: >>> from sqlalchemy import select >>> from sqlalchemy.orm import joinedload >>> stmt = ( - ... select(User). - ... options(joinedload(User.addresses)). - ... filter(User.name == 'spongebob'). - ... order_by(Address.email_address) + ... select(User) + ... .options(joinedload(User.addresses)) + ... .filter(User.name == "spongebob") + ... .order_by(Address.email_address) ... ) >>> result = session.scalars(stmt).unique().all() {opensql}SELECT @@ -607,10 +581,10 @@ address is to use :meth:`_sql.Select.join`: >>> from sqlalchemy import select >>> stmt = ( - ... select(User). - ... join(User.addresses). - ... filter(User.name == 'spongebob'). - ... order_by(Address.email_address) + ... select(User) + ... .join(User.addresses) + ... .filter(User.name == "spongebob") + ... .order_by(Address.email_address) ... 
) >>> result = session.scalars(stmt).unique().all() {opensql} @@ -635,11 +609,11 @@ are ordering on, the other is used anonymously to load the contents of the >>> stmt = ( - ... select(User). - ... join(User.addresses). - ... options(joinedload(User.addresses)). - ... filter(User.name == 'spongebob'). - ... order_by(Address.email_address) + ... select(User) + ... .join(User.addresses) + ... .options(joinedload(User.addresses)) + ... .filter(User.name == "spongebob") + ... .order_by(Address.email_address) ... ) >>> result = session.scalars(stmt).unique().all() {opensql}SELECT @@ -670,11 +644,11 @@ to see why :func:`joinedload` does what it does, consider if we were .. sourcecode:: python+sql >>> stmt = ( - ... select(User). - ... join(User.addresses). - ... options(joinedload(User.addresses)). - ... filter(User.name=='spongebob'). - ... filter(Address.email_address=='someaddress@foo.com') + ... select(User) + ... .join(User.addresses) + ... .options(joinedload(User.addresses)) + ... .filter(User.name == "spongebob") + ... .filter(Address.email_address == "someaddress@foo.com") ... ) >>> result = session.scalars(stmt).unique().all() {opensql}SELECT @@ -706,11 +680,11 @@ into :func:`.subqueryload`: .. sourcecode:: python+sql >>> stmt = ( - ... select(User). - ... join(User.addresses). - ... options(subqueryload(User.addresses)). - ... filter(User.name=='spongebob'). - ... filter(Address.email_address=='someaddress@foo.com') + ... select(User) + ... .join(User.addresses) + ... .options(subqueryload(User.addresses)) + ... .filter(User.name == "spongebob") + ... .filter(Address.email_address == "someaddress@foo.com") ... ) >>> result = session.scalars(stmt).all() {opensql}SELECT @@ -766,9 +740,9 @@ order to load related associations: >>> from sqlalchemy import select >>> from sqlalchemy import selectinload >>> stmt = ( - ... select(User). - ... options(selectinload(User.addresses)). - ... filter(or_(User.name == 'spongebob', User.name == 'ed')) + ... select(User) + ... 
.options(selectinload(User.addresses)) + ... .filter(or_(User.name == "spongebob", User.name == "ed")) ... ) >>> result = session.scalars(stmt).all() {opensql}SELECT @@ -913,11 +887,7 @@ the collection members to load them at once: >>> from sqlalchemy import select >>> from sqlalchemy.orm import subqueryload - >>> stmt = ( - ... select(User) - ... options(subqueryload(User.addresses)) - ... filter_by(name="spongebob") - ... ) + >>> stmt = select(User).options(subqueryload(User.addresses)).filter_by(name="spongebob") >>> results = session.scalars(stmt).all() {opensql}SELECT users.id AS users_id, @@ -980,19 +950,18 @@ the same ordering as used by the parent query. Without it, there is a chance that the inner query could return the wrong rows:: # incorrect, no ORDER BY - stmt = select(User).options( - subqueryload(User.addresses).limit(1) - ) + stmt = select(User).options(subqueryload(User.addresses).limit(1)) # incorrect if User.name is not unique - stmt = select(User).options( - subqueryload(User.addresses) - ).order_by(User.name).limit(1) + stmt = select(User).options(subqueryload(User.addresses)).order_by(User.name).limit(1) # correct - stmt = select(User).options( - subqueryload(User.addresses) - ).order_by(User.name, User.id).limit(1) + stmt = ( + select(User) + .options(subqueryload(User.addresses)) + .order_by(User.name, User.id) + .limit(1) + ) .. 
seealso:: @@ -1051,7 +1020,7 @@ the string ``'*'`` as the argument to any of these options:: from sqlalchemy import select from sqlalchemy.orm import lazyload - stmt = select(MyClass).options(lazyload('*')) + stmt = select(MyClass).options(lazyload("*")) Above, the ``lazyload('*')`` option will supersede the ``lazy`` setting of all :func:`_orm.relationship` constructs in use for that query, @@ -1071,10 +1040,7 @@ for the ``widget`` relationship:: from sqlalchemy.orm import lazyload from sqlalchemy.orm import joinedload - stmt = select(MyClass).options( - lazyload('*'), - joinedload(MyClass.widget) - ) + stmt = select(MyClass).options(lazyload("*"), joinedload(MyClass.widget)) If multiple ``'*'`` options are passed, the last one overrides those previously passed. @@ -1093,9 +1059,7 @@ chained option:: from sqlalchemy import select from sqlalchemy.orm import Load - stmt = select(User, Address).options( - Load(Address).lazyload('*') - ) + stmt = select(User, Address).options(Load(Address).lazyload("*")) Above, all relationships on ``Address`` will be set to a lazy load. @@ -1123,23 +1087,20 @@ Below, we specify a join between ``User`` and ``Address`` and additionally establish this as the basis for eager loading of ``User.addresses``:: class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = mapped_column(Integer, primary_key=True) addresses = relationship("Address") + class Address(Base): - __tablename__ = 'address' + __tablename__ = "address" # ... - from sqlalchemy.orm import contains_eager - stmt = ( - select(User). - join(User.addresses). 
- options(contains_eager(User.addresses)) - ) + from sqlalchemy.orm import contains_eager + stmt = select(User).join(User.addresses).options(contains_eager(User.addresses)) If the "eager" portion of the statement is "aliased", the path should be specified using :meth:`.PropComparator.of_type`, which allows @@ -1175,10 +1136,7 @@ The path given as the argument to :func:`.contains_eager` needs to be a full path from the starting entity. For example if we were loading ``Users->orders->Order->items->Item``, the option would be used as:: - stmt = select(User).options( - contains_eager(User.orders). - contains_eager(Order.items) - ) + stmt = select(User).options(contains_eager(User.orders).contains_eager(Order.items)) Using contains_eager() to load a custom-filtered collection result ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -1196,11 +1154,11 @@ routing it using :func:`_orm.contains_eager`, also using are overwritten:: stmt = ( - select(User). - join(User.addresses). - filter(Address.email_address.like('%@aol.com')). - options(contains_eager(User.addresses)). - execution_options(populate_existing=True) + select(User) + .join(User.addresses) + .filter(Address.email_address.like("%@aol.com")) + .options(contains_eager(User.addresses)) + .execution_options(populate_existing=True) ) The above query will load only ``User`` objects which contain at diff --git a/doc/build/orm/queryguide/select.rst b/doc/build/orm/queryguide/select.rst index cef9169dda..7db866f8d6 100644 --- a/doc/build/orm/queryguide/select.rst +++ b/doc/build/orm/queryguide/select.rst @@ -23,7 +23,7 @@ function. 
From there, additional methods are used to generate the complete statement, such as the :meth:`_sql.Select.where` method illustrated below:: >>> from sqlalchemy import select - >>> stmt = select(User).where(User.name == 'spongebob') + >>> stmt = select(User).where(User.name == "spongebob") Given a completed :class:`_sql.Select` object, in order to execute it within the ORM to get rows back, the object is passed to @@ -117,13 +117,9 @@ in each result row based on their class name. In the example below, the result rows for a SELECT against ``User`` and ``Address`` will refer to them under the names ``User`` and ``Address``:: - >>> stmt = ( - ... select(User, Address). - ... join(User.addresses). - ... order_by(User.id, Address.id) - ... ) + >>> stmt = select(User, Address).join(User.addresses).order_by(User.id, Address.id) >>> for row in session.execute(stmt): - ... print(f"{row.User.name} {row.Address.email_address}") + ... print(f"{row.User.name} {row.Address.email_address}") {opensql}SELECT user_account.id, user_account.name, user_account.fullname, address.id AS id_1, address.user_id, address.email_address FROM user_account JOIN address ON user_account.id = address.user_id @@ -143,9 +139,9 @@ parameter to alias them with an explicit name:: >>> user_cls = aliased(User, name="user_cls") >>> email_cls = aliased(Address, name="email") >>> stmt = ( - ... select(user_cls, email_cls). - ... join(user_cls.addresses.of_type(email_cls)). - ... order_by(user_cls.id, email_cls.id) + ... select(user_cls, email_cls) + ... .join(user_cls.addresses.of_type(email_cls)) + ... .order_by(user_cls.id, email_cls.id) ... ) >>> row = session.execute(stmt).first() {opensql}SELECT user_cls.id, user_cls.name, user_cls.fullname, @@ -165,10 +161,7 @@ column expressions added to its columns clause using the above using this form as well:: >>> stmt = ( - ... select(User). - ... join(User.addresses). - ... add_columns(Address). - ... order_by(User.id, Address.id) + ... 
select(User).join(User.addresses).add_columns(Address).order_by(User.id, Address.id) ... ) >>> print(stmt) {opensql}SELECT user_account.id, user_account.name, user_account.fullname, @@ -187,9 +180,9 @@ when passed to :func:`_sql.select`. They may be used in the same way as table columns are used:: >>> result = session.execute( - ... select(User.name, Address.email_address). - ... join(User.addresses). - ... order_by(User.id, Address.id) + ... select(User.name, Address.email_address) + ... .join(User.addresses) + ... .order_by(User.id, Address.id) ... ) {opensql}SELECT user_account.name, address.email_address FROM user_account JOIN address ON user_account.id = address.user_id @@ -221,7 +214,7 @@ allows sets of column expressions to be grouped in result rows:: >>> from sqlalchemy.orm import Bundle >>> stmt = select( ... Bundle("user", User.name, User.fullname), - ... Bundle("email", Address.email_address) + ... Bundle("email", Address.email_address), ... ).join_from(User, Address) >>> for row in session.execute(stmt): ... print(f"{row.user.name} {row.user.fullname} {row.email.email_address}") @@ -415,8 +408,7 @@ is used:: >>> from sqlalchemy import union_all >>> u = union_all( - ... select(User).where(User.id < 2), - ... select(User).where(User.id == 3) + ... select(User).where(User.id < 2), select(User).where(User.id == 3) ... ).order_by(User.id) >>> stmt = select(User).from_statement(u) >>> for user_obj in session.execute(stmt).scalars(): @@ -441,8 +433,7 @@ entity in a :func:`_sql.select` construct, including that we can add filtering and order by criteria based on its exported columns:: >>> subq = union_all( - ... select(User).where(User.id < 2), - ... select(User).where(User.id == 3) + ... select(User).where(User.id < 2), select(User).where(User.id == 3) ... 
).subquery() >>> user_alias = aliased(User, subq) >>> stmt = select(user_alias).order_by(user_alias.id) @@ -532,11 +523,7 @@ a JOIN first from ``User`` to ``Order``, and a second from ``Order`` to relationship, it results in two separate JOIN elements, for a total of three JOIN elements in the resulting SQL:: - >>> stmt = ( - ... select(User). - ... join(User.orders). - ... join(Order.items) - ... ) + >>> stmt = select(User).join(User.orders).join(Order.items) >>> print(stmt) {opensql}SELECT user_account.id, user_account.name, user_account.fullname FROM user_account @@ -560,12 +547,7 @@ as potential points to continue joining FROM. We can continue to add other elements to join FROM the ``User`` entity above, for example adding on the ``User.addresses`` relationship to our chain of joins:: - >>> stmt = ( - ... select(User). - ... join(User.orders). - ... join(Order.items). - ... join(User.addresses) - ... ) + >>> stmt = select(User).join(User.orders).join(Order.items).join(User.addresses) >>> print(stmt) {opensql}SELECT user_account.id, user_account.name, user_account.fullname FROM user_account @@ -611,7 +593,7 @@ The third calling form allows both the target entity as well as the ON clause to be passed explicitly. A example that includes a SQL expression as the ON clause is as follows:: - >>> stmt = select(User).join(Address, User.id==Address.user_id) + >>> stmt = select(User).join(Address, User.id == Address.user_id) >>> print(stmt) {opensql}SELECT user_account.id, user_account.name, user_account.fullname FROM user_account JOIN address ON user_account.id = address.user_id @@ -650,9 +632,8 @@ email addresses: .. sourcecode:: pycon+sql - >>> stmt = ( - ... select(User.fullname). - ... join(User.addresses.and_(Address.email_address == 'squirrel@squirrelpower.org')) + >>> stmt = select(User.fullname).join( + ... User.addresses.and_(Address.email_address == "squirrel@squirrelpower.org") ... 
) >>> session.execute(stmt).all() {opensql}SELECT user_account.fullname @@ -686,11 +667,11 @@ against the ``Address`` entity:: >>> address_alias_1 = aliased(Address) >>> address_alias_2 = aliased(Address) >>> stmt = ( - ... select(User). - ... join(address_alias_1, User.addresses). - ... where(address_alias_1.email_address == 'patrick@aol.com'). - ... join(address_alias_2, User.addresses). - ... where(address_alias_2.email_address == 'patrick@gmail.com') + ... select(User) + ... .join(address_alias_1, User.addresses) + ... .where(address_alias_1.email_address == "patrick@aol.com") + ... .join(address_alias_2, User.addresses) + ... .where(address_alias_2.email_address == "patrick@gmail.com") ... ) >>> print(stmt) {opensql}SELECT user_account.id, user_account.name, user_account.fullname @@ -708,12 +689,12 @@ in one step. The example below uses :meth:`_orm.PropComparator.of_type` to produce the same SQL statement as the one just illustrated:: >>> print( - ... select(User). - ... join(User.addresses.of_type(address_alias_1)). - ... where(address_alias_1.email_address == 'patrick@aol.com'). - ... join(User.addresses.of_type(address_alias_2)). - ... where(address_alias_2.email_address == 'patrick@gmail.com') - ... ) + ... select(User) + ... .join(User.addresses.of_type(address_alias_1)) + ... .where(address_alias_1.email_address == "patrick@aol.com") + ... .join(User.addresses.of_type(address_alias_2)) + ... .where(address_alias_2.email_address == "patrick@gmail.com") + ... ) {opensql}SELECT user_account.id, user_account.name, user_account.fullname FROM user_account JOIN address AS address_1 ON user_account.id = address_1.user_id @@ -727,10 +708,7 @@ aliased entity, the attribute is available from the :func:`_orm.aliased` construct directly:: >>> user_alias_1 = aliased(User) - >>> print( - ... select(user_alias_1.name). - ... join(user_alias_1.addresses) - ... 
) + >>> print(select(user_alias_1.name).join(user_alias_1.addresses)) {opensql}SELECT user_account_1.name FROM user_account AS user_account_1 JOIN address ON user_account_1.id = address.user_id @@ -752,11 +730,7 @@ is represented as a row limited subquery, we first construct a :class:`_sql.Subq object using :meth:`_sql.Select.subquery`, which may then be used as the target of the :meth:`_sql.Select.join` method:: - >>> subq = ( - ... select(Address). - ... where(Address.email_address == 'pat999@aol.com'). - ... subquery() - ... ) + >>> subq = select(Address).where(Address.email_address == "pat999@aol.com").subquery() >>> stmt = select(User).join(subq, User.id == subq.c.user_id) >>> print(stmt) {opensql}SELECT user_account.id, user_account.name, user_account.fullname @@ -830,10 +804,10 @@ by using the appropriate :func:`_orm.aliased` construct. Given for example a subquery that refers to both ``User`` and ``Address``:: >>> user_address_subq = ( - ... select(User.id, User.name, User.fullname, Address.id, Address.email_address). - ... join_from(User, Address). - ... where(Address.email_address.in_(['pat999@aol.com', 'squirrel@squirrelpower.org'])). - ... subquery() + ... select(User.id, User.name, User.fullname, Address.id, Address.email_address) + ... .join_from(User, Address) + ... .where(Address.email_address.in_(["pat999@aol.com", "squirrel@squirrelpower.org"])) + ... .subquery() ... ) We can create :func:`_orm.aliased` constructs against both ``User`` and @@ -846,7 +820,7 @@ A :class:`.Select` construct selecting from both entities will render the subquery once, but in a result-row context can return objects of both ``User`` and ``Address`` classes at the same time:: - >>> stmt = select(user_alias, address_alias).where(user_alias.name == 'sandy') + >>> stmt = select(user_alias, address_alias).where(user_alias.name == "sandy") >>> for row in session.execute(stmt): ... 
print(f"{row.user} {row.address}") {opensql}SELECT anon_1.id, anon_1.name, anon_1.fullname, anon_1.id_1, anon_1.email_address @@ -869,7 +843,7 @@ In cases where the left side of the current state of :class:`_sql.Select` is not in line with what we want to join from, the :meth:`_sql.Select.join_from` method may be used:: - >>> stmt = select(Address).join_from(User, User.addresses).where(User.name == 'sandy') + >>> stmt = select(Address).join_from(User, User.addresses).where(User.name == "sandy") >>> print(stmt) SELECT address.id, address.user_id, address.email_address FROM user_account JOIN address ON user_account.id = address.user_id @@ -879,7 +853,7 @@ The :meth:`_sql.Select.join_from` method accepts two or three arguments, either in the form ``(, )``, or ``(, , [])``:: - >>> stmt = select(Address).join_from(User, Address).where(User.name == 'sandy') + >>> stmt = select(Address).join_from(User, Address).where(User.name == "sandy") >>> print(stmt) SELECT address.id, address.user_id, address.email_address FROM user_account JOIN address ON user_account.id = address.user_id @@ -890,7 +864,7 @@ can be used subsequent, the :meth:`_sql.Select.select_from` method may also be used:: - >>> stmt = select(Address).select_from(User).join(Address).where(User.name == 'sandy') + >>> stmt = select(Address).select_from(User).join(Address).where(User.name == "sandy") >>> print(stmt) SELECT address.id, address.user_id, address.email_address FROM user_account JOIN address ON user_account.id = address.user_id @@ -907,7 +881,7 @@ be used:: such a :class:`_sql.Join` object. 
Therefore we can see the contents of :meth:`_sql.Select.select_from` being overridden in a case like this:: - >>> stmt = select(Address).select_from(User).join(Address.user).where(User.name == 'sandy') + >>> stmt = select(Address).select_from(User).join(Address.user).where(User.name == "sandy") >>> print(stmt) SELECT address.id, address.user_id, address.email_address FROM address JOIN user_account ON user_account.id = address.user_id @@ -925,8 +899,10 @@ be used:: >>> >>> j = address_table.join(user_table, user_table.c.id == address_table.c.user_id) >>> stmt = ( - ... select(address_table).select_from(user_table).select_from(j). - ... where(user_table.c.name == 'sandy') + ... select(address_table) + ... .select_from(user_table) + ... .select_from(j) + ... .where(user_table.c.name == "sandy") ... ) >>> print(stmt) SELECT address.id, address.user_id, address.email_address @@ -972,9 +948,8 @@ an optional WHERE criteria to limit the rows matched by the subquery: .. sourcecode:: pycon+sql - >>> stmt = ( - ... select(User.fullname). - ... where(User.addresses.any(Address.email_address == 'squirrel@squirrelpower.org')) + >>> stmt = select(User.fullname).where( + ... User.addresses.any(Address.email_address == "squirrel@squirrelpower.org") ... ) >>> session.execute(stmt).all() {opensql}SELECT user_account.fullname @@ -992,10 +967,7 @@ for ``User`` entities that have no related ``Address`` rows: .. sourcecode:: pycon+sql - >>> stmt = ( - ... select(User.fullname). - ... where(~User.addresses.any()) - ... ) + >>> stmt = select(User.fullname).where(~User.addresses.any()) >>> session.execute(stmt).all() {opensql}SELECT user_account.fullname FROM user_account @@ -1012,10 +984,7 @@ which belonged to "sandy": .. sourcecode:: pycon+sql - >>> stmt = ( - ... select(Address.email_address). - ... where(Address.user.has(User.name=="sandy")) - ... 
) + >>> stmt = select(Address.email_address).where(Address.user.has(User.name == "sandy")) >>> session.execute(stmt).all() {opensql}SELECT address.email_address FROM address diff --git a/doc/build/orm/quickstart.rst b/doc/build/orm/quickstart.rst index 3b909f2ad6..24dfb2b95d 100644 --- a/doc/build/orm/quickstart.rst +++ b/doc/build/orm/quickstart.rst @@ -41,27 +41,27 @@ real SQL tables that exist, or will exist, in a particular database:: >>> class User(Base): ... __tablename__ = "user_account" - ... + ... ... id: Mapped[int] = mapped_column(primary_key=True) ... name: Mapped[str] = mapped_column(String(30)) ... fullname: Mapped[Optional[str]] - ... + ... ... addresses: Mapped[list["Address"]] = relationship( ... back_populates="user", cascade="all, delete-orphan" ... ) - ... + ... ... def __repr__(self) -> str: ... return f"User(id={self.id!r}, name={self.name!r}, fullname={self.fullname!r})" >>> class Address(Base): ... __tablename__ = "address" - ... + ... ... id: Mapped[int] = mapped_column(primary_key=True) ... email_address: Mapped[str] ... user_id: Mapped[int] = mapped_column(ForeignKey("user_account.id")) - ... + ... ... user: Mapped["User"] = relationship(back_populates="addresses") - ... + ... ... def __repr__(self) -> str: ... return f"Address(id={self.id!r}, email_address={self.email_address!r})" @@ -182,7 +182,7 @@ is used: >>> from sqlalchemy.orm import Session >>> with Session(engine) as session: - ... + ... ... spongebob = User( ... name="spongebob", ... fullname="Spongebob Squarepants", @@ -197,9 +197,9 @@ is used: ... ], ... ) ... patrick = User(name="patrick", fullname="Patrick Star") - ... + ... ... session.add_all([spongebob, sandy, patrick]) - ... + ... ... session.commit() {opensql}BEGIN (implicit) INSERT INTO user_account (name, fullname) VALUES (?, ?), (?, ?), (?, ?) RETURNING id @@ -272,10 +272,10 @@ construct creates joins using the :meth:`_sql.Select.join` method: .. sourcecode:: pycon+sql >>> stmt = ( - ... select(Address) - ... 
.join(Address.user) - ... .where(User.name == "sandy") - ... .where(Address.email_address == "sandy@sqlalchemy.org") + ... select(Address) + ... .join(Address.user) + ... .where(User.name == "sandy") + ... .where(Address.email_address == "sandy@sqlalchemy.org") ... ) >>> sandy_address = session.scalars(stmt).one() {opensql}SELECT address.id, address.email_address, address.user_id @@ -314,9 +314,7 @@ address associated with "sandy", and also add a new email address to [...] ('patrick',) {stop} - >>> patrick.addresses.append( - ... Address(email_address="patrickstar@sqlalchemy.org") - ... ) + >>> patrick.addresses.append(Address(email_address="patrickstar@sqlalchemy.org")) {opensql}SELECT address.id AS address_id, address.email_address AS address_email_address, address.user_id AS address_user_id FROM address WHERE ? = address.user_id diff --git a/doc/build/orm/relationship_persistence.rst b/doc/build/orm/relationship_persistence.rst index e06a92eecc..28d17da7dd 100644 --- a/doc/build/orm/relationship_persistence.rst +++ b/doc/build/orm/relationship_persistence.rst @@ -63,30 +63,31 @@ a complete example, including two :class:`_schema.ForeignKey` constructs:: from sqlalchemy.orm import DeclarativeBase from sqlalchemy.orm import relationship + class Base(DeclarativeBase): pass + class Entry(Base): - __tablename__ = 'entry' + __tablename__ = "entry" entry_id = mapped_column(Integer, primary_key=True) - widget_id = mapped_column(Integer, ForeignKey('widget.widget_id')) + widget_id = mapped_column(Integer, ForeignKey("widget.widget_id")) name = mapped_column(String(50)) + class Widget(Base): - __tablename__ = 'widget' + __tablename__ = "widget" widget_id = mapped_column(Integer, primary_key=True) - favorite_entry_id = mapped_column(Integer, - ForeignKey('entry.entry_id', - name="fk_favorite_entry")) + favorite_entry_id = mapped_column( + Integer, ForeignKey("entry.entry_id", name="fk_favorite_entry") + ) name = mapped_column(String(50)) - entries = relationship(Entry, 
primaryjoin= - widget_id==Entry.widget_id) - favorite_entry = relationship(Entry, - primaryjoin= - favorite_entry_id==Entry.entry_id, - post_update=True) + entries = relationship(Entry, primaryjoin=widget_id == Entry.widget_id) + favorite_entry = relationship( + Entry, primaryjoin=favorite_entry_id == Entry.entry_id, post_update=True + ) When a structure against the above configuration is flushed, the "widget" row will be INSERTed minus the "favorite_entry_id" value, then all the "entry" rows will @@ -96,8 +97,8 @@ row at a time for the time being): .. sourcecode:: pycon+sql - >>> w1 = Widget(name='somewidget') - >>> e1 = Entry(name='someentry') + >>> w1 = Widget(name="somewidget") + >>> e1 = Entry(name="someentry") >>> w1.favorite_entry = e1 >>> w1.entries = [e1] >>> session.add_all([w1, e1]) @@ -117,28 +118,34 @@ it's guaranteed that ``favorite_entry_id`` refers to an ``Entry`` that also refers to this ``Widget``. We can use a composite foreign key, as illustrated below:: - from sqlalchemy import Integer, ForeignKey, String, \ - UniqueConstraint, ForeignKeyConstraint + from sqlalchemy import ( + Integer, + ForeignKey, + String, + UniqueConstraint, + ForeignKeyConstraint, + ) from sqlalchemy.orm import DeclarativeBase from sqlalchemy.orm import mapped_column from sqlalchemy.orm import relationship + class Base(DeclarativeBase): pass + class Entry(Base): - __tablename__ = 'entry' + __tablename__ = "entry" entry_id = mapped_column(Integer, primary_key=True) - widget_id = mapped_column(Integer, ForeignKey('widget.widget_id')) + widget_id = mapped_column(Integer, ForeignKey("widget.widget_id")) name = mapped_column(String(50)) - __table_args__ = ( - UniqueConstraint("entry_id", "widget_id"), - ) + __table_args__ = (UniqueConstraint("entry_id", "widget_id"),) + class Widget(Base): - __tablename__ = 'widget' + __tablename__ = "widget" - widget_id = mapped_column(Integer, autoincrement='ignore_fk', primary_key=True) + widget_id = mapped_column(Integer, 
autoincrement="ignore_fk", primary_key=True) favorite_entry_id = mapped_column(Integer) name = mapped_column(String(50)) @@ -147,18 +154,19 @@ as illustrated below:: ForeignKeyConstraint( ["widget_id", "favorite_entry_id"], ["entry.widget_id", "entry.entry_id"], - name="fk_favorite_entry" + name="fk_favorite_entry", ), ) - entries = relationship(Entry, primaryjoin= - widget_id==Entry.widget_id, - foreign_keys=Entry.widget_id) - favorite_entry = relationship(Entry, - primaryjoin= - favorite_entry_id==Entry.entry_id, - foreign_keys=favorite_entry_id, - post_update=True) + entries = relationship( + Entry, primaryjoin=widget_id == Entry.widget_id, foreign_keys=Entry.widget_id + ) + favorite_entry = relationship( + Entry, + primaryjoin=favorite_entry_id == Entry.entry_id, + foreign_keys=favorite_entry_id, + post_update=True, + ) The above mapping features a composite :class:`_schema.ForeignKeyConstraint` bridging the ``widget_id`` and ``favorite_entry_id`` columns. To ensure @@ -188,8 +196,8 @@ capabilities of the database. 
An example mapping which illustrates this is:: class User(Base): - __tablename__ = 'user' - __table_args__ = {'mysql_engine': 'InnoDB'} + __tablename__ = "user" + __table_args__ = {"mysql_engine": "InnoDB"} username = mapped_column(String(50), primary_key=True) fullname = mapped_column(String(100)) @@ -198,13 +206,13 @@ illustrates this is:: class Address(Base): - __tablename__ = 'address' - __table_args__ = {'mysql_engine': 'InnoDB'} + __tablename__ = "address" + __table_args__ = {"mysql_engine": "InnoDB"} email = mapped_column(String(50), primary_key=True) - username = mapped_column(String(50), - ForeignKey('user.username', onupdate="cascade") - ) + username = mapped_column( + String(50), ForeignKey("user.username", onupdate="cascade") + ) Above, we illustrate ``onupdate="cascade"`` on the :class:`_schema.ForeignKey` object, and we also illustrate the ``mysql_engine='InnoDB'`` setting @@ -249,7 +257,7 @@ will be fully loaded into memory if not already locally present. Our previous mapping using ``passive_updates=False`` looks like:: class User(Base): - __tablename__ = 'user' + __tablename__ = "user" username = mapped_column(String(50), primary_key=True) fullname = mapped_column(String(100)) @@ -258,11 +266,12 @@ Our previous mapping using ``passive_updates=False`` looks like:: # does not implement ON UPDATE CASCADE addresses = relationship("Address", passive_updates=False) + class Address(Base): - __tablename__ = 'address' + __tablename__ = "address" email = mapped_column(String(50), primary_key=True) - username = mapped_column(String(50), ForeignKey('user.username')) + username = mapped_column(String(50), ForeignKey("user.username")) Key limitations of ``passive_updates=False`` include: diff --git a/doc/build/orm/scalar_mapping.rst b/doc/build/orm/scalar_mapping.rst index 42adbddc27..f6863edada 100644 --- a/doc/build/orm/scalar_mapping.rst +++ b/doc/build/orm/scalar_mapping.rst @@ -11,4 +11,3 @@ This page has been merged into the mapping_columns - diff --git 
a/doc/build/orm/self_referential.rst b/doc/build/orm/self_referential.rst index a48ece1114..ba73a2ad93 100644 --- a/doc/build/orm/self_referential.rst +++ b/doc/build/orm/self_referential.rst @@ -26,9 +26,9 @@ In this example, we'll work with a single mapped class called ``Node``, representing a tree structure:: class Node(Base): - __tablename__ = 'node' + __tablename__ = "node" id = mapped_column(Integer, primary_key=True) - parent_id = mapped_column(Integer, ForeignKey('node.id')) + parent_id = mapped_column(Integer, ForeignKey("node.id")) data = mapped_column(String(50)) children = relationship("Node") @@ -60,9 +60,9 @@ is a :class:`_schema.Column` or collection of :class:`_schema.Column` objects that indicate those which should be considered to be "remote":: class Node(Base): - __tablename__ = 'node' + __tablename__ = "node" id = mapped_column(Integer, primary_key=True) - parent_id = mapped_column(Integer, ForeignKey('node.id')) + parent_id = mapped_column(Integer, ForeignKey("node.id")) data = mapped_column(String(50)) parent = relationship("Node", remote_side=[id]) @@ -75,13 +75,11 @@ As always, both directions can be combined into a bidirectional relationship using the :func:`.backref` function:: class Node(Base): - __tablename__ = 'node' + __tablename__ = "node" id = mapped_column(Integer, primary_key=True) - parent_id = mapped_column(Integer, ForeignKey('node.id')) + parent_id = mapped_column(Integer, ForeignKey("node.id")) data = mapped_column(String(50)) - children = relationship("Node", - backref=backref('parent', remote_side=[id]) - ) + children = relationship("Node", backref=backref("parent", remote_side=[id])) .. 
seealso:: @@ -99,11 +97,11 @@ the same account as that of the parent; while ``folder_id`` refers to a specific folder within that account:: class Folder(Base): - __tablename__ = 'folder' + __tablename__ = "folder" __table_args__ = ( - ForeignKeyConstraint( - ['account_id', 'parent_id'], - ['folder.account_id', 'folder.folder_id']), + ForeignKeyConstraint( + ["account_id", "parent_id"], ["folder.account_id", "folder.folder_id"] + ), ) account_id = mapped_column(Integer, primary_key=True) @@ -111,10 +109,9 @@ to a specific folder within that account:: parent_id = mapped_column(Integer) name = mapped_column(String) - parent_folder = relationship("Folder", - backref="child_folders", - remote_side=[account_id, folder_id] - ) + parent_folder = relationship( + "Folder", backref="child_folders", remote_side=[account_id, folder_id] + ) Above, we pass ``account_id`` into the :paramref:`_orm.relationship.remote_side` list. :func:`_orm.relationship` recognizes that the ``account_id`` column here @@ -130,7 +127,7 @@ Self-Referential Query Strategies Querying of self-referential structures works like any other query:: # get all nodes named 'child2' - session.scalars(select(Node).where(Node.data=='child2')) + session.scalars(select(Node).where(Node.data == "child2")) However extra care is needed when attempting to join along the foreign key from one level of the tree to the next. 
In SQL, diff --git a/doc/build/orm/session.rst b/doc/build/orm/session.rst index 8e14942c4e..00d2e6f6a1 100644 --- a/doc/build/orm/session.rst +++ b/doc/build/orm/session.rst @@ -24,4 +24,3 @@ persistence operations is the session_events session_api - diff --git a/doc/build/orm/session_basics.rst b/doc/build/orm/session_basics.rst index 5a4cee803e..a24b972085 100644 --- a/doc/build/orm/session_basics.rst +++ b/doc/build/orm/session_basics.rst @@ -63,7 +63,7 @@ may look like:: # an Engine, which the Session will use for connection # resources - engine = create_engine('postgresql+psycopg2://scott:tiger@localhost/') + engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/") # create session and add objects with Session(engine) as session: @@ -130,8 +130,8 @@ operations:: # create session and add objects with Session(engine) as session: with session.begin(): - session.add(some_object) - session.add(some_other_object) + session.add(some_object) + session.add(some_other_object) # inner context calls session.commit(), if there were no exceptions # outer context calls session.close() @@ -158,7 +158,7 @@ scope, the :class:`_orm.sessionmaker` can provide a factory for # an Engine, which the Session will use for connection # resources, typically in module scope - engine = create_engine('postgresql+psycopg2://scott:tiger@localhost/') + engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/") # a sessionmaker(), also in the same scope as the engine Session = sessionmaker(engine) @@ -183,7 +183,7 @@ and also maintains a begin/commit/rollback block:: # an Engine, which the Session will use for connection # resources - engine = create_engine('postgresql+psycopg2://scott:tiger@localhost/') + engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/") # a sessionmaker(), also in the same scope as the engine Session = sessionmaker(engine) @@ -238,7 +238,6 @@ A complete guide to SQLAlchemy ORM querying can be found at # list of ``User`` objects 
user_obj = session.scalars(statement).all() - # query for individual columns statement = select(User.name, User.fullname) @@ -267,12 +266,12 @@ already present and do not need to be added. Instances which are :term:`detached (i.e. have been removed from a session) may be re-associated with a session using this method:: - user1 = User(name='user1') - user2 = User(name='user2') + user1 = User(name="user1") + user2 = User(name="user2") session.add(user1) session.add(user2) - session.commit() # write changes to the database + session.commit() # write changes to the database To add a list of items to the session at once, use :meth:`~.Session.add_all`:: @@ -526,9 +525,7 @@ ways to refresh its contents with new data from the current transaction: re-populated from their contents in the database:: u2 = session.scalars( - select(User) - .where(User.id == 5) - .execution_options(populate_existing=True) + select(User).where(User.id == 5).execution_options(populate_existing=True) ).one() .. @@ -831,6 +828,7 @@ E.g. **don't do this**:: ### this is the **wrong way to do it** ### + class ThingOne: def go(self): session = Session() @@ -841,6 +839,7 @@ E.g. **don't do this**:: session.rollback() raise + class ThingTwo: def go(self): session = Session() @@ -851,6 +850,7 @@ E.g. **don't do this**:: session.rollback() raise + def run_my_program(): ThingOne().go() ThingTwo().go() @@ -863,21 +863,23 @@ transaction automatically:: ### this is a **better** (but not the only) way to do it ### + class ThingOne: def go(self, session): session.execute(update(FooBar).values(x=5)) + class ThingTwo: def go(self, session): session.execute(update(Widget).values(q=18)) + def run_my_program(): with Session() as session: with session.begin(): ThingOne().go(session) ThingTwo().go(session) - .. versionchanged:: 1.4 The :class:`_orm.Session` may be used as a context manager without the use of external helper functions. 
@@ -915,6 +917,7 @@ available on :class:`~sqlalchemy.orm.session.Session`:: The newer :ref:`core_inspection_toplevel` system can also be used:: from sqlalchemy import inspect + session = inspect(someobject).session .. _session_faq_threadsafe: diff --git a/doc/build/orm/session_events.rst b/doc/build/orm/session_events.rst index 9859f16820..b502191e57 100644 --- a/doc/build/orm/session_events.rst +++ b/doc/build/orm/session_events.rst @@ -47,6 +47,7 @@ options:: Session = sessionmaker(engine, future=True) + @event.listens_for(Session, "do_orm_execute") def _do_orm_execute(orm_execute_state): if orm_execute_state.is_select: @@ -58,7 +59,7 @@ options:: # ORDER BY if so col_descriptions = orm_execute_state.statement.column_descriptions - if col_descriptions[0]['entity'] is MyEntity: + if col_descriptions[0]["entity"] is MyEntity: orm_execute_state.statement = statement.order_by(MyEntity.name) The above example illustrates some simple modifications to SELECT statements. @@ -85,13 +86,14 @@ may be used on its own, or is ideally suited to be used within the Session = sessionmaker(engine, future=True) + @event.listens_for(Session, "do_orm_execute") def _do_orm_execute(orm_execute_state): if ( - orm_execute_state.is_select and - not orm_execute_state.is_column_load and - not orm_execute_state.is_relationship_load + orm_execute_state.is_select + and not orm_execute_state.is_column_load + and not orm_execute_state.is_relationship_load ): orm_execute_state.statement = orm_execute_state.statement.options( with_loader_criteria(MyEntity.public == True) @@ -114,6 +116,7 @@ Given a series of classes based on a mixin called ``HasTimestamp``:: import datetime + class HasTimestamp: timestamp = mapped_column(DateTime, default=datetime.datetime.now) @@ -122,11 +125,11 @@ Given a series of classes based on a mixin called ``HasTimestamp``:: __tablename__ = "some_entity" id = mapped_column(Integer, primary_key=True) + class SomeOtherEntity(HasTimestamp, Base): __tablename__ = "some_entity" 
id = mapped_column(Integer, primary_key=True) - The above classes ``SomeEntity`` and ``SomeOtherEntity`` will each have a column ``timestamp`` that defaults to the current date and time. An event may be used to intercept all objects that extend from ``HasTimestamp`` and filter their @@ -135,9 +138,9 @@ to intercept all objects that extend from ``HasTimestamp`` and filter their @event.listens_for(Session, "do_orm_execute") def _do_orm_execute(orm_execute_state): if ( - orm_execute_state.is_select - and not orm_execute_state.is_column_load - and not orm_execute_state.is_relationship_load + orm_execute_state.is_select + and not orm_execute_state.is_column_load + and not orm_execute_state.is_relationship_load ): one_month_ago = datetime.datetime.today() - datetime.timedelta(months=1) @@ -145,7 +148,7 @@ to intercept all objects that extend from ``HasTimestamp`` and filter their with_loader_criteria( HasTimestamp, lambda cls: cls.timestamp >= one_month_ago, - include_aliases=True + include_aliases=True, ) ) @@ -202,6 +205,7 @@ E.g., using :meth:`_orm.SessionEvents.do_orm_execute` to implement a cache:: cache = {} + @event.listens_for(Session, "do_orm_execute") def _do_orm_execute(orm_execute_state): if "my_cache_key" in orm_execute_state.execution_options: @@ -222,7 +226,9 @@ E.g., using :meth:`_orm.SessionEvents.do_orm_execute` to implement a cache:: With the above hook in place, an example of using the cache would look like:: - stmt = select(User).where(User.name == 'sandy').execution_options(my_cache_key="key_sandy") + stmt = ( + select(User).where(User.name == "sandy").execution_options(my_cache_key="key_sandy") + ) result = session.execute(stmt) @@ -413,7 +419,8 @@ with a specific :class:`.Session` object:: session = Session() - @event.listens_for(session, 'transient_to_pending') + + @event.listens_for(session, "transient_to_pending") def object_is_pending(session, obj): print("new pending: %s" % obj) @@ -425,7 +432,8 @@ Or with the :class:`.Session` class itself, 
as well as with a specific maker = sessionmaker() - @event.listens_for(maker, 'transient_to_pending') + + @event.listens_for(maker, "transient_to_pending") def object_is_pending(session, obj): print("new pending: %s" % obj) @@ -455,14 +463,15 @@ intercept all new objects for a particular declarative base:: from sqlalchemy.orm import DeclarativeBase from sqlalchemy import event + class Base(DeclarativeBase): pass + @event.listens_for(Base, "init", propagate=True) def intercept_init(instance, args, kwargs): print("new transient: %s" % instance) - Transient to Pending ^^^^^^^^^^^^^^^^^^^^ @@ -477,7 +486,6 @@ the :meth:`.SessionEvents.transient_to_pending` event:: def intercept_transient_to_pending(session, object_): print("transient to pending: %s" % object_) - Pending to Persistent ^^^^^^^^^^^^^^^^^^^^^ @@ -518,7 +526,6 @@ state via this particular avenue:: def intercept_loaded_as_persistent(session, object_): print("object loaded into persistent state: %s" % object_) - Persistent to Transient ^^^^^^^^^^^^^^^^^^^^^^^ @@ -562,7 +569,6 @@ Track the persistent to deleted transition with def intercept_persistent_to_deleted(session, object_): print("object was DELETEd, is now in deleted state: %s" % object_) - Deleted to Detached ^^^^^^^^^^^^^^^^^^^ @@ -576,7 +582,6 @@ the deleted to detached transition using :meth:`.SessionEvents.deleted_to_detach def intercept_deleted_to_detached(session, object_): print("deleted to detached: %s" % object_) - .. 
note:: While the object is in the deleted state, the :attr:`.InstanceState.deleted` @@ -619,7 +624,6 @@ objects moving back to persistent from detached using the def intercept_detached_to_persistent(session, object_): print("object became persistent again: %s" % object_) - Deleted to Persistent ^^^^^^^^^^^^^^^^^^^^^ diff --git a/doc/build/orm/session_state_management.rst b/doc/build/orm/session_state_management.rst index da6472e92f..7538fb4a1f 100644 --- a/doc/build/orm/session_state_management.rst +++ b/doc/build/orm/session_state_management.rst @@ -142,25 +142,25 @@ the :term:`persistent` state is as follows:: from sqlalchemy import event + def strong_reference_session(session): @event.listens_for(session, "pending_to_persistent") @event.listens_for(session, "deleted_to_persistent") @event.listens_for(session, "detached_to_persistent") @event.listens_for(session, "loaded_as_persistent") def strong_ref_object(sess, instance): - if 'refs' not in sess.info: - sess.info['refs'] = refs = set() + if "refs" not in sess.info: + sess.info["refs"] = refs = set() else: - refs = sess.info['refs'] + refs = sess.info["refs"] refs.add(instance) - @event.listens_for(session, "persistent_to_detached") @event.listens_for(session, "persistent_to_deleted") @event.listens_for(session, "persistent_to_transient") def deref_object(sess, instance): - sess.info['refs'].discard(instance) + sess.info["refs"].discard(instance) Above, we intercept the :meth:`.SessionEvents.pending_to_persistent`, :meth:`.SessionEvents.detached_to_persistent`, @@ -186,7 +186,6 @@ It may also be called for any :class:`.sessionmaker`:: maker = sessionmaker() strong_reference_session(maker) - .. 
_unitofwork_merging: Merging @@ -290,22 +289,23 @@ some unexpected state regarding the object being passed to :meth:`~.Session.merg Lets use the canonical example of the User and Address objects:: class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = mapped_column(Integer, primary_key=True) name = mapped_column(String(50), nullable=False) addresses = relationship("Address", backref="user") + class Address(Base): - __tablename__ = 'address' + __tablename__ = "address" id = mapped_column(Integer, primary_key=True) email_address = mapped_column(String(50), nullable=False) - user_id = mapped_column(Integer, ForeignKey('user.id'), nullable=False) + user_id = mapped_column(Integer, ForeignKey("user.id"), nullable=False) Assume a ``User`` object with one ``Address``, already persistent:: - >>> u1 = User(name='ed', addresses=[Address(email_address='ed@ed.com')]) + >>> u1 = User(name="ed", addresses=[Address(email_address="ed@ed.com")]) >>> session.add(u1) >>> session.commit() @@ -419,7 +419,7 @@ When we talk about expiration of data we are usually talking about an object that is in the :term:`persistent` state. For example, if we load an object as follows:: - user = session.scalars(select(User).filter_by(name='user1').limit(1)).first() + user = session.scalars(select(User).filter_by(name="user1").limit(1)).first() The above ``User`` object is persistent, and has a series of attributes present; if we were to look inside its ``__dict__``, we'd see that state @@ -481,7 +481,7 @@ Another key behavior of both :meth:`~.Session.expire` and :meth:`~.Session.refre is that all un-flushed changes on an object are discarded. 
That is, if we were to modify an attribute on our ``User``:: - >>> user.name = 'user2' + >>> user.name = "user2" but then we call :meth:`~.Session.expire` without first calling :meth:`~.Session.flush`, our pending value of ``'user2'`` is discarded:: @@ -500,7 +500,7 @@ it can also be passed a list of string attribute names, referring to specific attributes to be marked as expired:: # expire only attributes obj1.attr1, obj1.attr2 - session.expire(obj1, ['attr1', 'attr2']) + session.expire(obj1, ["attr1", "attr2"]) The :meth:`.Session.expire_all` method allows us to essentially call :meth:`.Session.expire` on all objects contained within the :class:`.Session` @@ -519,7 +519,7 @@ but unlike :meth:`~.Session.expire`, expects at least one name to be that of a column-mapped attribute:: # reload obj1.attr1, obj1.attr2 - session.refresh(obj1, ['attr1', 'attr2']) + session.refresh(obj1, ["attr1", "attr2"]) .. tip:: diff --git a/doc/build/orm/session_transaction.rst b/doc/build/orm/session_transaction.rst index 5a05a945b5..164fea347a 100644 --- a/doc/build/orm/session_transaction.rst +++ b/doc/build/orm/session_transaction.rst @@ -28,6 +28,7 @@ the scope of the :class:`_orm.SessionTransaction`. 
Below, assume we start with a :class:`_orm.Session`:: from sqlalchemy.orm import Session + session = Session(engine) We can now run operations within a demarcated transaction using a context @@ -139,7 +140,7 @@ method:: session.add(u1) session.add(u2) - nested = session.begin_nested() # establish a savepoint + nested = session.begin_nested() # establish a savepoint session.add(u3) nested.rollback() # rolls back u3, keeps u1 and u2 @@ -163,9 +164,9 @@ rolling back the whole transaction, as in the example below:: for record in records: try: with session.begin_nested(): - session.merge(record) + session.merge(record) except: - print("Skipped record %s" % record) + print("Skipped record %s" % record) session.commit() When the context manager yielded by :meth:`_orm.Session.begin_nested` @@ -264,8 +265,8 @@ Engine:: [ {"data": "some data one"}, {"data": "some data two"}, - {"data": "some data three"} - ] + {"data": "some data three"}, + ], ) conn.commit() @@ -274,11 +275,13 @@ Session:: Session = sessionmaker(engine, future=True) with Session() as session: - session.add_all([ - SomeClass(data="some data one"), - SomeClass(data="some data two"), - SomeClass(data="some data three") - ]) + session.add_all( + [ + SomeClass(data="some data one"), + SomeClass(data="some data two"), + SomeClass(data="some data three"), + ] + ) session.commit() Begin Once @@ -300,8 +303,8 @@ Engine:: [ {"data": "some data one"}, {"data": "some data two"}, - {"data": "some data three"} - ] + {"data": "some data three"}, + ], ) # commits and closes automatically @@ -310,14 +313,15 @@ Session:: Session = sessionmaker(engine, future=True) with Session.begin() as session: - session.add_all([ - SomeClass(data="some data one"), - SomeClass(data="some data two"), - SomeClass(data="some data three") - ]) + session.add_all( + [ + SomeClass(data="some data one"), + SomeClass(data="some data two"), + SomeClass(data="some data three"), + ] + ) # commits and closes automatically - Nested Transaction 
~~~~~~~~~~~~~~~~~~~~ @@ -339,8 +343,8 @@ Engine:: [ {"data": "some data one"}, {"data": "some data two"}, - {"data": "some data three"} - ] + {"data": "some data three"}, + ], ) savepoint.commit() # or rollback @@ -352,17 +356,16 @@ Session:: with Session.begin() as session: savepoint = session.begin_nested() - session.add_all([ - SomeClass(data="some data one"), - SomeClass(data="some data two"), - SomeClass(data="some data three") - ]) + session.add_all( + [ + SomeClass(data="some data one"), + SomeClass(data="some data two"), + SomeClass(data="some data three"), + ] + ) savepoint.commit() # or rollback # commits automatically - - - .. _session_explicit_begin: Explicit Begin @@ -385,8 +388,8 @@ point at which the "begin" operation occurs. To suit this, the try: item1 = session.get(Item, 1) item2 = session.get(Item, 2) - item1.foo = 'bar' - item2.bar = 'foo' + item1.foo = "bar" + item2.bar = "foo" session.commit() except: session.rollback() @@ -399,8 +402,8 @@ The above pattern is more idiomatically invoked using a context manager:: with session.begin(): item1 = session.get(Item, 1) item2 = session.get(Item, 2) - item1.foo = 'bar' - item2.bar = 'foo' + item1.foo = "bar" + item2.bar = "foo" The :meth:`_orm.Session.begin` method and the session's "autobegin" process use the same sequence of steps to begin the transaction. This includes @@ -423,13 +426,13 @@ also :meth:`_orm.Session.prepare` the session for interacting with transactions not managed by SQLAlchemy. 
To use two phase transactions set the flag ``twophase=True`` on the session:: - engine1 = create_engine('postgresql+psycopg2://db1') - engine2 = create_engine('postgresql+psycopg2://db2') + engine1 = create_engine("postgresql+psycopg2://db1") + engine2 = create_engine("postgresql+psycopg2://db2") Session = sessionmaker(twophase=True) # bind User operations to engine 1, Account operations to engine 2 - Session.configure(binds={User:engine1, Account:engine2}) + Session.configure(binds={User: engine1, Account: engine2}) session = Session() @@ -439,7 +442,6 @@ transactions set the flag ``twophase=True`` on the session:: # before committing both transactions session.commit() - .. _session_transaction_isolation: Setting Transaction Isolation Levels / DBAPI AUTOCOMMIT @@ -489,12 +491,11 @@ in all cases, which is then used as the source of connectivity for a eng = create_engine( "postgresql+psycopg2://scott:tiger@localhost/test", - isolation_level='REPEATABLE READ' + isolation_level="REPEATABLE READ", ) Session = sessionmaker(eng) - Another option, useful if there are to be two engines with different isolation levels at once, is to use the :meth:`_engine.Engine.execution_options` method, which will produce a shallow copy of the original :class:`_engine.Engine` which @@ -512,7 +513,6 @@ operations:: transactional_session = sessionmaker(eng) autocommit_session = sessionmaker(autocommit_engine) - Above, both "``eng``" and ``"autocommit_engine"`` share the same dialect and connection pool. However the "AUTOCOMMIT" mode will be set upon connections when they are acquired from the ``autocommit_engine``. The two @@ -565,7 +565,6 @@ methods:: with Session() as session: session.bind_mapper(User, autocommit_engine) - Setting Isolation for Individual Transactions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -588,7 +587,7 @@ level on a per-connection basis can be affected by using the # call connection() with options before any other operations proceed. 
# this will procure a new connection from the bound engine and begin a real # database transaction. - sess.connection(execution_options={'isolation_level': 'SERIALIZABLE'}) + sess.connection(execution_options={"isolation_level": "SERIALIZABLE"}) # ... work with session in SERIALIZABLE isolation level... @@ -620,15 +619,13 @@ the per-connection-transaction isolation level:: # call connection() with options before any other operations proceed. # this will procure a new connection from the bound engine and begin a # real database transaction. - sess.connection(execution_options={'isolation_level': 'SERIALIZABLE'}) + sess.connection(execution_options={"isolation_level": "SERIALIZABLE"}) # ... work with session in SERIALIZABLE isolation level... # outside the block, the transaction has been committed. the connection is # released and reverted to its previous isolation level. - - Tracking Transaction State with Events -------------------------------------- @@ -670,7 +667,8 @@ are reverted:: # global application scope. 
create Session class, engine Session = sessionmaker() - engine = create_engine('postgresql+psycopg2://...') + engine = create_engine("postgresql+psycopg2://...") + class SomeTest(TestCase): def setUp(self): @@ -680,11 +678,9 @@ are reverted:: # begin a non-ORM transaction self.trans = self.connection.begin() - # bind an individual Session to the connection self.session = Session(bind=self.connection) - ### optional ### # if the database supports SAVEPOINT (SQLite needs special diff --git a/doc/build/orm/versioning.rst b/doc/build/orm/versioning.rst index d12b03cb9f..2e69ee77b1 100644 --- a/doc/build/orm/versioning.rst +++ b/doc/build/orm/versioning.rst @@ -55,15 +55,13 @@ to the mapped table, then establish it as the ``version_id_col`` within the mapper options:: class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = mapped_column(Integer, primary_key=True) version_id = mapped_column(Integer, nullable=False) name = mapped_column(String(50), nullable=False) - __mapper_args__ = { - "version_id_col": version_id - } + __mapper_args__ = {"version_id_col": version_id} .. note:: It is **strongly recommended** that the ``version_id`` column be made NOT NULL. 
The versioning feature **does not support** a NULL @@ -105,16 +103,17 @@ support a native GUID type, but we illustrate here using a simple string):: import uuid + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = mapped_column(Integer, primary_key=True) version_uuid = mapped_column(String(32), nullable=False) name = mapped_column(String(50), nullable=False) __mapper_args__ = { - 'version_id_col':version_uuid, - 'version_id_generator':lambda version: uuid.uuid4().hex + "version_id_col": version_uuid, + "version_id_generator": lambda version: uuid.uuid4().hex, } The persistence engine will call upon ``uuid.uuid4()`` each time a @@ -148,17 +147,15 @@ class as follows:: from sqlalchemy import FetchedValue + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = mapped_column(Integer, primary_key=True) name = mapped_column(String(50), nullable=False) xmin = mapped_column("xmin", String, system=True, server_default=FetchedValue()) - __mapper_args__ = { - 'version_id_col': xmin, - 'version_id_generator': False - } + __mapper_args__ = {"version_id_col": xmin, "version_id_generator": False} With the above mapping, the ORM will rely upon the ``xmin`` column for automatically providing the new value of the version id counter. 
@@ -221,25 +218,24 @@ at our choosing:: import uuid + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = mapped_column(Integer, primary_key=True) version_uuid = mapped_column(String(32), nullable=False) name = mapped_column(String(50), nullable=False) - __mapper_args__ = { - 'version_id_col':version_uuid, - 'version_id_generator': False - } + __mapper_args__ = {"version_id_col": version_uuid, "version_id_generator": False} + - u1 = User(name='u1', version_uuid=uuid.uuid4()) + u1 = User(name="u1", version_uuid=uuid.uuid4()) session.add(u1) session.commit() - u1.name = 'u2' + u1.name = "u2" u1.version_uuid = uuid.uuid4() session.commit() @@ -251,7 +247,7 @@ for schemes where only certain classes of UPDATE are sensitive to concurrency issues:: # will leave version_uuid unchanged - u1.name = 'u3' + u1.name = "u3" session.commit() .. versionadded:: 0.9.0 diff --git a/doc/build/tutorial/data_insert.rst b/doc/build/tutorial/data_insert.rst index 767e7995bd..c1df86120e 100644 --- a/doc/build/tutorial/data_insert.rst +++ b/doc/build/tutorial/data_insert.rst @@ -35,7 +35,7 @@ A simple example of :class:`_sql.Insert` illustrating the target table and the VALUES clause at once:: >>> from sqlalchemy import insert - >>> stmt = insert(user_table).values(name='spongebob', fullname="Spongebob Squarepants") + >>> stmt = insert(user_table).values(name="spongebob", fullname="Spongebob Squarepants") The above ``stmt`` variable is an instance of :class:`_sql.Insert`. Most SQL expressions can be stringified in place as a means to see the general @@ -122,8 +122,8 @@ illustrate this: ... insert(user_table), ... [ ... {"name": "sandy", "fullname": "Sandy Cheeks"}, - ... {"name": "patrick", "fullname": "Patrick Star"} - ... ] + ... {"name": "patrick", "fullname": "Patrick Star"}, + ... ], ... ) ... conn.commit() {opensql}BEGIN (implicit) @@ -167,19 +167,22 @@ construct automatically. >>> from sqlalchemy import select, bindparam >>> scalar_subq = ( - ... 
select(user_table.c.id). - ... where(user_table.c.name==bindparam('username')). - ... scalar_subquery() + ... select(user_table.c.id) + ... .where(user_table.c.name == bindparam("username")) + ... .scalar_subquery() ... ) >>> with engine.connect() as conn: ... result = conn.execute( ... insert(address_table).values(user_id=scalar_subq), ... [ - ... {"username": 'spongebob', "email_address": "spongebob@sqlalchemy.org"}, - ... {"username": 'sandy', "email_address": "sandy@sqlalchemy.org"}, - ... {"username": 'sandy', "email_address": "sandy@squirrelpower.org"}, - ... ] + ... { + ... "username": "spongebob", + ... "email_address": "spongebob@sqlalchemy.org", + ... }, + ... {"username": "sandy", "email_address": "sandy@sqlalchemy.org"}, + ... {"username": "sandy", "email_address": "sandy@squirrelpower.org"}, + ... ], ... ) ... conn.commit() {opensql}BEGIN (implicit) @@ -221,7 +224,9 @@ method; in this case, the :class:`_engine.Result` object that's returned when the statement is executed has rows which can be fetched:: - >>> insert_stmt = insert(address_table).returning(address_table.c.id, address_table.c.email_address) + >>> insert_stmt = insert(address_table).returning( + ... address_table.c.id, address_table.c.email_address + ... ) >>> print(insert_stmt) {opensql}INSERT INTO address (id, user_id, email_address) VALUES (:id, :user_id, :email_address) diff --git a/doc/build/tutorial/data_select.rst b/doc/build/tutorial/data_select.rst index b55113fd3f..16357d1900 100644 --- a/doc/build/tutorial/data_select.rst +++ b/doc/build/tutorial/data_select.rst @@ -36,7 +36,7 @@ each method builds more state onto the object. 
Like the other SQL constructs, it can be stringified in place:: >>> from sqlalchemy import select - >>> stmt = select(user_table).where(user_table.c.name == 'spongebob') + >>> stmt = select(user_table).where(user_table.c.name == "spongebob") >>> print(stmt) {opensql}SELECT user_account.id, user_account.name, user_account.fullname FROM user_account @@ -71,7 +71,7 @@ elements within each row: .. sourcecode:: pycon+sql - >>> stmt = select(User).where(User.name == 'spongebob') + >>> stmt = select(User).where(User.name == "spongebob") >>> with Session(engine) as session: ... for row in session.execute(stmt): ... print(row) @@ -125,7 +125,7 @@ Alternatively, when using the :attr:`.FromClause.c` collection of any :class:`.FromClause` such as :class:`.Table`, multiple columns may be specified for a :func:`_sql.select` by using a tuple of string names:: - >>> print(select(user_table.c['name', 'fullname'])) + >>> print(select(user_table.c["name", "fullname"])) {opensql}SELECT user_account.name, user_account.fullname FROM user_account @@ -209,9 +209,7 @@ attribute of the ``User`` entity as the first element of the row, and combine it with full ``Address`` entities in the second element:: >>> session.execute( - ... select(User.name, Address). - ... where(User.id==Address.user_id). - ... order_by(Address.id) + ... select(User.name, Address).where(User.id == Address.user_id).order_by(Address.id) ... ).all() {opensql}SELECT user_account.name, address.id, address.email_address, address.user_id FROM user_account, address @@ -239,11 +237,9 @@ when referring to arbitrary SQL expressions in a result row by name: .. sourcecode:: pycon+sql >>> from sqlalchemy import func, cast - >>> stmt = ( - ... select( - ... ("Username: " + user_table.c.name).label("username"), - ... ).order_by(user_table.c.name) - ... ) + >>> stmt = select( + ... ("Username: " + user_table.c.name).label("username"), + ... ).order_by(user_table.c.name) >>> with engine.connect() as conn: ... 
for row in conn.execute(stmt): ... print(f"{row.username}") @@ -282,11 +278,7 @@ a hardcoded string literal ``'some label'`` and embed it within the SELECT statement:: >>> from sqlalchemy import text - >>> stmt = ( - ... select( - ... text("'some phrase'"), user_table.c.name - ... ).order_by(user_table.c.name) - ... ) + >>> stmt = select(text("'some phrase'"), user_table.c.name).order_by(user_table.c.name) >>> with engine.connect() as conn: ... print(conn.execute(stmt).all()) {opensql}BEGIN (implicit) @@ -308,10 +300,8 @@ towards in subqueries and other expressions:: >>> from sqlalchemy import literal_column - >>> stmt = ( - ... select( - ... literal_column("'some phrase'").label("p"), user_table.c.name - ... ).order_by(user_table.c.name) + >>> stmt = select(literal_column("'some phrase'").label("p"), user_table.c.name).order_by( + ... user_table.c.name ... ) >>> with engine.connect() as conn: ... for row in conn.execute(stmt): @@ -343,7 +333,7 @@ conjunction with Python operators such as ``==``, ``!=``, ``<``, ``>=`` etc. generate new SQL Expression objects, rather than plain boolean ``True``/``False`` values:: - >>> print(user_table.c.name == 'squidward') + >>> print(user_table.c.name == "squidward") user_account.name = :name_1 >>> print(address_table.c.user_id > 10) @@ -353,7 +343,7 @@ SQL Expression objects, rather than plain boolean ``True``/``False`` values:: We can use expressions like these to generate the WHERE clause by passing the resulting objects to the :meth:`_sql.Select.where` method:: - >>> print(select(user_table).where(user_table.c.name == 'squidward')) + >>> print(select(user_table).where(user_table.c.name == "squidward")) {opensql}SELECT user_account.id, user_account.name, user_account.fullname FROM user_account WHERE user_account.name = :name_1 @@ -363,9 +353,9 @@ To produce multiple expressions joined by AND, the :meth:`_sql.Select.where` method may be invoked any number of times:: >>> print( - ... select(address_table.c.email_address). - ... 
where(user_table.c.name == 'squidward'). - ... where(address_table.c.user_id == user_table.c.id) + ... select(address_table.c.email_address) + ... .where(user_table.c.name == "squidward") + ... .where(address_table.c.user_id == user_table.c.id) ... ) {opensql}SELECT address.email_address FROM address, user_account @@ -375,10 +365,9 @@ A single call to :meth:`_sql.Select.where` also accepts multiple expressions with the same effect:: >>> print( - ... select(address_table.c.email_address). - ... where( - ... user_table.c.name == 'squidward', - ... address_table.c.user_id == user_table.c.id + ... select(address_table.c.email_address).where( + ... user_table.c.name == "squidward", + ... address_table.c.user_id == user_table.c.id, ... ) ... ) {opensql}SELECT address.email_address @@ -391,11 +380,10 @@ of ORM entities:: >>> from sqlalchemy import and_, or_ >>> print( - ... select(Address.email_address). - ... where( + ... select(Address.email_address).where( ... and_( - ... or_(User.name == 'squidward', User.name == 'sandy'), - ... Address.user_id == User.id + ... or_(User.name == "squidward", User.name == "sandy"), + ... Address.user_id == User.id, ... ) ... ) ... ) @@ -409,9 +397,7 @@ popular method known as :meth:`_sql.Select.filter_by` which accepts keyword arguments that match to column keys or ORM attribute names. It will filter against the leftmost FROM clause or the last entity joined:: - >>> print( - ... select(User).filter_by(name='spongebob', fullname='Spongebob Squarepants') - ... ) + >>> print(select(User).filter_by(name="spongebob", fullname="Spongebob Squarepants")) {opensql}SELECT user_account.id, user_account.name, user_account.fullname FROM user_account WHERE user_account.name = :name_1 AND user_account.fullname = :fullname_1 @@ -453,8 +439,9 @@ method, which allows us to indicate the left and right side of the JOIN explicitly:: >>> print( - ... select(user_table.c.name, address_table.c.email_address). - ... join_from(user_table, address_table) + ... 
select(user_table.c.name, address_table.c.email_address).join_from( + ... user_table, address_table + ... ) ... ) {opensql}SELECT user_account.name, address.email_address FROM user_account JOIN address ON user_account.id = address.user_id @@ -463,10 +450,7 @@ explicitly:: The other is the the :meth:`_sql.Select.join` method, which indicates only the right side of the JOIN, the left hand-side is inferred:: - >>> print( - ... select(user_table.c.name, address_table.c.email_address). - ... join(address_table) - ... ) + >>> print(select(user_table.c.name, address_table.c.email_address).join(address_table)) {opensql}SELECT user_account.name, address.email_address FROM user_account JOIN address ON user_account.id = address.user_id @@ -483,10 +467,7 @@ where we establish ``user_table`` as the first element in the FROM clause and :meth:`_sql.Select.join` to establish ``address_table`` as the second:: - >>> print( - ... select(address_table.c.email_address). - ... select_from(user_table).join(address_table) - ... ) + >>> print(select(address_table.c.email_address).select_from(user_table).join(address_table)) {opensql}SELECT address.email_address FROM user_account JOIN address ON user_account.id = address.user_id @@ -497,9 +478,7 @@ FROM clause. For example, to SELECT from the common SQL expression produce the SQL ``count()`` function:: >>> from sqlalchemy import func - >>> print ( - ... select(func.count('*')).select_from(user_table) - ... ) + >>> print(select(func.count("*")).select_from(user_table)) {opensql}SELECT count(:count_2) AS count_1 FROM user_account @@ -528,9 +507,9 @@ accept an additional argument for the ON clause, which is stated using the same SQL Expression mechanics as we saw about in :ref:`tutorial_select_where_clause`:: >>> print( - ... select(address_table.c.email_address). - ... select_from(user_table). - ... join(address_table, user_table.c.id == address_table.c.user_id) + ... select(address_table.c.email_address) + ... .select_from(user_table) + ... 
.join(address_table, user_table.c.id == address_table.c.user_id) ... ) {opensql}SELECT address.email_address FROM user_account JOIN address ON user_account.id = address.user_id @@ -552,15 +531,11 @@ accept keyword arguments :paramref:`_sql.Select.join.isouter` and :paramref:`_sql.Select.join.full` which will render LEFT OUTER JOIN and FULL OUTER JOIN, respectively:: - >>> print( - ... select(user_table).join(address_table, isouter=True) - ... ) + >>> print(select(user_table).join(address_table, isouter=True)) {opensql}SELECT user_account.id, user_account.name, user_account.fullname FROM user_account LEFT OUTER JOIN address ON user_account.id = address.user_id{stop} - >>> print( - ... select(user_table).join(address_table, full=True) - ... ) + >>> print(select(user_table).join(address_table, full=True)) {opensql}SELECT user_account.id, user_account.name, user_account.fullname FROM user_account FULL OUTER JOIN address ON user_account.id = address.user_id{stop} @@ -657,10 +632,10 @@ than one address: >>> with engine.connect() as conn: ... result = conn.execute( - ... select(User.name, func.count(Address.id).label("count")). - ... join(Address). - ... group_by(User.name). - ... having(func.count(Address.id) > 1) + ... select(User.name, func.count(Address.id).label("count")) + ... .join(Address) + ... .group_by(User.name) + ... .having(func.count(Address.id) > 1) ... ) ... print(result.all()) {opensql}BEGIN (implicit) @@ -690,10 +665,11 @@ error if no match is found. The unary modifiers .. sourcecode:: pycon+sql >>> from sqlalchemy import func, desc - >>> stmt = select( - ... Address.user_id, - ... func.count(Address.id).label('num_addresses')).\ - ... group_by("user_id").order_by("user_id", desc("num_addresses")) + >>> stmt = ( + ... select(Address.user_id, func.count(Address.id).label("num_addresses")) + ... .group_by("user_id") + ... .order_by("user_id", desc("num_addresses")) + ... 
) >>> print(stmt) {opensql}SELECT address.user_id, count(address.id) AS num_addresses FROM address GROUP BY address.user_id ORDER BY address.user_id, num_addresses DESC @@ -720,8 +696,9 @@ below for example returns all unique pairs of user names:: >>> user_alias_1 = user_table.alias() >>> user_alias_2 = user_table.alias() >>> print( - ... select(user_alias_1.c.name, user_alias_2.c.name). - ... join_from(user_alias_1, user_alias_2, user_alias_1.c.id > user_alias_2.c.id) + ... select(user_alias_1.c.name, user_alias_2.c.name).join_from( + ... user_alias_1, user_alias_2, user_alias_1.c.id > user_alias_2.c.id + ... ) ... ) {opensql}SELECT user_account_1.name, user_account_2.name AS name_1 FROM user_account AS user_account_1 @@ -743,11 +720,11 @@ while maintaining ORM functionality. The SELECT below selects from the >>> address_alias_1 = aliased(Address) >>> address_alias_2 = aliased(Address) >>> print( - ... select(User). - ... join_from(User, address_alias_1). - ... where(address_alias_1.email_address == 'patrick@aol.com'). - ... join_from(User, address_alias_2). - ... where(address_alias_2.email_address == 'patrick@gmail.com') + ... select(User) + ... .join_from(User, address_alias_1) + ... .where(address_alias_1.email_address == "patrick@aol.com") + ... .join_from(User, address_alias_2) + ... .where(address_alias_2.email_address == "patrick@gmail.com") ... ) {opensql}SELECT user_account.id, user_account.name, user_account.fullname FROM user_account @@ -788,10 +765,11 @@ We can construct a :class:`_sql.Subquery` that will select an aggregate count of rows from the ``address`` table (aggregate functions and GROUP BY were introduced previously at :ref:`tutorial_group_by_w_aggregates`): - >>> subq = select( - ... func.count(address_table.c.id).label("count"), - ... address_table.c.user_id - ... ).group_by(address_table.c.user_id).subquery() + >>> subq = ( + ... select(func.count(address_table.c.id).label("count"), address_table.c.user_id) + ... 
.group_by(address_table.c.user_id) + ... .subquery() + ... ) Stringifying the subquery by itself without it being embedded inside of another :class:`_sql.Select` or other statement produces the plain SELECT statement @@ -817,11 +795,9 @@ With a selection of rows contained within the ``subq`` object, we can apply the object to a larger :class:`_sql.Select` that will join the data to the ``user_account`` table:: - >>> stmt = select( - ... user_table.c.name, - ... user_table.c.fullname, - ... subq.c.count - ... ).join_from(user_table, subq) + >>> stmt = select(user_table.c.name, user_table.c.fullname, subq.c.count).join_from( + ... user_table, subq + ... ) >>> print(stmt) {opensql}SELECT user_account.name, user_account.fullname, anon_1.count @@ -847,16 +823,15 @@ the invocation of the :meth:`_sql.Select.subquery` method to use element in the same way, but the SQL rendered is the very different common table expression syntax:: - >>> subq = select( - ... func.count(address_table.c.id).label("count"), - ... address_table.c.user_id - ... ).group_by(address_table.c.user_id).cte() + >>> subq = ( + ... select(func.count(address_table.c.id).label("count"), address_table.c.user_id) + ... .group_by(address_table.c.user_id) + ... .cte() + ... ) - >>> stmt = select( - ... user_table.c.name, - ... user_table.c.fullname, - ... subq.c.count - ... ).join_from(user_table, subq) + >>> stmt = select(user_table.c.name, user_table.c.fullname, subq.c.count).join_from( + ... user_table, subq + ... ) >>> print(stmt) {opensql}WITH anon_1 AS @@ -907,9 +882,13 @@ each ``Address`` object ultimately came from a subquery against the .. sourcecode:: python+sql - >>> subq = select(Address).where(~Address.email_address.like('%@aol.com')).subquery() + >>> subq = select(Address).where(~Address.email_address.like("%@aol.com")).subquery() >>> address_subq = aliased(Address, subq) - >>> stmt = select(User, address_subq).join_from(User, address_subq).order_by(User.id, address_subq.id) + >>> stmt = ( + ... 
select(User, address_subq) + ... .join_from(User, address_subq) + ... .order_by(User.id, address_subq.id) + ... ) >>> with Session(engine) as session: ... for user, address in session.execute(stmt): ... print(f"{user} {address}") @@ -932,9 +911,13 @@ Another example follows, which is exactly the same except it makes use of the .. sourcecode:: python+sql - >>> cte_obj = select(Address).where(~Address.email_address.like('%@aol.com')).cte() + >>> cte_obj = select(Address).where(~Address.email_address.like("%@aol.com")).cte() >>> address_cte = aliased(Address, cte_obj) - >>> stmt = select(User, address_cte).join_from(User, address_cte).order_by(User.id, address_cte.id) + >>> stmt = ( + ... select(User, address_cte) + ... .join_from(User, address_cte) + ... .order_by(User.id, address_cte.id) + ... ) >>> with Session(engine) as session: ... for user, address in session.execute(stmt): ... print(f"{user} {address}") @@ -981,9 +964,11 @@ subquery is indicated explicitly by making use of the :meth:`_sql.Select.scalar_ method as below. It's default string form when stringified by itself renders as an ordinary SELECT statement that is selecting from two tables:: - >>> subq = select(func.count(address_table.c.id)).\ - ... where(user_table.c.id == address_table.c.user_id).\ - ... scalar_subquery() + >>> subq = ( + ... select(func.count(address_table.c.id)) + ... .where(user_table.c.id == address_table.c.user_id) + ... .scalar_subquery() + ... ) >>> print(subq) {opensql}(SELECT count(address.id) AS count_1 FROM address, user_account @@ -1016,13 +1001,15 @@ Simple correlated subqueries will usually do the right thing that's desired. However, in the case where the correlation is ambiguous, SQLAlchemy will let us know that more clarity is needed:: - >>> stmt = select( - ... user_table.c.name, - ... address_table.c.email_address, - ... subq.label("address_count") - ... ).\ - ... join_from(user_table, address_table).\ - ... 
order_by(user_table.c.id, address_table.c.id) + >>> stmt = ( + ... select( + ... user_table.c.name, + ... address_table.c.email_address, + ... subq.label("address_count"), + ... ) + ... .join_from(user_table, address_table) + ... .order_by(user_table.c.id, address_table.c.id) + ... ) >>> print(stmt) Traceback (most recent call last): ... @@ -1034,9 +1021,12 @@ To specify that the ``user_table`` is the one we seek to correlate we specify this using the :meth:`_sql.ScalarSelect.correlate` or :meth:`_sql.ScalarSelect.correlate_except` methods:: - >>> subq = select(func.count(address_table.c.id)).\ - ... where(user_table.c.id == address_table.c.user_id).\ - ... scalar_subquery().correlate(user_table) + >>> subq = ( + ... select(func.count(address_table.c.id)) + ... .where(user_table.c.id == address_table.c.user_id) + ... .scalar_subquery() + ... .correlate(user_table) + ... ) The statement then can return the data for this column like any other: @@ -1047,10 +1037,10 @@ The statement then can return the data for this column like any other: ... select( ... user_table.c.name, ... address_table.c.email_address, - ... subq.label("address_count") - ... ). - ... join_from(user_table, address_table). - ... order_by(user_table.c.id, address_table.c.id) + ... subq.label("address_count"), + ... ) + ... .join_from(user_table, address_table) + ... .order_by(user_table.c.id, address_table.c.id) ... ) ... print(result.all()) {opensql}BEGIN (implicit) @@ -1091,21 +1081,19 @@ use of LATERAL, selecting the "user account / count of email address" data as was discussed in the previous section:: >>> subq = ( - ... select( - ... func.count(address_table.c.id).label("address_count"), - ... address_table.c.email_address, - ... address_table.c.user_id, - ... ). - ... where(user_table.c.id == address_table.c.user_id). - ... lateral() + ... select( + ... func.count(address_table.c.id).label("address_count"), + ... address_table.c.email_address, + ... address_table.c.user_id, + ... ) + ... 
.where(user_table.c.id == address_table.c.user_id) + ... .lateral() + ... ) + >>> stmt = ( + ... select(user_table.c.name, subq.c.address_count, subq.c.email_address) + ... .join_from(user_table, subq) + ... .order_by(user_table.c.id, subq.c.email_address) ... ) - >>> stmt = select( - ... user_table.c.name, - ... subq.c.address_count, - ... subq.c.email_address - ... ).\ - ... join_from(user_table, subq).\ - ... order_by(user_table.c.id, subq.c.email_address) >>> print(stmt) {opensql}SELECT user_account.name, anon_1.address_count, anon_1.email_address FROM user_account @@ -1156,8 +1144,8 @@ that it has fewer methods. The :class:`_sql.CompoundSelect` produced by :meth:`_engine.Connection.execute`:: >>> from sqlalchemy import union_all - >>> stmt1 = select(user_table).where(user_table.c.name == 'sandy') - >>> stmt2 = select(user_table).where(user_table.c.name == 'spongebob') + >>> stmt1 = select(user_table).where(user_table.c.name == "sandy") + >>> stmt2 = select(user_table).where(user_table.c.name == "spongebob") >>> u = union_all(stmt1, stmt2) >>> with engine.connect() as conn: ... result = conn.execute(u) @@ -1180,9 +1168,9 @@ collection that may be referred towards in an enclosing :func:`_sql.select`:: >>> u_subq = u.subquery() >>> stmt = ( - ... select(u_subq.c.name, address_table.c.email_address). - ... join_from(address_table, u_subq). - ... order_by(u_subq.c.name, address_table.c.email_address) + ... select(u_subq.c.name, address_table.c.email_address) + ... .join_from(address_table, u_subq) + ... .order_by(u_subq.c.name, address_table.c.email_address) ... ) >>> with engine.connect() as conn: ... 
result = conn.execute(stmt) @@ -1217,8 +1205,8 @@ object that represents the SELECT / UNION / etc statement we want to execute; this statement should be composed against the target ORM entities or their underlying mapped :class:`_schema.Table` objects:: - >>> stmt1 = select(User).where(User.name == 'sandy') - >>> stmt2 = select(User).where(User.name == 'spongebob') + >>> stmt1 = select(User).where(User.name == "sandy") + >>> stmt2 = select(User).where(User.name == "spongebob") >>> u = union_all(stmt1, stmt2) For a simple SELECT with UNION that is not already nested inside of a @@ -1292,15 +1280,13 @@ can return ``user_account`` rows that have more than one related row in .. sourcecode:: pycon+sql >>> subq = ( - ... select(func.count(address_table.c.id)). - ... where(user_table.c.id == address_table.c.user_id). - ... group_by(address_table.c.user_id). - ... having(func.count(address_table.c.id) > 1) + ... select(func.count(address_table.c.id)) + ... .where(user_table.c.id == address_table.c.user_id) + ... .group_by(address_table.c.user_id) + ... .having(func.count(address_table.c.id) > 1) ... ).exists() >>> with engine.connect() as conn: - ... result = conn.execute( - ... select(user_table.c.name).where(subq) - ... ) + ... result = conn.execute(select(user_table.c.name).where(subq)) ... print(result.all()) {opensql}BEGIN (implicit) SELECT user_account.name @@ -1322,13 +1308,10 @@ clause: .. sourcecode:: pycon+sql >>> subq = ( - ... select(address_table.c.id). - ... where(user_table.c.id == address_table.c.user_id) + ... select(address_table.c.id).where(user_table.c.id == address_table.c.user_id) ... ).exists() >>> with engine.connect() as conn: - ... result = conn.execute( - ... select(user_table.c.name).where(~subq) - ... ) + ... result = conn.execute(select(user_table.c.name).where(~subq)) ... print(result.all()) {opensql}BEGIN (implicit) SELECT user_account.name @@ -1584,11 +1567,15 @@ number the email addresses of individual users: .. 
sourcecode:: pycon+sql - >>> stmt = select( - ... func.row_number().over(partition_by=user_table.c.name), - ... user_table.c.name, - ... address_table.c.email_address - ... ).select_from(user_table).join(address_table) + >>> stmt = ( + ... select( + ... func.row_number().over(partition_by=user_table.c.name), + ... user_table.c.name, + ... address_table.c.email_address, + ... ) + ... .select_from(user_table) + ... .join(address_table) + ... ) >>> with engine.connect() as conn: # doctest:+SKIP ... result = conn.execute(stmt) ... print(result.all()) @@ -1606,10 +1593,15 @@ We also may make use of the ``ORDER BY`` clause using :paramref:`_functions.Func .. sourcecode:: pycon+sql - >>> stmt = select( - ... func.count().over(order_by=user_table.c.name), - ... user_table.c.name, - ... address_table.c.email_address).select_from(user_table).join(address_table) + >>> stmt = ( + ... select( + ... func.count().over(order_by=user_table.c.name), + ... user_table.c.name, + ... address_table.c.email_address, + ... ) + ... .select_from(user_table) + ... .join(address_table) + ... ) >>> with engine.connect() as conn: # doctest:+SKIP ... result = conn.execute(stmt) ... print(result.all()) @@ -1648,7 +1640,7 @@ method:: >>> print( ... func.unnest( - ... func.percentile_disc([0.25,0.5,0.75,1]).within_group(user_table.c.name) + ... func.percentile_disc([0.25, 0.5, 0.75, 1]).within_group(user_table.c.name) ... ) ... ) unnest(percentile_disc(:percentile_disc_1) WITHIN GROUP (ORDER BY user_account.name)) @@ -1657,10 +1649,16 @@ method:: particular subset of rows compared to the total range of rows returned, available using the :meth:`_functions.FunctionElement.filter` method:: - >>> stmt = select( - ... func.count(address_table.c.email_address).filter(user_table.c.name == 'sandy'), - ... func.count(address_table.c.email_address).filter(user_table.c.name == 'spongebob') - ... ).select_from(user_table).join(address_table) + >>> stmt = ( + ... select( + ... 
func.count(address_table.c.email_address).filter(user_table.c.name == "sandy"), + ... func.count(address_table.c.email_address).filter( + ... user_table.c.name == "spongebob" + ... ), + ... ) + ... .select_from(user_table) + ... .join(address_table) + ... ) >>> with engine.connect() as conn: # doctest:+SKIP ... result = conn.execute(stmt) ... print(result.all()) @@ -1823,11 +1821,7 @@ string into one of MySQL's JSON functions: >>> from sqlalchemy import JSON >>> from sqlalchemy import type_coerce >>> from sqlalchemy.dialects import mysql - >>> s = select( - ... type_coerce( - ... {'some_key': {'foo': 'bar'}}, JSON - ... )['some_key'] - ... ) + >>> s = select(type_coerce({"some_key": {"foo": "bar"}}, JSON)["some_key"]) >>> print(s.compile(dialect=mysql.dialect())) SELECT JSON_EXTRACT(%s, %s) AS anon_1 diff --git a/doc/build/tutorial/data_update.rst b/doc/build/tutorial/data_update.rst index ccd274a80d..a34d4622cb 100644 --- a/doc/build/tutorial/data_update.rst +++ b/doc/build/tutorial/data_update.rst @@ -56,8 +56,9 @@ A basic UPDATE looks like:: >>> from sqlalchemy import update >>> stmt = ( - ... update(user_table).where(user_table.c.name == 'patrick'). - ... values(fullname='Patrick the Star') + ... update(user_table) + ... .where(user_table.c.name == "patrick") + ... .values(fullname="Patrick the Star") ... ) >>> print(stmt) {opensql}UPDATE user_account SET fullname=:fullname WHERE user_account.name = :name_1 @@ -70,10 +71,7 @@ keyword arguments. UPDATE supports all the major SQL forms of UPDATE, including updates against expressions, where we can make use of :class:`_schema.Column` expressions:: - >>> stmt = ( - ... update(user_table). - ... values(fullname="Username: " + user_table.c.name) - ... 
) + >>> stmt = update(user_table).values(fullname="Username: " + user_table.c.name) >>> print(stmt) {opensql}UPDATE user_account SET fullname=(:name_1 || user_account.name) @@ -86,19 +84,19 @@ that literal values would normally go: >>> from sqlalchemy import bindparam >>> stmt = ( - ... update(user_table). - ... where(user_table.c.name == bindparam('oldname')). - ... values(name=bindparam('newname')) + ... update(user_table) + ... .where(user_table.c.name == bindparam("oldname")) + ... .values(name=bindparam("newname")) ... ) >>> with engine.begin() as conn: - ... conn.execute( - ... stmt, - ... [ - ... {'oldname':'jack', 'newname':'ed'}, - ... {'oldname':'wendy', 'newname':'mary'}, - ... {'oldname':'jim', 'newname':'jake'}, - ... ] - ... ) + ... conn.execute( + ... stmt, + ... [ + ... {"oldname": "jack", "newname": "ed"}, + ... {"oldname": "wendy", "newname": "mary"}, + ... {"oldname": "jim", "newname": "jake"}, + ... ], + ... ) {opensql}BEGIN (implicit) UPDATE user_account SET name=? WHERE user_account.name = ? [...] [('ed', 'jack'), ('mary', 'wendy'), ('jake', 'jim')] @@ -118,11 +116,11 @@ An UPDATE statement can make use of rows in other tables by using a anywhere a column expression might be placed:: >>> scalar_subq = ( - ... select(address_table.c.email_address). - ... where(address_table.c.user_id == user_table.c.id). - ... order_by(address_table.c.id). - ... limit(1). - ... scalar_subquery() + ... select(address_table.c.email_address) + ... .where(address_table.c.user_id == user_table.c.id) + ... .order_by(address_table.c.id) + ... .limit(1) + ... .scalar_subquery() ... ) >>> update_stmt = update(user_table).values(fullname=scalar_subq) >>> print(update_stmt) @@ -143,11 +141,11 @@ syntax will be generated implicitly when additional tables are located in the WHERE clause of the statement:: >>> update_stmt = ( - ... update(user_table). - ... where(user_table.c.id == address_table.c.user_id). - ... where(address_table.c.email_address == 'patrick@aol.com'). 
- ... values(fullname='Pat') - ... ) + ... update(user_table) + ... .where(user_table.c.id == address_table.c.user_id) + ... .where(address_table.c.email_address == "patrick@aol.com") + ... .values(fullname="Pat") + ... ) >>> print(update_stmt) {opensql}UPDATE user_account SET fullname=:fullname FROM address WHERE user_account.id = address.user_id AND address.email_address = :email_address_1 @@ -158,16 +156,16 @@ requires we refer to :class:`_schema.Table` objects in the VALUES clause in order to refer to additional tables:: >>> update_stmt = ( - ... update(user_table). - ... where(user_table.c.id == address_table.c.user_id). - ... where(address_table.c.email_address == 'patrick@aol.com'). - ... values( - ... { - ... user_table.c.fullname: "Pat", - ... address_table.c.email_address: "pat@aol.com" - ... } - ... ) - ... ) + ... update(user_table) + ... .where(user_table.c.id == address_table.c.user_id) + ... .where(address_table.c.email_address == "patrick@aol.com") + ... .values( + ... { + ... user_table.c.fullname: "Pat", + ... address_table.c.email_address: "pat@aol.com", + ... } + ... ) + ... ) >>> from sqlalchemy.dialects import mysql >>> print(update_stmt.compile(dialect=mysql.dialect())) {opensql}UPDATE user_account, address @@ -184,12 +182,8 @@ of an UPDATE actually impacts the evaluation of each expression. For this use case, the :meth:`_sql.Update.ordered_values` method accepts a sequence of tuples so that this order may be controlled [2]_:: - >>> update_stmt = ( - ... update(some_table). - ... ordered_values( - ... (some_table.c.y, 20), - ... (some_table.c.x, some_table.c.y + 10) - ... ) + >>> update_stmt = update(some_table).ordered_values( + ... (some_table.c.y, 20), (some_table.c.x, some_table.c.y + 10) ... ) >>> print(update_stmt) {opensql}UPDATE some_table SET y=:y, x=(some_table.y + :y_1) @@ -219,7 +213,7 @@ allowing for a RETURNING variant on some database backends. 
:: >>> from sqlalchemy import delete - >>> stmt = delete(user_table).where(user_table.c.name == 'patrick') + >>> stmt = delete(user_table).where(user_table.c.name == "patrick") >>> print(stmt) {opensql}DELETE FROM user_account WHERE user_account.name = :name_1 @@ -234,10 +228,10 @@ subqueries in the WHERE clause as well as backend-specific multiple table syntaxes, such as ``DELETE FROM..USING`` on MySQL:: >>> delete_stmt = ( - ... delete(user_table). - ... where(user_table.c.id == address_table.c.user_id). - ... where(address_table.c.email_address == 'patrick@aol.com') - ... ) + ... delete(user_table) + ... .where(user_table.c.id == address_table.c.user_id) + ... .where(address_table.c.email_address == "patrick@aol.com") + ... ) >>> from sqlalchemy.dialects import mysql >>> print(delete_stmt.compile(dialect=mysql.dialect())) {opensql}DELETE FROM user_account USING user_account, address @@ -258,9 +252,9 @@ is available from the :attr:`_engine.CursorResult.rowcount` attribute: >>> with engine.begin() as conn: ... result = conn.execute( - ... update(user_table). - ... values(fullname="Patrick McStar"). - ... where(user_table.c.name == 'patrick') + ... update(user_table) + ... .values(fullname="Patrick McStar") + ... .where(user_table.c.name == "patrick") ... ) ... print(result.rowcount) {opensql}BEGIN (implicit) @@ -315,9 +309,10 @@ be iterated:: >>> update_stmt = ( - ... update(user_table).where(user_table.c.name == 'patrick'). - ... values(fullname='Patrick the Star'). - ... returning(user_table.c.id, user_table.c.name) + ... update(user_table) + ... .where(user_table.c.name == "patrick") + ... .values(fullname="Patrick the Star") + ... .returning(user_table.c.id, user_table.c.name) ... ) >>> print(update_stmt) {opensql}UPDATE user_account SET fullname=:fullname @@ -325,8 +320,9 @@ be iterated:: RETURNING user_account.id, user_account.name{stop} >>> delete_stmt = ( - ... delete(user_table).where(user_table.c.name == 'patrick'). - ... 
returning(user_table.c.id, user_table.c.name) + ... delete(user_table) + ... .where(user_table.c.name == "patrick") + ... .returning(user_table.c.id, user_table.c.name) ... ) >>> print(delete_stmt) {opensql}DELETE FROM user_account diff --git a/doc/build/tutorial/dbapi_transactions.rst b/doc/build/tutorial/dbapi_transactions.rst index 6914104eed..58a4e3ab58 100644 --- a/doc/build/tutorial/dbapi_transactions.rst +++ b/doc/build/tutorial/dbapi_transactions.rst @@ -107,7 +107,7 @@ where we acquired the :class:`_future.Connection` object: ... conn.execute(text("CREATE TABLE some_table (x int, y int)")) ... conn.execute( ... text("INSERT INTO some_table (x, y) VALUES (:x, :y)"), - ... [{"x": 1, "y": 1}, {"x": 2, "y": 4}] + ... [{"x": 1, "y": 1}, {"x": 2, "y": 4}], ... ) ... conn.commit() {opensql}BEGIN (implicit) @@ -145,7 +145,7 @@ may be referred towards as **begin once**: >>> with engine.begin() as conn: ... conn.execute( ... text("INSERT INTO some_table (x, y) VALUES (:x, :y)"), - ... [{"x": 6, "y": 8}, {"x": 9, "y": 10}] + ... [{"x": 6, "y": 8}, {"x": 9, "y": 10}], ... ) {opensql}BEGIN (implicit) INSERT INTO some_table (x, y) VALUES (?, ?) @@ -316,12 +316,9 @@ construct accepts these using a colon format "``:y``". The actual value for .. sourcecode:: pycon+sql >>> with engine.connect() as conn: - ... result = conn.execute( - ... text("SELECT x, y FROM some_table WHERE y > :y"), - ... {"y": 2} - ... ) + ... result = conn.execute(text("SELECT x, y FROM some_table WHERE y > :y"), {"y": 2}) ... for row in result: - ... print(f"x: {row.x} y: {row.y}") + ... print(f"x: {row.x} y: {row.y}") {opensql}BEGIN (implicit) SELECT x, y FROM some_table WHERE y > ? [...] (2,) @@ -370,7 +367,7 @@ be invoked against each parameter set individually: >>> with engine.connect() as conn: ... conn.execute( ... text("INSERT INTO some_table (x, y) VALUES (:x, :y)"), - ... [{"x": 11, "y": 12}, {"x": 13, "y": 14}] + ... [{"x": 11, "y": 12}, {"x": 13, "y": 14}], ... ) ... 
conn.commit() {opensql}BEGIN (implicit) @@ -436,7 +433,7 @@ a context manager: >>> with Session(engine) as session: ... result = session.execute(stmt, {"y": 6}) ... for row in result: - ... print(f"x: {row.x} y: {row.y}") + ... print(f"x: {row.x} y: {row.y}") {opensql}BEGIN (implicit) SELECT x, y FROM some_table WHERE y > ? ORDER BY x, y [...] (6,){stop} @@ -462,7 +459,7 @@ our data: >>> with Session(engine) as session: ... result = session.execute( ... text("UPDATE some_table SET y=:y WHERE x=:x"), - ... [{"x": 9, "y":11}, {"x": 13, "y": 15}] + ... [{"x": 9, "y": 11}, {"x": 13, "y": 15}], ... ) ... session.commit() {opensql}BEGIN (implicit) diff --git a/doc/build/tutorial/metadata.rst b/doc/build/tutorial/metadata.rst index 6906267353..1b56994118 100644 --- a/doc/build/tutorial/metadata.rst +++ b/doc/build/tutorial/metadata.rst @@ -76,9 +76,9 @@ that will be how we will refer to the table in application code:: >>> user_table = Table( ... "user_account", ... metadata_obj, - ... Column('id', Integer, primary_key=True), - ... Column('name', String(30)), - ... Column('fullname', String) + ... Column("id", Integer, primary_key=True), + ... Column("name", String(30)), + ... Column("fullname", String), ... ) We can observe that the above :class:`_schema.Table` construct looks a lot like @@ -151,9 +151,9 @@ table:: >>> address_table = Table( ... "address", ... metadata_obj, - ... Column('id', Integer, primary_key=True), - ... Column('user_id', ForeignKey('user_account.id'), nullable=False), - ... Column('email_address', String, nullable=False) + ... Column("id", Integer, primary_key=True), + ... Column("user_id", ForeignKey("user_account.id"), nullable=False), + ... Column("email_address", String, nullable=False), ... ) The table above also features a third kind of constraint, which in SQL is the @@ -331,26 +331,26 @@ types:: >>> from sqlalchemy.orm import relationship >>> class User(Base): - ... __tablename__ = 'user_account' - ... + ... 
__tablename__ = "user_account" + ... ... id: Mapped[int] = mapped_column(primary_key=True) ... name: Mapped[str] = mapped_column(String(30)) ... fullname: Mapped[Optional[str]] - ... + ... ... addresses: Mapped[List["Address"]] = relationship(back_populates="user") - ... + ... ... def __repr__(self) -> str: - ... return f"User(id={self.id!r}, name={self.name!r}, fullname={self.fullname!r})" + ... return f"User(id={self.id!r}, name={self.name!r}, fullname={self.fullname!r})" >>> class Address(Base): - ... __tablename__ = 'address' - ... + ... __tablename__ = "address" + ... ... id: Mapped[int] = mapped_column(primary_key=True) ... email_address: Mapped[str] - ... user_id = mapped_column(ForeignKey('user_account.id')) - ... + ... user_id = mapped_column(ForeignKey("user_account.id")) + ... ... user: Mapped[User] = relationship(back_populates="addresses") - ... + ... ... def __repr__(self) -> str: ... return f"Address(id={self.id!r}, email_address={self.email_address!r})" @@ -472,7 +472,6 @@ the collection from the ``Base.metadata`` attribute and then using Base.metadata.create_all(engine) - Combining Core Table Declarations with ORM Declarative ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -497,6 +496,7 @@ and should not be run):: class Base(DeclarativeBase): pass + class User(Base): __table__ = user_table @@ -505,6 +505,7 @@ and should not be run):: def __repr__(self): return f"User({self.name!r}, {self.fullname!r})" + class Address(Base): __table__ = address_table diff --git a/doc/build/tutorial/orm_data_manipulation.rst b/doc/build/tutorial/orm_data_manipulation.rst index d33146b553..5414dd012e 100644 --- a/doc/build/tutorial/orm_data_manipulation.rst +++ b/doc/build/tutorial/orm_data_manipulation.rst @@ -287,9 +287,7 @@ from this row and we will get our updated value back: .. sourcecode:: pycon+sql - >>> sandy_fullname = session.execute( - ... select(User.fullname).where(User.id == 2) - ... 
).scalar_one() + >>> sandy_fullname = session.execute(select(User.fullname).where(User.id == 2)).scalar_one() {opensql}UPDATE user_account SET fullname=? WHERE user_account.id = ? [...] ('Sandy Squirrel', 2) SELECT user_account.fullname diff --git a/doc/build/tutorial/orm_related_objects.rst b/doc/build/tutorial/orm_related_objects.rst index 04a3d17285..4c16b146a0 100644 --- a/doc/build/tutorial/orm_related_objects.rst +++ b/doc/build/tutorial/orm_related_objects.rst @@ -25,24 +25,24 @@ and other directives: .. sourcecode:: python - from sqlalchemy.orm import Mapped - from sqlalchemy.orm import relationship + from sqlalchemy.orm import Mapped + from sqlalchemy.orm import relationship - class User(Base): - __tablename__ = 'user_account' - # ... mapped_column() mappings + class User(Base): + __tablename__ = "user_account" - addresses: Mapped[list["Address"]] = relationship(back_populates="user") + # ... mapped_column() mappings + addresses: Mapped[list["Address"]] = relationship(back_populates="user") - class Address(Base): - __tablename__ = 'address' - # ... mapped_column() mappings + class Address(Base): + __tablename__ = "address" - user: Mapped["User"] = relationship(back_populates="addresses") + # ... mapped_column() mappings + user: Mapped["User"] = relationship(back_populates="addresses") Above, the ``User`` class now has an attribute ``User.addresses`` and the ``Address`` class has an attribute ``Address.user``. The @@ -73,7 +73,7 @@ We can start by illustrating what :func:`_orm.relationship` does to instances of objects. 
If we make a new ``User`` object, we can note that there is a Python list when we access the ``.addresses`` element:: - >>> u1 = User(name='pkrabs', fullname='Pearl Krabs') + >>> u1 = User(name="pkrabs", fullname="Pearl Krabs") >>> u1.addresses [] @@ -304,11 +304,7 @@ corresponding to the :func:`_orm.relationship` may be passed as the **single argument** to :meth:`_sql.Select.join`, where it serves to indicate both the right side of the join as well as the ON clause at once:: - >>> print( - ... select(Address.email_address). - ... select_from(User). - ... join(User.addresses) - ... ) + >>> print(select(Address.email_address).select_from(User).join(User.addresses)) {opensql}SELECT address.email_address FROM user_account JOIN address ON user_account.id = address.user_id @@ -320,10 +316,7 @@ ON clause, it works because of the :class:`_schema.ForeignKeyConstraint` between the two mapped :class:`_schema.Table` objects, not because of the :func:`_orm.relationship` objects on the ``User`` and ``Address`` classes:: - >>> print( - ... select(Address.email_address). - ... join_from(User, Address) - ... ) + >>> print(select(Address.email_address).join_from(User, Address)) {opensql}SELECT address.email_address FROM user_account JOIN address ON user_account.id = address.user_id @@ -403,10 +396,13 @@ the :paramref:`_orm.relationship.lazy` option, e.g.: from sqlalchemy.orm import Mapped from sqlalchemy.orm import relationship + class User(Base): - __tablename__ = 'user_account' + __tablename__ = "user_account" - addresses: Mapped[list["Address"]] = relationship(back_populates="user", lazy="selectin") + addresses: Mapped[list["Address"]] = relationship( + back_populates="user", lazy="selectin" + ) Each loader strategy object adds some kind of information to the statement that will be used later by the :class:`_orm.Session` when it is deciding how various @@ -446,11 +442,11 @@ related ``Address`` objects: .. 
sourcecode:: pycon+sql >>> from sqlalchemy.orm import selectinload - >>> stmt = ( - ... select(User).options(selectinload(User.addresses)).order_by(User.id) - ... ) + >>> stmt = select(User).options(selectinload(User.addresses)).order_by(User.id) >>> for row in session.execute(stmt): - ... print(f"{row.User.name} ({', '.join(a.email_address for a in row.User.addresses)})") + ... print( + ... f"{row.User.name} ({', '.join(a.email_address for a in row.User.addresses)})" + ... ) {opensql}SELECT user_account.id, user_account.name, user_account.fullname FROM user_account ORDER BY user_account.id [...] () @@ -490,7 +486,9 @@ as below where we know that all ``Address`` objects have an associated >>> from sqlalchemy.orm import joinedload >>> stmt = ( - ... select(Address).options(joinedload(Address.user, innerjoin=True)).order_by(Address.id) + ... select(Address) + ... .options(joinedload(Address.user, innerjoin=True)) + ... .order_by(Address.id) ... ) >>> for row in session.execute(stmt): ... print(f"{row.Address.email_address} {row.Address.user.name}") @@ -557,10 +555,11 @@ example: >>> from sqlalchemy.orm import contains_eager >>> stmt = ( - ... select(Address). - ... join(Address.user). - ... where(User.name == 'pkrabs'). - ... options(contains_eager(Address.user)).order_by(Address.id) + ... select(Address) + ... .join(Address.user) + ... .where(User.name == "pkrabs") + ... .options(contains_eager(Address.user)) + ... .order_by(Address.id) ... ) >>> for row in session.execute(stmt): ... print(f"{row.Address.email_address} {row.Address.user.name}") @@ -578,10 +577,11 @@ rows. If we had applied :func:`_orm.joinedload` separately, we would get a SQL query that unnecessarily joins twice:: >>> stmt = ( - ... select(Address). - ... join(Address.user). - ... where(User.name == 'pkrabs'). - ... options(joinedload(Address.user)).order_by(Address.id) + ... select(Address) + ... .join(Address.user) + ... .where(User.name == "pkrabs") + ... .options(joinedload(Address.user)) + ... 
.order_by(Address.id) ... ) >>> print(stmt) # SELECT has a JOIN and LEFT OUTER JOIN unnecessarily {opensql}SELECT address.id, address.email_address, address.user_id, @@ -620,22 +620,24 @@ relationship will never try to emit SQL: from sqlalchemy.orm import Mapped from sqlalchemy.orm import relationship + class User(Base): - __tablename__ = 'user_account' + __tablename__ = "user_account" # ... mapped_column() mappings - addresses: Mapped[list["Address"]] = relationship(back_populates="user", lazy="raise_on_sql") + addresses: Mapped[list["Address"]] = relationship( + back_populates="user", lazy="raise_on_sql" + ) class Address(Base): - __tablename__ = 'address' + __tablename__ = "address" # ... mapped_column() mappings user: Mapped["User"] = relationship(back_populates="addresses", lazy="raise_on_sql") - Using such a mapping, the application is blocked from lazy loading, indicating that a particular query would need to specify a loader strategy: diff --git a/tools/format_docs_code.py b/tools/format_docs_code.py new file mode 100644 index 0000000000..04dc59d36c --- /dev/null +++ b/tools/format_docs_code.py @@ -0,0 +1,289 @@ +from argparse import ArgumentParser +from argparse import RawDescriptionHelpFormatter +from collections.abc import Iterator +from pathlib import Path +import re + +from black import DEFAULT_LINE_LENGTH +from black import format_str +from black import Mode +from black import parse_pyproject_toml +from black import TargetVersion + + +home = Path(__file__).parent.parent + +_Block = list[tuple[str, int, str | None, str]] + + +def _format_block( + input_block: _Block, exit_on_error: bool, is_doctest: bool +) -> list[str]: + code = "\n".join(c for *_, c in input_block) + try: + formatted = format_str(code, mode=BLACK_MODE) + except Exception as e: + if is_doctest: + start_line = input_block[0][1] + print( + "Could not format code block starting at " + f"line {start_line}:\n{code}\nError: {e}" + ) + if exit_on_error: + print("Exiting since --exit-on-error 
was passed") + raise + else: + print("Ignoring error") + elif VERBOSE: + start_line = input_block[0][1] + print( + "Could not format code block starting at " + f"line {start_line}:\n---\n{code}\n---Error: {e}" + ) + return [line for line, *_ in input_block] + else: + formatted_code_lines = formatted.splitlines() + padding = input_block[0][2] + if is_doctest: + formatted_lines = [ + f"{padding}>>> {formatted_code_lines[0]}", + *(f"{padding}... {fcl}" for fcl in formatted_code_lines[1:]), + ] + else: + # The first line may have additional padding. + # If it does restore it + additionalPadding = re.match( + r"^(\s*)[^ ]?", input_block[0][3] + ).groups()[0] + formatted_lines = [ + f"{padding}{additionalPadding}{fcl}" if fcl else fcl + for fcl in formatted_code_lines + ] + if not input_block[-1][0] and formatted_lines[-1]: + # last line was empty and black removed it. restore it + formatted_lines.append("") + return formatted_lines + + +doctest_code_start = re.compile(r"^(\s+)>>>\s?(.+)") +doctest_code_continue = re.compile(r"^\s+\.\.\.\s?(\s*.*)") +plain_indent = re.compile(r"^(\s{4})(\s*[^: ].*)") +format_directive = re.compile(r"^\.\.\s*format\s*:\s*(on|off)\s*$") +dont_format_under_directive = re.compile(r"^\.\. 
(?:toctree)::\s*$") + + +def format_file( + file: Path, exit_on_error: bool, check: bool, no_plain: bool +) -> bool | None: + buffer = [] + if not check: + print(f"Running file {file} ..", end="") + original = file.read_text("utf-8") + doctest_block: _Block | None = None + plain_block: _Block | None = None + last_line = None + disable_format = False + non_code_directive = False + for line_no, line in enumerate(original.splitlines(), 1): + if match := format_directive.match(line): + disable_format = match.groups()[0] == "off" + elif match := dont_format_under_directive.match(line): + non_code_directive = True + + if doctest_block: + assert not plain_block + if match := doctest_code_continue.match(line): + doctest_block.append((line, line_no, None, match.groups()[0])) + continue + else: + buffer.extend( + _format_block( + doctest_block, exit_on_error, is_doctest=True + ) + ) + doctest_block = None + + if plain_block: + assert not doctest_block + if not line: + plain_block.append((line, line_no, None, line)) + continue + elif match := plain_indent.match(line): + plain_block.append((line, line_no, None, match.groups()[1])) + continue + else: + if non_code_directive: + buffer.extend(line for line, _, _, _ in plain_block) + else: + buffer.extend( + _format_block( + plain_block, exit_on_error, is_doctest=False + ) + ) + plain_block = None + non_code_directive = False + + if match := doctest_code_start.match(line): + if plain_block: + buffer.extend( + _format_block(plain_block, exit_on_error, is_doctest=False) + ) + plain_block = None + padding, code = match.groups() + doctest_block = [(line, line_no, padding, code)] + elif ( + not no_plain + and not disable_format + and not last_line + and (match := plain_indent.match(line)) + ): + # print('start plain', line) + assert not doctest_block + # start of a plain block + padding, code = match.groups() + plain_block = [(line, line_no, padding, code)] + else: + buffer.append(line) + last_line = line + + if doctest_block: + 
buffer.extend( + _format_block(doctest_block, exit_on_error, is_doctest=True) + ) + if plain_block: + if non_code_directive: + buffer.extend(line for line, _, _, _ in plain_block) + else: + buffer.extend( + _format_block(plain_block, exit_on_error, is_doctest=False) + ) + if buffer: + # if there is nothing in the buffer something strange happened so + # don't do anything + buffer.append("") + updated = "\n".join(buffer) + equal = original == updated + if not check: + print("..done. ", "No changes" if equal else "Changes detected") + if not equal: + # write only if there are changes to write + file.write_text(updated, "utf-8", newline="\n") + else: + if not check: + print(".. Nothing to write") + equal = bool(original) is False + + if check: + if not equal: + print(f"File {file} would be formatted") + return equal + else: + return None + + +def iter_files(directory) -> Iterator[Path]: + yield from (home / directory).glob("./**/*.rst") + + +def main( + file: str | None, + directory: str, + exit_on_error: bool, + check: bool, + no_plain: bool, +): + if file is not None: + result = [format_file(Path(file), exit_on_error, check, no_plain)] + else: + result = [ + format_file(doc, exit_on_error, check, no_plain) + for doc in iter_files(directory) + ] + + if check: + if all(result): + print("All files are correctly formatted") + exit(0) + else: + print("Some files would be reformatted") + exit(1) + + +if __name__ == "__main__": + parser = ArgumentParser( + description="""Formats code inside docs using black. Supports \ +doctest code blocks and also tries to format plain code blocks identified as \ +all indented blocks of at least 4 spaces, unless '--no-plain' is specified. + +Plain code blocks may lead to false positives. To disable formatting on a \ +file section, the comment ``.. format: off`` disables formatting until \ +``.. format: on`` is encountered or the file ends. +Another alternative is to use less than 4 spaces to indent the code block.
+""", + formatter_class=RawDescriptionHelpFormatter, + ) + parser.add_argument( + "-f", "--file", help="Format only this file instead of all docs" + ) + parser.add_argument( + "-d", + "--directory", + help="Find documents in this directory and its sub dirs", + default="doc/build", + ) + parser.add_argument( + "-c", + "--check", + help="Don't write the files back, just return the " + "status. Return code 0 means nothing would change. " + "Return code 1 means some files would be reformatted.", + action="store_true", + ) + parser.add_argument( + "-e", + "--exit-on-error", + help="Exit in case of black format error instead of ignoring it. " + "This option is only valid for doctest code blocks", + action="store_true", + ) + parser.add_argument( + "-l", + "--project-line-length", + help="Configure the line length to the project value instead " + "of using the black default of 88", + action="store_true", + ) + parser.add_argument( + "-v", + "--verbose", + help="Increase verbosity", + action="store_true", + ) + parser.add_argument( + "-n", + "--no-plain", + help="Disable plain code blocks formatting that's more difficult " + "to parse compared to doctest code blocks", + action="store_true", + ) + args = parser.parse_args() + + config = parse_pyproject_toml(home / "pyproject.toml") + BLACK_MODE = Mode( + target_versions=set( + TargetVersion[val.upper()] + for val in config.get("target_version", []) + ), + line_length=config.get("line_length", DEFAULT_LINE_LENGTH) + if args.project_line_length + else DEFAULT_LINE_LENGTH, + ) + VERBOSE = args.verbose + + main( + args.file, + args.directory, + args.exit_on_error, + args.check, + args.no_plain, + ) diff --git a/tox.ini b/tox.ini index e7f17e2e9e..0b0e0d7ccb 100644 --- a/tox.ini +++ b/tox.ini @@ -188,6 +188,7 @@ commands = # test with cython and without cython exts running slotscheck -m sqlalchemy env DISABLE_SQLALCHEMY_CEXT_RUNTIME=1 slotscheck -m sqlalchemy + python ./tools/format_docs_code.py --check # "pep8" env was renamed 
to "lint".