From: Mike Bayer Date: Sun, 2 Oct 2022 18:04:43 +0000 (-0400) Subject: experiment w/ docs formatter on SQLA 1.4 X-Git-Tag: rel_1_4_42~12^2 X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=c41b83afb58a445c0a97bf030e1e3b81c1f507f0;p=thirdparty%2Fsqlalchemy%2Fsqlalchemy.git experiment w/ docs formatter on SQLA 1.4 Enhanced the "{sql}" thing some more so that it maintains these tags exactly as they were. Note that the "{sql}" and "{stop}" tags are intended to be on the Python code lines, not the SQL lines, so special handling to find these, preserve them, then add them back after python code is formatted is added here. Change-Id: I07acd3ea54608cd63bee8003679f8dff131a90f4 --- diff --git a/doc/build/changelog/changelog_04.rst b/doc/build/changelog/changelog_04.rst index 9261c1262b..10e632c93c 100644 --- a/doc/build/changelog/changelog_04.rst +++ b/doc/build/changelog/changelog_04.rst @@ -540,9 +540,7 @@ to work for subclasses, if they are present, for example:: - sess.query(Company).options( - eagerload_all( - )) + sess.query(Company).options(eagerload_all()) to load Company objects, their employees, and the 'machines' collection of employees who happen to be diff --git a/doc/build/changelog/changelog_08.rst b/doc/build/changelog/changelog_08.rst index f6be2e3e19..3bf8f67f20 100644 --- a/doc/build/changelog/changelog_08.rst +++ b/doc/build/changelog/changelog_08.rst @@ -970,7 +970,7 @@ del_ = delete(SomeMappedClass).where(SomeMappedClass.id == 5) - upd = update(SomeMappedClass).where(SomeMappedClass.id == 5).values(name='ed') + upd = update(SomeMappedClass).where(SomeMappedClass.id == 5).values(name="ed") .. 
change:: :tags: bug, orm @@ -2079,8 +2079,7 @@ to the original, older use case for :meth:`_query.Query.select_from`, which is that of restating the mapped entity in terms of a different selectable:: - session.query(User.name).\ - select_from(user_table.select().where(user_table.c.id > 5)) + session.query(User.name).select_from(user_table.select().where(user_table.c.id > 5)) Which produces:: @@ -2281,11 +2280,11 @@ original. Allows symmetry when using :class:`_engine.Engine` and :class:`_engine.Connection` objects as context managers:: - with conn.connect() as c: # leaves the Connection open - c.execute("...") + with conn.connect() as c: # leaves the Connection open + c.execute("...") with engine.connect() as c: # closes the Connection - c.execute("...") + c.execute("...") .. change:: :tags: engine diff --git a/doc/build/changelog/changelog_09.rst b/doc/build/changelog/changelog_09.rst index c9ec5f3a49..d00e043326 100644 --- a/doc/build/changelog/changelog_09.rst +++ b/doc/build/changelog/changelog_09.rst @@ -1708,15 +1708,15 @@ ad-hoc keyword arguments within the :attr:`.Index.kwargs` collection, after construction:: - idx = Index('a', 'b') - idx.kwargs['mysql_someargument'] = True + idx = Index("a", "b") + idx.kwargs["mysql_someargument"] = True To suit the use case of allowing custom arguments at construction time, the :meth:`.DialectKWArgs.argument_for` method now allows this registration:: - Index.argument_for('mysql', 'someargument', False) + Index.argument_for("mysql", "someargument", False) - idx = Index('a', 'b', mysql_someargument=True) + idx = Index("a", "b", mysql_someargument=True) .. 
seealso:: diff --git a/doc/build/changelog/migration_04.rst b/doc/build/changelog/migration_04.rst index b503134079..93a2b654fb 100644 --- a/doc/build/changelog/migration_04.rst +++ b/doc/build/changelog/migration_04.rst @@ -27,7 +27,7 @@ Secondly, anywhere you used to say ``engine=``, :: - myengine = create_engine('sqlite://') + myengine = create_engine("sqlite://") meta = MetaData(myengine) @@ -56,6 +56,7 @@ In 0.3, this code worked: from sqlalchemy import * + class UTCDateTime(types.TypeDecorator): pass @@ -66,6 +67,7 @@ In 0.4, one must do: from sqlalchemy import * from sqlalchemy import types + class UTCDateTime(types.TypeDecorator): pass @@ -119,7 +121,7 @@ when working with mapped classes: :: - session.query(User).filter(and_(User.name == 'fred', User.id > 17)) + session.query(User).filter(and_(User.name == "fred", User.id > 17)) While simple column-based comparisons are no big deal, the class attributes have some new "higher level" constructs @@ -139,18 +141,18 @@ available, including what was previously only available in # return all users who contain a particular address with # the email_address like '%foo%' - filter(User.addresses.any(Address.email_address.like('%foo%'))) + filter(User.addresses.any(Address.email_address.like("%foo%"))) # same, email address equals 'foo@bar.com'. can fall back to keyword # args for simple comparisons - filter(User.addresses.any(email_address = 'foo@bar.com')) + filter(User.addresses.any(email_address="foo@bar.com")) # return all Addresses whose user attribute has the username 'ed' - filter(Address.user.has(name='ed')) + filter(Address.user.has(name="ed")) # return all Addresses whose user attribute has the username 'ed' # and an id > 5 (mixing clauses with kwargs) - filter(Address.user.has(User.id > 5, name='ed')) + filter(Address.user.has(User.id > 5, name="ed")) The ``Column`` collection remains available on mapped classes in the ``.c`` attribute. 
Note that property-based @@ -199,12 +201,20 @@ any ``Alias`` objects: :: # standard self-referential TreeNode mapper with backref - mapper(TreeNode, tree_nodes, properties={ - 'children':relation(TreeNode, backref=backref('parent', remote_side=tree_nodes.id)) - }) + mapper( + TreeNode, + tree_nodes, + properties={ + "children": relation( + TreeNode, backref=backref("parent", remote_side=tree_nodes.id) + ) + }, + ) # query for node with child containing "bar" two levels deep - session.query(TreeNode).join(["children", "children"], aliased=True).filter_by(name='bar') + session.query(TreeNode).join(["children", "children"], aliased=True).filter_by( + name="bar" + ) To add criterion for each table along the way in an aliased join, you can use ``from_joinpoint`` to keep joining against @@ -215,15 +225,15 @@ the same line of aliases: # search for the treenode along the path "n1/n12/n122" # first find a Node with name="n122" - q = sess.query(Node).filter_by(name='n122') + q = sess.query(Node).filter_by(name="n122") # then join to parent with "n12" - q = q.join('parent', aliased=True).filter_by(name='n12') + q = q.join("parent", aliased=True).filter_by(name="n12") # join again to the next parent with 'n1'. use 'from_joinpoint' # so we join from the previous point, instead of joining off the # root table - q = q.join('parent', aliased=True, from_joinpoint=True).filter_by(name='n1') + q = q.join("parent", aliased=True, from_joinpoint=True).filter_by(name="n1") node = q.first() @@ -271,17 +281,24 @@ deep you want to go. 
Lets show the self-referential :: - nodes = Table('nodes', metadata, - Column('id', Integer, primary_key=True), - Column('parent_id', Integer, ForeignKey('nodes.id')), - Column('name', String(30))) + nodes = Table( + "nodes", + metadata, + Column("id", Integer, primary_key=True), + Column("parent_id", Integer, ForeignKey("nodes.id")), + Column("name", String(30)), + ) + class TreeNode(object): pass - mapper(TreeNode, nodes, properties={ - 'children':relation(TreeNode, lazy=False, join_depth=3) - }) + + mapper( + TreeNode, + nodes, + properties={"children": relation(TreeNode, lazy=False, join_depth=3)}, + ) So what happens when we say: @@ -324,10 +341,13 @@ new type, ``Point``. Stores an x/y coordinate: def __init__(self, x, y): self.x = x self.y = y + def __composite_values__(self): return self.x, self.y + def __eq__(self, other): return other.x == self.x and other.y == self.y + def __ne__(self, other): return not self.__eq__(other) @@ -341,13 +361,15 @@ Let's create a table of vertices storing two points per row: :: - vertices = Table('vertices', metadata, - Column('id', Integer, primary_key=True), - Column('x1', Integer), - Column('y1', Integer), - Column('x2', Integer), - Column('y2', Integer), - ) + vertices = Table( + "vertices", + metadata, + Column("id", Integer, primary_key=True), + Column("x1", Integer), + Column("y1", Integer), + Column("x2", Integer), + Column("y2", Integer), + ) Then, map it ! 
We'll create a ``Vertex`` object which stores two ``Point`` objects: @@ -359,10 +381,15 @@ stores two ``Point`` objects: self.start = start self.end = end - mapper(Vertex, vertices, properties={ - 'start':composite(Point, vertices.c.x1, vertices.c.y1), - 'end':composite(Point, vertices.c.x2, vertices.c.y2) - }) + + mapper( + Vertex, + vertices, + properties={ + "start": composite(Point, vertices.c.x1, vertices.c.y1), + "end": composite(Point, vertices.c.x2, vertices.c.y2), + }, + ) Once you've set up your composite type, it's usable just like any other type: @@ -370,7 +397,7 @@ like any other type: :: - v = Vertex(Point(3, 4), Point(26,15)) + v = Vertex(Point(3, 4), Point(26, 15)) session.save(v) session.flush() @@ -388,7 +415,7 @@ work as primary keys too, and are usable in ``query.get()``: # a Document class which uses a composite Version # object as primary key - document = query.get(Version(1, 'a')) + document = query.get(Version(1, "a")) ``dynamic_loader()`` relations ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -438,16 +465,12 @@ eager in one pass: :: - mapper(Foo, foo_table, properties={ - 'bar':relation(Bar) - }) - mapper(Bar, bar_table, properties={ - 'bat':relation(Bat) - }) + mapper(Foo, foo_table, properties={"bar": relation(Bar)}) + mapper(Bar, bar_table, properties={"bat": relation(Bat)}) mapper(Bat, bat_table) # eager load bar and bat - session.query(Foo).options(eagerload_all('bar.bat')).filter(...).all() + session.query(Foo).options(eagerload_all("bar.bat")).filter(...).all() New Collection API ^^^^^^^^^^^^^^^^^^ @@ -471,7 +494,7 @@ many needs: # use a dictionary relation keyed by a column relation(Item, collection_class=column_mapped_collection(items.c.keyword)) # or named attribute - relation(Item, collection_class=attribute_mapped_collection('keyword')) + relation(Item, collection_class=attribute_mapped_collection("keyword")) # or any function you like relation(Item, collection_class=mapped_collection(lambda entity: entity.a + entity.b)) @@ -493,12 +516,20 
@@ columns or subqueries: :: - mapper(User, users, properties={ - 'fullname': column_property((users.c.firstname + users.c.lastname).label('fullname')), - 'numposts': column_property( - select([func.count(1)], users.c.id==posts.c.user_id).correlate(users).label('posts') - ) - }) + mapper( + User, + users, + properties={ + "fullname": column_property( + (users.c.firstname + users.c.lastname).label("fullname") + ), + "numposts": column_property( + select([func.count(1)], users.c.id == posts.c.user_id) + .correlate(users) + .label("posts") + ), + }, + ) a typical query looks like: @@ -534,7 +565,7 @@ your ``engine`` (or anywhere): from sqlalchemy import create_engine from sqlalchemy.orm import sessionmaker - engine = create_engine('myengine://') + engine = create_engine("myengine://") Session = sessionmaker(bind=engine, autoflush=True, transactional=True) # use the new Session() freely @@ -542,7 +573,6 @@ your ``engine`` (or anywhere): sess.save(someobject) sess.flush() - If you need to post-configure your Session, say with an engine, add it later with ``configure()``: @@ -562,7 +592,7 @@ with both ``sessionmaker`` as well as ``create_session()``: Session = scoped_session(sessionmaker(autoflush=True, transactional=True)) Session.configure(bind=engine) - u = User(name='wendy') + u = User(name="wendy") sess = Session() sess.save(u) @@ -573,7 +603,6 @@ with both ``sessionmaker`` as well as ``create_session()``: sess2 = Session() assert sess is sess2 - When using a thread-local ``Session``, the returned class has all of ``Session's`` interface implemented as classmethods, and "assignmapper"'s functionality is @@ -586,11 +615,10 @@ old ``objectstore`` days.... 
# "assignmapper"-like functionality available via ScopedSession.mapper Session.mapper(User, users_table) - u = User(name='wendy') + u = User(name="wendy") Session.commit() - Sessions are again Weak Referencing By Default ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -624,13 +652,13 @@ Also, ``autoflush=True`` means the ``Session`` will Session = sessionmaker(bind=engine, autoflush=True, transactional=True) - u = User(name='wendy') + u = User(name="wendy") sess = Session() sess.save(u) # wendy is flushed, comes right back from a query - wendy = sess.query(User).filter_by(name='wendy').one() + wendy = sess.query(User).filter_by(name="wendy").one() Transactional methods moved onto sessions ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -649,7 +677,7 @@ background). # use the session - sess.commit() # commit transaction + sess.commit() # commit transaction Sharing a ``Session`` with an enclosing engine-level (i.e. non-ORM) transaction is easy: @@ -745,7 +773,7 @@ Just like it says: :: - b = bindparam('foo', type_=String) + b = bindparam("foo", type_=String) in\_ Function Changed to Accept Sequence or Selectable ------------------------------------------------------ @@ -847,8 +875,18 @@ Out Parameters for Oracle :: - result = engine.execute(text("begin foo(:x, :y, :z); end;", bindparams=[bindparam('x', Numeric), outparam('y', Numeric), outparam('z', Numeric)]), x=5) - assert result.out_parameters == {'y':10, 'z':75} + result = engine.execute( + text( + "begin foo(:x, :y, :z); end;", + bindparams=[ + bindparam("x", Numeric), + outparam("y", Numeric), + outparam("z", Numeric), + ], + ), + x=5, + ) + assert result.out_parameters == {"y": 10, "z": 75} Connection-bound ``MetaData``, ``Sessions`` ------------------------------------------- diff --git a/doc/build/changelog/migration_05.rst b/doc/build/changelog/migration_05.rst index 64b69e1523..3d7bb52df3 100644 --- a/doc/build/changelog/migration_05.rst +++ b/doc/build/changelog/migration_05.rst @@ -64,15 +64,21 @@ Object 
Relational Mapping :: - session.query(User.name, func.count(Address.id).label("numaddresses")).join(Address).group_by(User.name) + session.query(User.name, func.count(Address.id).label("numaddresses")).join( + Address + ).group_by(User.name) The tuples returned by any multi-column/entity query are *named*' tuples: :: - for row in session.query(User.name, func.count(Address.id).label('numaddresses')).join(Address).group_by(User.name): - print("name", row.name, "number", row.numaddresses) + for row in ( + session.query(User.name, func.count(Address.id).label("numaddresses")) + .join(Address) + .group_by(User.name) + ): + print("name", row.name, "number", row.numaddresses) ``Query`` has a ``statement`` accessor, as well as a ``subquery()`` method which allow ``Query`` to be used to @@ -80,10 +86,15 @@ Object Relational Mapping :: - subq = session.query(Keyword.id.label('keyword_id')).filter(Keyword.name.in_(['beans', 'carrots'])).subquery() - recipes = session.query(Recipe).filter(exists(). - where(Recipe.id==recipe_keywords.c.recipe_id). 
- where(recipe_keywords.c.keyword_id==subq.c.keyword_id) + subq = ( + session.query(Keyword.id.label("keyword_id")) + .filter(Keyword.name.in_(["beans", "carrots"])) + .subquery() + ) + recipes = session.query(Recipe).filter( + exists() + .where(Recipe.id == recipe_keywords.c.recipe_id) + .where(recipe_keywords.c.keyword_id == subq.c.keyword_id) ) * **Explicit ORM aliases are recommended for aliased joins** @@ -223,17 +234,24 @@ Object Relational Mapping :: - mapper(User, users, properties={ - 'addresses':relation(Address, order_by=addresses.c.id) - }, order_by=users.c.id) + mapper( + User, + users, + properties={"addresses": relation(Address, order_by=addresses.c.id)}, + order_by=users.c.id, + ) To set ordering on a backref, use the ``backref()`` function: :: - 'keywords':relation(Keyword, secondary=item_keywords, - order_by=keywords.c.name, backref=backref('items', order_by=items.c.id)) + "keywords": relation( + Keyword, + secondary=item_keywords, + order_by=keywords.c.name, + backref=backref("items", order_by=items.c.id), + ) Using declarative ? To help with the new ``order_by`` requirement, ``order_by`` and friends can now be set using @@ -244,7 +262,7 @@ Object Relational Mapping class MyClass(MyDeclarativeBase): ... 
- 'addresses':relation("Address", order_by="Address.id") + "addresses": relation("Address", order_by="Address.id") It's generally a good idea to set ``order_by`` on ``relation()s`` which load list-based collections of @@ -402,14 +420,17 @@ Schema/Types convert_result_value methods """ + def bind_processor(self, dialect): def convert(value): return self.convert_bind_param(value, dialect) + return convert def result_processor(self, dialect): def convert(value): return self.convert_result_value(value, dialect) + return convert def convert_result_value(self, value, dialect): @@ -461,10 +482,10 @@ Schema/Types dt = datetime.datetime(2008, 6, 27, 12, 0, 0, 125) # 125 usec # old way - '2008-06-27 12:00:00.125' + "2008-06-27 12:00:00.125" # new way - '2008-06-27 12:00:00.000125' + "2008-06-27 12:00:00.000125" So if an existing SQLite file-based database intends to be used across 0.4 and 0.5, you either have to upgrade the @@ -481,6 +502,7 @@ Schema/Types :: from sqlalchemy.databases.sqlite import DateTimeMixin + DateTimeMixin.__legacy_microseconds__ = True Connection Pool no longer threadlocal by default @@ -522,7 +544,7 @@ data-driven, it takes ``[args]``. 
:: - query.join('orders', 'items') + query.join("orders", "items") query.join(User.orders, Order.items) * the ``in_()`` method on columns and similar only accepts a @@ -605,6 +627,7 @@ Removed :: from sqlalchemy.orm import aliased + address_alias = aliased(Address) print(session.query(User, address_alias).join((address_alias, User.addresses)).all()) diff --git a/doc/build/changelog/migration_06.rst b/doc/build/changelog/migration_06.rst index 0867fefe02..73c57bd931 100644 --- a/doc/build/changelog/migration_06.rst +++ b/doc/build/changelog/migration_06.rst @@ -73,7 +73,7 @@ will use psycopg2: :: - create_engine('postgresql://scott:tiger@localhost/test') + create_engine("postgresql://scott:tiger@localhost/test") However to specify a specific DBAPI backend such as pg8000, add it to the "protocol" section of the URL using a plus @@ -81,7 +81,7 @@ sign "+": :: - create_engine('postgresql+pg8000://scott:tiger@localhost/test') + create_engine("postgresql+pg8000://scott:tiger@localhost/test") Important Dialect Links: @@ -138,8 +138,15 @@ set of PG types: :: - from sqlalchemy.dialects.postgresql import INTEGER, BIGINT, SMALLINT,\ - VARCHAR, MACADDR, DATE, BYTEA + from sqlalchemy.dialects.postgresql import ( + INTEGER, + BIGINT, + SMALLINT, + VARCHAR, + MACADDR, + DATE, + BYTEA, + ) Above, ``INTEGER`` is actually the plain ``INTEGER`` type from ``sqlalchemy.types``, but the PG dialect makes it @@ -164,7 +171,7 @@ object returns another ``ClauseElement``: :: >>> from sqlalchemy.sql import column - >>> column('foo') == 5 + >>> column("foo") == 5 This so that Python expressions produce SQL expressions when @@ -172,16 +179,15 @@ converted to strings: :: - >>> str(column('foo') == 5) + >>> str(column("foo") == 5) 'foo = :foo_1' But what happens if we say this? :: - >>> if column('foo') == 5: + >>> if column("foo") == 5: ... print("yes") - ... 
In previous versions of SQLAlchemy, the returned ``_BinaryExpression`` was a plain Python object which @@ -191,11 +197,11 @@ as to that being compared. Meaning: :: - >>> bool(column('foo') == 5) + >>> bool(column("foo") == 5) False - >>> bool(column('foo') == column('foo')) + >>> bool(column("foo") == column("foo")) False - >>> c = column('foo') + >>> c = column("foo") >>> bool(c == c) True >>> @@ -252,7 +258,7 @@ sets: :: - connection.execute(table.insert(), {'data':'row1'}, {'data':'row2'}, {'data':'row3'}) + connection.execute(table.insert(), {"data": "row1"}, {"data": "row2"}, {"data": "row3"}) When the ``Connection`` object sends off the given ``insert()`` construct for compilation, it passes to the @@ -268,10 +274,12 @@ works: :: - connection.execute(table.insert(), - {'timestamp':today, 'data':'row1'}, - {'timestamp':today, 'data':'row2'}, - {'data':'row3'}) + connection.execute( + table.insert(), + {"timestamp": today, "data": "row1"}, + {"timestamp": today, "data": "row2"}, + {"data": "row3"}, + ) Because the third row does not specify the 'timestamp' column. 
Previous versions of SQLAlchemy would simply insert @@ -392,7 +400,7 @@ with tables or metadata objects: from sqlalchemy.schema import DDL - DDL('CREATE TRIGGER users_trigger ...').execute_at('after-create', metadata) + DDL("CREATE TRIGGER users_trigger ...").execute_at("after-create", metadata) Now the full suite of DDL constructs are available under the same system, including those for CREATE TABLE, ADD @@ -402,7 +410,7 @@ CONSTRAINT, etc.: from sqlalchemy.schema import Constraint, AddConstraint - AddContraint(CheckConstraint("value > 5")).execute_at('after-create', mytable) + AddContraint(CheckConstraint("value > 5")).execute_at("after-create", mytable) Additionally, all the DDL objects are now regular ``ClauseElement`` objects just like any other SQLAlchemy @@ -428,20 +436,22 @@ make your own: from sqlalchemy.schema import DDLElement from sqlalchemy.ext.compiler import compiles - class AlterColumn(DDLElement): + class AlterColumn(DDLElement): def __init__(self, column, cmd): self.column = column self.cmd = cmd + @compiles(AlterColumn) def visit_alter_column(element, compiler, **kw): return "ALTER TABLE %s ALTER COLUMN %s %s ..." 
% ( element.column.table.name, element.column.name, - element.cmd + element.cmd, ) + engine.execute(AlterColumn(table.c.mycolumn, "SET DEFAULT 'test'")) Deprecated/Removed Schema Elements @@ -566,6 +576,7 @@ To use an inspector: :: from sqlalchemy.engine.reflection import Inspector + insp = Inspector.from_engine(my_engine) print(insp.get_schema_names()) @@ -578,10 +589,10 @@ such as that of PostgreSQL which provides a :: - my_engine = create_engine('postgresql://...') + my_engine = create_engine("postgresql://...") pg_insp = Inspector.from_engine(my_engine) - print(pg_insp.get_table_oid('my_table')) + print(pg_insp.get_table_oid("my_table")) RETURNING Support ================= @@ -600,10 +611,10 @@ columns will be returned as a regular result set: result = connection.execute( - table.insert().values(data='some data').returning(table.c.id, table.c.timestamp) - ) + table.insert().values(data="some data").returning(table.c.id, table.c.timestamp) + ) row = result.first() - print("ID:", row['id'], "Timestamp:", row['timestamp']) + print("ID:", row["id"], "Timestamp:", row["timestamp"]) The implementation of RETURNING across the four supported backends varies wildly, in the case of Oracle requiring an @@ -740,7 +751,7 @@ that converts unicode back to utf-8, or whatever is desired: def process_result_value(self, value, dialect): if isinstance(value, unicode): - value = value.encode('utf-8') + value = value.encode("utf-8") return value Note that the ``assert_unicode`` flag is now deprecated. 
@@ -968,9 +979,11 @@ At mapper level: :: mapper(Child, child) - mapper(Parent, parent, properties={ - 'child':relationship(Child, lazy='joined', innerjoin=True) - }) + mapper( + Parent, + parent, + properties={"child": relationship(Child, lazy="joined", innerjoin=True)}, + ) At query time level: diff --git a/doc/build/changelog/migration_07.rst b/doc/build/changelog/migration_07.rst index a222f5380b..4763b9134c 100644 --- a/doc/build/changelog/migration_07.rst +++ b/doc/build/changelog/migration_07.rst @@ -244,7 +244,7 @@ with an explicit onclause is now: :: - query.join(SomeClass, SomeClass.id==ParentClass.some_id) + query.join(SomeClass, SomeClass.id == ParentClass.some_id) In 0.6, this usage was considered to be an error, because ``join()`` accepts multiple arguments corresponding to @@ -336,10 +336,12 @@ to the creation of the index outside of the Table. That is: :: - Table('mytable', metadata, - Column('id',Integer, primary_key=True), - Column('name', String(50), nullable=False), - Index('idx_name', 'name') + Table( + "mytable", + metadata, + Column("id", Integer, primary_key=True), + Column("name", String(50), nullable=False), + Index("idx_name", "name"), ) The primary rationale here is for the benefit of declarative @@ -348,14 +350,16 @@ The primary rationale here is for the benefit of declarative :: class HasNameMixin(object): - name = Column('name', String(50), nullable=False) + name = Column("name", String(50), nullable=False) + @declared_attr def __table_args__(cls): - return (Index('name'), {}) + return (Index("name"), {}) + class User(HasNameMixin, Base): - __tablename__ = 'user' - id = Column('id', Integer, primary_key=True) + __tablename__ = "user" + id = Column("id", Integer, primary_key=True) `Indexes `_ @@ -385,17 +389,16 @@ tutorial: from sqlalchemy.sql import table, column, select, func - empsalary = table('empsalary', - column('depname'), - column('empno'), - column('salary')) + empsalary = table("empsalary", column("depname"), column("empno"), 
column("salary")) - s = select([ + s = select( + [ empsalary, - func.avg(empsalary.c.salary). - over(partition_by=empsalary.c.depname). - label('avg') - ]) + func.avg(empsalary.c.salary) + .over(partition_by=empsalary.c.depname) + .label("avg"), + ] + ) print(s) @@ -495,7 +498,7 @@ equivalent to: :: - query.from_self(func.count(literal_column('1'))).scalar() + query.from_self(func.count(literal_column("1"))).scalar() Previously, internal logic attempted to rewrite the columns clause of the query itself, and upon detection of a @@ -534,6 +537,7 @@ be used: :: from sqlalchemy import func + session.query(func.count(MyClass.id)).scalar() or for ``count(*)``: @@ -541,7 +545,8 @@ or for ``count(*)``: :: from sqlalchemy import func, literal_column - session.query(func.count(literal_column('*'))).select_from(MyClass).scalar() + + session.query(func.count(literal_column("*"))).select_from(MyClass).scalar() LIMIT/OFFSET clauses now use bind parameters -------------------------------------------- @@ -690,8 +695,11 @@ function, can be mapped. from sqlalchemy import select, func from sqlalchemy.orm import mapper + class Subset(object): pass + + selectable = select(["x", "y", "z"]).select_from(func.some_db_function()).alias() mapper(Subset, selectable, primary_key=[selectable.c.x]) @@ -773,10 +781,11 @@ mutations, the type object must be constructed with :: - Table('mytable', metadata, + Table( + "mytable", + metadata, # .... 
- - Column('pickled_data', PickleType(mutable=True)) + Column("pickled_data", PickleType(mutable=True)), ) The ``mutable=True`` flag is being phased out, in favor of @@ -1036,7 +1045,7 @@ key column ``id``, the following now produces an error: :: - foobar = foo.join(bar, foo.c.id==bar.c.foo_id) + foobar = foo.join(bar, foo.c.id == bar.c.foo_id) mapper(FooBar, foobar) This because the ``mapper()`` refuses to guess what column @@ -1047,10 +1056,8 @@ explicit: :: - foobar = foo.join(bar, foo.c.id==bar.c.foo_id) - mapper(FooBar, foobar, properties={ - 'id':[foo.c.id, bar.c.id] - }) + foobar = foo.join(bar, foo.c.id == bar.c.foo_id) + mapper(FooBar, foobar, properties={"id": [foo.c.id, bar.c.id]}) :ticket:`1896` @@ -1231,14 +1238,14 @@ backend: :: - select([mytable], distinct='ALL', prefixes=['HIGH_PRIORITY']) + select([mytable], distinct="ALL", prefixes=["HIGH_PRIORITY"]) The ``prefixes`` keyword or ``prefix_with()`` method should be used for non-standard or unusual prefixes: :: - select([mytable]).prefix_with('HIGH_PRIORITY', 'ALL') + select([mytable]).prefix_with("HIGH_PRIORITY", "ALL") ``useexisting`` superseded by ``extend_existing`` and ``keep_existing`` ----------------------------------------------------------------------- diff --git a/doc/build/changelog/migration_08.rst b/doc/build/changelog/migration_08.rst index a4dc58549f..4a07518539 100644 --- a/doc/build/changelog/migration_08.rst +++ b/doc/build/changelog/migration_08.rst @@ -71,16 +71,17 @@ entities. 
The new system includes these features: class Parent(Base): - __tablename__ = 'parent' + __tablename__ = "parent" id = Column(Integer, primary_key=True) - child_id_one = Column(Integer, ForeignKey('child.id')) - child_id_two = Column(Integer, ForeignKey('child.id')) + child_id_one = Column(Integer, ForeignKey("child.id")) + child_id_two = Column(Integer, ForeignKey("child.id")) child_one = relationship("Child", foreign_keys=child_id_one) child_two = relationship("Child", foreign_keys=child_id_two) + class Child(Base): - __tablename__ = 'child' + __tablename__ = "child" id = Column(Integer, primary_key=True) * relationships against self-referential, composite foreign @@ -90,11 +91,11 @@ entities. The new system includes these features: :: class Folder(Base): - __tablename__ = 'folder' + __tablename__ = "folder" __table_args__ = ( - ForeignKeyConstraint( - ['account_id', 'parent_id'], - ['folder.account_id', 'folder.folder_id']), + ForeignKeyConstraint( + ["account_id", "parent_id"], ["folder.account_id", "folder.folder_id"] + ), ) account_id = Column(Integer, primary_key=True) @@ -102,10 +103,9 @@ entities. The new system includes these features: parent_id = Column(Integer) name = Column(String) - parent_folder = relationship("Folder", - backref="child_folders", - remote_side=[account_id, folder_id] - ) + parent_folder = relationship( + "Folder", backref="child_folders", remote_side=[account_id, folder_id] + ) Above, the ``Folder`` refers to its parent ``Folder`` joining from ``account_id`` to itself, and ``parent_id`` @@ -144,18 +144,19 @@ entities. 
The new system includes these features: expected in most cases:: class HostEntry(Base): - __tablename__ = 'host_entry' + __tablename__ = "host_entry" id = Column(Integer, primary_key=True) ip_address = Column(INET) content = Column(String(50)) # relationship() using explicit foreign_keys, remote_side - parent_host = relationship("HostEntry", - primaryjoin=ip_address == cast(content, INET), - foreign_keys=content, - remote_side=ip_address - ) + parent_host = relationship( + "HostEntry", + primaryjoin=ip_address == cast(content, INET), + foreign_keys=content, + remote_side=ip_address, + ) The new :func:`_orm.relationship` mechanics make use of a SQLAlchemy concept known as :term:`annotations`. These annotations @@ -167,8 +168,9 @@ entities. The new system includes these features: from sqlalchemy.orm import foreign, remote + class HostEntry(Base): - __tablename__ = 'host_entry' + __tablename__ = "host_entry" id = Column(Integer, primary_key=True) ip_address = Column(INET) @@ -176,11 +178,10 @@ entities. The new system includes these features: # relationship() using explicit foreign() and remote() annotations # in lieu of separate arguments - parent_host = relationship("HostEntry", - primaryjoin=remote(ip_address) == \ - cast(foreign(content), INET), - ) - + parent_host = relationship( + "HostEntry", + primaryjoin=remote(ip_address) == cast(foreign(content), INET), + ) .. seealso:: @@ -226,12 +227,11 @@ certain contexts, such as :class:`.AliasedInsp` and A walkthrough of some key capabilities follows:: >>> class User(Base): - ... __tablename__ = 'user' + ... __tablename__ = "user" ... id = Column(Integer, primary_key=True) ... name = Column(String) ... name_syn = synonym(name) ... addresses = relationship("Address") - ... 
>>> # universal entry point is inspect() >>> b = inspect(User) @@ -285,7 +285,7 @@ A walkthrough of some key capabilities follows:: "user".id = address.user_id >>> # inspect works on instances - >>> u1 = User(id=3, name='x') + >>> u1 = User(id=3, name="x") >>> b = inspect(u1) >>> # it returns the InstanceState @@ -354,10 +354,11 @@ usable anywhere: :: from sqlalchemy.orm import with_polymorphic + palias = with_polymorphic(Person, [Engineer, Manager]) - session.query(Company).\ - join(palias, Company.employees).\ - filter(or_(Engineer.language=='java', Manager.hair=='pointy')) + session.query(Company).join(palias, Company.employees).filter( + or_(Engineer.language == "java", Manager.hair == "pointy") + ) .. seealso:: @@ -377,9 +378,11 @@ by combining it with the new :func:`.with_polymorphic` function:: # use eager loading in conjunction with with_polymorphic targets Job_P = with_polymorphic(Job, [SubJob, ExtraJob], aliased=True) - q = s.query(DataContainer).\ - join(DataContainer.jobs.of_type(Job_P)).\ - options(contains_eager(DataContainer.jobs.of_type(Job_P))) + q = ( + s.query(DataContainer) + .join(DataContainer.jobs.of_type(Job_P)) + .options(contains_eager(DataContainer.jobs.of_type(Job_P))) + ) The method now works equally well in most places a regular relationship attribute is accepted, including with loader functions like @@ -389,26 +392,28 @@ and :meth:`.PropComparator.has`:: # use eager loading in conjunction with with_polymorphic targets Job_P = with_polymorphic(Job, [SubJob, ExtraJob], aliased=True) - q = s.query(DataContainer).\ - join(DataContainer.jobs.of_type(Job_P)).\ - options(contains_eager(DataContainer.jobs.of_type(Job_P))) + q = ( + s.query(DataContainer) + .join(DataContainer.jobs.of_type(Job_P)) + .options(contains_eager(DataContainer.jobs.of_type(Job_P))) + ) # pass subclasses to eager loads (implicitly applies with_polymorphic) - q = s.query(ParentThing).\ - options( - joinedload_all( - ParentThing.container, - 
DataContainer.jobs.of_type(SubJob) - )) + q = s.query(ParentThing).options( + joinedload_all(ParentThing.container, DataContainer.jobs.of_type(SubJob)) + ) # control self-referential aliasing with any()/has() Job_A = aliased(Job) - q = s.query(Job).join(DataContainer.jobs).\ - filter( - DataContainer.jobs.of_type(Job_A).\ - any(and_(Job_A.id < Job.id, Job_A.type=='fred') - ) - ) + q = ( + s.query(Job) + .join(DataContainer.jobs) + .filter( + DataContainer.jobs.of_type(Job_A).any( + and_(Job_A.id < Job.id, Job_A.type == "fred") + ) + ) + ) .. seealso:: @@ -429,13 +434,15 @@ with a declarative base class:: Base = declarative_base() + @event.listens_for("load", Base, propagate=True) def on_load(target, context): print("New instance loaded:", target) + # on_load() will be applied to SomeClass class SomeClass(Base): - __tablename__ = 'sometable' + __tablename__ = "sometable" # ... @@ -453,8 +460,9 @@ can be referred to via dotted name in expressions:: class Snack(Base): # ... - peanuts = relationship("nuts.Peanut", - primaryjoin="nuts.Peanut.snack_id == Snack.id") + peanuts = relationship( + "nuts.Peanut", primaryjoin="nuts.Peanut.snack_id == Snack.id" + ) The resolution allows that any full or partial disambiguating package name can be used. 
If the @@ -484,17 +492,22 @@ in one step: class ReflectedOne(DeferredReflection, Base): __abstract__ = True + class ReflectedTwo(DeferredReflection, Base): __abstract__ = True + class MyClass(ReflectedOne): - __tablename__ = 'mytable' + __tablename__ = "mytable" + class MyOtherClass(ReflectedOne): - __tablename__ = 'myothertable' + __tablename__ = "myothertable" + class YetAnotherClass(ReflectedTwo): - __tablename__ = 'yetanothertable' + __tablename__ = "yetanothertable" + ReflectedOne.prepare(engine_one) ReflectedTwo.prepare(engine_two) @@ -535,10 +548,9 @@ Below, we emit an UPDATE against ``SomeEntity``, adding a FROM clause (or equivalent, depending on backend) against ``SomeOtherEntity``:: - query(SomeEntity).\ - filter(SomeEntity.id==SomeOtherEntity.id).\ - filter(SomeOtherEntity.foo=='bar').\ - update({"data":"x"}) + query(SomeEntity).filter(SomeEntity.id == SomeOtherEntity.id).filter( + SomeOtherEntity.foo == "bar" + ).update({"data": "x"}) In particular, updates to joined-inheritance entities are supported, provided the target of the UPDATE is local to the @@ -548,10 +560,9 @@ given ``Engineer`` as a joined subclass of ``Person``: :: - query(Engineer).\ - filter(Person.id==Engineer.id).\ - filter(Person.name=='dilbert').\ - update({"engineer_data":"java"}) + query(Engineer).filter(Person.id == Engineer.id).filter( + Person.name == "dilbert" + ).update({"engineer_data": "java"}) would produce: @@ -649,6 +660,7 @@ For example, to add logarithm support to :class:`.Numeric` types: from sqlalchemy.types import Numeric from sqlalchemy.sql import func + class CustomNumeric(Numeric): class comparator_factory(Numeric.Comparator): def log(self, other): @@ -659,16 +671,17 @@ The new type is usable like any other type: :: - data = Table('data', metadata, - Column('id', Integer, primary_key=True), - Column('x', CustomNumeric(10, 5)), - Column('y', CustomNumeric(10, 5)) - ) + data = Table( + "data", + metadata, + Column("id", Integer, primary_key=True), + Column("x", 
CustomNumeric(10, 5)), + Column("y", CustomNumeric(10, 5)), + ) stmt = select([data.c.x.log(data.c.y)]).where(data.c.x.log(2) < value) print(conn.execute(stmt).fetchall()) - New features which have come from this immediately include support for PostgreSQL's HSTORE type, as well as new operations associated with PostgreSQL's ARRAY @@ -696,11 +709,13 @@ support this syntax, including PostgreSQL, SQLite, and MySQL. It is not the same thing as the usual ``executemany()`` style of INSERT which remains unchanged:: - users.insert().values([ - {"name": "some name"}, - {"name": "some other name"}, - {"name": "yet another name"}, - ]) + users.insert().values( + [ + {"name": "some name"}, + {"name": "some other name"}, + {"name": "yet another name"}, + ] + ) .. seealso:: @@ -721,6 +736,7 @@ functionality, except on the database side:: from sqlalchemy.types import String from sqlalchemy import func, Table, Column, MetaData + class LowerString(String): def bind_expression(self, bindvalue): return func.lower(bindvalue) @@ -728,18 +744,15 @@ functionality, except on the database side:: def column_expression(self, col): return func.lower(col) + metadata = MetaData() - test_table = Table( - 'test_table', - metadata, - Column('data', LowerString) - ) + test_table = Table("test_table", metadata, Column("data", LowerString)) Above, the ``LowerString`` type defines a SQL expression that will be emitted whenever the ``test_table.c.data`` column is rendered in the columns clause of a SELECT statement:: - >>> print(select([test_table]).where(test_table.c.data == 'HI')) + >>> print(select([test_table]).where(test_table.c.data == "HI")) SELECT lower(test_table.data) AS data FROM test_table WHERE test_table.data = lower(:data_1) @@ -789,16 +802,17 @@ against a particular target selectable:: signatures = relationship("Signature", lazy=False) + class Signature(Base): __tablename__ = "signature" id = Column(Integer, primary_key=True) sig_count = column_property( - select([func.count('*')]).\ - 
where(SnortEvent.signature == id). - correlate_except(SnortEvent) - ) + select([func.count("*")]) + .where(SnortEvent.signature == id) + .correlate_except(SnortEvent) + ) .. seealso:: @@ -818,19 +832,16 @@ and containment methods such as from sqlalchemy.dialects.postgresql import HSTORE - data = Table('data_table', metadata, - Column('id', Integer, primary_key=True), - Column('hstore_data', HSTORE) - ) - - engine.execute( - select([data.c.hstore_data['some_key']]) - ).scalar() + data = Table( + "data_table", + metadata, + Column("id", Integer, primary_key=True), + Column("hstore_data", HSTORE), + ) - engine.execute( - select([data.c.hstore_data.matrix()]) - ).scalar() + engine.execute(select([data.c.hstore_data["some_key"]])).scalar() + engine.execute(select([data.c.hstore_data.matrix()])).scalar() .. seealso:: @@ -861,30 +872,20 @@ results: The type also introduces new operators, using the new type-specific operator framework. New operations include indexed access:: - result = conn.execute( - select([mytable.c.arraycol[2]]) - ) + result = conn.execute(select([mytable.c.arraycol[2]])) slice access in SELECT:: - result = conn.execute( - select([mytable.c.arraycol[2:4]]) - ) + result = conn.execute(select([mytable.c.arraycol[2:4]])) slice updates in UPDATE:: - conn.execute( - mytable.update().values({mytable.c.arraycol[2:3]: [7, 8]}) - ) + conn.execute(mytable.update().values({mytable.c.arraycol[2:3]: [7, 8]})) freestanding array literals:: >>> from sqlalchemy.dialects import postgresql - >>> conn.scalar( - ... select([ - ... postgresql.array([1, 2]) + postgresql.array([3, 4, 5]) - ... ]) - ... ) + >>> conn.scalar(select([postgresql.array([1, 2]) + postgresql.array([3, 4, 5])])) [1, 2, 3, 4, 5] array concatenation, where below, the right side ``[4, 5, 6]`` is coerced into an array literal:: @@ -912,20 +913,24 @@ everything else. 
:: - Column('sometimestamp', sqlite.DATETIME(truncate_microseconds=True)) - Column('sometimestamp', sqlite.DATETIME( - storage_format=( - "%(year)04d%(month)02d%(day)02d" - "%(hour)02d%(minute)02d%(second)02d%(microsecond)06d" - ), - regexp="(\d{4})(\d{2})(\d{2})(\d{2})(\d{2})(\d{2})(\d{6})" - ) - ) - Column('somedate', sqlite.DATE( - storage_format="%(month)02d/%(day)02d/%(year)04d", - regexp="(?P\d+)/(?P\d+)/(?P\d+)", - ) - ) + Column("sometimestamp", sqlite.DATETIME(truncate_microseconds=True)) + Column( + "sometimestamp", + sqlite.DATETIME( + storage_format=( + "%(year)04d%(month)02d%(day)02d" + "%(hour)02d%(minute)02d%(second)02d%(microsecond)06d" + ), + regexp="(\d{4})(\d{2})(\d{2})(\d{2})(\d{2})(\d{2})(\d{6})", + ), + ) + Column( + "somedate", + sqlite.DATE( + storage_format="%(month)02d/%(day)02d/%(year)04d", + regexp="(?P\d+)/(?P\d+)/(?P\d+)", + ), + ) Huge thanks to Nate Dub for the sprinting on this at Pycon 2012. @@ -946,7 +951,7 @@ The "collate" keyword, long accepted by the MySQL dialect, is now established on all :class:`.String` types and will render on any backend, including when features such as :meth:`_schema.MetaData.create_all` and :func:`.cast` is used:: - >>> stmt = select([cast(sometable.c.somechar, String(20, collation='utf8'))]) + >>> stmt = select([cast(sometable.c.somechar, String(20, collation="utf8"))]) >>> print(stmt) SELECT CAST(sometable.somechar AS VARCHAR(20) COLLATE "utf8") AS anon_1 FROM sometable @@ -1047,33 +1052,35 @@ The new behavior allows the following test case to work:: Base = declarative_base() + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) name = Column(String(64)) + class UserKeyword(Base): - __tablename__ = 'user_keyword' - user_id = Column(Integer, ForeignKey('user.id'), primary_key=True) - keyword_id = Column(Integer, ForeignKey('keyword.id'), primary_key=True) + __tablename__ = "user_keyword" + user_id = Column(Integer, ForeignKey("user.id"), 
primary_key=True) + keyword_id = Column(Integer, ForeignKey("keyword.id"), primary_key=True) - user = relationship(User, - backref=backref("user_keywords", - cascade="all, delete-orphan") - ) + user = relationship( + User, backref=backref("user_keywords", cascade="all, delete-orphan") + ) - keyword = relationship("Keyword", - backref=backref("user_keywords", - cascade="all, delete-orphan") - ) + keyword = relationship( + "Keyword", backref=backref("user_keywords", cascade="all, delete-orphan") + ) # uncomment this to enable the old behavior # __mapper_args__ = {"legacy_is_orphan": True} + class Keyword(Base): - __tablename__ = 'keyword' + __tablename__ = "keyword" id = Column(Integer, primary_key=True) - keyword = Column('keyword', String(64)) + keyword = Column("keyword", String(64)) + from sqlalchemy import create_engine from sqlalchemy.orm import Session @@ -1103,7 +1110,6 @@ The new behavior allows the following test case to work:: session.commit() - :ticket:`2655` The after_attach event fires after the item is associated with the Session instead of before; before_attach added @@ -1129,9 +1135,9 @@ use cases should use the new "before_attach" event: @event.listens_for(Session, "before_attach") def before_attach(session, instance): - instance.some_necessary_attribute = session.query(Widget).\ - filter_by(instance.widget_name).\ - first() + instance.some_necessary_attribute = ( + session.query(Widget).filter_by(instance.widget_name).first() + ) :ticket:`2464` @@ -1146,11 +1152,13 @@ parent: :: - subq = session.query(Entity.value).\ - filter(Entity.id==Parent.entity_id).\ - correlate(Parent).\ - as_scalar() - session.query(Parent).filter(subq=="some value") + subq = ( + session.query(Entity.value) + .filter(Entity.id == Parent.entity_id) + .correlate(Parent) + .as_scalar() + ) + session.query(Parent).filter(subq == "some value") This was the opposite behavior of a plain ``select()`` construct which would assume auto-correlation by default. 
@@ -1158,10 +1166,8 @@ The above statement in 0.8 will correlate automatically: :: - subq = session.query(Entity.value).\ - filter(Entity.id==Parent.entity_id).\ - as_scalar() - session.query(Parent).filter(subq=="some value") + subq = session.query(Entity.value).filter(Entity.id == Parent.entity_id).as_scalar() + session.query(Parent).filter(subq == "some value") like in ``select()``, correlation can be disabled by calling ``query.correlate(None)`` or manually set by passing an @@ -1187,8 +1193,8 @@ objects relative to what's being selected:: from sqlalchemy.sql import table, column, select - t1 = table('t1', column('x')) - t2 = table('t2', column('y')) + t1 = table("t1", column("x")) + t2 = table("t2", column("y")) s = select([t1, t2]).correlate(t1) print(s) @@ -1263,8 +1269,8 @@ doing something like this: :: - scalar_subq = select([someothertable.c.id]).where(someothertable.c.data=='foo') - select([sometable]).where(sometable.c.id==scalar_subq) + scalar_subq = select([someothertable.c.id]).where(someothertable.c.data == "foo") + select([sometable]).where(sometable.c.id == scalar_subq) SQL Server doesn't allow an equality comparison to a scalar SELECT, that is, "x = (SELECT something)". 
The MSSQL dialect @@ -1313,32 +1319,28 @@ key would be ignored, inconsistently versus when :: # before 0.8 - table1 = Table('t1', metadata, - Column('col1', Integer, key='column_one') - ) + table1 = Table("t1", metadata, Column("col1", Integer, key="column_one")) s = select([table1]) - s.c.column_one # would be accessible like this - s.c.col1 # would raise AttributeError + s.c.column_one # would be accessible like this + s.c.col1 # would raise AttributeError s = select([table1]).apply_labels() - s.c.table1_column_one # would raise AttributeError - s.c.table1_col1 # would be accessible like this + s.c.table1_column_one # would raise AttributeError + s.c.table1_col1 # would be accessible like this In 0.8, :attr:`_schema.Column.key` is honored in both cases: :: # with 0.8 - table1 = Table('t1', metadata, - Column('col1', Integer, key='column_one') - ) + table1 = Table("t1", metadata, Column("col1", Integer, key="column_one")) s = select([table1]) - s.c.column_one # works - s.c.col1 # AttributeError + s.c.column_one # works + s.c.col1 # AttributeError s = select([table1]).apply_labels() - s.c.table1_column_one # works - s.c.table1_col1 # AttributeError + s.c.table1_column_one # works + s.c.table1_col1 # AttributeError All other behavior regarding "name" and "key" are the same, including that the rendered SQL will still use the form @@ -1408,8 +1410,8 @@ warning: :: - t1 = table('t1', column('x')) - t1.insert().values(x=5, z=5) # raises "Unconsumed column names: z" + t1 = table("t1", column("x")) + t1.insert().values(x=5, z=5) # raises "Unconsumed column names: z" :ticket:`2415` @@ -1439,7 +1441,7 @@ always compared case-insensitively: :: >>> row = result.fetchone() - >>> row['foo'] == row['FOO'] == row['Foo'] + >>> row["foo"] == row["FOO"] == row["Foo"] True This was for the benefit of a few dialects which in the diff --git a/doc/build/changelog/migration_09.rst b/doc/build/changelog/migration_09.rst index 70fa49e343..2e45695abb 100644 --- 
a/doc/build/changelog/migration_09.rst +++ b/doc/build/changelog/migration_09.rst @@ -60,8 +60,7 @@ Using a :class:`_query.Query` in conjunction with a composite attribute now retu type maintained by that composite, rather than being broken out into individual columns. Using the mapping setup at :ref:`mapper_composite`:: - >>> session.query(Vertex.start, Vertex.end).\ - ... filter(Vertex.start == Point(3, 4)).all() + >>> session.query(Vertex.start, Vertex.end).filter(Vertex.start == Point(3, 4)).all() [(Point(x=3, y=4), Point(x=5, y=6))] This change is backwards-incompatible with code that expects the individual attribute @@ -69,8 +68,9 @@ to be expanded into individual columns. To get that behavior, use the ``.clause accessor:: - >>> session.query(Vertex.start.clauses, Vertex.end.clauses).\ - ... filter(Vertex.start == Point(3, 4)).all() + >>> session.query(Vertex.start.clauses, Vertex.end.clauses).filter( + ... Vertex.start == Point(3, 4) + ... ).all() [(3, 4, 5, 6)] .. seealso:: @@ -93,9 +93,11 @@ Consider the following example against the usual ``User`` mapping:: select_stmt = select([User]).where(User.id == 7).alias() - q = session.query(User).\ - join(select_stmt, User.id == select_stmt.c.id).\ - filter(User.name == 'ed') + q = ( + session.query(User) + .join(select_stmt, User.id == select_stmt.c.id) + .filter(User.name == "ed") + ) The above statement predictably renders SQL like the following:: @@ -109,10 +111,12 @@ If we wanted to reverse the order of the left and right elements of the JOIN, the documentation would lead us to believe we could use :meth:`_query.Query.select_from` to do so:: - q = session.query(User).\ - select_from(select_stmt).\ - join(User, User.id == select_stmt.c.id).\ - filter(User.name == 'ed') + q = ( + session.query(User) + .select_from(select_stmt) + .join(User, User.id == select_stmt.c.id) + .filter(User.name == "ed") + ) However, in version 0.8 and earlier, the above use of :meth:`_query.Query.select_from` would apply the 
``select_stmt`` to **replace** the ``User`` entity, as it @@ -137,7 +141,7 @@ to selecting from a customized :func:`.aliased` construct:: select_stmt = select([User]).where(User.id == 7) user_from_stmt = aliased(User, select_stmt.alias()) - q = session.query(user_from_stmt).filter(user_from_stmt.name == 'ed') + q = session.query(user_from_stmt).filter(user_from_stmt.name == "ed") So with SQLAlchemy 0.9, our query that selects from ``select_stmt`` produces the SQL we expect:: @@ -180,17 +184,20 @@ The change is illustrated as follows:: Base = declarative_base() + class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) + class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) - a_id = Column(Integer, ForeignKey('a.id')) + a_id = Column(Integer, ForeignKey("a.id")) a = relationship("A", backref=backref("bs", viewonly=True)) + e = create_engine("sqlite://") Base.metadata.create_all(e) @@ -229,16 +236,17 @@ the "association" row being present or not when the comparison is against Consider this mapping:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) - b_id = Column(Integer, ForeignKey('b.id'), primary_key=True) + b_id = Column(Integer, ForeignKey("b.id"), primary_key=True) b = relationship("B") b_value = association_proxy("b", "value") + class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) value = Column(String) @@ -323,21 +331,24 @@ proxied value. 
E.g.:: Base = declarative_base() + class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) b = relationship("B", uselist=False) bname = association_proxy("b", "name") + class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) - a_id = Column(Integer, ForeignKey('a.id')) + a_id = Column(Integer, ForeignKey("a.id")) name = Column(String) + a1 = A() # this is how m2o's always have worked @@ -370,17 +381,19 @@ This is a small change demonstrated as follows:: Base = declarative_base() + class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) data = Column(String) + e = create_engine("sqlite://", echo=True) Base.metadata.create_all(e) sess = Session(e) - a1 = A(data='a1') + a1 = A(data="a1") sess.add(a1) sess.commit() # a1 is now expired @@ -388,11 +401,23 @@ This is a small change demonstrated as follows:: assert inspect(a1).attrs.data.history == (None, None, None) # in 0.8, this would fail to load the unloaded state. - assert attributes.get_history(a1, 'data') == ((), ['a1',], ()) + assert attributes.get_history(a1, "data") == ( + (), + [ + "a1", + ], + (), + ) # load_history() is now equivalent to get_history() with # passive=PASSIVE_OFF ^ INIT_OK - assert inspect(a1).attrs.data.load_history() == ((), ['a1',], ()) + assert inspect(a1).attrs.data.load_history() == ( + (), + [ + "a1", + ], + (), + ) :ticket:`2787` @@ -452,14 +477,10 @@ use the :meth:`.TypeEngine.with_variant` method:: from sqlalchemy.dialects.mysql import INTEGER d = Date().with_variant( - DATE(storage_format="%(day)02d.%(month)02d.%(year)04d"), - "sqlite" - ) + DATE(storage_format="%(day)02d.%(month)02d.%(year)04d"), "sqlite" + ) - i = Integer().with_variant( - INTEGER(display_width=5), - "mysql" - ) + i = Integer().with_variant(INTEGER(display_width=5), "mysql") :meth:`.TypeEngine.with_variant` isn't new, it was added in SQLAlchemy 0.7.2. 
So code that is running on the 0.8 series can be corrected to use @@ -549,7 +570,7 @@ The precedence rules for COLLATE have been changed Previously, an expression like the following:: - print((column('x') == 'somevalue').collate("en_EN")) + print((column("x") == "somevalue").collate("en_EN")) would produce an expression like this:: @@ -567,7 +588,7 @@ The potentially backwards incompatible change arises if the :meth:`.ColumnOperators.collate` operator is being applied to the right-hand column, as follows:: - print(column('x') == literal('somevalue').collate("en_EN")) + print(column("x") == literal("somevalue").collate("en_EN")) In 0.8, this produces:: @@ -584,11 +605,11 @@ The :meth:`.ColumnOperators.collate` operator now works more appropriately withi generated:: >>> # 0.8 - >>> print(column('x').collate('en_EN').desc()) + >>> print(column("x").collate("en_EN").desc()) (x COLLATE en_EN) DESC >>> # 0.9 - >>> print(column('x').collate('en_EN').desc()) + >>> print(column("x").collate("en_EN").desc()) x COLLATE en_EN DESC :ticket:`2879` @@ -604,7 +625,7 @@ The :class:`_postgresql.ENUM` type will now apply escaping to single quote signs within the enumerated values:: >>> from sqlalchemy.dialects import postgresql - >>> type = postgresql.ENUM('one', 'two', "three's", name="myenum") + >>> type = postgresql.ENUM("one", "two", "three's", name="myenum") >>> from sqlalchemy.dialects.postgresql import base >>> print(base.CreateEnumType(type).compile(dialect=postgresql.dialect())) CREATE TYPE myenum AS ENUM ('one','two','three''s') @@ -633,6 +654,7 @@ from all locations in which it had been established:: """listen for before_insert""" # ... + event.remove(MyClass, "before_insert", my_before_insert) In the example above, the ``propagate=True`` flag is set. 
This @@ -689,13 +711,9 @@ Setting an option on path that is based on a subclass requires that all links in the path be spelled out as class bound attributes, since the :meth:`.PropComparator.of_type` method needs to be called:: - session.query(Company).\ - options( - subqueryload_all( - Company.employees.of_type(Engineer), - Engineer.machines - ) - ) + session.query(Company).options( + subqueryload_all(Company.employees.of_type(Engineer), Engineer.machines) + ) **New Way** @@ -726,7 +744,6 @@ but the intent is clearer:: query(User).options(defaultload("orders").defaultload("items").subqueryload("keywords")) - The dotted style can still be taken advantage of, particularly in the case of skipping over several path elements:: @@ -791,7 +808,6 @@ others:: # undefer all Address columns query(User).options(defaultload(User.addresses).undefer("*")) - :ticket:`1418` @@ -826,7 +842,8 @@ The :func:`_expression.text` construct gains new methods: stmt = stmt.alias() stmt = select([addresses]).select_from( - addresses.join(stmt), addresses.c.user_id == stmt.c.id) + addresses.join(stmt), addresses.c.user_id == stmt.c.id + ) # or into a cte(): @@ -834,7 +851,8 @@ The :func:`_expression.text` construct gains new methods: stmt = stmt.cte("x") stmt = select([addresses]).select_from( - addresses.join(stmt), addresses.c.user_id == stmt.c.id) + addresses.join(stmt), addresses.c.user_id == stmt.c.id + ) :ticket:`2877` @@ -850,9 +868,9 @@ compatible construct can be passed to the new method :meth:`_expression.Insert.f where it will be used to render an ``INSERT .. 
SELECT`` construct:: >>> from sqlalchemy.sql import table, column - >>> t1 = table('t1', column('a'), column('b')) - >>> t2 = table('t2', column('x'), column('y')) - >>> print(t1.insert().from_select(['a', 'b'], t2.select().where(t2.c.y == 5))) + >>> t1 = table("t1", column("a"), column("b")) + >>> t2 = table("t2", column("x"), column("y")) + >>> print(t1.insert().from_select(["a", "b"], t2.select().where(t2.c.y == 5))) INSERT INTO t1 (a, b) SELECT t2.x, t2.y FROM t2 WHERE t2.y = :y_1 @@ -861,7 +879,7 @@ The construct is smart enough to also accommodate ORM objects such as classes and :class:`_query.Query` objects:: s = Session() - q = s.query(User.id, User.name).filter_by(name='ed') + q = s.query(User.id, User.name).filter_by(name="ed") ins = insert(Address).from_select((Address.id, Address.email_address), q) rendering:: @@ -920,9 +938,10 @@ for ``.decimal_return_scale`` if it is not otherwise specified. If both from sqlalchemy.dialects.mysql import DOUBLE import decimal - data = Table('data', metadata, - Column('double_value', - mysql.DOUBLE(decimal_return_scale=12, asdecimal=True)) + data = Table( + "data", + metadata, + Column("double_value", mysql.DOUBLE(decimal_return_scale=12, asdecimal=True)), ) conn.execute( @@ -938,7 +957,6 @@ for ``.decimal_return_scale`` if it is not otherwise specified. 
If both # much precision for DOUBLE assert result == decimal.Decimal("45.768392065789") - :ticket:`2867` @@ -1004,8 +1022,9 @@ from a backref:: Base = declarative_base() + class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) bs = relationship("B", backref="a") @@ -1015,21 +1034,22 @@ from a backref:: print("A.bs validator") return item + class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) - a_id = Column(Integer, ForeignKey('a.id')) + a_id = Column(Integer, ForeignKey("a.id")) @validates("a", include_backrefs=False) def validate_a(self, key, item): print("B.a validator") return item + a1 = A() a1.bs.append(B()) # prints only "A.bs validator" - :ticket:`1535` @@ -1262,14 +1282,9 @@ without any subqueries generated:: employee_alias = with_polymorphic(Person, [Engineer, Manager], flat=True) - session.query(Company).join( - Company.employees.of_type(employee_alias) - ).filter( - or_( - Engineer.primary_language == 'python', - Manager.manager_name == 'dilbert' - ) - ) + session.query(Company).join(Company.employees.of_type(employee_alias)).filter( + or_(Engineer.primary_language == "python", Manager.manager_name == "dilbert") + ) Generates (everywhere except SQLite):: @@ -1295,7 +1310,9 @@ on the right side. 
Normally, a joined eager load chain like the following:: - query(User).options(joinedload("orders", innerjoin=False).joinedload("items", innerjoin=True)) + query(User).options( + joinedload("orders", innerjoin=False).joinedload("items", innerjoin=True) + ) Would not produce an inner join; because of the LEFT OUTER JOIN from user->order, joined eager loading could not use an INNER join from order->items without changing @@ -1311,7 +1328,9 @@ the new "right-nested joins are OK" logic would kick in, and we'd get:: Since we missed the boat on that, to avoid further regressions we've added the above functionality by specifying the string ``"nested"`` to :paramref:`_orm.joinedload.innerjoin`:: - query(User).options(joinedload("orders", innerjoin=False).joinedload("items", innerjoin="nested")) + query(User).options( + joinedload("orders", innerjoin=False).joinedload("items", innerjoin="nested") + ) This feature is new in 0.9.4. @@ -1406,16 +1425,18 @@ replacement operation, which in turn should cause the item to be removed from a previous collection:: class Parent(Base): - __tablename__ = 'parent' + __tablename__ = "parent" id = Column(Integer, primary_key=True) children = relationship("Child", backref="parent") + class Child(Base): - __tablename__ = 'child' + __tablename__ = "child" id = Column(Integer, primary_key=True) - parent_id = Column(ForeignKey('parent.id')) + parent_id = Column(ForeignKey("parent.id")) + p1 = Parent() p2 = Parent() @@ -1520,7 +1541,7 @@ Starting with a table such as this:: from sqlalchemy import Table, Boolean, Integer, Column, MetaData - t1 = Table('t', MetaData(), Column('x', Boolean()), Column('y', Integer)) + t1 = Table("t", MetaData(), Column("x", Boolean()), Column("y", Integer)) A select construct will now render the boolean column as a binary expression on backends that don't feature ``true``/``false`` constant behavior:: @@ -1535,8 +1556,9 @@ The :func:`.and_` and :func:`.or_` constructs will now exhibit quasi "short circuit" behavior, 
that is truncating a rendered expression, when a :func:`.true` or :func:`.false` constant is present:: - >>> print(select([t1]).where(and_(t1.c.y > 5, false())).compile( - ... dialect=postgresql.dialect())) + >>> print( + ... select([t1]).where(and_(t1.c.y > 5, false())).compile(dialect=postgresql.dialect()) + ... ) SELECT t.x, t.y FROM t WHERE false :func:`.true` can be used as the base to build up an expression:: @@ -1549,8 +1571,7 @@ The :func:`.and_` and :func:`.or_` constructs will now exhibit quasi The boolean constants :func:`.true` and :func:`.false` themselves render as ``0 = 1`` and ``1 = 1`` for a backend with no boolean constants:: - >>> print(select([t1]).where(and_(t1.c.y > 5, false())).compile( - ... dialect=mysql.dialect())) + >>> print(select([t1]).where(and_(t1.c.y > 5, false())).compile(dialect=mysql.dialect())) SELECT t.x, t.y FROM t WHERE 0 = 1 Interpretation of ``None``, while not particularly valid SQL, is at least @@ -1581,7 +1602,7 @@ E.g. an example like:: from sqlalchemy.sql import table, column, select, func - t = table('t', column('c1'), column('c2')) + t = table("t", column("c1"), column("c2")) expr = (func.foo(t.c.c1) + t.c.c2).label("expr") stmt = select([expr]).order_by(expr) @@ -1620,16 +1641,16 @@ The ``__eq__()`` method now compares both sides as a tuple and also an ``__lt__()`` method has been added:: users.insert().execute( - dict(user_id=1, user_name='foo'), - dict(user_id=2, user_name='bar'), - dict(user_id=3, user_name='def'), - ) + dict(user_id=1, user_name="foo"), + dict(user_id=2, user_name="bar"), + dict(user_id=3, user_name="def"), + ) rows = users.select().order_by(users.c.user_name).execute().fetchall() - eq_(rows, [(2, 'bar'), (3, 'def'), (1, 'foo')]) + eq_(rows, [(2, "bar"), (3, "def"), (1, "foo")]) - eq_(sorted(rows), [(1, 'foo'), (2, 'bar'), (3, 'def')]) + eq_(sorted(rows), [(1, "foo"), (2, "bar"), (3, "def")]) :ticket:`2848` @@ -1667,7 +1688,7 @@ Above, ``bp`` remains unchanged, but the ``String`` type will be 
used when the statement is executed, which we can see by examining the ``binds`` dictionary:: >>> compiled = stmt.compile() - >>> compiled.binds['some_col'].type + >>> compiled.binds["some_col"].type String The feature allows custom types to take their expected effect within INSERT/UPDATE @@ -1727,10 +1748,10 @@ Scenarios which now work correctly include: >>> from sqlalchemy import Table, MetaData, Column, Integer, ForeignKey >>> metadata = MetaData() - >>> t2 = Table('t2', metadata, Column('t1id', ForeignKey('t1.id'))) + >>> t2 = Table("t2", metadata, Column("t1id", ForeignKey("t1.id"))) >>> t2.c.t1id.type NullType() - >>> t1 = Table('t1', metadata, Column('id', Integer, primary_key=True)) + >>> t1 = Table("t1", metadata, Column("id", Integer, primary_key=True)) >>> t2.c.t1id.type Integer() @@ -1738,16 +1759,23 @@ Scenarios which now work correctly include: >>> from sqlalchemy import Table, MetaData, Column, Integer, ForeignKeyConstraint >>> metadata = MetaData() - >>> t2 = Table('t2', metadata, - ... Column('t1a'), Column('t1b'), - ... ForeignKeyConstraint(['t1a', 't1b'], ['t1.a', 't1.b'])) + >>> t2 = Table( + ... "t2", + ... metadata, + ... Column("t1a"), + ... Column("t1b"), + ... ForeignKeyConstraint(["t1a", "t1b"], ["t1.a", "t1.b"]), + ... ) >>> t2.c.t1a.type NullType() >>> t2.c.t1b.type NullType() - >>> t1 = Table('t1', metadata, - ... Column('a', Integer, primary_key=True), - ... Column('b', Integer, primary_key=True)) + >>> t1 = Table( + ... "t1", + ... metadata, + ... Column("a", Integer, primary_key=True), + ... Column("b", Integer, primary_key=True), + ... 
) >>> t2.c.t1a.type Integer() >>> t2.c.t1b.type @@ -1758,13 +1786,13 @@ Scenarios which now work correctly include: >>> from sqlalchemy import Table, MetaData, Column, Integer, ForeignKey >>> metadata = MetaData() - >>> t2 = Table('t2', metadata, Column('t1id', ForeignKey('t1.id'))) - >>> t3 = Table('t3', metadata, Column('t2t1id', ForeignKey('t2.t1id'))) + >>> t2 = Table("t2", metadata, Column("t1id", ForeignKey("t1.id"))) + >>> t3 = Table("t3", metadata, Column("t2t1id", ForeignKey("t2.t1id"))) >>> t2.c.t1id.type NullType() >>> t3.c.t2t1id.type NullType() - >>> t1 = Table('t1', metadata, Column('id', Integer, primary_key=True)) + >>> t1 = Table("t1", metadata, Column("id", Integer, primary_key=True)) >>> t2.c.t1id.type Integer() >>> t3.c.t2t1id.type diff --git a/doc/build/changelog/migration_10.rst b/doc/build/changelog/migration_10.rst index 68fb0bd777..2ff8641501 100644 --- a/doc/build/changelog/migration_10.rst +++ b/doc/build/changelog/migration_10.rst @@ -71,15 +71,16 @@ once, a query as a pre-compiled unit begins to be feasible:: bakery = baked.bakery() + def search_for_user(session, username, email=None): baked_query = bakery(lambda session: session.query(User)) - baked_query += lambda q: q.filter(User.name == bindparam('username')) + baked_query += lambda q: q.filter(User.name == bindparam("username")) baked_query += lambda q: q.order_by(User.id) if email: - baked_query += lambda q: q.filter(User.email == bindparam('email')) + baked_query += lambda q: q.filter(User.email == bindparam("email")) result = baked_query(session).params(username=username, email=email).all() @@ -109,10 +110,11 @@ call upon mixin-established columns and will receive a reference to the correct @declared_attr def foobar_prop(cls): - return column_property('foobar: ' + cls.foobar) + return column_property("foobar: " + cls.foobar) + class SomeClass(HasFooBar, Base): - __tablename__ = 'some_table' + __tablename__ = "some_table" id = Column(Integer, primary_key=True) Above, 
``SomeClass.foobar_prop`` will be invoked against ``SomeClass``, @@ -132,10 +134,11 @@ this:: @declared_attr def foobar_prop(cls): - return column_property('foobar: ' + cls.foobar) + return column_property("foobar: " + cls.foobar) + class SomeClass(HasFooBar, Base): - __tablename__ = 'some_table' + __tablename__ = "some_table" id = Column(Integer, primary_key=True) Previously, ``SomeClass`` would be mapped with one particular copy of @@ -167,16 +170,19 @@ applied:: @declared_attr.cascading def id(cls): if has_inherited_table(cls): - return Column(ForeignKey('myclass.id'), primary_key=True) + return Column(ForeignKey("myclass.id"), primary_key=True) else: return Column(Integer, primary_key=True) + class MyClass(HasIdMixin, Base): - __tablename__ = 'myclass' + __tablename__ = "myclass" # ... + class MySubClass(MyClass): - "" + """""" + # ... .. seealso:: @@ -189,13 +195,17 @@ on the abstract base:: from sqlalchemy import Column, Integer, ForeignKey from sqlalchemy.orm import relationship - from sqlalchemy.ext.declarative import (declarative_base, declared_attr, - AbstractConcreteBase) + from sqlalchemy.ext.declarative import ( + declarative_base, + declared_attr, + AbstractConcreteBase, + ) Base = declarative_base() + class Something(Base): - __tablename__ = u'something' + __tablename__ = "something" id = Column(Integer, primary_key=True) @@ -212,9 +222,8 @@ on the abstract base:: class Concrete(Abstract): - __tablename__ = u'cca' - __mapper_args__ = {'polymorphic_identity': 'cca', 'concrete': True} - + __tablename__ = "cca" + __mapper_args__ = {"polymorphic_identity": "cca", "concrete": True} The above mapping will set up a table ``cca`` with both an ``id`` and a ``something_id`` column, and ``Concrete`` will also have a relationship @@ -240,17 +249,19 @@ of load that's improved the most:: Base = declarative_base() + class Foo(Base): __table__ = Table( - 'foo', Base.metadata, - Column('id', Integer, primary_key=True), - Column('a', Integer(), nullable=False), - 
Column('b', Integer(), nullable=False), - Column('c', Integer(), nullable=False), + "foo", + Base.metadata, + Column("id", Integer, primary_key=True), + Column("a", Integer(), nullable=False), + Column("b", Integer(), nullable=False), + Column("c", Integer(), nullable=False), ) - engine = create_engine( - 'mysql+mysqldb://scott:tiger@localhost/test', echo=True) + + engine = create_engine("mysql+mysqldb://scott:tiger@localhost/test", echo=True) sess = Session(engine) @@ -385,32 +396,29 @@ of inheritance-oriented scenarios, including: * Binding to a Mixin or Abstract Class:: class MyClass(SomeMixin, Base): - __tablename__ = 'my_table' + __tablename__ = "my_table" # ... - session = Session(binds={SomeMixin: some_engine}) + session = Session(binds={SomeMixin: some_engine}) * Binding to inherited concrete subclasses individually based on table:: class BaseClass(Base): - __tablename__ = 'base' + __tablename__ = "base" # ... + class ConcreteSubClass(BaseClass): - __tablename__ = 'concrete' + __tablename__ = "concrete" # ... - __mapper_args__ = {'concrete': True} - + __mapper_args__ = {"concrete": True} - session = Session(binds={ - base_table: some_engine, - concrete_table: some_other_engine - }) + session = Session(binds={base_table: some_engine, concrete_table: some_other_engine}) :ticket:`3035` @@ -446,10 +454,10 @@ These scenarios include: statement as well as for the SELECT used by the "fetch" strategy:: session.query(User).filter(User.id == 15).update( - {"name": "foob"}, synchronize_session='fetch') + {"name": "foob"}, synchronize_session="fetch" + ) - session.query(User).filter(User.id == 15).delete( - synchronize_session='fetch') + session.query(User).filter(User.id == 15).delete(synchronize_session="fetch") * Queries against individual columns:: @@ -488,7 +496,7 @@ at the attribute. 
Below this is illustrated using the return self.value + 5 - inspect(SomeObject).all_orm_descriptors.some_prop.info['foo'] = 'bar' + inspect(SomeObject).all_orm_descriptors.some_prop.info["foo"] = "bar" It is also available as a constructor argument for all :class:`.SchemaItem` objects (e.g. :class:`_schema.ForeignKey`, :class:`.UniqueConstraint` etc.) as well @@ -510,20 +518,19 @@ as the "order by label" logic introduced in 0.9 (see :ref:`migration_1068`). Given a mapping like the following:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) + class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) - a_id = Column(ForeignKey('a.id')) + a_id = Column(ForeignKey("a.id")) - A.b = column_property( - select([func.max(B.id)]).where(B.a_id == A.id).correlate(A) - ) + A.b = column_property(select([func.max(B.id)]).where(B.a_id == A.id).correlate(A)) A simple scenario that included "A.b" twice would fail to render correctly:: @@ -550,12 +557,12 @@ There were also many scenarios where the "order by" logic would fail to order by label, for example if the mapping were "polymorphic":: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) type = Column(String) - __mapper_args__ = {'polymorphic_on': type, 'with_polymorphic': '*'} + __mapper_args__ = {"polymorphic_on": type, "with_polymorphic": "*"} The order_by would fail to use the label, as it would be anonymized due to the polymorphic loading:: @@ -592,7 +599,7 @@ any SQL expression, in addition to integer values, as arguments. 
The ORM this is used to allow a bound parameter to be passed, which can be substituted with a value later:: - sel = select([table]).limit(bindparam('mylimit')).offset(bindparam('myoffset')) + sel = select([table]).limit(bindparam("mylimit")).offset(bindparam("myoffset")) Dialects which don't support non-integer LIMIT or OFFSET expressions may continue to not support this behavior; third party dialects may also need modification @@ -702,12 +709,12 @@ CHECK Constraints now support the ``%(column_0_name)s`` token in naming conventi The ``%(column_0_name)s`` will derive from the first column found in the expression of a :class:`.CheckConstraint`:: - metadata = MetaData( - naming_convention={"ck": "ck_%(table_name)s_%(column_0_name)s"} - ) + metadata = MetaData(naming_convention={"ck": "ck_%(table_name)s_%(column_0_name)s"}) - foo = Table('foo', metadata, - Column('value', Integer), + foo = Table( + "foo", + metadata, + Column("value", Integer), ) CheckConstraint(foo.c.value > 5) @@ -743,10 +750,7 @@ Since at least version 0.8, a :class:`.Constraint` has had the ability to m = MetaData() - t = Table('t', m, - Column('a', Integer), - Column('b', Integer) - ) + t = Table("t", m, Column("a", Integer), Column("b", Integer)) uq = UniqueConstraint(t.c.a, t.c.b) # will auto-attach to Table @@ -762,12 +766,12 @@ the :class:`.Constraint` is also added:: m = MetaData() - a = Column('a', Integer) - b = Column('b', Integer) + a = Column("a", Integer) + b = Column("b", Integer) uq = UniqueConstraint(a, b) - t = Table('t', m, a, b) + t = Table("t", m, a, b) assert uq in t.constraints # constraint auto-attached @@ -781,12 +785,12 @@ tracking for the addition of names to a :class:`_schema.Table`:: m = MetaData() - a = Column('a', Integer) - b = Column('b', Integer) + a = Column("a", Integer) + b = Column("b", Integer) - uq = UniqueConstraint(a, 'b') + uq = UniqueConstraint(a, "b") - t = Table('t', m, a, b) + t = Table("t", m, a, b) # constraint *not* auto-attached, as we do not have 
tracking # to locate when a name 'b' becomes available on the table @@ -806,18 +810,17 @@ the :class:`.Constraint` is constructed:: m = MetaData() - a = Column('a', Integer) - b = Column('b', Integer) + a = Column("a", Integer) + b = Column("b", Integer) - t = Table('t', m, a, b) + t = Table("t", m, a, b) - uq = UniqueConstraint(a, 'b') + uq = UniqueConstraint(a, "b") # constraint auto-attached normally as in older versions assert uq in t.constraints - :ticket:`3341` :ticket:`3411` @@ -838,12 +841,11 @@ expressions are rendered as constants into the SELECT statement:: m = MetaData() t = Table( - 't', m, - Column('x', Integer), - Column('y', Integer, default=func.somefunction())) + "t", m, Column("x", Integer), Column("y", Integer, default=func.somefunction()) + ) stmt = select([t.c.x]) - print(t.insert().from_select(['x'], stmt)) + print(t.insert().from_select(["x"], stmt)) Will render:: @@ -870,9 +872,10 @@ embedded in SQL to render correctly, such as:: metadata = MetaData() - tbl = Table("derp", metadata, - Column("arr", ARRAY(Text), - server_default=array(["foo", "bar", "baz"])), + tbl = Table( + "derp", + metadata, + Column("arr", ARRAY(Text), server_default=array(["foo", "bar", "baz"])), ) print(CreateTable(tbl).compile(dialect=postgresql.dialect())) @@ -981,8 +984,9 @@ emitted for ten of the parameter sets, out of a total of 1000:: warnings.filterwarnings("once") for i in range(1000): - e.execute(select([cast( - ('foo_%d' % random.randint(0, 1000000)).encode('ascii'), Unicode)])) + e.execute( + select([cast(("foo_%d" % random.randint(0, 1000000)).encode("ascii"), Unicode)]) + ) The format of the warning here is:: @@ -1015,40 +1019,41 @@ onto the class. The string names are now resolved as attribute names in earnest:: class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) - name = Column('user_name', String(50)) + name = Column("user_name", String(50)) Above, the column ``user_name`` is mapped as ``name``. 
Previously, a call to :meth:`_query.Query.update` that was passed strings would have to have been called as follows:: - session.query(User).update({'user_name': 'moonbeam'}) + session.query(User).update({"user_name": "moonbeam"}) The given string is now resolved against the entity:: - session.query(User).update({'name': 'moonbeam'}) + session.query(User).update({"name": "moonbeam"}) It is typically preferable to use the attribute directly, to avoid any ambiguity:: - session.query(User).update({User.name: 'moonbeam'}) + session.query(User).update({User.name: "moonbeam"}) The change also indicates that synonyms and hybrid attributes can be referred to by string name as well:: class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) - name = Column('user_name', String(50)) + name = Column("user_name", String(50)) @hybrid_property def fullname(self): return self.name - session.query(User).update({'fullname': 'moonbeam'}) + + session.query(User).update({"fullname": "moonbeam"}) :ticket:`3228` @@ -1108,13 +1113,14 @@ it only became apparent as a result of :ticket:`3371`. Given a mapping:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) + class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) - a_id = Column(ForeignKey('a.id')) + a_id = Column(ForeignKey("a.id")) a = relationship("A") Given ``A``, with primary key of 7, but which we changed to be 10 @@ -1254,15 +1260,16 @@ attributes, a change in behavior can be seen here when assigning None. 
Given a mapping:: class A(Base): - __tablename__ = 'table_a' + __tablename__ = "table_a" id = Column(Integer, primary_key=True) + class B(Base): - __tablename__ = 'table_b' + __tablename__ = "table_b" id = Column(Integer, primary_key=True) - a_id = Column(ForeignKey('table_a.id')) + a_id = Column(ForeignKey("table_a.id")) a = relationship(A) In 1.0, the relationship-bound attribute takes precedence over the FK-bound @@ -1277,7 +1284,7 @@ only takes effect if a value is assigned; the None is not considered:: session.flush() b1 = B() - b1.a = a1 # we expect a_id to be '1'; takes precedence in 0.9 and 1.0 + b1.a = a1 # we expect a_id to be '1'; takes precedence in 0.9 and 1.0 b2 = B() b2.a = None # we expect a_id to be None; takes precedence only in 1.0 @@ -1339,7 +1346,7 @@ with yield-per (subquery loading could be in theory, however). When this error is raised, the :func:`.lazyload` option can be sent with an asterisk:: - q = sess.query(Object).options(lazyload('*')).yield_per(100) + q = sess.query(Object).options(lazyload("*")).yield_per(100) or use :meth:`_query.Query.enable_eagerloads`:: @@ -1348,8 +1355,11 @@ or use :meth:`_query.Query.enable_eagerloads`:: The :func:`.lazyload` option has the advantage that additional many-to-one joined loader options can still be used:: - q = sess.query(Object).options( - lazyload('*'), joinedload("some_manytoone")).yield_per(100) + q = ( + sess.query(Object) + .options(lazyload("*"), joinedload("some_manytoone")) + .yield_per(100) + ) .. 
_bug_3233: @@ -1370,15 +1380,17 @@ Starting with a mapping as:: Base = declarative_base() + class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) bs = relationship("B") + class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) - a_id = Column(ForeignKey('a.id')) + a_id = Column(ForeignKey("a.id")) A query that joins to ``A.bs`` twice:: @@ -1392,9 +1404,9 @@ Will render:: The query deduplicates the redundant ``A.bs`` because it is attempting to support a case like the following:: - s.query(A).join(A.bs).\ - filter(B.foo == 'bar').\ - reset_joinpoint().join(A.bs, B.cs).filter(C.bar == 'bat') + s.query(A).join(A.bs).filter(B.foo == "bar").reset_joinpoint().join(A.bs, B.cs).filter( + C.bar == "bat" + ) That is, the ``A.bs`` is part of a "path". As part of :ticket:`3367`, arriving at the same endpoint twice without it being part of a @@ -1437,31 +1449,33 @@ a mapping as follows:: Base = declarative_base() + class A(Base): __tablename__ = "a" id = Column(Integer, primary_key=True) type = Column(String) - __mapper_args__ = {'polymorphic_on': type, 'polymorphic_identity': 'a'} + __mapper_args__ = {"polymorphic_on": type, "polymorphic_identity": "a"} class ASub1(A): - __mapper_args__ = {'polymorphic_identity': 'asub1'} + __mapper_args__ = {"polymorphic_identity": "asub1"} class ASub2(A): - __mapper_args__ = {'polymorphic_identity': 'asub2'} + __mapper_args__ = {"polymorphic_identity": "asub2"} class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) a_id = Column(Integer, ForeignKey("a.id")) - a = relationship("A", primaryjoin="B.a_id == A.id", backref='b') + a = relationship("A", primaryjoin="B.a_id == A.id", backref="b") + s = Session() @@ -1543,26 +1557,28 @@ Previously, the sample code looked like:: from sqlalchemy.orm import Bundle + class DictBundle(Bundle): def create_row_processor(self, query, procs, labels): """Override create_row_processor 
to return values as dictionaries""" + def proc(row, result): - return dict( - zip(labels, (proc(row, result) for proc in procs)) - ) + return dict(zip(labels, (proc(row, result) for proc in procs))) + return proc The unused ``result`` member is now removed:: from sqlalchemy.orm import Bundle + class DictBundle(Bundle): def create_row_processor(self, query, procs, labels): """Override create_row_processor to return values as dictionaries""" + def proc(row): - return dict( - zip(labels, (proc(row) for proc in procs)) - ) + return dict(zip(labels, (proc(row) for proc in procs))) + return proc .. seealso:: @@ -1587,7 +1603,8 @@ join eager load will use a right-nested join. ``"nested"`` is now implied when using ``innerjoin=True``:: query(User).options( - joinedload("orders", innerjoin=False).joinedload("items", innerjoin=True)) + joinedload("orders", innerjoin=False).joinedload("items", innerjoin=True) + ) With the new default, this will render the FROM clause in the form:: @@ -1601,7 +1618,8 @@ optimization parameter to take effect in all cases. 
To get the older behavior, use ``innerjoin="unnested"``:: query(User).options( - joinedload("orders", innerjoin=False).joinedload("items", innerjoin="unnested")) + joinedload("orders", innerjoin=False).joinedload("items", innerjoin="unnested") + ) This will avoid right-nested joins and chain the joins together using all OUTER joins despite the innerjoin directive:: @@ -1626,15 +1644,16 @@ Subqueries no longer applied to uselist=False joined eager loads Given a joined eager load like the following:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) b = relationship("B", uselist=False) class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) - a_id = Column(ForeignKey('a.id')) + a_id = Column(ForeignKey("a.id")) + s = Session() print(s.query(A).options(joinedload(A.b)).limit(5)) @@ -1709,7 +1728,8 @@ Change to single-table-inheritance criteria when using from_self(), count() Given a single-table inheritance mapping, such as:: class Widget(Base): - __table__ = 'widget_table' + __table__ = "widget_table" + class FooWidget(Widget): pass @@ -1769,20 +1789,20 @@ the "single table criteria" when joining on a relationship. 
Given a mapping as:: class Widget(Base): - __tablename__ = 'widget' + __tablename__ = "widget" id = Column(Integer, primary_key=True) type = Column(String) - related_id = Column(ForeignKey('related.id')) + related_id = Column(ForeignKey("related.id")) related = relationship("Related", backref="widget") - __mapper_args__ = {'polymorphic_on': type} + __mapper_args__ = {"polymorphic_on": type} class FooWidget(Widget): - __mapper_args__ = {'polymorphic_identity': 'foo'} + __mapper_args__ = {"polymorphic_identity": "foo"} class Related(Base): - __tablename__ = 'related' + __tablename__ = "related" id = Column(Integer, primary_key=True) It's been the behavior for quite some time that a JOIN on the relationship @@ -1850,7 +1870,7 @@ behavior of passing string values that become parameterized:: # This is a normal Core expression with a string argument - # we aren't talking about this!! - stmt = select([sometable]).where(sometable.c.somecolumn == 'value') + stmt = select([sometable]).where(sometable.c.somecolumn == "value") The Core tutorial has long featured an example of the use of this technique, using a :func:`_expression.select` construct where virtually all components of it @@ -1893,24 +1913,28 @@ one wishes the warnings to be exceptions, the should be used:: import warnings - warnings.simplefilter("error") # all warnings raise an exception + + warnings.simplefilter("error") # all warnings raise an exception Given the above warnings, our statement works just fine, but to get rid of the warnings we would rewrite our statement as follows:: from sqlalchemy import select, text - stmt = select([ - text("a"), - text("b") - ]).where(text("a = b")).select_from(text("sometable")) + + stmt = ( + select([text("a"), text("b")]).where(text("a = b")).select_from(text("sometable")) + ) and as the warnings suggest, we can give our statement more specificity about the text if we use :func:`_expression.column` and :func:`.table`:: from sqlalchemy import select, text, column, table - 
stmt = select([column("a"), column("b")]).\ - where(text("a = b")).select_from(table("sometable")) + stmt = ( + select([column("a"), column("b")]) + .where(text("a = b")) + .select_from(table("sometable")) + ) Where note also that :func:`.table` and :func:`_expression.column` can now be imported from "sqlalchemy" without the "sql" part. @@ -1927,10 +1951,11 @@ of this change we have enhanced its functionality. When we have a :func:`_expression.select` or :class:`_query.Query` that refers to some column name or named label, we might want to GROUP BY and/or ORDER BY known columns or labels:: - stmt = select([ - user.c.name, - func.count(user.c.id).label("id_count") - ]).group_by("name").order_by("id_count") + stmt = ( + select([user.c.name, func.count(user.c.id).label("id_count")]) + .group_by("name") + .order_by("id_count") + ) In the above statement we expect to see "ORDER BY id_count", as opposed to a re-statement of the function. The string argument given is actively @@ -1944,10 +1969,9 @@ the ``"name"`` expression has been resolved to ``users.name``!):: However, if we refer to a name that cannot be located, then we get the warning again, as below:: - stmt = select([ - user.c.name, - func.count(user.c.id).label("id_count") - ]).order_by("some_label") + stmt = select([user.c.name, func.count(user.c.id).label("id_count")]).order_by( + "some_label" + ) The output does what we say, but again it warns us:: @@ -1995,16 +2019,21 @@ that of an "executemany" style of invocation:: counter = itertools.count(1) t = Table( - 'my_table', metadata, - Column('id', Integer, default=lambda: next(counter)), - Column('data', String) + "my_table", + metadata, + Column("id", Integer, default=lambda: next(counter)), + Column("data", String), ) - conn.execute(t.insert().values([ - {"data": "d1"}, - {"data": "d2"}, - {"data": "d3"}, - ])) + conn.execute( + t.insert().values( + [ + {"data": "d1"}, + {"data": "d2"}, + {"data": "d3"}, + ] + ) + ) The above example will invoke 
``next(counter)`` for each row individually as would be expected:: @@ -2034,16 +2063,21 @@ value is required; if an omitted value only refers to a server-side default, an exception is raised:: t = Table( - 'my_table', metadata, - Column('id', Integer, primary_key=True), - Column('data', String, server_default='some default') + "my_table", + metadata, + Column("id", Integer, primary_key=True), + Column("data", String, server_default="some default"), ) - conn.execute(t.insert().values([ - {"data": "d1"}, - {"data": "d2"}, - {}, - ])) + conn.execute( + t.insert().values( + [ + {"data": "d1"}, + {"data": "d2"}, + {}, + ] + ) + ) will raise:: @@ -2109,7 +2143,7 @@ data is needed. A :class:`_schema.Table` can be set up for reflection by passing :paramref:`_schema.Table.autoload_with` alone:: - my_table = Table('my_table', metadata, autoload_with=some_engine) + my_table = Table("my_table", metadata, autoload_with=some_engine) :ticket:`3027` @@ -2224,8 +2258,8 @@ An :class:`_postgresql.ENUM` that is created **without** being explicitly associated with a :class:`_schema.MetaData` object will be created *and* dropped corresponding to :meth:`_schema.Table.create` and :meth:`_schema.Table.drop`:: - table = Table('sometable', metadata, - Column('some_enum', ENUM('a', 'b', 'c', name='myenum')) + table = Table( + "sometable", metadata, Column("some_enum", ENUM("a", "b", "c", name="myenum")) ) table.create(engine) # will emit CREATE TYPE and CREATE TABLE @@ -2242,11 +2276,9 @@ corresponding to :meth:`_schema.Table.create` and :meth:`_schema.Table.drop`, wi the exception of :meth:`_schema.Table.create` called with the ``checkfirst=True`` flag:: - my_enum = ENUM('a', 'b', 'c', name='myenum', metadata=metadata) + my_enum = ENUM("a", "b", "c", name="myenum", metadata=metadata) - table = Table('sometable', metadata, - Column('some_enum', my_enum) - ) + table = Table("sometable", metadata, Column("some_enum", my_enum)) # will fail: ENUM 'my_enum' does not exist table.create(engine) @@ 
-2256,10 +2288,9 @@ flag:: table.drop(engine) # will emit DROP TABLE, *not* DROP TYPE - metadata.drop_all(engine) # will emit DROP TYPE - - metadata.create_all(engine) # will emit CREATE TYPE + metadata.drop_all(engine) # will emit DROP TYPE + metadata.create_all(engine) # will emit CREATE TYPE :ticket:`3319` @@ -2334,13 +2365,14 @@ so that code like the following may proceed:: metadata = MetaData() user_tmp = Table( - "user_tmp", metadata, + "user_tmp", + metadata, Column("id", INT, primary_key=True), - Column('name', VARCHAR(50)), - prefixes=['TEMPORARY'] + Column("name", VARCHAR(50)), + prefixes=["TEMPORARY"], ) - e = create_engine("postgresql://scott:tiger@localhost/test", echo='debug') + e = create_engine("postgresql://scott:tiger@localhost/test", echo="debug") with e.begin() as conn: user_tmp.create(conn, checkfirst=True) @@ -2357,21 +2389,23 @@ the temporary table:: metadata = MetaData() user_tmp = Table( - "user_tmp", metadata, + "user_tmp", + metadata, Column("id", INT, primary_key=True), - Column('name', VARCHAR(50)), - prefixes=['TEMPORARY'] + Column("name", VARCHAR(50)), + prefixes=["TEMPORARY"], ) - e = create_engine("postgresql://scott:tiger@localhost/test", echo='debug') + e = create_engine("postgresql://scott:tiger@localhost/test", echo="debug") with e.begin() as conn: user_tmp.create(conn, checkfirst=True) m2 = MetaData() user = Table( - "user_tmp", m2, + "user_tmp", + m2, Column("id", INT, primary_key=True), - Column('name', VARCHAR(50)), + Column("name", VARCHAR(50)), ) # in 0.9, *will create* the new table, overwriting the old one. @@ -2548,11 +2582,13 @@ Code like the following will now function correctly and return floating points on MySQL:: >>> connection.execute( - ... select([ - ... matchtable.c.title.match('Agile Ruby Programming').label('ruby'), - ... matchtable.c.title.match('Dive Python').label('python'), - ... matchtable.c.title - ... ]).order_by(matchtable.c.id) + ... select( + ... [ + ... 
matchtable.c.title.match("Agile Ruby Programming").label("ruby"), + ... matchtable.c.title.match("Dive Python").label("python"), + ... matchtable.c.title, + ... ] + ... ).order_by(matchtable.c.id) ... ) [ (2.0, 0.0, 'Agile Web Development with Ruby On Rails'), @@ -2614,7 +2650,9 @@ Connecting to SQL Server with PyODBC using a DSN-less connection, e.g. with an explicit hostname, now requires a driver name - SQLAlchemy will no longer attempt to guess a default:: - engine = create_engine("mssql+pyodbc://scott:tiger@myhost:port/databasename?driver=SQL+Server+Native+Client+10.0") + engine = create_engine( + "mssql+pyodbc://scott:tiger@myhost:port/databasename?driver=SQL+Server+Native+Client+10.0" + ) SQLAlchemy's previously hardcoded default of "SQL Server" is obsolete on Windows, and SQLAlchemy cannot be tasked with guessing the best driver @@ -2642,13 +2680,16 @@ Improved support for CTEs in Oracle CTE support has been fixed up for Oracle, and there is also a new feature :meth:`_expression.CTE.with_suffixes` that can assist with Oracle's special directives:: - included_parts = select([ - part.c.sub_part, part.c.part, part.c.quantity - ]).where(part.c.part == "p1").\ - cte(name="included_parts", recursive=True).\ - suffix_with( + included_parts = ( + select([part.c.sub_part, part.c.part, part.c.quantity]) + .where(part.c.part == "p1") + .cte(name="included_parts", recursive=True) + .suffix_with( "search depth first by part set ord1", - "cycle part set y_cycle to 1 default 0", dialect='oracle') + "cycle part set y_cycle to 1 default 0", + dialect="oracle", + ) + ) :ticket:`3220` diff --git a/doc/build/changelog/migration_11.rst b/doc/build/changelog/migration_11.rst index 5c1b842b61..6b25bc4168 100644 --- a/doc/build/changelog/migration_11.rst +++ b/doc/build/changelog/migration_11.rst @@ -207,29 +207,35 @@ expression, and ``func.date()`` applied to a datetime expression; both examples will return duplicate rows due to the joined eager load unless explicit typing is 
applied:: - result = session.query( - func.substr(A.some_thing, 0, 4), A - ).options(joinedload(A.bs)).all() + result = ( + session.query(func.substr(A.some_thing, 0, 4), A).options(joinedload(A.bs)).all() + ) - users = session.query( - func.date( - User.date_created, 'start of month' - ).label('month'), - User, - ).options(joinedload(User.orders)).all() + users = ( + session.query( + func.date(User.date_created, "start of month").label("month"), + User, + ) + .options(joinedload(User.orders)) + .all() + ) The above examples, in order to retain deduping, should be specified as:: - result = session.query( - func.substr(A.some_thing, 0, 4, type_=String), A - ).options(joinedload(A.bs)).all() + result = ( + session.query(func.substr(A.some_thing, 0, 4, type_=String), A) + .options(joinedload(A.bs)) + .all() + ) - users = session.query( - func.date( - User.date_created, 'start of month', type_=DateTime - ).label('month'), - User, - ).options(joinedload(User.orders)).all() + users = ( + session.query( + func.date(User.date_created, "start of month", type_=DateTime).label("month"), + User, + ) + .options(joinedload(User.orders)) + .all() + ) Additionally, the treatment of a so-called "unhashable" type is slightly different than its been in previous releases; internally we are using @@ -259,7 +265,6 @@ string value:: >>> some_user = User() >>> q = s.query(User).filter(User.name == some_user) - ... 
sqlalchemy.exc.ArgumentError: Object <__main__.User object at 0x103167e90> is not legal as a SQL literal value The exception is now immediate when the comparison is made between @@ -292,18 +297,18 @@ refer to specific elements of an "indexable" data type, such as an array or JSON field:: class Person(Base): - __tablename__ = 'person' + __tablename__ = "person" id = Column(Integer, primary_key=True) data = Column(JSON) - name = index_property('data', 'name') + name = index_property("data", "name") Above, the ``name`` attribute will read/write the field ``"name"`` from the JSON column ``data``, after initializing it to an empty dictionary:: - >>> person = Person(name='foobar') + >>> person = Person(name="foobar") >>> person.name foobar @@ -346,17 +351,18 @@ no longer inappropriately add the "single inheritance" criteria when the query is against a subquery expression such as an exists:: class Widget(Base): - __tablename__ = 'widget' + __tablename__ = "widget" id = Column(Integer, primary_key=True) type = Column(String) data = Column(String) - __mapper_args__ = {'polymorphic_on': type} + __mapper_args__ = {"polymorphic_on": type} class FooWidget(Widget): - __mapper_args__ = {'polymorphic_identity': 'foo'} + __mapper_args__ = {"polymorphic_identity": "foo"} - q = session.query(FooWidget).filter(FooWidget.data == 'bar').exists() + + q = session.query(FooWidget).filter(FooWidget.data == "bar").exists() session.query(q).all() @@ -433,10 +439,12 @@ removed would be lost, and the flush would incorrectly raise an error:: Base = declarative_base() + class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) + e = create_engine("sqlite://", echo=True) Base.metadata.create_all(e) @@ -522,25 +530,23 @@ the :paramref:`.orm.mapper.passive_deletes` option:: class A(Base): __tablename__ = "a" - id = Column('id', Integer, primary_key=True) + id = Column("id", Integer, primary_key=True) type = Column(String) __mapper_args__ = { - 
'polymorphic_on': type, - 'polymorphic_identity': 'a', - 'passive_deletes': True + "polymorphic_on": type, + "polymorphic_identity": "a", + "passive_deletes": True, } class B(A): - __tablename__ = 'b' - b_table_id = Column('b_table_id', Integer, primary_key=True) - bid = Column('bid', Integer, ForeignKey('a.id', ondelete="CASCADE")) - data = Column('data', String) + __tablename__ = "b" + b_table_id = Column("b_table_id", Integer, primary_key=True) + bid = Column("bid", Integer, ForeignKey("a.id", ondelete="CASCADE")) + data = Column("data", String) - __mapper_args__ = { - 'polymorphic_identity': 'b' - } + __mapper_args__ = {"polymorphic_identity": "b"} With the above mapping, the :paramref:`.orm.mapper.passive_deletes` option is configured on the base mapper; it takes effect for all non-base mappers @@ -571,22 +577,24 @@ Same-named backrefs will not raise an error when applied to concrete inheritance The following mapping has always been possible without issue:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) b = relationship("B", foreign_keys="B.a_id", backref="a") + class A1(A): - __tablename__ = 'a1' + __tablename__ = "a1" id = Column(Integer, primary_key=True) b = relationship("B", foreign_keys="B.a1_id", backref="a1") - __mapper_args__ = {'concrete': True} + __mapper_args__ = {"concrete": True} + class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) - a_id = Column(ForeignKey('a.id')) - a1_id = Column(ForeignKey('a1.id')) + a_id = Column(ForeignKey("a.id")) + a1_id = Column(ForeignKey("a1.id")) Above, even though class ``A`` and class ``A1`` have a relationship named ``b``, no conflict warning or error occurs because class ``A1`` is @@ -596,22 +604,22 @@ However, if the relationships were configured the other way, an error would occur:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) class A1(A): - __tablename__ = 'a1' 
+ __tablename__ = "a1" id = Column(Integer, primary_key=True) - __mapper_args__ = {'concrete': True} + __mapper_args__ = {"concrete": True} class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) - a_id = Column(ForeignKey('a.id')) - a1_id = Column(ForeignKey('a1.id')) + a_id = Column(ForeignKey("a.id")) + a1_id = Column(ForeignKey("a1.id")) a = relationship("A", backref="b") a1 = relationship("A1", backref="b") @@ -634,22 +642,21 @@ on inherited mapper ''; this can cause dependency issues during flush". An example is as follows:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) bs = relationship("B") class ASub(A): - __tablename__ = 'a_sub' - id = Column(Integer, ForeignKey('a.id'), primary_key=True) + __tablename__ = "a_sub" + id = Column(Integer, ForeignKey("a.id"), primary_key=True) bs = relationship("B") class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) - a_id = Column(ForeignKey('a.id')) - + a_id = Column(ForeignKey("a.id")) This warning dates back to the 0.4 series in 2007 and is based on a version of the unit of work code that has since been entirely rewritten. Currently, there @@ -672,7 +679,7 @@ A hybrid method or property will now reflect the ``__doc__`` value present in the original docstring:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) name = Column(String) @@ -710,9 +717,9 @@ also propagated from the hybrid descriptor itself, rather than from the underlyi expression. 
That is, accessing ``A.some_name.info`` now returns the same dictionary that you'd get from ``inspect(A).all_orm_descriptors['some_name'].info``:: - >>> A.some_name.info['foo'] = 'bar' + >>> A.some_name.info["foo"] = "bar" >>> from sqlalchemy import inspect - >>> inspect(A).all_orm_descriptors['some_name'].info + >>> inspect(A).all_orm_descriptors["some_name"].info {'foo': 'bar'} Note that this ``.info`` dictionary is **separate** from that of a mapped attribute @@ -739,11 +746,11 @@ consistent. Given:: - u1 = User(id=7, name='x') + u1 = User(id=7, name="x") u1.orders = [ - Order(description='o1', address=Address(id=1, email_address='a')), - Order(description='o2', address=Address(id=1, email_address='b')), - Order(description='o3', address=Address(id=1, email_address='c')) + Order(description="o1", address=Address(id=1, email_address="a")), + Order(description="o2", address=Address(id=1, email_address="b")), + Order(description="o3", address=Address(id=1, email_address="c")), ] sess = Session() @@ -925,32 +932,32 @@ row on a different "path" that doesn't include the attribute. 
This is a deep use case that's hard to reproduce, but the general idea is as follows:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) - b_id = Column(ForeignKey('b.id')) - c_id = Column(ForeignKey('c.id')) + b_id = Column(ForeignKey("b.id")) + c_id = Column(ForeignKey("c.id")) b = relationship("B") c = relationship("C") class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) - c_id = Column(ForeignKey('c.id')) + c_id = Column(ForeignKey("c.id")) c = relationship("C") class C(Base): - __tablename__ = 'c' + __tablename__ = "c" id = Column(Integer, primary_key=True) - d_id = Column(ForeignKey('d.id')) + d_id = Column(ForeignKey("d.id")) d = relationship("D") class D(Base): - __tablename__ = 'd' + __tablename__ = "d" id = Column(Integer, primary_key=True) @@ -959,7 +966,9 @@ deep use case that's hard to reproduce, but the general idea is as follows:: q = s.query(A) q = q.join(A.b).join(c_alias_1, B.c).join(c_alias_1.d) - q = q.options(contains_eager(A.b).contains_eager(B.c, alias=c_alias_1).contains_eager(C.d)) + q = q.options( + contains_eager(A.b).contains_eager(B.c, alias=c_alias_1).contains_eager(C.d) + ) q = q.join(c_alias_2, A.c) q = q.options(contains_eager(A.c, alias=c_alias_2)) @@ -1121,6 +1130,7 @@ for specific exceptions:: engine = create_engine("postgresql+psycopg2://") + @event.listens_for(engine, "handle_error") def cancel_disconnect(ctx): if isinstance(ctx.original_exception, KeyboardInterrupt): @@ -1149,25 +1159,22 @@ statement:: >>> from sqlalchemy import table, column, select, literal, exists >>> orders = table( - ... 'orders', - ... column('region'), - ... column('amount'), - ... column('product'), - ... column('quantity') + ... "orders", column("region"), column("amount"), column("product"), column("quantity") ... ) >>> >>> upsert = ( ... orders.update() - ... .where(orders.c.region == 'Region1') - ... 
.values(amount=1.0, product='Product1', quantity=1) - ... .returning(*(orders.c._all_columns)).cte('upsert')) + ... .where(orders.c.region == "Region1") + ... .values(amount=1.0, product="Product1", quantity=1) + ... .returning(*(orders.c._all_columns)) + ... .cte("upsert") + ... ) >>> >>> insert = orders.insert().from_select( ... orders.c.keys(), - ... select([ - ... literal('Region1'), literal(1.0), - ... literal('Product1'), literal(1) - ... ]).where(~exists(upsert.select())) + ... select([literal("Region1"), literal(1.0), literal("Product1"), literal(1)]).where( + ... ~exists(upsert.select()) + ... ), ... ) >>> >>> print(insert) # note formatting added for clarity @@ -1198,13 +1205,13 @@ RANGE and ROWS expressions for window functions:: >>> from sqlalchemy import func - >>> print(func.row_number().over(order_by='x', range_=(-5, 10))) + >>> print(func.row_number().over(order_by="x", range_=(-5, 10))) row_number() OVER (ORDER BY x RANGE BETWEEN :param_1 PRECEDING AND :param_2 FOLLOWING) - >>> print(func.row_number().over(order_by='x', rows=(None, 0))) + >>> print(func.row_number().over(order_by="x", rows=(None, 0))) row_number() OVER (ORDER BY x ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) - >>> print(func.row_number().over(order_by='x', range_=(-2, None))) + >>> print(func.row_number().over(order_by="x", range_=(-2, None))) row_number() OVER (ORDER BY x RANGE BETWEEN :param_1 PRECEDING AND UNBOUNDED FOLLOWING) :paramref:`.expression.over.range_` and :paramref:`.expression.over.rows` are specified as @@ -1230,10 +1237,13 @@ correlation of tables that are derived from the same FROM clause as the selectable, e.g. lateral correlation:: >>> from sqlalchemy import table, column, select, true - >>> people = table('people', column('people_id'), column('age'), column('name')) - >>> books = table('books', column('book_id'), column('owner_id')) - >>> subq = select([books.c.book_id]).\ - ... 
where(books.c.owner_id == people.c.people_id).lateral("book_subq") + >>> people = table("people", column("people_id"), column("age"), column("name")) + >>> books = table("books", column("book_id"), column("owner_id")) + >>> subq = ( + ... select([books.c.book_id]) + ... .where(books.c.owner_id == people.c.people_id) + ... .lateral("book_subq") + ... ) >>> print(select([people]).select_from(people.join(subq, true()))) SELECT people.people_id, people.age, people.name FROM people JOIN LATERAL (SELECT books.book_id AS book_id @@ -1262,10 +1272,7 @@ construct similar to an alias:: from sqlalchemy import func - selectable = people.tablesample( - func.bernoulli(1), - name='alias', - seed=func.random()) + selectable = people.tablesample(func.bernoulli(1), name="alias", seed=func.random()) stmt = select([selectable.c.people_id]) Assuming ``people`` with a column ``people_id``, the above @@ -1295,9 +1302,10 @@ What's changed is that this feature no longer turns on automatically for a *composite* primary key; previously, a table definition such as:: Table( - 'some_table', metadata, - Column('x', Integer, primary_key=True), - Column('y', Integer, primary_key=True) + "some_table", + metadata, + Column("x", Integer, primary_key=True), + Column("y", Integer, primary_key=True), ) Would have "autoincrement" semantics applied to the ``'x'`` column, only @@ -1306,9 +1314,10 @@ disable this, one would have to turn off ``autoincrement`` on all columns:: # old way Table( - 'some_table', metadata, - Column('x', Integer, primary_key=True, autoincrement=False), - Column('y', Integer, primary_key=True, autoincrement=False) + "some_table", + metadata, + Column("x", Integer, primary_key=True, autoincrement=False), + Column("y", Integer, primary_key=True, autoincrement=False), ) With the new behavior, the composite primary key will not have autoincrement @@ -1316,9 +1325,10 @@ semantics unless a column is marked explicitly with ``autoincrement=True``:: # column 'y' will be 
SERIAL/AUTO_INCREMENT/ auto-generating Table( - 'some_table', metadata, - Column('x', Integer, primary_key=True), - Column('y', Integer, primary_key=True, autoincrement=True) + "some_table", + metadata, + Column("x", Integer, primary_key=True), + Column("y", Integer, primary_key=True, autoincrement=True), ) In order to anticipate some potential backwards-incompatible scenarios, @@ -1327,9 +1337,10 @@ for missing primary key values on composite primary key columns that don't have autoincrement set up; given a table such as:: Table( - 'b', metadata, - Column('x', Integer, primary_key=True), - Column('y', Integer, primary_key=True) + "b", + metadata, + Column("x", Integer, primary_key=True), + Column("y", Integer, primary_key=True), ) An INSERT emitted with no values for this table will produce this warning:: @@ -1349,9 +1360,10 @@ default or something less common such as a trigger, the presence of a value generator can be indicated using :class:`.FetchedValue`:: Table( - 'b', metadata, - Column('x', Integer, primary_key=True, server_default=FetchedValue()), - Column('y', Integer, primary_key=True, server_default=FetchedValue()) + "b", + metadata, + Column("x", Integer, primary_key=True, server_default=FetchedValue()), + Column("y", Integer, primary_key=True, server_default=FetchedValue()), ) For the very unlikely case where a composite primary key is actually intended @@ -1359,9 +1371,10 @@ to store NULL in one or more of its columns (only supported on SQLite and MySQL) specify the column with ``nullable=True``:: Table( - 'b', metadata, - Column('x', Integer, primary_key=True), - Column('y', Integer, primary_key=True, nullable=True) + "b", + metadata, + Column("x", Integer, primary_key=True), + Column("y", Integer, primary_key=True, nullable=True), ) In a related change, the ``autoincrement`` flag may be set to True @@ -1384,19 +1397,19 @@ New operators :meth:`.ColumnOperators.is_distinct_from` and :meth:`.ColumnOperators.isnot_distinct_from` allow the IS DISTINCT 
FROM and IS NOT DISTINCT FROM sql operation:: - >>> print(column('x').is_distinct_from(None)) + >>> print(column("x").is_distinct_from(None)) x IS DISTINCT FROM NULL Handling is provided for NULL, True and False:: - >>> print(column('x').isnot_distinct_from(False)) + >>> print(column("x").isnot_distinct_from(False)) x IS NOT DISTINCT FROM false For SQLite, which doesn't have this operator, "IS" / "IS NOT" is rendered, which on SQLite works for NULL unlike other backends:: >>> from sqlalchemy.dialects import sqlite - >>> print(column('x').is_distinct_from(None).compile(dialect=sqlite.dialect())) + >>> print(column("x").is_distinct_from(None).compile(dialect=sqlite.dialect())) x IS NOT NULL .. _change_1957: @@ -1445,19 +1458,15 @@ and the column arguments passed to :meth:`_expression.TextClause.columns`:: from sqlalchemy import text - stmt = text("SELECT users.id, addresses.id, users.id, " - "users.name, addresses.email_address AS email " - "FROM users JOIN addresses ON users.id=addresses.user_id " - "WHERE users.id = 1").columns( - User.id, - Address.id, - Address.user_id, - User.name, - Address.email_address - ) - - query = session.query(User).from_statement(stmt).\ - options(contains_eager(User.addresses)) + + stmt = text( + "SELECT users.id, addresses.id, users.id, " + "users.name, addresses.email_address AS email " + "FROM users JOIN addresses ON users.id=addresses.user_id " + "WHERE users.id = 1" + ).columns(User.id, Address.id, Address.user_id, User.name, Address.email_address) + + query = session.query(User).from_statement(stmt).options(contains_eager(User.addresses)) result = query.all() Above, the textual SQL contains the column "id" three times, which would @@ -1489,7 +1498,7 @@ Another aspect of this change is that the rules for matching columns have also b to rely upon "positional" matching more fully for compiled SQL constructs as well. 
Given a statement like the following:: - ua = users.alias('ua') + ua = users.alias("ua") stmt = select([users.c.user_id, ua.c.user_id]) The above statement will compile to:: @@ -1512,7 +1521,7 @@ fetch columns:: ua_id = row[ua.c.user_id] # this still raises, however - user_id = row['user_id'] + user_id = row["user_id"] Much less likely to get an "ambiguous column" error message ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -1550,10 +1559,7 @@ string/integer/etc values:: three = 3 - t = Table( - 'data', MetaData(), - Column('value', Enum(MyEnum)) - ) + t = Table("data", MetaData(), Column("value", Enum(MyEnum))) e = create_engine("sqlite://") t.create(e) @@ -1600,8 +1606,9 @@ flag is used (1.1.0b2):: >>> from sqlalchemy import Table, MetaData, Column, Enum, create_engine >>> t = Table( - ... 'data', MetaData(), - ... Column('value', Enum("one", "two", "three", validate_strings=True)) + ... "data", + ... MetaData(), + ... Column("value", Enum("one", "two", "three", validate_strings=True)), ... ) >>> e = create_engine("sqlite://") >>> t.create(e) @@ -1674,8 +1681,8 @@ within logging, exception reporting, as well as ``repr()`` of the row itself:: >>> from sqlalchemy import create_engine >>> import random - >>> e = create_engine("sqlite://", echo='debug') - >>> some_value = ''.join(chr(random.randint(52, 85)) for i in range(5000)) + >>> e = create_engine("sqlite://", echo="debug") + >>> some_value = "".join(chr(random.randint(52, 85)) for i in range(5000)) >>> row = e.execute("select ?", [some_value]).first() ... (lines are wrapped for clarity) ... 2016-02-17 13:23:03,027 INFO sqlalchemy.engine.base.Engine select ? @@ -1752,6 +1759,7 @@ replacing the ``None`` value:: json_value = Column(JSON(none_as_null=False), default="some default") + # would insert "some default" instead of "'null'", # now will insert "'null'" obj = MyObject(json_value=None) @@ -1769,6 +1777,7 @@ inconsistently vs. 
all other datatypes:: some_other_value = Column(String(50)) json_value = Column(JSON(none_as_null=False)) + # would result in NULL for some_other_value, # but json "'null'" for json_value. Now results in NULL for both # (the json_value is omitted from the INSERT) @@ -1786,9 +1795,7 @@ would be ignored in all cases:: # would insert SQL NULL and/or trigger defaults, # now inserts "'null'" - session.bulk_insert_mappings( - MyObject, - [{"json_value": None}]) + session.bulk_insert_mappings(MyObject, [{"json_value": None}]) The :class:`_types.JSON` type now implements the :attr:`.TypeEngine.should_evaluate_none` flag, @@ -1847,9 +1854,7 @@ is now in Core. The :class:`_types.ARRAY` type still **only works on PostgreSQL**, however it can be used directly, supporting special array use cases such as indexed access, as well as support for the ANY and ALL:: - mytable = Table("mytable", metadata, - Column("data", ARRAY(Integer, dimensions=2)) - ) + mytable = Table("mytable", metadata, Column("data", ARRAY(Integer, dimensions=2))) expr = mytable.c.data[5][6] @@ -1884,7 +1889,6 @@ such as:: subq = select([mytable.c.value]) select([mytable]).where(12 > any_(subq)) - :ticket:`3516` .. 
_change_3132: @@ -1897,12 +1901,14 @@ function for the ``array_agg()`` SQL function that returns an array, which is now available using :class:`_functions.array_agg`:: from sqlalchemy import func + stmt = select([func.array_agg(table.c.value)]) A PostgreSQL element for an aggregate ORDER BY is also added via :class:`_postgresql.aggregate_order_by`:: from sqlalchemy.dialects.postgresql import aggregate_order_by + expr = func.array_agg(aggregate_order_by(table.c.a, table.c.b.desc())) stmt = select([expr]) @@ -1914,8 +1920,8 @@ The PG dialect itself also provides an :func:`_postgresql.array_agg` wrapper to ensure the :class:`_postgresql.ARRAY` type:: from sqlalchemy.dialects.postgresql import array_agg - stmt = select([array_agg(table.c.value).contains('foo')]) + stmt = select([array_agg(table.c.value).contains("foo")]) Additionally, functions like ``percentile_cont()``, ``percentile_disc()``, ``rank()``, ``dense_rank()`` and others that require an ordering via @@ -1923,12 +1929,13 @@ Additionally, functions like ``percentile_cont()``, ``percentile_disc()``, :meth:`.FunctionElement.within_group` modifier:: from sqlalchemy import func - stmt = select([ - department.c.id, - func.percentile_cont(0.5).within_group( - department.c.salary.desc() - ) - ]) + + stmt = select( + [ + department.c.id, + func.percentile_cont(0.5).within_group(department.c.salary.desc()), + ] + ) The above statement would produce SQL similar to:: @@ -1956,7 +1963,7 @@ an :class:`_postgresql.ENUM` had to look like this:: # old way class MyEnum(TypeDecorator, SchemaType): - impl = postgresql.ENUM('one', 'two', 'three', name='myenum') + impl = postgresql.ENUM("one", "two", "three", name="myenum") def _set_table(self, table): self.impl._set_table(table) @@ -1966,8 +1973,7 @@ can be done like any other type:: # new way class MyEnum(TypeDecorator): - impl = postgresql.ENUM('one', 'two', 'three', name='myenum') - + impl = postgresql.ENUM("one", "two", "three", name="myenum") :ticket:`2919` @@ -1987,17 
+1993,18 @@ translation works for DDL and SQL generation, as well as with the ORM. For example, if the ``User`` class were assigned the schema "per_user":: class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) - __table_args__ = {'schema': 'per_user'} + __table_args__ = {"schema": "per_user"} On each request, the :class:`.Session` can be set up to refer to a different schema each time:: session = Session() - session.connection(execution_options={ - "schema_translate_map": {"per_user": "account_one"}}) + session.connection( + execution_options={"schema_translate_map": {"per_user": "account_one"}} + ) # will query from the ``account_one.user`` table session.query(User).get(5) @@ -2072,21 +2079,21 @@ Then, a mapping where we are equating a string "id" column on one table to an integer "id" column on the other:: class Person(Base): - __tablename__ = 'person' + __tablename__ = "person" id = Column(StringAsInt, primary_key=True) pets = relationship( - 'Pets', + "Pets", primaryjoin=( - 'foreign(Pets.person_id)' - '==cast(type_coerce(Person.id, Integer), Integer)' - ) + "foreign(Pets.person_id)" "==cast(type_coerce(Person.id, Integer), Integer)" + ), ) + class Pets(Base): - __tablename__ = 'pets' - id = Column('id', Integer, primary_key=True) - person_id = Column('person_id', Integer) + __tablename__ = "pets" + id = Column("id", Integer, primary_key=True) + person_id = Column("person_id", Integer) Above, in the :paramref:`_orm.relationship.primaryjoin` expression, we are using :func:`.type_coerce` to handle bound parameters passed via @@ -2166,8 +2173,7 @@ Column:: class MyObject(Base): # ... 
- json_value = Column( - JSON(none_as_null=False), nullable=False, default=JSON.NULL) + json_value = Column(JSON(none_as_null=False), nullable=False, default=JSON.NULL) Or, ensure the value is present on the object:: @@ -2182,7 +2188,6 @@ passed to :paramref:`_schema.Column.default` or :paramref:`_schema.Column.server # default=None is the same as omitting it entirely, does not apply JSON NULL json_value = Column(JSON(none_as_null=False), nullable=False, default=None) - .. seealso:: :ref:`change_3514` @@ -2195,9 +2200,11 @@ Columns no longer added redundantly with DISTINCT + ORDER BY A query such as the following will now augment only those columns that are missing from the SELECT list, without duplicates:: - q = session.query(User.id, User.name.label('name')).\ - distinct().\ - order_by(User.id, User.name, User.fullname) + q = ( + session.query(User.id, User.name.label("name")) + .distinct() + .order_by(User.id, User.name, User.fullname) + ) Produces:: @@ -2237,7 +2244,7 @@ now raises an error, whereas previously it would silently pick only the last defined validator:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) data = Column(String) @@ -2250,6 +2257,7 @@ last defined validator:: def _validate_data_two(self): assert "y" in data + configure_mappers() Will raise:: @@ -2321,7 +2329,7 @@ passed through the literal quoting system:: >>> from sqlalchemy.schema import MetaData, Table, Column, CreateTable >>> from sqlalchemy.types import String - >>> t = Table('t', MetaData(), Column('x', String(), server_default="hi ' there")) + >>> t = Table("t", MetaData(), Column("x", String(), server_default="hi ' there")) >>> print(CreateTable(t)) CREATE TABLE t ( @@ -2473,7 +2481,7 @@ This includes: one less dimension. 
Given a column with type ``ARRAY(Integer, dimensions=3)``, we can now perform this expression:: - int_expr = col[5][6][7] # returns an Integer expression object + int_expr = col[5][6][7] # returns an Integer expression object Previously, the indexed access to ``col[5]`` would return an expression of type :class:`.Integer` where we could no longer perform indexed access @@ -2490,7 +2498,7 @@ This includes: the :class:`_postgresql.ARRAY` type, this means that it is now straightforward to produce JSON expressions with multiple levels of indexed access:: - json_expr = json_col['key1']['attr1'][5] + json_expr = json_col["key1"]["attr1"][5] * The "textual" type that is returned by indexed access of :class:`.HSTORE` as well as the "textual" type that is returned by indexed access of @@ -2520,12 +2528,11 @@ support CAST operations to each other without the "astext" aspect. This means that in most cases, an application that was doing this:: - expr = json_col['somekey'].cast(Integer) + expr = json_col["somekey"].cast(Integer) Will now need to change to this:: - expr = json_col['somekey'].astext.cast(Integer) - + expr = json_col["somekey"].astext.cast(Integer) .. 
_change_2729: @@ -2536,12 +2543,21 @@ A table definition like the following will now emit CREATE TYPE as expected:: enum = Enum( - 'manager', 'place_admin', 'carwash_admin', - 'parking_admin', 'service_admin', 'tire_admin', - 'mechanic', 'carwasher', 'tire_mechanic', name="work_place_roles") + "manager", + "place_admin", + "carwash_admin", + "parking_admin", + "service_admin", + "tire_admin", + "mechanic", + "carwasher", + "tire_mechanic", + name="work_place_roles", + ) + class WorkPlacement(Base): - __tablename__ = 'work_placement' + __tablename__ = "work_placement" id = Column(Integer, primary_key=True) roles = Column(ARRAY(enum)) @@ -2580,10 +2596,11 @@ The new argument :paramref:`.PGInspector.get_view_names.include` allows specification of which sub-types of views should be returned:: from sqlalchemy import inspect + insp = inspect(engine) - plain_views = insp.get_view_names(include='plain') - all_views = insp.get_view_names(include=('plain', 'materialized')) + plain_views = insp.get_view_names(include="plain") + all_views = insp.get_view_names(include=("plain", "materialized")) :ticket:`3588` @@ -2668,9 +2685,7 @@ The MySQL dialect now accepts the value "AUTOCOMMIT" for the parameters:: connection = engine.connect() - connection = connection.execution_options( - isolation_level="AUTOCOMMIT" - ) + connection = connection.execution_options(isolation_level="AUTOCOMMIT") The isolation level makes use of the various "autocommit" attributes provided by most MySQL DBAPIs. 
@@ -2687,10 +2702,11 @@ on an InnoDB table featured AUTO_INCREMENT on one of its columns which was not the first column, e.g.:: t = Table( - 'some_table', metadata, - Column('x', Integer, primary_key=True, autoincrement=False), - Column('y', Integer, primary_key=True, autoincrement=True), - mysql_engine='InnoDB' + "some_table", + metadata, + Column("x", Integer, primary_key=True, autoincrement=False), + Column("y", Integer, primary_key=True, autoincrement=True), + mysql_engine="InnoDB", ) DDL such as the following would be generated:: @@ -2720,12 +2736,13 @@ use the :class:`.PrimaryKeyConstraint` construct explicitly (1.1.0b2) (along with a KEY for the autoincrement column as required by MySQL), e.g.:: t = Table( - 'some_table', metadata, - Column('x', Integer, primary_key=True), - Column('y', Integer, primary_key=True, autoincrement=True), - PrimaryKeyConstraint('x', 'y'), - UniqueConstraint('y'), - mysql_engine='InnoDB' + "some_table", + metadata, + Column("x", Integer, primary_key=True), + Column("y", Integer, primary_key=True, autoincrement=True), + PrimaryKeyConstraint("x", "y"), + UniqueConstraint("y"), + mysql_engine="InnoDB", ) Along with the change :ref:`change_3216`, composite primary keys with @@ -2735,14 +2752,13 @@ now defaults to the value ``"auto"`` and the ``autoincrement=False`` directives are no longer needed:: t = Table( - 'some_table', metadata, - Column('x', Integer, primary_key=True), - Column('y', Integer, primary_key=True, autoincrement=True), - mysql_engine='InnoDB' + "some_table", + metadata, + Column("x", Integer, primary_key=True), + Column("y", Integer, primary_key=True, autoincrement=True), + mysql_engine="InnoDB", ) - - Dialect Improvements and Changes - SQLite ========================================= @@ -2849,8 +2865,7 @@ parameters. 
The four standard levels are supported as well as ``SNAPSHOT``:: engine = create_engine( - "mssql+pyodbc://scott:tiger@ms_2008", - isolation_level="REPEATABLE READ" + "mssql+pyodbc://scott:tiger@ms_2008", isolation_level="REPEATABLE READ" ) .. seealso:: @@ -2869,12 +2884,11 @@ which includes a length, an "un-lengthed" type under SQL Server would copy the "length" parameter as the value ``"max"``:: >>> from sqlalchemy import create_engine, inspect - >>> engine = create_engine('mssql+pyodbc://scott:tiger@ms_2008', echo=True) + >>> engine = create_engine("mssql+pyodbc://scott:tiger@ms_2008", echo=True) >>> engine.execute("create table s (x varchar(max), y varbinary(max))") >>> insp = inspect(engine) >>> for col in insp.get_columns("s"): - ... print(col['type'].__class__, col['type'].length) - ... + ... print(col["type"].__class__, col["type"].length) max max @@ -2884,8 +2898,7 @@ interprets as "max". The fix then is so that these lengths come out as None, so that the type objects work in non-SQL Server contexts:: >>> for col in insp.get_columns("s"): - ... print(col['type'].__class__, col['type'].length) - ... + ... print(col["type"].__class__, col["type"].length) None None @@ -2918,10 +2931,11 @@ This aliasing attempts to turn schema-qualified tables into aliases; given a table such as:: account_table = Table( - 'account', metadata, - Column('id', Integer, primary_key=True), - Column('info', String(100)), - schema="customer_schema" + "account", + metadata, + Column("id", Integer, primary_key=True), + Column("info", String(100)), + schema="customer_schema", ) The legacy mode of behavior will attempt to turn a schema-qualified table diff --git a/doc/build/changelog/migration_12.rst b/doc/build/changelog/migration_12.rst index 7073660f78..d5676e2854 100644 --- a/doc/build/changelog/migration_12.rst +++ b/doc/build/changelog/migration_12.rst @@ -80,9 +80,11 @@ that is cacheable as well as more efficient. 
Given a query as below:: - q = session.query(User).\ - filter(User.name.like('%ed%')).\ - options(subqueryload(User.addresses)) + q = ( + session.query(User) + .filter(User.name.like("%ed%")) + .options(subqueryload(User.addresses)) + ) The SQL produced would be the query against ``User`` followed by the subqueryload for ``User.addresses`` (note the parameters are also listed):: @@ -106,9 +108,11 @@ subqueryload for ``User.addresses`` (note the parameters are also listed):: With "selectin" loading, we instead get a SELECT that refers to the actual primary key values loaded in the parent query:: - q = session.query(User).\ - filter(User.name.like('%ed%')).\ - options(selectinload(User.addresses)) + q = ( + session.query(User) + .filter(User.name.like("%ed%")) + .options(selectinload(User.addresses)) + ) Produces:: @@ -225,8 +229,9 @@ if not specified, the attribute defaults to ``None``:: from sqlalchemy.orm import query_expression from sqlalchemy.orm import with_expression + class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) x = Column(Integer) y = Column(Integer) @@ -234,9 +239,9 @@ if not specified, the attribute defaults to ``None``:: # will be None normally... expr = query_expression() + # but let's give it x + y - a1 = session.query(A).options( - with_expression(A.expr, A.x + A.y)).first() + a1 = session.query(A).options(with_expression(A.expr, A.x + A.y)).first() print(a1.expr) .. seealso:: @@ -259,10 +264,9 @@ Below, we emit a DELETE against ``SomeEntity``, adding a FROM clause (or equivalent, depending on backend) against ``SomeOtherEntity``:: - query(SomeEntity).\ - filter(SomeEntity.id==SomeOtherEntity.id).\ - filter(SomeOtherEntity.foo=='bar').\ - delete() + query(SomeEntity).filter(SomeEntity.id == SomeOtherEntity.id).filter( + SomeOtherEntity.foo == "bar" + ).delete() .. 
seealso:: @@ -291,28 +295,26 @@ into multiple columns/expressions:: @hybrid.hybrid_property def name(self): - return self.first_name + ' ' + self.last_name + return self.first_name + " " + self.last_name @name.expression def name(cls): - return func.concat(cls.first_name, ' ', cls.last_name) + return func.concat(cls.first_name, " ", cls.last_name) @name.update_expression def name(cls, value): - f, l = value.split(' ', 1) + f, l = value.split(" ", 1) return [(cls.first_name, f), (cls.last_name, l)] Above, an UPDATE can be rendered using:: - session.query(Person).filter(Person.id == 5).update( - {Person.name: "Dr. No"}) + session.query(Person).filter(Person.id == 5).update({Person.name: "Dr. No"}) Similar functionality is available for composites, where composite values will be broken out into their individual columns for bulk UPDATE:: session.query(Vertex).update({Edge.start: Point(3, 4)}) - .. seealso:: :ref:`hybrid_bulk_update` @@ -342,6 +344,7 @@ Python:: def name(self, value): self.first_name = value + class FirstNameLastName(FirstNameOnly): # ... @@ -349,15 +352,15 @@ Python:: @FirstNameOnly.name.getter def name(self): - return self.first_name + ' ' + self.last_name + return self.first_name + " " + self.last_name @name.setter def name(self, value): - self.first_name, self.last_name = value.split(' ', maxsplit=1) + self.first_name, self.last_name = value.split(" ", maxsplit=1) @name.expression def name(cls): - return func.concat(cls.first_name, ' ', cls.last_name) + return func.concat(cls.first_name, " ", cls.last_name) Above, the ``FirstNameOnly.name`` hybrid is referenced by the ``FirstNameLastName`` subclass in order to repurpose it specifically to the @@ -391,6 +394,7 @@ hybrid in-place, interfering with the definition on the superclass. 
def _set_name(self, value): self.first_name = value + class FirstNameOnly(Base): @hybrid_property def name(self): @@ -426,10 +430,12 @@ if this "append" event is the second part of a bulk replace:: from sqlalchemy.orm.attributes import OP_BULK_REPLACE + @event.listens_for(SomeObject.collection, "bulk_replace") def process_collection(target, values, initiator): values[:] = [_make_value(value) for value in values] + @event.listens_for(SomeObject.collection, "append", retval=True) def process_collection(target, value, initiator): # make sure bulk_replace didn't already do it @@ -438,7 +444,6 @@ if this "append" event is the second part of a bulk replace:: else: return value - :ticket:`3896` .. _change_3303: @@ -457,11 +462,13 @@ extension:: Base = declarative_base() + class MyDataClass(Base): - __tablename__ = 'my_data' + __tablename__ = "my_data" id = Column(Integer, primary_key=True) data = Column(MutableDict.as_mutable(JSONEncodedDict)) + @event.listens_for(MyDataClass.data, "modified") def modified_json(instance): print("json value modified:", instance.data) @@ -511,7 +518,6 @@ becomes part of the next flush process:: model = session.query(MyModel).first() model.json_set &= {1, 3} - :ticket:`3853` .. 
_change_3769: @@ -527,7 +533,7 @@ is an association proxy that links to ``AtoB.bvalue``, which is itself an association proxy onto ``B``:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) b_values = association_proxy("atob", "b_value") @@ -535,26 +541,26 @@ itself an association proxy onto ``B``:: class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) - a_id = Column(ForeignKey('a.id')) + a_id = Column(ForeignKey("a.id")) value = Column(String) c = relationship("C") class C(Base): - __tablename__ = 'c' + __tablename__ = "c" id = Column(Integer, primary_key=True) - b_id = Column(ForeignKey('b.id')) + b_id = Column(ForeignKey("b.id")) value = Column(String) class AtoB(Base): - __tablename__ = 'atob' + __tablename__ = "atob" - a_id = Column(ForeignKey('a.id'), primary_key=True) - b_id = Column(ForeignKey('b.id'), primary_key=True) + a_id = Column(ForeignKey("a.id"), primary_key=True) + b_id = Column(ForeignKey("b.id"), primary_key=True) a = relationship("A", backref="atob") b = relationship("B", backref="atob") @@ -567,7 +573,7 @@ query across the two proxies ``A.b_values``, ``AtoB.b_value``: .. sourcecode:: pycon+sql - >>> s.query(A).filter(A.b_values.contains('hi')).all() + >>> s.query(A).filter(A.b_values.contains("hi")).all() {opensql}SELECT a.id AS a_id FROM a WHERE EXISTS (SELECT 1 @@ -581,7 +587,7 @@ to query across the two proxies ``A.c_values``, ``AtoB.c_value``: .. sourcecode:: pycon+sql - >>> s.query(A).filter(A.c_values.any(value='x')).all() + >>> s.query(A).filter(A.c_values.any(value="x")).all() {opensql}SELECT a.id AS a_id FROM a WHERE EXISTS (SELECT 1 @@ -612,8 +618,8 @@ primary key value. 
The example now illustrates that a new ``identity_token`` field tracks this difference so that the two objects can co-exist in the same identity map:: - tokyo = WeatherLocation('Asia', 'Tokyo') - newyork = WeatherLocation('North America', 'New York') + tokyo = WeatherLocation("Asia", "Tokyo") + newyork = WeatherLocation("North America", "New York") tokyo.reports.append(Report(80.0)) newyork.reports.append(Report(75)) @@ -632,15 +638,14 @@ same identity map:: newyork_report = newyork.reports[0] tokyo_report = tokyo.reports[0] - assert inspect(newyork_report).identity_key == (Report, (1, ), "north_america") - assert inspect(tokyo_report).identity_key == (Report, (1, ), "asia") + assert inspect(newyork_report).identity_key == (Report, (1,), "north_america") + assert inspect(tokyo_report).identity_key == (Report, (1,), "asia") # the token representing the originating shard is also available directly assert inspect(newyork_report).identity_token == "north_america" assert inspect(tokyo_report).identity_token == "asia" - :ticket:`4137` New Features and Improvements - Core @@ -673,6 +678,7 @@ illustrates a recipe that will allow for the "liberal" behavior of the pre-1.1 from sqlalchemy import Boolean from sqlalchemy import TypeDecorator + class LiberalBoolean(TypeDecorator): impl = Boolean @@ -681,7 +687,6 @@ illustrates a recipe that will allow for the "liberal" behavior of the pre-1.1 value = bool(int(value)) return value - :ticket:`4102` .. _change_3919: @@ -844,7 +849,7 @@ other comparison operators has been flattened into one level. This will have the effect of more parenthesization being generated when comparison operators are combined together, such as:: - (column('q') == null()) != (column('y') == null()) + (column("q") == null()) != (column("y") == null()) Will now generate ``(q IS NULL) != (y IS NULL)`` rather than ``q IS NULL != y IS NULL``. @@ -862,9 +867,10 @@ and columns. 
These are specified via the :paramref:`_schema.Table.comment` and :paramref:`_schema.Column.comment` arguments:: Table( - 'my_table', metadata, - Column('q', Integer, comment="the Q value"), - comment="my Q table" + "my_table", + metadata, + Column("q", Integer, comment="the Q value"), + comment="my Q table", ) Above, DDL will be rendered appropriately upon table create to associate @@ -891,9 +897,11 @@ the 0.7 and 0.8 series. Given a statement as:: - stmt = users.delete().\ - where(users.c.id == addresses.c.id).\ - where(addresses.c.email_address.startswith('ed%')) + stmt = ( + users.delete() + .where(users.c.id == addresses.c.id) + .where(addresses.c.email_address.startswith("ed%")) + ) conn.execute(stmt) The resulting SQL from the above statement on a PostgreSQL backend @@ -930,7 +938,7 @@ can now be used to change the autoescape character, if desired. An expression such as:: - >>> column('x').startswith('total%score', autoescape=True) + >>> column("x").startswith("total%score", autoescape=True) Renders as:: @@ -940,7 +948,7 @@ Where the value of the parameter "x_1" is ``'total/%score'``. Similarly, an expression that has backslashes:: - >>> column('x').startswith('total/score', autoescape=True) + >>> column("x").startswith("total/score", autoescape=True) Will render the same way, with the value of the parameter "x_1" as ``'total//score'``. @@ -968,8 +976,8 @@ if the application is working with plain floats. float_value = connection.scalar( - select([literal(4.56)]) # the "BindParameter" will now be - # Float, not Numeric(asdecimal=True) + select([literal(4.56)]) # the "BindParameter" will now be + # Float, not Numeric(asdecimal=True) ) * Math operations between :class:`.Numeric`, :class:`.Float`, and @@ -978,11 +986,11 @@ if the application is working with plain floats. 
as well as if the type should be :class:`.Float`:: # asdecimal flag is maintained - expr = column('a', Integer) * column('b', Numeric(asdecimal=False)) + expr = column("a", Integer) * column("b", Numeric(asdecimal=False)) assert expr.type.asdecimal == False # Float subclass of Numeric is maintained - expr = column('a', Integer) * column('b', Float()) + expr = column("a", Integer) * column("b", Float()) assert isinstance(expr.type, Float) * The :class:`.Float` datatype will apply the ``float()`` processor to @@ -1009,9 +1017,7 @@ is added to the compiler to allow for the space. All three functions are named in the documentation now:: >>> from sqlalchemy import select, table, column, func, tuple_ - >>> t = table('t', - ... column('value'), column('x'), - ... column('y'), column('z'), column('q')) + >>> t = table("t", column("value"), column("x"), column("y"), column("z"), column("q")) >>> stmt = select([func.sum(t.c.value)]).group_by( ... func.grouping_sets( ... tuple_(t.c.x, t.c.y), @@ -1046,16 +1052,17 @@ localized to the current VALUES clause being processed:: def mydefault(context): - return context.get_current_parameters()['counter'] + 12 + return context.get_current_parameters()["counter"] + 12 - mytable = Table('mytable', metadata_obj, - Column('counter', Integer), - Column('counter_plus_twelve', - Integer, default=mydefault, onupdate=mydefault) + + mytable = Table( + "mytable", + metadata_obj, + Column("counter", Integer), + Column("counter_plus_twelve", Integer, default=mydefault, onupdate=mydefault), ) - stmt = mytable.insert().values( - [{"counter": 5}, {"counter": 18}, {"counter": 20}]) + stmt = mytable.insert().values([{"counter": 5}, {"counter": 18}, {"counter": 20}]) conn.execute(stmt) @@ -1077,7 +1084,8 @@ of the :meth:`.SessionEvents.after_commit` event which also emits before the sess = Session() - user = sess.query(User).filter_by(name='x').first() + user = sess.query(User).filter_by(name="x").first() + @event.listens_for(sess, "after_rollback") 
def after_rollback(session): @@ -1086,12 +1094,14 @@ of the :meth:`.SessionEvents.after_commit` event which also emits before the # to emit a lazy load. print("user name: %s" % user.name) + @event.listens_for(sess, "after_commit") def after_commit(session): # 'user.name' is present, assuming it was already # loaded. this is the existing behavior. print("user name: %s" % user.name) + if should_rollback: sess.rollback() else: @@ -1148,7 +1158,7 @@ In the case of assigning a collection to an attribute that would replace the previous collection, a side effect of this was that the collection being replaced would also be mutated, which is misleading and unnecessary:: - >>> a1, a2, a3 = Address('a1'), Address('a2'), Address('a3') + >>> a1, a2, a3 = Address("a1"), Address("a2"), Address("a3") >>> user.addresses = [a1, a2] >>> previous_collection = user.addresses @@ -1177,18 +1187,19 @@ existing collection. Given a mapping as:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) bs = relationship("B") - @validates('bs') + @validates("bs") def convert_dict_to_b(self, key, value): - return B(data=value['data']) + return B(data=value["data"]) + class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) - a_id = Column(ForeignKey('a.id')) + a_id = Column(ForeignKey("a.id")) data = Column(String) Above, we could use the validator as follows, to convert from an incoming @@ -1217,7 +1228,7 @@ are new. Supposing a simple validator such as:: class A(Base): # ... 
- @validates('bs') + @validates("bs") def validate_b(self, key, value): assert value.data is not None return value @@ -1255,16 +1266,16 @@ Use flag_dirty() to mark an object as "dirty" without any attribute changing An exception is now raised if the :func:`.attributes.flag_modified` function is used to mark an attribute as modified that isn't actually loaded:: - a1 = A(data='adf') + a1 = A(data="adf") s.add(a1) s.flush() # expire, similarly as though we said s.commit() - s.expire(a1, 'data') + s.expire(a1, "data") # will raise InvalidRequestError - attributes.flag_modified(a1, 'data') + attributes.flag_modified(a1, "data") This because the flush process will most likely fail in any case if the attribute remains un-present by the time flush occurs. To mark an object @@ -1287,6 +1298,7 @@ such as :meth:`.SessionEvents.before_flush`, use the new A very old and undocumented keyword argument ``scope`` has been removed:: from sqlalchemy.orm import scoped_session + Session = scoped_session(sessionmaker()) session = Session(scope=None) @@ -1312,18 +1324,21 @@ it is re-stated during the UPDATE so that the "onupdate" rule does not overwrite it:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) - favorite_b_id = Column(ForeignKey('b.id', name="favorite_b_fk")) + favorite_b_id = Column(ForeignKey("b.id", name="favorite_b_fk")) bs = relationship("B", primaryjoin="A.id == B.a_id") favorite_b = relationship( - "B", primaryjoin="A.favorite_b_id == B.id", post_update=True) + "B", primaryjoin="A.favorite_b_id == B.id", post_update=True + ) updated = Column(Integer, onupdate=my_onupdate_function) + class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) - a_id = Column(ForeignKey('a.id', name="a_fk")) + a_id = Column(ForeignKey("a.id", name="a_fk")) + a1 = A() b1 = B() @@ -1371,21 +1386,18 @@ now participates in the versioning feature, documented at Given a mapping:: class Node(Base): - 
__tablename__ = 'node' + __tablename__ = "node" id = Column(Integer, primary_key=True) version_id = Column(Integer, default=0) - parent_id = Column(ForeignKey('node.id')) - favorite_node_id = Column(ForeignKey('node.id')) + parent_id = Column(ForeignKey("node.id")) + favorite_node_id = Column(ForeignKey("node.id")) nodes = relationship("Node", primaryjoin=remote(parent_id) == id) favorite_node = relationship( - "Node", primaryjoin=favorite_node_id == remote(id), - post_update=True + "Node", primaryjoin=favorite_node_id == remote(id), post_update=True ) - __mapper_args__ = { - 'version_id_col': version_id - } + __mapper_args__ = {"version_id_col": version_id} An UPDATE of a node that associates another node as "favorite" will now increment the version counter as well as match the current version:: @@ -1435,20 +1447,20 @@ Whereas in 1.1, an expression such as the following would produce a result with no return type (assume ``-%>`` is some special operator supported by the database):: - >>> column('x', types.DateTime).op('-%>')(None).type + >>> column("x", types.DateTime).op("-%>")(None).type NullType() Other types would use the default behavior of using the left-hand type as the return type:: - >>> column('x', types.String(50)).op('-%>')(None).type + >>> column("x", types.String(50)).op("-%>")(None).type String(length=50) These behaviors were mostly by accident, so the behavior has been made consistent with the second form, that is the default return type is the same as the left-hand expression:: - >>> column('x', types.DateTime).op('-%>')(None).type + >>> column("x", types.DateTime).op("-%>")(None).type DateTime() As most user-defined operators tend to be "comparison" operators, often @@ -1457,18 +1469,18 @@ one of the many special operators defined by PostgreSQL, the its documented behavior of allowing the return type to be :class:`.Boolean` in all cases, including for :class:`_types.ARRAY` and :class:`_types.JSON`:: - >>> column('x', types.String(50)).op('-%>', 
is_comparison=True)(None).type + >>> column("x", types.String(50)).op("-%>", is_comparison=True)(None).type Boolean() - >>> column('x', types.ARRAY(types.Integer)).op('-%>', is_comparison=True)(None).type + >>> column("x", types.ARRAY(types.Integer)).op("-%>", is_comparison=True)(None).type Boolean() - >>> column('x', types.JSON()).op('-%>', is_comparison=True)(None).type + >>> column("x", types.JSON()).op("-%>", is_comparison=True)(None).type Boolean() To assist with boolean comparison operators, a new shorthand method :meth:`.Operators.bool_op` has been added. This method should be preferred for on-the-fly boolean operators:: - >>> print(column('x', types.Integer).bool_op('-%>')(5)) + >>> print(column("x", types.Integer).bool_op("-%>")(5)) x -%> :x_1 @@ -1485,7 +1497,7 @@ Previously, it was not possible to produce a :obj:`_expression.literal_column` construct that stated a single percent sign:: >>> from sqlalchemy import literal_column - >>> print(literal_column('some%symbol')) + >>> print(literal_column("some%symbol")) some%%symbol The percent sign is now unaffected for dialects that are not set to @@ -1494,10 +1506,10 @@ dialects which do state one of these paramstyles will continue to escape as is appropriate:: >>> from sqlalchemy import literal_column - >>> print(literal_column('some%symbol')) + >>> print(literal_column("some%symbol")) some%symbol >>> from sqlalchemy.dialects import mysql - >>> print(literal_column('some%symbol').compile(dialect=mysql.dialect())) + >>> print(literal_column("some%symbol").compile(dialect=mysql.dialect())) some%%symbol As part of this change, the doubling that has been present when using @@ -1517,8 +1529,9 @@ A bug in the :func:`_expression.collate` and :meth:`.ColumnOperators.collate` functions, used to supply ad-hoc column collations at the statement level, is fixed, where a case sensitive name would not be quoted:: - stmt = select([mytable.c.x, mytable.c.y]).\ - order_by(mytable.c.somecolumn.collate("fr_FR")) + stmt = 
select([mytable.c.x, mytable.c.y]).order_by( + mytable.c.somecolumn.collate("fr_FR") + ) now renders:: @@ -1553,8 +1566,8 @@ sets. The feature is off by default and can be enabled using the ``use_batch_mode`` argument on :func:`_sa.create_engine`:: engine = create_engine( - "postgresql+psycopg2://scott:tiger@host/dbname", - use_batch_mode=True) + "postgresql+psycopg2://scott:tiger@host/dbname", use_batch_mode=True + ) The feature is considered to be experimental for the moment but may become on by default in a future release. @@ -1577,10 +1590,7 @@ now allows these values to be specified:: from sqlalchemy.dialects.postgresql import INTERVAL - Table( - 'my_table', metadata, - Column("some_interval", INTERVAL(fields="DAY TO SECOND")) - ) + Table("my_table", metadata, Column("some_interval", INTERVAL(fields="DAY TO SECOND"))) Additionally, all INTERVAL datatypes can now be reflected independently of the "fields" specifier present; the "fields" parameter in the datatype @@ -1610,12 +1620,10 @@ This :class:`_expression.Insert` subclass adds a new method from sqlalchemy.dialects.mysql import insert - insert_stmt = insert(my_table). \ - values(id='some_id', data='some data to insert') + insert_stmt = insert(my_table).values(id="some_id", data="some data to insert") on_conflict_stmt = insert_stmt.on_duplicate_key_update( - data=insert_stmt.inserted.data, - status='U' + data=insert_stmt.inserted.data, status="U" ) conn.execute(on_conflict_stmt) @@ -1748,9 +1756,15 @@ name, rather than the raw UPPERCASE format that Oracle uses:: Previously, the foreign keys result would look like:: - [{'referred_table': u'users', 'referred_columns': [u'id'], - 'referred_schema': None, 'name': 'USER_ID_FK', - 'constrained_columns': [u'user_id']}] + [ + { + "referred_table": "users", + "referred_columns": ["id"], + "referred_schema": None, + "name": "USER_ID_FK", + "constrained_columns": ["user_id"], + } + ] Where the above could create problems particularly with Alembic autogenerate. 
@@ -1774,20 +1788,17 @@ now be passed using brackets to manually specify where this split occurs, allowing database and/or owner names that themselves contain one or more dots:: - Table( - "some_table", metadata, - Column("q", String(50)), - schema="[MyDataBase.dbo]" - ) + Table("some_table", metadata, Column("q", String(50)), schema="[MyDataBase.dbo]") The above table will consider the "owner" to be ``MyDataBase.dbo``, which will also be quoted upon render, and the "database" as None. To individually refer to database name and owner, use two pairs of brackets:: Table( - "some_table", metadata, + "some_table", + metadata, Column("q", String(50)), - schema="[MyDataBase.SomeDB].[MyDB.owner]" + schema="[MyDataBase.SomeDB].[MyDB.owner]", ) Additionally, the :class:`.quoted_name` construct is now honored when diff --git a/doc/build/changelog/migration_13.rst b/doc/build/changelog/migration_13.rst index f54bae329d..a8197c6c62 100644 --- a/doc/build/changelog/migration_13.rst +++ b/doc/build/changelog/migration_13.rst @@ -130,14 +130,17 @@ like:: j = join(B, D, D.b_id == B.id).join(C, C.id == D.c_id) B_viacd = mapper( - B, j, non_primary=True, primary_key=[j.c.b_id], + B, + j, + non_primary=True, + primary_key=[j.c.b_id], properties={ "id": j.c.b_id, # so that 'id' looks the same as before - "c_id": j.c.c_id, # needed for disambiguation + "c_id": j.c.c_id, # needed for disambiguation "d_c_id": j.c.d_c_id, # needed for disambiguation "b_id": [j.c.b_id, j.c.d_b_id], "d_id": j.c.d_id, - } + }, ) A.b = relationship(B_viacd, primaryjoin=A.b_id == B_viacd.c.b_id) @@ -185,14 +188,14 @@ of collections all in one query without using JOIN or subqueries at all. 
Given a mapping:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) bs = relationship("B", lazy="selectin") class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) a_id = Column(ForeignKey("a.id")) @@ -349,7 +352,7 @@ where the ``del`` operation is roughly equivalent to setting the attribute to th some_object = session.query(SomeObject).get(5) - del some_object.some_attribute # from a SQL perspective, works like "= None" + del some_object.some_attribute # from a SQL perspective, works like "= None" :ticket:`4354` @@ -366,10 +369,9 @@ along with that object's full lifecycle in memory:: from sqlalchemy import inspect - u1 = User(id=7, name='ed') - - inspect(u1).info['user_info'] = '7|ed' + u1 = User(id=7, name="ed") + inspect(u1).info["user_info"] = "7|ed" :ticket:`4257` @@ -399,23 +401,22 @@ Association proxy has new cascade_scalar_deletes flag Given a mapping as:: class A(Base): - __tablename__ = 'test_a' + __tablename__ = "test_a" id = Column(Integer, primary_key=True) - ab = relationship( - 'AB', backref='a', uselist=False) + ab = relationship("AB", backref="a", uselist=False) b = association_proxy( - 'ab', 'b', creator=lambda b: AB(b=b), - cascade_scalar_deletes=True) + "ab", "b", creator=lambda b: AB(b=b), cascade_scalar_deletes=True + ) class B(Base): - __tablename__ = 'test_b' + __tablename__ = "test_b" id = Column(Integer, primary_key=True) - ab = relationship('AB', backref='b', cascade='all, delete-orphan') + ab = relationship("AB", backref="b", cascade="all, delete-orphan") class AB(Base): - __tablename__ = 'test_ab' + __tablename__ = "test_ab" a_id = Column(Integer, ForeignKey(A.id), primary_key=True) b_id = Column(Integer, ForeignKey(B.id), primary_key=True) @@ -490,7 +491,7 @@ to a class-specific :class:`.AssociationProxyInstance`, demonstrated as:: class User(Base): # ... 
- keywords = association_proxy('kws', 'keyword') + keywords = association_proxy("kws", "keyword") proxy_state = inspect(User).all_orm_descriptors["keywords"].for_class(User) @@ -522,6 +523,7 @@ and is **not** an object reference or another association proxy:: # column-based association proxy values = association_proxy("elements", "value") + class Element(Base): # ... @@ -530,7 +532,7 @@ and is **not** an object reference or another association proxy:: The ``User.values`` association proxy refers to the ``Element.value`` column. Standard column operations are now available, such as ``like``:: - >>> print(s.query(User).filter(User.values.like('%foo%'))) + >>> print(s.query(User).filter(User.values.like("%foo%"))) SELECT "user".id AS user_id FROM "user" WHERE EXISTS (SELECT 1 @@ -539,7 +541,7 @@ Standard column operations are now available, such as ``like``:: ``equals``:: - >>> print(s.query(User).filter(User.values == 'foo')) + >>> print(s.query(User).filter(User.values == "foo")) SELECT "user".id AS user_id FROM "user" WHERE EXISTS (SELECT 1 @@ -564,7 +566,7 @@ comparison operator; **this is a change in behavior** in that previously, the association proxy used ``.contains`` as a list containment operator only. With a column-oriented comparison, it now behaves like a "like":: - >>> print(s.query(User).filter(User.values.contains('foo'))) + >>> print(s.query(User).filter(User.values.contains("foo"))) SELECT "user".id AS user_id FROM "user" WHERE EXISTS (SELECT 1 @@ -579,7 +581,7 @@ When using an object-based association proxy with a collection, the behavior is as before, that of testing for collection membership, e.g. given a mapping:: class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) user_elements = relationship("UserElement") @@ -589,7 +591,7 @@ as before, that of testing for collection membership, e.g. 
given a mapping:: class UserElement(Base): - __tablename__ = 'user_element' + __tablename__ = "user_element" id = Column(Integer, primary_key=True) user_id = Column(ForeignKey("user.id")) @@ -598,7 +600,7 @@ as before, that of testing for collection membership, e.g. given a mapping:: class Element(Base): - __tablename__ = 'element' + __tablename__ = "element" id = Column(Integer, primary_key=True) value = Column(String) @@ -633,21 +635,21 @@ any use cases arise where it causes side effects. As an example, given a mapping with association proxy:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) bs = relationship("B") - b_data = association_proxy('bs', 'data') + b_data = association_proxy("bs", "data") class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) a_id = Column(ForeignKey("a.id")) data = Column(String) - a1 = A(bs=[B(data='b1'), B(data='b2')]) + a1 = A(bs=[B(data="b1"), B(data="b2")]) b_data = a1.b_data @@ -671,7 +673,7 @@ Above, because the ``A`` object would be garbage collected before the The change is that the ``b_data`` collection is now maintaining a strong reference to the ``a1`` object, so that it remains present:: - assert b_data == ['b1', 'b2'] + assert b_data == ["b1", "b2"] This change introduces the side effect that if an application is passing around the collection as above, **the parent object won't be garbage collected** until @@ -699,7 +701,9 @@ new association objects where appropriate:: id = Column(Integer, primary_key=True) b_rel = relationship( - "B", collection_class=set, cascade="all, delete-orphan", + "B", + collection_class=set, + cascade="all, delete-orphan", ) b = association_proxy("b_rel", "value", creator=lambda x: B(value=x)) @@ -712,6 +716,7 @@ new association objects where appropriate:: a_id = Column(Integer, ForeignKey("test_a.id"), nullable=False) value = Column(String) + # ... 
s = Session(e) @@ -728,7 +733,6 @@ new association objects where appropriate:: # against the deleted ones. assert len(s.new) == 1 - :ticket:`2642` .. _change_1103: @@ -749,14 +753,14 @@ having a duplicate temporarily present in the list is intrinsic to a Python "swap" operation. Given a standard one-to-many/many-to-one setup:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) bs = relationship("B", backref="a") class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) a_id = Column(ForeignKey("a.id")) @@ -780,7 +784,7 @@ during the flush. The same issue can be demonstrated using plain duplicates:: >>> del a1.bs[1] >>> a1.bs # collection is unaffected so far... [<__main__.B object at 0x7f047af5fb70>] - >>> b1.a # however b1.a is None + >>> b1.a # however b1.a is None >>> >>> session.add(a1) >>> session.commit() # so upon flush + expire.... @@ -955,21 +959,21 @@ been removed. Previously, this did not take place for one-to-many, or one-to-one relationships, in the following situation:: class User(Base): - __tablename__ = 'users' + __tablename__ = "users" id = Column(Integer, primary_key=True) - addresses = relationship( - "Address", - passive_deletes="all") + addresses = relationship("Address", passive_deletes="all") + class Address(Base): - __tablename__ = 'addresses' + __tablename__ = "addresses" id = Column(Integer, primary_key=True) email = Column(String) - user_id = Column(Integer, ForeignKey('users.id')) + user_id = Column(Integer, ForeignKey("users.id")) user = relationship("User") + u1 = session.query(User).first() address = u1.addresses[0] u1.addresses.remove(address) @@ -1006,16 +1010,17 @@ joined together either with no separator or with an underscore separator. 
Below we define a convention that will name :class:`.UniqueConstraint` constraints with a name that joins together the names of all columns:: - metadata_obj = MetaData(naming_convention={ - "uq": "uq_%(table_name)s_%(column_0_N_name)s" - }) + metadata_obj = MetaData( + naming_convention={"uq": "uq_%(table_name)s_%(column_0_N_name)s"} + ) table = Table( - 'info', metadata_obj, - Column('a', Integer), - Column('b', Integer), - Column('c', Integer), - UniqueConstraint('a', 'b', 'c') + "info", + metadata_obj, + Column("a", Integer), + Column("b", Integer), + Column("c", Integer), + UniqueConstraint("a", "b", "c"), ) The CREATE TABLE for the above table will render as:: @@ -1037,11 +1042,12 @@ PostgreSQL where identifiers cannot be longer than 63 characters, a long constraint name would normally be generated from the table definition below:: long_names = Table( - 'long_names', metadata_obj, - Column('information_channel_code', Integer, key='a'), - Column('billing_convention_name', Integer, key='b'), - Column('product_identifier', Integer, key='c'), - UniqueConstraint('a', 'b', 'c') + "long_names", + metadata_obj, + Column("information_channel_code", Integer, key="a"), + Column("billing_convention_name", Integer, key="b"), + Column("product_identifier", Integer, key="c"), + UniqueConstraint("a", "b", "c"), ) The truncation logic will ensure a too-long name isn't generated for the @@ -1137,17 +1143,16 @@ modifier to produce a :class:`.BinaryExpression` that has a "left" and a "right" side:: class Venue(Base): - __tablename__ = 'venue' + __tablename__ = "venue" id = Column(Integer, primary_key=True) name = Column(String) descendants = relationship( "Venue", - primaryjoin=func.instr( - remote(foreign(name)), name + "/" - ).as_comparison(1, 2) == 1, + primaryjoin=func.instr(remote(foreign(name)), name + "/").as_comparison(1, 2) + == 1, viewonly=True, - order_by=name + order_by=name, ) Above, the :paramref:`_orm.relationship.primaryjoin` of the "descendants" relationship @@ 
-1162,8 +1167,12 @@ lazyload to produce SQL like:: and a joinedload, such as:: - v1 = s.query(Venue).filter_by(name="parent1").options( - joinedload(Venue.descendants)).one() + v1 = ( + s.query(Venue) + .filter_by(name="parent1") + .options(joinedload(Venue.descendants)) + .one() + ) to work as:: @@ -1195,12 +1204,12 @@ backend, such as "SELECT CAST(NULL AS INTEGER) WHERE 1!=1" for PostgreSQL, >>> from sqlalchemy import select, literal_column, bindparam >>> e = create_engine("postgresql://scott:tiger@localhost/test", echo=True) >>> with e.connect() as conn: - ... conn.execute( - ... select([literal_column('1')]). - ... where(literal_column('1').in_(bindparam('q', expanding=True))), - ... q=[] - ... ) - ... + ... conn.execute( + ... select([literal_column("1")]).where( + ... literal_column("1").in_(bindparam("q", expanding=True)) + ... ), + ... q=[], + ... ) SELECT 1 WHERE 1 IN (SELECT CAST(NULL AS INTEGER) WHERE 1!=1) The feature also works for tuple-oriented IN statements, where the "empty IN" @@ -1211,12 +1220,12 @@ such as on PostgreSQL:: >>> from sqlalchemy import select, literal_column, tuple_, bindparam >>> e = create_engine("postgresql://scott:tiger@localhost/test", echo=True) >>> with e.connect() as conn: - ... conn.execute( - ... select([literal_column('1')]). - ... where(tuple_(50, "somestring").in_(bindparam('q', expanding=True))), - ... q=[] - ... ) - ... + ... conn.execute( + ... select([literal_column("1")]).where( + ... tuple_(50, "somestring").in_(bindparam("q", expanding=True)) + ... ), + ... q=[], + ... 
) SELECT 1 WHERE (%(param_1)s, %(param_2)s) IN (SELECT CAST(NULL AS INTEGER), CAST(NULL AS VARCHAR) WHERE 1!=1) @@ -1239,6 +1248,7 @@ variant expression in order to locate these methods:: from sqlalchemy import TypeDecorator, LargeBinary, func + class CompressedLargeBinary(TypeDecorator): impl = LargeBinary @@ -1248,13 +1258,15 @@ variant expression in order to locate these methods:: def column_expression(self, col): return func.uncompress(col, type_=self) + MyLargeBinary = LargeBinary().with_variant(CompressedLargeBinary(), "sqlite") The above expression will render a function within SQL when used on SQLite only:: from sqlalchemy import select, column from sqlalchemy.dialects import sqlite - print(select([column('x', CompressedLargeBinary)]).compile(dialect=sqlite.dialect())) + + print(select([column("x", CompressedLargeBinary)]).compile(dialect=sqlite.dialect())) will render:: @@ -1445,17 +1457,20 @@ queries used until now. Given a schema such as:: dv = Table( - 'data_values', metadata_obj, - Column('modulus', Integer, nullable=False), - Column('data', String(30)), - postgresql_partition_by='range(modulus)') + "data_values", + metadata_obj, + Column("modulus", Integer, nullable=False), + Column("data", String(30)), + postgresql_partition_by="range(modulus)", + ) sa.event.listen( dv, "after_create", sa.DDL( "CREATE TABLE data_values_4_10 PARTITION OF data_values " - "FOR VALUES FROM (4) TO (10)") + "FOR VALUES FROM (4) TO (10)" + ), ) The two table names ``'data_values'`` and ``'data_values_4_10'`` will come @@ -1492,9 +1507,7 @@ can now be explicitly ordered by passing a list of 2-tuples:: from sqlalchemy.dialects.mysql import insert - insert_stmt = insert(my_table).values( - id='some_existing_id', - data='inserted value') + insert_stmt = insert(my_table).values(id="some_existing_id", data="inserted value") on_duplicate_key_stmt = insert_stmt.on_duplicate_key_update( [ @@ -1542,10 +1555,11 @@ keyword added to objects like :class:`.UniqueConstraint` as well as 
several :class:`_schema.Column` -specific variants:: some_table = Table( - 'some_table', metadata_obj, - Column('id', Integer, primary_key=True, sqlite_on_conflict_primary_key='FAIL'), - Column('data', Integer), - UniqueConstraint('id', 'data', sqlite_on_conflict='IGNORE') + "some_table", + metadata_obj, + Column("id", Integer, primary_key=True, sqlite_on_conflict_primary_key="FAIL"), + Column("data", Integer), + UniqueConstraint("id", "data", sqlite_on_conflict="IGNORE"), ) The above table would render in a CREATE TABLE statement as:: @@ -1651,7 +1665,8 @@ Pass it via :func:`_sa.create_engine`:: engine = create_engine( "mssql+pyodbc://scott:tiger@mssql2017:1433/test?driver=ODBC+Driver+13+for+SQL+Server", - fast_executemany=True) + fast_executemany=True, + ) .. seealso:: @@ -1678,12 +1693,16 @@ new ``mssql_identity_start`` and ``mssql_identity_increment`` parameters on :class:`_schema.Column`:: test = Table( - 'test', metadata_obj, + "test", + metadata_obj, Column( - 'id', Integer, primary_key=True, mssql_identity_start=100, - mssql_identity_increment=10 + "id", + Integer, + primary_key=True, + mssql_identity_start=100, + mssql_identity_increment=10, ), - Column('name', String(20)) + Column("name", String(20)), ) In order to emit ``IDENTITY`` on a non-primary key column, which is a little-used @@ -1693,9 +1712,10 @@ primary key column:: test = Table( - 'test', metadata_obj, - Column('id', Integer, primary_key=True, autoincrement=False), - Column('number', Integer, autoincrement=True) + "test", + metadata_obj, + Column("id", Integer, primary_key=True, autoincrement=False), + Column("number", Integer, autoincrement=True), ) .. 
seealso:: diff --git a/doc/build/changelog/migration_14.rst b/doc/build/changelog/migration_14.rst index b6cce48849..089715bf6d 100644 --- a/doc/build/changelog/migration_14.rst +++ b/doc/build/changelog/migration_14.rst @@ -70,9 +70,12 @@ to be used freely against ORM entities:: with Session(engine, future=True) as sess: - stmt = select(User).where( - User.name == 'sandy' - ).join(User.addresses).where(Address.email_address.like("%gmail%")) + stmt = ( + select(User) + .where(User.name == "sandy") + .join(User.addresses) + .where(Address.email_address.like("%gmail%")) + ) result = sess.execute(stmt) @@ -121,16 +124,19 @@ Similar adjustments have been made to "bulk updates and deletes" such that Core :func:`_sql.update` and :func:`_sql.delete` can be used for bulk operations. A bulk update like the following:: - session.query(User).filter(User.name == 'sandy').update({"password": "foobar"}, synchronize_session="fetch") + session.query(User).filter(User.name == "sandy").update( + {"password": "foobar"}, synchronize_session="fetch" + ) can now be achieved in :term:`2.0 style` (and indeed the above runs internally in this way) as follows:: with Session(engine, future=True) as sess: - stmt = update(User).where( - User.name == 'sandy' - ).values(password="foobar").execution_options( - synchronize_session="fetch" + stmt = ( + update(User) + .where(User.name == "sandy") + .values(password="foobar") + .execution_options(synchronize_session="fetch") ) sess.execute(stmt) @@ -676,7 +682,7 @@ that are in the columns clause of the SELECT statement. 
A common beginner mistake is code such as the following::

 stmt = select(users) - stmt = stmt.where(stmt.c.name == 'foo') + stmt = stmt.where(stmt.c.name == "foo") The above code appears intuitive and that it would generate "SELECT * FROM users WHERE name='foo'", however veteran SQLAlchemy users will @@ -688,8 +694,7 @@ the use case above, as in a case like the above it links directly to the columns present in the ``users.c`` collection:: stmt = select(users) - stmt = stmt.where(stmt.selected_columns.name == 'foo') - + stmt = stmt.where(stmt.selected_columns.name == "foo") :ticket:`4617` @@ -745,7 +750,9 @@ With the new implementation, :meth:`_sql.Select.join` and :meth:`_orm.Query.join`, adding JOIN criteria to the existing statement by matching to the left entity:: - stmt = select(user_table).join(addresses_table, user_table.c.id == addresses_table.c.user_id) + stmt = select(user_table).join( + addresses_table, user_table.c.id == addresses_table.c.user_id + ) producing:: @@ -839,7 +846,7 @@ returns a new :class:`_engine.URL` object with changes applied:: To alter the contents of the :attr:`_engine.URL.query` dictionary, methods such as :meth:`_engine.URL.update_query_dict` may be used:: - >>> url.update_query_dict({"sslcert": '/path/to/crt'}) + >>> url.update_query_dict({"sslcert": "/path/to/crt"}) postgresql://user:***@host/dbname?sslcert=%2Fpath%2Fto%2Fcrt To upgrade code that is mutating these fields directly, a **backwards and @@ -855,6 +862,7 @@ style:: some_url.drivername = some_drivername return some_url + def set_ssl_cert(some_url, ssl_cert): # check for 1.4 if hasattr(some_url, "update_query_dict"): @@ -869,7 +877,9 @@ to strings, using sequences of strings to represent multiple parameters. For example:: >>> from sqlalchemy.engine import make_url - >>> url = make_url("postgresql://user:pass@host/dbname?alt_host=host1&alt_host=host2&sslcert=%2Fpath%2Fto%2Fcrt") + >>> url = make_url( + ... 
"postgresql://user:pass@host/dbname?alt_host=host1&alt_host=host2&sslcert=%2Fpath%2Fto%2Fcrt" + ... ) >>> url.query immutabledict({'alt_host': ('host1', 'host2'), 'sslcert': '/path/to/crt'}) @@ -901,25 +911,24 @@ method. A backwards compatible approach would look like:: from sqlalchemy.engine import CreateEnginePlugin + class MyPlugin(CreateEnginePlugin): def __init__(self, url, kwargs): # check for 1.4 style if hasattr(CreateEnginePlugin, "update_url"): - self.my_argument_one = url.query['my_argument_one'] - self.my_argument_two = url.query['my_argument_two'] + self.my_argument_one = url.query["my_argument_one"] + self.my_argument_two = url.query["my_argument_two"] else: # legacy - self.my_argument_one = url.query.pop('my_argument_one') - self.my_argument_two = url.query.pop('my_argument_two') + self.my_argument_one = url.query.pop("my_argument_one") + self.my_argument_two = url.query.pop("my_argument_two") - self.my_argument_three = kwargs.pop('my_argument_three', None) + self.my_argument_three = kwargs.pop("my_argument_three", None) def update_url(self, url): # this method runs in 1.4 only and should be used to consume # plugin-specific arguments - return url.difference_update_query( - ["my_argument_one", "my_argument_two"] - ) + return url.difference_update_query(["my_argument_one", "my_argument_two"]) See the docstring at :class:`_engine.CreateEnginePlugin` for complete details on how this class is used. 
@@ -974,9 +983,9 @@ track for the old calling style:: stmt = select(users_table).where( case( - (users_table.c.name == 'wendy', 'W'), - (users_table.c.name == 'jack', 'J'), - else_='E' + (users_table.c.name == "wendy", "W"), + (users_table.c.name == "jack", "J"), + else_="E", ) ) @@ -1128,9 +1137,11 @@ not line up with these two tables will create an additional FROM entry:: address_alias = aliased(Address) - q = session.query(User).\ - join(address_alias, User.addresses).\ - filter(Address.email_address == 'foo') + q = ( + session.query(User) + .join(address_alias, User.addresses) + .filter(Address.email_address == "foo") + ) The above query selects from a JOIN of ``User`` and ``address_alias``, the latter of which is an alias of the ``Address`` entity. However, the @@ -1189,11 +1200,13 @@ JOIN clauses but also through the WHERE clause Above, we can add a WHERE clause to link the new ``Address`` entity with the previous ``address_alias`` entity and that will remove the warning:: - q = session.query(User).\ - join(address_alias, User.addresses).\ - filter(Address.email_address == 'foo').\ - filter(Address.id == address_alias.id) # resolve cartesian products, - # will no longer warn + q = ( + session.query(User) + .join(address_alias, User.addresses) + .filter(Address.email_address == "foo") + .filter(Address.id == address_alias.id) + ) # resolve cartesian products, + # will no longer warn The cartesian product warning considers **any** kind of link between two FROM clauses to be a resolution, even if the end result set is still @@ -1201,11 +1214,13 @@ wasteful, as the linter is intended only to detect the common case of a FROM clause that is completely unexpected. 
If the FROM clause is referred to explicitly elsewhere and linked to the other FROMs, no warning is emitted:: - q = session.query(User).\ - join(address_alias, User.addresses).\ - filter(Address.email_address == 'foo').\ - filter(Address.id > address_alias.id) # will generate a lot of rows, - # but no warning + q = ( + session.query(User) + .join(address_alias, User.addresses) + .filter(Address.email_address == "foo") + .filter(Address.id > address_alias.id) + ) # will generate a lot of rows, + # but no warning Full cartesian products are also allowed if they are explicitly stated; if we wanted for example the cartesian product of ``User`` and ``Address``, we can @@ -1256,7 +1271,6 @@ including methods such as: with engine.connect() as conn: row = conn.execute(table.select().where(table.c.id == 5)).one() - :meth:`_engine.Result.one_or_none` - same, but also returns None for no rows :meth:`_engine.Result.all` - returns all rows @@ -1278,12 +1292,12 @@ including methods such as: .. sourcecode:: with engine.connect() as conn: - # requests x, y, z - result = conn.execute(select(table.c.x, table.c.y, table.c.z)) + # requests x, y, z + result = conn.execute(select(table.c.x, table.c.y, table.c.z)) - # iterate rows as y, x - for y, x in result.columns("y", "x"): - print("Y: %s X: %s" % (y, x)) + # iterate rows as y, x + for y, x in result.columns("y", "x"): + print("Y: %s X: %s" % (y, x)) :meth:`_engine.Result.scalars` - returns lists of scalar objects, from the first column by default but can also be selected: @@ -1300,10 +1314,10 @@ dictionaries: .. 
sourcecode:: with engine.connect() as conn: - result = conn.execute(select(table.c.x, table.c.y, table.c.z)) + result = conn.execute(select(table.c.x, table.c.y, table.c.z)) - for map_ in result.mappings(): - print("Y: %(y)s X: %(x)s" % map_) + for map_ in result.mappings(): + print("Y: %(y)s X: %(x)s" % map_) When using Core, the object returned by :meth:`_engine.Connection.execute` is an instance of :class:`.CursorResult`, which continues to feature the same API @@ -1374,8 +1388,8 @@ can be summarized. Given a "named tuple" in pseudo code as:: The biggest cross-incompatible difference is the behavior of ``__contains__``:: - "id" in row # True for a mapping, False for a named tuple - "some name" in row # False for a mapping, True for a named tuple + "id" in row # True for a mapping, False for a named tuple + "some name" in row # False for a mapping, True for a named tuple In 1.4, when a :class:`.LegacyRow` is returned by a Core result set, the above ``"id" in row`` comparison will continue to succeed, however a deprecation @@ -1402,7 +1416,7 @@ when the row was first fetched. 
This means for example when retrieving a datetime value from SQLite, the data for the row as present in the :class:`.RowProxy` object would previously have looked like:: - row_proxy = (1, '2019-12-31 19:56:58.272106') + row_proxy = (1, "2019-12-31 19:56:58.272106") and then upon access via ``__getitem__``, the ``datetime.strptime()`` function would be used on the fly to convert the above string date into a ``datetime`` @@ -1478,8 +1492,8 @@ allows for greater cross-compatibility between the two, which is a key goal of the 2.0 transition:: >>> from sqlalchemy import column, select - >>> c1, c2, c3, c4 = column('c1'), column('c2'), column('c3'), column('c4') - >>> stmt = select(c1, c2, c3.label('c2'), c2, c4) + >>> c1, c2, c3, c4 = column("c1"), column("c2"), column("c3"), column("c4") + >>> stmt = select(c1, c2, c3.label("c2"), c2, c4) >>> print(stmt) SELECT c1, c2, c3 AS c2, c2, c4 @@ -1522,7 +1536,7 @@ does not imply deduplication of column objects, although it does imply deduplication of implicitly generated labels:: >>> from sqlalchemy import table - >>> user = table('user', column('id'), column('name')) + >>> user = table("user", column("id"), column("name")) >>> stmt = select(user.c.id, user.c.name, user.c.id).apply_labels() >>> print(stmt) SELECT "user".id AS user_id, "user".name AS user_name, "user".id AS id_1 @@ -1606,7 +1620,7 @@ prominently with CAST:: For CAST against expressions that don't have a name, the previous logic is used to generate the usual "anonymous" labels:: - >>> print(select(cast('hi there,' + foo.c.data, String))) + >>> print(select(cast("hi there," + foo.c.data, String))) SELECT CAST(:data_1 + foo.data AS VARCHAR) AS anon_1 FROM foo @@ -1614,14 +1628,14 @@ A :func:`.cast` against a :class:`.Label`, despite having to omit the label expression as these don't render inside of a CAST, will nonetheless make use of the given name:: - >>> print(select(cast(('hi there,' + foo.c.data).label('hello_data'), String))) + >>> print(select(cast(("hi 
there," + foo.c.data).label("hello_data"), String))) SELECT CAST(:data_1 + foo.data AS VARCHAR) AS hello_data FROM foo And of course as was always the case, :class:`.Label` can be applied to the expression on the outside to apply an "AS " label directly:: - >>> print(select(cast(('hi there,' + foo.c.data), String).label('hello_data'))) + >>> print(select(cast(("hi there," + foo.c.data), String).label("hello_data"))) SELECT CAST(:data_1 + foo.data AS VARCHAR) AS hello_data FROM foo @@ -1768,7 +1782,6 @@ flags to ``True``:: boolean = Column(Boolean(create_constraint=True)) enum = Column(Enum("a", "b", "c", create_constraint=True)) - :ticket:`5367` New Features - ORM @@ -1796,13 +1809,14 @@ To configure column-level raiseload on a mapping, the the attribute:: class Book(Base): - __tablename__ = 'book' + __tablename__ = "book" book_id = Column(Integer, primary_key=True) title = Column(String(200), nullable=False) summary = deferred(Column(String(2000)), raiseload=True) excerpt = deferred(Column(Text), raiseload=True) + book_w_excerpt = session.query(Book).options(undefer(Book.excerpt)).first() It was originally considered that the existing :func:`.raiseload` option that @@ -1810,8 +1824,7 @@ works for :func:`_orm.relationship` attributes be expanded to also support colum attributes. 
However, this would break the "wildcard" behavior of :func:`.raiseload`, which is documented as allowing one to prevent all relationships from loading:: - session.query(Order).options( - joinedload(Order.items), raiseload('*')) + session.query(Order).options(joinedload(Order.items), raiseload("*")) Above, if we had expanded :func:`.raiseload` to accommodate for columns as well, the wildcard would also prevent columns from loading and thus be a @@ -2003,11 +2016,7 @@ as entity / column should work:: row._mapping[u1] # same as row[0] - row = ( - s.query(User.id, Address.email_address) - .join(User.addresses) - .first() - ) + row = s.query(User.id, Address.email_address).join(User.addresses).first() row._mapping[User.id] # same as row[0] row._mapping["id"] # same as row[0] @@ -2202,13 +2211,11 @@ use of the :paramref:`_orm.Session.future` flag to :term:`2.0-style` mode:: Session = sessionmaker(engine, future=True) with Session() as session: - u1 = User() - session.add(u1) - - a1 = Address() - a1.user = u1 # <--- will not add "a1" to the Session - + u1 = User() + session.add(u1) + a1 = Address() + a1.user = u1 # <--- will not add "a1" to the Session :ticket:`5150` @@ -2225,7 +2232,7 @@ selectin/subquery loaders will run an "immediateload" operation for a given relationship, when an expired object is unexpired or an object is refreshed:: >>> a1 = session.query(A).options(joinedload(A.bs)).first() - >>> a1.data = 'new data' + >>> a1.data = "new data" >>> session.commit() Above, the ``A`` object was loaded with a ``joinedload()`` option associated @@ -2251,7 +2258,7 @@ a refresh scenario, which resembles the query emitted by "lazyload", emitted as an additional query:: >>> a1 = session.query(A).options(selectinload(A.bs)).first() - >>> a1.data = 'new data' + >>> a1.data = "new data" >>> session.commit() >>> a1.data SELECT a.id AS a_id, a.data AS a_data @@ -2333,9 +2340,11 @@ eventually identified in :ticket:`4519` where this empty collection could be harmful, which is 
when the object is merged into a session:: >>> u1 = User(id=1) # create an empty User to merge with id=1 in the database - >>> merged1 = session.merge(u1) # value of merged1.addresses is unchanged from that of the DB + >>> merged1 = session.merge( + ... u1 + ... ) # value of merged1.addresses is unchanged from that of the DB - >>> u2 = User(id=2) # create an empty User to merge with id=2 in the database + >>> u2 = User(id=2) # create an empty User to merge with id=2 in the database >>> u2.addresses [] >>> merged2 = session.merge(u2) # value of merged2.addresses has been emptied in the DB @@ -2364,7 +2373,9 @@ however is not added to ``__dict__`` until it is actually mutated:: >>> u1 = User() >>> l1 = u1.addresses # new list is created, associated with the state >>> assert u1.addresses is l1 # you get the same list each time you access it - >>> assert "addresses" not in u1.__dict__ # but it won't go into __dict__ until it's mutated + >>> assert ( + ... "addresses" not in u1.__dict__ + ... ) # but it won't go into __dict__ until it's mutated >>> from sqlalchemy import inspect >>> inspect(u1).attrs.addresses.history History(added=None, unchanged=None, deleted=None) @@ -2386,7 +2397,9 @@ the object contains certain values based on its ``__dict__``:: >>> u1.addresses [] # this will now fail, would pass before - >>> assert {k: v for k, v in u1.__dict__.items() if not k.startswith("_")} == {"addresses": []} + >>> assert {k: v for k, v in u1.__dict__.items() if not k.startswith("_")} == { + ... "addresses": [] + ... 
} or to ensure that the collection won't require a lazy load to proceed, the (admittedly awkward) code below will now also fail:: @@ -2415,10 +2428,11 @@ SQLAlchemy has always had logic to detect when an object in the :class:`.Session to be inserted has the same primary key as an object that is already present:: class Product(Base): - __tablename__ = 'product' + __tablename__ = "product" id = Column(Integer, primary_key=True) + session = Session(engine) # add Product with primary key 1 @@ -2500,8 +2514,7 @@ disallowed:: # ... # this is now an error - addresses = relationship( - "Address", viewonly=True, cascade="all, delete-orphan") + addresses = relationship("Address", viewonly=True, cascade="all, delete-orphan") The above will raise:: @@ -2542,10 +2555,7 @@ inheritance mapping:: s.commit() - print( - s.query(Manager).select_entity_from(s.query(Employee).subquery()).all() - ) - + print(s.query(Manager).select_entity_from(s.query(Employee).subquery()).all()) The subquery selects both the ``Engineer`` and the ``Manager`` rows, and even though the outer query is against ``Manager``, we get a non ``Manager`` @@ -2818,8 +2828,9 @@ effect. 
When "optional" is used on a :class:`.Sequence` that is present in the integer primary key column of a table:: Table( - "some_table", metadata, - Column("id", Integer, Sequence("some_seq", optional=True), primary_key=True) + "some_table", + metadata, + Column("id", Integer, Sequence("some_seq", optional=True), primary_key=True), ) The above :class:`.Sequence` is only used for DDL and INSERT statements if the diff --git a/doc/build/changelog/migration_20.rst b/doc/build/changelog/migration_20.rst index 626574cc06..105108434f 100644 --- a/doc/build/changelog/migration_20.rst +++ b/doc/build/changelog/migration_20.rst @@ -234,7 +234,6 @@ as a bonus our program is much clearer:: print(result.fetchall()) - The goal of "2.0 deprecations mode" is that a program which runs with no :class:`_exc.RemovedIn20Warning` warnings with "2.0 deprecations mode" turned on is then ready to run in SQLAlchemy 2.0. @@ -262,24 +261,23 @@ the SQLAlchemy project itself, the approach taken is as follows: from sqlalchemy import exc # for warnings not included in regex-based filter below, just log - warnings.filterwarnings( - "always", category=exc.RemovedIn20Warning - ) + warnings.filterwarnings("always", category=exc.RemovedIn20Warning) # for warnings related to execute() / scalar(), raise for msg in [ r"The (?:Executable|Engine)\.(?:execute|scalar)\(\) function", - r"The current statement is being autocommitted using implicit " - "autocommit,", + r"The current statement is being autocommitted using implicit " "autocommit,", r"The connection.execute\(\) method in SQLAlchemy 2.0 will accept " "parameters as a single dictionary or a single sequence of " "dictionaries only.", r"The Connection.connect\(\) function/method is considered legacy", r".*DefaultGenerator.execute\(\)", ]: - warnings.filterwarnings( - "error", message=msg, category=exc.RemovedIn20Warning, - ) + warnings.filterwarnings( + "error", + message=msg, + category=exc.RemovedIn20Warning, + ) 3. 
As each sub-category of warnings are resolved in the application, new warnings that are caught by the "always" filter can be added to the list @@ -325,8 +323,6 @@ The new engine is described at :class:`_future.Engine` which delivers a new conn.commit() # commit as you go - - Migration to 2.0 Step Five - Use the ``future`` flag on Session --------------------------------------------------------------- @@ -360,6 +356,7 @@ in 1.4 which are now closely matched to the patterns used by the :class:`_orm.Session` may be used as a context manager:: from sqlalchemy.orm import Session + with Session(engine) as session: session.add(MyObject()) session.commit() @@ -405,7 +402,7 @@ the underlying DBAPI transaction, but in SQLAlchemy conn = engine.connect() # won't autocommit in 2.0 - conn.execute(some_table.insert().values(foo='bar')) + conn.execute(some_table.insert().values(foo="bar")) Nor will this autocommit:: @@ -421,10 +418,7 @@ execution option, will be removed:: conn = engine.connect() # won't autocommit in 2.0 - conn.execute( - text("EXEC my_procedural_thing()").execution_options(autocommit=True) - ) - + conn.execute(text("EXEC my_procedural_thing()").execution_options(autocommit=True)) **Migration to 2.0** @@ -433,13 +427,13 @@ style` execution is to make use of the :meth:`_engine.Connection.begin` method, or the :meth:`_engine.Engine.begin` context manager:: with engine.begin() as conn: - conn.execute(some_table.insert().values(foo='bar')) - conn.execute(some_other_table.insert().values(bat='hoho')) + conn.execute(some_table.insert().values(foo="bar")) + conn.execute(some_other_table.insert().values(bat="hoho")) with engine.connect() as conn: with conn.begin(): - conn.execute(some_table.insert().values(foo='bar')) - conn.execute(some_other_table.insert().values(bat='hoho')) + conn.execute(some_table.insert().values(foo="bar")) + conn.execute(some_other_table.insert().values(bat="hoho")) with engine.begin() as conn: conn.execute(text("EXEC my_procedural_thing()")) @@ 
-451,8 +445,8 @@ when a statement is first invoked in the absence of an explicit call to :meth:`_future.Connection.begin`:: with engine.connect() as conn: - conn.execute(some_table.insert().values(foo='bar')) - conn.execute(some_other_table.insert().values(bat='hoho')) + conn.execute(some_table.insert().values(foo="bar")) + conn.execute(some_other_table.insert().values(bat="hoho")) conn.commit() @@ -490,7 +484,7 @@ explicit as to how the transaction should be used. For the vast majority of Core use cases, it's the pattern that is already recommended:: with engine.begin() as conn: - conn.execute(some_table.insert().values(foo='bar')) + conn.execute(some_table.insert().values(foo="bar")) For "commit as you go, or rollback instead" usage, which resembles how the :class:`_orm.Session` is normally used today, the "future" version of @@ -508,7 +502,7 @@ a statement is first invoked:: engine = create_engine(..., future=True) with engine.connect() as conn: - conn.execute(some_table.insert().values(foo='bar')) + conn.execute(some_table.insert().values(foo="bar")) conn.commit() conn.execute(text("some other SQL")) @@ -558,11 +552,11 @@ execution patterns, is removed:: metadata_obj = MetaData(bind=engine) # no longer supported - metadata_obj.create_all() # requires Engine or Connection + metadata_obj.create_all() # requires Engine or Connection metadata_obj.reflect() # requires Engine or Connection - t = Table('t', metadata_obj, autoload=True) # use autoload_with=engine + t = Table("t", metadata_obj, autoload=True) # use autoload_with=engine result = engine.execute(t.select()) # no longer supported @@ -592,7 +586,7 @@ the ORM-level :meth:`_orm.Session.execute` method):: metadata_obj.reflect(engine) # reflect individual table - t = Table('t', metadata_obj, autoload_with=engine) + t = Table("t", metadata_obj, autoload_with=engine) # connection level: @@ -607,12 +601,11 @@ the ORM-level :meth:`_orm.Session.execute` method):: metadata_obj.reflect(connection) # reflect individual 
table - t = Table('t', metadata_obj, autoload_with=connection) + t = Table("t", metadata_obj, autoload_with=connection) # execute SQL statements result = conn.execute(t.select()) - **Discussion** @@ -736,7 +729,6 @@ in the case that the operation is a write operation:: with conn.begin(): result = conn.execute(stmt) - execute() method more strict, execution options are more prominent ------------------------------------------------------------------------------- @@ -756,18 +748,16 @@ require modification:: # positional parameters no longer supported, only named # unless using exec_driver_sql() - result = connection.execute(table.insert(), ('x', 'y', 'z')) + result = connection.execute(table.insert(), ("x", "y", "z")) # **kwargs no longer accepted, pass a single dictionary result = connection.execute(table.insert(), x=10, y=5) # multiple *args no longer accepted, pass a list result = connection.execute( - table.insert(), - {"x": 10, "y": 5}, {"x": 15, "y": 12}, {"x": 9, "y": 8} + table.insert(), {"x": 10, "y": 5}, {"x": 15, "y": 12}, {"x": 9, "y": 8} ) - **Migration to 2.0** The new :meth:`_future.Connection.execute` method now accepts a subset of the @@ -778,6 +768,7 @@ method, so the following code is cross-compatible between 1.x and 2.0:: connection = engine.connect() from sqlalchemy import text + result = connection.execute(text("select * from table")) # pass a single dictionary for single statement execution @@ -785,12 +776,9 @@ method, so the following code is cross-compatible between 1.x and 2.0:: # pass a list of dictionaries for executemany result = connection.execute( - table.insert(), - [{"x": 10, "y": 5}, {"x": 15, "y": 12}, {"x": 9, "y": 8}] + table.insert(), [{"x": 10, "y": 5}, {"x": 15, "y": 12}, {"x": 9, "y": 8}] ) - - **Discussion** The use of ``*args`` and ``**kwargs`` has been removed both to remove the @@ -832,11 +820,10 @@ tuples when using "future" mode:: row = result.first() # suppose the row is (1, 2) - "x" in row # evaluates to False, in 1.x 
/ future=False, this would be True + "x" in row # evaluates to False, in 1.x / future=False, this would be True 1 in row # evaluates to True, in 1.x / future=False, this would be False - **Migration to 2.0** Application code or test suites that are testing for a particular key @@ -881,10 +868,7 @@ or attribute:: stmt = select(User, Address).join(User.addresses) for row in session.execute(stmt).mappings(): - print("the user is: %s the address is: %s" % ( - row[User], - row[Address] - )) + print("the user is: %s the address is: %s" % (row[User], row[Address])) .. seealso:: @@ -921,14 +905,10 @@ now accepts its WHEN criteria positionally, rather than as a list:: # list emits a deprecation warning case_clause = case( - [ - (table.c.x == 5, "five"), - (table.c.x == 7, "seven") - ], - else_="neither five nor seven" + [(table.c.x == 5, "five"), (table.c.x == 7, "seven")], + else_="neither five nor seven", ) - **Migration to 2.0** Only the "generative" style of :func:`_sql.select` will be supported. The list @@ -951,9 +931,7 @@ is cross-compatible with 1.4 and 2.0:: # case conditions passed positionally case_clause = case( - (table.c.x == 5, "five"), - (table.c.x == 7, "seven"), - else_="neither five nor seven" + (table.c.x == 5, "five"), (table.c.x == 7, "seven"), else_="neither five nor seven" ) **Discussion** @@ -973,7 +951,7 @@ documented style in the Core tutorial. Examples of "structural" vs. 
"data" elements are as follows:: # table columns for CREATE TABLE - structural - table = Table("table", metadata_obj, Column('x', Integer), Column('y', Integer)) + table = Table("table", metadata_obj, Column("x", Integer), Column("y", Integer)) # columns in a SELECT statement - structural stmt = select(table.c.x, table.c.y) @@ -1006,10 +984,7 @@ constructor arguments to :func:`_sql.insert`, :func:`_sql.update` and stmt = table.delete(table.c.x > 15) # no longer supported - stmt = table.update( - table.c.x < 15, - preserve_parameter_order=True - ).values( + stmt = table.update(table.c.x < 15, preserve_parameter_order=True).values( [(table.c.y, 20), (table.c.x, table.c.y + 10)] ) @@ -1028,10 +1003,12 @@ examples:: stmt = table.delete().where(table.c.x > 15) # use generative methods, ordered_values() replaces preserve_parameter_order - stmt = table.update().where( - table.c.x < 15, - ).ordered_values( - (table.c.y, 20), (table.c.x, table.c.y + 10) + stmt = ( + table.update() + .where( + table.c.x < 15, + ) + .ordered_values((table.c.y, 20), (table.c.x, table.c.y + 10)) ) **Discussion** @@ -1102,9 +1079,7 @@ Code that works with classical mappings should change imports and code from:: from sqlalchemy.orm import mapper - mapper(SomeClass, some_table, properties={ - "related": relationship(SomeRelatedClass) - }) + mapper(SomeClass, some_table, properties={"related": relationship(SomeRelatedClass)}) To work from a central :class:`_orm.registry` object:: @@ -1112,9 +1087,9 @@ To work from a central :class:`_orm.registry` object:: mapper_reg = registry() - mapper_reg.map_imperatively(SomeClass, some_table, properties={ - "related": relationship(SomeRelatedClass) - }) + mapper_reg.map_imperatively( + SomeClass, some_table, properties={"related": relationship(SomeRelatedClass)} + ) The above :class:`_orm.registry` is also the source for declarative mappings, and classical mappings now have access to this registry including string-based @@ -1126,19 +1101,23 @@ configuration on 
:func:`_orm.relationship`:: Base = mapper_reg.generate_base() + class SomeRelatedClass(Base): - __tablename__ = 'related' + __tablename__ = "related" # ... - mapper_reg.map_imperatively(SomeClass, some_table, properties={ - "related": relationship( - "SomeRelatedClass", - primaryjoin="SomeRelatedClass.related_id == SomeClass.id" - ) - }) - + mapper_reg.map_imperatively( + SomeClass, + some_table, + properties={ + "related": relationship( + "SomeRelatedClass", + primaryjoin="SomeRelatedClass.related_id == SomeClass.id", + ) + }, + ) **Discussion** @@ -1203,9 +1182,7 @@ following the table, and may include additional notes not summarized here. - :: - session.execute( - select(User) - ).scalars().all() + session.execute(select(User)).scalars().all() # or session.scalars(select(User)).all() @@ -1216,15 +1193,11 @@ following the table, and may include additional notes not summarized here. * - :: - session.query(User).\ - filter_by(name='some user').one() + session.query(User).filter_by(name="some user").one() - :: - session.execute( - select(User). - filter_by(name="some user") - ).scalar_one() + session.execute(select(User).filter_by(name="some user")).scalar_one() - :ref:`migration_20_unify_select` @@ -1232,17 +1205,11 @@ following the table, and may include additional notes not summarized here. * - :: - session.query(User).\ - filter_by(name='some user').first() - + session.query(User).filter_by(name="some user").first() - :: - session.scalars( - select(User). - filter_by(name="some user"). - limit(1) - ).first() + session.scalars(select(User).filter_by(name="some user").limit(1)).first() - :ref:`migration_20_unify_select` @@ -1250,34 +1217,22 @@ following the table, and may include additional notes not summarized here. * - :: - session.query(User).options( - joinedload(User.addresses) - ).all() + session.query(User).options(joinedload(User.addresses)).all() - :: - session.scalars( - select(User). 
- options( - joinedload(User.addresses) - ) - ).unique().all() + session.scalars(select(User).options(joinedload(User.addresses))).unique().all() - :ref:`joinedload_not_uniqued` * - :: - session.query(User).\ - join(Address).\ - filter(Address.email == 'e@sa.us').\ - all() + session.query(User).join(Address).filter(Address.email == "e@sa.us").all() - :: session.execute( - select(User). - join(Address). - where(Address.email == 'e@sa.us') + select(User).join(Address).where(Address.email == "e@sa.us") ).scalars().all() - :ref:`migration_20_unify_select` @@ -1286,37 +1241,27 @@ following the table, and may include additional notes not summarized here. * - :: - session.query(User).from_statement( - text("select * from users") - ).all() + session.query(User).from_statement(text("select * from users")).all() - :: - session.scalars( - select(User). - from_statement( - text("select * from users") - ) - ).all() + session.scalars(select(User).from_statement(text("select * from users"))).all() - :ref:`orm_queryguide_selecting_text` * - :: - session.query(User).\ - join(User.addresses).\ - options( - contains_eager(User.addresses) - ).\ - populate_existing().all() + session.query(User).join(User.addresses).options( + contains_eager(User.addresses) + ).populate_existing().all() - :: session.execute( - select(User). - join(User.addresses). - options(contains_eager(User.addresses)). - execution_options(populate_existing=True) + select(User) + .join(User.addresses) + .options(contains_eager(User.addresses)) + .execution_options(populate_existing=True) ).scalars().all() - @@ -1328,21 +1273,17 @@ following the table, and may include additional notes not summarized here. * - :: - session.query(User).\ - filter(User.name == 'foo').\ - update( - {"fullname": "Foo Bar"}, - synchronize_session="evaluate" - ) - + session.query(User).filter(User.name == "foo").update( + {"fullname": "Foo Bar"}, synchronize_session="evaluate" + ) - :: session.execute( - update(User). 
- where(User.name == 'foo'). - values(fullname="Foo Bar"). - execution_options(synchronize_session="evaluate") + update(User) + .where(User.name == "foo") + .values(fullname="Foo Bar") + .execution_options(synchronize_session="evaluate") ) - :ref:`orm_expression_update_delete` @@ -1575,7 +1516,6 @@ will all be removed in 2.0:: # string use removed q = session.query(Address).filter(with_parent(u1, "addresses")) - **Migration to 2.0** Modern SQLAlchemy 1.x versions support the recommended technique which @@ -1622,7 +1562,6 @@ attributes in a list will be removed:: # chaining removed q = session.query(User).join("orders", "items", "keywords") - **Migration to 2.0** Use individual calls to :meth:`_orm.Query.join` for 1.x /2.0 cross compatible @@ -1671,11 +1610,13 @@ Use explicit aliases instead:: n1 = aliased(Node) n2 = aliased(Node) - q = select(Node).join(Node.children.of_type(n1)).\ - where(n1.name == "some sub child").\ - join(n1.children.of_type(n2)).\ - where(n2.name == "some sub child") - + q = ( + select(Node) + .join(Node.children.of_type(n1)) + .where(n1.name == "some sub child") + .join(n1.children.of_type(n2)) + .where(n2.name == "some sub child") + ) **Discussion** @@ -1714,8 +1655,13 @@ as well as "address.email_address" but only return User objects:: # 1.xx code - result = session.query(User).join(User.addresses).\ - distinct().order_by(Address.email_address).all() + result = ( + session.query(User) + .join(User.addresses) + .distinct() + .order_by(Address.email_address) + .all() + ) In version 2.0, the "email_address" column will not be automatically added to the columns clause, and the above query will fail, since relational @@ -1730,8 +1676,12 @@ returning the main entity object, and not the extra column, use the # 1.4 / 2.0 code - stmt = select(User, Address.email_address).join(User.addresses).\ - distinct().order_by(Address.email_address) + stmt = ( + select(User, Address.email_address) + .join(User.addresses) + .distinct() + 
.order_by(Address.email_address) + ) result = session.execute(stmt).columns(User).all() @@ -1758,10 +1708,12 @@ Selecting from the query itself as a subquery, e.g. "from_self()" The :meth:`_orm.Query.from_self` method will be removed from :class:`_orm.Query`:: # from_self is removed - q = session.query(User, Address.email_address).\ - join(User.addresses).\ - from_self(User).order_by(Address.email_address) - + q = ( + session.query(User, Address.email_address) + .join(User.addresses) + .from_self(User) + .order_by(Address.email_address) + ) **Migration to 2.0** @@ -1775,8 +1727,7 @@ since the final query wants to query in terms of both the ``User`` and from sqlalchemy.orm import aliased - subq = session.query(User, Address.email_address).\ - join(User.addresses).subquery() + subq = session.query(User, Address.email_address).join(User.addresses).subquery() ua = aliased(User, subq) @@ -1788,8 +1739,7 @@ The same form may be used in :term:`2.0 style`:: from sqlalchemy.orm import aliased - subq = select(User, Address.email_address).\ - join(User.addresses).subquery() + subq = select(User, Address.email_address).join(User.addresses).subquery() ua = aliased(User, subq) @@ -1799,7 +1749,6 @@ The same form may be used in :term:`2.0 style`:: result = session.execute(stmt) - **Discussion** The :meth:`_query.Query.from_self` method is a very complicated method that is rarely @@ -1832,8 +1781,7 @@ labeling:: # 1.4 / 2.0 code - subq = select(User, Address).\ - join(User.addresses).subquery() + subq = select(User, Address).join(User.addresses).subquery() ua = aliased(User, subq) aa = aliased(Address, subq) @@ -1923,9 +1871,7 @@ where the "joined eager loading" loader strategy is used with collections:: # In the new API, uniquing is available but not implicitly # enabled - result = session.execute( - select(User).options(joinedload(User.addresses)) - ) + result = session.execute(select(User).options(joinedload(User.addresses))) # this actually will raise an error to let the user 
know that # uniquing should be applied @@ -1994,16 +1940,15 @@ to achieve 2.0 style querying that's in terms of a specific relationship: class User(Base): - __tablename__ = 'user' + __tablename__ = "user" posts = relationship(Post, lazy="dynamic") + jack = session.get(User, 5) # filter Jack's blog posts - posts = session.scalars( - jack.posts.statement.where(Post.headline == "this is a post") - ) + posts = session.scalars(jack.posts.statement.where(Post.headline == "this is a post")) * Use the :func:`_orm.with_parent` function to construct a :func:`_sql.select` construct directly:: @@ -2013,9 +1958,9 @@ to achieve 2.0 style querying that's in terms of a specific relationship: jack = session.get(User, 5) posts = session.scalars( - select(Post). - where(with_parent(jack, User.posts)). - where(Post.headline == "this is a post") + select(Post) + .where(with_parent(jack, User.posts)) + .where(Post.headline == "this is a post") ) **Discussion** @@ -2050,7 +1995,6 @@ is, this pattern:: # commits, won't be supported sess.flush() - **Migration to 2.0** The main reason a :class:`_orm.Session` is used in "autocommit" mode @@ -2066,7 +2010,7 @@ be called:: sess = Session(engine) sess.begin() # begin explicitly; if not called, will autobegin - # when database access is needed + # when database access is needed sess.add(obj) @@ -2104,6 +2048,7 @@ a decorator may be used:: import contextlib + @contextlib.contextmanager def transaction(session): if not session.in_transaction(): @@ -2112,7 +2057,6 @@ a decorator may be used:: else: yield - The above context manager may be used in the same way the "subtransaction" flag works, such as in the following example:: @@ -2122,12 +2066,14 @@ The above context manager may be used in the same way the with transaction(session): method_b(session) + # method_b also starts a transaction, but when # called from method_a participates in the ongoing # transaction. 
def method_b(session): with transaction(session): - session.add(SomeObject('bat', 'lala')) + session.add(SomeObject("bat", "lala")) + Session = sessionmaker(engine) @@ -2142,8 +2088,10 @@ or methods to be concerned with the details of transaction demarcation:: def method_a(session): method_b(session) + def method_b(session): - session.add(SomeObject('bat', 'lala')) + session.add(SomeObject("bat", "lala")) + Session = sessionmaker(engine) diff --git a/doc/build/changelog/unreleased_14/8525.rst b/doc/build/changelog/unreleased_14/8525.rst index 3031ec378c..8508e396b4 100644 --- a/doc/build/changelog/unreleased_14/8525.rst +++ b/doc/build/changelog/unreleased_14/8525.rst @@ -7,4 +7,4 @@ Database via Azure Active Directory", which apparently lacks the ``system_views`` view entirely. Error catching has been extended that under no circumstances will this method ever fail, provided database connectivity - is present. \ No newline at end of file + is present. diff --git a/doc/build/changelog/unreleased_14/8569.rst b/doc/build/changelog/unreleased_14/8569.rst index fc3b3f7398..5ae6fce091 100644 --- a/doc/build/changelog/unreleased_14/8569.rst +++ b/doc/build/changelog/unreleased_14/8569.rst @@ -10,4 +10,4 @@ combinations of SQL label names and aliasing. This "wrapping" is not appropriate for :func:`_orm.contains_eager` which has always had the contract that the user-defined SQL statement is unmodified with the - exception of adding the appropriate columns to be fetched. \ No newline at end of file + exception of adding the appropriate columns to be fetched. 
diff --git a/doc/build/core/connections.rst b/doc/build/core/connections.rst index 5228235e73..9481d9d4e4 100644 --- a/doc/build/core/connections.rst +++ b/doc/build/core/connections.rst @@ -21,7 +21,7 @@ Basic Usage Recall from :doc:`/core/engines` that an :class:`_engine.Engine` is created via the :func:`_sa.create_engine` call:: - engine = create_engine('mysql://scott:tiger@localhost/test') + engine = create_engine("mysql://scott:tiger@localhost/test") The typical usage of :func:`_sa.create_engine` is once per particular database URL, held globally for the lifetime of a single application process. A single @@ -48,7 +48,7 @@ a textual statement to the database looks like:: with engine.connect() as connection: result = connection.execute(text("select username from users")) for row in result: - print("username:", row['username']) + print("username:", row["username"]) Above, the :meth:`_engine.Engine.connect` method returns a :class:`_engine.Connection` object, and by using it in a Python context manager (e.g. 
the ``with:`` @@ -146,13 +146,15 @@ issue a transaction on a :class:`_engine.Connection`, but only the outermost with connection.begin(): # open a transaction method_b(connection) + # method_b also starts a transaction def method_b(connection): - with connection.begin(): # open a transaction - this runs in the - # context of method_a's transaction + with connection.begin(): # open a transaction - this runs in the + # context of method_a's transaction connection.execute(text("insert into mytable values ('bat', 'lala')")) connection.execute(mytable.insert(), {"col1": "bat", "col2": "lala"}) + # open a Connection and call method_a with engine.connect() as conn: method_a(conn) @@ -187,12 +189,14 @@ adapt the example from the previous section to this practice looks like:: def method_a(connection): method_b(connection) + # method_b uses the connection and assumes the transaction # is external def method_b(connection): connection.execute(text("insert into mytable values ('bat', 'lala')")) connection.execute(mytable.insert(), {"col1": "bat", "col2": "lala"}) + # open a Connection inside of a transaction and call method_a with engine.begin() as conn: method_a(conn) @@ -227,6 +231,7 @@ a decorator may be used:: import contextlib + @contextlib.contextmanager def transaction(connection): if not connection.in_transaction(): @@ -242,6 +247,7 @@ The above contextmanager would be used as:: with transaction(connection): # open a transaction method_b(connection) + # method_b either starts a transaction, or uses the one already # present def method_b(connection): @@ -249,6 +255,7 @@ The above contextmanager would be used as:: connection.execute(text("insert into mytable values ('bat', 'lala')")) connection.execute(mytable.insert(), {"col1": "bat", "col2": "lala"}) + # open a Connection and call method_a with engine.connect() as conn: method_a(conn) @@ -260,6 +267,7 @@ present:: import contextlib + def connectivity(engine): connection = None @@ -285,6 +293,7 @@ Using the above would 
look like:: with connectivity(): method_b(connectivity) + # method_b also wants to use a connection from the context, so it # also calls "with:", but also it actually uses the connection. def method_b(connectivity): @@ -292,6 +301,7 @@ Using the above would look like:: connection.execute(text("insert into mytable values ('bat', 'lala')")) connection.execute(mytable.insert(), {"col1": "bat", "col2": "lala"}) + # create a new connection/transaction context object and call # method_a method_a(connectivity(engine)) @@ -438,9 +448,7 @@ parameter to :func:`_sa.create_engine`:: eng = create_engine( "postgresql://scott:tiger@localhost/test", - execution_options={ - "isolation_level": "REPEATABLE READ" - } + execution_options={"isolation_level": "REPEATABLE READ"}, ) With the above setting, the DBAPI connection will be set to use a @@ -461,7 +469,6 @@ separated off from the main engine:: autocommit_engine = eng.execution_options(isolation_level="AUTOCOMMIT") - Above, the :meth:`_engine.Engine.execution_options` method creates a shallow copy of the original :class:`_engine.Engine`. Both ``eng`` and ``autocommit_engine`` share the same dialect and connection pool. However, the @@ -726,11 +733,7 @@ combination has includes: These three behaviors are illustrated in the example below:: with engine.connect() as conn: - result = ( - conn. - execution_options(yield_per=100). - execute(text("select * from table")) - ) + result = conn.execution_options(yield_per=100).execute(text("select * from table")) for partition in result.partitions(): # partition is an iterable that will be at most 100 items @@ -818,7 +821,7 @@ which is not a :class:`_engine.Connection`. 
This was illustrated using the result = engine.execute(text("select username from users")) for row in result: - print("username:", row['username']) + print("username:", row["username"]) In addition to "connectionless" execution, it is also possible to use the :meth:`~.Executable.execute` method of @@ -832,9 +835,11 @@ Given a table as below:: from sqlalchemy import MetaData, Table, Column, Integer metadata_obj = MetaData() - users_table = Table('users', metadata_obj, - Column('id', Integer, primary_key=True), - Column('name', String(50)) + users_table = Table( + "users", + metadata_obj, + Column("id", Integer, primary_key=True), + Column("name", String(50)), ) Explicit execution delivers the SQL text or constructed SQL expression to the @@ -948,9 +953,10 @@ to render under different schema names without any changes. Given a table:: user_table = Table( - 'user', metadata_obj, - Column('id', Integer, primary_key=True), - Column('name', String(50)) + "user", + metadata_obj, + Column("id", Integer, primary_key=True), + Column("name", String(50)), ) The "schema" of this :class:`_schema.Table` as defined by the @@ -960,7 +966,8 @@ that all :class:`_schema.Table` objects with a schema of ``None`` would instead render the schema as ``user_schema_one``:: connection = engine.connect().execution_options( - schema_translate_map={None: "user_schema_one"}) + schema_translate_map={None: "user_schema_one"} + ) result = connection.execute(user_table.select()) @@ -974,10 +981,11 @@ map can specify any number of target->destination schemas:: connection = engine.connect().execution_options( schema_translate_map={ - None: "user_schema_one", # no schema name -> "user_schema_one" - "special": "special_schema", # schema="special" becomes "special_schema" - "public": None # Table objects with schema="public" will render with no schema - }) + None: "user_schema_one", # no schema name -> "user_schema_one" + "special": "special_schema", # schema="special" becomes "special_schema" + "public": 
None, # Table objects with schema="public" will render with no schema + } + ) The :paramref:`.Connection.execution_options.schema_translate_map` parameter affects all DDL and SQL constructs generated from the SQL expression language, @@ -1002,7 +1010,7 @@ as the schema name is passed to these methods explicitly. to the :class:`_orm.Session`. The :class:`_orm.Session` uses a new :class:`_engine.Connection` for each transaction:: - schema_engine = engine.execution_options(schema_translate_map = { ... } ) + schema_engine = engine.execution_options(schema_translate_map={...}) session = Session(schema_engine) @@ -1148,9 +1156,7 @@ As an example, we will examine the logging produced by the following program:: s = Session(e) - s.add_all( - [A(bs=[B(), B(), B()]), A(bs=[B(), B(), B()]), A(bs=[B(), B(), B()])] - ) + s.add_all([A(bs=[B(), B(), B()]), A(bs=[B(), B(), B()]), A(bs=[B(), B(), B()])]) s.commit() for a_rec in s.query(A): @@ -1401,6 +1407,7 @@ a SQL string directly, dialect authors can apply the attribute as follows:: from sqlalchemy.engine.default import DefaultDialect + class MyDialect(DefaultDialect): supports_statement_cache = True @@ -1426,9 +1433,9 @@ like this:: def limit_clause(self, select, **kw): text = "" if select._limit is not None: - text += " \n LIMIT %d" % (select._limit, ) + text += " \n LIMIT %d" % (select._limit,) if select._offset is not None: - text += " \n OFFSET %d" % (select._offset, ) + text += " \n OFFSET %d" % (select._offset,) return text The above routine renders the :attr:`.Select._limit` and @@ -1546,6 +1553,7 @@ approach:: from sqlalchemy import lambda_stmt + def run_my_statement(connection, parameter): stmt = lambda_stmt(lambda: select(table)) stmt += lambda s: s.where(table.c.col == parameter) @@ -1553,6 +1561,7 @@ approach:: return connection.execute(stmt) + with engine.connect() as conn: result = run_my_statement(some_connection, "some parameter") @@ -1588,9 +1597,10 @@ Basic guidelines include: def upd(id_, newname): stmt = 
lambda_stmt(lambda: users.update()) stmt += lambda s: s.values(name=newname) - stmt += lambda s: s.where(users.c.id==id_) + stmt += lambda s: s.where(users.c.id == id_) return stmt + with engine.begin() as conn: conn.execute(upd(7, "foo")) @@ -1621,12 +1631,10 @@ Basic guidelines include: >>> def my_stmt(x, y): ... stmt = lambda_stmt(lambda: select(func.max(x, y))) ... return stmt - ... >>> engine = create_engine("sqlite://", echo=True) >>> with engine.connect() as conn: ... print(conn.scalar(my_stmt(5, 10))) ... print(conn.scalar(my_stmt(12, 8))) - ... {opensql}SELECT max(?, ?) AS max_1 [generated in 0.00057s] (5, 10){stop} 10 @@ -1677,15 +1685,14 @@ Basic guidelines include: >>> def my_stmt(x, y): ... def get_x(): ... return x + ... ... def get_y(): ... return y ... ... stmt = lambda_stmt(lambda: select(func.max(get_x(), get_y()))) ... return stmt - ... >>> with engine.connect() as conn: ... print(conn.scalar(my_stmt(5, 10))) - ... Traceback (most recent call last): # ... sqlalchemy.exc.InvalidRequestError: Can't invoke Python callable get_x() @@ -1701,6 +1708,7 @@ Basic guidelines include: >>> def my_stmt(x, y): ... def get_x(): ... return x + ... ... def get_y(): ... return y ... @@ -1722,14 +1730,11 @@ Basic guidelines include: ... def __init__(self, x, y): ... self.x = x ... self.y = y - ... >>> def my_stmt(foo): ... stmt = lambda_stmt(lambda: select(func.max(foo.x, foo.y))) ... return stmt - ... >>> with engine.connect() as conn: - ... print(conn.scalar(my_stmt(Foo(5, 10)))) - ... + ... print(conn.scalar(my_stmt(Foo(5, 10)))) Traceback (most recent call last): # ... sqlalchemy.exc.InvalidRequestError: Closure variable named 'foo' inside of @@ -1766,8 +1771,7 @@ Basic guidelines include: >>> def my_stmt(foo): ... stmt = lambda_stmt( - ... lambda: select(func.max(foo.x, foo.y)), - ... track_closure_variables=False + ... lambda: select(func.max(foo.x, foo.y)), track_closure_variables=False ... ) ... 
return stmt @@ -1783,13 +1787,9 @@ Basic guidelines include: >>> def my_stmt(self, foo): ... stmt = lambda_stmt( - ... lambda: select(*self.column_expressions), - ... track_closure_variables=False - ... ) - ... stmt = stmt.add_criteria( - ... lambda: self.where_criteria, - ... track_on=[self] + ... lambda: select(*self.column_expressions), track_closure_variables=False ... ) + ... stmt = stmt.add_criteria(lambda: self.where_criteria, track_on=[self]) ... return stmt Using ``track_on`` means the given objects will be stored long term in the @@ -1812,7 +1812,7 @@ SQL expression construct by producing a structure that represents all the state within the construct:: >>> from sqlalchemy import select, column - >>> stmt = select(column('q')) + >>> stmt = select(column("q")) >>> cache_key = stmt._generate_cache_key() >>> print(cache_key) # somewhat paraphrased CacheKey(key=( @@ -2028,7 +2028,6 @@ method may be used:: with engine.connect() as conn: conn.exec_driver_sql("SET param='bar'") - .. versionadded:: 1.4 Added the :meth:`_engine.Connection.exec_driver_sql` method. .. _dbapi_connections_cursor: @@ -2105,7 +2104,7 @@ may potentially be used with your DBAPI. An example of this pattern is:: connection = engine.raw_connection() try: cursor_obj = connection.cursor() - cursor_obj.callproc("my_procedure", ['x', 'y', 'z']) + cursor_obj.callproc("my_procedure", ["x", "y", "z"]) results = list(cursor_obj.fetchall()) cursor_obj.close() connection.commit() @@ -2151,8 +2150,6 @@ Multiple result set support is available from a raw DBAPI cursor using the finally: connection.close() - - Registering New Dialects ======================== @@ -2168,7 +2165,7 @@ to create a new dialect "foodialect://", the steps are as follows: via ``foodialect.dialect``. 3. 
The entry point can be established in setup.py as follows:: - entry_points=""" + entry_points = """ [sqlalchemy.dialects] foodialect = foodialect.dialect:FooDialect """ @@ -2178,7 +2175,7 @@ an existing SQLAlchemy-supported database, the name can be given including a database-qualification. For example, if ``FooDialect`` were in fact a MySQL dialect, the entry point could be established like this:: - entry_points=""" + entry_points = """ [sqlalchemy.dialects] mysql.foodialect = foodialect.dialect:FooDialect """ @@ -2192,6 +2189,7 @@ SQLAlchemy also allows a dialect to be registered within the current process, by the need for separate installation. Use the ``register()`` function as follows:: from sqlalchemy.dialects import registry + registry.register("mysql.foodialect", "myapp.dialect", "MyMySQLDialect") The above will respond to ``create_engine("mysql+foodialect://")`` and load the diff --git a/doc/build/core/constraints.rst b/doc/build/core/constraints.rst index 038c3134dd..aa322238f5 100644 --- a/doc/build/core/constraints.rst +++ b/doc/build/core/constraints.rst @@ -33,11 +33,13 @@ column. 
The single column foreign key is more common, and at the column level is specified by constructing a :class:`~sqlalchemy.schema.ForeignKey` object as an argument to a :class:`~sqlalchemy.schema.Column` object:: - user_preference = Table('user_preference', metadata_obj, - Column('pref_id', Integer, primary_key=True), - Column('user_id', Integer, ForeignKey("user.user_id"), nullable=False), - Column('pref_name', String(40), nullable=False), - Column('pref_value', String(100)) + user_preference = Table( + "user_preference", + metadata_obj, + Column("pref_id", Integer, primary_key=True), + Column("user_id", Integer, ForeignKey("user.user_id"), nullable=False), + Column("pref_name", String(40), nullable=False), + Column("pref_value", String(100)), ) Above, we define a new table ``user_preference`` for which each row must @@ -64,21 +66,27 @@ known as a *composite* foreign key, and almost always references a table that has a composite primary key. Below we define a table ``invoice`` which has a composite primary key:: - invoice = Table('invoice', metadata_obj, - Column('invoice_id', Integer, primary_key=True), - Column('ref_num', Integer, primary_key=True), - Column('description', String(60), nullable=False) + invoice = Table( + "invoice", + metadata_obj, + Column("invoice_id", Integer, primary_key=True), + Column("ref_num", Integer, primary_key=True), + Column("description", String(60), nullable=False), ) And then a table ``invoice_item`` with a composite foreign key referencing ``invoice``:: - invoice_item = Table('invoice_item', metadata_obj, - Column('item_id', Integer, primary_key=True), - Column('item_name', String(60), nullable=False), - Column('invoice_id', Integer, nullable=False), - Column('ref_num', Integer, nullable=False), - ForeignKeyConstraint(['invoice_id', 'ref_num'], ['invoice.invoice_id', 'invoice.ref_num']) + invoice_item = Table( + "invoice_item", + metadata_obj, + Column("item_id", Integer, primary_key=True), + Column("item_name", String(60), 
nullable=False), + Column("invoice_id", Integer, nullable=False), + Column("ref_num", Integer, nullable=False), + ForeignKeyConstraint( + ["invoice_id", "ref_num"], ["invoice.invoice_id", "invoice.ref_num"] + ), ) It's important to note that the @@ -126,22 +134,20 @@ statements, on all backends other than SQLite which does not support most forms of ALTER. Given a schema like:: node = Table( - 'node', metadata_obj, - Column('node_id', Integer, primary_key=True), - Column( - 'primary_element', Integer, - ForeignKey('element.element_id') - ) + "node", + metadata_obj, + Column("node_id", Integer, primary_key=True), + Column("primary_element", Integer, ForeignKey("element.element_id")), ) element = Table( - 'element', metadata_obj, - Column('element_id', Integer, primary_key=True), - Column('parent_node_id', Integer), + "element", + metadata_obj, + Column("element_id", Integer, primary_key=True), + Column("parent_node_id", Integer), ForeignKeyConstraint( - ['parent_node_id'], ['node.node_id'], - name='fk_element_parent_node_id' - ) + ["parent_node_id"], ["node.node_id"], name="fk_element_parent_node_id" + ), ) When we call upon :meth:`_schema.MetaData.create_all` on a backend such as the @@ -151,7 +157,7 @@ constraints are created separately: .. sourcecode:: pycon+sql >>> with engine.connect() as conn: - ... metadata_obj.create_all(conn, checkfirst=False) + ... metadata_obj.create_all(conn, checkfirst=False) {opensql}CREATE TABLE element ( element_id SERIAL NOT NULL, parent_node_id INTEGER, @@ -179,7 +185,7 @@ those constraints that are named: .. sourcecode:: pycon+sql >>> with engine.connect() as conn: - ... metadata_obj.drop_all(conn, checkfirst=False) + ... metadata_obj.drop_all(conn, checkfirst=False) {opensql}ALTER TABLE element DROP CONSTRAINT fk_element_parent_node_id DROP TABLE node DROP TABLE element @@ -205,13 +211,16 @@ to manually resolve dependency cycles. 
We can add this flag only to the ``'element'`` table as follows:: element = Table( - 'element', metadata_obj, - Column('element_id', Integer, primary_key=True), - Column('parent_node_id', Integer), + "element", + metadata_obj, + Column("element_id", Integer, primary_key=True), + Column("parent_node_id", Integer), ForeignKeyConstraint( - ['parent_node_id'], ['node.node_id'], - use_alter=True, name='fk_element_parent_node_id' - ) + ["parent_node_id"], + ["node.node_id"], + use_alter=True, + name="fk_element_parent_node_id", + ), ) in our CREATE DDL we will see the ALTER statement only for this constraint, @@ -220,7 +229,7 @@ and not the other one: .. sourcecode:: pycon+sql >>> with engine.connect() as conn: - ... metadata_obj.create_all(conn, checkfirst=False) + ... metadata_obj.create_all(conn, checkfirst=False) {opensql}CREATE TABLE element ( element_id SERIAL NOT NULL, parent_node_id INTEGER, @@ -282,22 +291,29 @@ generation of this clause via the ``onupdate`` and ``ondelete`` keyword arguments. 
The value is any string which will be output after the appropriate "ON UPDATE" or "ON DELETE" phrase:: - child = Table('child', metadata_obj, - Column('id', Integer, - ForeignKey('parent.id', onupdate="CASCADE", ondelete="CASCADE"), - primary_key=True - ) - ) - - composite = Table('composite', metadata_obj, - Column('id', Integer, primary_key=True), - Column('rev_id', Integer), - Column('note_id', Integer), + child = Table( + "child", + metadata_obj, + Column( + "id", + Integer, + ForeignKey("parent.id", onupdate="CASCADE", ondelete="CASCADE"), + primary_key=True, + ), + ) + + composite = Table( + "composite", + metadata_obj, + Column("id", Integer, primary_key=True), + Column("rev_id", Integer), + Column("note_id", Integer), ForeignKeyConstraint( - ['rev_id', 'note_id'], - ['revisions.id', 'revisions.note_id'], - onupdate="CASCADE", ondelete="SET NULL" - ) + ["rev_id", "note_id"], + ["revisions.id", "revisions.note_id"], + onupdate="CASCADE", + ondelete="SET NULL", + ), ) Note that these clauses require ``InnoDB`` tables when used with MySQL. @@ -327,17 +343,16 @@ unique constraints and/or those with multiple columns are created via the from sqlalchemy import UniqueConstraint metadata_obj = MetaData() - mytable = Table('mytable', metadata_obj, - + mytable = Table( + "mytable", + metadata_obj, # per-column anonymous unique constraint - Column('col1', Integer, unique=True), - - Column('col2', Integer), - Column('col3', Integer), - + Column("col1", Integer, unique=True), + Column("col2", Integer), + Column("col3", Integer), # explicit/composite unique constraint. 'name' is optional. - UniqueConstraint('col2', 'col3', name='uix_1') - ) + UniqueConstraint("col2", "col3", name="uix_1"), + ) CHECK Constraint ---------------- @@ -357,17 +372,16 @@ MySQL. 
from sqlalchemy import CheckConstraint metadata_obj = MetaData() - mytable = Table('mytable', metadata_obj, - + mytable = Table( + "mytable", + metadata_obj, # per-column CHECK constraint - Column('col1', Integer, CheckConstraint('col1>5')), - - Column('col2', Integer), - Column('col3', Integer), - + Column("col1", Integer, CheckConstraint("col1>5")), + Column("col2", Integer), + Column("col3", Integer), # table level CHECK constraint. 'name' is optional. - CheckConstraint('col2 > col3 + 5', name='check1') - ) + CheckConstraint("col2 > col3 + 5", name="check1"), + ) {sql}mytable.create(engine) CREATE TABLE mytable ( @@ -388,12 +402,14 @@ option of being configured directly:: from sqlalchemy import PrimaryKeyConstraint - my_table = Table('mytable', metadata_obj, - Column('id', Integer), - Column('version_id', Integer), - Column('data', String(50)), - PrimaryKeyConstraint('id', 'version_id', name='mytable_pk') - ) + my_table = Table( + "mytable", + metadata_obj, + Column("id", Integer), + Column("version_id", Integer), + Column("data", String(50)), + PrimaryKeyConstraint("id", "version_id", name="mytable_pk"), + ) .. seealso:: @@ -468,11 +484,11 @@ one exception case where an existing name can be further embellished). An example naming convention that suits basic cases is as follows:: convention = { - "ix": 'ix_%(column_0_label)s', - "uq": "uq_%(table_name)s_%(column_0_name)s", - "ck": "ck_%(table_name)s_%(constraint_name)s", - "fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s", - "pk": "pk_%(table_name)s" + "ix": "ix_%(column_0_label)s", + "uq": "uq_%(table_name)s_%(column_0_name)s", + "ck": "ck_%(table_name)s_%(constraint_name)s", + "fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s", + "pk": "pk_%(table_name)s", } metadata_obj = MetaData(naming_convention=convention) @@ -482,10 +498,12 @@ the target :class:`_schema.MetaData` collection. 
For example, we can observe the name produced when we create an unnamed :class:`.UniqueConstraint`:: - >>> user_table = Table('user', metadata_obj, - ... Column('id', Integer, primary_key=True), - ... Column('name', String(30), nullable=False), - ... UniqueConstraint('name') + >>> user_table = Table( + ... "user", + ... metadata_obj, + ... Column("id", Integer, primary_key=True), + ... Column("name", String(30), nullable=False), + ... UniqueConstraint("name"), ... ) >>> list(user_table.constraints)[1].name 'uq_user_name' @@ -493,10 +511,12 @@ For example, we can observe the name produced when we create an unnamed This same feature takes effect even if we just use the :paramref:`_schema.Column.unique` flag:: - >>> user_table = Table('user', metadata_obj, - ... Column('id', Integer, primary_key=True), - ... Column('name', String(30), nullable=False, unique=True) - ... ) + >>> user_table = Table( + ... "user", + ... metadata_obj, + ... Column("id", Integer, primary_key=True), + ... Column("name", String(30), nullable=False, unique=True), + ... ) >>> list(user_table.constraints)[1].name 'uq_user_name' @@ -543,16 +563,17 @@ deterministically truncated using a 4-character suffix based on the md5 hash of the long name. 
For example, the naming convention below will generate very long names given the column names in use:: - metadata_obj = MetaData(naming_convention={ - "uq": "uq_%(table_name)s_%(column_0_N_name)s" - }) + metadata_obj = MetaData( + naming_convention={"uq": "uq_%(table_name)s_%(column_0_N_name)s"} + ) long_names = Table( - 'long_names', metadata_obj, - Column('information_channel_code', Integer, key='a'), - Column('billing_convention_name', Integer, key='b'), - Column('product_identifier', Integer, key='c'), - UniqueConstraint('a', 'b', 'c') + "long_names", + metadata_obj, + Column("information_channel_code", Integer, key="a"), + Column("billing_convention_name", Integer, key="b"), + Column("product_identifier", Integer, key="c"), + UniqueConstraint("a", "b", "c"), ) On the PostgreSQL dialect, names longer than 63 characters will be truncated @@ -580,20 +601,22 @@ that as follows:: import uuid + def fk_guid(constraint, table): - str_tokens = [ - table.name, - ] + [ - element.parent.name for element in constraint.elements - ] + [ - element.target_fullname for element in constraint.elements - ] - guid = uuid.uuid5(uuid.NAMESPACE_OID, "_".join(str_tokens).encode('ascii')) + str_tokens = ( + [ + table.name, + ] + + [element.parent.name for element in constraint.elements] + + [element.target_fullname for element in constraint.elements] + ) + guid = uuid.uuid5(uuid.NAMESPACE_OID, "_".join(str_tokens).encode("ascii")) return str(guid) + convention = { "fk_guid": fk_guid, - "ix": 'ix_%(column_0_label)s', + "ix": "ix_%(column_0_label)s", "fk": "fk_%(fk_guid)s", } @@ -602,18 +625,21 @@ name as follows:: >>> metadata_obj = MetaData(naming_convention=convention) - >>> user_table = Table('user', metadata_obj, - ... Column('id', Integer, primary_key=True), - ... Column('version', Integer, primary_key=True), - ... Column('data', String(30)) - ... ) - >>> address_table = Table('address', metadata_obj, - ... Column('id', Integer, primary_key=True), - ... 
Column('user_id', Integer), - ... Column('user_version_id', Integer) - ... ) - >>> fk = ForeignKeyConstraint(['user_id', 'user_version_id'], - ... ['user.id', 'user.version']) + >>> user_table = Table( + ... "user", + ... metadata_obj, + ... Column("id", Integer, primary_key=True), + ... Column("version", Integer, primary_key=True), + ... Column("data", String(30)), + ... ) + >>> address_table = Table( + ... "address", + ... metadata_obj, + ... Column("id", Integer, primary_key=True), + ... Column("user_id", Integer), + ... Column("user_version_id", Integer), + ... ) + >>> fk = ForeignKeyConstraint(["user_id", "user_version_id"], ["user.id", "user.version"]) >>> address_table.append_constraint(fk) >>> fk.name fk_0cd51ab5-8d70-56e8-a83c-86661737766d @@ -646,9 +672,11 @@ A typical convention is ``"ck_%(table_name)s_%(constraint_name)s"``:: naming_convention={"ck": "ck_%(table_name)s_%(constraint_name)s"} ) - Table('foo', metadata_obj, - Column('value', Integer), - CheckConstraint('value > 5', name='value_gt_5') + Table( + "foo", + metadata_obj, + Column("value", Integer), + CheckConstraint("value > 5", name="value_gt_5"), ) The above table will produce the name ``ck_foo_value_gt_5``:: @@ -663,13 +691,9 @@ token; we can make use of this by ensuring we use a :class:`_schema.Column` or :func:`_expression.column` element within the constraint's expression, either by declaring the constraint separate from the table:: - metadata_obj = MetaData( - naming_convention={"ck": "ck_%(table_name)s_%(column_0_name)s"} - ) + metadata_obj = MetaData(naming_convention={"ck": "ck_%(table_name)s_%(column_0_name)s"}) - foo = Table('foo', metadata_obj, - Column('value', Integer) - ) + foo = Table("foo", metadata_obj, Column("value", Integer)) CheckConstraint(foo.c.value > 5) @@ -677,13 +701,10 @@ or by using a :func:`_expression.column` inline:: from sqlalchemy import column - metadata_obj = MetaData( - naming_convention={"ck": "ck_%(table_name)s_%(column_0_name)s"} - ) + metadata_obj = 
MetaData(naming_convention={"ck": "ck_%(table_name)s_%(column_0_name)s"}) - foo = Table('foo', metadata_obj, - Column('value', Integer), - CheckConstraint(column('value') > 5) + foo = Table( + "foo", metadata_obj, Column("value", Integer), CheckConstraint(column("value") > 5) ) Both will produce the name ``ck_foo_value``:: @@ -712,9 +733,7 @@ and :class:`.Enum` which generate a CHECK constraint accompanying the type. The name for the constraint here is most directly set up by sending the "name" parameter, e.g. :paramref:`.Boolean.name`:: - Table('foo', metadata_obj, - Column('flag', Boolean(name='ck_foo_flag')) - ) + Table("foo", metadata_obj, Column("flag", Boolean(name="ck_foo_flag"))) The naming convention feature may be combined with these types as well, normally by using a convention which includes ``%(constraint_name)s`` @@ -724,9 +743,7 @@ and then applying a name to the type:: naming_convention={"ck": "ck_%(table_name)s_%(constraint_name)s"} ) - Table('foo', metadata_obj, - Column('flag', Boolean(name='flag_bool')) - ) + Table("foo", metadata_obj, Column("flag", Boolean(name="flag_bool"))) The above table will produce the constraint name ``ck_foo_flag_bool``:: @@ -748,13 +765,9 @@ The CHECK constraint may also make use of the ``column_0_name`` token, which works nicely with :class:`.SchemaType` since these constraints have only one column:: - metadata_obj = MetaData( - naming_convention={"ck": "ck_%(table_name)s_%(column_0_name)s"} - ) + metadata_obj = MetaData(naming_convention={"ck": "ck_%(table_name)s_%(column_0_name)s"}) - Table('foo', metadata_obj, - Column('flag', Boolean()) - ) + Table("foo", metadata_obj, Column("flag", Boolean())) The above schema will produce:: @@ -822,25 +835,24 @@ INDEX" is issued right after the create statements for the table: .. 
sourcecode:: python+sql metadata_obj = MetaData() - mytable = Table('mytable', metadata_obj, + mytable = Table( + "mytable", + metadata_obj, # an indexed column, with index "ix_mytable_col1" - Column('col1', Integer, index=True), - + Column("col1", Integer, index=True), # a uniquely indexed column with index "ix_mytable_col2" - Column('col2', Integer, index=True, unique=True), - - Column('col3', Integer), - Column('col4', Integer), - - Column('col5', Integer), - Column('col6', Integer), - ) + Column("col2", Integer, index=True, unique=True), + Column("col3", Integer), + Column("col4", Integer), + Column("col5", Integer), + Column("col6", Integer), + ) # place an index on col3, col4 - Index('idx_col34', mytable.c.col3, mytable.c.col4) + Index("idx_col34", mytable.c.col3, mytable.c.col4) # place a unique index on col5, col6 - Index('myindex', mytable.c.col5, mytable.c.col6, unique=True) + Index("myindex", mytable.c.col5, mytable.c.col6, unique=True) {sql}mytable.create(engine) CREATE TABLE mytable ( @@ -863,26 +875,24 @@ objects directly. :class:`.Index` also supports identify columns:: metadata_obj = MetaData() - mytable = Table('mytable', metadata_obj, - Column('col1', Integer), - - Column('col2', Integer), - - Column('col3', Integer), - Column('col4', Integer), - + mytable = Table( + "mytable", + metadata_obj, + Column("col1", Integer), + Column("col2", Integer), + Column("col3", Integer), + Column("col4", Integer), # place an index on col1, col2 - Index('idx_col12', 'col1', 'col2'), - + Index("idx_col12", "col1", "col2"), # place a unique index on col3, col4 - Index('idx_col34', 'col3', 'col4', unique=True) + Index("idx_col34", "col3", "col4", unique=True), ) The :class:`~sqlalchemy.schema.Index` object also supports its own ``create()`` method: .. 
sourcecode:: python+sql - i = Index('someindex', mytable.c.col5) + i = Index("someindex", mytable.c.col5) {sql}i.create(engine) CREATE INDEX someindex ON mytable (col5){stop} @@ -897,14 +907,14 @@ value, the :meth:`_expression.ColumnElement.desc` modifier may be used:: from sqlalchemy import Index - Index('someindex', mytable.c.somecol.desc()) + Index("someindex", mytable.c.somecol.desc()) Or with a backend that supports functional indexes such as PostgreSQL, a "case insensitive" index can be created using the ``lower()`` function:: from sqlalchemy import func, Index - Index('someindex', func.lower(mytable.c.somecol)) + Index("someindex", func.lower(mytable.c.somecol)) Index API --------- diff --git a/doc/build/core/custom_types.rst b/doc/build/core/custom_types.rst index 5f9e0555d6..0db63fad94 100644 --- a/doc/build/core/custom_types.rst +++ b/doc/build/core/custom_types.rst @@ -24,6 +24,7 @@ can be associated with any type:: from sqlalchemy.ext.compiler import compiles from sqlalchemy.types import BINARY + @compiles(BINARY, "sqlite") def compile_binary_sqlite(type_, compiler, **kw): return "BLOB" @@ -93,6 +94,7 @@ which coerces as needed:: from sqlalchemy.types import TypeDecorator, Unicode + class CoerceUTF8(TypeDecorator): """Safely coerce Python bytestrings to Unicode before passing off to the database.""" @@ -101,7 +103,7 @@ which coerces as needed:: def process_bind_param(self, value, dialect): if isinstance(value, str): - value = value.decode('utf-8') + value = value.decode("utf-8") return value Rounding Numerics @@ -113,6 +115,7 @@ many decimal places. Here's a recipe that rounds them down:: from sqlalchemy.types import TypeDecorator, Numeric from decimal import Decimal + class SafeNumeric(TypeDecorator): """Adds quantization to Numeric.""" @@ -120,12 +123,11 @@ many decimal places. 
Here's a recipe that rounds them down:: def __init__(self, *arg, **kw): TypeDecorator.__init__(self, *arg, **kw) - self.quantize_int = - self.impl.scale + self.quantize_int = -self.impl.scale self.quantize = Decimal(10) ** self.quantize_int def process_bind_param(self, value, dialect): - if isinstance(value, Decimal) and \ - value.as_tuple()[2] < self.quantize_int: + if isinstance(value, Decimal) and value.as_tuple()[2] < self.quantize_int: value = value.quantize(self.quantize) return value @@ -147,6 +149,7 @@ denormalize:: import datetime + class TZDateTime(TypeDecorator): impl = DateTime cache_ok = True @@ -155,9 +158,7 @@ denormalize:: if value is not None: if not value.tzinfo: raise TypeError("tzinfo is required") - value = value.astimezone(datetime.timezone.utc).replace( - tzinfo=None - ) + value = value.astimezone(datetime.timezone.utc).replace(tzinfo=None) return value def process_result_value(self, value, dialect): @@ -165,7 +166,6 @@ denormalize:: value = value.replace(tzinfo=datetime.timezone.utc) return value - .. _custom_guid_type: Backend-agnostic GUID Type @@ -180,6 +180,7 @@ binary in CHAR(16) if desired:: from sqlalchemy.dialects.postgresql import UUID import uuid + class GUID(TypeDecorator): """Platform-independent GUID type. @@ -187,11 +188,12 @@ binary in CHAR(16) if desired:: CHAR(32), storing as stringified hex values. 
""" + impl = CHAR cache_ok = True def load_dialect_impl(self, dialect): - if dialect.name == 'postgresql': + if dialect.name == "postgresql": return dialect.type_descriptor(UUID()) else: return dialect.type_descriptor(CHAR(32)) @@ -199,7 +201,7 @@ binary in CHAR(16) if desired:: def process_bind_param(self, value, dialect): if value is None: return value - elif dialect.name == 'postgresql': + elif dialect.name == "postgresql": return str(value) else: if not isinstance(value, uuid.UUID): @@ -269,12 +271,12 @@ dictionary-oriented JSON structure, we can apply this as:: json_type = MutableDict.as_mutable(JSONEncodedDict) + class MyClass(Base): # ... json_data = Column(json_type) - .. seealso:: :ref:`mutable_toplevel` @@ -295,8 +297,7 @@ get at this with a type like ``JSONEncodedDict``, we need to from sqlalchemy import type_coerce, String - stmt = select(my_table).where( - type_coerce(my_table.c.json_data, String).like('%foo%')) + stmt = select(my_table).where(type_coerce(my_table.c.json_data, String).like("%foo%")) :class:`.TypeDecorator` provides a built-in system for working up type translations like these based on operators. 
If we wanted to frequently use the @@ -307,6 +308,7 @@ method:: from sqlalchemy.sql import operators from sqlalchemy import String + class JSONEncodedDict(TypeDecorator): impl = VARCHAR @@ -367,6 +369,7 @@ in conjunction with :data:`~.sqlalchemy.sql.expression.func`:: from sqlalchemy import func from sqlalchemy.types import UserDefinedType + class Geometry(UserDefinedType): def get_col_spec(self): return "GEOMETRY" @@ -380,13 +383,18 @@ in conjunction with :data:`~.sqlalchemy.sql.expression.func`:: We can apply the ``Geometry`` type into :class:`_schema.Table` metadata and use it in a :func:`_expression.select` construct:: - geometry = Table('geometry', metadata, - Column('geom_id', Integer, primary_key=True), - Column('geom_data', Geometry) - ) + geometry = Table( + "geometry", + metadata, + Column("geom_id", Integer, primary_key=True), + Column("geom_data", Geometry), + ) - print(select(geometry).where( - geometry.c.geom_data == 'LINESTRING(189412 252431,189631 259122)')) + print( + select(geometry).where( + geometry.c.geom_data == "LINESTRING(189412 252431,189631 259122)" + ) + ) The resulting SQL embeds both functions as appropriate. ``ST_AsText`` is applied to the columns clause so that the return value is run through @@ -403,7 +411,7 @@ with the labeling of the wrapped expression. 
Such as, if we rendered a :func:`_expression.select` against a :func:`.label` of our expression, the string label is moved to the outside of the wrapped expression:: - print(select(geometry.c.geom_data.label('my_data'))) + print(select(geometry.c.geom_data.label("my_data"))) Output:: @@ -415,11 +423,21 @@ Another example is we decorate PostgreSQL ``pgcrypto`` extension to encrypt/decrypt values transparently:: - from sqlalchemy import create_engine, String, select, func, \ - MetaData, Table, Column, type_coerce, TypeDecorator + from sqlalchemy import ( + create_engine, + String, + select, + func, + MetaData, + Table, + Column, + type_coerce, + TypeDecorator, + ) from sqlalchemy.dialects.postgresql import BYTEA + class PGPString(TypeDecorator): impl = BYTEA @@ -440,24 +458,24 @@ transparently:: def column_expression(self, col): return func.pgp_sym_decrypt(col, self.passphrase) + metadata_obj = MetaData() - message = Table('message', metadata_obj, - Column('username', String(50)), - Column('message', - PGPString("this is my passphrase")), - ) + message = Table( + "message", + metadata_obj, + Column("username", String(50)), + Column("message", PGPString("this is my passphrase")), + ) engine = create_engine("postgresql://scott:tiger@localhost/test", echo=True) with engine.begin() as conn: metadata_obj.create_all(conn) - conn.execute(message.insert(), username="some user", - message="this is my message") + conn.execute(message.insert(), username="some user", message="this is my message") - print(conn.scalar( - select(message.c.message).\ - where(message.c.username == "some user") - )) + print( + conn.scalar(select(message.c.message).where(message.c.username == "some user")) + ) The ``pgp_sym_encrypt`` and ``pgp_sym_decrypt`` functions are applied to the INSERT and SELECT statements:: @@ -499,7 +517,7 @@ is given a string representing the SQL operator to render, and the return value is a Python callable that accepts any arbitrary right-hand side expression:: >>> from 
sqlalchemy import column - >>> expr = column('x').op('>>')(column('y')) + >>> expr = column("x").op(">>")(column("y")) >>> print(expr) x >> y @@ -524,6 +542,7 @@ SQL itself:: from sqlalchemy import Integer + class MyInt(Integer): class comparator_factory(Integer.Comparator): def __add__(self, other): @@ -548,6 +567,7 @@ object directly:: from sqlalchemy import Integer + class MyInt(Integer): class comparator_factory(Integer.Comparator): def __add__(self, other): @@ -561,6 +581,7 @@ to integers:: from sqlalchemy import Integer, func + class MyInt(Integer): class comparator_factory(Integer.Comparator): def log(self, other): @@ -589,17 +610,18 @@ along with a :class:`.custom_op` to produce the factorial expression:: from sqlalchemy.sql.expression import UnaryExpression from sqlalchemy.sql import operators + class MyInteger(Integer): class comparator_factory(Integer.Comparator): def factorial(self): - return UnaryExpression(self.expr, - modifier=operators.custom_op("!"), - type_=MyInteger) + return UnaryExpression( + self.expr, modifier=operators.custom_op("!"), type_=MyInteger + ) Using the above type:: >>> from sqlalchemy.sql import column - >>> print(column('x', MyInteger).factorial()) + >>> print(column("x", MyInteger).factorial()) x ! .. seealso:: @@ -651,8 +673,10 @@ datatype. For example:: >>> from sqlalchemy import Table, Column, MetaData, create_engine, PickleType, Integer >>> metadata = MetaData() - >>> my_table = Table("my_table", metadata, Column('id', Integer), Column("data", PickleType)) - >>> engine = create_engine("sqlite://", echo='debug') + >>> my_table = Table( + ... "my_table", metadata, Column("id", Integer), Column("data", PickleType) + ... 
) + >>> engine = create_engine("sqlite://", echo="debug") >>> my_table.create(engine) INFO sqlalchemy.engine.base.Engine CREATE TABLE my_table ( @@ -703,7 +727,9 @@ use reflection in combination with explicit :class:`_schema.Column` objects for columns for which we want to use a custom or decorated datatype:: >>> metadata_three = MetaData() - >>> my_reflected_table = Table("my_table", metadata_three, Column("data", PickleType), autoload_with=engine) + >>> my_reflected_table = Table( + ... "my_table", metadata_three, Column("data", PickleType), autoload_with=engine + ... ) The ``my_reflected_table`` object above is reflected, and will load the definition of the "id" column from the SQLite database. But for the "data" @@ -726,6 +752,7 @@ for example we knew that we wanted all :class:`.BLOB` datatypes to in fact be from sqlalchemy import PickleType from sqlalchemy import Table + @event.listens_for(Table, "column_reflect") def _setup_pickletype(inspector, table, column_info): if isinstance(column_info["type"], BLOB): @@ -741,4 +768,4 @@ In practice, the above event-based approach would likely have additional rules in order to affect only those columns where the datatype is important, such as a lookup table of table names and possibly column names, or other heuristics in order to accurately determine which columns should be established with an -in Python datatype. \ No newline at end of file +in Python datatype. 
diff --git a/doc/build/core/ddl.rst b/doc/build/core/ddl.rst index 9c2fed198d..95665f26b9 100644 --- a/doc/build/core/ddl.rst +++ b/doc/build/core/ddl.rst @@ -32,9 +32,11 @@ other DDL elements except it accepts a string which is the text to be emitted: event.listen( metadata, "after_create", - DDL("ALTER TABLE users ADD CONSTRAINT " + DDL( + "ALTER TABLE users ADD CONSTRAINT " "cst_user_name_length " - " CHECK (length(user_name) >= 8)") + " CHECK (length(user_name) >= 8)" + ), ) A more comprehensive method of creating libraries of DDL constructs is to use @@ -54,9 +56,10 @@ method. For example, if we wanted to create a trigger but only on the PostgreSQL backend, we could invoke this as:: mytable = Table( - 'mytable', metadata, - Column('id', Integer, primary_key=True), - Column('data', String(50)) + "mytable", + metadata, + Column("id", Integer, primary_key=True), + Column("data", String(50)), ) func = DDL( @@ -73,30 +76,18 @@ the PostgreSQL backend, we could invoke this as:: "FOR EACH ROW EXECUTE PROCEDURE my_func();" ) - event.listen( - mytable, - 'after_create', - func.execute_if(dialect='postgresql') - ) + event.listen(mytable, "after_create", func.execute_if(dialect="postgresql")) - event.listen( - mytable, - 'after_create', - trigger.execute_if(dialect='postgresql') - ) + event.listen(mytable, "after_create", trigger.execute_if(dialect="postgresql")) The :paramref:`.DDLElement.execute_if.dialect` keyword also accepts a tuple of string dialect names:: event.listen( - mytable, - "after_create", - trigger.execute_if(dialect=('postgresql', 'mysql')) + mytable, "after_create", trigger.execute_if(dialect=("postgresql", "mysql")) ) event.listen( - mytable, - "before_drop", - trigger.execute_if(dialect=('postgresql', 'mysql')) + mytable, "before_drop", trigger.execute_if(dialect=("postgresql", "mysql")) ) The :meth:`.DDLElement.execute_if` method can also work against a callable @@ -108,27 +99,29 @@ first looking within the PostgreSQL catalogs to see if it exists: 
def should_create(ddl, target, connection, **kw): row = connection.execute( - "select conname from pg_constraint where conname='%s'" % - ddl.element.name).scalar() + "select conname from pg_constraint where conname='%s'" % ddl.element.name + ).scalar() return not bool(row) + def should_drop(ddl, target, connection, **kw): return not should_create(ddl, target, connection, **kw) + event.listen( users, "after_create", DDL( "ALTER TABLE users ADD CONSTRAINT " "cst_user_name_length CHECK (length(user_name) >= 8)" - ).execute_if(callable_=should_create) + ).execute_if(callable_=should_create), ) event.listen( users, "before_drop", - DDL( - "ALTER TABLE users DROP CONSTRAINT cst_user_name_length" - ).execute_if(callable_=should_drop) + DDL("ALTER TABLE users DROP CONSTRAINT cst_user_name_length").execute_if( + callable_=should_drop + ), ) {sql}users.create(engine) @@ -198,22 +191,20 @@ constraints, using these as we did in our previous example of def should_create(ddl, target, connection, **kw): row = connection.execute( - "select conname from pg_constraint where conname='%s'" % - ddl.element.name).scalar() + "select conname from pg_constraint where conname='%s'" % ddl.element.name + ).scalar() return not bool(row) + def should_drop(ddl, target, connection, **kw): return not should_create(ddl, target, connection, **kw) + event.listen( - users, - "after_create", - AddConstraint(constraint).execute_if(callable_=should_create) + users, "after_create", AddConstraint(constraint).execute_if(callable_=should_create) ) event.listen( - users, - "before_drop", - DropConstraint(constraint).execute_if(callable_=should_drop) + users, "before_drop", DropConstraint(constraint).execute_if(callable_=should_drop) ) {sql}users.create(engine) diff --git a/doc/build/core/defaults.rst b/doc/build/core/defaults.rst index bccc8375c1..60e7e2bc57 100644 --- a/doc/build/core/defaults.rst +++ b/doc/build/core/defaults.rst @@ -59,9 +59,7 @@ Scalar Defaults The simplest kind of default is a scalar 
value used as the default value of a column:: - Table("mytable", metadata_obj, - Column("somecolumn", Integer, default=12) - ) + Table("mytable", metadata_obj, Column("somecolumn", Integer, default=12)) Above, the value "12" will be bound as the column value during an INSERT if no other value is supplied. @@ -70,10 +68,7 @@ A scalar value may also be associated with an UPDATE statement, though this is not very common (as UPDATE statements are usually looking for dynamic defaults):: - Table("mytable", metadata_obj, - Column("somecolumn", Integer, onupdate=25) - ) - + Table("mytable", metadata_obj, Column("somecolumn", Integer, onupdate=25)) Python-Executed Functions ------------------------- @@ -86,13 +81,18 @@ incrementing counter to a primary key column:: # a function which counts upwards i = 0 + + def mydefault(): global i i += 1 return i - t = Table("mytable", metadata_obj, - Column('id', Integer, primary_key=True, default=mydefault), + + t = Table( + "mytable", + metadata_obj, + Column("id", Integer, primary_key=True, default=mydefault), ) It should be noted that for real "incrementing sequence" behavior, the @@ -109,11 +109,12 @@ the :paramref:`_schema.Column.onupdate` attribute:: import datetime - t = Table("mytable", metadata_obj, - Column('id', Integer, primary_key=True), - + t = Table( + "mytable", + metadata_obj, + Column("id", Integer, primary_key=True), # define 'last_updated' to be populated with datetime.now() - Column('last_updated', DateTime, onupdate=datetime.datetime.now), + Column("last_updated", DateTime, onupdate=datetime.datetime.now), ) When an update statement executes and no value is passed for ``last_updated``, @@ -139,11 +140,14 @@ updated on the row. 
To access the context, provide a function that accepts a single ``context`` argument:: def mydefault(context): - return context.get_current_parameters()['counter'] + 12 + return context.get_current_parameters()["counter"] + 12 - t = Table('mytable', metadata_obj, - Column('counter', Integer), - Column('counter_plus_twelve', Integer, default=mydefault, onupdate=mydefault) + + t = Table( + "mytable", + metadata_obj, + Column("counter", Integer), + Column("counter_plus_twelve", Integer, default=mydefault, onupdate=mydefault), ) The above default generation function is applied so that it will execute for @@ -184,18 +188,21 @@ The :paramref:`_schema.Column.default` and :paramref:`_schema.Column.onupdate` k also be passed SQL expressions, which are in most cases rendered inline within the INSERT or UPDATE statement:: - t = Table("mytable", metadata_obj, - Column('id', Integer, primary_key=True), - + t = Table( + "mytable", + metadata_obj, + Column("id", Integer, primary_key=True), # define 'create_date' to default to now() - Column('create_date', DateTime, default=func.now()), - + Column("create_date", DateTime, default=func.now()), # define 'key' to pull its default from the 'keyvalues' table - Column('key', String(20), default=select(keyvalues.c.key).where(keyvalues.c.type='type1')), - + Column( + "key", + String(20), + default=select(keyvalues.c.key).where(keyvalues.c.type="type1"), + ), # define 'last_modified' to use the current_timestamp SQL function on update - Column('last_modified', DateTime, onupdate=func.utc_timestamp()) - ) + Column("last_modified", DateTime, onupdate=func.utc_timestamp()), + ) Above, the ``create_date`` column will be populated with the result of the ``now()`` SQL function (which, depending on backend, compiles into ``NOW()`` @@ -257,10 +264,12 @@ placed in the CREATE TABLE statement during a :meth:`_schema.Table.create` opera .. 
sourcecode:: python+sql - t = Table('test', metadata_obj, - Column('abc', String(20), server_default='abc'), - Column('created_at', DateTime, server_default=func.sysdate()), - Column('index_value', Integer, server_default=text("0")) + t = Table( + "test", + metadata_obj, + Column("abc", String(20), server_default="abc"), + Column("created_at", DateTime, server_default=func.sysdate()), + Column("index_value", Integer, server_default=text("0")), ) A create call for the above table will produce:: @@ -296,10 +305,12 @@ may be called out using :class:`.FetchedValue` as a marker:: from sqlalchemy.schema import FetchedValue - t = Table('test', metadata_obj, - Column('id', Integer, primary_key=True), - Column('abc', TIMESTAMP, server_default=FetchedValue()), - Column('def', String(20), server_onupdate=FetchedValue()) + t = Table( + "test", + metadata_obj, + Column("id", Integer, primary_key=True), + Column("abc", TIMESTAMP, server_default=FetchedValue()), + Column("def", String(20), server_onupdate=FetchedValue()), ) The :class:`.FetchedValue` indicator does not affect the rendered DDL for the @@ -344,13 +355,17 @@ The :class:`~sqlalchemy.schema.Sequence` may be placed on any column as a configured to fire off during UPDATE operations if desired. 
It is most commonly used in conjunction with a single integer primary key column:: - table = Table("cartitems", metadata_obj, + table = Table( + "cartitems", + metadata_obj, Column( "cart_id", Integer, - Sequence('cart_id_seq', metadata=metadata_obj), primary_key=True), + Sequence("cart_id_seq", metadata=metadata_obj), + primary_key=True, + ), Column("description", String(40)), - Column("createdate", DateTime()) + Column("createdate", DateTime()), ) Where above, the table "cartitems" is associated with a sequence named @@ -397,7 +412,7 @@ object, it can be invoked with its "next value" instruction by passing it directly to a SQL execution method:: with my_engine.connect() as conn: - seq = Sequence('some_sequence') + seq = Sequence("some_sequence") nextid = conn.execute(seq) In order to embed the "next value" function of a :class:`.Sequence` @@ -405,7 +420,7 @@ inside of a SQL statement like a SELECT or INSERT, use the :meth:`.Sequence.next method, which will render at statement compilation time a SQL function that is appropriate for the target backend:: - >>> my_seq = Sequence('some_sequence') + >>> my_seq = Sequence("some_sequence") >>> stmt = select(my_seq.next_value()) >>> print(stmt.compile(dialect=postgresql.dialect())) SELECT nextval('some_sequence') AS next_value_1 @@ -418,24 +433,29 @@ Associating a Sequence with the MetaData For many years, the SQLAlchemy documentation referred to the example of associating a :class:`.Sequence` with a table as follows:: - table = Table("cartitems", metadata_obj, - Column("cart_id", Integer, Sequence('cart_id_seq'), - primary_key=True), + table = Table( + "cartitems", + metadata_obj, + Column("cart_id", Integer, Sequence("cart_id_seq"), primary_key=True), Column("description", String(40)), - Column("createdate", DateTime()) + Column("createdate", DateTime()), ) While the above is a prominent idiomatic pattern, it is recommended that the :class:`.Sequence` in most cases be explicitly associated with the 
:class:`_schema.MetaData`, using the :paramref:`.Sequence.metadata` parameter:: - table = Table("cartitems", metadata_obj, + table = Table( + "cartitems", + metadata_obj, Column( "cart_id", Integer, - Sequence('cart_id_seq', metadata=metadata_obj), primary_key=True), + Sequence("cart_id_seq", metadata=metadata_obj), + primary_key=True, + ), Column("description", String(40)), - Column("createdate", DateTime()) + Column("createdate", DateTime()), ) The :class:`.Sequence` object is a first class @@ -480,8 +500,8 @@ The preceding sections illustrate how to associate a :class:`.Sequence` with a :class:`_schema.Column` as the **Python side default generator**:: Column( - "cart_id", Integer, Sequence('cart_id_seq', metadata=metadata_obj), - primary_key=True) + "cart_id", Integer, Sequence("cart_id_seq", metadata=metadata_obj), primary_key=True + ) In the above case, the :class:`.Sequence` will automatically be subject to CREATE SEQUENCE / DROP SEQUENCE DDL when the related :class:`_schema.Table` @@ -497,24 +517,30 @@ we illustrate the same :class:`.Sequence` being associated with the :class:`_schema.Column` both as the Python-side default generator as well as the server-side default generator:: - cart_id_seq = Sequence('cart_id_seq', metadata=metadata_obj) - table = Table("cartitems", metadata_obj, + cart_id_seq = Sequence("cart_id_seq", metadata=metadata_obj) + table = Table( + "cartitems", + metadata_obj, Column( - "cart_id", Integer, cart_id_seq, - server_default=cart_id_seq.next_value(), primary_key=True), + "cart_id", + Integer, + cart_id_seq, + server_default=cart_id_seq.next_value(), + primary_key=True, + ), Column("description", String(40)), - Column("createdate", DateTime()) + Column("createdate", DateTime()), ) or with the ORM:: class CartItem(Base): - __tablename__ = 'cartitems' + __tablename__ = "cartitems" - cart_id_seq = Sequence('cart_id_seq', metadata=Base.metadata) + cart_id_seq = Sequence("cart_id_seq", metadata=Base.metadata) cart_id = Column( - 
Integer, cart_id_seq, - server_default=cart_id_seq.next_value(), primary_key=True) + Integer, cart_id_seq, server_default=cart_id_seq.next_value(), primary_key=True + ) description = Column(String(40)) createdate = Column(DateTime) @@ -665,8 +691,8 @@ Example:: data = Table( "data", metadata_obj, - Column('id', Integer, Identity(start=42, cycle=True), primary_key=True), - Column('data', String) + Column("id", Integer, Identity(start=42, cycle=True), primary_key=True), + Column("data", String), ) The DDL for the ``data`` table when run on a PostgreSQL 12 backend will look diff --git a/doc/build/core/engines.rst b/doc/build/core/engines.rst index ffbfc10888..f27caa2d4f 100644 --- a/doc/build/core/engines.rst +++ b/doc/build/core/engines.rst @@ -22,7 +22,8 @@ Creating an engine is just a matter of issuing a single call, :func:`_sa.create_engine()`:: from sqlalchemy import create_engine - engine = create_engine('postgresql://scott:tiger@localhost:5432/mydatabase') + + engine = create_engine("postgresql://scott:tiger@localhost:5432/mydatabase") The above engine creates a :class:`.Dialect` object tailored towards PostgreSQL, as well as a :class:`_pool.Pool` object which will establish a DBAPI @@ -118,13 +119,13 @@ The PostgreSQL dialect uses psycopg2 as the default DBAPI. Other PostgreSQL DBAPIs include pg8000 and asyncpg:: # default - engine = create_engine('postgresql://scott:tiger@localhost/mydatabase') + engine = create_engine("postgresql://scott:tiger@localhost/mydatabase") # psycopg2 - engine = create_engine('postgresql+psycopg2://scott:tiger@localhost/mydatabase') + engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/mydatabase") # pg8000 - engine = create_engine('postgresql+pg8000://scott:tiger@localhost/mydatabase') + engine = create_engine("postgresql+pg8000://scott:tiger@localhost/mydatabase") More notes on connecting to PostgreSQL at :ref:`postgresql_toplevel`. @@ -135,13 +136,13 @@ The MySQL dialect uses mysqlclient as the default DBAPI. 
There are other MySQL DBAPIs available, including PyMySQL:: # default - engine = create_engine('mysql://scott:tiger@localhost/foo') + engine = create_engine("mysql://scott:tiger@localhost/foo") # mysqlclient (a maintained fork of MySQL-Python) - engine = create_engine('mysql+mysqldb://scott:tiger@localhost/foo') + engine = create_engine("mysql+mysqldb://scott:tiger@localhost/foo") # PyMySQL - engine = create_engine('mysql+pymysql://scott:tiger@localhost/foo') + engine = create_engine("mysql+pymysql://scott:tiger@localhost/foo") More notes on connecting to MySQL at :ref:`mysql_toplevel`. @@ -150,9 +151,9 @@ Oracle The Oracle dialect uses cx_oracle as the default DBAPI:: - engine = create_engine('oracle://scott:tiger@127.0.0.1:1521/sidname') + engine = create_engine("oracle://scott:tiger@127.0.0.1:1521/sidname") - engine = create_engine('oracle+cx_oracle://scott:tiger@tnsname') + engine = create_engine("oracle+cx_oracle://scott:tiger@tnsname") More notes on connecting to Oracle at :ref:`oracle_toplevel`. @@ -163,10 +164,10 @@ The SQL Server dialect uses pyodbc as the default DBAPI. pymssql is also available:: # pyodbc - engine = create_engine('mssql+pyodbc://scott:tiger@mydsn') + engine = create_engine("mssql+pyodbc://scott:tiger@mydsn") # pymssql - engine = create_engine('mssql+pymssql://scott:tiger@hostname:port/dbname') + engine = create_engine("mssql+pymssql://scott:tiger@hostname:port/dbname") More notes on connecting to SQL Server at :ref:`mssql_toplevel`. 
@@ -182,22 +183,22 @@ For a relative file path, this requires three slashes:: # sqlite:/// # where is relative: - engine = create_engine('sqlite:///foo.db') + engine = create_engine("sqlite:///foo.db") And for an absolute file path, the three slashes are followed by the absolute path:: # Unix/Mac - 4 initial slashes in total - engine = create_engine('sqlite:////absolute/path/to/foo.db') + engine = create_engine("sqlite:////absolute/path/to/foo.db") # Windows - engine = create_engine('sqlite:///C:\\path\\to\\foo.db') + engine = create_engine("sqlite:///C:\\path\\to\\foo.db") # Windows alternative using raw string - engine = create_engine(r'sqlite:///C:\path\to\foo.db') + engine = create_engine(r"sqlite:///C:\path\to\foo.db") To use a SQLite ``:memory:`` database, specify an empty URL:: - engine = create_engine('sqlite://') + engine = create_engine("sqlite://") More notes on connecting to SQLite at :ref:`sqlite_toplevel`. @@ -263,7 +264,9 @@ Engine Creation API for keys and either strings or tuples of strings for values, e.g.:: >>> from sqlalchemy.engine import make_url - >>> url = make_url("postgresql://user:pass@host/dbname?alt_host=host1&alt_host=host2&ssl_cipher=%2Fpath%2Fto%2Fcrt") + >>> url = make_url( + ... "postgresql://user:pass@host/dbname?alt_host=host1&alt_host=host2&ssl_cipher=%2Fpath%2Fto%2Fcrt" + ... ) >>> url.query immutabledict({'alt_host': ('host1', 'host2'), 'ssl_cipher': '/path/to/crt'}) @@ -335,9 +338,7 @@ often specified in the query string of the URL directly. A common example of this is DBAPIs that accept an argument ``encoding`` for character encodings, such as most MySQL DBAPIs:: - engine = create_engine( - "mysql+pymysql://user:pass@host/test?charset=utf8mb4" - ) + engine = create_engine("mysql+pymysql://user:pass@host/test?charset=utf8mb4") The advantage of using the query string is that additional DBAPI options may be specified in configuration files in a manner that's portable to the DBAPI @@ -356,7 +357,9 @@ supported at this level. 
method directly as follows:: >>> from sqlalchemy import create_engine - >>> engine = create_engine("mysql+pymysql://some_user:some_pass@some_host/test?charset=utf8mb4") + >>> engine = create_engine( + ... "mysql+pymysql://some_user:some_pass@some_host/test?charset=utf8mb4" + ... ) >>> args, kwargs = engine.dialect.create_connect_args(engine.url) >>> args, kwargs ([], {'host': 'some_host', 'database': 'test', 'user': 'some_user', 'password': 'some_pass', 'charset': 'utf8mb4', 'client_flag': 2}) @@ -381,14 +384,14 @@ underlying implementation the connection:: engine = create_engine( "postgresql://user:pass@hostname/dbname", - connect_args={"connection_factory": MyConnectionFactory} + connect_args={"connection_factory": MyConnectionFactory}, ) Another example is the pyodbc "timeout" parameter:: engine = create_engine( - "mssql+pyodbc://user:pass@sqlsrvr?driver=ODBC+Driver+13+for+SQL+Server", - connect_args={"timeout": 30} + "mssql+pyodbc://user:pass@sqlsrvr?driver=ODBC+Driver+13+for+SQL+Server", + connect_args={"timeout": 30}, ) The above example also illustrates that both URL "query string" parameters as @@ -409,9 +412,10 @@ collections can then be modified in place to alter how they are used:: engine = create_engine("postgresql://user:pass@hostname/dbname") + @event.listens_for(engine, "do_connect") def receive_do_connect(dialect, conn_rec, cargs, cparams): - cparams['connection_factory'] = MyConnectionFactory + cparams["connection_factory"] = MyConnectionFactory .. _engines_dynamic_tokens: @@ -428,9 +432,10 @@ parameter, this could be implemented as:: engine = create_engine("postgresql://user@hostname/dbname") + @event.listens_for(engine, "do_connect") def provide_token(dialect, conn_rec, cargs, cparams): - cparams['token'] = get_authentication_token() + cparams["token"] = get_authentication_token() .. 
seealso:: @@ -449,9 +454,8 @@ SQLAlchemy:: from sqlalchemy import event - engine = create_engine( - "postgresql://user:pass@hostname/dbname" - ) + engine = create_engine("postgresql://user:pass@hostname/dbname") + @event.listens_for(engine, "connect") def connect(dbapi_connection, connection_record): @@ -459,7 +463,6 @@ SQLAlchemy:: cursor_obj.execute("SET some session variables") cursor_obj.close() - Fully Replacing the DBAPI ``connect()`` function ------------------------------------------------ @@ -469,9 +472,8 @@ and returning it:: from sqlalchemy import event - engine = create_engine( - "postgresql://user:pass@hostname/dbname" - ) + engine = create_engine("postgresql://user:pass@hostname/dbname") + @event.listens_for(engine, "do_connect") def receive_do_connect(dialect, conn_rec, cargs, cparams): @@ -531,7 +533,7 @@ For example, to log SQL queries using Python logging instead of the import logging logging.basicConfig() - logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO) + logging.getLogger("sqlalchemy.engine").setLevel(logging.INFO) By default, the log level is set to ``logging.WARN`` within the entire ``sqlalchemy`` namespace so that no log operations occur, even within an @@ -559,10 +561,9 @@ parameters are a shortcut to immediate logging to ``sys.stdout``:: >>> from sqlalchemy import create_engine, text - >>> e = create_engine("sqlite://", echo=True, echo_pool='debug') + >>> e = create_engine("sqlite://", echo=True, echo_pool="debug") >>> with e.connect() as conn: - ... print(conn.scalar(text("select 'hi'"))) - ... + ... 
print(conn.scalar(text("select 'hi'"))) 2020-10-24 12:54:57,701 DEBUG sqlalchemy.pool.impl.SingletonThreadPool Created new connection 2020-10-24 12:54:57,701 DEBUG sqlalchemy.pool.impl.SingletonThreadPool Connection checked out from pool 2020-10-24 12:54:57,702 INFO sqlalchemy.engine.Engine select 'hi' @@ -574,6 +575,7 @@ parameters are a shortcut to immediate logging to ``sys.stdout``:: Use of these flags is roughly equivalent to:: import logging + logging.basicConfig() logging.getLogger("sqlalchemy.engine").setLevel(logging.INFO) logging.getLogger("sqlalchemy.pool").setLevel(logging.DEBUG) @@ -597,10 +599,9 @@ string. To set this to a specific name, use the >>> from sqlalchemy import create_engine >>> from sqlalchemy import text - >>> e = create_engine("sqlite://", echo=True, logging_name='myengine') + >>> e = create_engine("sqlite://", echo=True, logging_name="myengine") >>> with e.connect() as conn: ... conn.execute(text("select 'hi'")) - ... 2020-10-24 12:47:04,291 INFO sqlalchemy.engine.Engine.myengine select 'hi' 2020-10-24 12:47:04,292 INFO sqlalchemy.engine.Engine.myengine () @@ -669,7 +670,6 @@ these parameters from being logged for privacy purposes, enable the >>> e = create_engine("sqlite://", echo=True, hide_parameters=True) >>> with e.connect() as conn: ... conn.execute(text("select :some_private_name"), {"some_private_name": "pii"}) - ... 2020-10-24 12:48:32,808 INFO sqlalchemy.engine.Engine select ? 
2020-10-24 12:48:32,808 INFO sqlalchemy.engine.Engine [SQL parameters hidden due to hide_parameters=True] diff --git a/doc/build/core/event.rst b/doc/build/core/event.rst index af4e33ba9a..fbdc72183e 100644 --- a/doc/build/core/event.rst +++ b/doc/build/core/event.rst @@ -25,16 +25,19 @@ and that a user-defined listener function should receive two positional argument from sqlalchemy.event import listen from sqlalchemy.pool import Pool + def my_on_connect(dbapi_con, connection_record): print("New DBAPI connection:", dbapi_con) - listen(Pool, 'connect', my_on_connect) + + listen(Pool, "connect", my_on_connect) To listen with the :func:`.listens_for` decorator looks like:: from sqlalchemy.event import listens_for from sqlalchemy.pool import Pool + @listens_for(Pool, "connect") def my_on_connect(dbapi_con, connection_record): print("New DBAPI connection:", dbapi_con) @@ -54,9 +57,10 @@ that accepts ``**keyword`` arguments, by passing ``named=True`` to either from sqlalchemy.event import listens_for from sqlalchemy.pool import Pool + @listens_for(Pool, "connect", named=True) def my_on_connect(**kw): - print("New DBAPI connection:", kw['dbapi_connection']) + print("New DBAPI connection:", kw["dbapi_connection"]) When using named argument passing, the names listed in the function argument specification will be used as keys in the dictionary. @@ -68,10 +72,11 @@ as long as the names match up:: from sqlalchemy.event import listens_for from sqlalchemy.pool import Pool + @listens_for(Pool, "connect", named=True) def my_on_connect(dbapi_connection, **kw): print("New DBAPI connection:", dbapi_connection) - print("Connection record:", kw['connection_record']) + print("Connection record:", kw["connection_record"]) Above, the presence of ``**kw`` tells :func:`.listens_for` that arguments should be passed to the function by name, rather than positionally. 
@@ -95,25 +100,26 @@ and objects:: from sqlalchemy.engine import Engine import psycopg2 + def connect(): - return psycopg2.connect(user='ed', host='127.0.0.1', dbname='test') + return psycopg2.connect(user="ed", host="127.0.0.1", dbname="test") + my_pool = QueuePool(connect) - my_engine = create_engine('postgresql://ed@localhost/test') + my_engine = create_engine("postgresql://ed@localhost/test") # associate listener with all instances of Pool - listen(Pool, 'connect', my_on_connect) + listen(Pool, "connect", my_on_connect) # associate listener with all instances of Pool # via the Engine class - listen(Engine, 'connect', my_on_connect) + listen(Engine, "connect", my_on_connect) # associate listener with my_pool - listen(my_pool, 'connect', my_on_connect) + listen(my_pool, "connect", my_on_connect) # associate listener with my_engine.pool - listen(my_engine, 'connect', my_on_connect) - + listen(my_engine, "connect", my_on_connect) .. _event_modifiers: @@ -130,11 +136,12 @@ this value can be supported:: def validate_phone(target, value, oldvalue, initiator): """Strip non-numeric characters from a phone number""" - return re.sub(r'\D', '', value) + return re.sub(r"\D", "", value) + # setup listener on UserContact.phone attribute, instructing # it to use the return value - listen(UserContact.phone, 'set', validate_phone, retval=True) + listen(UserContact.phone, "set", validate_phone, retval=True) Event Reference --------------- diff --git a/doc/build/core/functions.rst b/doc/build/core/functions.rst index efa7c78d33..6fcee6edaa 100644 --- a/doc/build/core/functions.rst +++ b/doc/build/core/functions.rst @@ -44,7 +44,7 @@ common SQL functions that set up the expected return type for each function automatically. 
They are invoked in the same way as any other member of the :data:`_sql.func` namespace:: - select(func.count('*')).select_from(some_table) + select(func.count("*")).select_from(some_table) Note that any name not known to :data:`_sql.func` generates the function name as is - there is no restriction on what SQL functions can be called, known or diff --git a/doc/build/core/future.rst b/doc/build/core/future.rst index 204e401350..6323e732a3 100644 --- a/doc/build/core/future.rst +++ b/doc/build/core/future.rst @@ -15,6 +15,7 @@ by passing the :paramref:`_sa.create_engine.future` flag to :func:`_sa.create_engine`:: from sqlalchemy import create_engine + engine = create_engine("postgresql://user:pass@host/dbname", future=True) Similarly, with the ORM, to enable "future" behavior in the ORM :class:`.Session`, diff --git a/doc/build/core/metadata.rst b/doc/build/core/metadata.rst index 03721c2b6c..154472af5d 100644 --- a/doc/build/core/metadata.rst +++ b/doc/build/core/metadata.rst @@ -37,12 +37,12 @@ The remaining positional arguments are mostly from sqlalchemy import Table, Column, Integer, String user = Table( - 'user', + "user", metadata_obj, - Column('user_id', Integer, primary_key=True), - Column('user_name', String(16), nullable=False), - Column('email_address', String(60)), - Column('nickname', String(50), nullable=False) + Column("user_id", Integer, primary_key=True), + Column("user_name", String(16), nullable=False), + Column("email_address", String(60)), + Column("nickname", String(50), nullable=False), ) Above, a table called ``user`` is described, which contains four columns. The @@ -69,7 +69,7 @@ dependency (that is, each table is preceded by all tables which it references):: >>> for t in metadata_obj.sorted_tables: - ... print(t.name) + ... print(t.name) user user_preference invoice @@ -82,10 +82,12 @@ module-level variables in an application. Once a accessors which allow inspection of its properties.
Given the following :class:`~sqlalchemy.schema.Table` definition:: - employees = Table('employees', metadata_obj, - Column('employee_id', Integer, primary_key=True), - Column('employee_name', String(60), nullable=False), - Column('employee_dept', Integer, ForeignKey("departments.department_id")) + employees = Table( + "employees", + metadata_obj, + Column("employee_id", Integer, primary_key=True), + Column("employee_name", String(60), nullable=False), + Column("employee_dept", Integer, ForeignKey("departments.department_id")), ) Note the :class:`~sqlalchemy.schema.ForeignKey` object used in this table - @@ -100,7 +102,7 @@ table include:: employees.c.employee_id # via string - employees.c['employee_id'] + employees.c["employee_id"] # iterate through all columns for c in employees.c: @@ -171,22 +173,26 @@ will issue the CREATE statements: .. sourcecode:: python+sql - engine = create_engine('sqlite:///:memory:') + engine = create_engine("sqlite:///:memory:") metadata_obj = MetaData() - user = Table('user', metadata_obj, - Column('user_id', Integer, primary_key=True), - Column('user_name', String(16), nullable=False), - Column('email_address', String(60), key='email'), - Column('nickname', String(50), nullable=False) + user = Table( + "user", + metadata_obj, + Column("user_id", Integer, primary_key=True), + Column("user_name", String(16), nullable=False), + Column("email_address", String(60), key="email"), + Column("nickname", String(50), nullable=False), ) - user_prefs = Table('user_prefs', metadata_obj, - Column('pref_id', Integer, primary_key=True), - Column('user_id', Integer, ForeignKey("user.user_id"), nullable=False), - Column('pref_name', String(40), nullable=False), - Column('pref_value', String(100)) + user_prefs = Table( + "user_prefs", + metadata_obj, + Column("pref_id", Integer, primary_key=True), + Column("user_id", Integer, ForeignKey("user.user_id"), nullable=False), + Column("pref_name", String(40), nullable=False), + Column("pref_value", 
String(100)), ) {sql}metadata_obj.create_all(engine) @@ -222,14 +228,16 @@ default issue the CREATE or DROP regardless of the table being present: .. sourcecode:: python+sql - engine = create_engine('sqlite:///:memory:') + engine = create_engine("sqlite:///:memory:") metadata_obj = MetaData() - employees = Table('employees', metadata_obj, - Column('employee_id', Integer, primary_key=True), - Column('employee_name', String(60), nullable=False, key='name'), - Column('employee_dept', Integer, ForeignKey("departments.department_id")) + employees = Table( + "employees", + metadata_obj, + Column("employee_id", Integer, primary_key=True), + Column("employee_name", String(60), nullable=False, key="name"), + Column("employee_dept", Integer, ForeignKey("departments.department_id")), ) {sql}employees.create(engine) CREATE TABLE employees( @@ -340,11 +348,11 @@ using a Core :class:`_schema.Table` object as follows:: metadata_obj = MetaData() financial_info = Table( - 'financial_info', + "financial_info", metadata_obj, - Column('id', Integer, primary_key=True), - Column('value', String(100), nullable=False), - schema='remote_banks' + Column("id", Integer, primary_key=True), + Column("value", String(100), nullable=False), + schema="remote_banks", ) SQL that is rendered using this :class:`_schema.Table`, such as the SELECT @@ -361,7 +369,7 @@ using the combination of the schema and table name. 
We can view this in the :attr:`_schema.MetaData.tables` collection by searching for the key ``'remote_banks.financial_info'``:: - >>> metadata_obj.tables['remote_banks.financial_info'] + >>> metadata_obj.tables["remote_banks.financial_info"] Table('financial_info', MetaData(), Column('id', Integer(), table=, primary_key=True, nullable=False), Column('value', String(length=100), table=, nullable=False), @@ -374,9 +382,9 @@ objects, even if the referring table is also in that same schema:: customer = Table( "customer", metadata_obj, - Column('id', Integer, primary_key=True), - Column('financial_info_id', ForeignKey("remote_banks.financial_info.id")), - schema='remote_banks' + Column("id", Integer, primary_key=True), + Column("financial_info_id", ForeignKey("remote_banks.financial_info.id")), + schema="remote_banks", ) The :paramref:`_schema.Table.schema` argument may also be used with certain @@ -386,7 +394,7 @@ important on a database such as Microsoft SQL Server where there are often dotted "database/owner" tokens. The tokens may be placed directly in the name at once, such as:: - schema="dbo.scott" + schema = "dbo.scott" .. seealso:: @@ -409,10 +417,10 @@ construct:: metadata_obj = MetaData(schema="remote_banks") financial_info = Table( - 'financial_info', + "financial_info", metadata_obj, - Column('id', Integer, primary_key=True), - Column('value', String(100), nullable=False), + Column("id", Integer, primary_key=True), + Column("value", String(100), nullable=False), ) Above, for any :class:`_schema.Table` object (or :class:`_schema.Sequence` object @@ -422,7 +430,7 @@ act as though the parameter were set to the value ``"remote_banks"``. 
This includes that the :class:`_schema.Table` is cataloged in the :class:`_schema.MetaData` using the schema-qualified name, that is:: - metadata_obj.tables['remote_banks.financial_info'] + metadata_obj.tables["remote_banks.financial_info"] When using the :class:`_schema.ForeignKey` or :class:`_schema.ForeignKeyConstraint` objects to refer to this table, either the schema-qualified name or the @@ -432,20 +440,20 @@ table:: # either will work: refers_to_financial_info = Table( - 'refers_to_financial_info', + "refers_to_financial_info", metadata_obj, - Column('id', Integer, primary_key=True), - Column('fiid', ForeignKey('financial_info.id')), + Column("id", Integer, primary_key=True), + Column("fiid", ForeignKey("financial_info.id")), ) # or refers_to_financial_info = Table( - 'refers_to_financial_info', + "refers_to_financial_info", metadata_obj, - Column('id', Integer, primary_key=True), - Column('fiid', ForeignKey('remote_banks.financial_info.id')), + Column("id", Integer, primary_key=True), + Column("fiid", ForeignKey("remote_banks.financial_info.id")), ) When using a :class:`_schema.MetaData` object that sets @@ -458,11 +466,11 @@ to specify that it should not be schema qualified may use the special symbol metadata_obj = MetaData(schema="remote_banks") financial_info = Table( - 'financial_info', + "financial_info", metadata_obj, - Column('id', Integer, primary_key=True), - Column('value', String(100), nullable=False), - schema=BLANK_SCHEMA # will not use "remote_banks" + Column("id", Integer, primary_key=True), + Column("value", String(100), nullable=False), + schema=BLANK_SCHEMA, # will not use "remote_banks" ) .. 
seealso:: @@ -511,6 +519,7 @@ Oracle CURRENT_SCHEMA variable to an alternate name:: engine = create_engine("oracle+cx_oracle://scott:tiger@tsn_name") + @event.listens_for(engine, "connect", insert=True) def set_current_schema(dbapi_connection, connection_record): cursor_obj = dbapi_connection.cursor() @@ -552,11 +561,13 @@ example, MySQL has different table backend types, including "MyISAM" and "InnoDB". This can be expressed with :class:`~sqlalchemy.schema.Table` using ``mysql_engine``:: - addresses = Table('engine_email_addresses', metadata_obj, - Column('address_id', Integer, primary_key=True), - Column('remote_user_id', Integer, ForeignKey(users.c.user_id)), - Column('email_address', String(20)), - mysql_engine='InnoDB' + addresses = Table( + "engine_email_addresses", + metadata_obj, + Column("address_id", Integer, primary_key=True), + Column("remote_user_id", Integer, ForeignKey(users.c.user_id)), + Column("email_address", String(20)), + mysql_engine="InnoDB", ) Other backends may support table-level options as well - these would be diff --git a/doc/build/core/operators.rst b/doc/build/core/operators.rst index d119db1e0c..10b6db3380 100644 --- a/doc/build/core/operators.rst +++ b/doc/build/core/operators.rst @@ -11,17 +11,17 @@ Operator Reference >>> user_table = Table( ... "user_account", ... metadata_obj, - ... Column('id', Integer, primary_key=True), - ... Column('name', String(30)), - ... Column('fullname', String) + ... Column("id", Integer, primary_key=True), + ... Column("name", String(30)), + ... Column("fullname", String), ... ) >>> from sqlalchemy import ForeignKey >>> address_table = Table( ... "address", ... metadata_obj, - ... Column('id', Integer, primary_key=True), - ... Column('user_id', None, ForeignKey('user_account.id')), - ... Column('email_address', String, nullable=False) + ... Column("id", Integer, primary_key=True), + ... Column("user_id", None, ForeignKey("user_account.id")), + ... Column("email_address", String, nullable=False), ... 
) >>> metadata_obj.create_all(engine) BEGIN (implicit) @@ -30,7 +30,7 @@ Operator Reference >>> Base = declarative_base() >>> from sqlalchemy.orm import relationship >>> class User(Base): - ... __tablename__ = 'user_account' + ... __tablename__ = "user_account" ... ... id = Column(Integer, primary_key=True) ... name = Column(String(30)) @@ -39,14 +39,14 @@ Operator Reference ... addresses = relationship("Address", back_populates="user") ... ... def __repr__(self): - ... return f"User(id={self.id!r}, name={self.name!r}, fullname={self.fullname!r})" + ... return f"User(id={self.id!r}, name={self.name!r}, fullname={self.fullname!r})" >>> class Address(Base): - ... __tablename__ = 'address' + ... __tablename__ = "address" ... ... id = Column(Integer, primary_key=True) ... email_address = Column(String, nullable=False) - ... user_id = Column(Integer, ForeignKey('user_account.id')) + ... user_id = Column(Integer, ForeignKey("user_account.id")) ... ... user = relationship("User", back_populates="addresses") ... @@ -55,22 +55,34 @@ Operator Reference >>> conn = engine.connect() >>> from sqlalchemy.orm import Session >>> session = Session(conn) - >>> session.add_all([ - ... User(name="spongebob", fullname="Spongebob Squarepants", addresses=[ - ... Address(email_address="spongebob@sqlalchemy.org") - ... ]), - ... User(name="sandy", fullname="Sandy Cheeks", addresses=[ - ... Address(email_address="sandy@sqlalchemy.org"), - ... Address(email_address="squirrel@squirrelpower.org") - ... ]), - ... User(name="patrick", fullname="Patrick Star", addresses=[ - ... Address(email_address="pat999@aol.com") - ... ]), - ... User(name="squidward", fullname="Squidward Tentacles", addresses=[ - ... Address(email_address="stentcl@sqlalchemy.org") - ... ]), - ... User(name="ehkrabs", fullname="Eugene H. Krabs"), - ... ]) + >>> session.add_all( + ... [ + ... User( + ... name="spongebob", + ... fullname="Spongebob Squarepants", + ... 
addresses=[Address(email_address="spongebob@sqlalchemy.org")], + ... ), + ... User( + ... name="sandy", + ... fullname="Sandy Cheeks", + ... addresses=[ + ... Address(email_address="sandy@sqlalchemy.org"), + ... Address(email_address="squirrel@squirrelpower.org"), + ... ], + ... ), + ... User( + ... name="patrick", + ... fullname="Patrick Star", + ... addresses=[Address(email_address="pat999@aol.com")], + ... ), + ... User( + ... name="squidward", + ... fullname="Squidward Tentacles", + ... addresses=[Address(email_address="stentcl@sqlalchemy.org")], + ... ), + ... User(name="ehkrabs", fullname="Eugene H. Krabs"), + ... ] + ... ) >>> session.commit() BEGIN ... >>> conn.begin() @@ -108,49 +120,49 @@ strings, dates, and many others: * :meth:`_sql.ColumnOperators.__eq__` (Python "``==``" operator):: - >>> print(column('x') == 5) + >>> print(column("x") == 5) x = :x_1 .. * :meth:`_sql.ColumnOperators.__ne__` (Python "``!=``" operator):: - >>> print(column('x') != 5) + >>> print(column("x") != 5) x != :x_1 .. * :meth:`_sql.ColumnOperators.__gt__` (Python "``>``" operator):: - >>> print(column('x') > 5) + >>> print(column("x") > 5) x > :x_1 .. * :meth:`_sql.ColumnOperators.__lt__` (Python "``<``" operator):: - >>> print(column('x') < 5) + >>> print(column("x") < 5) x < :x_1 .. * :meth:`_sql.ColumnOperators.__ge__` (Python "``>=``" operator):: - >>> print(column('x') >= 5) + >>> print(column("x") >= 5) x >= :x_1 .. * :meth:`_sql.ColumnOperators.__le__` (Python "``<=``" operator):: - >>> print(column('x') <= 5) + >>> print(column("x") <= 5) x <= :x_1 .. * :meth:`_sql.ColumnOperators.between`:: - >>> print(column('x').between(5, 10)) + >>> print(column("x").between(5, 10)) x BETWEEN :x_1 AND :x_2 .. 
@@ -171,7 +183,7 @@ IN is available most typically by passing a list of values to the :meth:`_sql.ColumnOperators.in_` method:: - >>> print(column('x').in_([1, 2, 3])) + >>> print(column("x").in_([1, 2, 3])) x IN (__[POSTCOMPILE_x_1]) The special bound form ``__[POSTCOMPILE`` is rendered into individual parameters @@ -211,12 +223,12 @@ NOT IN "NOT IN" is available via the :meth:`_sql.ColumnOperators.not_in` operator:: - >>> print(column('x').not_in([1, 2, 3])) + >>> print(column("x").not_in([1, 2, 3])) (x NOT IN (__[POSTCOMPILE_x_1])) This is typically more easily available by negating with the ``~`` operator:: - >>> print(~column('x').in_([1, 2, 3])) + >>> print(~column("x").in_([1, 2, 3])) (x NOT IN (__[POSTCOMPILE_x_1])) Tuple IN Expressions @@ -229,7 +241,7 @@ building block for tuple comparisons. The :meth:`_sql.Tuple.in_` operator then receives a list of tuples:: >>> from sqlalchemy import tuple_ - >>> tup = tuple_(column('x', Integer), column('y', Integer)) + >>> tup = tuple_(column("x", Integer), column("y", Integer)) >>> expr = tup.in_([(1, 2), (3, 4)]) >>> print(expr) (x, y) IN (__[POSTCOMPILE_param_1]) @@ -256,14 +268,14 @@ operators work with subqueries. The form provides that a :class:`_sql.Select` construct is passed in directly, without any explicit conversion to a named subquery:: - >>> print(column('x').in_(select(user_table.c.id))) + >>> print(column("x").in_(select(user_table.c.id))) x IN (SELECT user_account.id FROM user_account) Tuples work as expected:: >>> print( - ... tuple_(column('x'), column('y')).in_( + ... tuple_(column("x"), column("y")).in_( ... select(user_table.c.id, address_table.c.id).join(address_table) ... ) ... ) @@ -283,14 +295,14 @@ databases support: as " IS NULL". 
The ``NULL`` constant is most easily acquired using regular Python ``None``:: - >>> print(column('x').is_(None)) + >>> print(column("x").is_(None)) x IS NULL SQL NULL is also explicitly available, if needed, using the :func:`_sql.null` construct:: >>> from sqlalchemy import null - >>> print(column('x').is_(null())) + >>> print(column("x").is_(null())) x IS NULL The :meth:`_sql.ColumnOperators.is_` operator is automatically invoked when @@ -300,7 +312,7 @@ databases support: explicitly, paricularly when used with a dynamic value:: >>> a = None - >>> print(column('x') == a) + >>> print(column("x") == a) x IS NULL Note that the Python ``is`` operator is **not overloaded**. Even though @@ -311,26 +323,26 @@ databases support: Similar to :meth:`_sql.ColumnOperators.is_`, produces "IS NOT":: - >>> print(column('x').is_not(None)) + >>> print(column("x").is_not(None)) x IS NOT NULL Is similarly equivalent to ``!= None``:: - >>> print(column('x') != None) + >>> print(column("x") != None) x IS NOT NULL * :meth:`_sql.ColumnOperators.is_distinct_from`: Produces SQL IS DISTINCT FROM:: - >>> print(column('x').is_distinct_from('some value')) + >>> print(column("x").is_distinct_from("some value")) x IS DISTINCT FROM :x_1 * :meth:`_sql.ColumnOperators.isnot_distinct_from`: Produces SQL IS NOT DISTINCT FROM:: - >>> print(column('x').isnot_distinct_from('some value')) + >>> print(column("x").isnot_distinct_from("some value")) x IS NOT DISTINCT FROM :x_1 String Comparisons @@ -338,7 +350,7 @@ String Comparisons * :meth:`_sql.ColumnOperators.like`:: - >>> print(column('x').like('word')) + >>> print(column("x").like("word")) x LIKE :x_1 .. @@ -348,14 +360,14 @@ String Comparisons Case insensitive LIKE makes use of the SQL ``lower()`` function on a generic backend. On the PostgreSQL backend it will use ``ILIKE``:: - >>> print(column('x').ilike('word')) + >>> print(column("x").ilike("word")) lower(x) LIKE lower(:x_1) .. 
* :meth:`_sql.ColumnOperators.notlike`:: - >>> print(column('x').notlike('word')) + >>> print(column("x").notlike("word")) x NOT LIKE :x_1 .. @@ -363,7 +375,7 @@ String Comparisons * :meth:`_sql.ColumnOperators.notilike`:: - >>> print(column('x').notilike('word')) + >>> print(column("x").notilike("word")) lower(x) NOT LIKE lower(:x_1) .. @@ -378,21 +390,21 @@ backends or sometimes a function like ``concat()``: * :meth:`_sql.ColumnOperators.startswith`:: The string containment operators - >>> print(column('x').startswith('word')) + >>> print(column("x").startswith("word")) x LIKE :x_1 || '%' .. * :meth:`_sql.ColumnOperators.endswith`:: - >>> print(column('x').endswith('word')) + >>> print(column("x").endswith("word")) x LIKE '%' || :x_1 .. * :meth:`_sql.ColumnOperators.contains`:: - >>> print(column('x').contains('word')) + >>> print(column("x").contains("word")) x LIKE '%' || :x_1 || '%' .. @@ -408,7 +420,7 @@ behaviors and results on different databases: This is a dialect-specific operator that makes use of the MATCH feature of the underlying database, if available:: - >>> print(column('x').match('word')) + >>> print(column("x").match("word")) x MATCH :x_1 .. @@ -419,13 +431,13 @@ behaviors and results on different databases: for example the PostgreSQL dialect:: >>> from sqlalchemy.dialects import postgresql - >>> print(column('x').regexp_match('word').compile(dialect=postgresql.dialect())) + >>> print(column("x").regexp_match("word").compile(dialect=postgresql.dialect())) x ~ %(x_1)s Or MySQL:: >>> from sqlalchemy.dialects import mysql - >>> print(column('x').regexp_match('word').compile(dialect=mysql.dialect())) + >>> print(column("x").regexp_match("word").compile(dialect=mysql.dialect())) x REGEXP %s .. 
@@ -440,20 +452,20 @@ String Alteration String concatenation:: - >>> print(column('x').concat("some string")) + >>> print(column("x").concat("some string")) x || :x_1 This operator is available via :meth:`_sql.ColumnOperators.__add__`, that is, the Python ``+`` operator, when working with a column expression that derives from :class:`_types.String`:: - >>> print(column('x', String) + "some string") + >>> print(column("x", String) + "some string") x || :x_1 The operator will produce the appropriate database-specific construct, such as on MySQL it's historically been the ``concat()`` SQL function:: - >>> print((column('x', String) + "some string").compile(dialect=mysql.dialect())) + >>> print((column("x", String) + "some string").compile(dialect=mysql.dialect())) concat(x, %s) .. @@ -463,7 +475,7 @@ String Alteration Complementary to :meth:`_sql.ColumnOperators.regexp` this produces REGEXP REPLACE equivalent for the backends which support it:: - >>> print(column('x').regexp_replace('foo', 'bar').compile(dialect=postgresql.dialect())) + >>> print(column("x").regexp_replace("foo", "bar").compile(dialect=postgresql.dialect())) REGEXP_REPLACE(x, %(x_1)s, %(x_2)s) .. @@ -473,7 +485,11 @@ String Alteration Produces the COLLATE SQL operator which provides for specific collations at expression time:: - >>> print((column('x').collate('latin1_german2_ci') == 'Müller').compile(dialect=mysql.dialect())) + >>> print( + ... (column("x").collate("latin1_german2_ci") == "Müller").compile( + ... dialect=mysql.dialect() + ... ) + ... ) (x COLLATE latin1_german2_ci) = %s @@ -481,7 +497,11 @@ String Alteration >>> from sqlalchemy import literal - >>> print((literal('Müller').collate('latin1_german2_ci') == column('x')).compile(dialect=mysql.dialect())) + >>> print( + ... (literal("Müller").collate("latin1_german2_ci") == column("x")).compile( + ... dialect=mysql.dialect() + ... ) + ... ) (%s COLLATE latin1_german2_ci) = x .. 
@@ -491,10 +511,10 @@ Arithmetic Operators * :meth:`_sql.ColumnOperators.__add__`, :meth:`_sql.ColumnOperators.__radd__` (Python "``+``" operator):: - >>> print(column('x') + 5) + >>> print(column("x") + 5) x + :x_1 - >>> print(5 + column('x')) + >>> print(5 + column("x")) :x_1 + x .. @@ -507,10 +527,10 @@ Arithmetic Operators * :meth:`_sql.ColumnOperators.__sub__`, :meth:`_sql.ColumnOperators.__rsub__` (Python "``-``" operator):: - >>> print(column('x') - 5) + >>> print(column("x") - 5) x - :x_1 - >>> print(5 - column('x')) + >>> print(5 - column("x")) :x_1 - x .. @@ -518,19 +538,19 @@ Arithmetic Operators * :meth:`_sql.ColumnOperators.__mul__`, :meth:`_sql.ColumnOperators.__rmul__` (Python "``*``" operator):: - >>> print(column('x') * 5) + >>> print(column("x") * 5) x * :x_1 - >>> print(5 * column('x')) + >>> print(5 * column("x")) :x_1 * x .. * :meth:`_sql.ColumnOperators.__div__`, :meth:`_sql.ColumnOperators.__rdiv__` (Python "``/``" operator):: - >>> print(column('x') / 5) + >>> print(column("x") / 5) x / :x_1 - >>> print(5 / column('x')) + >>> print(5 / column("x")) :x_1 / x .. @@ -538,9 +558,9 @@ Arithmetic Operators * :meth:`_sql.ColumnOperators.__mod__`, :meth:`_sql.ColumnOperators.__rmod__` (Python "``%``" operator):: - >>> print(column('x') % 5) + >>> print(column("x") % 5) x % :x_1 - >>> print(5 % column('x')) + >>> print(5 % column("x")) :x_1 % x .. @@ -553,10 +573,10 @@ The most common conjunction, "AND", is automatically applied if we make repeated :meth:`_sql.Update.where` and :meth:`_sql.Delete.where`:: >>> print( - ... select(address_table.c.email_address). - ... where(user_table.c.name == 'squidward'). - ... where(address_table.c.user_id == user_table.c.id) - ... ) + ... select(address_table.c.email_address) + ... .where(user_table.c.name == "squidward") + ... .where(address_table.c.user_id == user_table.c.id) + ... 
) SELECT address.email_address FROM address, user_account WHERE user_account.name = :name_1 AND address.user_id = user_account.id @@ -564,12 +584,10 @@ The most common conjunction, "AND", is automatically applied if we make repeated :meth:`_sql.Select.where`, :meth:`_sql.Update.where` and :meth:`_sql.Delete.where` also accept multiple expressions with the same effect:: >>> print( - ... select(address_table.c.email_address). - ... where( - ... user_table.c.name == 'squidward', - ... address_table.c.user_id == user_table.c.id - ... ) - ... ) + ... select(address_table.c.email_address).where( + ... user_table.c.name == "squidward", address_table.c.user_id == user_table.c.id + ... ) + ... ) SELECT address.email_address FROM address, user_account WHERE user_account.name = :name_1 AND address.user_id = user_account.id @@ -579,11 +597,10 @@ The "AND" conjunction, as well as its partner "OR", are both available directly >>> from sqlalchemy import and_, or_ >>> print( - ... select(address_table.c.email_address). - ... where( + ... select(address_table.c.email_address).where( ... and_( - ... or_(user_table.c.name == 'squidward', user_table.c.name == 'sandy'), - ... address_table.c.user_id == user_table.c.id + ... or_(user_table.c.name == "squidward", user_table.c.name == "sandy"), + ... address_table.c.user_id == user_table.c.id, ... ) ... ) ... ) @@ -596,13 +613,13 @@ A negation is available using the :func:`_sql.not_` function. 
This will typically invert the operator in a boolean expression:: >>> from sqlalchemy import not_ - >>> print(not_(column('x') == 5)) + >>> print(not_(column("x") == 5)) x != :x_1 It also may apply a keyword such as ``NOT`` when appropriate:: >>> from sqlalchemy import Boolean - >>> print(not_(column('x', Boolean))) + >>> print(not_(column("x", Boolean))) NOT x @@ -622,7 +639,7 @@ The above conjunction functions :func:`_sql.and_`, :func:`_sql.or_`, The Python binary ``&`` operator is overloaded to behave the same as :func:`_sql.and_` (note parenthesis around the two operands):: - >>> print((column('x') == 5) & (column('y') == 10)) + >>> print((column("x") == 5) & (column("y") == 10)) x = :x_1 AND y = :y_1 .. @@ -633,7 +650,7 @@ The above conjunction functions :func:`_sql.and_`, :func:`_sql.or_`, The Python binary ``|`` operator is overloaded to behave the same as :func:`_sql.or_` (note parenthesis around the two operands):: - >>> print((column('x') == 5) | (column('y') == 10)) + >>> print((column("x") == 5) | (column("y") == 10)) x = :x_1 OR y = :y_1 .. @@ -645,11 +662,11 @@ The above conjunction functions :func:`_sql.and_`, :func:`_sql.or_`, as :func:`_sql.not_`, either inverting the existing operator, or applying the ``NOT`` keyword to the expression as a whole:: - >>> print(~(column('x') == 5)) + >>> print(~(column("x") == 5)) x != :x_1 >>> from sqlalchemy import Boolean - >>> print(~column('x', Boolean)) + >>> print(~column("x", Boolean)) NOT x .. @@ -665,4 +682,4 @@ TODO .. Setup code, not for display >>> conn.close() - ROLLBACK \ No newline at end of file + ROLLBACK diff --git a/doc/build/core/pooling.rst b/doc/build/core/pooling.rst index 59223ee7aa..b8800ead4a 100644 --- a/doc/build/core/pooling.rst +++ b/doc/build/core/pooling.rst @@ -35,8 +35,7 @@ directly to :func:`~sqlalchemy.create_engine` as keyword arguments: ``pool_size``, ``max_overflow``, ``pool_recycle`` and ``pool_timeout``. 
For example:: - engine = create_engine('postgresql://me@localhost/mydb', - pool_size=20, max_overflow=0) + engine = create_engine("postgresql://me@localhost/mydb", pool_size=20, max_overflow=0) In the case of SQLite, the :class:`.SingletonThreadPool` or :class:`.NullPool` are selected by the dialect to provide @@ -68,14 +67,16 @@ of building the pool for you. Common options include specifying :class:`.QueuePool` with SQLite:: from sqlalchemy.pool import QueuePool - engine = create_engine('sqlite:///file.db', poolclass=QueuePool) + + engine = create_engine("sqlite:///file.db", poolclass=QueuePool) Disabling pooling using :class:`.NullPool`:: from sqlalchemy.pool import NullPool + engine = create_engine( - 'postgresql+psycopg2://scott:tiger@localhost/test', - poolclass=NullPool) + "postgresql+psycopg2://scott:tiger@localhost/test", poolclass=NullPool + ) Using a Custom Connection Function ---------------------------------- @@ -95,10 +96,12 @@ by any additional options:: import sqlalchemy.pool as pool import psycopg2 + def getconn(): - c = psycopg2.connect(user='ed', host='127.0.0.1', dbname='test') + c = psycopg2.connect(user="ed", host="127.0.0.1", dbname="test") return c + mypool = pool.QueuePool(getconn, max_overflow=10, pool_size=5) DBAPI connections can then be procured from the pool using the @@ -263,6 +266,7 @@ behaviors are needed:: some_engine = create_engine(...) + @event.listens_for(some_engine, "engine_connect") def ping_connection(connection, branch): if branch: @@ -327,6 +331,7 @@ that they are replaced with new ones upon next checkout. This flow is illustrated by the code example below:: from sqlalchemy import create_engine, exc + e = create_engine(...) 
c = e.connect() @@ -365,6 +370,7 @@ such as MySQL that automatically close connections that have been stale after a period of time:: from sqlalchemy import create_engine + e = create_engine("mysql://scott:tiger@localhost/test", pool_recycle=3600) Above, any DBAPI connection that has been open for more than one hour will be invalidated and replaced, @@ -433,8 +439,7 @@ close these connections out. The difference between FIFO and LIFO is basically whether or not its desirable for the pool to keep a full set of connections ready to go even during idle periods:: - engine = create_engine( - "postgreql://", pool_use_lifo=True, pool_pre_ping=True) + engine = create_engine("postgreql://", pool_use_lifo=True, pool_pre_ping=True) Above, we also make use of the :paramref:`_sa.create_engine.pool_pre_ping` flag so that connections which are closed from the server side are gracefully @@ -476,8 +481,8 @@ are three general approaches to this: more than once:: from sqlalchemy.pool import NullPool - engine = create_engine("mysql://user:pass@host/dbname", poolclass=NullPool) + engine = create_engine("mysql://user:pass@host/dbname", poolclass=NullPool) 2. Call :meth:`_engine.Engine.dispose` on any given :class:`_engine.Engine`, passing the :paramref:`.Engine.dispose.close` parameter with a value of @@ -490,19 +495,21 @@ are three general approaches to this: engine = create_engine("mysql+mysqldb://user:pass@host/dbname") + def run_in_process(some_data_record): with engine.connect() as conn: conn.execute(text("...")) + def initializer(): """ensure the parent proc's database connections are not touched - in the new connection pool""" + in the new connection pool""" engine.dispose(close=False) + with Pool(10, initializer=initializer) as p: p.map(run_in_process, data) - .. 
versionadded:: 1.4.33 Added the :paramref:`.Engine.dispose.close` parameter to allow the replacement of a connection pool in a child process without interfering with the connections used by the parent @@ -527,10 +534,12 @@ are three general approaches to this: engine = create_engine("mysql://user:pass@host/dbname") + def run_in_process(): with engine.connect() as conn: conn.execute(text("...")) + # before process starts, ensure engine.dispose() is called engine.dispose() p = Process(target=run_in_process) @@ -545,19 +554,20 @@ are three general approaches to this: engine = create_engine("...") + @event.listens_for(engine, "connect") def connect(dbapi_connection, connection_record): - connection_record.info['pid'] = os.getpid() + connection_record.info["pid"] = os.getpid() + @event.listens_for(engine, "checkout") def checkout(dbapi_connection, connection_record, connection_proxy): pid = os.getpid() - if connection_record.info['pid'] != pid: + if connection_record.info["pid"] != pid: connection_record.dbapi_connection = connection_proxy.dbapi_connection = None raise exc.DisconnectionError( - "Connection record belongs to pid %s, " - "attempting to check out in pid %s" % - (connection_record.info['pid'], pid) + "Connection record belongs to pid %s, " + "attempting to check out in pid %s" % (connection_record.info["pid"], pid) ) Above, we use an approach similar to that described in diff --git a/doc/build/core/reflection.rst b/doc/build/core/reflection.rst index d9547344e7..8c31b7ff00 100644 --- a/doc/build/core/reflection.rst +++ b/doc/build/core/reflection.rst @@ -13,7 +13,7 @@ existing within the database. This process is called *reflection*. 
In the most simple case you need only specify the table name, a :class:`~sqlalchemy.schema.MetaData` object, and the ``autoload_with`` argument:: - >>> messages = Table('messages', metadata_obj, autoload_with=engine) + >>> messages = Table("messages", metadata_obj, autoload_with=engine) >>> [c.name for c in messages.columns] ['message_id', 'message_name', 'date'] @@ -30,7 +30,7 @@ Below, assume the table ``shopping_cart_items`` references a table named ``shopping_carts``. Reflecting the ``shopping_cart_items`` table has the effect such that the ``shopping_carts`` table will also be loaded:: - >>> shopping_cart_items = Table('shopping_cart_items', metadata_obj, autoload_with=engine) + >>> shopping_cart_items = Table("shopping_cart_items", metadata_obj, autoload_with=engine) >>> 'shopping_carts' in metadata_obj.tables: True @@ -43,7 +43,7 @@ you the already-existing :class:`~sqlalchemy.schema.Table` object if one already exists with the given name. Such as below, we can access the already generated ``shopping_carts`` table just by naming it:: - shopping_carts = Table('shopping_carts', metadata_obj) + shopping_carts = Table("shopping_carts", metadata_obj) Of course, it's a good idea to use ``autoload_with=engine`` with the above table regardless. This is so that the table's attributes will be loaded if they have @@ -61,11 +61,16 @@ Individual columns can be overridden with explicit values when reflecting tables; this is handy for specifying custom datatypes, constraints such as primary keys that may not be configured within the database, etc.:: - >>> mytable = Table('mytable', metadata_obj, - ... Column('id', Integer, primary_key=True), # override reflected 'id' to have primary key - ... Column('mydata', Unicode(50)), # override reflected 'mydata' to be Unicode - ... # additional Column objects which require no change are reflected normally - ... autoload_with=some_engine) + >>> mytable = Table( + ... "mytable", + ... metadata_obj, + ... Column( + ... 
"id", Integer, primary_key=True + ... ), # override reflected 'id' to have primary key + ... Column("mydata", Unicode(50)), # override reflected 'mydata' to be Unicode + ... # additional Column objects which require no change are reflected normally + ... autoload_with=some_engine, + ... ) .. seealso:: @@ -92,10 +97,12 @@ extrapolate these constraints. Use the "override" technique for this, specifying explicitly those columns which are part of the primary key or have foreign key constraints:: - my_view = Table("some_view", metadata, - Column("view_id", Integer, primary_key=True), - Column("related_thing", Integer, ForeignKey("othertable.thing_id")), - autoload_with=engine + my_view = Table( + "some_view", + metadata, + Column("view_id", Integer, primary_key=True), + Column("related_thing", Integer, ForeignKey("othertable.thing_id")), + autoload_with=engine, ) Reflecting All Tables at Once @@ -109,8 +116,8 @@ object's dictionary of tables:: metadata_obj = MetaData() metadata_obj.reflect(bind=someengine) - users_table = metadata_obj.tables['users'] - addresses_table = metadata_obj.tables['addresses'] + users_table = metadata_obj.tables["users"] + addresses_table = metadata_obj.tables["addresses"] ``metadata.reflect()`` also provides a handy way to clear or delete all the rows in a database:: @@ -149,7 +156,7 @@ The end result is that :class:`_schema.Table` objects from the "project" schema will be reflected, and they will be populated as schema-qualified with that name:: - >>> metadata_obj.tables['project.messages'] + >>> metadata_obj.tables["project.messages"] Table('messages', MetaData(), Column('message_id', INTEGER(), table=), schema='project') Similarly, an individual :class:`_schema.Table` object that includes the @@ -157,7 +164,7 @@ Similarly, an individual :class:`_schema.Table` object that includes the database schema, overriding any default schema that may have been configured on the owning :class:`_schema.MetaData` collection:: - >>> messages = 
Table('messages', metadata_obj, schema="project", autoload_with=someengine) + >>> messages = Table("messages", metadata_obj, schema="project", autoload_with=someengine) >>> messages Table('messages', MetaData(), Column('message_id', INTEGER(), table=), schema='project') @@ -246,7 +253,9 @@ semantically equivalent:: >>> # reflect in non-schema qualified fashion >>> messages_table_1 = Table("messages", metadata_obj, autoload_with=someengine) >>> # reflect in schema qualified fashion - >>> messages_table_2 = Table("messages", metadata_obj, schema="project", autoload_with=someengine) + >>> messages_table_2 = Table( + ... "messages", metadata_obj, schema="project", autoload_with=someengine + ... ) >>> # two different objects >>> messages_table_1 is messages_table_2 False @@ -280,7 +289,9 @@ fashion then loads a related table that will also be performed in a schema qualified fashion:: >>> # reflect "messages" in a schema qualified fashion - >>> messages_table_1 = Table("messages", metadata_obj, schema="project", autoload_with=someengine) + >>> messages_table_1 = Table( + ... "messages", metadata_obj, schema="project", autoload_with=someengine + ... ) The above ``messages_table_1`` will refer to ``projects`` also in a schema qualified fashion. This "projects" table will be reflected automatically by @@ -343,7 +354,8 @@ database is also available. This is known as the "Inspector":: from sqlalchemy import create_engine from sqlalchemy import inspect - engine = create_engine('...') + + engine = create_engine("...") insp = inspect(engine) print(insp.get_table_names()) diff --git a/doc/build/core/tutorial.rst b/doc/build/core/tutorial.rst index 7a91e39a3f..e0b3e179fd 100644 --- a/doc/build/core/tutorial.rst +++ b/doc/build/core/tutorial.rst @@ -97,7 +97,7 @@ anywhere. To connect we use :func:`~sqlalchemy.create_engine`: .. 
sourcecode:: pycon+sql >>> from sqlalchemy import create_engine - >>> engine = create_engine('sqlite:///:memory:', echo=True) + >>> engine = create_engine("sqlite:///:memory:", echo=True) The ``echo`` flag is a shortcut to setting up SQLAlchemy logging, which is accomplished via Python's standard ``logging`` module. With it enabled, we'll @@ -154,17 +154,21 @@ addresses" for each row in the "users" table: >>> from sqlalchemy import Table, Column, Integer, String, MetaData, ForeignKey >>> metadata_obj = MetaData() - >>> users = Table('users', metadata_obj, - ... Column('id', Integer, primary_key=True), - ... Column('name', String), - ... Column('fullname', String), + >>> users = Table( + ... "users", + ... metadata_obj, + ... Column("id", Integer, primary_key=True), + ... Column("name", String), + ... Column("fullname", String), ... ) - >>> addresses = Table('addresses', metadata_obj, - ... Column('id', Integer, primary_key=True), - ... Column('user_id', None, ForeignKey('users.id')), - ... Column('email_address', String, nullable=False) - ... ) + >>> addresses = Table( + ... "addresses", + ... metadata_obj, + ... Column("id", Integer, primary_key=True), + ... Column("user_id", None, ForeignKey("users.id")), + ... Column("email_address", String, nullable=False), + ... ) All about how to define :class:`~sqlalchemy.schema.Table` objects, as well as how to create them from an existing database automatically, is described in @@ -206,7 +210,7 @@ each table first before creating, so it's safe to call multiple times: issue CREATE TABLE, a "length" may be provided to the :class:`~sqlalchemy.types.String` type as below:: - Column('name', String(50)) + Column("name", String(50)) The length field on :class:`~sqlalchemy.types.String`, as well as similar precision/scale fields available on :class:`~sqlalchemy.types.Integer`, :class:`~sqlalchemy.types.Numeric`, etc. 
are not referenced by @@ -217,15 +221,18 @@ each table first before creating, so it's safe to call multiple times: without being instructed. For that, you use the :class:`~sqlalchemy.schema.Sequence` construct:: from sqlalchemy import Sequence - Column('id', Integer, Sequence('user_id_seq'), primary_key=True) + + Column("id", Integer, Sequence("user_id_seq"), primary_key=True) A full, foolproof :class:`~sqlalchemy.schema.Table` is therefore:: - users = Table('users', metadata_obj, - Column('id', Integer, Sequence('user_id_seq'), primary_key=True), - Column('name', String(50)), - Column('fullname', String(50)), - Column('nickname', String(50)) + users = Table( + "users", + metadata_obj, + Column("id", Integer, Sequence("user_id_seq"), primary_key=True), + Column("name", String(50)), + Column("fullname", String(50)), + Column("nickname", String(50)), ) We include this more verbose :class:`_schema.Table` construct separately @@ -255,7 +262,7 @@ Notice above that the INSERT statement names every column in the ``users`` table. This can be limited by using the ``values()`` method, which establishes the VALUES clause of the INSERT explicitly:: - >>> ins = users.insert().values(name='jack', fullname='Jack Jones') + >>> ins = users.insert().values(name="jack", fullname="Jack Jones") >>> str(ins) 'INSERT INTO users (name, fullname) VALUES (:name, :fullname)' @@ -351,7 +358,7 @@ and use it in the "normal" way: .. sourcecode:: pycon+sql >>> ins = users.insert() - >>> conn.execute(ins, {"id": 2, "name":"wendy", "fullname": "Wendy Williams"}) + >>> conn.execute(ins, {"id": 2, "name": "wendy", "fullname": "Wendy Williams"}) {opensql}INSERT INTO users (id, name, fullname) VALUES (?, ?, ?) [...] (2, 'wendy', 'Wendy Williams') COMMIT @@ -370,12 +377,15 @@ inserted, as we do here to add some email addresses: .. sourcecode:: pycon+sql - >>> conn.execute(addresses.insert(), [ - ... {'user_id': 1, 'email_address' : 'jack@yahoo.com'}, - ... 
{'user_id': 1, 'email_address' : 'jack@msn.com'}, - ... {'user_id': 2, 'email_address' : 'www@www.org'}, - ... {'user_id': 2, 'email_address' : 'wendy@aol.com'}, - ... ]) + >>> conn.execute( + ... addresses.insert(), + ... [ + ... {"user_id": 1, "email_address": "jack@yahoo.com"}, + ... {"user_id": 1, "email_address": "jack@msn.com"}, + ... {"user_id": 2, "email_address": "www@www.org"}, + ... {"user_id": 2, "email_address": "wendy@aol.com"}, + ... ], + ... ) {opensql}INSERT INTO addresses (user_id, email_address) VALUES (?, ?) [...] ((1, 'jack@yahoo.com'), (1, 'jack@msn.com'), (2, 'www@www.org'), (2, 'wendy@aol.com')) COMMIT @@ -484,7 +494,7 @@ programmatically generated, or contains non-ascii characters, the [...] () {stop}>>> row = result.fetchone() - >>> print("name:", row._mapping['name'], "; fullname:", row._mapping['fullname']) + >>> print("name:", row._mapping["name"], "; fullname:", row._mapping["fullname"]) name: jack ; fullname: Jack Jones .. deprecated:: 1.4 @@ -522,7 +532,12 @@ collection: .. sourcecode:: pycon+sql {sql}>>> for row in conn.execute(s): - ... print("name:", row._mapping[users.c.name], "; fullname:", row._mapping[users.c.fullname]) + ... print( + ... "name:", + ... row._mapping[users.c.name], + ... "; fullname:", + ... row._mapping[users.c.fullname], + ... ) SELECT users.id, users.name, users.fullname FROM users [...] () @@ -681,7 +696,7 @@ equals, not equals, etc.: users.name IS NULL >>> # reverse works too - >>> print('fred' > users.c.name) + >>> print("fred" > users.c.name) users.name < :name_1 If we add two integer columns together, we get an addition expression: @@ -707,8 +722,9 @@ not all of them. MySQL users, fear not: .. sourcecode:: pycon+sql - >>> print((users.c.name + users.c.fullname). - ... compile(bind=create_engine('mysql://'))) # doctest: +SKIP + >>> print( + ... (users.c.name + users.c.fullname).compile(bind=create_engine("mysql://")) + ... 
) # doctest: +SKIP concat(users.name, users.fullname) The above illustrates the SQL that's generated for an @@ -720,12 +736,12 @@ always use the :meth:`.Operators.op` method; this generates whatever operator yo .. sourcecode:: pycon+sql - >>> print(users.c.name.op('tiddlywinks')('foo')) + >>> print(users.c.name.op("tiddlywinks")("foo")) users.name tiddlywinks :name_1 This function can also be used to make bitwise operators explicit. For example:: - somecolumn.op('&')(0xff) + somecolumn.op("&")(0xFF) is a bitwise AND of the value in ``somecolumn``. @@ -735,15 +751,14 @@ column. For this case, be sure to make the type explicit, if not what's normally expected, using :func:`.type_coerce`:: from sqlalchemy import type_coerce - expr = type_coerce(somecolumn.op('-%>')('foo'), MySpecialType()) - stmt = select(expr) + expr = type_coerce(somecolumn.op("-%>")("foo"), MySpecialType()) + stmt = select(expr) For boolean operators, use the :meth:`.Operators.bool_op` method, which will ensure that the return type of the expression is handled as boolean:: - somecolumn.bool_op('-->')('some value') - + somecolumn.bool_op("-->")("some value") Commonly Used Operators ------------------------- @@ -760,11 +775,11 @@ objects is at :class:`.ColumnOperators`. * :meth:`equals <.ColumnOperators.__eq__>`:: - statement.where(users.c.name == 'ed') + statement.where(users.c.name == "ed") * :meth:`not equals <.ColumnOperators.__ne__>`:: - statement.where(users.c.name != 'ed') + statement.where(users.c.name != "ed") * :meth:`LIKE <.ColumnOperators.like>`:: @@ -785,23 +800,25 @@ objects is at :class:`.ColumnOperators`. 
* :meth:`IN <.ColumnOperators.in_>`:: - statement.where(users.c.name.in_(['ed', 'wendy', 'jack'])) + statement.where(users.c.name.in_(["ed", "wendy", "jack"])) # works with Select objects too: - statement.where.filter(users.c.name.in_( - select(users.c.name).where(users.c.name.like('%ed%')) - )) + statement.where.filter( + users.c.name.in_(select(users.c.name).where(users.c.name.like("%ed%"))) + ) # use tuple_() for composite (multi-column) queries from sqlalchemy import tuple_ + statement.where( - tuple_(users.c.name, users.c.nickname).\ - in_([('ed', 'edsnickname'), ('wendy', 'windy')]) + tuple_(users.c.name, users.c.nickname).in_( + [("ed", "edsnickname"), ("wendy", "windy")] + ) ) * :meth:`NOT IN <.ColumnOperators.not_in>`:: - statement.where(~users.c.name.in_(['ed', 'wendy', 'jack'])) + statement.where(~users.c.name.in_(["ed", "wendy", "jack"])) * :meth:`IS NULL <.ColumnOperators.is_>`:: @@ -878,16 +895,17 @@ a :meth:`~.ColumnOperators.like`): .. sourcecode:: pycon+sql >>> from sqlalchemy.sql import and_, or_, not_ - >>> print(and_( - ... users.c.name.like('j%'), + >>> print( + ... and_( + ... users.c.name.like("j%"), ... users.c.id == addresses.c.user_id, ... or_( - ... addresses.c.email_address == 'wendy@aol.com', - ... addresses.c.email_address == 'jack@yahoo.com' + ... addresses.c.email_address == "wendy@aol.com", + ... addresses.c.email_address == "jack@yahoo.com", ... ), - ... not_(users.c.id > 5) - ... ) - ... ) + ... not_(users.c.id > 5), + ... ) + ... ) users.name LIKE :name_1 AND users.id = addresses.user_id AND (addresses.email_address = :email_address_1 OR addresses.email_address = :email_address_2) @@ -899,12 +917,14 @@ parenthesis: .. sourcecode:: pycon+sql - >>> print(users.c.name.like('j%') & (users.c.id == addresses.c.user_id) & - ... ( - ... (addresses.c.email_address == 'wendy@aol.com') | \ - ... (addresses.c.email_address == 'jack@yahoo.com') - ... ) \ - ... & ~(users.c.id>5) + >>> print( + ... users.c.name.like("j%") + ... 
& (users.c.id == addresses.c.user_id) + ... & ( + ... (addresses.c.email_address == "wendy@aol.com") + ... | (addresses.c.email_address == "jack@yahoo.com") + ... ) + ... & ~(users.c.id > 5) ... ) users.name LIKE :name_1 AND users.id = addresses.user_id AND (addresses.email_address = :email_address_1 @@ -923,19 +943,16 @@ not have a name: .. sourcecode:: pycon+sql - >>> s = select((users.c.fullname + - ... ", " + addresses.c.email_address). - ... label('title')).\ - ... where( - ... and_( - ... users.c.id == addresses.c.user_id, - ... users.c.name.between('m', 'z'), - ... or_( - ... addresses.c.email_address.like('%@aol.com'), - ... addresses.c.email_address.like('%@msn.com') - ... ) - ... ) - ... ) + >>> s = select((users.c.fullname + ", " + addresses.c.email_address).label("title")).where( + ... and_( + ... users.c.id == addresses.c.user_id, + ... users.c.name.between("m", "z"), + ... or_( + ... addresses.c.email_address.like("%@aol.com"), + ... addresses.c.email_address.like("%@msn.com"), + ... ), + ... ) + ... ) >>> conn.execute(s).fetchall() {opensql}SELECT users.fullname || ? || addresses.email_address AS title FROM users, addresses @@ -954,17 +971,17 @@ A shortcut to using :func:`.and_` is to chain together multiple .. sourcecode:: pycon+sql - >>> s = select((users.c.fullname + - ... ", " + addresses.c.email_address). - ... label('title')).\ - ... where(users.c.id == addresses.c.user_id).\ - ... where(users.c.name.between('m', 'z')).\ - ... where( - ... or_( - ... addresses.c.email_address.like('%@aol.com'), - ... addresses.c.email_address.like('%@msn.com') - ... ) - ... ) + >>> s = ( + ... select((users.c.fullname + ", " + addresses.c.email_address).label("title")) + ... .where(users.c.id == addresses.c.user_id) + ... .where(users.c.name.between("m", "z")) + ... .where( + ... or_( + ... addresses.c.email_address.like("%@aol.com"), + ... addresses.c.email_address.like("%@msn.com"), + ... ) + ... ) + ... 
) >>> conn.execute(s).fetchall() {opensql}SELECT users.fullname || ? || addresses.email_address AS title FROM users, addresses @@ -995,12 +1012,13 @@ unchanged. Below, we create a :func:`_expression.text` object and execute it: >>> from sqlalchemy.sql import text >>> s = text( ... "SELECT users.fullname || ', ' || addresses.email_address AS title " - ... "FROM users, addresses " - ... "WHERE users.id = addresses.user_id " - ... "AND users.name BETWEEN :x AND :y " - ... "AND (addresses.email_address LIKE :e1 " - ... "OR addresses.email_address LIKE :e2)") - >>> conn.execute(s, {"x":"m", "y":"z", "e1":"%@aol.com", "e2":"%@msn.com"}).fetchall() + ... "FROM users, addresses " + ... "WHERE users.id = addresses.user_id " + ... "AND users.name BETWEEN :x AND :y " + ... "AND (addresses.email_address LIKE :e1 " + ... "OR addresses.email_address LIKE :e2)" + ... ) + >>> conn.execute(s, {"x": "m", "y": "z", "e1": "%@aol.com", "e2": "%@msn.com"}).fetchall() {opensql}SELECT users.fullname || ', ' || addresses.email_address AS title FROM users, addresses WHERE users.id = addresses.user_id AND users.name BETWEEN ? AND ? AND @@ -1060,8 +1078,7 @@ When we call the :meth:`_expression.TextClause.columns` method, we get back a j = stmt.join(addresses, stmt.c.id == addresses.c.user_id) - new_stmt = select(stmt.c.id, addresses.c.id).\ - select_from(j).where(stmt.c.name == 'x') + new_stmt = select(stmt.c.id, addresses.c.id).select_from(j).where(stmt.c.name == "x") The positional form of :meth:`_expression.TextClause.columns` is particularly useful when relating textual SQL to existing Core or ORM models, because we can use @@ -1070,16 +1087,18 @@ result column names in the textual SQL: .. sourcecode:: pycon+sql - >>> stmt = text("SELECT users.id, addresses.id, users.id, " + >>> stmt = text( + ... "SELECT users.id, addresses.id, users.id, " ... "users.name, addresses.email_address AS email " ... "FROM users JOIN addresses ON users.id=addresses.user_id " - ... 
"WHERE users.id = 1").columns( - ... users.c.id, - ... addresses.c.id, - ... addresses.c.user_id, - ... users.c.name, - ... addresses.c.email_address - ... ) + ... "WHERE users.id = 1" + ... ).columns( + ... users.c.id, + ... addresses.c.id, + ... addresses.c.user_id, + ... users.c.name, + ... addresses.c.email_address, + ... ) >>> result = conn.execute(stmt) {opensql}SELECT users.id, addresses.id, users.id, users.name, addresses.email_address AS email @@ -1143,18 +1162,20 @@ need to refer to any pre-established :class:`_schema.Table` metadata: .. sourcecode:: pycon+sql - >>> s = select( - ... text("users.fullname || ', ' || addresses.email_address AS title") - ... ).\ - ... where( - ... and_( - ... text("users.id = addresses.user_id"), - ... text("users.name BETWEEN 'm' AND 'z'"), - ... text( - ... "(addresses.email_address LIKE :x " - ... "OR addresses.email_address LIKE :y)") - ... ) - ... ).select_from(text('users, addresses')) + >>> s = ( + ... select(text("users.fullname || ', ' || addresses.email_address AS title")) + ... .where( + ... and_( + ... text("users.id = addresses.user_id"), + ... text("users.name BETWEEN 'm' AND 'z'"), + ... text( + ... "(addresses.email_address LIKE :x " + ... "OR addresses.email_address LIKE :y)" + ... ), + ... ) + ... ) + ... .select_from(text("users, addresses")) + ... ) >>> conn.execute(s, {"x": "%@aol.com", "y": "%@msn.com"}).fetchall() {opensql}SELECT users.fullname || ', ' || addresses.email_address AS title FROM users, addresses @@ -1197,22 +1218,27 @@ be quoted: >>> from sqlalchemy import select, and_, text, String >>> from sqlalchemy.sql import table, literal_column - >>> s = select( - ... literal_column("users.fullname", String) + - ... ', ' + - ... literal_column("addresses.email_address").label("title") - ... ).\ - ... where( - ... and_( - ... literal_column("users.id") == literal_column("addresses.user_id"), - ... text("users.name BETWEEN 'm' AND 'z'"), - ... text( - ... "(addresses.email_address LIKE :x OR " - ... 
"addresses.email_address LIKE :y)") - ... ) - ... ).select_from(table('users')).select_from(table('addresses')) - - >>> conn.execute(s, {"x":"%@aol.com", "y":"%@msn.com"}).fetchall() + >>> s = ( + ... select( + ... literal_column("users.fullname", String) + ... + ", " + ... + literal_column("addresses.email_address").label("title") + ... ) + ... .where( + ... and_( + ... literal_column("users.id") == literal_column("addresses.user_id"), + ... text("users.name BETWEEN 'm' AND 'z'"), + ... text( + ... "(addresses.email_address LIKE :x OR " + ... "addresses.email_address LIKE :y)" + ... ), + ... ) + ... ) + ... .select_from(table("users")) + ... .select_from(table("addresses")) + ... ) + + >>> conn.execute(s, {"x": "%@aol.com", "y": "%@msn.com"}).fetchall() {opensql}SELECT users.fullname || ? || addresses.email_address AS anon_1 FROM users, addresses WHERE users.id = addresses.user_id @@ -1239,10 +1265,11 @@ are rendered fully: .. sourcecode:: pycon+sql >>> from sqlalchemy import func - >>> stmt = select( - ... addresses.c.user_id, - ... func.count(addresses.c.id).label('num_addresses')).\ - ... group_by("user_id").order_by("user_id", "num_addresses") + >>> stmt = ( + ... select(addresses.c.user_id, func.count(addresses.c.id).label("num_addresses")) + ... .group_by("user_id") + ... .order_by("user_id", "num_addresses") + ... ) {sql}>>> conn.execute(stmt).fetchall() SELECT addresses.user_id, count(addresses.id) AS num_addresses @@ -1256,10 +1283,11 @@ name: .. sourcecode:: pycon+sql >>> from sqlalchemy import func, desc - >>> stmt = select( - ... addresses.c.user_id, - ... func.count(addresses.c.id).label('num_addresses')).\ - ... group_by("user_id").order_by("user_id", desc("num_addresses")) + >>> stmt = ( + ... select(addresses.c.user_id, func.count(addresses.c.id).label("num_addresses")) + ... .group_by("user_id") + ... .order_by("user_id", desc("num_addresses")) + ... 
) {sql}>>> conn.execute(stmt).fetchall() SELECT addresses.user_id, count(addresses.id) AS num_addresses @@ -1278,9 +1306,9 @@ by a column name that appears more than once: .. sourcecode:: pycon+sql >>> u1a, u1b = users.alias(), users.alias() - >>> stmt = select(u1a, u1b).\ - ... where(u1a.c.name > u1b.c.name).\ - ... order_by(u1a.c.name) # using "name" here would be ambiguous + >>> stmt = ( + ... select(u1a, u1b).where(u1a.c.name > u1b.c.name).order_by(u1a.c.name) + ... ) # using "name" here would be ambiguous {sql}>>> conn.execute(stmt).fetchall() SELECT users_1.id, users_1.name, users_1.fullname, users_2.id AS id_1, @@ -1325,13 +1353,14 @@ once for each address. We create two :class:`_expression.Alias` constructs aga >>> a1 = addresses.alias() >>> a2 = addresses.alias() - >>> s = select(users).\ - ... where(and_( - ... users.c.id == a1.c.user_id, - ... users.c.id == a2.c.user_id, - ... a1.c.email_address == 'jack@msn.com', - ... a2.c.email_address == 'jack@yahoo.com' - ... )) + >>> s = select(users).where( + ... and_( + ... users.c.id == a1.c.user_id, + ... users.c.id == a2.c.user_id, + ... a1.c.email_address == "jack@msn.com", + ... a2.c.email_address == "jack@yahoo.com", + ... ) + ... ) >>> conn.execute(s).fetchall() {opensql}SELECT users.id, users.name, users.fullname FROM users, addresses AS addresses_1, addresses AS addresses_2 @@ -1355,7 +1384,7 @@ itself, we don't need to be concerned about the generated name. However, for the purposes of debugging, it can be specified by passing a string name to the :meth:`_expression.FromClause.alias` method:: - >>> a1 = addresses.alias('a1') + >>> a1 = addresses.alias("a1") SELECT-oriented constructs which extend from :class:`_expression.SelectBase` may be turned into aliased subqueries using the :meth:`_expression.SelectBase.subquery` method, which @@ -1417,10 +1446,7 @@ username: .. sourcecode:: pycon+sql - >>> print(users.join(addresses, - ... addresses.c.email_address.like(users.c.name + '%') - ... ) - ... 
) + >>> print(users.join(addresses, addresses.c.email_address.like(users.c.name + "%"))) users JOIN addresses ON addresses.email_address LIKE users.name || :name_1 When we create a :func:`_expression.select` construct, SQLAlchemy looks around at the @@ -1431,9 +1457,8 @@ here we make use of the :meth:`_expression.Select.select_from` method: .. sourcecode:: pycon+sql >>> s = select(users.c.fullname).select_from( - ... users.join(addresses, - ... addresses.c.email_address.like(users.c.name + '%')) - ... ) + ... users.join(addresses, addresses.c.email_address.like(users.c.name + "%")) + ... ) {sql}>>> conn.execute(s).fetchall() SELECT users.fullname FROM users JOIN addresses ON addresses.email_address LIKE users.name || ? @@ -1486,8 +1511,12 @@ typically acquires using the :meth:`_expression.Select.cte` method on a .. sourcecode:: pycon+sql - >>> users_cte = select(users.c.id, users.c.name).where(users.c.name == 'wendy').cte() - >>> stmt = select(addresses).where(addresses.c.user_id == users_cte.c.id).order_by(addresses.c.id) + >>> users_cte = select(users.c.id, users.c.name).where(users.c.name == "wendy").cte() + >>> stmt = ( + ... select(addresses) + ... .where(addresses.c.user_id == users_cte.c.id) + ... .order_by(addresses.c.id) + ... ) >>> conn.execute(stmt).fetchall() {opensql}WITH anon_1 AS (SELECT users.id AS id, users.name AS name @@ -1523,8 +1552,14 @@ this form looks like: >>> users_cte = select(users.c.id, users.c.name).cte(recursive=True) >>> users_recursive = users_cte.alias() - >>> users_cte = users_cte.union(select(users.c.id, users.c.name).where(users.c.id > users_recursive.c.id)) - >>> stmt = select(addresses).where(addresses.c.user_id == users_cte.c.id).order_by(addresses.c.id) + >>> users_cte = users_cte.union( + ... select(users.c.id, users.c.name).where(users.c.id > users_recursive.c.id) + ... ) + >>> stmt = ( + ... select(addresses) + ... .where(addresses.c.user_id == users_cte.c.id) + ... .order_by(addresses.c.id) + ... 
) >>> conn.execute(stmt).fetchall() {opensql}WITH RECURSIVE anon_1(id, name) AS (SELECT users.id AS id, users.name AS name @@ -1562,7 +1597,7 @@ at execution time, as here where it converts to positional for SQLite: .. sourcecode:: pycon+sql >>> from sqlalchemy.sql import bindparam - >>> s = users.select().where(users.c.name == bindparam('username')) + >>> s = users.select().where(users.c.name == bindparam("username")) {sql}>>> conn.execute(s, {"username": "wendy"}).fetchall() SELECT users.id, users.name, users.fullname FROM users @@ -1577,7 +1612,9 @@ off to the database: .. sourcecode:: pycon+sql - >>> s = users.select().where(users.c.name.like(bindparam('username', type_=String) + text("'%'"))) + >>> s = users.select().where( + ... users.c.name.like(bindparam("username", type_=String) + text("'%'")) + ... ) {sql}>>> conn.execute(s, {"username": "wendy"}).fetchall() SELECT users.id, users.name, users.fullname FROM users @@ -1591,17 +1628,19 @@ single named value is needed in the execute parameters: .. sourcecode:: pycon+sql - >>> s = select(users, addresses).\ - ... where( - ... or_( - ... users.c.name.like( - ... bindparam('name', type_=String) + text("'%'")), - ... addresses.c.email_address.like( - ... bindparam('name', type_=String) + text("'@%'")) - ... ) - ... ).\ - ... select_from(users.outerjoin(addresses)).\ - ... order_by(addresses.c.id) + >>> s = ( + ... select(users, addresses) + ... .where( + ... or_( + ... users.c.name.like(bindparam("name", type_=String) + text("'%'")), + ... addresses.c.email_address.like( + ... bindparam("name", type_=String) + text("'@%'") + ... ), + ... ) + ... ) + ... .select_from(users.outerjoin(addresses)) + ... .order_by(addresses.c.id) + ... 
) {sql}>>> conn.execute(s, {"name": "jack"}).fetchall() SELECT users.id, users.name, users.fullname, addresses.id AS id_1, addresses.user_id, addresses.email_address @@ -1629,7 +1668,7 @@ generates functions using attribute access: >>> print(func.now()) now() - >>> print(func.concat('x', 'y')) + >>> print(func.concat("x", "y")) concat(:concat_1, :concat_2) By "generates", we mean that **any** SQL function is created based on the word @@ -1657,7 +1696,6 @@ as date and numeric coercions, the type may need to be specified explicitly:: stmt = select(func.date(some_table.c.date_string, type_=Date)) - Functions are most typically used in the columns clause of a select statement, and can also be labeled as well as given a type. Labeling a function is recommended so that the result can be targeted in a result row based on a @@ -1670,11 +1708,8 @@ not important in this case: .. sourcecode:: pycon+sql >>> conn.execute( - ... select( - ... func.max(addresses.c.email_address, type_=String). - ... label('maxemail') - ... ) - ... ).scalar() + ... select(func.max(addresses.c.email_address, type_=String).label("maxemail")) + ... ).scalar() {opensql}SELECT max(addresses.email_address) AS maxemail FROM addresses [...] () @@ -1690,13 +1725,9 @@ well as bind parameters: .. sourcecode:: pycon+sql >>> from sqlalchemy.sql import column - >>> calculate = select(column('q'), column('z'), column('r')).\ - ... select_from( - ... func.calculate( - ... bindparam('x'), - ... bindparam('y') - ... ) - ... ) + >>> calculate = select(column("q"), column("z"), column("r")).select_from( + ... func.calculate(bindparam("x"), bindparam("y")) + ... ) >>> calc = calculate.alias() >>> print(select(users).where(users.c.id > calc.c.z)) SELECT users.id, users.name, users.fullname @@ -1712,10 +1743,9 @@ of our selectable: .. sourcecode:: pycon+sql - >>> calc1 = calculate.alias('c1').unique_params(x=17, y=45) - >>> calc2 = calculate.alias('c2').unique_params(x=5, y=12) - >>> s = select(users).\ - ... 
where(users.c.id.between(calc1.c.z, calc2.c.z)) + >>> calc1 = calculate.alias("c1").unique_params(x=17, y=45) + >>> calc2 = calculate.alias("c2").unique_params(x=5, y=12) + >>> s = select(users).where(users.c.id.between(calc1.c.z, calc2.c.z)) >>> print(s) SELECT users.id, users.name, users.fullname FROM users, @@ -1723,7 +1753,7 @@ of our selectable: (SELECT q, z, r FROM calculate(:x_2, :y_2)) AS c2 WHERE users.id BETWEEN c1.z AND c2.z - >>> s.compile().params # doctest: +SKIP + >>> s.compile().params # doctest: +SKIP {u'x_2': 5, u'y_2': 12, u'y_1': 45, u'x_1': 17} .. seealso:: @@ -1739,10 +1769,7 @@ Any :class:`.FunctionElement`, including functions generated by :data:`~.expression.func`, can be turned into a "window function", that is an OVER clause, using the :meth:`.FunctionElement.over` method:: - >>> s = select( - ... users.c.id, - ... func.row_number().over(order_by=users.c.name) - ... ) + >>> s = select(users.c.id, func.row_number().over(order_by=users.c.name)) >>> print(s) SELECT users.id, row_number() OVER (ORDER BY users.name) AS anon_1 FROM users @@ -1751,12 +1778,7 @@ OVER clause, using the :meth:`.FunctionElement.over` method:: either the :paramref:`.expression.over.rows` or :paramref:`.expression.over.range` parameters:: - >>> s = select( - ... users.c.id, - ... func.row_number().over( - ... order_by=users.c.name, - ... rows=(-2, None)) - ... ) + >>> s = select(users.c.id, func.row_number().over(order_by=users.c.name, rows=(-2, None))) >>> print(s) SELECT users.id, row_number() OVER (ORDER BY users.name ROWS BETWEEN :param_1 PRECEDING AND UNBOUNDED FOLLOWING) AS anon_1 @@ -1830,11 +1852,7 @@ string into one of MySQL's JSON functions: >>> from sqlalchemy import JSON >>> from sqlalchemy import type_coerce >>> from sqlalchemy.dialects import mysql - >>> s = select( - ... type_coerce( - ... {'some_key': {'foo': 'bar'}}, JSON - ... )['some_key'] - ... 
) + >>> s = select(type_coerce({"some_key": {"foo": "bar"}}, JSON)["some_key"]) >>> print(s.compile(dialect=mysql.dialect())) SELECT JSON_EXTRACT(%s, %s) AS anon_1 @@ -1856,10 +1874,8 @@ module level functions :func:`_expression.union` and >>> from sqlalchemy.sql import union >>> u = union( - ... addresses.select(). - ... where(addresses.c.email_address == 'foo@bar.com'), - ... addresses.select(). - ... where(addresses.c.email_address.like('%@yahoo.com')), + ... addresses.select().where(addresses.c.email_address == "foo@bar.com"), + ... addresses.select().where(addresses.c.email_address.like("%@yahoo.com")), ... ).order_by(addresses.c.email_address) {sql}>>> conn.execute(u).fetchall() @@ -1882,10 +1898,8 @@ Also available, though not supported on all databases, are >>> from sqlalchemy.sql import except_ >>> u = except_( - ... addresses.select(). - ... where(addresses.c.email_address.like('%@%.com')), - ... addresses.select(). - ... where(addresses.c.email_address.like('%@msn.com')) + ... addresses.select().where(addresses.c.email_address.like("%@%.com")), + ... addresses.select().where(addresses.c.email_address.like("%@msn.com")), ... ) {sql}>>> conn.execute(u).fetchall() @@ -1910,13 +1924,13 @@ want the "union" to be stated as a subquery: .. sourcecode:: pycon+sql >>> u = except_( - ... union( - ... addresses.select(). - ... where(addresses.c.email_address.like('%@yahoo.com')), - ... addresses.select(). - ... where(addresses.c.email_address.like('%@msn.com')) - ... ).subquery().select(), # apply subquery here - ... addresses.select().where(addresses.c.email_address.like('%@msn.com')) + ... union( + ... addresses.select().where(addresses.c.email_address.like("%@yahoo.com")), + ... addresses.select().where(addresses.c.email_address.like("%@msn.com")), + ... ) + ... .subquery() + ... .select(), # apply subquery here + ... addresses.select().where(addresses.c.email_address.like("%@msn.com")), ... 
) {sql}>>> conn.execute(u).fetchall() SELECT anon_1.id, anon_1.user_id, anon_1.email_address @@ -1966,10 +1980,8 @@ selected from the first SELECT; the SQLAlchemy compiler will ensure these will be rendered without table names:: >>> u = union( - ... addresses.select(). - ... where(addresses.c.email_address == 'foo@bar.com'), - ... addresses.select(). - ... where(addresses.c.email_address.like('%@yahoo.com')), + ... addresses.select().where(addresses.c.email_address == "foo@bar.com"), + ... addresses.select().where(addresses.c.email_address.like("%@yahoo.com")), ... ) >>> u = u.order_by(u.selected_columns.email_address) >>> print(u) @@ -1997,9 +2009,11 @@ or :meth:`_expression.SelectBase.label` method: .. sourcecode:: pycon+sql - >>> subq = select(func.count(addresses.c.id)).\ - ... where(users.c.id == addresses.c.user_id).\ - ... scalar_subquery() + >>> subq = ( + ... select(func.count(addresses.c.id)) + ... .where(users.c.id == addresses.c.user_id) + ... .scalar_subquery() + ... ) The above construct is now a :class:`_expression.ScalarSelect` object, which is an adapter around the original :class:`.~expression.Select` @@ -2022,9 +2036,11 @@ it using :meth:`_expression.SelectBase.label` instead: .. sourcecode:: pycon+sql - >>> subq = select(func.count(addresses.c.id)).\ - ... where(users.c.id == addresses.c.user_id).\ - ... label("address_count") + >>> subq = ( + ... select(func.count(addresses.c.id)) + ... .where(users.c.id == addresses.c.user_id) + ... .label("address_count") + ... ) >>> conn.execute(select(users.c.name, subq)).fetchall() {opensql}SELECT users.name, (SELECT count(addresses.id) AS count_1 FROM addresses @@ -2052,11 +2068,12 @@ still have at least one FROM clause of its own. For example: .. sourcecode:: pycon+sql - >>> stmt = select(addresses.c.user_id).\ - ... where(addresses.c.user_id == users.c.id).\ - ... where(addresses.c.email_address == 'jack@yahoo.com') - >>> enclosing_stmt = select(users.c.name).\ - ... 
where(users.c.id == stmt.scalar_subquery()) + >>> stmt = ( + ... select(addresses.c.user_id) + ... .where(addresses.c.user_id == users.c.id) + ... .where(addresses.c.email_address == "jack@yahoo.com") + ... ) + >>> enclosing_stmt = select(users.c.name).where(users.c.id == stmt.scalar_subquery()) >>> conn.execute(enclosing_stmt).fetchall() {opensql}SELECT users.name FROM users @@ -2075,14 +2092,17 @@ may be correlated: .. sourcecode:: pycon+sql - >>> stmt = select(users.c.id).\ - ... where(users.c.id == addresses.c.user_id).\ - ... where(users.c.name == 'jack').\ - ... correlate(addresses) - >>> enclosing_stmt = select( - ... users.c.name, addresses.c.email_address).\ - ... select_from(users.join(addresses)).\ - ... where(users.c.id == stmt.scalar_subquery()) + >>> stmt = ( + ... select(users.c.id) + ... .where(users.c.id == addresses.c.user_id) + ... .where(users.c.name == "jack") + ... .correlate(addresses) + ... ) + >>> enclosing_stmt = ( + ... select(users.c.name, addresses.c.email_address) + ... .select_from(users.join(addresses)) + ... .where(users.c.id == stmt.scalar_subquery()) + ... ) >>> conn.execute(enclosing_stmt).fetchall() {opensql}SELECT users.name, addresses.email_address FROM users JOIN addresses ON users.id = addresses.user_id @@ -2097,11 +2117,8 @@ as the argument: .. sourcecode:: pycon+sql - >>> stmt = select(users.c.id).\ - ... where(users.c.name == 'wendy').\ - ... correlate(None) - >>> enclosing_stmt = select(users.c.name).\ - ... where(users.c.id == stmt.scalar_subquery()) + >>> stmt = select(users.c.id).where(users.c.name == "wendy").correlate(None) + >>> enclosing_stmt = select(users.c.name).where(users.c.id == stmt.scalar_subquery()) >>> conn.execute(enclosing_stmt).fetchall() {opensql}SELECT users.name FROM users @@ -2117,14 +2134,17 @@ by telling it to correlate all FROM clauses except for ``users``: .. sourcecode:: pycon+sql - >>> stmt = select(users.c.id).\ - ... where(users.c.id == addresses.c.user_id).\ - ... 
where(users.c.name == 'jack').\ - ... correlate_except(users) - >>> enclosing_stmt = select( - ... users.c.name, addresses.c.email_address).\ - ... select_from(users.join(addresses)).\ - ... where(users.c.id == stmt.scalar_subquery()) + >>> stmt = ( + ... select(users.c.id) + ... .where(users.c.id == addresses.c.user_id) + ... .where(users.c.name == "jack") + ... .correlate_except(users) + ... ) + >>> enclosing_stmt = ( + ... select(users.c.name, addresses.c.email_address) + ... .select_from(users.join(addresses)) + ... .where(users.c.id == stmt.scalar_subquery()) + ... ) >>> conn.execute(enclosing_stmt).fetchall() {opensql}SELECT users.name, addresses.email_address FROM users JOIN addresses ON users.id = addresses.user_id @@ -2165,10 +2185,13 @@ to the left side of the JOIN. SQLAlchemy Core supports a statement like the above using the :meth:`_expression.Select.lateral` method as follows:: >>> from sqlalchemy import table, column, select, true - >>> people = table('people', column('people_id'), column('age'), column('name')) - >>> books = table('books', column('book_id'), column('owner_id')) - >>> subq = select(books.c.book_id).\ - ... where(books.c.owner_id == people.c.people_id).lateral("book_subq") + >>> people = table("people", column("people_id"), column("age"), column("name")) + >>> books = table("books", column("book_id"), column("owner_id")) + >>> subq = ( + ... select(books.c.book_id) + ... .where(books.c.owner_id == people.c.people_id) + ... .lateral("book_subq") + ... ) >>> print(select(people).select_from(people.join(subq, true()))) SELECT people.people_id, people.age, people.name FROM people JOIN LATERAL (SELECT books.book_id AS book_id @@ -2237,9 +2260,11 @@ This is provided via the :meth:`_expression.SelectBase.group_by` method: .. sourcecode:: pycon+sql - >>> stmt = select(users.c.name, func.count(addresses.c.id)).\ - ... select_from(users.join(addresses)).\ - ... group_by(users.c.name) + >>> stmt = ( + ... 
select(users.c.name, func.count(addresses.c.id)) + ... .select_from(users.join(addresses)) + ... .group_by(users.c.name) + ... ) >>> conn.execute(stmt).fetchall() {opensql}SELECT users.name, count(addresses.id) AS count_1 FROM users JOIN addresses @@ -2257,10 +2282,12 @@ method: .. sourcecode:: pycon+sql - >>> stmt = select(users.c.name, func.count(addresses.c.id)).\ - ... select_from(users.join(addresses)).\ - ... group_by(users.c.name).\ - ... having(func.length(users.c.name) > 4) + >>> stmt = ( + ... select(users.c.name, func.count(addresses.c.id)) + ... .select_from(users.join(addresses)) + ... .group_by(users.c.name) + ... .having(func.length(users.c.name) > 4) + ... ) >>> conn.execute(stmt).fetchall() {opensql}SELECT users.name, count(addresses.id) AS count_1 FROM users JOIN addresses @@ -2276,10 +2303,11 @@ is the DISTINCT modifier. A simple DISTINCT clause can be added using the .. sourcecode:: pycon+sql - >>> stmt = select(users.c.name).\ - ... where(addresses.c.email_address. - ... contains(users.c.name)).\ - ... distinct() + >>> stmt = ( + ... select(users.c.name) + ... .where(addresses.c.email_address.contains(users.c.name)) + ... .distinct() + ... ) >>> conn.execute(stmt).fetchall() {opensql}SELECT DISTINCT users.name FROM users, addresses @@ -2298,9 +2326,12 @@ into the current backend's methodology: .. sourcecode:: pycon+sql - >>> stmt = select(users.c.name, addresses.c.email_address).\ - ... select_from(users.join(addresses)).\ - ... limit(1).offset(1) + >>> stmt = ( + ... select(users.c.name, addresses.c.email_address) + ... .select_from(users.join(addresses)) + ... .limit(1) + ... .offset(1) + ... ) >>> conn.execute(stmt).fetchall() {opensql}SELECT users.name, addresses.email_address FROM users JOIN addresses ON users.id = addresses.user_id @@ -2326,8 +2357,7 @@ as a value: .. sourcecode:: pycon+sql - >>> stmt = users.update().\ - ... 
values(fullname="Fullname: " + users.c.name) + >>> stmt = users.update().values(fullname="Fullname: " + users.c.name) >>> conn.execute(stmt) {opensql}UPDATE users SET fullname=(? || users.name) [...] ('Fullname: ',) @@ -2351,13 +2381,15 @@ as in the example below: .. sourcecode:: pycon+sql - >>> stmt = users.insert().\ - ... values(name=bindparam('_name') + " .. name") - >>> conn.execute(stmt, [ - ... {'id':4, '_name':'name1'}, - ... {'id':5, '_name':'name2'}, - ... {'id':6, '_name':'name3'}, - ... ]) + >>> stmt = users.insert().values(name=bindparam("_name") + " .. name") + >>> conn.execute( + ... stmt, + ... [ + ... {"id": 4, "_name": "name1"}, + ... {"id": 5, "_name": "name2"}, + ... {"id": 6, "_name": "name3"}, + ... ], + ... ) {opensql}INSERT INTO users (id, name) VALUES (?, (? || ?)) [...] ((4, 'name1', ' .. name'), (5, 'name2', ' .. name'), (6, 'name3', ' .. name')) COMMIT @@ -2369,9 +2401,7 @@ that can be specified: .. sourcecode:: pycon+sql - >>> stmt = users.update().\ - ... where(users.c.name == 'jack').\ - ... values(name='ed') + >>> stmt = users.update().where(users.c.name == "jack").values(name="ed") >>> conn.execute(stmt) {opensql}UPDATE users SET name=? WHERE users.name = ? @@ -2386,14 +2416,19 @@ used to achieve this: .. sourcecode:: pycon+sql - >>> stmt = users.update().\ - ... where(users.c.name == bindparam('oldname')).\ - ... values(name=bindparam('newname')) - >>> conn.execute(stmt, [ - ... {'oldname':'jack', 'newname':'ed'}, - ... {'oldname':'wendy', 'newname':'mary'}, - ... {'oldname':'jim', 'newname':'jake'}, - ... ]) + >>> stmt = ( + ... users.update() + ... .where(users.c.name == bindparam("oldname")) + ... .values(name=bindparam("newname")) + ... ) + >>> conn.execute( + ... stmt, + ... [ + ... {"oldname": "jack", "newname": "ed"}, + ... {"oldname": "wendy", "newname": "mary"}, + ... {"oldname": "jim", "newname": "jake"}, + ... ], + ... ) {opensql}UPDATE users SET name=? WHERE users.name = ? [...] 
(('ed', 'jack'), ('mary', 'wendy'), ('jake', 'jim')) COMMIT @@ -2410,9 +2445,9 @@ subquery using :meth:`_expression.Select.scalar_subquery`: .. sourcecode:: pycon+sql - >>> stmt = select(addresses.c.email_address).\ - ... where(addresses.c.user_id == users.c.id).\ - ... limit(1) + >>> stmt = ( + ... select(addresses.c.email_address).where(addresses.c.user_id == users.c.id).limit(1) + ... ) >>> conn.execute(users.update().values(fullname=stmt.scalar_subquery())) {opensql}UPDATE users SET fullname=(SELECT addresses.email_address FROM addresses @@ -2435,10 +2470,12 @@ multiple tables can be embedded into a single UPDATE statement separated by a co The SQLAlchemy :func:`_expression.update` construct supports both of these modes implicitly, by specifying multiple tables in the WHERE clause:: - stmt = users.update().\ - values(name='ed wood').\ - where(users.c.id == addresses.c.id).\ - where(addresses.c.email_address.startswith('ed%')) + stmt = ( + users.update() + .values(name="ed wood") + .where(users.c.id == addresses.c.id) + .where(addresses.c.email_address.startswith("ed%")) + ) conn.execute(stmt) The resulting SQL from the above statement would render as:: @@ -2450,13 +2487,12 @@ The resulting SQL from the above statement would render as:: When using MySQL, columns from each table can be assigned to in the SET clause directly, using the dictionary form passed to :meth:`_expression.Update.values`:: - stmt = users.update().\ - values({ - users.c.name:'ed wood', - addresses.c.email_address:'ed.wood@foo.com' - }).\ - where(users.c.id == addresses.c.id).\ - where(addresses.c.email_address.startswith('ed%')) + stmt = ( + users.update() + .values({users.c.name: "ed wood", addresses.c.email_address: "ed.wood@foo.com"}) + .where(users.c.id == addresses.c.id) + .where(addresses.c.email_address.startswith("ed%")) + ) The tables are referenced explicitly in the SET clause:: @@ -2506,8 +2542,9 @@ To suit this specific use case, the we supply a **series of 2-tuples** as the 
argument to the method:: - stmt = some_table.update().\ - ordered_values((some_table.c.y, 20), (some_table.c.x, some_table.c.y + 10)) + stmt = some_table.update().ordered_values( + (some_table.c.y, 20), (some_table.c.x, some_table.c.y + 10) + ) The series of 2-tuples is essentially the same structure as a Python dictionary, except that it explicitly suggests a specific ordering. Using the @@ -2539,7 +2576,7 @@ Finally, a delete. This is accomplished easily enough using the COMMIT {stop} - >>> conn.execute(users.delete().where(users.c.name > 'm')) + >>> conn.execute(users.delete().where(users.c.name > "m")) {opensql}DELETE FROM users WHERE users.name > ? [...] ('m',) COMMIT @@ -2559,9 +2596,11 @@ and MySQL, this is the "DELETE USING" syntax, and for SQL Server, it's a :func:`_expression.delete` construct supports both of these modes implicitly, by specifying multiple tables in the WHERE clause:: - stmt = users.delete().\ - where(users.c.id == addresses.c.id).\ - where(addresses.c.email_address.startswith('ed%')) + stmt = ( + users.delete() + .where(users.c.id == addresses.c.id) + .where(addresses.c.email_address.startswith("ed%")) + ) conn.execute(stmt) On a PostgreSQL backend, the resulting SQL from the above statement would render as:: diff --git a/doc/build/core/type_basics.rst b/doc/build/core/type_basics.rst index 49fc715f06..62d941e663 100644 --- a/doc/build/core/type_basics.rst +++ b/doc/build/core/type_basics.rst @@ -43,10 +43,10 @@ values to and from the database, as in the example below:: metadata_obj = MetaData() user = Table( - 'user', + "user", metadata_obj, - Column('user_name', String, primary_key=True), - Column('email_address', String(60)), + Column("user_name", String, primary_key=True), + Column("email_address", String(60)), ) When using a particular :class:`_types.TypeEngine` class in a diff --git a/doc/build/dialects/mssql.rst b/doc/build/dialects/mssql.rst index 7484000dbc..6fd573c8d3 100644 --- a/doc/build/dialects/mssql.rst +++ 
b/doc/build/dialects/mssql.rst @@ -19,12 +19,38 @@ As with all SQLAlchemy dialects, all UPPERCASE types that are known to be valid with SQL server are importable from the top level dialect, whether they originate from :mod:`sqlalchemy.types` or from the local dialect:: - from sqlalchemy.dialects.mssql import \ - BIGINT, BINARY, BIT, CHAR, DATE, DATETIME, DATETIME2, \ - DATETIMEOFFSET, DECIMAL, FLOAT, IMAGE, INTEGER, JSON, MONEY, \ - NCHAR, NTEXT, NUMERIC, NVARCHAR, REAL, SMALLDATETIME, \ - SMALLINT, SMALLMONEY, SQL_VARIANT, TEXT, TIME, \ - TIMESTAMP, TINYINT, UNIQUEIDENTIFIER, VARBINARY, VARCHAR + from sqlalchemy.dialects.mssql import ( + BIGINT, + BINARY, + BIT, + CHAR, + DATE, + DATETIME, + DATETIME2, + DATETIMEOFFSET, + DECIMAL, + FLOAT, + IMAGE, + INTEGER, + JSON, + MONEY, + NCHAR, + NTEXT, + NUMERIC, + NVARCHAR, + REAL, + SMALLDATETIME, + SMALLINT, + SMALLMONEY, + SQL_VARIANT, + TEXT, + TIME, + TIMESTAMP, + TINYINT, + UNIQUEIDENTIFIER, + VARBINARY, + VARCHAR, + ) Types which are specific to SQL Server, or have SQL Server-specific construction arguments, are as follows: diff --git a/doc/build/dialects/mysql.rst b/doc/build/dialects/mysql.rst index c506a5fa43..52dd45cfac 100644 --- a/doc/build/dialects/mysql.rst +++ b/doc/build/dialects/mysql.rst @@ -19,12 +19,42 @@ MySQL Data Types As with all SQLAlchemy dialects, all UPPERCASE types that are known to be valid with MySQL are importable from the top level dialect:: - from sqlalchemy.dialects.mysql import \ - BIGINT, BINARY, BIT, BLOB, BOOLEAN, CHAR, DATE, \ - DATETIME, DECIMAL, DECIMAL, DOUBLE, ENUM, FLOAT, INTEGER, \ - LONGBLOB, LONGTEXT, MEDIUMBLOB, MEDIUMINT, MEDIUMTEXT, NCHAR, \ - NUMERIC, NVARCHAR, REAL, SET, SMALLINT, TEXT, TIME, TIMESTAMP, \ - TINYBLOB, TINYINT, TINYTEXT, VARBINARY, VARCHAR, YEAR + from sqlalchemy.dialects.mysql import ( + BIGINT, + BINARY, + BIT, + BLOB, + BOOLEAN, + CHAR, + DATE, + DATETIME, + DECIMAL, + DECIMAL, + DOUBLE, + ENUM, + FLOAT, + INTEGER, + LONGBLOB, + LONGTEXT, + 
MEDIUMBLOB, + MEDIUMINT, + MEDIUMTEXT, + NCHAR, + NUMERIC, + NVARCHAR, + REAL, + SET, + SMALLINT, + TEXT, + TIME, + TIMESTAMP, + TINYBLOB, + TINYINT, + TINYTEXT, + VARBINARY, + VARCHAR, + YEAR, + ) Types which are specific to MySQL, or have MySQL-specific construction arguments, are as follows: diff --git a/doc/build/dialects/oracle.rst b/doc/build/dialects/oracle.rst index 81cef78d27..d992a2f83b 100644 --- a/doc/build/dialects/oracle.rst +++ b/doc/build/dialects/oracle.rst @@ -12,11 +12,26 @@ As with all SQLAlchemy dialects, all UPPERCASE types that are known to be valid with Oracle are importable from the top level dialect, whether they originate from :mod:`sqlalchemy.types` or from the local dialect:: - from sqlalchemy.dialects.oracle import \ - BFILE, BLOB, CHAR, CLOB, DATE, \ - DOUBLE_PRECISION, FLOAT, INTERVAL, LONG, NCLOB, NCHAR, \ - NUMBER, NVARCHAR, NVARCHAR2, RAW, TIMESTAMP, VARCHAR, \ - VARCHAR2 + from sqlalchemy.dialects.oracle import ( + BFILE, + BLOB, + CHAR, + CLOB, + DATE, + DOUBLE_PRECISION, + FLOAT, + INTERVAL, + LONG, + NCLOB, + NCHAR, + NUMBER, + NVARCHAR, + NVARCHAR2, + RAW, + TIMESTAMP, + VARCHAR, + VARCHAR2, + ) .. versionadded:: 1.2.19 Added :class:`_types.NCHAR` to the list of datatypes exported by the Oracle dialect. 
diff --git a/doc/build/dialects/postgresql.rst b/doc/build/dialects/postgresql.rst index c58aaee9b4..4e8fb98d95 100644 --- a/doc/build/dialects/postgresql.rst +++ b/doc/build/dialects/postgresql.rst @@ -12,12 +12,43 @@ As with all SQLAlchemy dialects, all UPPERCASE types that are known to be valid with PostgreSQL are importable from the top level dialect, whether they originate from :mod:`sqlalchemy.types` or from the local dialect:: - from sqlalchemy.dialects.postgresql import \ - ARRAY, BIGINT, BIT, BOOLEAN, BYTEA, CHAR, CIDR, DATE, \ - DOUBLE_PRECISION, ENUM, FLOAT, HSTORE, INET, INTEGER, \ - INTERVAL, JSON, JSONB, MACADDR, MONEY, NUMERIC, OID, REAL, SMALLINT, TEXT, \ - TIME, TIMESTAMP, UUID, VARCHAR, INT4RANGE, INT8RANGE, NUMRANGE, \ - DATERANGE, TSRANGE, TSTZRANGE, TSVECTOR + from sqlalchemy.dialects.postgresql import ( + ARRAY, + BIGINT, + BIT, + BOOLEAN, + BYTEA, + CHAR, + CIDR, + DATE, + DOUBLE_PRECISION, + ENUM, + FLOAT, + HSTORE, + INET, + INTEGER, + INTERVAL, + JSON, + JSONB, + MACADDR, + MONEY, + NUMERIC, + OID, + REAL, + SMALLINT, + TEXT, + TIME, + TIMESTAMP, + UUID, + VARCHAR, + INT4RANGE, + INT8RANGE, + NUMRANGE, + DATERANGE, + TSRANGE, + TSTZRANGE, + TSVECTOR, + ) Types which are specific to PostgreSQL, or have PostgreSQL-specific construction arguments, are as follows: @@ -179,16 +210,15 @@ For example:: from sqlalchemy.dialects.postgresql import ExcludeConstraint, TSRANGE + class RoomBooking(Base): - __tablename__ = 'room_booking' + __tablename__ = "room_booking" room = Column(Integer(), primary_key=True) during = Column(TSRANGE()) - __table_args__ = ( - ExcludeConstraint(('room', '='), ('during', '&&')), - ) + __table_args__ = (ExcludeConstraint(("room", "="), ("during", "&&")),) PostgreSQL DML Constructs ------------------------- diff --git a/doc/build/dialects/sqlite.rst b/doc/build/dialects/sqlite.rst index 6d40daf5fe..d25301fa53 100644 --- a/doc/build/dialects/sqlite.rst +++ b/doc/build/dialects/sqlite.rst @@ -12,10 +12,23 @@ As with all 
SQLAlchemy dialects, all UPPERCASE types that are known to be valid with SQLite are importable from the top level dialect, whether they originate from :mod:`sqlalchemy.types` or from the local dialect:: - from sqlalchemy.dialects.sqlite import \ - BLOB, BOOLEAN, CHAR, DATE, DATETIME, DECIMAL, FLOAT, \ - INTEGER, NUMERIC, JSON, SMALLINT, TEXT, TIME, TIMESTAMP, \ - VARCHAR + from sqlalchemy.dialects.sqlite import ( + BLOB, + BOOLEAN, + CHAR, + DATE, + DATETIME, + DECIMAL, + FLOAT, + INTEGER, + NUMERIC, + JSON, + SMALLINT, + TEXT, + TIME, + TIMESTAMP, + VARCHAR, + ) .. module:: sqlalchemy.dialects.sqlite diff --git a/doc/build/errors.rst b/doc/build/errors.rst index 3c0632af69..f270ee3202 100644 --- a/doc/build/errors.rst +++ b/doc/build/errors.rst @@ -441,7 +441,7 @@ Normally, a Core SQL construct or ORM :class:`_query.Query` object can be string directly, such as when we use ``print()``:: >>> from sqlalchemy import column - >>> print(column('x') == 5) + >>> print(column("x") == 5) x = :x_1 When the above SQL expression is stringified, the :class:`.StrSQLCompiler` @@ -455,11 +455,9 @@ to turn into a string, such as the PostgreSQL >>> from sqlalchemy.dialects.postgresql import insert >>> from sqlalchemy import table, column - >>> my_table = table('my_table', column('x'), column('y')) - >>> insert_stmt = insert(my_table).values(x='foo') - >>> insert_stmt = insert_stmt.on_conflict_do_nothing( - ... index_elements=['y'] - ... 
) + >>> my_table = table("my_table", column("x"), column("y")) + >>> insert_stmt = insert(my_table).values(x="foo") + >>> insert_stmt = insert_stmt.on_conflict_do_nothing(index_elements=["y"]) >>> print(insert_stmt) Traceback (most recent call last): @@ -501,14 +499,12 @@ This often occurs when attempting to use a :func:`.column_property` or declarative such as:: class Bar(Base): - __tablename__ = 'bar' + __tablename__ = "bar" id = Column(Integer, primary_key=True) cprop = deferred(Column(Integer)) - __table_args__ = ( - CheckConstraint(cprop > 5), - ) + __table_args__ = (CheckConstraint(cprop > 5),) Above, the ``cprop`` attribute is used inline before it has been mapped, however this ``cprop`` attribute is not a :class:`_schema.Column`, @@ -527,16 +523,12 @@ The solution is to access the :class:`_schema.Column` directly using the :attr:`.ColumnProperty.expression` attribute:: class Bar(Base): - __tablename__ = 'bar' + __tablename__ = "bar" id = Column(Integer, primary_key=True) cprop = deferred(Column(Integer)) - __table_args__ = ( - CheckConstraint(cprop.expression > 5), - ) - - + __table_args__ = (CheckConstraint(cprop.expression > 5),) .. _error_cd3x: @@ -547,7 +539,7 @@ This error occurs when a statement makes use of :func:`.bindparam` either implicitly or explicitly and does not provide a value when the statement is executed:: - stmt = select(table.c.column).where(table.c.id == bindparam('my_param')) + stmt = select(table.c.column).where(table.c.id == bindparam("my_param")) result = conn.execute(stmt) @@ -594,11 +586,12 @@ this error is generated:: Since "b" is required, pass it as ``None`` so that the INSERT may proceed:: e.execute( - t.insert(), [ + t.insert(), + [ {"a": 1, "b": 2, "c": 3}, {"a": 2, "b": None, "c": 4}, {"a": 3, "b": 4, "c": 5}, - ] + ], ) .. seealso:: @@ -620,12 +613,7 @@ Core and the full rationale is discussed at :ref:`change_4617`. 
Given an example as:: m = MetaData() - t = Table( - 't', m, - Column('a', Integer), - Column('b', Integer), - Column('c', Integer) - ) + t = Table("t", m, Column("a", Integer), Column("b", Integer), Column("c", Integer)) stmt = select(t) Above, ``stmt`` represents a SELECT statement. The error is produced when we want @@ -678,10 +666,12 @@ construct:: a1 = Address.__table__ - q = s.query(User).\ - join(a1, User.addresses).\ - filter(Address.email_address == 'ed@foo.com').all() - + q = ( + s.query(User) + .join(a1, User.addresses) + .filter(Address.email_address == "ed@foo.com") + .all() + ) The above pattern also allows an arbitrary selectable, such as a Core :class:`_sql.Join` or :class:`_sql.Alias` object, @@ -690,23 +680,26 @@ Core element would need to be referred towards directly:: a1 = Address.__table__.alias() - q = s.query(User).\ - join(a1, User.addresses).\ - filter(a1.c.email_address == 'ed@foo.com').all() + q = ( + s.query(User) + .join(a1, User.addresses) + .filter(a1.c.email_address == "ed@foo.com") + .all() + ) The correct way to specify a join target is always by using the mapped class itself or an :class:`_orm.aliased` object, in the latter case using the :meth:`_orm.PropComparator.of_type` modifier to set up an alias:: # normal join to relationship entity - q = s.query(User).\ - join(User.addresses).\ - filter(Address.email_address == 'ed@foo.com') + q = s.query(User).join(User.addresses).filter(Address.email_address == "ed@foo.com") # name Address target explicitly, not necessary but legal - q = s.query(User).\ - join(Address, User.addresses).\ - filter(Address.email_address == 'ed@foo.com') + q = ( + s.query(User) + .join(Address, User.addresses) + .filter(Address.email_address == "ed@foo.com") + ) Join to an alias:: @@ -715,15 +708,14 @@ Join to an alias:: a1 = aliased(Address) # of_type() form; recommended - q = s.query(User).\ - join(User.addresses.of_type(a1)).\ - filter(a1.email_address == 'ed@foo.com') + q = ( + s.query(User) + 
.join(User.addresses.of_type(a1)) + .filter(a1.email_address == "ed@foo.com") + ) # target, onclause form - q = s.query(User).\ - join(a1, User.addresses).\ - filter(a1.email_address == 'ed@foo.com') - + q = s.query(User).join(a1, User.addresses).filter(a1.email_address == "ed@foo.com") .. _error_xaj2: @@ -741,7 +733,7 @@ alias to one side or the other; SQLAlchemy applies an alias to the right side of the join. For example given a joined inheritance mapping as:: class Employee(Base): - __tablename__ = 'employee' + __tablename__ = "employee" id = Column(Integer, primary_key=True) manager_id = Column(ForeignKey("manager.id")) name = Column(String(50)) @@ -750,17 +742,18 @@ of the join. For example given a joined inheritance mapping as:: reports_to = relationship("Manager", foreign_keys=manager_id) __mapper_args__ = { - 'polymorphic_identity':'employee', - 'polymorphic_on':type, + "polymorphic_identity": "employee", + "polymorphic_on": type, } + class Manager(Employee): - __tablename__ = 'manager' - id = Column(Integer, ForeignKey('employee.id'), primary_key=True) + __tablename__ = "manager" + id = Column(Integer, ForeignKey("employee.id"), primary_key=True) __mapper_args__ = { - 'polymorphic_identity':'manager', - 'inherit_condition': id == Employee.id + "polymorphic_identity": "manager", + "inherit_condition": id == Employee.id, } The above mapping includes a relationship between the ``Employee`` and @@ -824,10 +817,10 @@ embedding the join into a new subquery: If we then wanted to use :func:`_orm.contains_eager` to populate the ``reports_to`` attribute, we refer to the alias:: - >>> stmt =select(Employee).join( - ... Employee.reports_to.of_type(manager_alias) - ... ).options( - ... contains_eager(Employee.reports_to.of_type(manager_alias)) + >>> stmt = ( + ... select(Employee) + ... .join(Employee.reports_to.of_type(manager_alias)) + ... .options(contains_eager(Employee.reports_to.of_type(manager_alias))) ... 
) Without using the explicit :func:`_orm.aliased` object, in some more nested @@ -960,6 +953,7 @@ is set on a many-to-one or many-to-many relationship, such as:: # configuration step occurs a = relationship("A", back_populates="bs", cascade="all, delete-orphan") + configure_mappers() Above, the "delete-orphan" setting on ``B.a`` indicates the intent that @@ -1222,12 +1216,12 @@ items in each case:: "Child", primaryjoin="and_(Parent.id == Child.parent_id, Child.flag == 0)", backref="parent", - overlaps="c2, parent" + overlaps="c2, parent", ) c2 = relationship( "Child", primaryjoin="and_(Parent.id == Child.parent_id, Child.flag == 1)", - overlaps="c1, parent" + overlaps="c1, parent", ) @@ -1238,7 +1232,6 @@ items in each case:: flag = Column(Integer) - Above, the ORM will know that the overlap between ``Parent.c1``, ``Parent.c2`` and ``Child.parent`` is intentional. @@ -1289,8 +1282,7 @@ the ``prebuffer_rows`` execution option may be used as follows:: # result internally pre-fetches all objects result = sess.execute( - select(User).where(User.id == 7), - execution_options={"prebuffer_rows": True} + select(User).where(User.id == 7), execution_options={"prebuffer_rows": True} ) # context manager is closed, so session_obj above is closed, identity @@ -1577,10 +1569,10 @@ the :meth:`.Executable.execute` method directly off of a Core expression object that is not associated with any :class:`_engine.Engine`:: metadata_obj = MetaData() - table = Table('t', metadata_obj, Column('q', Integer)) + table = Table("t", metadata_obj, Column("q", Integer)) stmt = select(table) - result = stmt.execute() # <--- raises + result = stmt.execute() # <--- raises What the logic is expecting is that the :class:`_schema.MetaData` object has been **bound** to a :class:`_engine.Engine`:: @@ -1597,7 +1589,7 @@ The correct way to invoke statements is via the :meth:`_engine.Connection.execute` method of a :class:`_engine.Connection`:: with engine.connect() as conn: - result = conn.execute(stmt) + 
result = conn.execute(stmt) When using the ORM, a similar facility is available via the :class:`.Session`:: diff --git a/doc/build/faq/connections.rst b/doc/build/faq/connections.rst index 27ba5f4ed5..fe8e56f815 100644 --- a/doc/build/faq/connections.rst +++ b/doc/build/faq/connections.rst @@ -27,8 +27,9 @@ How do I pass custom connect arguments to my database API? The :func:`_sa.create_engine` call accepts additional arguments either directly via the ``connect_args`` keyword argument:: - e = create_engine("mysql://scott:tiger@localhost/test", - connect_args={"encoding": "utf8"}) + e = create_engine( + "mysql://scott:tiger@localhost/test", connect_args={"encoding": "utf8"} + ) Or for basic string and integer arguments, they can usually be specified in the query string of the URL:: @@ -256,9 +257,7 @@ statement executions:: fn(cursor_obj, statement, context=context, *arg) except engine.dialect.dbapi.Error as raw_dbapi_err: connection = context.root_connection - if engine.dialect.is_disconnect( - raw_dbapi_err, connection, cursor_obj - ): + if engine.dialect.is_disconnect(raw_dbapi_err, connection, cursor_obj): if retry > num_retries: raise engine.logger.error( @@ -316,9 +315,7 @@ using the following proof of concept script. Once run, it will emit a time.sleep(5) e = reconnecting_engine( - create_engine( - "mysql://scott:tiger@localhost/test", echo_pool=True - ), + create_engine("mysql://scott:tiger@localhost/test", echo_pool=True), num_retries=5, retry_interval=2, ) @@ -374,7 +371,10 @@ configured using ``reset_on_return``:: from sqlalchemy import create_engine from sqlalchemy.pool import QueuePool - engine = create_engine('mysql://scott:tiger@localhost/myisam_database', pool=QueuePool(reset_on_return=False)) + engine = create_engine( + "mysql://scott:tiger@localhost/myisam_database", + pool=QueuePool(reset_on_return=False), + ) I'm on SQL Server - how do I turn those ROLLBACKs into COMMITs? 
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -383,8 +383,9 @@ I'm on SQL Server - how do I turn those ROLLBACKs into COMMITs? to ``True``, ``False``, and ``None``. Setting to ``commit`` will cause a COMMIT as any connection is returned to the pool:: - engine = create_engine('mssql://scott:tiger@mydsn', pool=QueuePool(reset_on_return='commit')) - + engine = create_engine( + "mssql://scott:tiger@mydsn", pool=QueuePool(reset_on_return="commit") + ) I am using multiple connections with a SQLite database (typically to test transaction operation), and my test program is not working! ---------------------------------------------------------------------------------------------------------------------------------------------------------- diff --git a/doc/build/faq/metadata_schema.rst b/doc/build/faq/metadata_schema.rst index 2556db60c1..2eab0033a5 100644 --- a/doc/build/faq/metadata_schema.rst +++ b/doc/build/faq/metadata_schema.rst @@ -88,9 +88,12 @@ metadata creation sequence as a string, using this recipe:: from sqlalchemy import create_mock_engine + def dump(sql, *multiparams, **params): print(sql.compile(dialect=engine.dialect)) - engine = create_mock_engine('postgresql://', dump) + + + engine = create_mock_engine("postgresql://", dump) metadata_obj.create_all(engine, checkfirst=False) The `Alembic `_ tool also supports diff --git a/doc/build/faq/ormconfiguration.rst b/doc/build/faq/ormconfiguration.rst index f257f7ce99..1059354ed8 100644 --- a/doc/build/faq/ormconfiguration.rst +++ b/doc/build/faq/ormconfiguration.rst @@ -48,7 +48,7 @@ applied directly to the mapper:: class SomeClass(Base): __table__ = some_table_with_no_pk __mapper_args__ = { - 'primary_key':[some_table_with_no_pk.c.uid, some_table_with_no_pk.c.bar] + "primary_key": [some_table_with_no_pk.c.uid, some_table_with_no_pk.c.bar] } Better yet is when using fully declared table metadata, use the ``primary_key=True`` @@ -142,16 +142,18 @@ Given the example as follows:: Base = 
declarative_base() + class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) + class B(A): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) - a_id = Column(Integer, ForeignKey('a.id')) + a_id = Column(Integer, ForeignKey("a.id")) As of SQLAlchemy version 0.9.5, the above condition is detected, and will warn that the ``id`` column of ``A`` and ``B`` is being combined under @@ -161,33 +163,33 @@ that a ``B`` object's primary key will always mirror that of its ``A``. A mapping which resolves this is as follows:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) + class B(A): - __tablename__ = 'b' + __tablename__ = "b" - b_id = Column('id', Integer, primary_key=True) - a_id = Column(Integer, ForeignKey('a.id')) + b_id = Column("id", Integer, primary_key=True) + a_id = Column(Integer, ForeignKey("a.id")) Suppose we did want ``A.id`` and ``B.id`` to be mirrors of each other, despite the fact that ``B.a_id`` is where ``A.id`` is related. We could combine them together using :func:`.column_property`:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) + class B(A): - __tablename__ = 'b' + __tablename__ = "b" # probably not what you want, but this is a demonstration id = column_property(Column(Integer, primary_key=True), A.id) - a_id = Column(Integer, ForeignKey('a.id')) - - + a_id = Column(Integer, ForeignKey("a.id")) I'm using Declarative and setting primaryjoin/secondaryjoin using an ``and_()`` or ``or_()``, and I am getting an error message about foreign keys. ------------------------------------------------------------------------------------------------------------------------------------------------------------------ @@ -197,21 +199,27 @@ Are you doing this?:: class MyClass(Base): # .... 
- foo = relationship("Dest", primaryjoin=and_("MyClass.id==Dest.foo_id", "MyClass.foo==Dest.bar")) + foo = relationship( + "Dest", primaryjoin=and_("MyClass.id==Dest.foo_id", "MyClass.foo==Dest.bar") + ) That's an ``and_()`` of two string expressions, which SQLAlchemy cannot apply any mapping towards. Declarative allows :func:`_orm.relationship` arguments to be specified as strings, which are converted into expression objects using ``eval()``. But this doesn't occur inside of an ``and_()`` expression - it's a special operation declarative applies only to the *entirety* of what's passed to primaryjoin or other arguments as a string:: class MyClass(Base): # .... - foo = relationship("Dest", primaryjoin="and_(MyClass.id==Dest.foo_id, MyClass.foo==Dest.bar)") + foo = relationship( + "Dest", primaryjoin="and_(MyClass.id==Dest.foo_id, MyClass.foo==Dest.bar)" + ) Or if the objects you need are already available, skip the strings:: class MyClass(Base): # .... - foo = relationship(Dest, primaryjoin=and_(MyClass.id==Dest.foo_id, MyClass.foo==Dest.bar)) + foo = relationship( + Dest, primaryjoin=and_(MyClass.id == Dest.foo_id, MyClass.foo == Dest.bar) + ) The same idea applies to all the other arguments, such as ``foreign_keys``:: diff --git a/doc/build/faq/performance.rst b/doc/build/faq/performance.rst index 781d6c79d3..91061c8592 100644 --- a/doc/build/faq/performance.rst +++ b/doc/build/faq/performance.rst @@ -215,16 +215,16 @@ using a recipe like the following:: logger = logging.getLogger("myapp.sqltime") logger.setLevel(logging.DEBUG) + @event.listens_for(Engine, "before_cursor_execute") - def before_cursor_execute(conn, cursor, statement, - parameters, context, executemany): - conn.info.setdefault('query_start_time', []).append(time.time()) + def before_cursor_execute(conn, cursor, statement, parameters, context, executemany): + conn.info.setdefault("query_start_time", []).append(time.time()) logger.debug("Start Query: %s", statement) + @event.listens_for(Engine, 
"after_cursor_execute") - def after_cursor_execute(conn, cursor, statement, - parameters, context, executemany): - total = time.time() - conn.info['query_start_time'].pop(-1) + def after_cursor_execute(conn, cursor, statement, parameters, context, executemany): + total = time.time() - conn.info["query_start_time"].pop(-1) logger.debug("Query Complete!") logger.debug("Total Time: %f", total) @@ -255,6 +255,7 @@ Below is a simple recipe which works profiling into a context manager:: import pstats import contextlib + @contextlib.contextmanager def profiled(): pr = cProfile.Profile() @@ -262,7 +263,7 @@ Below is a simple recipe which works profiling into a context manager:: yield pr.disable() s = io.StringIO() - ps = pstats.Stats(pr, stream=s).sort_stats('cumulative') + ps = pstats.Stats(pr, stream=s).sort_stats("cumulative") ps.print_stats() # uncomment this to see who's calling what # ps.print_callers() @@ -271,7 +272,7 @@ Below is a simple recipe which works profiling into a context manager:: To profile a section of code:: with profiled(): - Session.query(FooClass).filter(FooClass.somevalue==8).all() + Session.query(FooClass).filter(FooClass.somevalue == 8).all() The output of profiling can be used to give an idea where time is being spent. A section of profiling output looks like this:: @@ -357,12 +358,13 @@ this:: from sqlalchemy import TypeDecorator import time + class Foo(TypeDecorator): impl = String def process_result_value(self, value, thing): # intentionally add slowness for illustration purposes - time.sleep(.001) + time.sleep(0.001) return value the profiling output of this intentionally slow operation can be seen like this:: diff --git a/doc/build/faq/sessions.rst b/doc/build/faq/sessions.rst index 1145a408fa..c070781981 100644 --- a/doc/build/faq/sessions.rst +++ b/doc/build/faq/sessions.rst @@ -91,12 +91,14 @@ does not properly handle the exception. 
For example:: from sqlalchemy.orm import sessionmaker from sqlalchemy.ext.declarative import declarative_base - Base = declarative_base(create_engine('sqlite://')) + Base = declarative_base(create_engine("sqlite://")) + class Foo(Base): - __tablename__ = 'foo' + __tablename__ = "foo" id = Column(Integer, primary_key=True) + Base.metadata.create_all() session = sessionmaker()() @@ -113,7 +115,6 @@ does not properly handle the exception. For example:: # continue using session without rolling back session.commit() - The usage of the :class:`.Session` should fit within a structure similar to this:: try: @@ -186,7 +187,7 @@ point of view there is still a transaction that is now in an inactive state. Given a block such as:: - sess = Session() # begins a logical transaction + sess = Session() # begins a logical transaction try: sess.flush() @@ -237,7 +238,7 @@ will **deduplicate the objects based on primary key**. That is, if we for example use the ``User`` mapping described at :ref:`ormtutorial_toplevel`, and we had a SQL query like the following:: - q = session.query(User).outerjoin(User.addresses).filter(User.name == 'jack') + q = session.query(User).outerjoin(User.addresses).filter(User.name == "jack") Above, the sample data used in the tutorial has two rows in the ``addresses`` table for the ``users`` row with the name ``'jack'``, primary key value 5. @@ -257,7 +258,9 @@ This is because when the :class:`_query.Query` object returns full entities, the are **deduplicated**. This does not occur if we instead request individual columns back:: - >>> session.query(User.id, User.name).outerjoin(User.addresses).filter(User.name == 'jack').all() + >>> session.query(User.id, User.name).outerjoin(User.addresses).filter( + ... User.name == "jack" + ... 
).all() [(5, 'jack'), (5, 'jack')] There are two main reasons the :class:`_query.Query` will deduplicate: @@ -338,6 +341,7 @@ one:: print("ITER!") return iter([1, 2, 3, 4, 5]) + list(Iterates()) output:: @@ -422,7 +426,7 @@ be performed for any :term:`persistent` object using :meth:`.Session.expire`:: o = Session.query(SomeClass).first() o.foo_id = 7 - Session.expire(o, ['foo']) # object must be persistent for this + Session.expire(o, ["foo"]) # object must be persistent for this foo_7 = Session.query(Foo).get(7) @@ -444,11 +448,10 @@ have meaning until the row is inserted; otherwise there is no row yet:: Session.flush() # emits INSERT # expire this because we already set .foo to None - Session.expire(o, ['foo']) + Session.expire(o, ["foo"]) assert new_obj.foo is foo_7 # now it loads - .. topic:: Attribute loading for non-persistent objects One variant on the "pending" behavior above is if we use the flag @@ -504,21 +507,21 @@ The function can be demonstrated as follows:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) bs = relationship("B", backref="a") class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) - a_id = Column(ForeignKey('a.id')) - c_id = Column(ForeignKey('c.id')) + a_id = Column(ForeignKey("a.id")) + c_id = Column(ForeignKey("c.id")) c = relationship("C", backref="bs") class C(Base): - __tablename__ = 'c' + __tablename__ = "c" id = Column(Integer, primary_key=True) diff --git a/doc/build/faq/sqlexpressions.rst b/doc/build/faq/sqlexpressions.rst index 5dcf3e96ad..287647a793 100644 --- a/doc/build/faq/sqlexpressions.rst +++ b/doc/build/faq/sqlexpressions.rst @@ -19,7 +19,7 @@ function (note the Python ``print`` function also calls ``str()`` automatically if we don't use it explicitly):: >>> from sqlalchemy import table, column, select - >>> t = table('my_table', column('x')) + >>> t = table("my_table", column("x")) >>> statement = select(t) >>> 
print(str(statement)) SELECT my_table.x @@ -31,7 +31,7 @@ The ``str()`` builtin, or an equivalent, can be invoked on ORM as:: >>> from sqlalchemy import column - >>> print(column('x') == 'some value') + >>> print(column("x") == "some value") x = :x_1 Stringifying for Specific Databases @@ -59,6 +59,7 @@ instantiate a :class:`.Dialect` object directly, as below where we use a PostgreSQL dialect:: from sqlalchemy.dialects import postgresql + print(statement.compile(dialect=postgresql.dialect())) Note that any dialect can be assembled using :func:`_sa.create_engine` itself @@ -98,7 +99,7 @@ flag, passed to ``compile_kwargs``:: from sqlalchemy.sql import table, column, select - t = table('t', column('x')) + t = table("t", column("x")) s = select(t).where(t.c.x == 5) @@ -159,12 +160,14 @@ datatype:: Base = declarative_base() + class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) data = Column(UUID) + stmt = select(A).where(A.data == uuid.uuid4()) Given the above model and statement which will compare a column to a single @@ -216,6 +219,7 @@ include: their positional order for the statement as compiled:: import re + e = create_engine("sqlite+pysqlite://") # will use qmark style, i.e. ? 
for param @@ -224,7 +228,7 @@ include: # params in positional order params = (repr(compiled.params[name]) for name in compiled.positiontup) - print(re.sub(r'\?', lambda m: next(params), str(compiled))) + print(re.sub(r"\?", lambda m: next(params), str(compiled))) The above snippet prints:: @@ -240,6 +244,7 @@ include: from sqlalchemy.ext.compiler import compiles from sqlalchemy.sql.expression import BindParameter + @compiles(BindParameter) def _render_literal_bindparam(element, compiler, use_my_literal_recipe=False, **kw): if not use_my_literal_recipe: @@ -250,6 +255,7 @@ include: # render the value directly return repr(element.value) + e = create_engine("postgresql+psycopg2://") print(stmt.compile(e, compile_kwargs={"use_my_literal_recipe": True})) @@ -265,6 +271,7 @@ include: from sqlalchemy import TypeDecorator + class UUIDStringify(TypeDecorator): impl = UUID @@ -275,6 +282,7 @@ include: or locally within the statement using :func:`_sql.type_coerce`, such as :: from sqlalchemy import type_coerce + stmt = select(A).where(type_coerce(A.data, UUIDStringify) == uuid.uuid4()) print(stmt.compile(e, compile_kwargs={"literal_binds": True})) @@ -331,7 +339,7 @@ in the same way, such as SQLite's positional form:: >>> e = create_engine("sqlite+pysqlite://") >>> compiled = stmt.compile(e, compile_kwargs={"render_postcompile": True}) >>> params = (repr(compiled.params[name]) for name in compiled.positiontup) - >>> print(re.sub(r'\?', lambda m: next(params), str(compiled))) + >>> print(re.sub(r"\?", lambda m: next(params), str(compiled))) SELECT a.id, a.data FROM a WHERE a.data IN (UUID('aa1944d6-9a5a-45d5-b8da-0ba1ef0a4f38'), UUID('a81920e6-15e2-4392-8a3c-d775ffa9ccd2'), UUID('b5574cdb-ff9b-49a3-be52-dbc89f087bfa')) @@ -414,13 +422,13 @@ I'm using op() to generate a custom operator and my parenthesis are not coming o The :meth:`.Operators.op` method allows one to create a custom database operator otherwise not known by SQLAlchemy:: - >>> 
print(column('q').op('->')(column('p'))) + >>> print(column("q").op("->")(column("p"))) q -> p However, when using it on the right side of a compound expression, it doesn't generate parenthesis as we expect:: - >>> print((column('q1') + column('q2')).op('->')(column('p'))) + >>> print((column("q1") + column("q2")).op("->")(column("p"))) q1 + q2 -> p Where above, we probably want ``(q1 + q2) -> p``. @@ -430,14 +438,14 @@ the :paramref:`.Operators.op.precedence` parameter, to a high number, where 100 is the maximum value, and the highest number used by any SQLAlchemy operator is currently 15:: - >>> print((column('q1') + column('q2')).op('->', precedence=100)(column('p'))) + >>> print((column("q1") + column("q2")).op("->", precedence=100)(column("p"))) (q1 + q2) -> p We can also usually force parenthesization around a binary expression (e.g. an expression that has left/right operands and an operator) using the :meth:`_expression.ColumnElement.self_group` method:: - >>> print((column('q1') + column('q2')).self_group().op('->')(column('p'))) + >>> print((column("q1") + column("q2")).self_group().op("->")(column("p"))) (q1 + q2) -> p Why are the parentheses rules like this? @@ -449,7 +457,7 @@ generate parenthesis based on groupings, it uses operator precedence and if the operator is known to be associative, so that parenthesis are generated minimally. Otherwise, an expression like:: - column('a') & column('b') & column('c') & column('d') + column("a") & column("b") & column("c") & column("d") would produce:: @@ -459,7 +467,7 @@ which is fine but would probably annoy people (and be reported as a bug). In other cases, it leads to things that are more likely to confuse databases or at the very least readability, such as:: - column('q', ARRAY(Integer, dimensions=2))[5][6] + column("q", ARRAY(Integer, dimensions=2))[5][6] would produce:: @@ -476,16 +484,16 @@ What if we defaulted the value of :paramref:`.Operators.op.precedence` to 100, e.g. the highest? 
Then this expression makes more parenthesis, but is otherwise OK, that is, these two are equivalent:: - >>> print((column('q') - column('y')).op('+', precedence=100)(column('z'))) + >>> print((column("q") - column("y")).op("+", precedence=100)(column("z"))) (q - y) + z - >>> print((column('q') - column('y')).op('+')(column('z'))) + >>> print((column("q") - column("y")).op("+")(column("z"))) q - y + z but these two are not:: - >>> print(column('q') - column('y').op('+', precedence=100)(column('z'))) + >>> print(column("q") - column("y").op("+", precedence=100)(column("z"))) q - y + z - >>> print(column('q') - column('y').op('+')(column('z'))) + >>> print(column("q") - column("y").op("+")(column("z"))) q - (y + z) For now, it's not clear that as long as we are doing parenthesization based on diff --git a/doc/build/faq/thirdparty.rst b/doc/build/faq/thirdparty.rst index 27c8fbf743..4b8bb7c556 100644 --- a/doc/build/faq/thirdparty.rst +++ b/doc/build/faq/thirdparty.rst @@ -28,17 +28,18 @@ by queries. This may be illustrated from code based on the following:: import numpy + class A(Base): __tablename__ = "a" id = Column(Integer, primary_key=True) data = Column(Integer) + # .. 
later session.add(A(data=numpy.int64(10))) session.commit() - In the latter case, the issue is due to the ``numpy.int64`` datatype overriding the ``__eq__()`` method and enforcing that the return type of an expression is ``numpy.True`` or ``numpy.False``, which breaks SQLAlchemy's expression @@ -47,9 +48,9 @@ expressions from Python equality comparisons:: >>> import numpy >>> from sqlalchemy import column, Integer - >>> print(column('x', Integer) == numpy.int64(10)) # works + >>> print(column("x", Integer) == numpy.int64(10)) # works x = :x_1 - >>> print(numpy.int64(10) == column('x', Integer)) # breaks + >>> print(numpy.int64(10) == column("x", Integer)) # breaks False These errors are both solved in the same way, which is that special numpy @@ -61,9 +62,7 @@ applying the Python ``int()`` function to types like ``numpy.int32`` and session.add(A(data=int(data))) - result = session.execute( - select(A.data).where(int(data) == A.data) - ) + result = session.execute(select(A.data).where(int(data) == A.data)) session.commit() @@ -72,4 +71,4 @@ applying the Python ``int()`` function to types like ``numpy.int32`` and SQL expression for WHERE/HAVING role expected, got True ------------------------------------------------------- -See :ref:`numpy_int64`. \ No newline at end of file +See :ref:`numpy_int64`. 
diff --git a/doc/build/glossary.rst b/doc/build/glossary.rst index 111adb13b9..b7d5476e46 100644 --- a/doc/build/glossary.rst +++ b/doc/build/glossary.rst @@ -74,7 +74,6 @@ Glossary # Session returns a Result that has ORM entities list_of_users = result.scalars().all() - reflection reflected In SQLAlchemy, this term refers to the feature of querying a database's @@ -191,7 +190,7 @@ Glossary dictionary is associated with a copy of the object, which contains key/value pairs significant to various internal systems, mostly within the ORM:: - some_column = Column('some_column', Integer) + some_column = Column("some_column", Integer) some_column_annotated = some_column._annotate({"entity": User}) The annotation system differs from the public dictionary :attr:`_schema.Column.info` @@ -265,7 +264,7 @@ Glossary on mapped classes. When a class is mapped as such:: class MyClass(Base): - __tablename__ = 'foo' + __tablename__ = "foo" id = Column(Integer, primary_key=True) data = Column(String) @@ -1062,16 +1061,17 @@ Glossary single department. A SQLAlchemy mapping might look like:: class Department(Base): - __tablename__ = 'department' + __tablename__ = "department" id = Column(Integer, primary_key=True) name = Column(String(30)) employees = relationship("Employee") + class Employee(Base): - __tablename__ = 'employee' + __tablename__ = "employee" id = Column(Integer, primary_key=True) name = Column(String(30)) - dep_id = Column(Integer, ForeignKey('department.id')) + dep_id = Column(Integer, ForeignKey("department.id")) .. seealso:: @@ -1113,15 +1113,16 @@ Glossary single department. 
A SQLAlchemy mapping might look like:: class Department(Base): - __tablename__ = 'department' + __tablename__ = "department" id = Column(Integer, primary_key=True) name = Column(String(30)) + class Employee(Base): - __tablename__ = 'employee' + __tablename__ = "employee" id = Column(Integer, primary_key=True) name = Column(String(30)) - dep_id = Column(Integer, ForeignKey('department.id')) + dep_id = Column(Integer, ForeignKey("department.id")) department = relationship("Department") .. seealso:: @@ -1146,16 +1147,17 @@ Glossary used in :term:`one to many` as follows:: class Department(Base): - __tablename__ = 'department' + __tablename__ = "department" id = Column(Integer, primary_key=True) name = Column(String(30)) employees = relationship("Employee", backref="department") + class Employee(Base): - __tablename__ = 'employee' + __tablename__ = "employee" id = Column(Integer, primary_key=True) name = Column(String(30)) - dep_id = Column(Integer, ForeignKey('department.id')) + dep_id = Column(Integer, ForeignKey("department.id")) A backref can be applied to any relationship, including one to many, many to one, and :term:`many to many`. 
@@ -1207,24 +1209,25 @@ Glossary specified using plain table metadata:: class Employee(Base): - __tablename__ = 'employee' + __tablename__ = "employee" id = Column(Integer, primary_key=True) name = Column(String(30)) projects = relationship( "Project", - secondary=Table('employee_project', Base.metadata, - Column("employee_id", Integer, ForeignKey('employee.id'), - primary_key=True), - Column("project_id", Integer, ForeignKey('project.id'), - primary_key=True) - ), - backref="employees" - ) + secondary=Table( + "employee_project", + Base.metadata, + Column("employee_id", Integer, ForeignKey("employee.id"), primary_key=True), + Column("project_id", Integer, ForeignKey("project.id"), primary_key=True), + ), + backref="employees", + ) + class Project(Base): - __tablename__ = 'project' + __tablename__ = "project" id = Column(Integer, primary_key=True) name = Column(String(30)) @@ -1320,30 +1323,29 @@ Glossary A SQLAlchemy declarative mapping for the above might look like:: class Employee(Base): - __tablename__ = 'employee' + __tablename__ = "employee" id = Column(Integer, primary_key=True) name = Column(String(30)) class Project(Base): - __tablename__ = 'project' + __tablename__ = "project" id = Column(Integer, primary_key=True) name = Column(String(30)) class EmployeeProject(Base): - __tablename__ = 'employee_project' + __tablename__ = "employee_project" - employee_id = Column(Integer, ForeignKey('employee.id'), primary_key=True) - project_id = Column(Integer, ForeignKey('project.id'), primary_key=True) + employee_id = Column(Integer, ForeignKey("employee.id"), primary_key=True) + project_id = Column(Integer, ForeignKey("project.id"), primary_key=True) role_name = Column(String(30)) project = relationship("Project", backref="project_employees") employee = relationship("Employee", backref="employee_projects") - Employees can be added to a project given a role name:: proj = Project(name="Client A") @@ -1351,10 +1353,12 @@ Glossary emp1 = Employee(name="emp1") emp2 = 
Employee(name="emp2") - proj.project_employees.extend([ - EmployeeProject(employee=emp1, role_name="tech lead"), - EmployeeProject(employee=emp2, role_name="account executive") - ]) + proj.project_employees.extend( + [ + EmployeeProject(employee=emp1, role_name="tech lead"), + EmployeeProject(employee=emp2, role_name="account executive"), + ] + ) .. seealso:: diff --git a/doc/build/intro.rst b/doc/build/intro.rst index 4f1b64d15b..2d8ac407de 100644 --- a/doc/build/intro.rst +++ b/doc/build/intro.rst @@ -203,7 +203,7 @@ Python prompt like this: .. sourcecode:: python+sql >>> import sqlalchemy - >>> sqlalchemy.__version__ # doctest: +SKIP + >>> sqlalchemy.__version__ # doctest: +SKIP 1.4.0 Next Steps diff --git a/doc/build/orm/basic_relationships.rst b/doc/build/orm/basic_relationships.rst index ad57d4ca07..6ca4de39c6 100644 --- a/doc/build/orm/basic_relationships.rst +++ b/doc/build/orm/basic_relationships.rst @@ -304,7 +304,6 @@ for each :func:`_orm.relationship` specify the common association table:: "Parent", secondary=association_table, back_populates="children" ) - When using the :paramref:`_orm.relationship.backref` parameter instead of :paramref:`_orm.relationship.back_populates`, the backref will automatically use the same :paramref:`_orm.relationship.secondary` argument for the @@ -321,9 +320,7 @@ reverse relationship:: class Parent(Base): __tablename__ = "left" id = Column(Integer, primary_key=True) - children = relationship( - "Child", secondary=association_table, backref="parents" - ) + children = relationship("Child", secondary=association_table, backref="parents") class Child(Base): diff --git a/doc/build/orm/cascades.rst b/doc/build/orm/cascades.rst index 3c1180404c..7cfd5d19dd 100644 --- a/doc/build/orm/cascades.rst +++ b/doc/build/orm/cascades.rst @@ -109,10 +109,10 @@ and added to another:: >>> user1 = sess1.query(User).filter_by(id=1).first() >>> address1 = user1.addresses[0] - >>> sess1.close() # user1, address1 no longer associated with sess1 + 
>>> sess1.close() # user1, address1 no longer associated with sess1 >>> user1.addresses.remove(address1) # address1 no longer associated with user1 >>> sess2 = Session() - >>> sess2.add(user1) # ... but it still gets added to the new session, + >>> sess2.add(user1) # ... but it still gets added to the new session, >>> address1 in sess2 # because it's still "pending" for flush True @@ -588,9 +588,9 @@ default takes place on attribute change events emitted from backrefs. This is probably a confusing statement more easily described through demonstration; it means that, given a mapping such as this:: - mapper_registry.map_imperatively(Order, order_table, properties={ - 'items' : relationship(Item, backref='order') - }) + mapper_registry.map_imperatively( + Order, order_table, properties={"items": relationship(Item, backref="order")} + ) If an ``Order`` is already in the session, and is assigned to the ``order`` attribute of an ``Item``, the backref appends the ``Item`` to the ``items`` @@ -611,9 +611,11 @@ place:: This behavior can be disabled using the :paramref:`_orm.relationship.cascade_backrefs` flag:: - mapper_registry.map_imperatively(Order, order_table, properties={ - 'items' : relationship(Item, backref='order', cascade_backrefs=False) - }) + mapper_registry.map_imperatively( + Order, + order_table, + properties={"items": relationship(Item, backref="order", cascade_backrefs=False)}, + ) So above, the assignment of ``i1.order = o1`` will append ``i1`` to the ``items`` collection of ``o1``, but will not add ``i1`` to the session. You can, of @@ -628,11 +630,17 @@ parameter may be set to ``False`` on the backref side by using the :func:`_orm.backref` function instead of a string. 
For example, the above relationship could be declared:: - mapper_registry.map_imperatively(Order, order_table, properties={ - 'items' : relationship( - Item, backref=backref('order', cascade_backrefs=False), cascade_backrefs=False - ) - }) + mapper_registry.map_imperatively( + Order, + order_table, + properties={ + "items": relationship( + Item, + backref=backref("order", cascade_backrefs=False), + cascade_backrefs=False, + ) + }, + ) This sets the ``cascade_backrefs=False`` behavior on both relationships. @@ -700,6 +708,7 @@ illustrated in the example below:: addresses = relationship("Address", cascade="all, delete-orphan") + # ... del user.addresses[1] diff --git a/doc/build/orm/collections.rst b/doc/build/orm/collections.rst index 800d2613bd..da50b3f8db 100644 --- a/doc/build/orm/collections.rst +++ b/doc/build/orm/collections.rst @@ -467,16 +467,21 @@ interface are detected and instrumented via duck-typing: class ListLike(object): def __init__(self): self.data = [] + def append(self, item): self.data.append(item) + def remove(self, item): self.data.remove(item) + def extend(self, items): self.data.extend(items) + def __iter__(self): return iter(self.data) + def foo(self): - return 'foo' + return "foo" ``append``, ``remove``, and ``extend`` are known list-like methods, and will be instrumented automatically. ``__iter__`` is not a mutator method and won't @@ -491,10 +496,13 @@ explicit about the interface you are implementing by providing an def __init__(self): self.data = set() + def append(self, item): self.data.add(item) + def remove(self, item): self.data.remove(item) + def __iter__(self): return iter(self.data) @@ -522,6 +530,7 @@ get the job done. from sqlalchemy.orm.collections import collection + class SetLike(object): __emulates__ = set @@ -580,6 +589,7 @@ collection support to other classes. 
It uses a keying function to delegate to from sqlalchemy.util import OrderedDict from sqlalchemy.orm.collections import MappedCollection + class NodeMap(OrderedDict, MappedCollection): """Holds 'Node' objects, keyed by the 'name' attribute with insert order maintained.""" @@ -643,6 +653,7 @@ to restrict the decorations to just your usage in relationships. For example: class MyAwesomeList(some.great.library.AwesomeList): pass + # ... relationship(..., collection_class=MyAwesomeList) The ORM uses this approach for built-ins, quietly substituting a trivial diff --git a/doc/build/orm/composites.rst b/doc/build/orm/composites.rst index 181993db5c..670ae871fd 100644 --- a/doc/build/orm/composites.rst +++ b/doc/build/orm/composites.rst @@ -24,11 +24,7 @@ A simple example represents pairs of columns as a ``Point`` object. return f"Point(x={self.x!r}, y={self.y!r})" def __eq__(self, other): - return ( - isinstance(other, Point) - and other.x == self.x - and other.y == self.y - ) + return isinstance(other, Point) and other.x == self.x and other.y == self.y def __ne__(self, other): return not self.__eq__(other) @@ -180,11 +176,7 @@ itself be a composite object, which is then mapped to a class ``HasVertex``:: return f"Point(x={self.x!r}, y={self.y!r})" def __eq__(self, other): - return ( - isinstance(other, Point) - and other.x == self.x - and other.y == self.y - ) + return isinstance(other, Point) and other.x == self.x and other.y == self.y def __ne__(self, other): return not self.__eq__(other) @@ -201,10 +193,7 @@ itself be a composite object, which is then mapped to a class ``HasVertex``:: return Vertex(Point(x1, y1), Point(x2, y2)) def __composite_values__(self): - return ( - self.start.__composite_values__() - + self.end.__composite_values__() - ) + return self.start.__composite_values__() + self.end.__composite_values__() class HasVertex(Base): @@ -224,7 +213,10 @@ We can then use the above mapping as:: s.add(hv) s.commit() - hv = s.query(HasVertex).filter( - 
HasVertex.vertex == Vertex(Point(1, 2), Point(3, 4))).first() + hv = ( + s.query(HasVertex) + .filter(HasVertex.vertex == Vertex(Point(1, 2), Point(3, 4))) + .first() + ) print(hv.vertex.start) print(hv.vertex.end) diff --git a/doc/build/orm/contextual.rst b/doc/build/orm/contextual.rst index 102ea50d88..9fadf6c732 100644 --- a/doc/build/orm/contextual.rst +++ b/doc/build/orm/contextual.rst @@ -253,6 +253,7 @@ this in conjunction with a hypothetical event marker provided by the web framewo Session = scoped_session(sessionmaker(bind=some_engine), scopefunc=get_current_request) + @on_request_end def remove_session(req): Session.remove() diff --git a/doc/build/orm/dataclasses.rst b/doc/build/orm/dataclasses.rst index fa37e011e5..4e9943e76e 100644 --- a/doc/build/orm/dataclasses.rst +++ b/doc/build/orm/dataclasses.rst @@ -173,9 +173,7 @@ association:: __tablename__ = "user" __sa_dataclass_metadata_key__ = "sa" - id: int = field( - init=False, metadata={"sa": Column(Integer, primary_key=True)} - ) + id: int = field(init=False, metadata={"sa": Column(Integer, primary_key=True)}) name: str = field(default=None, metadata={"sa": Column(String(50))}) fullname: str = field(default=None, metadata={"sa": Column(String(50))}) nickname: str = field(default=None, metadata={"sa": Column(String(12))}) @@ -189,15 +187,9 @@ association:: class Address: __tablename__ = "address" __sa_dataclass_metadata_key__ = "sa" - id: int = field( - init=False, metadata={"sa": Column(Integer, primary_key=True)} - ) - user_id: int = field( - init=False, metadata={"sa": Column(ForeignKey("user.id"))} - ) - email_address: str = field( - default=None, metadata={"sa": Column(String(50))} - ) + id: int = field(init=False, metadata={"sa": Column(Integer, primary_key=True)}) + user_id: int = field(init=False, metadata={"sa": Column(ForeignKey("user.id"))}) + email_address: str = field(default=None, metadata={"sa": Column(String(50))}) .. 
_orm_imperative_dataclasses: @@ -231,6 +223,7 @@ variables:: mapper_registry = registry() + @dataclass class User: id: int = field(init=False) @@ -239,34 +232,40 @@ variables:: nickname: str = None addresses: List[Address] = field(default_factory=list) + @dataclass class Address: id: int = field(init=False) user_id: int = field(init=False) email_address: str = None + metadata_obj = MetaData() user = Table( - 'user', + "user", metadata_obj, - Column('id', Integer, primary_key=True), - Column('name', String(50)), - Column('fullname', String(50)), - Column('nickname', String(12)), + Column("id", Integer, primary_key=True), + Column("name", String(50)), + Column("fullname", String(50)), + Column("nickname", String(12)), ) address = Table( - 'address', + "address", metadata_obj, - Column('id', Integer, primary_key=True), - Column('user_id', Integer, ForeignKey('user.id')), - Column('email_address', String(50)), + Column("id", Integer, primary_key=True), + Column("user_id", Integer, ForeignKey("user.id")), + Column("email_address", String(50)), ) - mapper_registry.map_imperatively(User, user, properties={ - 'addresses': relationship(Address, backref='user', order_by=address.c.id), - }) + mapper_registry.map_imperatively( + User, + user, + properties={ + "addresses": relationship(Address, backref="user", order_by=address.c.id), + }, + ) mapper_registry.map_imperatively(Address, address) @@ -302,9 +301,7 @@ came from a mixin that is itself a dataclass, the form would be:: __sa_dataclass_metadata_key__ = "sa" - id: int = field( - init=False, metadata={"sa": Column(Integer, primary_key=True)} - ) + id: int = field(init=False, metadata={"sa": Column(Integer, primary_key=True)}) addresses: List[Address] = field( default_factory=list, metadata={"sa": lambda: relationship("Address")} @@ -315,15 +312,11 @@ came from a mixin that is itself a dataclass, the form would be:: class AddressMixin: __tablename__ = "address" __sa_dataclass_metadata_key__ = "sa" - id: int = field( - 
init=False, metadata={"sa": Column(Integer, primary_key=True)} - ) + id: int = field(init=False, metadata={"sa": Column(Integer, primary_key=True)}) user_id: int = field( init=False, metadata={"sa": lambda: Column(ForeignKey("user.id"))} ) - email_address: str = field( - default=None, metadata={"sa": Column(String(50))} - ) + email_address: str = field(default=None, metadata={"sa": Column(String(50))}) @mapper_registry.mapped @@ -422,6 +415,7 @@ object is declared inline with the declarative class. The } } + @mapper_registry.mapped @define(slots=False) class Address: @@ -436,7 +430,6 @@ object is declared inline with the declarative class. The user_id: int email_address: Optional[str] - .. note:: The ``attrs`` ``slots=True`` option, which enables ``__slots__`` on a mapped class, cannot be used with SQLAlchemy mappings without fully implementing alternative @@ -469,6 +462,7 @@ as well:: mapper_registry = registry() + @define(slots=False) class User: id: int @@ -477,34 +471,40 @@ as well:: nickname: str addresses: List[Address] + @define(slots=False) class Address: id: int user_id: int email_address: Optional[str] + metadata_obj = MetaData() user = Table( - 'user', + "user", metadata_obj, - Column('id', Integer, primary_key=True), - Column('name', String(50)), - Column('fullname', String(50)), - Column('nickname', String(12)), + Column("id", Integer, primary_key=True), + Column("name", String(50)), + Column("fullname", String(50)), + Column("nickname", String(12)), ) address = Table( - 'address', + "address", metadata_obj, - Column('id', Integer, primary_key=True), - Column('user_id', Integer, ForeignKey('user.id')), - Column('email_address', String(50)), + Column("id", Integer, primary_key=True), + Column("user_id", Integer, ForeignKey("user.id")), + Column("email_address", String(50)), ) - mapper_registry.map_imperatively(User, user, properties={ - 'addresses': relationship(Address, backref='user', order_by=address.c.id), - }) + mapper_registry.map_imperatively( + 
User, + user, + properties={ + "addresses": relationship(Address, backref="user", order_by=address.c.id), + }, + ) mapper_registry.map_imperatively(Address, address) diff --git a/doc/build/orm/declarative_config.rst b/doc/build/orm/declarative_config.rst index 55bf0f74c7..3a811ed82e 100644 --- a/doc/build/orm/declarative_config.rst +++ b/doc/build/orm/declarative_config.rst @@ -117,9 +117,7 @@ hybrid table style:: Column("lastname", String(50)), ) - fullname = column_property( - __table__.c.firstname + " " + __table__.c.lastname - ) + fullname = column_property(__table__.c.firstname + " " + __table__.c.lastname) addresses = relationship("Address", back_populates="user") @@ -182,14 +180,12 @@ particular columns as part of what the ORM should consider to be a primary key for the class, independently of schema-level primary key constraints:: class GroupUsers(Base): - __tablename__ = 'group_users' + __tablename__ = "group_users" user_id = Column(String(40)) group_id = Column(String(40)) - __mapper_args__ = { - "primary_key": [user_id, group_id] - } + __mapper_args__ = {"primary_key": [user_id, group_id]} .. seealso:: @@ -246,7 +242,6 @@ configuring a single-table inheritance mapping:: polymorphic_identity="employee", ) - .. 
seealso:: :ref:`single_inheritance` - background on the ORM single table inheritance @@ -282,21 +277,23 @@ collection:: def __mapper_args__(cls): return { "exclude_properties": [ - column.key for column in cls.__table__.c if - column.info.get("exclude", False) + column.key + for column in cls.__table__.c + if column.info.get("exclude", False) ] } + Base = declarative_base() + class SomeClass(ExcludeColsWFlag, Base): - __tablename__ = 'some_table' + __tablename__ = "some_table" id = Column(Integer, primary_key=True) data = Column(String) not_needed = Column(String, info={"exclude": True}) - Above, the ``ExcludeColsWFlag`` mixin provides a per-class ``__mapper_args__`` hook that will scan for :class:`.Column` objects that include the key/value ``'exclude': True`` passed to the :paramref:`.Column.info` parameter, and then @@ -323,7 +320,7 @@ assumed to be completed and the 'configure' step has finished:: class MyClass(Base): @classmethod def __declare_last__(cls): - "" + """""" # do something with mappings ``__declare_first__()`` @@ -335,7 +332,7 @@ configuration via the :meth:`.MapperEvents.before_configured` event:: class MyClass(Base): @classmethod def __declare_first__(cls): - "" + """""" # do something before mappings are configured .. 
versionadded:: 0.9.3 diff --git a/doc/build/orm/declarative_mixins.rst b/doc/build/orm/declarative_mixins.rst index 21345ccdc9..29ac56b97d 100644 --- a/doc/build/orm/declarative_mixins.rst +++ b/doc/build/orm/declarative_mixins.rst @@ -154,10 +154,11 @@ patterns common to many classes can be defined as callables:: class ReferenceAddressMixin: @declared_attr def address_id(cls): - return Column(Integer, ForeignKey('address.id')) + return Column(Integer, ForeignKey("address.id")) + class User(ReferenceAddressMixin, Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) Where above, the ``address_id`` class-level callable is executed at the @@ -176,11 +177,12 @@ will resolve them at class construction time:: def type_(cls): return Column(String(50)) - __mapper_args__= {'polymorphic_on':type_} + __mapper_args__ = {"polymorphic_on": type_} + class MyModel(MyMixin, Base): - __tablename__='test' - id = Column(Integer, primary_key=True) + __tablename__ = "test" + id = Column(Integer, primary_key=True) .. _orm_declarative_mixins_relationships: @@ -199,7 +201,7 @@ reference a common target class via many-to-one:: class RefTargetMixin: @declared_attr def target_id(cls): - return Column('target_id', ForeignKey('target.id')) + return Column("target_id", ForeignKey("target.id")) @declared_attr def target(cls): @@ -220,7 +222,6 @@ reference a common target class via many-to-one:: __tablename__ = "target" id = Column(Integer, primary_key=True) - Using Advanced Relationship Arguments (e.g. ``primaryjoin``, etc.) 
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -266,13 +267,11 @@ The condition above is resolved using a lambda:: class RefTargetMixin: @declared_attr def target_id(cls): - return Column('target_id', ForeignKey('target.id')) + return Column("target_id", ForeignKey("target.id")) @declared_attr def target(cls): - return relationship(Target, - primaryjoin=lambda: Target.id==cls.target_id - ) + return relationship(Target, primaryjoin=lambda: Target.id == cls.target_id) or alternatively, the string form (which ultimately generates a lambda):: @@ -284,9 +283,7 @@ or alternatively, the string form (which ultimately generates a lambda):: @declared_attr def target(cls): - return relationship( - Target, primaryjoin=f"Target.id=={cls.__name__}.target_id" - ) + return relationship(Target, primaryjoin=f"Target.id=={cls.__name__}.target_id") .. seealso:: @@ -526,9 +523,7 @@ establish it as part of ``__table_args__``:: @declared_attr def __table_args__(cls): - return ( - Index(f"test_idx_{cls.__tablename__}", "a", "b"), - ) + return (Index(f"test_idx_{cls.__tablename__}", "a", "b"),) class MyModel(MyMixin, Base): diff --git a/doc/build/orm/declarative_styles.rst b/doc/build/orm/declarative_styles.rst index 7a68d6fbb3..c1536a78ba 100644 --- a/doc/build/orm/declarative_styles.rst +++ b/doc/build/orm/declarative_styles.rst @@ -23,7 +23,6 @@ The most common approach is to generate a "base" class using the # declarative base class Base = declarative_base() - The declarative base class may also be created from an existing :class:`_orm.registry`, by using the :meth:`_orm.registry.generate_base` method:: @@ -91,6 +90,7 @@ be produced in a fully explicit fashion using the mapper_registry = registry() + class Base(metaclass=DeclarativeMeta): __abstract__ = True diff --git a/doc/build/orm/declarative_tables.rst b/doc/build/orm/declarative_tables.rst index dad145c42f..986205ec0d 100644 --- a/doc/build/orm/declarative_tables.rst +++ 
b/doc/build/orm/declarative_tables.rst @@ -343,12 +343,11 @@ use a declarative hybrid mapping, passing the from sqlalchemy import Table from sqlalchemy.orm import declarative_base - engine = create_engine( - "postgresql+psycopg2://user:pass@hostname/my_existing_database" - ) + engine = create_engine("postgresql+psycopg2://user:pass@hostname/my_existing_database") Base = declarative_base() + class MyClass(Base): __table__ = Table( "mytable", @@ -365,16 +364,15 @@ objects at once, then refer to them from the :class:`.MetaData`:: from sqlalchemy import Table from sqlalchemy.orm import declarative_base - engine = create_engine( - "postgresql+psycopg2://user:pass@hostname/my_existing_database" - ) + engine = create_engine("postgresql+psycopg2://user:pass@hostname/my_existing_database") Base = declarative_base() Base.metadata.reflect(engine) + class MyClass(Base): - __table__ = Base.metadata.tables['mytable'] + __table__ = Base.metadata.tables["mytable"] .. seealso:: @@ -430,9 +428,7 @@ the ``Reflected.prepare`` method is called. 
The above mapping is not complete until we do so, given an :class:`_engine.Engine`:: - engine = create_engine( - "postgresql+psycopg2://user:pass@hostname/my_existing_database" - ) + engine = create_engine("postgresql+psycopg2://user:pass@hostname/my_existing_database") Reflected.prepare(engine) The purpose of the ``Reflected`` class is to define the scope at which diff --git a/doc/build/orm/extensions/associationproxy.rst b/doc/build/orm/extensions/associationproxy.rst index 6c7bfcee07..214338a0b7 100644 --- a/doc/build/orm/extensions/associationproxy.rst +++ b/doc/build/orm/extensions/associationproxy.rst @@ -29,6 +29,7 @@ Each ``User`` can have any number of ``Keyword`` objects, and vice-versa Base = declarative_base() + class User(Base): __tablename__ = "user" id = Column(Integer, primary_key=True) @@ -232,7 +233,6 @@ objects that are obtained from the underlying ``UserKeywordAssociation`` element >>> user = User("log") >>> for kw in (Keyword("new_from_blammo"), Keyword("its_big")): ... user.keywords.append(kw) - ... >>> print(user.keywords) [Keyword('new_from_blammo'), Keyword('its_big')] @@ -441,21 +441,18 @@ transparently using the association proxy. In the example below, we illustrate usage of the assignment operator, also appropriately handled by the association proxy, to apply a dictionary value to the collection at once:: - >>> user = User('log') - >>> user.keywords = { - ... 'sk1':'kw1', - ... 'sk2':'kw2' - ... } + >>> user = User("log") + >>> user.keywords = {"sk1": "kw1", "sk2": "kw2"} >>> print(user.keywords) {'sk1': 'kw1', 'sk2': 'kw2'} - >>> user.keywords['sk3'] = 'kw3' - >>> del user.keywords['sk2'] + >>> user.keywords["sk3"] = "kw3" + >>> del user.keywords["sk2"] >>> print(user.keywords) {'sk1': 'kw1', 'sk3': 'kw3'} >>> # illustrate un-proxied usage - ... print(user.user_keyword_associations['sk3'].kw) + ... 
print(user.user_keyword_associations["sk3"].kw) <__main__.Keyword object at 0x12ceb90> One caveat with our example above is that because ``Keyword`` objects are created @@ -513,9 +510,7 @@ to a related object, as in the example mapping below:: ) # column-targeted association proxy - special_keys = association_proxy( - "user_keyword_associations", "special_key" - ) + special_keys = association_proxy("user_keyword_associations", "special_key") class UserKeywordAssociation(Base): @@ -531,7 +526,6 @@ to a related object, as in the example mapping below:: id = Column(Integer, primary_key=True) keyword = Column("keyword", String(64)) - The SQL generated takes the form of a correlated subquery against the EXISTS SQL operator so that it can be used in a WHERE clause without the need for additional modifications to the enclosing query. If the diff --git a/doc/build/orm/extensions/asyncio.rst b/doc/build/orm/extensions/asyncio.rst index c21d561b6b..9ae1dfc237 100644 --- a/doc/build/orm/extensions/asyncio.rst +++ b/doc/build/orm/extensions/asyncio.rst @@ -192,9 +192,7 @@ illustrates a complete example including mapper and session configuration:: # expire_on_commit=False will prevent attributes from being expired # after commit. 
- async_session = sessionmaker( - engine, expire_on_commit=False, class_=AsyncSession - ) + async_session = sessionmaker(engine, expire_on_commit=False, class_=AsyncSession) async with async_session() as session: async with session.begin(): @@ -595,7 +593,6 @@ constructs are illustrated below:: asyncio.run(go()) - The above example prints something along the lines of:: New DBAPI connection: > @@ -779,14 +776,14 @@ the usual ``await`` keywords are necessary, including for the :meth:`_asyncio.async_scoped_session.remove` method:: async def some_function(some_async_session, some_object): - # use the AsyncSession directly - some_async_session.add(some_object) + # use the AsyncSession directly + some_async_session.add(some_object) - # use the AsyncSession via the context-local proxy - await AsyncScopedSession.commit() + # use the AsyncSession via the context-local proxy + await AsyncScopedSession.commit() - # "remove" the current proxied AsyncSession for the local context - await AsyncScopedSession.remove() + # "remove" the current proxied AsyncSession for the local context + await AsyncScopedSession.remove() .. 
versionadded:: 1.4.19 diff --git a/doc/build/orm/extensions/baked.rst b/doc/build/orm/extensions/baked.rst index b3c21716a2..60bf06b2a1 100644 --- a/doc/build/orm/extensions/baked.rst +++ b/doc/build/orm/extensions/baked.rst @@ -213,6 +213,7 @@ Our example becomes:: my_simple_cache = {} + def lookup(session, id_argument): if "my_key" not in my_simple_cache: query = session.query(Model).filter(Model.id == bindparam("id")) @@ -294,6 +295,7 @@ into a direct use of "bakery" as follows:: parameterized_query = bakery.bake(create_model_query) if include_frobnizzle: + def include_frobnizzle_in_query(query): return query.filter(Model.frobnizzle == True) @@ -362,9 +364,7 @@ statement compilation time:: bakery = baked.bakery() baked_query = bakery(lambda session: session.query(User)) - baked_query += lambda q: q.filter( - User.name.in_(bindparam("username", expanding=True)) - ) + baked_query += lambda q: q.filter(User.name.in_(bindparam("username", expanding=True))) result = baked_query.with_session(session).params(username=["ed", "fred"]).all() diff --git a/doc/build/orm/extensions/declarative/mixins.rst b/doc/build/orm/extensions/declarative/mixins.rst index cde4c12bd1..7a18f07a7f 100644 --- a/doc/build/orm/extensions/declarative/mixins.rst +++ b/doc/build/orm/extensions/declarative/mixins.rst @@ -5,4 +5,4 @@ Mixin and Custom Base Classes ============================= -See :ref:`orm_mixins_toplevel` for this section. \ No newline at end of file +See :ref:`orm_mixins_toplevel` for this section. 
diff --git a/doc/build/orm/extensions/mypy.rst b/doc/build/orm/extensions/mypy.rst index 0d808f5c8a..b0d8930650 100644 --- a/doc/build/orm/extensions/mypy.rst +++ b/doc/build/orm/extensions/mypy.rst @@ -116,9 +116,7 @@ mapping, using the typical example of the ``User`` class:: # a select() construct makes use of SQL expressions derived from the # User class itself - select_stmt = ( - select(User).where(User.id.in_([3, 4, 5])).where(User.name.contains("s")) - ) + select_stmt = select(User).where(User.id.in_([3, 4, 5])).where(User.name.contains("s")) Above, the steps that the Mypy extension can take include: @@ -161,9 +159,7 @@ following:: ) name: Mapped[Optional[str]] = Mapped._special_method(Column(String)) - def __init__( - self, id: Optional[int] = ..., name: Optional[str] = ... - ) -> None: + def __init__(self, id: Optional[int] = ..., name: Optional[str] = ...) -> None: ... @@ -171,10 +167,7 @@ following:: print(f"Username: {some_user.name}") - select_stmt = ( - select(User).where(User.id.in_([3, 4, 5])).where(User.name.contains("s")) - ) - + select_stmt = select(User).where(User.id.in_([3, 4, 5])).where(User.name.contains("s")) The key steps which have been taken above include: @@ -452,9 +445,7 @@ applied explicitly:: id = Column(Integer, primary_key=True) name = Column(String) - addresses: Mapped[List["Address"]] = relationship( - "Address", back_populates="user" - ) + addresses: Mapped[List["Address"]] = relationship("Address", back_populates="user") class Address(Base): diff --git a/doc/build/orm/inheritance.rst b/doc/build/orm/inheritance.rst index 4fd3569be5..9c64668da1 100644 --- a/doc/build/orm/inheritance.rst +++ b/doc/build/orm/inheritance.rst @@ -641,6 +641,7 @@ almost the same way as we do other forms of inheritance mappings:: Base = declarative_base() + class Employee(ConcreteBase, Base): __tablename__ = "employee" id = Column(Integer, primary_key=True) @@ -749,6 +750,7 @@ base class with the ``__abstract__`` indicator:: Base = declarative_base() + 
class Employee(Base): __abstract__ = True @@ -817,6 +819,7 @@ class called :class:`.AbstractConcreteBase` which achieves this automatically:: "concrete": True, } + Base.registry.configure() Above, the :meth:`_orm.registry.configure` method is invoked, which will @@ -997,7 +1000,6 @@ mapping is illustrated below:: "concrete": True, } - Above, we use :func:`.polymorphic_union` in the same manner as before, except that we omit the ``employee`` table. diff --git a/doc/build/orm/inheritance_loading.rst b/doc/build/orm/inheritance_loading.rst index daf60b7f83..281a43a5c5 100644 --- a/doc/build/orm/inheritance_loading.rst +++ b/doc/build/orm/inheritance_loading.rst @@ -104,7 +104,7 @@ subclasses: entity = with_polymorphic(Employee, [Engineer, Manager]) # include columns for all mapped subclasses - entity = with_polymorphic(Employee, '*') + entity = with_polymorphic(Employee, "*") .. tip:: @@ -135,18 +135,15 @@ with the same name: .. sourcecode:: python+sql - engineer_employee = with_polymorphic( - Employee, [Engineer], aliased=True) - manager_employee = with_polymorphic( - Employee, [Manager], aliased=True) - - q = s.query(engineer_employee, manager_employee).\ - join( - manager_employee, - and_( - engineer_employee.id > manager_employee.id, - engineer_employee.name == manager_employee.name - ) + engineer_employee = with_polymorphic(Employee, [Engineer], aliased=True) + manager_employee = with_polymorphic(Employee, [Manager], aliased=True) + + q = s.query(engineer_employee, manager_employee).join( + manager_employee, + and_( + engineer_employee.id > manager_employee.id, + engineer_employee.name == manager_employee.name, + ), ) q.all() {opensql} @@ -195,18 +192,15 @@ is necessary: .. 
sourcecode:: python+sql - engineer_employee = with_polymorphic( - Employee, [Engineer], flat=True) - manager_employee = with_polymorphic( - Employee, [Manager], flat=True) - - q = s.query(engineer_employee, manager_employee).\ - join( - manager_employee, - and_( - engineer_employee.id > manager_employee.id, - engineer_employee.name == manager_employee.name - ) + engineer_employee = with_polymorphic(Employee, [Engineer], flat=True) + manager_employee = with_polymorphic(Employee, [Manager], flat=True) + + q = s.query(engineer_employee, manager_employee).join( + manager_employee, + and_( + engineer_employee.id > manager_employee.id, + engineer_employee.name == manager_employee.name, + ), ) q.all() {opensql} @@ -260,11 +254,11 @@ specific to ``Engineer`` as well as ``Manager`` in terms of ``eng_plus_manager`` eng_plus_manager = with_polymorphic(Employee, [Engineer, Manager]) query = session.query(eng_plus_manager).filter( - or_( - eng_plus_manager.Engineer.engineer_info=='x', - eng_plus_manager.Manager.manager_data=='y' - ) - ) + or_( + eng_plus_manager.Engineer.engineer_info == "x", + eng_plus_manager.Manager.manager_data == "y", + ) + ) A query as above would generate SQL resembling the following: @@ -307,15 +301,15 @@ default. 
We can add the parameter to our ``Employee`` mapping first introduced at :ref:`joined_inheritance`:: class Employee(Base): - __tablename__ = 'employee' + __tablename__ = "employee" id = Column(Integer, primary_key=True) name = Column(String(50)) type = Column(String(50)) __mapper_args__ = { - 'polymorphic_identity':'employee', - 'polymorphic_on':type, - 'with_polymorphic': '*' + "polymorphic_identity": "employee", + "polymorphic_on": type, + "with_polymorphic": "*", } Above is a common setting for :paramref:`.mapper.with_polymorphic`, @@ -339,22 +333,17 @@ that they should individually participate in polymorphic loading by default using the :paramref:`.mapper.polymorphic_load` parameter:: class Engineer(Employee): - __tablename__ = 'engineer' - id = Column(Integer, ForeignKey('employee.id'), primary_key=True) + __tablename__ = "engineer" + id = Column(Integer, ForeignKey("employee.id"), primary_key=True) engineer_info = Column(String(50)) - __mapper_args__ = { - 'polymorphic_identity':'engineer', - 'polymorphic_load': 'inline' - } + __mapper_args__ = {"polymorphic_identity": "engineer", "polymorphic_load": "inline"} + class Manager(Employee): - __tablename__ = 'manager' - id = Column(Integer, ForeignKey('employee.id'), primary_key=True) + __tablename__ = "manager" + id = Column(Integer, ForeignKey("employee.id"), primary_key=True) manager_data = Column(String(50)) - __mapper_args__ = { - 'polymorphic_identity':'manager', - 'polymorphic_load': 'inline' - } + __mapper_args__ = {"polymorphic_identity": "manager", "polymorphic_load": "inline"} Setting the :paramref:`.mapper.polymorphic_load` parameter to the value ``"inline"`` means that the ``Engineer`` and ``Manager`` classes above @@ -374,14 +363,9 @@ that entity, so that the entity (and its subclasses) can be referred to directly, rather than using an alias object. 
For simple cases it might be considered to be more succinct:: - session.query(Employee).\ - with_polymorphic([Engineer, Manager]).\ - filter( - or_( - Engineer.engineer_info=='w', - Manager.manager_data=='q' - ) - ) + session.query(Employee).with_polymorphic([Engineer, Manager]).filter( + or_(Engineer.engineer_info == "w", Manager.manager_data == "q") + ) The :meth:`_query.Query.with_polymorphic` method has a more complicated job than the :func:`_orm.with_polymorphic` function, as it needs to correctly @@ -445,37 +429,35 @@ by default by specifying the :paramref:`.mapper.polymorphic_load` parameter, using the value ``"selectin"`` on a per-subclass basis:: class Employee(Base): - __tablename__ = 'employee' + __tablename__ = "employee" id = Column(Integer, primary_key=True) name = Column(String(50)) type = Column(String(50)) - __mapper_args__ = { - 'polymorphic_identity': 'employee', - 'polymorphic_on': type - } + __mapper_args__ = {"polymorphic_identity": "employee", "polymorphic_on": type} + class Engineer(Employee): - __tablename__ = 'engineer' - id = Column(Integer, ForeignKey('employee.id'), primary_key=True) + __tablename__ = "engineer" + id = Column(Integer, ForeignKey("employee.id"), primary_key=True) engineer_name = Column(String(30)) __mapper_args__ = { - 'polymorphic_load': 'selectin', - 'polymorphic_identity': 'engineer', + "polymorphic_load": "selectin", + "polymorphic_identity": "engineer", } + class Manager(Employee): - __tablename__ = 'manager' - id = Column(Integer, ForeignKey('employee.id'), primary_key=True) + __tablename__ = "manager" + id = Column(Integer, ForeignKey("employee.id"), primary_key=True) manager_name = Column(String(30)) __mapper_args__ = { - 'polymorphic_load': 'selectin', - 'polymorphic_identity': 'manager', + "polymorphic_load": "selectin", + "polymorphic_identity": "manager", } - Unlike when using :func:`_orm.with_polymorphic`, when using the :func:`_orm.selectin_polymorphic` style of loading, we do **not** have the ability to 
refer to the ``Engineer`` or ``Manager`` entities within our main @@ -491,8 +473,7 @@ loading via the :func:`_orm.joinedload` function:: from sqlalchemy.orm import selectin_polymorphic query = session.query(Employee).options( - selectin_polymorphic(Employee, [Manager, Engineer]), - joinedload(Manager.paperwork) + selectin_polymorphic(Employee, [Manager, Engineer]), joinedload(Manager.paperwork) ) Using the query above, we get three SELECT statements emitted, however @@ -541,24 +522,22 @@ a load of ``Manager`` also fully loads ``VicePresident`` subtypes at the same ti # use "Employee" example from the enclosing section + class Manager(Employee): - __tablename__ = 'manager' - id = Column(Integer, ForeignKey('employee.id'), primary_key=True) + __tablename__ = "manager" + id = Column(Integer, ForeignKey("employee.id"), primary_key=True) manager_name = Column(String(30)) __mapper_args__ = { - 'polymorphic_load': 'selectin', - 'polymorphic_identity': 'manager', + "polymorphic_load": "selectin", + "polymorphic_identity": "manager", } + class VicePresident(Manager): vp_info = Column(String(30)) - __mapper_args__ = { - "polymorphic_load": "inline", - "polymorphic_identity": "vp" - } - + __mapper_args__ = {"polymorphic_load": "inline", "polymorphic_identity": "vp"} Above, we add a ``vp_info`` column to the ``manager`` table, local to the ``VicePresident`` subclass. This subclass is linked to the polymorphic @@ -592,8 +571,7 @@ set up, we could get the same result as follows:: manager_poly = with_polymorphic(Manager, [VicePresident]) - s.query(Employee).options( - selectin_polymorphic(Employee, [manager_poly])).all() + s.query(Employee).options(selectin_polymorphic(Employee, [manager_poly])).all() .. _inheritance_of_type: @@ -619,33 +597,35 @@ with a ``Company`` object. We'll add a ``company_id`` column to the .. 
sourcecode:: python class Company(Base): - __tablename__ = 'company' + __tablename__ = "company" id = Column(Integer, primary_key=True) name = Column(String(50)) - employees = relationship("Employee", - backref='company') + employees = relationship("Employee", backref="company") + class Employee(Base): - __tablename__ = 'employee' + __tablename__ = "employee" id = Column(Integer, primary_key=True) type = Column(String(20)) - company_id = Column(Integer, ForeignKey('company.id')) + company_id = Column(Integer, ForeignKey("company.id")) __mapper_args__ = { - 'polymorphic_on':type, - 'polymorphic_identity':'employee', + "polymorphic_on": type, + "polymorphic_identity": "employee", } + class Engineer(Employee): - __tablename__ = 'engineer' - id = Column(Integer, ForeignKey('employee.id'), primary_key=True) + __tablename__ = "engineer" + id = Column(Integer, ForeignKey("employee.id"), primary_key=True) engineer_info = Column(String(50)) - __mapper_args__ = {'polymorphic_identity':'engineer'} + __mapper_args__ = {"polymorphic_identity": "engineer"} + class Manager(Employee): - __tablename__ = 'manager' - id = Column(Integer, ForeignKey('employee.id'), primary_key=True) + __tablename__ = "manager" + id = Column(Integer, ForeignKey("employee.id"), primary_key=True) manager_data = Column(String(50)) - __mapper_args__ = {'polymorphic_identity':'manager'} + __mapper_args__ = {"polymorphic_identity": "manager"} When querying from ``Company`` onto the ``Employee`` relationship, the :meth:`_query.Query.join` method as well as operators like :meth:`.PropComparator.any` @@ -656,34 +636,29 @@ against the ``Engineer`` class, we can tell those methods to join or subquery against the set of columns representing the subclass using the :meth:`~.orm.interfaces.PropComparator.of_type` operator:: - session.query(Company).\ - join(Company.employees.of_type(Engineer)).\ - filter(Engineer.engineer_info=='someinfo') + session.query(Company).join(Company.employees.of_type(Engineer)).filter( + 
Engineer.engineer_info == "someinfo" + ) Similarly, to join from ``Company`` to the polymorphic entity that includes both ``Engineer`` and ``Manager`` columns:: - manager_and_engineer = with_polymorphic( - Employee, [Manager, Engineer]) + manager_and_engineer = with_polymorphic(Employee, [Manager, Engineer]) - session.query(Company).\ - join(Company.employees.of_type(manager_and_engineer)).\ - filter( - or_( - manager_and_engineer.Engineer.engineer_info == 'someinfo', - manager_and_engineer.Manager.manager_data == 'somedata' - ) + session.query(Company).join(Company.employees.of_type(manager_and_engineer)).filter( + or_( + manager_and_engineer.Engineer.engineer_info == "someinfo", + manager_and_engineer.Manager.manager_data == "somedata", ) + ) The :meth:`.PropComparator.any` and :meth:`.PropComparator.has` operators also can be used with :func:`~sqlalchemy.orm.interfaces.PropComparator.of_type`, such as when the embedded criterion is in terms of a subclass:: - session.query(Company).\ - filter( - Company.employees.of_type(Engineer). - any(Engineer.engineer_info=='someinfo') - ).all() + session.query(Company).filter( + Company.employees.of_type(Engineer).any(Engineer.engineer_info == "someinfo") + ).all() .. _eagerloading_polymorphic_subtypes: @@ -708,16 +683,11 @@ can be used to combine eager loading and :func:`_orm.with_polymorphic`, so that all sub-attributes of all referenced subtypes can be loaded:: - manager_and_engineer = with_polymorphic( - Employee, [Manager, Engineer], - flat=True) + manager_and_engineer = with_polymorphic(Employee, [Manager, Engineer], flat=True) - session.query(Company).\ - options( - joinedload( - Company.employees.of_type(manager_and_engineer) - ) - ) + session.query(Company).options( + joinedload(Company.employees.of_type(manager_and_engineer)) + ) .. 
note:: @@ -866,9 +836,7 @@ In our example from :ref:`single_inheritance`, the ``Manager`` mapping for examp class Manager(Employee): manager_data = Column(String(50)) - __mapper_args__ = { - 'polymorphic_identity':'manager' - } + __mapper_args__ = {"polymorphic_identity": "manager"} Above, there would be no ``Employee.manager_data`` attribute, even though the ``employee`` table has a ``manager_data`` column. @@ -914,13 +882,10 @@ inheritance in the case of single inheritance; it allows both for eager loading of subclass attributes as well as specification of subclasses in a query, just without the overhead of using OUTER JOIN:: - employee_poly = with_polymorphic(Employee, '*') + employee_poly = with_polymorphic(Employee, "*") q = session.query(employee_poly).filter( - or_( - employee_poly.name == 'a', - employee_poly.Manager.manager_data == 'b' - ) + or_(employee_poly.name == "a", employee_poly.Manager.manager_data == "b") ) Above, our query remains against a single table however we can refer to the diff --git a/doc/build/orm/join_conditions.rst b/doc/build/orm/join_conditions.rst index 509ccc98f3..e9ab6a39ee 100644 --- a/doc/build/orm/join_conditions.rst +++ b/doc/build/orm/join_conditions.rst @@ -25,8 +25,9 @@ class:: Base = declarative_base() + class Customer(Base): - __tablename__ = 'customer' + __tablename__ = "customer" id = Column(Integer, primary_key=True) name = Column(String) @@ -36,8 +37,9 @@ class:: billing_address = relationship("Address") shipping_address = relationship("Address") + class Address(Base): - __tablename__ = 'address' + __tablename__ = "address" id = Column(Integer, primary_key=True) street = Column(String) city = Column(String) @@ -64,7 +66,7 @@ by instructing for each one which foreign key column should be considered, and the appropriate form is as follows:: class Customer(Base): - __tablename__ = 'customer' + __tablename__ = "customer" id = Column(Integer, primary_key=True) name = Column(String) @@ -127,18 +129,21 @@ load those 
``Address`` objects which specify a city of "Boston":: Base = declarative_base() + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) name = Column(String) - boston_addresses = relationship("Address", - primaryjoin="and_(User.id==Address.user_id, " - "Address.city=='Boston')") + boston_addresses = relationship( + "Address", + primaryjoin="and_(User.id==Address.user_id, " "Address.city=='Boston')", + ) + class Address(Base): - __tablename__ = 'address' + __tablename__ = "address" id = Column(Integer, primary_key=True) - user_id = Column(Integer, ForeignKey('user.id')) + user_id = Column(Integer, ForeignKey("user.id")) street = Column(String) city = Column(String) @@ -208,19 +213,21 @@ type of the other:: Base = declarative_base() + class HostEntry(Base): - __tablename__ = 'host_entry' + __tablename__ = "host_entry" id = Column(Integer, primary_key=True) ip_address = Column(INET) content = Column(String(50)) # relationship() using explicit foreign_keys, remote_side - parent_host = relationship("HostEntry", - primaryjoin=ip_address == cast(content, INET), - foreign_keys=content, - remote_side=ip_address - ) + parent_host = relationship( + "HostEntry", + primaryjoin=ip_address == cast(content, INET), + foreign_keys=content, + remote_side=ip_address, + ) The above relationship will produce a join like:: @@ -241,8 +248,9 @@ SQL expressions:: from sqlalchemy.orm import foreign, remote + class HostEntry(Base): - __tablename__ = 'host_entry' + __tablename__ = "host_entry" id = Column(Integer, primary_key=True) ip_address = Column(INET) @@ -250,11 +258,10 @@ SQL expressions:: # relationship() using explicit foreign() and remote() annotations # in lieu of separate arguments - parent_host = relationship("HostEntry", - primaryjoin=remote(ip_address) == \ - cast(foreign(content), INET), - ) - + parent_host = relationship( + "HostEntry", + primaryjoin=remote(ip_address) == cast(foreign(content), INET), + ) .. 
_relationship_custom_operator: @@ -273,18 +280,20 @@ A comparison like the above may be used directly with a :func:`_orm.relationship`:: class IPA(Base): - __tablename__ = 'ip_address' + __tablename__ = "ip_address" id = Column(Integer, primary_key=True) v4address = Column(INET) - network = relationship("Network", - primaryjoin="IPA.v4address.bool_op('<<')" - "(foreign(Network.v4representation))", - viewonly=True - ) + network = relationship( + "Network", + primaryjoin="IPA.v4address.bool_op('<<')" "(foreign(Network.v4representation))", + viewonly=True, + ) + + class Network(Base): - __tablename__ = 'network' + __tablename__ = "network" id = Column(Integer, primary_key=True) v4representation = Column(CIDR) @@ -317,6 +326,7 @@ two expressions. The below example illustrates this with the from sqlalchemy import Column, Integer, func from sqlalchemy.orm import relationship, foreign + class Polygon(Base): __tablename__ = "polygon" id = Column(Integer, primary_key=True) @@ -327,6 +337,7 @@ two expressions. 
The below example illustrates this with the viewonly=True, ) + class Point(Base): __tablename__ = "point" id = Column(Integer, primary_key=True) @@ -356,35 +367,34 @@ for both; then to make ``Article`` refer to ``Writer`` as well, ``Article.magazine`` and ``Article.writer``:: class Magazine(Base): - __tablename__ = 'magazine' + __tablename__ = "magazine" id = Column(Integer, primary_key=True) class Article(Base): - __tablename__ = 'article' + __tablename__ = "article" article_id = Column(Integer) - magazine_id = Column(ForeignKey('magazine.id')) + magazine_id = Column(ForeignKey("magazine.id")) writer_id = Column() magazine = relationship("Magazine") writer = relationship("Writer") __table_args__ = ( - PrimaryKeyConstraint('article_id', 'magazine_id'), + PrimaryKeyConstraint("article_id", "magazine_id"), ForeignKeyConstraint( - ['writer_id', 'magazine_id'], - ['writer.id', 'writer.magazine_id'] + ["writer_id", "magazine_id"], ["writer.id", "writer.magazine_id"] ), ) class Writer(Base): - __tablename__ = 'writer' + __tablename__ = "writer" id = Column(Integer, primary_key=True) - magazine_id = Column(ForeignKey('magazine.id'), primary_key=True) + magazine_id = Column(ForeignKey("magazine.id"), primary_key=True) magazine = relationship("Magazine") When the above mapping is configured, we will see this warning emitted:: @@ -431,7 +441,7 @@ To get just #1 and #2, we could specify only ``Article.writer_id`` as the class Article(Base): # ... - writer = relationship("Writer", foreign_keys='Article.writer_id') + writer = relationship("Writer", foreign_keys="Article.writer_id") However, this has the effect of ``Article.writer`` not taking ``Article.magazine_id`` into account when querying against ``Writer``: @@ -456,7 +466,8 @@ annotating with :func:`_orm.foreign`:: writer = relationship( "Writer", primaryjoin="and_(Writer.id == foreign(Article.writer_id), " - "Writer.magazine_id == Article.magazine_id)") + "Writer.magazine_id == Article.magazine_id)", + ) .. 
versionchanged:: 1.0.0 the ORM will attempt to warn when a column is used as the synchronization target from more than one relationship @@ -482,16 +493,16 @@ is considered to be "many to one". For the comparison we'll use here, we'll be dealing with collections so we keep things configured as "one to many":: class Element(Base): - __tablename__ = 'element' + __tablename__ = "element" path = Column(String, primary_key=True) - descendants = relationship('Element', - primaryjoin= - remote(foreign(path)).like( - path.concat('/%')), - viewonly=True, - order_by=path) + descendants = relationship( + "Element", + primaryjoin=remote(foreign(path)).like(path.concat("/%")), + viewonly=True, + order_by=path, + ) Above, if given an ``Element`` object with a path attribute of ``"/foo/bar2"``, we seek for a load of ``Element.descendants`` to look like:: @@ -530,20 +541,24 @@ is when establishing a many-to-many relationship from a class to itself, as show Base = declarative_base() - node_to_node = Table("node_to_node", Base.metadata, + node_to_node = Table( + "node_to_node", + Base.metadata, Column("left_node_id", Integer, ForeignKey("node.id"), primary_key=True), - Column("right_node_id", Integer, ForeignKey("node.id"), primary_key=True) + Column("right_node_id", Integer, ForeignKey("node.id"), primary_key=True), ) + class Node(Base): - __tablename__ = 'node' + __tablename__ = "node" id = Column(Integer, primary_key=True) label = Column(String) - right_nodes = relationship("Node", - secondary=node_to_node, - primaryjoin=id==node_to_node.c.left_node_id, - secondaryjoin=id==node_to_node.c.right_node_id, - backref="left_nodes" + right_nodes = relationship( + "Node", + secondary=node_to_node, + primaryjoin=id == node_to_node.c.left_node_id, + secondaryjoin=id == node_to_node.c.right_node_id, + backref="left_nodes", ) Where above, SQLAlchemy can't know automatically which columns should connect @@ -561,14 +576,15 @@ When referring to a plain :class:`_schema.Table` object in a 
declarative string, use the string name of the table as it is present in the :class:`_schema.MetaData`:: class Node(Base): - __tablename__ = 'node' + __tablename__ = "node" id = Column(Integer, primary_key=True) label = Column(String) - right_nodes = relationship("Node", - secondary="node_to_node", - primaryjoin="Node.id==node_to_node.c.left_node_id", - secondaryjoin="Node.id==node_to_node.c.right_node_id", - backref="left_nodes" + right_nodes = relationship( + "Node", + secondary="node_to_node", + primaryjoin="Node.id==node_to_node.c.left_node_id", + secondaryjoin="Node.id==node_to_node.c.right_node_id", + backref="left_nodes", ) .. warning:: When passed as a Python-evaluable string, the @@ -588,26 +604,38 @@ to ``node.c.id``:: metadata_obj = MetaData() mapper_registry = registry() - node_to_node = Table("node_to_node", metadata_obj, + node_to_node = Table( + "node_to_node", + metadata_obj, Column("left_node_id", Integer, ForeignKey("node.id"), primary_key=True), - Column("right_node_id", Integer, ForeignKey("node.id"), primary_key=True) + Column("right_node_id", Integer, ForeignKey("node.id"), primary_key=True), ) - node = Table("node", metadata_obj, - Column('id', Integer, primary_key=True), - Column('label', String) + node = Table( + "node", + metadata_obj, + Column("id", Integer, primary_key=True), + Column("label", String), ) + + class Node(object): pass - mapper_registry.map_imperatively(Node, node, properties={ - 'right_nodes':relationship(Node, - secondary=node_to_node, - primaryjoin=node.c.id==node_to_node.c.left_node_id, - secondaryjoin=node.c.id==node_to_node.c.right_node_id, - backref="left_nodes" - )}) + mapper_registry.map_imperatively( + Node, + node, + properties={ + "right_nodes": relationship( + Node, + secondary=node_to_node, + primaryjoin=node.c.id == node_to_node.c.left_node_id, + secondaryjoin=node.c.id == node_to_node.c.right_node_id, + backref="left_nodes", + ) + }, + ) Note that in both examples, the :paramref:`_orm.relationship.backref` 
keyword specifies a ``left_nodes`` backref - when @@ -649,35 +677,38 @@ target consisting of multiple tables. Below is an example of such a join condition (requires version 0.9.2 at least to function as is):: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) - b_id = Column(ForeignKey('b.id')) + b_id = Column(ForeignKey("b.id")) + + d = relationship( + "D", + secondary="join(B, D, B.d_id == D.id)." "join(C, C.d_id == D.id)", + primaryjoin="and_(A.b_id == B.id, A.id == C.a_id)", + secondaryjoin="D.id == B.d_id", + uselist=False, + viewonly=True, + ) - d = relationship("D", - secondary="join(B, D, B.d_id == D.id)." - "join(C, C.d_id == D.id)", - primaryjoin="and_(A.b_id == B.id, A.id == C.a_id)", - secondaryjoin="D.id == B.d_id", - uselist=False, - viewonly=True - ) class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) - d_id = Column(ForeignKey('d.id')) + d_id = Column(ForeignKey("d.id")) + class C(Base): - __tablename__ = 'c' + __tablename__ = "c" id = Column(Integer, primary_key=True) - a_id = Column(ForeignKey('a.id')) - d_id = Column(ForeignKey('d.id')) + a_id = Column(ForeignKey("a.id")) + d_id = Column(ForeignKey("d.id")) + class D(Base): - __tablename__ = 'd' + __tablename__ = "d" id = Column(Integer, primary_key=True) @@ -749,33 +780,37 @@ entities ``C`` and ``D``, which also must have rows that line up with the rows in both ``A`` and ``B`` simultaneously:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) - b_id = Column(ForeignKey('b.id')) + b_id = Column(ForeignKey("b.id")) + class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) + class C(Base): - __tablename__ = 'c' + __tablename__ = "c" id = Column(Integer, primary_key=True) - a_id = Column(ForeignKey('a.id')) + a_id = Column(ForeignKey("a.id")) some_c_value = Column(String) + class D(Base): - __tablename__ = 'd' + 
__tablename__ = "d" id = Column(Integer, primary_key=True) - c_id = Column(ForeignKey('c.id')) - b_id = Column(ForeignKey('b.id')) + c_id = Column(ForeignKey("c.id")) + b_id = Column(ForeignKey("b.id")) some_d_value = Column(String) + # 1. set up the join() as a variable, so we can refer # to it in the mapping multiple times. j = join(B, D, D.b_id == B.id).join(C, C.id == D.c_id) @@ -827,9 +862,10 @@ so in terms of ``B_viacd_subquery`` rather than ``B`` directly: .. sourcecode:: python+sql ( - sess.query(A).join(A.b). - filter(B_viacd_subquery.some_b_column == "some b"). - order_by(B_viacd_subquery.id) + sess.query(A) + .join(A.b) + .filter(B_viacd_subquery.some_b_column == "some b") + .order_by(B_viacd_subquery.id) ).all() {opensql}SELECT a.id AS a_id, a.b_id AS a_b_id @@ -851,35 +887,32 @@ illustrates a non-primary mapper relationship that will load the first ten items for each collection:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) a_id = Column(ForeignKey("a.id")) + partition = select( - B, - func.row_number().over( - order_by=B.id, partition_by=B.a_id - ).label('index') + B, func.row_number().over(order_by=B.id, partition_by=B.a_id).label("index") ).alias() partitioned_b = aliased(B, partition) A.partitioned_bs = relationship( - partitioned_b, - primaryjoin=and_(partitioned_b.a_id == A.id, partition.c.index < 10) + partitioned_b, primaryjoin=and_(partitioned_b.a_id == A.id, partition.c.index < 10) ) We can use the above ``partitioned_bs`` relationship with most of the loader strategies, such as :func:`.selectinload`:: for a1 in s.query(A).options(selectinload(A.partitioned_bs)): - print(a1.partitioned_bs) # <-- will be no more than ten objects + print(a1.partitioned_bs) # <-- will be no more than ten objects Where above, the "selectinload" query looks like: @@ -921,7 +954,7 @@ conjunction with 
:class:`_query.Query` as follows: .. sourcecode:: python class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) @property @@ -934,4 +967,4 @@ of special Python attributes. .. seealso:: - :ref:`mapper_hybrids` \ No newline at end of file + :ref:`mapper_hybrids` diff --git a/doc/build/orm/loading.rst b/doc/build/orm/loading.rst index 0aca6cd0c9..fdb27806f4 100644 --- a/doc/build/orm/loading.rst +++ b/doc/build/orm/loading.rst @@ -1,3 +1,3 @@ :orphan: -Moved! :doc:`/orm/loading_relationships` \ No newline at end of file +Moved! :doc:`/orm/loading_relationships` diff --git a/doc/build/orm/loading_columns.rst b/doc/build/orm/loading_columns.rst index de10901e46..a50ac07b97 100644 --- a/doc/build/orm/loading_columns.rst +++ b/doc/build/orm/loading_columns.rst @@ -26,8 +26,9 @@ attribute is first referenced on the individual object instance:: from sqlalchemy.orm import deferred from sqlalchemy import Integer, String, Text, Binary, Column + class Book(Base): - __tablename__ = 'book' + __tablename__ = "book" book_id = Column(Integer, primary_key=True) title = Column(String(200), nullable=False) @@ -38,9 +39,9 @@ attribute is first referenced on the individual object instance:: Classical mappings as always place the usage of :func:`_orm.deferred` in the ``properties`` dictionary against the table-bound :class:`_schema.Column`:: - mapper_registry.map_imperatively(Book, book_table, properties={ - 'photo':deferred(book_table.c.photo) - }) + mapper_registry.map_imperatively( + Book, book_table, properties={"photo": deferred(book_table.c.photo)} + ) Deferred columns can be associated with a "group" name, so that they load together when any of them are first accessed. The example below defines a @@ -49,15 +50,15 @@ photos will be loaded in one SELECT statement. 
The ``.excerpt`` will be loaded separately when it is accessed:: class Book(Base): - __tablename__ = 'book' + __tablename__ = "book" book_id = Column(Integer, primary_key=True) title = Column(String(200), nullable=False) summary = Column(String(2000)) excerpt = deferred(Column(Text)) - photo1 = deferred(Column(Binary), group='photos') - photo2 = deferred(Column(Binary), group='photos') - photo3 = deferred(Column(Binary), group='photos') + photo1 = deferred(Column(Binary), group="photos") + photo2 = deferred(Column(Binary), group="photos") + photo3 = deferred(Column(Binary), group="photos") .. _deferred_options: @@ -73,7 +74,7 @@ basic query options are :func:`_orm.defer` and from sqlalchemy.orm import undefer query = session.query(Book) - query = query.options(defer('summary'), undefer('excerpt')) + query = query.options(defer("summary"), undefer("excerpt")) query.all() Above, the "summary" column will not load until accessed, and the "excerpt" @@ -85,7 +86,7 @@ using :func:`_orm.undefer_group`, sending in the group name:: from sqlalchemy.orm import undefer_group query = session.query(Book) - query.options(undefer_group('photos')).all() + query.options(undefer_group("photos")).all() .. 
_deferred_loading_w_multiple: @@ -117,8 +118,8 @@ those explicitly specified:: query = session.query(Author) query = query.options( - joinedload(Author.books).load_only(Book.summary, Book.excerpt), - ) + joinedload(Author.books).load_only(Book.summary, Book.excerpt), + ) Option structures as above can also be organized in more complex ways, such as hierarchically using the :meth:`_orm.Load.options` @@ -132,14 +133,13 @@ may be used:: query = session.query(Author) query = query.options( - joinedload(Author.book).options( - load_only(Book.summary, Book.excerpt), - joinedload(Book.citations).options( - joinedload(Citation.author), - defer(Citation.fulltext) - ) - ) - ) + joinedload(Author.book).options( + load_only(Book.summary, Book.excerpt), + joinedload(Book.citations).options( + joinedload(Citation.author), defer(Citation.fulltext) + ), + ) + ) .. versionadded:: 1.3.6 Added :meth:`_orm.Load.options` to allow easier construction of hierarchies of loader options. @@ -154,7 +154,7 @@ to create the same structure as we did above using :meth:`_orm.Load.options` as: query = query.options( joinedload(Author.book).load_only(Book.summary, Book.excerpt), defaultload(Author.book).joinedload(Book.citations).joinedload(Citation.author), - defaultload(Author.book).defaultload(Book.citations).defer(Citation.fulltext) + defaultload(Author.book).defaultload(Book.citations).defer(Citation.fulltext), ) .. seealso:: @@ -173,8 +173,7 @@ the "summary" and "excerpt" columns, we could say:: from sqlalchemy.orm import defer from sqlalchemy.orm import undefer - session.query(Book).options( - defer('*'), undefer("summary"), undefer("excerpt")) + session.query(Book).options(defer("*"), undefer("summary"), undefer("excerpt")) Above, the :func:`.defer` option is applied using a wildcard to all column attributes on the ``Book`` class. Then, the :func:`.undefer` option is used @@ -208,9 +207,7 @@ both at once. 
Using :class:`_orm.Load` looks like:: from sqlalchemy.orm import Load query = session.query(Book, Author).join(Book.author) - query = query.options( - Load(Book).load_only(Book.summary, Book.excerpt) - ) + query = query.options(Load(Book).load_only(Book.summary, Book.excerpt)) Above, :class:`_orm.Load` is used in conjunction with the exclusionary option :func:`.load_only` so that the deferral of all other columns only takes @@ -246,16 +243,15 @@ Deferred "raiseload" can be configured at the mapper level via class Book(Base): - __tablename__ = 'book' + __tablename__ = "book" book_id = Column(Integer, primary_key=True) title = Column(String(200), nullable=False) summary = deferred(Column(String(2000)), raiseload=True) excerpt = deferred(Column(Text), raiseload=True) - book_w_excerpt = session.query(Book).options(undefer(Book.excerpt)).first() - + book_w_excerpt = session.query(Book).options(undefer(Book.excerpt)).first() Column Deferral API ------------------- @@ -286,8 +282,8 @@ The bundle allows columns to be grouped together:: from sqlalchemy.orm import Bundle - bn = Bundle('mybundle', MyClass.data1, MyClass.data2) - for row in session.query(bn).filter(bn.c.data1 == 'd1'): + bn = Bundle("mybundle", MyClass.data1, MyClass.data2) + for row in session.query(bn).filter(bn.c.data1 == "d1"): print(row.mybundle.data1, row.mybundle.data2) The bundle can be subclassed to provide custom behaviors when results @@ -300,13 +296,14 @@ return structure with a straight Python dictionary:: from sqlalchemy.orm import Bundle + class DictBundle(Bundle): def create_row_processor(self, query, procs, labels): """Override create_row_processor to return values as dictionaries""" + def proc(row): - return dict( - zip(labels, (proc(row) for proc in procs)) - ) + return dict(zip(labels, (proc(row) for proc in procs))) + return proc .. 
note:: @@ -322,9 +319,9 @@ return structure with a straight Python dictionary:: A result from the above bundle will return dictionary values:: - bn = DictBundle('mybundle', MyClass.data1, MyClass.data2) - for row in session.query(bn).filter(bn.c.data1 == 'd1'): - print(row.mybundle['data1'], row.mybundle['data2']) + bn = DictBundle("mybundle", MyClass.data1, MyClass.data2) + for row in session.query(bn).filter(bn.c.data1 == "d1"): + print(row.mybundle["data1"], row.mybundle["data2"]) The :class:`.Bundle` construct is also integrated into the behavior of :func:`.composite`, where it is used to return composite attributes as objects diff --git a/doc/build/orm/loading_relationships.rst b/doc/build/orm/loading_relationships.rst index 5a1d5151d4..ad77f6e0de 100644 --- a/doc/build/orm/loading_relationships.rst +++ b/doc/build/orm/loading_relationships.rst @@ -88,10 +88,10 @@ For example, to configure a relationship to use joined eager loading when the parent object is queried:: class Parent(Base): - __tablename__ = 'parent' + __tablename__ = "parent" id = Column(Integer, primary_key=True) - children = relationship("Child", lazy='joined') + children = relationship("Child", lazy="joined") Above, whenever a collection of ``Parent`` objects are loaded, each ``Parent`` will also have its ``children`` collection populated, using @@ -128,16 +128,16 @@ The loader options can also be "chained" using **method chaining** to specify how loading should occur further levels deep:: session.query(Parent).options( - joinedload(Parent.children). - subqueryload(Child.subelements)).all() + joinedload(Parent.children).subqueryload(Child.subelements) + ).all() Chained loader options can be applied against a "lazy" loaded collection. This means that when a collection or association is lazily loaded upon access, the specified option will then take effect:: session.query(Parent).options( - lazyload(Parent.children). 
- subqueryload(Child.subelements)).all() + lazyload(Parent.children).subqueryload(Child.subelements) + ).all() Above, the query will return ``Parent`` objects without the ``children`` collections loaded. When the ``children`` collection on a particular @@ -149,9 +149,7 @@ The above examples, using :class:`_orm.Query`, are now referred to as :term:`1.x style` queries. The options system is available as well for :term:`2.0 style` queries using the :meth:`_sql.Select.options` method:: - stmt = select(Parent).options( - lazyload(Parent.children). - subqueryload(Child.subelements)) + stmt = select(Parent).options(lazyload(Parent.children).subqueryload(Child.subelements)) result = session.execute(stmt) @@ -191,18 +189,14 @@ Using method chaining, the loader style of each link in the path is explicitly stated. To navigate along a path without changing the existing loader style of a particular attribute, the :func:`.defaultload` method/function may be used:: - session.query(A).options( - defaultload(A.atob). - joinedload(B.btoc)).all() + session.query(A).options(defaultload(A.atob).joinedload(B.btoc)).all() A similar approach can be used to specify multiple sub-options at once, using the :meth:`_orm.Load.options` method:: session.query(A).options( - defaultload(A.atob).options( - joinedload(B.btoc), - joinedload(B.btod) - )).all() + defaultload(A.atob).options(joinedload(B.btoc), joinedload(B.btod)) + ).all() .. versionadded:: 1.3.6 added :meth:`_orm.Load.options` @@ -219,8 +213,8 @@ the :meth:`_orm.Load.options` method:: memory. For example, given the previous example:: session.query(Parent).options( - lazyload(Parent.children). 
- subqueryload(Child.subelements)).all() + lazyload(Parent.children).subqueryload(Child.subelements) + ).all() if the ``children`` collection on a particular ``Parent`` object loaded by the above query is expired (such as when a :class:`.Session` object's @@ -235,8 +229,8 @@ the :meth:`_orm.Load.options` method:: # change the options on Parent objects that were already loaded session.query(Parent).populate_existing().options( - lazyload(Parent.children). - lazyload(Child.subelements)).all() + lazyload(Parent.children).lazyload(Child.subelements) + ).all() If the objects loaded above are fully cleared from the :class:`.Session`, such as due to garbage collection or that :meth:`.Session.expunge_all` @@ -310,6 +304,7 @@ replaces the behavior of lazy loading with an informative error being raised:: from sqlalchemy.orm import raiseload + session.query(User).options(raiseload(User.addresses)) Above, a ``User`` object loaded from the above query will not have @@ -320,8 +315,7 @@ access this attribute, an ORM exception is raised. indicate that all relationships should use this strategy. For example, to set up only one attribute as eager loading, and all the rest as raise:: - session.query(Order).options( - joinedload(Order.items), raiseload('*')) + session.query(Order).options(joinedload(Order.items), raiseload("*")) The above wildcard will apply to **all** relationships not just on ``Order`` besides ``items``, but all those on the ``Item`` objects as well. 
To set up @@ -330,14 +324,11 @@ path with :class:`_orm.Load`:: from sqlalchemy.orm import Load - session.query(Order).options( - joinedload(Order.items), Load(Order).raiseload('*')) + session.query(Order).options(joinedload(Order.items), Load(Order).raiseload("*")) Conversely, to set up the raise for just the ``Item`` objects:: - session.query(Order).options( - joinedload(Order.items).raiseload('*')) - + session.query(Order).options(joinedload(Order.items).raiseload("*")) The :func:`.raiseload` option applies only to relationship attributes. For column-oriented attributes, the :func:`.defer` option supports the @@ -382,9 +373,9 @@ using the :func:`_orm.joinedload` loader option: .. sourcecode:: python+sql - >>> jack = session.query(User).\ - ... options(joinedload(User.addresses)).\ - ... filter_by(name='jack').all() + >>> jack = ( + ... session.query(User).options(joinedload(User.addresses)).filter_by(name="jack").all() + ... ) {opensql}SELECT addresses_1.id AS addresses_1_id, addresses_1.email_address AS addresses_1_email_address, @@ -409,13 +400,12 @@ at the mapping level via the :paramref:`_orm.relationship.innerjoin` flag:: class Address(Base): # ... - user_id = Column(ForeignKey('users.id'), nullable=False) + user_id = Column(ForeignKey("users.id"), nullable=False) user = relationship(User, lazy="joined", innerjoin=True) At the query option level, via the :paramref:`_orm.joinedload.innerjoin` flag:: - session.query(Address).options( - joinedload(Address.user, innerjoin=True)) + session.query(Address).options(joinedload(Address.user, innerjoin=True)) The JOIN will right-nest itself when applied in a chain that includes an OUTER JOIN: @@ -423,8 +413,8 @@ an OUTER JOIN: .. sourcecode:: python+sql >>> session.query(User).options( - ... joinedload(User.addresses). - ... joinedload(Address.widgets, innerjoin=True)).all() + ... joinedload(User.addresses).joinedload(Address.widgets, innerjoin=True) + ... 
).all() {opensql}SELECT widgets_1.id AS widgets_1_id, widgets_1.name AS widgets_1_name, @@ -519,10 +509,13 @@ named in the query: .. sourcecode:: python+sql - >>> jack = session.query(User).\ - ... options(joinedload(User.addresses)).\ - ... filter(User.name=='jack').\ - ... order_by(Address.email_address).all() + >>> jack = ( + ... session.query(User) + ... .options(joinedload(User.addresses)) + ... .filter(User.name == "jack") + ... .order_by(Address.email_address) + ... .all() + ... ) {opensql}SELECT addresses_1.id AS addresses_1_id, addresses_1.email_address AS addresses_1_email_address, @@ -544,10 +537,13 @@ address is to use :meth:`_query.Query.join`: .. sourcecode:: python+sql - >>> jack = session.query(User).\ - ... join(User.addresses).\ - ... filter(User.name=='jack').\ - ... order_by(Address.email_address).all() + >>> jack = ( + ... session.query(User) + ... .join(User.addresses) + ... .filter(User.name == "jack") + ... .order_by(Address.email_address) + ... .all() + ... ) {opensql} SELECT users.id AS users_id, @@ -568,11 +564,14 @@ are ordering on, the other is used anonymously to load the contents of the .. sourcecode:: python+sql - >>> jack = session.query(User).\ - ... join(User.addresses).\ - ... options(joinedload(User.addresses)).\ - ... filter(User.name=='jack').\ - ... order_by(Address.email_address).all() + >>> jack = ( + ... session.query(User) + ... .join(User.addresses) + ... .options(joinedload(User.addresses)) + ... .filter(User.name == "jack") + ... .order_by(Address.email_address) + ... .all() + ... ) {opensql}SELECT addresses_1.id AS addresses_1_id, addresses_1.email_address AS addresses_1_email_address, @@ -600,12 +599,14 @@ to see why :func:`joinedload` does what it does, consider if we were .. sourcecode:: python+sql - >>> jack = session.query(User).\ - ... join(User.addresses).\ - ... options(joinedload(User.addresses)).\ - ... filter(User.name=='jack').\ - ... filter(Address.email_address=='someaddress@foo.com').\ - ... 
all() + >>> jack = ( + ... session.query(User) + ... .join(User.addresses) + ... .options(joinedload(User.addresses)) + ... .filter(User.name == "jack") + ... .filter(Address.email_address == "someaddress@foo.com") + ... .all() + ... ) {opensql}SELECT addresses_1.id AS addresses_1_id, addresses_1.email_address AS addresses_1_email_address, @@ -634,12 +635,14 @@ into :func:`.subqueryload`: .. sourcecode:: python+sql - >>> jack = session.query(User).\ - ... join(User.addresses).\ - ... options(subqueryload(User.addresses)).\ - ... filter(User.name=='jack').\ - ... filter(Address.email_address=='someaddress@foo.com').\ - ... all() + >>> jack = ( + ... session.query(User) + ... .join(User.addresses) + ... .options(subqueryload(User.addresses)) + ... .filter(User.name == "jack") + ... .filter(Address.email_address == "someaddress@foo.com") + ... .all() + ... ) {opensql}SELECT users.id AS users_id, users.name AS users_name, @@ -688,9 +691,12 @@ the collection members to load them at once: .. sourcecode:: python+sql - >>> jack = session.query(User).\ - ... options(subqueryload(User.addresses)).\ - ... filter_by(name='jack').all() + >>> jack = ( + ... session.query(User) + ... .options(subqueryload(User.addresses)) + ... .filter_by(name="jack") + ... .all() + ... ) {opensql}SELECT users.id AS users_id, users.name AS users_name, @@ -752,18 +758,15 @@ the same ordering as used by the parent query. 
Without it, there is a chance that the inner query could return the wrong rows:: # incorrect, no ORDER BY - session.query(User).options( - subqueryload(User.addresses)).first() + session.query(User).options(subqueryload(User.addresses)).first() # incorrect if User.name is not unique - session.query(User).options( - subqueryload(User.addresses) - ).order_by(User.name).first() + session.query(User).options(subqueryload(User.addresses)).order_by(User.name).first() # correct - session.query(User).options( - subqueryload(User.addresses) - ).order_by(User.name, User.id).first() + session.query(User).options(subqueryload(User.addresses)).order_by( + User.name, User.id + ).first() .. seealso:: @@ -793,9 +796,12 @@ order to load related associations: .. sourcecode:: python+sql - >>> jack = session.query(User).\ - ... options(selectinload(User.addresses)).\ - ... filter(or_(User.name == 'jack', User.name == 'ed')).all() + >>> jack = ( + ... session.query(User) + ... .options(selectinload(User.addresses)) + ... .filter(or_(User.name == "jack", User.name == "ed")) + ... .all() + ... ) {opensql}SELECT users.id AS users_id, users.name AS users_name, @@ -829,8 +835,7 @@ value from the parent object is used: .. sourcecode:: python+sql - >>> session.query(Address).\ - ... options(selectinload(Address.user)).all() + >>> session.query(Address).options(selectinload(Address.user)).all() {opensql}SELECT addresses.id AS addresses_id, addresses.email_address AS addresses_email_address, @@ -1012,7 +1017,7 @@ attributes not otherwise specified in the :class:`_query.Query`. This feature is available by passing the string ``'*'`` as the argument to any of these options:: - session.query(MyClass).options(lazyload('*')) + session.query(MyClass).options(lazyload("*")) Above, the ``lazyload('*')`` option will supersede the ``lazy`` setting of all :func:`_orm.relationship` constructs in use for that query, @@ -1028,10 +1033,7 @@ query, such as :func:`.eagerload`, :func:`.subqueryload`, etc. 
The query below will still use joined loading for the ``widget`` relationship:: - session.query(MyClass).options( - lazyload('*'), - joinedload(MyClass.widget) - ) + session.query(MyClass).options(lazyload("*"), joinedload(MyClass.widget)) If multiple ``'*'`` options are passed, the last one overrides those previously passed. @@ -1045,8 +1047,7 @@ we can instruct all relationships on ``Address`` only to use lazy loading by first applying the :class:`_orm.Load` object, then specifying the ``*`` as a chained option:: - session.query(User, Address).options( - Load(Address).lazyload('*')) + session.query(User, Address).options(Load(Address).lazyload("*")) Above, all relationships on ``Address`` will be set to a lazy load. @@ -1073,18 +1074,18 @@ explicitly. Below, we specify a join between ``User`` and ``Address`` and additionally establish this as the basis for eager loading of ``User.addresses``:: class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) addresses = relationship("Address") + class Address(Base): - __tablename__ = 'address' + __tablename__ = "address" # ... 
- q = session.query(User).join(User.addresses).\ - options(contains_eager(User.addresses)) + q = session.query(User).join(User.addresses).options(contains_eager(User.addresses)) If the "eager" portion of the statement is "aliased", the path should be specified using :meth:`.PropComparator.of_type`, which allows @@ -1096,9 +1097,11 @@ the specific :func:`_orm.aliased` construct to be passed: adalias = aliased(Address) # construct a Query object which expects the "addresses" results - query = session.query(User).\ - outerjoin(User.addresses.of_type(adalias)).\ - options(contains_eager(User.addresses.of_type(adalias))) + query = ( + session.query(User) + .outerjoin(User.addresses.of_type(adalias)) + .options(contains_eager(User.addresses.of_type(adalias))) + ) # get results normally r = query.all() @@ -1117,9 +1120,7 @@ The path given as the argument to :func:`.contains_eager` needs to be a full path from the starting entity. For example if we were loading ``Users->orders->Order->items->Item``, the option would be used as:: - query(User).options( - contains_eager(User.orders). 
- contains_eager(Order.items)) + query(User).options(contains_eager(User.orders).contains_eager(Order.items)) Using contains_eager() to load a custom-filtered collection result ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -1136,11 +1137,13 @@ routing it using :func:`_orm.contains_eager`, also using :meth:`_query.Query.populate_existing` to ensure any already-loaded collections are overwritten:: - q = session.query(User).\ - join(User.addresses).\ - filter(Address.email_address.like('%@aol.com')).\ - options(contains_eager(User.addresses)).\ - populate_existing() + q = ( + session.query(User) + .join(User.addresses) + .filter(Address.email_address.like("%@aol.com")) + .options(contains_eager(User.addresses)) + .populate_existing() + ) The above query will load only ``User`` objects which contain at least ``Address`` object that contains the substring ``'aol.com'`` in its @@ -1204,20 +1207,16 @@ Given the following mapping:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) - b_id = Column(ForeignKey('b.id')) - b = relationship( - "B", - backref=backref("a", uselist=False), - lazy='joined') + b_id = Column(ForeignKey("b.id")) + b = relationship("B", backref=backref("a", uselist=False), lazy="joined") class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) - If we query for an ``A`` row, and then ask it for ``a.b.a``, we will get an extra SELECT:: @@ -1232,10 +1231,11 @@ can create an on-load rule to populate this for us:: from sqlalchemy import event from sqlalchemy.orm import attributes + @event.listens_for(A, "load") def load_b(target, context): - if 'b' in target.__dict__: - attributes.set_committed_value(target.b, 'a', target) + if "b" in target.__dict__: + attributes.set_committed_value(target.b, "a", target) Now when we query for ``A``, we will get ``A.b`` from the joined eager load, and ``A.b.a`` from our event: @@ -1253,7 +1253,6 @@ and ``A.b.a`` 
from our event: (1, 0) {stop}assert a1.b.a is a1 - Relationship Loader API ----------------------- diff --git a/doc/build/orm/mapped_attributes.rst b/doc/build/orm/mapped_attributes.rst index a4fd3115d5..5ee7d6498f 100644 --- a/doc/build/orm/mapped_attributes.rst +++ b/doc/build/orm/mapped_attributes.rst @@ -19,15 +19,16 @@ issued when the ORM is populating the object:: from sqlalchemy.orm import validates + class EmailAddress(Base): - __tablename__ = 'address' + __tablename__ = "address" id = Column(Integer, primary_key=True) email = Column(String) - @validates('email') + @validates("email") def validate_email(self, key, address): - if '@' not in address: + if "@" not in address: raise ValueError("failed simple email validation") return address @@ -42,18 +43,18 @@ collection:: from sqlalchemy.orm import validates + class User(Base): # ... addresses = relationship("Address") - @validates('addresses') + @validates("addresses") def validate_address(self, key, address): - if '@' not in address.email: + if "@" not in address.email: raise ValueError("failed simplified email validation") return address - The validation function by default does not get emitted for collection remove events, as the typical expectation is that a value being discarded doesn't require validation. However, :func:`.validates` supports reception @@ -63,18 +64,18 @@ argument which if ``True`` indicates that the operation is a removal:: from sqlalchemy.orm import validates + class User(Base): # ... 
addresses = relationship("Address") - @validates('addresses', include_removes=True) + @validates("addresses", include_removes=True) def validate_address(self, key, address, is_remove): if is_remove: - raise ValueError( - "not allowed to remove items from the collection") + raise ValueError("not allowed to remove items from the collection") else: - if '@' not in address.email: + if "@" not in address.email: raise ValueError("failed simplified email validation") return address @@ -85,14 +86,15 @@ event occurs as a result of a backref:: from sqlalchemy.orm import validates + class User(Base): # ... - addresses = relationship("Address", backref='user') + addresses = relationship("Address", backref="user") - @validates('addresses', include_backrefs=False) + @validates("addresses", include_backrefs=False) def validate_address(self, key, address): - if '@' not in address: + if "@" not in address: raise ValueError("failed simplified email validation") return address @@ -131,7 +133,7 @@ plain descriptor, and to have it read/write from a mapped attribute with a different name. Below we illustrate this using Python 2.6-style properties:: class EmailAddress(Base): - __tablename__ = 'email_address' + __tablename__ = "email_address" id = Column(Integer, primary_key=True) @@ -158,8 +160,9 @@ usable with :class:`_query.Query`. To provide these, we instead use the from sqlalchemy.ext.hybrid import hybrid_property + class EmailAddress(Base): - __tablename__ = 'email_address' + __tablename__ = "email_address" id = Column(Integer, primary_key=True) @@ -180,6 +183,7 @@ that is, from the ``EmailAddress`` class directly: .. sourcecode:: python+sql from sqlalchemy.orm import Session + session = Session() {sql}address = session.query(EmailAddress).\ @@ -189,14 +193,12 @@ that is, from the ``EmailAddress`` class directly: FROM address WHERE address.email = ? 
('address@example.com',) - {stop} - address.email = 'otheraddress@example.com' + address.email = "otheraddress@example.com" {sql}session.commit() UPDATE address SET email=? WHERE address.id = ? ('otheraddress@example.com', 1) COMMIT - {stop} The :class:`~.hybrid_property` also allows us to change the behavior of the attribute, including defining separate behaviors when the attribute is @@ -206,7 +208,7 @@ host name automatically, we might define two sets of string manipulation logic:: class EmailAddress(Base): - __tablename__ = 'email_address' + __tablename__ = "email_address" id = Column(Integer, primary_key=True) @@ -245,7 +247,6 @@ attribute, a SQL function is rendered which produces the same effect: FROM address WHERE substr(address.email, ?, length(address.email) - ?) = ? (0, 12, 'address') - {stop} Read more about Hybrids at :ref:`hybrids_toplevel`. @@ -261,9 +262,10 @@ In the most basic sense, the synonym is an easy way to make a certain attribute available by an additional name:: from sqlalchemy.orm import synonym - + + class MyClass(Base): - __tablename__ = 'my_table' + __tablename__ = "my_table" id = Column(Integer, primary_key=True) job_status = Column(String(50)) @@ -274,19 +276,19 @@ The above class ``MyClass`` has two attributes, ``.job_status`` and ``.status`` that will behave as one attribute, both at the expression level:: - >>> print(MyClass.job_status == 'some_status') + >>> print(MyClass.job_status == "some_status") my_table.job_status = :job_status_1 - >>> print(MyClass.status == 'some_status') + >>> print(MyClass.status == "some_status") my_table.job_status = :job_status_1 and at the instance level:: - >>> m1 = MyClass(status='x') + >>> m1 = MyClass(status="x") >>> m1.status, m1.job_status ('x', 'x') - >>> m1.job_status = 'y' + >>> m1.job_status = "y" >>> m1.status, m1.job_status ('y', 'y') @@ -299,7 +301,7 @@ a user-defined :term:`descriptor`. 
We can supply our ``status`` synonym with a ``@property``:: class MyClass(Base): - __tablename__ = 'my_table' + __tablename__ = "my_table" id = Column(Integer, primary_key=True) status = Column(String(50)) @@ -315,8 +317,9 @@ using the :func:`.synonym_for` decorator:: from sqlalchemy.ext.declarative import synonym_for + class MyClass(Base): - __tablename__ = 'my_table' + __tablename__ = "my_table" id = Column(Integer, primary_key=True) status = Column(String(50)) diff --git a/doc/build/orm/mapped_sql_expr.rst b/doc/build/orm/mapped_sql_expr.rst index eefd1d5d68..47af9b22c4 100644 --- a/doc/build/orm/mapped_sql_expr.rst +++ b/doc/build/orm/mapped_sql_expr.rst @@ -21,8 +21,9 @@ will provide for us the ``fullname``, which is the string concatenation of the t from sqlalchemy.ext.hybrid import hybrid_property + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) firstname = Column(String(50)) lastname = Column(String(50)) @@ -51,8 +52,9 @@ needs to be present inside the hybrid, using the ``if`` statement in Python and from sqlalchemy.ext.hybrid import hybrid_property from sqlalchemy.sql import case + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) firstname = Column(String(50)) lastname = Column(String(50)) @@ -66,9 +68,12 @@ needs to be present inside the hybrid, using the ``if`` statement in Python and @fullname.expression def fullname(cls): - return case([ - (cls.firstname != None, cls.firstname + " " + cls.lastname), - ], else_ = cls.lastname) + return case( + [ + (cls.firstname != None, cls.firstname + " " + cls.lastname), + ], + else_=cls.lastname, + ) .. 
_mapper_column_property_sql_expressions: @@ -95,8 +100,9 @@ follows:: from sqlalchemy.orm import column_property + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) firstname = Column(String(50)) lastname = Column(String(50)) @@ -115,28 +121,30 @@ of ``Address`` objects available for a particular ``User``:: Base = declarative_base() + class Address(Base): - __tablename__ = 'address' + __tablename__ = "address" id = Column(Integer, primary_key=True) - user_id = Column(Integer, ForeignKey('user.id')) + user_id = Column(Integer, ForeignKey("user.id")) + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) address_count = column_property( - select(func.count(Address.id)). - where(Address.user_id==id). - correlate_except(Address). - scalar_subquery() + select(func.count(Address.id)) + .where(Address.user_id == id) + .correlate_except(Address) + .scalar_subquery() ) In the above example, we define a :func:`_expression.ScalarSelect` construct like the following:: stmt = ( - select(func.count(Address.id)). - where(Address.user_id==id). - correlate_except(Address). - scalar_subquery() + select(func.count(Address.id)) + .where(Address.user_id == id) + .correlate_except(Address) + .scalar_subquery() ) Above, we first use :func:`_sql.select` to create a :class:`_sql.Select` @@ -166,9 +174,7 @@ to add an additional property after the fact:: # only works if a declarative base class is in use User.address_count = column_property( - select(func.count(Address.id)). - where(Address.user_id==User.id). - scalar_subquery() + select(func.count(Address.id)).where(Address.user_id == User.id).scalar_subquery() ) When using mapping styles that don't use :func:`_orm.declarative_base`, @@ -180,9 +186,10 @@ which can be obtained using :func:`_sa.inspect`:: reg = registry() + @reg.mapped class User: - __tablename__ = 'user' + __tablename__ = "user" # ... 
additional mapping directives @@ -191,11 +198,12 @@ which can be obtained using :func:`_sa.inspect`:: # works for any kind of mapping from sqlalchemy import inspect + inspect(User).add_property( column_property( - select(func.count(Address.id)). - where(Address.user_id==User.id). - scalar_subquery() + select(func.count(Address.id)) + .where(Address.user_id == User.id) + .scalar_subquery() ) ) @@ -205,17 +213,19 @@ association table to both tables in a relationship:: from sqlalchemy import and_ + class Author(Base): # ... book_count = column_property( - select(func.count(books.c.id) - ).where( + select(func.count(books.c.id)) + .where( and_( - book_authors.c.author_id==authors.c.id, - book_authors.c.book_id==books.c.id + book_authors.c.author_id == authors.c.id, + book_authors.c.book_id == books.c.id, ) - ).scalar_subquery() + ) + .scalar_subquery() ) .. _mapper_column_property_sql_expressions_composed: @@ -238,21 +248,20 @@ attribute, which is itself a :class:`.ColumnProperty`:: class File(Base): - __tablename__ = 'file' + __tablename__ = "file" id = Column(Integer, primary_key=True) name = Column(String(64)) extension = Column(String(8)) - filename = column_property(name + '.' + extension) - path = column_property('C:/' + filename.expression) + filename = column_property(name + "." + extension) + path = column_property("C:/" + filename.expression) When the ``File`` class is used in expressions normally, the attributes assigned to ``filename`` and ``path`` are usable directly. 
The use of the :attr:`.ColumnProperty.expression` attribute is only necessary when using the :class:`.ColumnProperty` directly within the mapping definition:: - q = session.query(File.path).filter(File.filename == 'foo.txt') - + q = session.query(File.path).filter(File.filename == "foo.txt") Using a plain descriptor ------------------------ @@ -269,19 +278,18 @@ which is then used to emit a query:: from sqlalchemy.orm import object_session from sqlalchemy import select, func + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) firstname = Column(String(50)) lastname = Column(String(50)) @property def address_count(self): - return object_session(self).\ - scalar( - select(func.count(Address.id)).\ - where(Address.user_id==self.id) - ) + return object_session(self).scalar( + select(func.count(Address.id)).where(Address.user_id == self.id) + ) The plain descriptor approach is useful as a last resort, but is less performant in the usual case than both the hybrid and column property approaches, in that @@ -310,8 +318,9 @@ may be applied:: from sqlalchemy.orm import query_expression + class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) x = Column(Integer) y = Column(Integer) @@ -322,8 +331,8 @@ We can then query for objects of type ``A``, applying an arbitrary SQL expression to be populated into ``A.expr``:: from sqlalchemy.orm import with_expression - q = session.query(A).options( - with_expression(A.expr, A.x + A.y)) + + q = session.query(A).options(with_expression(A.expr, A.x + A.y)) The :func:`.query_expression` mapping has these caveats: @@ -341,8 +350,12 @@ The :func:`.query_expression` mapping has these caveats: To ensure the attribute is re-loaded, use :meth:`_orm.Query.populate_existing`:: - obj = session.query(A).populate_existing().options( - with_expression(A.expr, some_expr)).first() + obj = ( + session.query(A) + .populate_existing() + 
.options(with_expression(A.expr, some_expr)) + .first() + ) * The query_expression value **does not refresh when the object is expired**. Once the object is expired, either via :meth:`.Session.expire` @@ -357,18 +370,24 @@ The :func:`.query_expression` mapping has these caveats: ad-hoc expression; that is, this won't work:: # wont work - q = session.query(A).options( - with_expression(A.expr, A.x + A.y) - ).filter(A.expr > 5).order_by(A.expr) + q = ( + session.query(A) + .options(with_expression(A.expr, A.x + A.y)) + .filter(A.expr > 5) + .order_by(A.expr) + ) The ``A.expr`` expression will resolve to NULL in the above WHERE clause and ORDER BY clause. To use the expression throughout the query, assign to a variable and use that:: a_expr = A.x + A.y - q = session.query(A).options( - with_expression(A.expr, a_expr) - ).filter(a_expr > 5).order_by(a_expr) + q = ( + session.query(A) + .options(with_expression(A.expr, a_expr)) + .filter(a_expr > 5) + .order_by(a_expr) + ) .. versionadded:: 1.2 diff --git a/doc/build/orm/mapping_columns.rst b/doc/build/orm/mapping_columns.rst index 788d5776ef..1ec8636b6d 100644 --- a/doc/build/orm/mapping_columns.rst +++ b/doc/build/orm/mapping_columns.rst @@ -46,9 +46,9 @@ The name assigned to the Python attribute which maps to it that way, as we illustrate here in a Declarative mapping:: class User(Base): - __tablename__ = 'user' - id = Column('user_id', Integer, primary_key=True) - name = Column('user_name', String(50)) + __tablename__ = "user" + id = Column("user_id", Integer, primary_key=True) + name = Column("user_name", String(50)) Where above ``User.id`` resolves to a column named ``user_id`` and ``User.name`` resolves to a column named ``user_name``. 
@@ -65,11 +65,14 @@ The corresponding technique for an :term:`imperative` mapping is to place the desired key in the :paramref:`_orm.mapper.properties` dictionary with the desired key:: - mapper_registry.map_imperatively(User, user_table, properties={ - 'id': user_table.c.user_id, - 'name': user_table.c.user_name, - }) - + mapper_registry.map_imperatively( + User, + user_table, + properties={ + "id": user_table.c.user_id, + "name": user_table.c.user_name, + }, + ) .. _mapper_automated_reflection_schemes: @@ -91,14 +94,13 @@ instance:: @event.listens_for(Base.metadata, "column_reflect") def column_reflect(inspector, table, column_info): # set column.key = "attr_" - column_info['key'] = "attr_%s" % column_info['name'].lower() + column_info["key"] = "attr_%s" % column_info["name"].lower() With the above event, the reflection of :class:`_schema.Column` objects will be intercepted with our event that adds a new ".key" element, such as in a mapping as below:: class MyClass(Base): - __table__ = Table("some_table", Base.metadata, - autoload_with=some_engine) + __table__ = Table("some_table", Base.metadata, autoload_with=some_engine) The approach also works with both the :class:`.DeferredReflection` base class as well as with the :ref:`automap_toplevel` extension. 
For automap @@ -131,8 +133,9 @@ result in the former value being loaded first:: from sqlalchemy.orm import column_property + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) name = column_property(Column(String(50)), active_history=True) @@ -156,7 +159,7 @@ that is the string concatenation of the ``firstname`` and ``lastname`` columns:: class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) firstname = Column(String(50)) lastname = Column(String(50)) @@ -211,7 +214,7 @@ map such a table as in the following example:: metadata, Column("user_id", String(40), nullable=False), Column("group_id", String(40), nullable=False), - UniqueConstraint("user_id", "group_id") + UniqueConstraint("user_id", "group_id"), ) @@ -220,9 +223,7 @@ map such a table as in the following example:: class GroupUsers(Base): __table__ = group_users - __mapper_args__ = { - "primary_key": [group_users.c.user_id, group_users.c.group_id] - } + __mapper_args__ = {"primary_key": [group_users.c.user_id, group_users.c.group_id]} Above, the ``group_users`` table is an association table of some kind with string columns ``user_id`` and ``group_id``, but no primary key is set up; @@ -250,9 +251,7 @@ For example:: class User(Base): __table__ = user_table - __mapper_args__ = { - 'include_properties' :['user_id', 'user_name'] - } + __mapper_args__ = {"include_properties": ["user_id", "user_name"]} ...will map the ``User`` class to the ``user_table`` table, only including the ``user_id`` and ``user_name`` columns - the rest are not referenced. 
@@ -260,9 +259,7 @@ Similarly:: class Address(Base): __table__ = address_table - __mapper_args__ = { - 'exclude_properties' : ['street', 'city', 'state', 'zip'] - } + __mapper_args__ = {"exclude_properties": ["street", "city", "state", "zip"]} ...will map the ``Address`` class to the ``address_table`` table, including all columns present except ``street``, ``city``, ``state``, and ``zip``. @@ -282,8 +279,8 @@ should be included or excluded:: class UserAddress(Base): __table__ = user_table.join(addresses_table) __mapper_args__ = { - 'exclude_properties' :[address_table.c.id], - 'primary_key' : [user_table.c.id] + "exclude_properties": [address_table.c.id], + "primary_key": [user_table.c.id], } .. note:: diff --git a/doc/build/orm/mapping_styles.rst b/doc/build/orm/mapping_styles.rst index 84db8cb087..edec17c14b 100644 --- a/doc/build/orm/mapping_styles.rst +++ b/doc/build/orm/mapping_styles.rst @@ -72,7 +72,7 @@ used in a declarative table mapping:: # an example mapping using the base class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) name = Column(String) @@ -149,33 +149,40 @@ the :meth:`_orm.registry.map_imperatively` method:: mapper_registry = registry() user_table = Table( - 'user', + "user", mapper_registry.metadata, - Column('id', Integer, primary_key=True), - Column('name', String(50)), - Column('fullname', String(50)), - Column('nickname', String(12)) + Column("id", Integer, primary_key=True), + Column("name", String(50)), + Column("fullname", String(50)), + Column("nickname", String(12)), ) + class User: pass - mapper_registry.map_imperatively(User, user_table) + mapper_registry.map_imperatively(User, user_table) Information about mapped attributes, such as relationships to other classes, are provided via the ``properties`` dictionary. 
The example below illustrates a second :class:`_schema.Table` object, mapped to a class called ``Address``, then linked to ``User`` via :func:`_orm.relationship`:: - address = Table('address', metadata_obj, - Column('id', Integer, primary_key=True), - Column('user_id', Integer, ForeignKey('user.id')), - Column('email_address', String(50)) - ) + address = Table( + "address", + metadata_obj, + Column("id", Integer, primary_key=True), + Column("user_id", Integer, ForeignKey("user.id")), + Column("email_address", String(50)), + ) - mapper_registry.map_imperatively(User, user, properties={ - 'addresses' : relationship(Address, backref='user', order_by=address.c.id) - }) + mapper_registry.map_imperatively( + User, + user, + properties={ + "addresses": relationship(Address, backref="user", order_by=address.c.id) + }, + ) mapper_registry.map_imperatively(Address, address) @@ -314,8 +321,9 @@ all the attributes that are named. E.g.:: Base = declarative_base() + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(...) name = Column(...) @@ -324,7 +332,7 @@ all the attributes that are named. 
E.g.:: An object of type ``User`` above will have a constructor which allows ``User`` objects to be created as:: - u1 = User(name='some name', fullname='some fullname') + u1 = User(name="some name", fullname="some fullname") The above constructor may be customized by passing a Python callable to the :paramref:`_orm.registry.constructor` parameter which provides the @@ -337,15 +345,17 @@ The constructor also applies to imperative mappings:: mapper_registry = registry() user_table = Table( - 'user', + "user", mapper_registry.metadata, - Column('id', Integer, primary_key=True), - Column('name', String(50)) + Column("id", Integer, primary_key=True), + Column("name", String(50)), ) + class User: pass + mapper_registry.map_imperatively(User, user_table) The above class, mapped imperatively as described at :ref:`orm_imperative_mapping`, @@ -505,7 +515,7 @@ as well as specific history on modifications to attributes since the last flush: >>> insp.attrs.nickname.value 'nickname' - >>> u1.nickname = 'new nickname' + >>> u1.nickname = "new nickname" >>> insp.attrs.nickname.history History(added=['new nickname'], unchanged=(), deleted=['nickname']) diff --git a/doc/build/orm/nonstandard_mappings.rst b/doc/build/orm/nonstandard_mappings.rst index bf6b0f247d..4bd2546e09 100644 --- a/doc/build/orm/nonstandard_mappings.rst +++ b/doc/build/orm/nonstandard_mappings.rst @@ -15,24 +15,27 @@ function creates a selectable unit comprised of multiple tables, complete with its own composite primary key, which can be mapped in the same way as a :class:`_schema.Table`:: - from sqlalchemy import Table, Column, Integer, \ - String, MetaData, join, ForeignKey + from sqlalchemy import Table, Column, Integer, String, MetaData, join, ForeignKey from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import column_property metadata_obj = MetaData() # define two Table objects - user_table = Table('user', metadata_obj, - Column('id', Integer, primary_key=True), - Column('name', 
String), - ) - - address_table = Table('address', metadata_obj, - Column('id', Integer, primary_key=True), - Column('user_id', Integer, ForeignKey('user.id')), - Column('email_address', String) - ) + user_table = Table( + "user", + metadata_obj, + Column("id", Integer, primary_key=True), + Column("name", String), + ) + + address_table = Table( + "address", + metadata_obj, + Column("id", Integer, primary_key=True), + Column("user_id", Integer, ForeignKey("user.id")), + Column("email_address", String), + ) # define a join between them. This # takes place across the user.id and address.user_id @@ -104,9 +107,10 @@ may be used:: from sqlalchemy import event - @event.listens_for(PtoQ, 'before_update') + + @event.listens_for(PtoQ, "before_update") def receive_before_update(mapper, connection, target): - if target.some_required_attr_on_q is None: + if target.some_required_attr_on_q is None: connection.execute(q_table.insert(), {"id": target.id}) where above, a row is INSERTed into the ``q_table`` table by creating an @@ -128,15 +132,22 @@ includes a join to a subquery:: from sqlalchemy import select, func - subq = select( - func.count(orders.c.id).label('order_count'), - func.max(orders.c.price).label('highest_order'), - orders.c.customer_id - ).group_by(orders.c.customer_id).subquery() + subq = ( + select( + func.count(orders.c.id).label("order_count"), + func.max(orders.c.price).label("highest_order"), + orders.c.customer_id, + ) + .group_by(orders.c.customer_id) + .subquery() + ) + + customer_select = ( + select(customers, subq) + .join_from(customers, subq, customers.c.id == subq.c.customer_id) + .subquery() + ) - customer_select = select(customers, subq).join_from( - customers, subq, customers.c.id == subq.c.customer_id - ).subquery() class Customer(Base): __table__ = customer_select diff --git a/doc/build/orm/persistence_techniques.rst b/doc/build/orm/persistence_techniques.rst index 112ac5a319..09d1948e88 100644 --- a/doc/build/orm/persistence_techniques.rst +++ 
b/doc/build/orm/persistence_techniques.rst @@ -21,6 +21,7 @@ an attribute:: value = Column(Integer) + someobject = session.query(SomeClass).get(5) # set 'value' attribute to a SQL expression adding one @@ -89,10 +90,10 @@ This is most easily accomplished using the session = Session() # execute a string statement - result = session.execute("select * from table where id=:id", {'id':7}) + result = session.execute("select * from table where id=:id", {"id": 7}) # execute a SQL expression construct - result = session.execute(select(mytable).where(mytable.c.id==7)) + result = session.execute(select(mytable).where(mytable.c.id == 7)) The current :class:`~sqlalchemy.engine.Connection` held by the :class:`~sqlalchemy.orm.session.Session` is accessible using the @@ -118,13 +119,12 @@ proper context for the desired engine:: # need to specify mapper or class when executing result = session.execute( text("select * from table where id=:id"), - {'id':7}, - bind_arguments={'mapper': MyMappedClass} + {"id": 7}, + bind_arguments={"mapper": MyMappedClass}, ) result = session.execute( - select(mytable).where(mytable.c.id==7), - bind_arguments={'mapper': MyMappedClass} + select(mytable).where(mytable.c.id == 7), bind_arguments={"mapper": MyMappedClass} ) connection = session.connection(MyMappedClass) @@ -144,14 +144,15 @@ The ORM considers any attribute that was never set on an object as a "default" case; the attribute will be omitted from the INSERT statement:: class MyObject(Base): - __tablename__ = 'my_table' + __tablename__ = "my_table" id = Column(Integer, primary_key=True) data = Column(String(50), nullable=True) + obj = MyObject(id=1) session.add(obj) session.commit() # INSERT with the 'data' column omitted; the database - # itself will persist this as the NULL value + # itself will persist this as the NULL value Omitting a column from the INSERT means that the column will have the NULL value set, *unless* the column has a default set up, @@ -161,29 +162,31 @@ behavior of 
SQLAlchemy's insert behavior with both client-side and server-side defaults:: class MyObject(Base): - __tablename__ = 'my_table' + __tablename__ = "my_table" id = Column(Integer, primary_key=True) data = Column(String(50), nullable=True, server_default="default") + obj = MyObject(id=1) session.add(obj) session.commit() # INSERT with the 'data' column omitted; the database - # itself will persist this as the value 'default' + # itself will persist this as the value 'default' However, in the ORM, even if one assigns the Python value ``None`` explicitly to the object, this is treated the **same** as though the value were never assigned:: class MyObject(Base): - __tablename__ = 'my_table' + __tablename__ = "my_table" id = Column(Integer, primary_key=True) data = Column(String(50), nullable=True, server_default="default") + obj = MyObject(id=1, data=None) session.add(obj) session.commit() # INSERT with the 'data' column explicitly set to None; - # the ORM still omits it from the statement and the - # database will still persist this as the value 'default' + # the ORM still omits it from the statement and the + # database will still persist this as the value 'default' The above operation will persist into the ``data`` column the server default value of ``"default"`` and not SQL NULL, even though ``None`` @@ -200,9 +203,9 @@ on a per-instance level, we assign the attribute using the obj = MyObject(id=1, data=null()) session.add(obj) session.commit() # INSERT with the 'data' column explicitly set as null(); - # the ORM uses this directly, bypassing all client- - # and server-side defaults, and the database will - # persist this as the NULL value + # the ORM uses this directly, bypassing all client- + # and server-side defaults, and the database will + # persist this as the NULL value The :obj:`_expression.null` SQL construct always translates into the SQL NULL value being directly present in the target INSERT statement. 
@@ -215,18 +218,21 @@ a type where the ORM should treat the value ``None`` the same as any other value and pass it through, rather than omitting it as a "missing" value:: class MyObject(Base): - __tablename__ = 'my_table' + __tablename__ = "my_table" id = Column(Integer, primary_key=True) data = Column( - String(50).evaluates_none(), # indicate that None should always be passed - nullable=True, server_default="default") + String(50).evaluates_none(), # indicate that None should always be passed + nullable=True, + server_default="default", + ) + obj = MyObject(id=1, data=None) session.add(obj) session.commit() # INSERT with the 'data' column explicitly set to None; - # the ORM uses this directly, bypassing all client- - # and server-side defaults, and the database will - # persist this as the NULL value + # the ORM uses this directly, bypassing all client- + # and server-side defaults, and the database will + # persist this as the NULL value .. topic:: Evaluating None @@ -286,7 +292,7 @@ columns should be fetched immediately upon INSERT and sometimes UPDATE:: class MyModel(Base): - __tablename__ = 'my_table' + __tablename__ = "my_table" id = Column(Integer, primary_key=True) timestamp = Column(DateTime(), server_default=func.now()) @@ -315,7 +321,7 @@ This case is the same as case 1 above, except we don't specify :paramref:`.orm.mapper.eager_defaults`:: class MyModel(Base): - __tablename__ = 'my_table' + __tablename__ = "my_table" id = Column(Integer, primary_key=True) timestamp = Column(DateTime(), server_default=func.now()) @@ -366,7 +372,7 @@ For an explicit sequence as we use with Oracle, this just means we are using the :class:`.Sequence` construct:: class MyOracleModel(Base): - __tablename__ = 'my_table' + __tablename__ = "my_table" id = Column(Integer, Sequence("my_sequence"), primary_key=True) data = Column(String(50)) @@ -385,7 +391,7 @@ by a trigger, we use :class:`.FetchedValue`. 
Below is a model that uses a SQL Server TIMESTAMP column as the primary key, which generates values automatically:: class MyModel(Base): - __tablename__ = 'my_table' + __tablename__ = "my_table" timestamp = Column(TIMESTAMP(), server_default=FetchedValue(), primary_key=True) @@ -419,7 +425,7 @@ Using the example of a :class:`.DateTime` column for MySQL, we add an explicit pre-execute-supported default using the "NOW()" SQL function:: class MyModel(Base): - __tablename__ = 'my_table' + __tablename__ = "my_table" timestamp = Column(DateTime(), default=func.now(), primary_key=True) @@ -445,13 +451,11 @@ into the column:: from sqlalchemy import cast, Binary + class MyModel(Base): - __tablename__ = 'my_table' + __tablename__ = "my_table" - timestamp = Column( - TIMESTAMP(), - default=cast(func.now(), Binary), - primary_key=True) + timestamp = Column(TIMESTAMP(), default=cast(func.now(), Binary), primary_key=True) Above, in addition to selecting the "NOW()" function, we additionally make use of the :class:`.Binary` datatype in conjunction with :func:`.cast` so that @@ -478,12 +482,13 @@ We therefore must also specify that we'd like to coerce the return value to by passing this as the ``type_`` parameter:: class MyModel(Base): - __tablename__ = 'my_table' + __tablename__ = "my_table" timestamp = Column( DateTime, - default=func.datetime('now', 'localtime', type_=DateTime), - primary_key=True) + default=func.datetime("now", "localtime", type_=DateTime), + primary_key=True, + ) The above mapping upon INSERT will look like: @@ -533,12 +538,17 @@ values using RETURNING when available, :paramref:`_schema.Column.server_default` to ensure that the fetch occurs:: class MyModel(Base): - __tablename__ = 'my_table' + __tablename__ = "my_table" id = Column(Integer, primary_key=True) created = Column(DateTime(), default=func.now(), server_default=FetchedValue()) - updated = Column(DateTime(), onupdate=func.now(), server_default=FetchedValue(), server_onupdate=FetchedValue()) + updated 
= Column( + DateTime(), + onupdate=func.now(), + server_default=FetchedValue(), + server_onupdate=FetchedValue(), + ) __mapper_args__ = {"eager_defaults": True} @@ -573,8 +583,12 @@ corresponding to all the rows which were matched by the criteria:: from sqlalchemy import update - stmt = update(User).where(User.name == "squidward").values(name="spongebob").\ - returning(User.id) + stmt = ( + update(User) + .where(User.name == "squidward") + .values(name="spongebob") + .returning(User.id) + ) for row in session.execute(stmt): print(f"id: {row.id}") @@ -588,8 +602,12 @@ achieve this, we may combine the :class:`_dml.Update` construct which returns statement in an ORM context using the :meth:`_sql.Select.from_statement` method:: - stmt = update(User).where(User.name == "squidward").values(name="spongebob").\ - returning(User) + stmt = ( + update(User) + .where(User.name == "squidward") + .values(name="spongebob") + .returning(User) + ) orm_stmt = select(User).from_statement(stmt).execution_options(populate_existing=True) @@ -638,11 +656,7 @@ database, while simultaneously producing those objects as ORM instances:: index_elements=[User.name], set_=dict(fullname=stmt.excluded.fullname) ).returning(User) - orm_stmt = ( - select(User) - .from_statement(stmt) - .execution_options(populate_existing=True) - ) + orm_stmt = select(User).from_statement(stmt).execution_options(populate_existing=True) for user in session.execute( orm_stmt, ).scalars(): @@ -718,13 +732,13 @@ The dictionary is consulted whenever the :class:`.Session` needs to emit SQL on behalf of a particular kind of mapped class in order to locate the appropriate source of database connectivity:: - engine1 = create_engine('postgresql://db1') - engine2 = create_engine('postgresql://db2') + engine1 = create_engine("postgresql://db1") + engine2 = create_engine("postgresql://db2") Session = sessionmaker() # bind User operations to engine 1, Account operations to engine 2 - Session.configure(binds={User:engine1, 
Account:engine2}) + Session.configure(binds={User: engine1, Account: engine2}) session = Session() @@ -819,26 +833,25 @@ a custom :class:`.Session` which delivers the following rules: :: engines = { - 'leader':create_engine("sqlite:///leader.db"), - 'other':create_engine("sqlite:///other.db"), - 'follower1':create_engine("sqlite:///follower1.db"), - 'follower2':create_engine("sqlite:///follower2.db"), + "leader": create_engine("sqlite:///leader.db"), + "other": create_engine("sqlite:///other.db"), + "follower1": create_engine("sqlite:///follower1.db"), + "follower2": create_engine("sqlite:///follower2.db"), } from sqlalchemy.sql import Update, Delete from sqlalchemy.orm import Session, sessionmaker import random + class RoutingSession(Session): def get_bind(self, mapper=None, clause=None): if mapper and issubclass(mapper.class_, MyOtherClass): - return engines['other'] + return engines["other"] elif self._flushing or isinstance(clause, (Update, Delete)): - return engines['leader'] + return engines["leader"] else: - return engines[ - random.choice(['follower1','follower2']) - ] + return engines[random.choice(["follower1", "follower2"])] The above :class:`.Session` class is plugged in using the ``class_`` argument to :class:`.sessionmaker`:: @@ -959,19 +972,13 @@ The methods each work in the context of the :class:`.Session` object's transaction, like any other:: s = Session() - objects = [ - User(name="u1"), - User(name="u2"), - User(name="u3") - ] + objects = [User(name="u1"), User(name="u2"), User(name="u3")] s.bulk_save_objects(objects) For :meth:`.Session.bulk_insert_mappings`, and :meth:`.Session.bulk_update_mappings`, dictionaries are passed:: - s.bulk_insert_mappings(User, - [dict(name="u1"), dict(name="u2"), dict(name="u3")] - ) + s.bulk_insert_mappings(User, [dict(name="u1"), dict(name="u2"), dict(name="u3")]) .. 
seealso:: diff --git a/doc/build/orm/queryguide.rst b/doc/build/orm/queryguide.rst index 9fcd2c1bc0..70254234e4 100644 --- a/doc/build/orm/queryguide.rst +++ b/doc/build/orm/queryguide.rst @@ -23,37 +23,37 @@ upon the content at :ref:`tutorial_selecting_data`. >>> user_table = Table( ... "user_account", ... metadata_obj, - ... Column('id', Integer, primary_key=True), - ... Column('name', String(30)), - ... Column('fullname', String) + ... Column("id", Integer, primary_key=True), + ... Column("name", String(30)), + ... Column("fullname", String), ... ) >>> from sqlalchemy import ForeignKey >>> address_table = Table( ... "address", ... metadata_obj, - ... Column('id', Integer, primary_key=True), - ... Column('user_id', None, ForeignKey('user_account.id')), - ... Column('email_address', String, nullable=False) + ... Column("id", Integer, primary_key=True), + ... Column("user_id", None, ForeignKey("user_account.id")), + ... Column("email_address", String, nullable=False), ... ) >>> orders_table = Table( ... "user_order", ... metadata_obj, - ... Column('id', Integer, primary_key=True), - ... Column('user_id', None, ForeignKey('user_account.id')), - ... Column('email_address', String, nullable=False) + ... Column("id", Integer, primary_key=True), + ... Column("user_id", None, ForeignKey("user_account.id")), + ... Column("email_address", String, nullable=False), ... ) >>> order_items_table = Table( ... "order_items", ... metadata_obj, ... Column("order_id", ForeignKey("user_order.id"), primary_key=True), - ... Column("item_id", ForeignKey("item.id"), primary_key=True) + ... Column("item_id", ForeignKey("item.id"), primary_key=True), ... ) >>> items_table = Table( ... "item", ... metadata_obj, - ... Column('id', Integer, primary_key=True), - ... Column('name', String), - ... Column('description', String) + ... Column("id", Integer, primary_key=True), + ... Column("name", String), + ... Column("description", String), ... 
) >>> metadata_obj.create_all(engine) BEGIN (implicit) @@ -68,7 +68,7 @@ upon the content at :ref:`tutorial_selecting_data`. ... orders = relationship("Order") ... ... def __repr__(self): - ... return f"User(id={self.id!r}, name={self.name!r}, fullname={self.fullname!r})" + ... return f"User(id={self.id!r}, name={self.name!r}, fullname={self.fullname!r})" >>> class Address(Base): ... __table__ = address_table @@ -88,22 +88,34 @@ upon the content at :ref:`tutorial_selecting_data`. >>> conn = engine.connect() >>> from sqlalchemy.orm import Session >>> session = Session(conn) - >>> session.add_all([ - ... User(name="spongebob", fullname="Spongebob Squarepants", addresses=[ - ... Address(email_address="spongebob@sqlalchemy.org") - ... ]), - ... User(name="sandy", fullname="Sandy Cheeks", addresses=[ - ... Address(email_address="sandy@sqlalchemy.org"), - ... Address(email_address="squirrel@squirrelpower.org") - ... ]), - ... User(name="patrick", fullname="Patrick Star", addresses=[ - ... Address(email_address="pat999@aol.com") - ... ]), - ... User(name="squidward", fullname="Squidward Tentacles", addresses=[ - ... Address(email_address="stentcl@sqlalchemy.org") - ... ]), - ... User(name="ehkrabs", fullname="Eugene H. Krabs"), - ... ]) + >>> session.add_all( + ... [ + ... User( + ... name="spongebob", + ... fullname="Spongebob Squarepants", + ... addresses=[Address(email_address="spongebob@sqlalchemy.org")], + ... ), + ... User( + ... name="sandy", + ... fullname="Sandy Cheeks", + ... addresses=[ + ... Address(email_address="sandy@sqlalchemy.org"), + ... Address(email_address="squirrel@squirrelpower.org"), + ... ], + ... ), + ... User( + ... name="patrick", + ... fullname="Patrick Star", + ... addresses=[Address(email_address="pat999@aol.com")], + ... ), + ... User( + ... name="squidward", + ... fullname="Squidward Tentacles", + ... addresses=[Address(email_address="stentcl@sqlalchemy.org")], + ... ), + ... User(name="ehkrabs", fullname="Eugene H. Krabs"), + ... ] + ... 
) >>> session.commit() BEGIN ... >>> conn.begin() @@ -117,7 +129,7 @@ SELECT statements are produced by the :func:`_sql.select` function which returns a :class:`_sql.Select` object:: >>> from sqlalchemy import select - >>> stmt = select(User).where(User.name == 'spongebob') + >>> stmt = select(User).where(User.name == "spongebob") To invoke a :class:`_sql.Select` with the ORM, it is passed to :meth:`_orm.Session.execute`:: @@ -184,7 +196,7 @@ same time:: >>> stmt = select(User, Address).join(User.addresses).order_by(User.id, Address.id) {sql}>>> for row in session.execute(stmt): - ... print(f"{row.User.name} {row.Address.email_address}") + ... print(f"{row.User.name} {row.Address.email_address}") SELECT user_account.id, user_account.name, user_account.fullname, address.id AS id_1, address.user_id, address.email_address FROM user_account JOIN address ON user_account.id = address.user_id @@ -207,9 +219,9 @@ when passed to :func:`_sql.select`. They may be used in the same way as table columns are used:: {sql}>>> result = session.execute( - ... select(User.name, Address.email_address). - ... join(User.addresses). - ... order_by(User.id, Address.id) + ... select(User.name, Address.email_address) + ... .join(User.addresses) + ... .order_by(User.id, Address.id) ... ) SELECT user_account.name, address.email_address FROM user_account JOIN address ON user_account.id = address.user_id @@ -238,8 +250,7 @@ allows sets of column expressions to be grouped in result rows:: >>> from sqlalchemy.orm import Bundle >>> stmt = select( - ... Bundle("user", User.name, User.fullname), - ... Bundle("email", Address.email_address) + ... Bundle("user", User.name, User.fullname), Bundle("email", Address.email_address) ... ).join_from(User, Address) {sql}>>> for row in session.execute(stmt): ... print(f"{row.user.name} {row.user.fullname} {row.email.email_address}") @@ -429,8 +440,7 @@ is used:: >>> from sqlalchemy import union_all >>> u = union_all( - ... select(User).where(User.id < 2), - ... 
select(User).where(User.id == 3) + ... select(User).where(User.id < 2), select(User).where(User.id == 3) ... ).order_by(User.id) >>> stmt = select(User).from_statement(u) >>> for user_obj in session.execute(stmt).scalars(): @@ -455,8 +465,7 @@ entity in a :func:`_sql.select` construct, including that we can add filtering and order by criteria based on its exported columns:: >>> subq = union_all( - ... select(User).where(User.id < 2), - ... select(User).where(User.id == 3) + ... select(User).where(User.id < 2), select(User).where(User.id == 3) ... ).subquery() >>> user_alias = aliased(User, subq) >>> stmt = select(user_alias).order_by(user_alias.id) @@ -531,11 +540,7 @@ a JOIN first from ``User`` to ``Order``, and a second from ``Order`` to relationship, it results in two separate JOIN elements, for a total of three JOIN elements in the resulting SQL:: - >>> stmt = ( - ... select(User). - ... join(User.orders). - ... join(Order.items) - ... ) + >>> stmt = select(User).join(User.orders).join(Order.items) >>> print(stmt) {opensql}SELECT user_account.id, user_account.name, user_account.fullname FROM user_account @@ -559,12 +564,7 @@ as potential points to continue joining FROM. We can continue to add other elements to join FROM the ``User`` entity above, for example adding on the ``User.addresses`` relationship to our chain of joins:: - >>> stmt = ( - ... select(User). - ... join(User.orders). - ... join(Order.items). - ... join(User.addresses) - ... ) + >>> stmt = select(User).join(User.orders).join(Order.items).join(User.addresses) >>> print(stmt) {opensql}SELECT user_account.id, user_account.name, user_account.fullname FROM user_account @@ -610,7 +610,7 @@ The third calling form allows both the target entity as well as the ON clause to be passed explicitly. 
A example that includes a SQL expression as the ON clause is as follows:: - >>> stmt = select(User).join(Address, User.id==Address.user_id) + >>> stmt = select(User).join(Address, User.id == Address.user_id) >>> print(stmt) {opensql}SELECT user_account.id, user_account.name, user_account.fullname FROM user_account JOIN address ON user_account.id = address.user_id @@ -633,11 +633,11 @@ below:: >>> a1 = aliased(Address) >>> a2 = aliased(Address) >>> stmt = ( - ... select(User). - ... join(a1, User.addresses). - ... join(a2, User.addresses). - ... where(a1.email_address == 'ed@foo.com'). - ... where(a2.email_address == 'ed@bar.com') + ... select(User) + ... .join(a1, User.addresses) + ... .join(a2, User.addresses) + ... .where(a1.email_address == "ed@foo.com") + ... .where(a2.email_address == "ed@bar.com") ... ) >>> print(stmt) {opensql}SELECT user_account.id, user_account.name, user_account.fullname @@ -653,11 +653,11 @@ substituted with an aliased entity by using the this method would be:: >>> stmt = ( - ... select(User). - ... join(User.addresses.of_type(a1)). - ... join(User.addresses.of_type(a2)). - ... where(a1.email_address == 'ed@foo.com'). - ... where(a2.email_address == 'ed@bar.com') + ... select(User) + ... .join(User.addresses.of_type(a1)) + ... .join(User.addresses.of_type(a2)) + ... .where(a1.email_address == "ed@foo.com") + ... .where(a2.email_address == "ed@bar.com") ... ) >>> print(stmt) {opensql}SELECT user_account.id, user_account.name, user_account.fullname @@ -681,10 +681,7 @@ with the default criteria using AND. Below, the ON criteria between by ``AND``, the first one being the natural join along the foreign key, and the second being a custom limiting criteria:: - >>> stmt = ( - ... select(User). - ... join(User.addresses.and_(Address.email_address != 'foo@bar.com')) - ... 
) + >>> stmt = select(User).join(User.addresses.and_(Address.email_address != "foo@bar.com")) >>> print(stmt) {opensql}SELECT user_account.id, user_account.name, user_account.fullname FROM user_account @@ -711,11 +708,7 @@ is represented as a row limited subquery, we first construct a :class:`_sql.Subq object using :meth:`_sql.Select.subquery`, which may then be used as the target of the :meth:`_sql.Select.join` method:: - >>> subq = ( - ... select(Address). - ... where(Address.email_address == 'pat999@aol.com'). - ... subquery() - ... ) + >>> subq = select(Address).where(Address.email_address == "pat999@aol.com").subquery() >>> stmt = select(User).join(subq, User.id == subq.c.user_id) >>> print(stmt) {opensql}SELECT user_account.id, user_account.name, user_account.fullname @@ -754,14 +747,14 @@ will remain unique within the statement, while the entities that are linked to it using :class:`_orm.aliased` refer to distinct sets of columns:: >>> user_address_subq = ( - ... select(User.id, User.name, Address.id, Address.email_address). - ... join_from(User, Address). - ... where(Address.email_address.in_(['pat999@aol.com', 'squirrel@squirrelpower.org'])). - ... subquery() + ... select(User.id, User.name, Address.id, Address.email_address) + ... .join_from(User, Address) + ... .where(Address.email_address.in_(["pat999@aol.com", "squirrel@squirrelpower.org"])) + ... .subquery() ... ) >>> user_alias = aliased(User, user_address_subq, name="user") >>> address_alias = aliased(Address, user_address_subq, name="address") - >>> stmt = select(user_alias, address_alias).where(user_alias.name == 'sandy') + >>> stmt = select(user_alias, address_alias).where(user_alias.name == "sandy") >>> for row in session.execute(stmt): ... 
print(f"{row.user} {row.address}") {opensql}SELECT anon_1.id, anon_1.name, anon_1.id_1, anon_1.email_address @@ -782,7 +775,7 @@ In cases where the left side of the current state of :class:`_sql.Select` is not in line with what we want to join from, the :meth:`_sql.Select.join_from` method may be used:: - >>> stmt = select(Address).join_from(User, User.addresses).where(User.name == 'sandy') + >>> stmt = select(Address).join_from(User, User.addresses).where(User.name == "sandy") >>> print(stmt) SELECT address.id, address.user_id, address.email_address FROM user_account JOIN address ON user_account.id = address.user_id @@ -792,7 +785,7 @@ The :meth:`_sql.Select.join_from` method accepts two or three arguments, either in the form ``, ``, or ``, , []``:: - >>> stmt = select(Address).join_from(User, Address).where(User.name == 'sandy') + >>> stmt = select(Address).join_from(User, Address).where(User.name == "sandy") >>> print(stmt) SELECT address.id, address.user_id, address.email_address FROM user_account JOIN address ON user_account.id = address.user_id @@ -803,7 +796,7 @@ can be used subsequent, the :meth:`_sql.Select.select_from` method may also be used:: - >>> stmt = select(Address).select_from(User).join(Address).where(User.name == 'sandy') + >>> stmt = select(Address).select_from(User).join(Address).where(User.name == "sandy") >>> print(stmt) SELECT address.id, address.user_id, address.email_address FROM user_account JOIN address ON user_account.id = address.user_id @@ -820,7 +813,7 @@ be used:: such a :class:`_sql.Join` object. 
Therefore we can see the contents of :meth:`_sql.Select.select_from` being overridden in a case like this:: - >>> stmt = select(Address).select_from(User).join(Address.user).where(User.name == 'sandy') + >>> stmt = select(Address).select_from(User).join(Address.user).where(User.name == "sandy") >>> print(stmt) SELECT address.id, address.user_id, address.email_address FROM address JOIN user_account ON user_account.id = address.user_id @@ -837,8 +830,10 @@ be used:: >>> >>> j = address_table.join(user_table, user_table.c.id == address_table.c.user_id) >>> stmt = ( - ... select(address_table).select_from(user_table).select_from(j). - ... where(user_table.c.name == 'sandy') + ... select(address_table) + ... .select_from(user_table) + ... .select_from(j) + ... .where(user_table.c.name == "sandy") ... ) >>> print(stmt) SELECT address.id, address.user_id, address.email_address @@ -1163,7 +1158,7 @@ same way as the legacy :attr:`.Query.column_descriptions` attribute. The format returned is a list of dictionaries:: >>> from pprint import pprint - >>> user_alias = aliased(User, name='user2') + >>> user_alias = aliased(User, name="user2") >>> stmt = select(User, User.id, user_alias) >>> pprint(stmt.column_descriptions) [{'aliased': False, diff --git a/doc/build/orm/quickstart.rst b/doc/build/orm/quickstart.rst index f15fa4a6c7..f1240e7bd8 100644 --- a/doc/build/orm/quickstart.rst +++ b/doc/build/orm/quickstart.rst @@ -278,10 +278,10 @@ construct creates joins using the :meth:`_sql.Select.join` method: .. sourcecode:: pycon+sql >>> stmt = ( - ... select(Address) - ... .join(Address.user) - ... .where(User.name == "sandy") - ... .where(Address.email_address == "sandy@sqlalchemy.org") + ... select(Address) + ... .join(Address.user) + ... .where(User.name == "sandy") + ... .where(Address.email_address == "sandy@sqlalchemy.org") ... 
) >>> sandy_address = session.scalars(stmt).one() {opensql}SELECT address.id, address.email_address, address.user_id @@ -320,9 +320,7 @@ address associated with "sandy", and also add a new email address to [...] ('patrick',) {stop} - >>> patrick.addresses.append( - ... Address(email_address="patrickstar@sqlalchemy.org") - ... ) + >>> patrick.addresses.append(Address(email_address="patrickstar@sqlalchemy.org")) {opensql}SELECT address.id AS address_id, address.email_address AS address_email_address, address.user_id AS address_user_id FROM address WHERE ? = address.user_id diff --git a/doc/build/orm/relationship_persistence.rst b/doc/build/orm/relationship_persistence.rst index f843764741..77396639ab 100644 --- a/doc/build/orm/relationship_persistence.rst +++ b/doc/build/orm/relationship_persistence.rst @@ -64,27 +64,27 @@ a complete example, including two :class:`_schema.ForeignKey` constructs:: Base = declarative_base() + class Entry(Base): - __tablename__ = 'entry' + __tablename__ = "entry" entry_id = Column(Integer, primary_key=True) - widget_id = Column(Integer, ForeignKey('widget.widget_id')) + widget_id = Column(Integer, ForeignKey("widget.widget_id")) name = Column(String(50)) + class Widget(Base): - __tablename__ = 'widget' + __tablename__ = "widget" widget_id = Column(Integer, primary_key=True) - favorite_entry_id = Column(Integer, - ForeignKey('entry.entry_id', - name="fk_favorite_entry")) + favorite_entry_id = Column( + Integer, ForeignKey("entry.entry_id", name="fk_favorite_entry") + ) name = Column(String(50)) - entries = relationship(Entry, primaryjoin= - widget_id==Entry.widget_id) - favorite_entry = relationship(Entry, - primaryjoin= - favorite_entry_id==Entry.entry_id, - post_update=True) + entries = relationship(Entry, primaryjoin=widget_id == Entry.widget_id) + favorite_entry = relationship( + Entry, primaryjoin=favorite_entry_id == Entry.entry_id, post_update=True + ) When a structure against the above configuration is flushed, the "widget" row 
will be INSERTed minus the "favorite_entry_id" value, then all the "entry" rows will @@ -94,8 +94,8 @@ row at a time for the time being): .. sourcecode:: pycon+sql - >>> w1 = Widget(name='somewidget') - >>> e1 = Entry(name='someentry') + >>> w1 = Widget(name="somewidget") + >>> e1 = Entry(name="someentry") >>> w1.favorite_entry = e1 >>> w1.entries = [e1] >>> session.add_all([w1, e1]) @@ -115,26 +115,32 @@ it's guaranteed that ``favorite_entry_id`` refers to an ``Entry`` that also refers to this ``Widget``. We can use a composite foreign key, as illustrated below:: - from sqlalchemy import Integer, ForeignKey, String, \ - Column, UniqueConstraint, ForeignKeyConstraint + from sqlalchemy import ( + Integer, + ForeignKey, + String, + Column, + UniqueConstraint, + ForeignKeyConstraint, + ) from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import relationship Base = declarative_base() + class Entry(Base): - __tablename__ = 'entry' + __tablename__ = "entry" entry_id = Column(Integer, primary_key=True) - widget_id = Column(Integer, ForeignKey('widget.widget_id')) + widget_id = Column(Integer, ForeignKey("widget.widget_id")) name = Column(String(50)) - __table_args__ = ( - UniqueConstraint("entry_id", "widget_id"), - ) + __table_args__ = (UniqueConstraint("entry_id", "widget_id"),) + class Widget(Base): - __tablename__ = 'widget' + __tablename__ = "widget" - widget_id = Column(Integer, autoincrement='ignore_fk', primary_key=True) + widget_id = Column(Integer, autoincrement="ignore_fk", primary_key=True) favorite_entry_id = Column(Integer) name = Column(String(50)) @@ -143,18 +149,19 @@ as illustrated below:: ForeignKeyConstraint( ["widget_id", "favorite_entry_id"], ["entry.widget_id", "entry.entry_id"], - name="fk_favorite_entry" + name="fk_favorite_entry", ), ) - entries = relationship(Entry, primaryjoin= - widget_id==Entry.widget_id, - foreign_keys=Entry.widget_id) - favorite_entry = relationship(Entry, - primaryjoin= - 
favorite_entry_id==Entry.entry_id, - foreign_keys=favorite_entry_id, - post_update=True) + entries = relationship( + Entry, primaryjoin=widget_id == Entry.widget_id, foreign_keys=Entry.widget_id + ) + favorite_entry = relationship( + Entry, + primaryjoin=favorite_entry_id == Entry.entry_id, + foreign_keys=favorite_entry_id, + post_update=True, + ) The above mapping features a composite :class:`_schema.ForeignKeyConstraint` bridging the ``widget_id`` and ``favorite_entry_id`` columns. To ensure @@ -184,8 +191,8 @@ capabilities of the database. An example mapping which illustrates this is:: class User(Base): - __tablename__ = 'user' - __table_args__ = {'mysql_engine': 'InnoDB'} + __tablename__ = "user" + __table_args__ = {"mysql_engine": "InnoDB"} username = Column(String(50), primary_key=True) fullname = Column(String(100)) @@ -194,13 +201,11 @@ illustrates this is:: class Address(Base): - __tablename__ = 'address' - __table_args__ = {'mysql_engine': 'InnoDB'} + __tablename__ = "address" + __table_args__ = {"mysql_engine": "InnoDB"} email = Column(String(50), primary_key=True) - username = Column(String(50), - ForeignKey('user.username', onupdate="cascade") - ) + username = Column(String(50), ForeignKey("user.username", onupdate="cascade")) Above, we illustrate ``onupdate="cascade"`` on the :class:`_schema.ForeignKey` object, and we also illustrate the ``mysql_engine='InnoDB'`` setting @@ -245,7 +250,7 @@ will be fully loaded into memory if not already locally present. 
Our previous mapping using ``passive_updates=False`` looks like:: class User(Base): - __tablename__ = 'user' + __tablename__ = "user" username = Column(String(50), primary_key=True) fullname = Column(String(100)) @@ -254,11 +259,12 @@ Our previous mapping using ``passive_updates=False`` looks like:: # does not implement ON UPDATE CASCADE addresses = relationship("Address", passive_updates=False) + class Address(Base): - __tablename__ = 'address' + __tablename__ = "address" email = Column(String(50), primary_key=True) - username = Column(String(50), ForeignKey('user.username')) + username = Column(String(50), ForeignKey("user.username")) Key limitations of ``passive_updates=False`` include: diff --git a/doc/build/orm/self_referential.rst b/doc/build/orm/self_referential.rst index 71b7a06efd..6db51520e8 100644 --- a/doc/build/orm/self_referential.rst +++ b/doc/build/orm/self_referential.rst @@ -26,9 +26,9 @@ In this example, we'll work with a single mapped class called ``Node``, representing a tree structure:: class Node(Base): - __tablename__ = 'node' + __tablename__ = "node" id = Column(Integer, primary_key=True) - parent_id = Column(Integer, ForeignKey('node.id')) + parent_id = Column(Integer, ForeignKey("node.id")) data = Column(String(50)) children = relationship("Node") @@ -60,9 +60,9 @@ is a :class:`_schema.Column` or collection of :class:`_schema.Column` objects that indicate those which should be considered to be "remote":: class Node(Base): - __tablename__ = 'node' + __tablename__ = "node" id = Column(Integer, primary_key=True) - parent_id = Column(Integer, ForeignKey('node.id')) + parent_id = Column(Integer, ForeignKey("node.id")) data = Column(String(50)) parent = relationship("Node", remote_side=[id]) @@ -75,13 +75,11 @@ As always, both directions can be combined into a bidirectional relationship using the :func:`.backref` function:: class Node(Base): - __tablename__ = 'node' + __tablename__ = "node" id = Column(Integer, primary_key=True) - parent_id = 
Column(Integer, ForeignKey('node.id')) + parent_id = Column(Integer, ForeignKey("node.id")) data = Column(String(50)) - children = relationship("Node", - backref=backref('parent', remote_side=[id]) - ) + children = relationship("Node", backref=backref("parent", remote_side=[id])) There are several examples included with SQLAlchemy illustrating self-referential strategies; these include :ref:`examples_adjacencylist` and @@ -99,11 +97,11 @@ the same account as that of the parent; while ``folder_id`` refers to a specific folder within that account:: class Folder(Base): - __tablename__ = 'folder' + __tablename__ = "folder" __table_args__ = ( - ForeignKeyConstraint( - ['account_id', 'parent_id'], - ['folder.account_id', 'folder.folder_id']), + ForeignKeyConstraint( + ["account_id", "parent_id"], ["folder.account_id", "folder.folder_id"] + ), ) account_id = Column(Integer, primary_key=True) @@ -111,10 +109,9 @@ to a specific folder within that account:: parent_id = Column(Integer) name = Column(String) - parent_folder = relationship("Folder", - backref="child_folders", - remote_side=[account_id, folder_id] - ) + parent_folder = relationship( + "Folder", backref="child_folders", remote_side=[account_id, folder_id] + ) Above, we pass ``account_id`` into the :paramref:`_orm.relationship.remote_side` list. :func:`_orm.relationship` recognizes that the ``account_id`` column here @@ -130,7 +127,7 @@ Self-Referential Query Strategies Querying of self-referential structures works like any other query:: # get all nodes named 'child2' - session.query(Node).filter(Node.data=='child2') + session.query(Node).filter(Node.data == "child2") However extra care is needed when attempting to join along the foreign key from one level of the tree to the next. 
In SQL, @@ -147,10 +144,9 @@ looks like: from sqlalchemy.orm import aliased nodealias = aliased(Node) - session.query(Node).filter(Node.data=='subchild1').\ - join(Node.parent.of_type(nodealias)).\ - filter(nodealias.data=="child2").\ - all() + session.query(Node).filter(Node.data == "subchild1").join( + Node.parent.of_type(nodealias) + ).filter(nodealias.data == "child2").all() {opensql}SELECT node.id AS node_id, node.parent_id AS node_parent_id, node.data AS node_data @@ -182,13 +178,12 @@ configured via :paramref:`~.relationships.join_depth`: .. sourcecode:: python+sql class Node(Base): - __tablename__ = 'node' + __tablename__ = "node" id = Column(Integer, primary_key=True) - parent_id = Column(Integer, ForeignKey('node.id')) + parent_id = Column(Integer, ForeignKey("node.id")) data = Column(String(50)) - children = relationship("Node", - lazy="joined", - join_depth=2) + children = relationship("Node", lazy="joined", join_depth=2) + session.query(Node).all() {opensql}SELECT node_1.id AS node_1_id, diff --git a/doc/build/orm/session_basics.rst b/doc/build/orm/session_basics.rst index 16b2cae5f8..fcf384d4a2 100644 --- a/doc/build/orm/session_basics.rst +++ b/doc/build/orm/session_basics.rst @@ -62,7 +62,7 @@ may look like:: # an Engine, which the Session will use for connection # resources - engine = create_engine('postgresql://scott:tiger@localhost/') + engine = create_engine("postgresql://scott:tiger@localhost/") # create session and add objects with Session(engine) as session: @@ -129,8 +129,8 @@ operations:: # create session and add objects with Session(engine) as session: with session.begin(): - session.add(some_object) - session.add(some_other_object) + session.add(some_object) + session.add(some_other_object) # inner context calls session.commit(), if there were no exceptions # outer context calls session.close() @@ -157,7 +157,7 @@ scope, the :class:`_orm.sessionmaker` can provide a factory for # an Engine, which the Session will use for connection # 
resources, typically in module scope - engine = create_engine('postgresql://scott:tiger@localhost/') + engine = create_engine("postgresql://scott:tiger@localhost/") # a sessionmaker(), also in the same scope as the engine Session = sessionmaker(engine) @@ -182,7 +182,7 @@ and also maintains a begin/commit/rollback block:: # an Engine, which the Session will use for connection # resources - engine = create_engine('postgresql://scott:tiger@localhost/') + engine = create_engine("postgresql://scott:tiger@localhost/") # a sessionmaker(), also in the same scope as the engine Session = sessionmaker(engine) @@ -223,10 +223,10 @@ will issue mapper queries within the context of this Session. By other ORM constructs such as an :func:`_orm.aliased` construct:: # query from a class - results = session.query(User).filter_by(name='ed').all() + results = session.query(User).filter_by(name="ed").all() # query with multiple classes, returns tuples - results = session.query(User, Address).join('addresses').filter_by(name='ed').all() + results = session.query(User, Address).join("addresses").filter_by(name="ed").all() # query using orm-columns, also returns tuples results = session.query(User.name, User.fullname).all() @@ -283,7 +283,7 @@ statements that use ORM entities:: result = session.execute(statement).scalars().all() # query with multiple classes - statement = select(User, Address).join('addresses').filter_by(name='ed') + statement = select(User, Address).join("addresses").filter_by(name="ed") # list of tuples result = session.execute(statement).all() @@ -328,12 +328,12 @@ already present and do not need to be added. Instances which are :term:`detached (i.e. 
have been removed from a session) may be re-associated with a session using this method:: - user1 = User(name='user1') - user2 = User(name='user2') + user1 = User(name="user1") + user2 = User(name="user2") session.add(user1) session.add(user2) - session.commit() # write changes to the database + session.commit() # write changes to the database To add a list of items to the session at once, use :meth:`~.Session.add_all`:: @@ -614,8 +614,9 @@ time refresh locally present objects which match those rows. To emit an ORM-enabled UPDATE in :term:`1.x style`, the :meth:`_query.Query.update` method may be used:: - session.query(User).filter(User.name == "squidward").\ - update({"name": "spongebob"}, synchronize_session="fetch") + session.query(User).filter(User.name == "squidward").update( + {"name": "spongebob"}, synchronize_session="fetch" + ) Above, an UPDATE will be emitted against all rows that match the name "squidward" and be updated to the name "spongebob". The @@ -630,8 +631,12 @@ Core :class:`_sql.Update` construct:: from sqlalchemy import update - stmt = update(User).where(User.name == "squidward").values(name="spongebob").\ - execution_options(synchronize_session="fetch") + stmt = ( + update(User) + .where(User.name == "squidward") + .values(name="spongebob") + .execution_options(synchronize_session="fetch") + ) result = session.execute(stmt) @@ -650,14 +655,17 @@ within the :class:`_orm.Session` will be marked as deleted and expunged. 
ORM-enabled delete, :term:`1.x style`:: - session.query(User).filter(User.name == "squidward").\ - delete(synchronize_session="fetch") + session.query(User).filter(User.name == "squidward").delete(synchronize_session="fetch") ORM-enabled delete, :term:`2.0 style`:: from sqlalchemy import delete - stmt = delete(User).where(User.name == "squidward").execution_options(synchronize_session="fetch") + stmt = ( + delete(User) + .where(User.name == "squidward") + .execution_options(synchronize_session="fetch") + ) session.execute(stmt) @@ -1035,6 +1043,7 @@ E.g. **don't do this**:: ### this is the **wrong way to do it** ### + class ThingOne(object): def go(self): session = Session() @@ -1045,6 +1054,7 @@ E.g. **don't do this**:: session.rollback() raise + class ThingTwo(object): def go(self): session = Session() @@ -1055,6 +1065,7 @@ E.g. **don't do this**:: session.rollback() raise + def run_my_program(): ThingOne().go() ThingTwo().go() @@ -1067,21 +1078,23 @@ transaction automatically:: ### this is a **better** (but not the only) way to do it ### + class ThingOne(object): def go(self, session): session.query(FooBar).update({"x": 5}) + class ThingTwo(object): def go(self, session): session.query(Widget).update({"q": 18}) + def run_my_program(): with Session() as session: with session.begin(): ThingOne().go(session) ThingTwo().go(session) - .. versionchanged:: 1.4 The :class:`_orm.Session` may be used as a context manager without the use of external helper functions. @@ -1119,6 +1132,7 @@ available on :class:`~sqlalchemy.orm.session.Session`:: The newer :ref:`core_inspection_toplevel` system can also be used:: from sqlalchemy import inspect + session = inspect(someobject).session .. 
_session_faq_threadsafe: diff --git a/doc/build/orm/session_events.rst b/doc/build/orm/session_events.rst index 544a6c5773..c24bb9fa96 100644 --- a/doc/build/orm/session_events.rst +++ b/doc/build/orm/session_events.rst @@ -47,6 +47,7 @@ options:: Session = sessionmaker(engine, future=True) + @event.listens_for(Session, "do_orm_execute") def _do_orm_execute(orm_execute_state): if orm_execute_state.is_select: @@ -58,7 +59,7 @@ options:: # ORDER BY if so col_descriptions = orm_execute_state.statement.column_descriptions - if col_descriptions[0]['entity'] is MyEntity: + if col_descriptions[0]["entity"] is MyEntity: orm_execute_state.statement = statement.order_by(MyEntity.name) The above example illustrates some simple modifications to SELECT statements. @@ -85,13 +86,14 @@ may be used on its own, or is ideally suited to be used within the Session = sessionmaker(engine, future=True) + @event.listens_for(Session, "do_orm_execute") def _do_orm_execute(orm_execute_state): if ( - orm_execute_state.is_select and - not orm_execute_state.is_column_load and - not orm_execute_state.is_relationship_load + orm_execute_state.is_select + and not orm_execute_state.is_column_load + and not orm_execute_state.is_relationship_load ): orm_execute_state.statement = orm_execute_state.statement.options( with_loader_criteria(MyEntity.public == True) @@ -114,6 +116,7 @@ Given a series of classes based on a mixin called ``HasTimestamp``:: import datetime + class HasTimestamp(object): timestamp = Column(DateTime, default=datetime.datetime.now) @@ -122,11 +125,11 @@ Given a series of classes based on a mixin called ``HasTimestamp``:: __tablename__ = "some_entity" id = Column(Integer, primary_key=True) + class SomeOtherEntity(HasTimestamp, Base): __tablename__ = "some_entity" id = Column(Integer, primary_key=True) - The above classes ``SomeEntity`` and ``SomeOtherEntity`` will each have a column ``timestamp`` that defaults to the current date and time. 
An event may be used to intercept all objects that extend from ``HasTimestamp`` and filter their @@ -135,9 +138,9 @@ to intercept all objects that extend from ``HasTimestamp`` and filter their @event.listens_for(Session, "do_orm_execute") def _do_orm_execute(orm_execute_state): if ( - orm_execute_state.is_select - and not orm_execute_state.is_column_load - and not orm_execute_state.is_relationship_load + orm_execute_state.is_select + and not orm_execute_state.is_column_load + and not orm_execute_state.is_relationship_load ): one_month_ago = datetime.datetime.today() - datetime.timedelta(months=1) @@ -145,7 +148,7 @@ to intercept all objects that extend from ``HasTimestamp`` and filter their with_loader_criteria( HasTimestamp, lambda cls: cls.timestamp >= one_month_ago, - include_aliases=True + include_aliases=True, ) ) @@ -202,6 +205,7 @@ E.g., using :meth:`_orm.SessionEvents.do_orm_execute` to implement a cache:: cache = {} + @event.listens_for(Session, "do_orm_execute") def _do_orm_execute(orm_execute_state): if "my_cache_key" in orm_execute_state.execution_options: @@ -222,7 +226,9 @@ E.g., using :meth:`_orm.SessionEvents.do_orm_execute` to implement a cache:: With the above hook in place, an example of using the cache would look like:: - stmt = select(User).where(User.name == 'sandy').execution_options(my_cache_key="key_sandy") + stmt = ( + select(User).where(User.name == "sandy").execution_options(my_cache_key="key_sandy") + ) result = session.execute(stmt) @@ -413,7 +419,8 @@ with a specific :class:`.Session` object:: session = Session() - @event.listens_for(session, 'transient_to_pending') + + @event.listens_for(session, "transient_to_pending") def object_is_pending(session, obj): print("new pending: %s" % obj) @@ -425,7 +432,8 @@ Or with the :class:`.Session` class itself, as well as with a specific maker = sessionmaker() - @event.listens_for(maker, 'transient_to_pending') + + @event.listens_for(maker, "transient_to_pending") def object_is_pending(session, 
obj): print("new pending: %s" % obj) @@ -457,11 +465,11 @@ intercept all new objects for a particular declarative base:: Base = declarative_base() + @event.listens_for(Base, "init", propagate=True) def intercept_init(instance, args, kwargs): print("new transient: %s" % instance) - Transient to Pending ^^^^^^^^^^^^^^^^^^^^ @@ -476,7 +484,6 @@ the :meth:`.SessionEvents.transient_to_pending` event:: def intercept_transient_to_pending(session, object_): print("transient to pending: %s" % object_) - Pending to Persistent ^^^^^^^^^^^^^^^^^^^^^ @@ -517,7 +524,6 @@ state via this particular avenue:: def intercept_loaded_as_persistent(session, object_): print("object loaded into persistent state: %s" % object_) - Persistent to Transient ^^^^^^^^^^^^^^^^^^^^^^^ @@ -561,7 +567,6 @@ Track the persistent to deleted transition with def intercept_persistent_to_deleted(session, object_): print("object was DELETEd, is now in deleted state: %s" % object_) - Deleted to Detached ^^^^^^^^^^^^^^^^^^^ @@ -575,7 +580,6 @@ the deleted to detached transition using :meth:`.SessionEvents.deleted_to_detach def intercept_deleted_to_detached(session, object_): print("deleted to detached: %s" % object_) - .. 
note:: While the object is in the deleted state, the :attr:`.InstanceState.deleted` @@ -618,7 +622,6 @@ objects moving back to persistent from detached using the def intercept_detached_to_persistent(session, object_): print("object became persistent again: %s" % object_) - Deleted to Persistent ^^^^^^^^^^^^^^^^^^^^^ diff --git a/doc/build/orm/session_state_management.rst b/doc/build/orm/session_state_management.rst index 31e82ab62c..c1d7230686 100644 --- a/doc/build/orm/session_state_management.rst +++ b/doc/build/orm/session_state_management.rst @@ -142,25 +142,25 @@ the :term:`persistent` state is as follows:: from sqlalchemy import event + def strong_reference_session(session): @event.listens_for(session, "pending_to_persistent") @event.listens_for(session, "deleted_to_persistent") @event.listens_for(session, "detached_to_persistent") @event.listens_for(session, "loaded_as_persistent") def strong_ref_object(sess, instance): - if 'refs' not in sess.info: - sess.info['refs'] = refs = set() + if "refs" not in sess.info: + sess.info["refs"] = refs = set() else: - refs = sess.info['refs'] + refs = sess.info["refs"] refs.add(instance) - @event.listens_for(session, "persistent_to_detached") @event.listens_for(session, "persistent_to_deleted") @event.listens_for(session, "persistent_to_transient") def deref_object(sess, instance): - sess.info['refs'].discard(instance) + sess.info["refs"].discard(instance) Above, we intercept the :meth:`.SessionEvents.pending_to_persistent`, :meth:`.SessionEvents.detached_to_persistent`, @@ -186,7 +186,6 @@ It may also be called for any :class:`.sessionmaker`:: maker = sessionmaker() strong_reference_session(maker) - .. 
_unitofwork_merging: Merging @@ -290,22 +289,23 @@ some unexpected state regarding the object being passed to :meth:`~.Session.merg Lets use the canonical example of the User and Address objects:: class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) name = Column(String(50), nullable=False) addresses = relationship("Address", backref="user") + class Address(Base): - __tablename__ = 'address' + __tablename__ = "address" id = Column(Integer, primary_key=True) email_address = Column(String(50), nullable=False) - user_id = Column(Integer, ForeignKey('user.id'), nullable=False) + user_id = Column(Integer, ForeignKey("user.id"), nullable=False) Assume a ``User`` object with one ``Address``, already persistent:: - >>> u1 = User(name='ed', addresses=[Address(email_address='ed@ed.com')]) + >>> u1 = User(name="ed", addresses=[Address(email_address="ed@ed.com")]) >>> session.add(u1) >>> session.commit() @@ -419,7 +419,7 @@ When we talk about expiration of data we are usually talking about an object that is in the :term:`persistent` state. For example, if we load an object as follows:: - user = session.query(User).filter_by(name='user1').first() + user = session.query(User).filter_by(name="user1").first() The above ``User`` object is persistent, and has a series of attributes present; if we were to look inside its ``__dict__``, we'd see that state @@ -481,7 +481,7 @@ Another key behavior of both :meth:`~.Session.expire` and :meth:`~.Session.refre is that all un-flushed changes on an object are discarded. 
That is, if we were to modify an attribute on our ``User``:: - >>> user.name = 'user2' + >>> user.name = "user2" but then we call :meth:`~.Session.expire` without first calling :meth:`~.Session.flush`, our pending value of ``'user2'`` is discarded:: @@ -500,7 +500,7 @@ it can also be passed a list of string attribute names, referring to specific attributes to be marked as expired:: # expire only attributes obj1.attr1, obj1.attr2 - session.expire(obj1, ['attr1', 'attr2']) + session.expire(obj1, ["attr1", "attr2"]) The :meth:`.Session.expire_all` method allows us to essentially call :meth:`.Session.expire` on all objects contained within the :class:`.Session` @@ -519,7 +519,7 @@ but unlike :meth:`~.Session.expire`, expects at least one name to be that of a column-mapped attribute:: # reload obj1.attr1, obj1.attr2 - session.refresh(obj1, ['attr1', 'attr2']) + session.refresh(obj1, ["attr1", "attr2"]) .. tip:: diff --git a/doc/build/orm/session_transaction.rst b/doc/build/orm/session_transaction.rst index c7df69f429..1d246b79ab 100644 --- a/doc/build/orm/session_transaction.rst +++ b/doc/build/orm/session_transaction.rst @@ -28,6 +28,7 @@ the scope of the :class:`_orm.SessionTransaction`. 
Below, assume we start with a :class:`_orm.Session`:: from sqlalchemy.orm import Session + session = Session(engine) We can now run operations within a demarcated transaction using a context @@ -139,7 +140,7 @@ method:: session.add(u1) session.add(u2) - nested = session.begin_nested() # establish a savepoint + nested = session.begin_nested() # establish a savepoint session.add(u3) nested.rollback() # rolls back u3, keeps u1 and u2 @@ -163,9 +164,9 @@ rolling back the whole transaction, as in the example below:: for record in records: try: with session.begin_nested(): - session.merge(record) + session.merge(record) except: - print("Skipped record %s" % record) + print("Skipped record %s" % record) session.commit() When the context manager yielded by :meth:`_orm.Session.begin_nested` @@ -264,8 +265,8 @@ Engine:: [ {"data": "some data one"}, {"data": "some data two"}, - {"data": "some data three"} - ] + {"data": "some data three"}, + ], ) conn.commit() @@ -274,11 +275,13 @@ Session:: Session = sessionmaker(engine, future=True) with Session() as session: - session.add_all([ - SomeClass(data="some data one"), - SomeClass(data="some data two"), - SomeClass(data="some data three") - ]) + session.add_all( + [ + SomeClass(data="some data one"), + SomeClass(data="some data two"), + SomeClass(data="some data three"), + ] + ) session.commit() Begin Once @@ -300,8 +303,8 @@ Engine:: [ {"data": "some data one"}, {"data": "some data two"}, - {"data": "some data three"} - ] + {"data": "some data three"}, + ], ) # commits and closes automatically @@ -310,14 +313,15 @@ Session:: Session = sessionmaker(engine, future=True) with Session.begin() as session: - session.add_all([ - SomeClass(data="some data one"), - SomeClass(data="some data two"), - SomeClass(data="some data three") - ]) + session.add_all( + [ + SomeClass(data="some data one"), + SomeClass(data="some data two"), + SomeClass(data="some data three"), + ] + ) # commits and closes automatically - Nested Transaction 
~~~~~~~~~~~~~~~~~~~~ @@ -339,8 +343,8 @@ Engine:: [ {"data": "some data one"}, {"data": "some data two"}, - {"data": "some data three"} - ] + {"data": "some data three"}, + ], ) savepoint.commit() # or rollback @@ -352,17 +356,16 @@ Session:: with Session.begin() as session: savepoint = session.begin_nested() - session.add_all([ - SomeClass(data="some data one"), - SomeClass(data="some data two"), - SomeClass(data="some data three") - ]) + session.add_all( + [ + SomeClass(data="some data one"), + SomeClass(data="some data two"), + SomeClass(data="some data three"), + ] + ) savepoint.commit() # or rollback # commits automatically - - - .. _session_autocommit: .. _session_explicit_begin: @@ -399,8 +402,8 @@ point at which the "begin" operation occurs. To suit this, the try: item1 = session.query(Item).get(1) item2 = session.query(Item).get(2) - item1.foo = 'bar' - item2.bar = 'foo' + item1.foo = "bar" + item2.bar = "foo" session.commit() except: session.rollback() @@ -413,8 +416,8 @@ The above pattern is more idiomatically invoked using a context manager:: with session.begin(): item1 = session.query(Item).get(1) item2 = session.query(Item).get(2) - item1.foo = 'bar' - item2.bar = 'foo' + item1.foo = "bar" + item2.bar = "foo" The :meth:`_orm.Session.begin` method and the session's "autobegin" process use the same sequence of steps to begin the transaction. This includes @@ -453,6 +456,7 @@ a decorator may be used:: import contextlib + @contextlib.contextmanager def transaction(session): if not session.in_transaction(): @@ -461,7 +465,6 @@ a decorator may be used:: else: yield - The above context manager may be used in the same way the "subtransaction" flag works, such as in the following example:: @@ -471,12 +474,14 @@ The above context manager may be used in the same way the with transaction(session): method_b(session) + # method_b also starts a transaction, but when # called from method_a participates in the ongoing # transaction. 
def method_b(session): with transaction(session): - session.add(SomeObject('bat', 'lala')) + session.add(SomeObject("bat", "lala")) + Session = sessionmaker(engine) @@ -491,8 +496,10 @@ or methods to be concerned with the details of transaction demarcation:: def method_a(session): method_b(session) + def method_b(session): - session.add(SomeObject('bat', 'lala')) + session.add(SomeObject("bat", "lala")) + Session = sessionmaker(engine) @@ -518,13 +525,13 @@ also :meth:`_orm.Session.prepare` the session for interacting with transactions not managed by SQLAlchemy. To use two phase transactions set the flag ``twophase=True`` on the session:: - engine1 = create_engine('postgresql://db1') - engine2 = create_engine('postgresql://db2') + engine1 = create_engine("postgresql://db1") + engine2 = create_engine("postgresql://db2") Session = sessionmaker(twophase=True) # bind User operations to engine 1, Account operations to engine 2 - Session.configure(binds={User:engine1, Account:engine2}) + Session.configure(binds={User: engine1, Account: engine2}) session = Session() @@ -534,7 +541,6 @@ transactions set the flag ``twophase=True`` on the session:: # before committing both transactions session.commit() - .. 
_session_transaction_isolation: Setting Transaction Isolation Levels / DBAPI AUTOCOMMIT @@ -583,13 +589,11 @@ in all cases, which is then used as the source of connectivity for a from sqlalchemy.orm import sessionmaker eng = create_engine( - "postgresql://scott:tiger@localhost/test", - isolation_level='REPEATABLE READ' + "postgresql://scott:tiger@localhost/test", isolation_level="REPEATABLE READ" ) Session = sessionmaker(eng) - Another option, useful if there are to be two engines with different isolation levels at once, is to use the :meth:`_engine.Engine.execution_options` method, which will produce a shallow copy of the original :class:`_engine.Engine` which @@ -607,7 +611,6 @@ operations:: transactional_session = sessionmaker(eng) autocommit_session = sessionmaker(autocommit_engine) - Above, both "``eng``" and ``"autocommit_engine"`` share the same dialect and connection pool. However the "AUTOCOMMIT" mode will be set upon connections when they are acquired from the ``autocommit_engine``. The two @@ -660,7 +663,6 @@ methods:: with Session() as session: session.bind_mapper(User, autocommit_engine) - Setting Isolation for Individual Transactions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -683,7 +685,7 @@ level on a per-connection basis can be affected by using the # call connection() with options before any other operations proceed. # this will procure a new connection from the bound engine and begin a real # database transaction. - sess.connection(execution_options={'isolation_level': 'SERIALIZABLE'}) + sess.connection(execution_options={"isolation_level": "SERIALIZABLE"}) # ... work with session in SERIALIZABLE isolation level... @@ -715,15 +717,13 @@ the per-connection-transaction isolation level:: # call connection() with options before any other operations proceed. # this will procure a new connection from the bound engine and begin a # real database transaction. 
- sess.connection(execution_options={'isolation_level': 'SERIALIZABLE'}) + sess.connection(execution_options={"isolation_level": "SERIALIZABLE"}) # ... work with session in SERIALIZABLE isolation level... # outside the block, the transaction has been committed. the connection is # released and reverted to its previous isolation level. - - Tracking Transaction State with Events -------------------------------------- @@ -765,7 +765,8 @@ are reverted:: # global application scope. create Session class, engine Session = sessionmaker() - engine = create_engine('postgresql://...') + engine = create_engine("postgresql://...") + class SomeTest(TestCase): def setUp(self): @@ -775,11 +776,9 @@ are reverted:: # begin a non-ORM transaction self.trans = self.connection.begin() - # bind an individual Session to the connection self.session = Session(bind=self.connection) - ### optional ### # if the database supports SAVEPOINT (SQLite needs special diff --git a/doc/build/orm/tutorial.rst b/doc/build/orm/tutorial.rst index fb52023420..327957e9f6 100644 --- a/doc/build/orm/tutorial.rst +++ b/doc/build/orm/tutorial.rst @@ -72,7 +72,7 @@ Version Check A quick check to verify that we are on at least **version 1.4** of SQLAlchemy:: >>> import sqlalchemy - >>> sqlalchemy.__version__ # doctest:+SKIP + >>> sqlalchemy.__version__ # doctest:+SKIP 1.4.0 Connecting @@ -82,7 +82,7 @@ For this tutorial we will use an in-memory-only SQLite database. To connect we use :func:`~sqlalchemy.create_engine`:: >>> from sqlalchemy import create_engine - >>> engine = create_engine('sqlite:///:memory:', echo=True) + >>> engine = create_engine("sqlite:///:memory:", echo=True) The ``echo`` flag is a shortcut to setting up SQLAlchemy logging, which is accomplished via Python's standard ``logging`` module. With it enabled, we'll @@ -146,7 +146,7 @@ the table name, and names and datatypes of columns:: >>> from sqlalchemy import Column, Integer, String >>> class User(Base): - ... __tablename__ = 'users' + ... 
__tablename__ = "users" ... ... id = Column(Integer, primary_key=True) ... name = Column(String) @@ -154,8 +154,11 @@ the table name, and names and datatypes of columns:: ... nickname = Column(String) ... ... def __repr__(self): - ... return "" % ( - ... self.name, self.fullname, self.nickname) + ... return "" % ( + ... self.name, + ... self.fullname, + ... self.nickname, + ... ) .. sidebar:: Tip @@ -196,7 +199,7 @@ our table, known as :term:`table metadata`. The object used by SQLAlchemy to r this information for a specific table is called the :class:`_schema.Table` object, and here Declarative has made one for us. We can see this object by inspecting the ``__table__`` attribute:: - >>> User.__table__ # doctest: +NORMALIZE_WHITESPACE + >>> User.__table__ # doctest: +NORMALIZE_WHITESPACE Table('users', MetaData(), Column('id', Integer(), table=, primary_key=True, nullable=False), Column('name', String(), table=), @@ -269,21 +272,25 @@ the actual ``CREATE TABLE`` statement: without being instructed. 
For that, you use the :class:`~sqlalchemy.schema.Sequence` construct:: from sqlalchemy import Sequence - Column(Integer, Sequence('user_id_seq'), primary_key=True) + + Column(Integer, Sequence("user_id_seq"), primary_key=True) A full, foolproof :class:`~sqlalchemy.schema.Table` generated via our declarative mapping is therefore:: class User(Base): - __tablename__ = 'users' - id = Column(Integer, Sequence('user_id_seq'), primary_key=True) + __tablename__ = "users" + id = Column(Integer, Sequence("user_id_seq"), primary_key=True) name = Column(String(50)) fullname = Column(String(50)) nickname = Column(String(50)) def __repr__(self): return "" % ( - self.name, self.fullname, self.nickname) + self.name, + self.fullname, + self.nickname, + ) We include this more verbose table definition separately to highlight the difference between a minimal construct geared primarily @@ -296,7 +303,7 @@ Create an Instance of the Mapped Class With mappings complete, let's now create and inspect a ``User`` object:: - >>> ed_user = User(name='ed', fullname='Ed Jones', nickname='edsnickname') + >>> ed_user = User(name="ed", fullname="Ed Jones", nickname="edsnickname") >>> ed_user.name 'ed' >>> ed_user.nickname @@ -383,7 +390,7 @@ Adding and Updating Objects To persist our ``User`` object, we :meth:`~.Session.add` it to our :class:`~sqlalchemy.orm.session.Session`:: - >>> ed_user = User(name='ed', fullname='Ed Jones', nickname='edsnickname') + >>> ed_user = User(name="ed", fullname="Ed Jones", nickname="edsnickname") >>> session.add(ed_user) At this point, we say that the instance is **pending**; no SQL has yet been issued @@ -401,7 +408,9 @@ added: .. sourcecode:: python+sql - {sql}>>> our_user = session.query(User).filter_by(name='ed').first() # doctest:+NORMALIZE_WHITESPACE + {sql}>>> our_user = ( + ... session.query(User).filter_by(name="ed").first() + ... ) # doctest:+NORMALIZE_WHITESPACE BEGIN (implicit) INSERT INTO users (name, fullname, nickname) VALUES (?, ?, ?) [...] 
('ed', 'Ed Jones', 'edsnickname') @@ -440,16 +449,19 @@ We can add more ``User`` objects at once using .. sourcecode:: python+sql - >>> session.add_all([ - ... User(name='wendy', fullname='Wendy Williams', nickname='windy'), - ... User(name='mary', fullname='Mary Contrary', nickname='mary'), - ... User(name='fred', fullname='Fred Flintstone', nickname='freddy')]) + >>> session.add_all( + ... [ + ... User(name="wendy", fullname="Wendy Williams", nickname="windy"), + ... User(name="mary", fullname="Mary Contrary", nickname="mary"), + ... User(name="fred", fullname="Fred Flintstone", nickname="freddy"), + ... ] + ... ) Also, we've decided Ed's nickname isn't that great, so lets change it: .. sourcecode:: python+sql - >>> ed_user.nickname = 'eddie' + >>> ed_user.nickname = "eddie" The :class:`~sqlalchemy.orm.session.Session` is paying attention. It knows, for example, that ``Ed Jones`` has been modified: @@ -498,7 +510,7 @@ If we look at Ed's ``id`` attribute, which earlier was ``None``, it now has a va .. sourcecode:: python+sql - {sql}>>> ed_user.id # doctest: +NORMALIZE_WHITESPACE + {sql}>>> ed_user.id # doctest: +NORMALIZE_WHITESPACE BEGIN (implicit) SELECT users.id AS users_id, users.name AS users_name, @@ -535,20 +547,20 @@ we can roll back changes made too. Let's make two changes that we'll revert; .. sourcecode:: python+sql - >>> ed_user.name = 'Edwardo' + >>> ed_user.name = "Edwardo" and we'll add another erroneous user, ``fake_user``: .. sourcecode:: python+sql - >>> fake_user = User(name='fakeuser', fullname='Invalid', nickname='12345') + >>> fake_user = User(name="fakeuser", fullname="Invalid", nickname="12345") >>> session.add(fake_user) Querying the session, we can see that they're flushed into the current transaction: .. sourcecode:: python+sql - {sql}>>> session.query(User).filter(User.name.in_(['Edwardo', 'fakeuser'])).all() + {sql}>>> session.query(User).filter(User.name.in_(["Edwardo", "fakeuser"])).all() UPDATE users SET name=? WHERE users.id = ? 
[...] ('Edwardo', 1) INSERT INTO users (name, fullname, nickname) VALUES (?, ?, ?) @@ -588,7 +600,7 @@ issuing a SELECT illustrates the changes made to the database: .. sourcecode:: python+sql - {sql}>>> session.query(User).filter(User.name.in_(['ed', 'fakeuser'])).all() + {sql}>>> session.query(User).filter(User.name.in_(["ed", "fakeuser"])).all() SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, @@ -655,7 +667,7 @@ class: .. sourcecode:: python+sql {sql}>>> for row in session.query(User, User.name).all(): - ... print(row.User, row.name) + ... print(row.User, row.name) SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, @@ -675,8 +687,8 @@ is mapped to one (such as ``User.name``): .. sourcecode:: python+sql - {sql}>>> for row in session.query(User.name.label('name_label')).all(): - ... print(row.name_label) + {sql}>>> for row in session.query(User.name.label("name_label")).all(): + ... print(row.name_label) SELECT users.name AS name_label FROM users [...] (){stop} @@ -692,10 +704,10 @@ entities are present in the call to :meth:`~.Session.query`, can be controlled u .. sourcecode:: python+sql >>> from sqlalchemy.orm import aliased - >>> user_alias = aliased(User, name='user_alias') + >>> user_alias = aliased(User, name="user_alias") {sql}>>> for row in session.query(user_alias, user_alias.name).all(): - ... print(row.user_alias) + ... print(row.user_alias) SELECT user_alias.id AS user_alias_id, user_alias.name AS user_alias_name, user_alias.fullname AS user_alias_fullname, @@ -715,7 +727,7 @@ conjunction with ORDER BY: .. sourcecode:: python+sql {sql}>>> for u in session.query(User).order_by(User.id)[1:3]: - ... print(u) + ... print(u) SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, @@ -731,9 +743,8 @@ and filtering results, which is accomplished either with .. sourcecode:: python+sql - {sql}>>> for name, in session.query(User.name).\ - ... 
filter_by(fullname='Ed Jones'): - ... print(name) + {sql}>>> for (name,) in session.query(User.name).filter_by(fullname="Ed Jones"): + ... print(name) SELECT users.name AS users_name FROM users WHERE users.fullname = ? [...] ('Ed Jones',) @@ -745,9 +756,8 @@ operators with the class-level attributes on your mapped class: .. sourcecode:: python+sql - {sql}>>> for name, in session.query(User.name).\ - ... filter(User.fullname=='Ed Jones'): - ... print(name) + {sql}>>> for (name,) in session.query(User.name).filter(User.fullname == "Ed Jones"): + ... print(name) SELECT users.name AS users_name FROM users WHERE users.fullname = ? [...] ('Ed Jones',) @@ -762,10 +772,10 @@ users named "ed" with a full name of "Ed Jones", you can call .. sourcecode:: python+sql - {sql}>>> for user in session.query(User).\ - ... filter(User.name=='ed').\ - ... filter(User.fullname=='Ed Jones'): - ... print(user) + {sql}>>> for user in ( + ... session.query(User).filter(User.name == "ed").filter(User.fullname == "Ed Jones") + ... ): + ... 
print(user) SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, @@ -783,11 +793,11 @@ Here's a rundown of some of the most common operators used in * :meth:`equals <.ColumnOperators.__eq__>`:: - query.filter(User.name == 'ed') + query.filter(User.name == "ed") * :meth:`not equals <.ColumnOperators.__ne__>`:: - query.filter(User.name != 'ed') + query.filter(User.name != "ed") * :meth:`LIKE <.ColumnOperators.like>`:: @@ -808,23 +818,21 @@ Here's a rundown of some of the most common operators used in * :meth:`IN <.ColumnOperators.in_>`:: - query.filter(User.name.in_(['ed', 'wendy', 'jack'])) + query.filter(User.name.in_(["ed", "wendy", "jack"])) # works with query objects too: - query.filter(User.name.in_( - session.query(User.name).filter(User.name.like('%ed%')) - )) + query.filter(User.name.in_(session.query(User.name).filter(User.name.like("%ed%")))) # use tuple_() for composite (multi-column) queries from sqlalchemy import tuple_ + query.filter( - tuple_(User.name, User.nickname).\ - in_([('ed', 'edsnickname'), ('wendy', 'windy')]) + tuple_(User.name, User.nickname).in_([("ed", "edsnickname"), ("wendy", "windy")]) ) * :meth:`NOT IN <.ColumnOperators.not_in>`:: - query.filter(~User.name.in_(['ed', 'wendy', 'jack'])) + query.filter(~User.name.in_(["ed", "wendy", "jack"])) * :meth:`IS NULL <.ColumnOperators.is_>`:: @@ -886,7 +894,7 @@ database results. Here's a brief tour: .. sourcecode:: python+sql - >>> query = session.query(User).filter(User.name.like('%ed')).order_by(User.id) + >>> query = session.query(User).filter(User.name.like("%ed")).order_by(User.id) {sql}>>> query.all() SELECT users.id AS users_id, users.name AS users_name, @@ -964,8 +972,7 @@ database results. Here's a brief tour: .. sourcecode:: python+sql - >>> query = session.query(User.id).filter(User.name == 'ed').\ - ... 
order_by(User.id) + >>> query = session.query(User.id).filter(User.name == "ed").order_by(User.id) {sql}>>> query.scalar() SELECT users.id AS users_id FROM users @@ -988,9 +995,7 @@ by most applicable methods. For example, .. sourcecode:: python+sql >>> from sqlalchemy import text - {sql}>>> for user in session.query(User).\ - ... filter(text("id<224")).\ - ... order_by(text("id")).all(): + {sql}>>> for user in session.query(User).filter(text("id<224")).order_by(text("id")).all(): ... print(user.name) SELECT users.id AS users_id, users.name AS users_name, @@ -1010,8 +1015,9 @@ method: .. sourcecode:: python+sql - {sql}>>> session.query(User).filter(text("id<:value and name=:name")).\ - ... params(value=224, name='fred').order_by(User.id).one() + {sql}>>> session.query(User).filter(text("id<:value and name=:name")).params( + ... value=224, name="fred" + ... ).order_by(User.id).one() SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, @@ -1029,8 +1035,9 @@ returned by the SQL statement based on column name: .. sourcecode:: python+sql - {sql}>>> session.query(User).from_statement( - ... text("SELECT * FROM users where name=:name")).params(name='ed').all() + {sql}>>> session.query(User).from_statement(text("SELECT * FROM users where name=:name")).params( + ... name="ed" + ... ).all() SELECT * FROM users where name=? [...] ('ed',) {stop}[] @@ -1041,10 +1048,9 @@ columns are passed in the desired order to :meth:`_expression.TextClause.columns .. sourcecode:: python+sql - >>> stmt = text("SELECT name, id, fullname, nickname " - ... "FROM users where name=:name") + >>> stmt = text("SELECT name, id, fullname, nickname " "FROM users where name=:name") >>> stmt = stmt.columns(User.name, User.id, User.fullname, User.nickname) - {sql}>>> session.query(User).from_statement(stmt).params(name='ed').all() + {sql}>>> session.query(User).from_statement(stmt).params(name="ed").all() SELECT name, id, fullname, nickname FROM users where name=? [...] 
('ed',) {stop}[] @@ -1058,8 +1064,7 @@ any other case: >>> stmt = text("SELECT name, id FROM users where name=:name") >>> stmt = stmt.columns(User.name, User.id) - {sql}>>> session.query(User.id, User.name).\ - ... from_statement(stmt).params(name='ed').all() + {sql}>>> session.query(User.id, User.name).from_statement(stmt).params(name="ed").all() SELECT name, id FROM users where name=? [...] ('ed',) {stop}[(1, u'ed')] @@ -1077,7 +1082,7 @@ counting called :meth:`_query.Query.count`: .. sourcecode:: python+sql - {sql}>>> session.query(User).filter(User.name.like('%ed')).count() + {sql}>>> session.query(User).filter(User.name.like("%ed")).count() SELECT count(*) AS count_1 FROM (SELECT users.id AS users_id, users.name AS users_name, @@ -1125,7 +1130,7 @@ To achieve our simple ``SELECT count(*) FROM table``, we can apply it as: .. sourcecode:: python+sql - {sql}>>> session.query(func.count('*')).select_from(User).scalar() + {sql}>>> session.query(func.count("*")).select_from(User).scalar() SELECT count(?) AS count_1 FROM users [...] ('*',) @@ -1160,18 +1165,17 @@ declarative, we define this table along with its mapped class, ``Address``: >>> from sqlalchemy.orm import relationship >>> class Address(Base): - ... __tablename__ = 'addresses' + ... __tablename__ = "addresses" ... id = Column(Integer, primary_key=True) ... email_address = Column(String, nullable=False) - ... user_id = Column(Integer, ForeignKey('users.id')) + ... user_id = Column(Integer, ForeignKey("users.id")) ... ... user = relationship("User", back_populates="addresses") ... ... def __repr__(self): ... return "" % self.email_address - >>> User.addresses = relationship( - ... 
"Address", order_by=Address.id, back_populates="user") + >>> User.addresses = relationship("Address", order_by=Address.id, back_populates="user") The above class introduces the :class:`_schema.ForeignKey` construct, which is a directive applied to :class:`_schema.Column` that indicates that values in this @@ -1269,7 +1273,7 @@ default, the collection is a Python list. .. sourcecode:: python+sql - >>> jack = User(name='jack', fullname='Jack Bean', nickname='gjffdd') + >>> jack = User(name="jack", fullname="Jack Bean", nickname="gjffdd") >>> jack.addresses [] @@ -1279,8 +1283,9 @@ just assign a full list directly: .. sourcecode:: python+sql >>> jack.addresses = [ - ... Address(email_address='jack@google.com'), - ... Address(email_address='j25@yahoo.com')] + ... Address(email_address="jack@google.com"), + ... Address(email_address="j25@yahoo.com"), + ... ] When using a bidirectional relationship, elements added in one direction automatically become visible in the other direction. This behavior occurs @@ -1316,8 +1321,7 @@ Querying for Jack, we get just Jack back. No SQL is yet issued for Jack's addre .. sourcecode:: python+sql - {sql}>>> jack = session.query(User).\ - ... filter_by(name='jack').one() + {sql}>>> jack = session.query(User).filter_by(name="jack").one() BEGIN (implicit) SELECT users.id AS users_id, users.name AS users_name, @@ -1366,10 +1370,12 @@ Below we load the ``User`` and ``Address`` entities at once using this method: .. sourcecode:: python+sql - {sql}>>> for u, a in session.query(User, Address).\ - ... filter(User.id==Address.user_id).\ - ... filter(Address.email_address=='jack@google.com').\ - ... all(): + {sql}>>> for u, a in ( + ... session.query(User, Address) + ... .filter(User.id == Address.user_id) + ... .filter(Address.email_address == "jack@google.com") + ... .all() + ... ): ... print(u) ... print(a) SELECT users.id AS users_id, @@ -1391,9 +1397,9 @@ using the :meth:`_query.Query.join` method: .. 
sourcecode:: python+sql - {sql}>>> session.query(User).join(Address).\ - ... filter(Address.email_address=='jack@google.com').\ - ... all() + {sql}>>> session.query(User).join(Address).filter( + ... Address.email_address == "jack@google.com" + ... ).all() SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, @@ -1408,15 +1414,17 @@ and ``Address`` because there's only one foreign key between them. If there were no foreign keys, or several, :meth:`_query.Query.join` works better when one of the following forms are used:: - query.join(Address, User.id==Address.user_id) # explicit condition - query.join(User.addresses) # specify relationship from left to right - query.join(Address, User.addresses) # same, with explicit target - query.join(User.addresses.and_(Address.name != 'foo')) # use relationship + additional ON criteria + query.join(Address, User.id == Address.user_id) # explicit condition + query.join(User.addresses) # specify relationship from left to right + query.join(Address, User.addresses) # same, with explicit target + query.join( + User.addresses.and_(Address.name != "foo") + ) # use relationship + additional ON criteria As you would expect, the same idea is used for "outer" joins, using the :meth:`_query.Query.outerjoin` function:: - query.outerjoin(User.addresses) # LEFT OUTER JOIN + query.outerjoin(User.addresses) # LEFT OUTER JOIN The reference documentation for :meth:`_query.Query.join` contains detailed information and examples of the calling styles accepted by this method; :meth:`_query.Query.join` @@ -1431,7 +1439,6 @@ is an important method at the center of usage for any SQL-fluent application. query = session.query(User, Address).select_from(Address).join(User) - .. 
_ormtutorial_aliases: Using Aliases @@ -1453,12 +1460,13 @@ distinct email addresses at the same time: >>> from sqlalchemy.orm import aliased >>> adalias1 = aliased(Address) >>> adalias2 = aliased(Address) - {sql}>>> for username, email1, email2 in \ - ... session.query(User.name, adalias1.email_address, adalias2.email_address).\ - ... join(User.addresses.of_type(adalias1)).\ - ... join(User.addresses.of_type(adalias2)).\ - ... filter(adalias1.email_address=='jack@google.com').\ - ... filter(adalias2.email_address=='j25@yahoo.com'): + {sql}>>> for username, email1, email2 in ( + ... session.query(User.name, adalias1.email_address, adalias2.email_address) + ... .join(User.addresses.of_type(adalias1)) + ... .join(User.addresses.of_type(adalias2)) + ... .filter(adalias1.email_address == "jack@google.com") + ... .filter(adalias2.email_address == "j25@yahoo.com") + ... ): ... print(username, email1, email2) SELECT users.name AS users_name, addresses_1.email_address AS addresses_1_email_address, @@ -1501,9 +1509,11 @@ representing the statement generated by a particular construct, which are described in :ref:`sqlexpression_toplevel`:: >>> from sqlalchemy.sql import func - >>> stmt = session.query(Address.user_id, func.count('*').\ - ... label('address_count')).\ - ... group_by(Address.user_id).subquery() + >>> stmt = ( + ... session.query(Address.user_id, func.count("*").label("address_count")) + ... .group_by(Address.user_id) + ... .subquery() + ... ) The ``func`` keyword generates SQL functions, and the ``subquery()`` method on :class:`~sqlalchemy.orm.query.Query` produces a SQL expression construct @@ -1517,8 +1527,11 @@ accessible through an attribute called ``c``: .. sourcecode:: python+sql - {sql}>>> for u, count in session.query(User, stmt.c.address_count).\ - ... outerjoin(stmt, User.id==stmt.c.user_id).order_by(User.id): + {sql}>>> for u, count in ( + ... session.query(User, stmt.c.address_count) + ... .outerjoin(stmt, User.id == stmt.c.user_id) + ... 
.order_by(User.id) + ... ): ... print(u, count) SELECT users.id AS users_id, users.name AS users_name, @@ -1546,12 +1559,11 @@ to associate an "alias" of a mapped class to a subquery: .. sourcecode:: python+sql - {sql}>>> stmt = session.query(Address).\ - ... filter(Address.email_address != 'j25@yahoo.com').\ - ... subquery() + {sql}>>> stmt = ( + ... session.query(Address).filter(Address.email_address != "j25@yahoo.com").subquery() + ... ) >>> addr_alias = aliased(Address, stmt) - >>> for user, address in session.query(User, addr_alias).\ - ... join(addr_alias, User.addresses): + >>> for user, address in session.query(User, addr_alias).join(addr_alias, User.addresses): ... print(user) ... print(address) SELECT users.id AS users_id, @@ -1585,8 +1597,8 @@ There is an explicit EXISTS construct, which looks like this: .. sourcecode:: python+sql >>> from sqlalchemy.sql import exists - >>> stmt = exists().where(Address.user_id==User.id) - {sql}>>> for name, in session.query(User.name).filter(stmt): + >>> stmt = exists().where(Address.user_id == User.id) + {sql}>>> for (name,) in session.query(User.name).filter(stmt): ... print(name) SELECT users.name AS users_name FROM users @@ -1602,8 +1614,7 @@ usage of EXISTS automatically. Above, the statement can be expressed along the .. sourcecode:: python+sql - {sql}>>> for name, in session.query(User.name).\ - ... filter(User.addresses.any()): + {sql}>>> for (name,) in session.query(User.name).filter(User.addresses.any()): ... print(name) SELECT users.name AS users_name FROM users @@ -1617,8 +1628,9 @@ usage of EXISTS automatically. Above, the statement can be expressed along the .. sourcecode:: python+sql - {sql}>>> for name, in session.query(User.name).\ - ... filter(User.addresses.any(Address.email_address.like('%google%'))): + {sql}>>> for (name,) in session.query(User.name).filter( + ... User.addresses.any(Address.email_address.like("%google%")) + ... ): ... 
print(name) SELECT users.name AS users_name FROM users @@ -1634,8 +1646,7 @@ usage of EXISTS automatically. Above, the statement can be expressed along the .. sourcecode:: python+sql - {sql}>>> session.query(Address).\ - ... filter(~Address.user.has(User.name=='jack')).all() + {sql}>>> session.query(Address).filter(~Address.user.has(User.name == "jack")).all() SELECT addresses.id AS addresses_id, addresses.email_address AS addresses_email_address, addresses.user_id AS addresses_user_id @@ -1671,18 +1682,18 @@ and behavior: * :meth:`~.RelationshipProperty.Comparator.any` (used for collections):: - query.filter(User.addresses.any(Address.email_address == 'bar')) + query.filter(User.addresses.any(Address.email_address == "bar")) # also takes keyword arguments: - query.filter(User.addresses.any(email_address='bar')) + query.filter(User.addresses.any(email_address="bar")) * :meth:`~.RelationshipProperty.Comparator.has` (used for scalar references):: - query.filter(Address.user.has(name='ed')) + query.filter(Address.user.has(name="ed")) * :meth:`_query.Query.with_parent` (used for any relationship):: - session.query(Address).with_parent(someuser, 'addresses') + session.query(Address).with_parent(someuser, "addresses") Eager Loading ============= @@ -1710,9 +1721,12 @@ at once: .. sourcecode:: python+sql >>> from sqlalchemy.orm import selectinload - {sql}>>> jack = session.query(User).\ - ... options(selectinload(User.addresses)).\ - ... filter_by(name='jack').one() + {sql}>>> jack = ( + ... session.query(User) + ... .options(selectinload(User.addresses)) + ... .filter_by(name="jack") + ... .one() + ... ) SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, @@ -1749,9 +1763,9 @@ will emit the extra join regardless: >>> from sqlalchemy.orm import joinedload - {sql}>>> jack = session.query(User).\ - ... options(joinedload(User.addresses)).\ - ... filter_by(name='jack').one() + {sql}>>> jack = ( + ... 
session.query(User).options(joinedload(User.addresses)).filter_by(name="jack").one() + ... ) SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, @@ -1812,11 +1826,13 @@ attribute: .. sourcecode:: python+sql >>> from sqlalchemy.orm import contains_eager - {sql}>>> jacks_addresses = session.query(Address).\ - ... join(Address.user).\ - ... filter(User.name=='jack').\ - ... options(contains_eager(Address.user)).\ - ... all() + {sql}>>> jacks_addresses = ( + ... session.query(Address) + ... .join(Address.user) + ... .filter(User.name == "jack") + ... .options(contains_eager(Address.user)) + ... .all() + ... ) SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, @@ -1846,7 +1862,7 @@ in the session, then we'll issue a ``count`` query to see that no rows remain: .. sourcecode:: python+sql >>> session.delete(jack) - {sql}>>> session.query(User).filter_by(name='jack').count() + {sql}>>> session.query(User).filter_by(name="jack").count() UPDATE addresses SET user_id=? WHERE addresses.id = ? [...] ((None, 1), (None, 2)) DELETE FROM users WHERE users.id = ? @@ -1866,8 +1882,8 @@ So far, so good. How about Jack's ``Address`` objects ? .. sourcecode:: python+sql {sql}>>> session.query(Address).filter( - ... Address.email_address.in_(['jack@google.com', 'j25@yahoo.com']) - ... ).count() + ... Address.email_address.in_(["jack@google.com", "j25@yahoo.com"]) + ... ).count() SELECT count(*) AS count_1 FROM (SELECT addresses.id AS addresses_id, addresses.email_address AS addresses_email_address, @@ -1905,28 +1921,32 @@ Next we'll declare the ``User`` class, adding in the ``addresses`` relationship including the cascade configuration (we'll leave the constructor out too):: >>> class User(Base): - ... __tablename__ = 'users' + ... __tablename__ = "users" ... ... id = Column(Integer, primary_key=True) ... name = Column(String) ... fullname = Column(String) ... nickname = Column(String) ... - ... 
addresses = relationship("Address", back_populates='user', - ... cascade="all, delete, delete-orphan") + ... addresses = relationship( + ... "Address", back_populates="user", cascade="all, delete, delete-orphan" + ... ) ... ... def __repr__(self): - ... return "" % ( - ... self.name, self.fullname, self.nickname) + ... return "" % ( + ... self.name, + ... self.fullname, + ... self.nickname, + ... ) Then we recreate ``Address``, noting that in this case we've created the ``Address.user`` relationship via the ``User`` class already:: >>> class Address(Base): - ... __tablename__ = 'addresses' + ... __tablename__ = "addresses" ... id = Column(Integer, primary_key=True) ... email_address = Column(String, nullable=False) - ... user_id = Column(Integer, ForeignKey('users.id')) + ... user_id = Column(Integer, ForeignKey("users.id")) ... user = relationship("User", back_populates="addresses") ... ... def __repr__(self): @@ -1963,7 +1983,7 @@ being deleted: # only one address remains {sql}>>> session.query(Address).filter( - ... Address.email_address.in_(['jack@google.com', 'j25@yahoo.com']) + ... Address.email_address.in_(["jack@google.com", "j25@yahoo.com"]) ... ).count() DELETE FROM addresses WHERE addresses.id = ? [...] (2,) @@ -1983,7 +2003,7 @@ with the user: >>> session.delete(jack) - {sql}>>> session.query(User).filter_by(name='jack').count() + {sql}>>> session.query(User).filter_by(name="jack").count() DELETE FROM addresses WHERE addresses.id = ? [...] (1,) DELETE FROM users WHERE users.id = ? @@ -1999,7 +2019,7 @@ with the user: {stop}0 {sql}>>> session.query(Address).filter( - ... Address.email_address.in_(['jack@google.com', 'j25@yahoo.com']) + ... Address.email_address.in_(["jack@google.com", "j25@yahoo.com"]) ... ).count() SELECT count(*) AS count_1 FROM (SELECT addresses.id AS addresses_id, @@ -2032,9 +2052,11 @@ to serve as the association table. 
This looks like the following:: >>> from sqlalchemy import Table, Text >>> # association table - >>> post_keywords = Table('post_keywords', Base.metadata, - ... Column('post_id', ForeignKey('posts.id'), primary_key=True), - ... Column('keyword_id', ForeignKey('keywords.id'), primary_key=True) + >>> post_keywords = Table( + ... "post_keywords", + ... Base.metadata, + ... Column("post_id", ForeignKey("posts.id"), primary_key=True), + ... Column("keyword_id", ForeignKey("keywords.id"), primary_key=True), ... ) Above, we can see declaring a :class:`_schema.Table` directly is a little different @@ -2048,17 +2070,15 @@ Next we define ``BlogPost`` and ``Keyword``, using complementary table as an association table:: >>> class BlogPost(Base): - ... __tablename__ = 'posts' + ... __tablename__ = "posts" ... ... id = Column(Integer, primary_key=True) - ... user_id = Column(Integer, ForeignKey('users.id')) + ... user_id = Column(Integer, ForeignKey("users.id")) ... headline = Column(String(255), nullable=False) ... body = Column(Text) ... ... # many to many BlogPost<->Keyword - ... keywords = relationship('Keyword', - ... secondary=post_keywords, - ... back_populates='posts') + ... keywords = relationship("Keyword", secondary=post_keywords, back_populates="posts") ... ... def __init__(self, headline, body, author): ... self.author = author @@ -2070,13 +2090,11 @@ table as an association table:: >>> class Keyword(Base): - ... __tablename__ = 'keywords' + ... __tablename__ = "keywords" ... ... id = Column(Integer, primary_key=True) ... keyword = Column(String(50), nullable=False, unique=True) - ... posts = relationship('BlogPost', - ... secondary=post_keywords, - ... back_populates='keywords') + ... posts = relationship("BlogPost", secondary=post_keywords, back_populates="keywords") ... ... def __init__(self, keyword): ... self.keyword = keyword @@ -2144,9 +2162,7 @@ Usage is not too different from what we've been doing. Let's give Wendy some bl .. 
sourcecode:: python+sql - {sql}>>> wendy = session.query(User).\ - ... filter_by(name='wendy').\ - ... one() + {sql}>>> wendy = session.query(User).filter_by(name="wendy").one() SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, @@ -2163,8 +2179,8 @@ have any yet, so we can just create them: .. sourcecode:: python+sql - >>> post.keywords.append(Keyword('wendy')) - >>> post.keywords.append(Keyword('firstpost')) + >>> post.keywords.append(Keyword("wendy")) + >>> post.keywords.append(Keyword("firstpost")) We can now look up all blog posts with the keyword 'firstpost'. We'll use the ``any`` operator to locate "blog posts where any of its keywords has the @@ -2172,9 +2188,7 @@ keyword string 'firstpost'": .. sourcecode:: python+sql - {sql}>>> session.query(BlogPost).\ - ... filter(BlogPost.keywords.any(keyword='firstpost')).\ - ... all() + {sql}>>> session.query(BlogPost).filter(BlogPost.keywords.any(keyword="firstpost")).all() INSERT INTO keywords (keyword) VALUES (?) [...] ('wendy',) INSERT INTO keywords (keyword) VALUES (?) @@ -2201,10 +2215,9 @@ the query to narrow down to that ``User`` object as a parent: .. sourcecode:: python+sql - {sql}>>> session.query(BlogPost).\ - ... filter(BlogPost.author==wendy).\ - ... filter(BlogPost.keywords.any(keyword='firstpost')).\ - ... all() + {sql}>>> session.query(BlogPost).filter(BlogPost.author == wendy).filter( + ... BlogPost.keywords.any(keyword="firstpost") + ... ).all() SELECT posts.id AS posts_id, posts.user_id AS posts_user_id, posts.headline AS posts_headline, @@ -2223,9 +2236,7 @@ relationship, to query straight from there: .. sourcecode:: python+sql - {sql}>>> wendy.posts.\ - ... filter(BlogPost.keywords.any(keyword='firstpost')).\ - ... 
all() + {sql}>>> wendy.posts.filter(BlogPost.keywords.any(keyword="firstpost")).all() SELECT posts.id AS posts_id, posts.user_id AS posts_user_id, posts.headline AS posts_headline, diff --git a/doc/build/orm/versioning.rst b/doc/build/orm/versioning.rst index 30388eb8d2..790c1c1f92 100644 --- a/doc/build/orm/versioning.rst +++ b/doc/build/orm/versioning.rst @@ -55,15 +55,13 @@ to the mapped table, then establish it as the ``version_id_col`` within the mapper options:: class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) version_id = Column(Integer, nullable=False) name = Column(String(50), nullable=False) - __mapper_args__ = { - "version_id_col": version_id - } + __mapper_args__ = {"version_id_col": version_id} .. note:: It is **strongly recommended** that the ``version_id`` column be made NOT NULL. The versioning feature **does not support** a NULL @@ -105,16 +103,17 @@ support a native GUID type, but we illustrate here using a simple string):: import uuid + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) version_uuid = Column(String(32), nullable=False) name = Column(String(50), nullable=False) __mapper_args__ = { - 'version_id_col':version_uuid, - 'version_id_generator':lambda version: uuid.uuid4().hex + "version_id_col": version_uuid, + "version_id_generator": lambda version: uuid.uuid4().hex, } The persistence engine will call upon ``uuid.uuid4()`` each time a @@ -148,17 +147,15 @@ class as follows:: from sqlalchemy import FetchedValue + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) name = Column(String(50), nullable=False) xmin = Column("xmin", String, system=True, server_default=FetchedValue()) - __mapper_args__ = { - 'version_id_col': xmin, - 'version_id_generator': False - } + __mapper_args__ = {"version_id_col": xmin, "version_id_generator": False} With the above mapping, the ORM will 
rely upon the ``xmin`` column for automatically providing the new value of the version id counter. @@ -222,25 +219,24 @@ at our choosing:: import uuid + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) version_uuid = Column(String(32), nullable=False) name = Column(String(50), nullable=False) - __mapper_args__ = { - 'version_id_col':version_uuid, - 'version_id_generator': False - } + __mapper_args__ = {"version_id_col": version_uuid, "version_id_generator": False} + - u1 = User(name='u1', version_uuid=uuid.uuid4()) + u1 = User(name="u1", version_uuid=uuid.uuid4()) session.add(u1) session.commit() - u1.name = 'u2' + u1.name = "u2" u1.version_uuid = uuid.uuid4() session.commit() @@ -252,7 +248,7 @@ for schemes where only certain classes of UPDATE are sensitive to concurrency issues:: # will leave version_uuid unchanged - u1.name = 'u3' + u1.name = "u3" session.commit() .. versionadded:: 0.9.0 diff --git a/doc/build/tutorial/data_insert.rst b/doc/build/tutorial/data_insert.rst index 63aeb51a08..0d745cb319 100644 --- a/doc/build/tutorial/data_insert.rst +++ b/doc/build/tutorial/data_insert.rst @@ -35,7 +35,7 @@ A simple example of :class:`_sql.Insert` illustrating the target table and the VALUES clause at once:: >>> from sqlalchemy import insert - >>> stmt = insert(user_table).values(name='spongebob', fullname="Spongebob Squarepants") + >>> stmt = insert(user_table).values(name="spongebob", fullname="Spongebob Squarepants") The above ``stmt`` variable is an instance of :class:`_sql.Insert`. Most SQL expressions can be stringified in place as a means to see the general @@ -122,8 +122,8 @@ illustrate this: ... insert(user_table), ... [ ... {"name": "sandy", "fullname": "Sandy Cheeks"}, - ... {"name": "patrick", "fullname": "Patrick Star"} - ... ] + ... {"name": "patrick", "fullname": "Patrick Star"}, + ... ], ... ) ... conn.commit() {opensql}BEGIN (implicit) @@ -167,19 +167,19 @@ construct automatically. 
>>> from sqlalchemy import select, bindparam >>> scalar_subq = ( - ... select(user_table.c.id). - ... where(user_table.c.name==bindparam('username')). - ... scalar_subquery() + ... select(user_table.c.id) + ... .where(user_table.c.name == bindparam("username")) + ... .scalar_subquery() ... ) >>> with engine.connect() as conn: ... result = conn.execute( ... insert(address_table).values(user_id=scalar_subq), ... [ - ... {"username": 'spongebob', "email_address": "spongebob@sqlalchemy.org"}, - ... {"username": 'sandy', "email_address": "sandy@sqlalchemy.org"}, - ... {"username": 'sandy', "email_address": "sandy@squirrelpower.org"}, - ... ] + ... {"username": "spongebob", "email_address": "spongebob@sqlalchemy.org"}, + ... {"username": "sandy", "email_address": "sandy@sqlalchemy.org"}, + ... {"username": "sandy", "email_address": "sandy@squirrelpower.org"}, + ... ], ... ) ... conn.commit() {opensql}BEGIN (implicit) @@ -221,7 +221,9 @@ method; in this case, the :class:`_engine.Result` object that's returned when the statement is executed has rows which can be fetched:: - >>> insert_stmt = insert(address_table).returning(address_table.c.id, address_table.c.email_address) + >>> insert_stmt = insert(address_table).returning( + ... address_table.c.id, address_table.c.email_address + ... ) >>> print(insert_stmt) {opensql}INSERT INTO address (id, user_id, email_address) VALUES (:id, :user_id, :email_address) diff --git a/doc/build/tutorial/data_select.rst b/doc/build/tutorial/data_select.rst index eab9dccefd..9b0b887da1 100644 --- a/doc/build/tutorial/data_select.rst +++ b/doc/build/tutorial/data_select.rst @@ -36,7 +36,7 @@ each method builds more state onto the object. 
Like the other SQL constructs, it can be stringified in place:: >>> from sqlalchemy import select - >>> stmt = select(user_table).where(user_table.c.name == 'spongebob') + >>> stmt = select(user_table).where(user_table.c.name == "spongebob") >>> print(stmt) {opensql}SELECT user_account.id, user_account.name, user_account.fullname FROM user_account @@ -71,7 +71,7 @@ elements within each row: .. sourcecode:: pycon+sql - >>> stmt = select(User).where(User.name == 'spongebob') + >>> stmt = select(User).where(User.name == "spongebob") >>> with Session(engine) as session: ... for row in session.execute(stmt): ... print(row) @@ -196,9 +196,7 @@ attribute of the ``User`` entity as the first element of the row, and combine it with full ``Address`` entities in the second element:: >>> session.execute( - ... select(User.name, Address). - ... where(User.id==Address.user_id). - ... order_by(Address.id) + ... select(User.name, Address).where(User.id == Address.user_id).order_by(Address.id) ... ).all() {opensql}SELECT user_account.name, address.id, address.email_address, address.user_id FROM user_account, address @@ -226,11 +224,9 @@ when referring to arbitrary SQL expressions in a result row by name: .. sourcecode:: pycon+sql >>> from sqlalchemy import func, cast - >>> stmt = ( - ... select( - ... ("Username: " + user_table.c.name).label("username"), - ... ).order_by(user_table.c.name) - ... ) + >>> stmt = select( + ... ("Username: " + user_table.c.name).label("username"), + ... ).order_by(user_table.c.name) >>> with engine.connect() as conn: ... for row in conn.execute(stmt): ... print(f"{row.username}") @@ -269,11 +265,7 @@ a hardcoded string literal ``'some label'`` and embed it within the SELECT statement:: >>> from sqlalchemy import text - >>> stmt = ( - ... select( - ... text("'some phrase'"), user_table.c.name - ... ).order_by(user_table.c.name) - ... 
) + >>> stmt = select(text("'some phrase'"), user_table.c.name).order_by(user_table.c.name) >>> with engine.connect() as conn: ... print(conn.execute(stmt).all()) {opensql}BEGIN (implicit) @@ -295,10 +287,8 @@ towards in subqueries and other expressions:: >>> from sqlalchemy import literal_column - >>> stmt = ( - ... select( - ... literal_column("'some phrase'").label("p"), user_table.c.name - ... ).order_by(user_table.c.name) + >>> stmt = select(literal_column("'some phrase'").label("p"), user_table.c.name).order_by( + ... user_table.c.name ... ) >>> with engine.connect() as conn: ... for row in conn.execute(stmt): @@ -330,7 +320,7 @@ conjunction with Python operators such as ``==``, ``!=``, ``<``, ``>=`` etc. generate new SQL Expression objects, rather than plain boolean ``True``/``False`` values:: - >>> print(user_table.c.name == 'squidward') + >>> print(user_table.c.name == "squidward") user_account.name = :name_1 >>> print(address_table.c.user_id > 10) @@ -340,7 +330,7 @@ SQL Expression objects, rather than plain boolean ``True``/``False`` values:: We can use expressions like these to generate the WHERE clause by passing the resulting objects to the :meth:`_sql.Select.where` method:: - >>> print(select(user_table).where(user_table.c.name == 'squidward')) + >>> print(select(user_table).where(user_table.c.name == "squidward")) {opensql}SELECT user_account.id, user_account.name, user_account.fullname FROM user_account WHERE user_account.name = :name_1 @@ -350,9 +340,9 @@ To produce multiple expressions joined by AND, the :meth:`_sql.Select.where` method may be invoked any number of times:: >>> print( - ... select(address_table.c.email_address). - ... where(user_table.c.name == 'squidward'). - ... where(address_table.c.user_id == user_table.c.id) + ... select(address_table.c.email_address) + ... .where(user_table.c.name == "squidward") + ... .where(address_table.c.user_id == user_table.c.id) ... 
) {opensql}SELECT address.email_address FROM address, user_account @@ -362,10 +352,8 @@ A single call to :meth:`_sql.Select.where` also accepts multiple expressions with the same effect:: >>> print( - ... select(address_table.c.email_address). - ... where( - ... user_table.c.name == 'squidward', - ... address_table.c.user_id == user_table.c.id + ... select(address_table.c.email_address).where( + ... user_table.c.name == "squidward", address_table.c.user_id == user_table.c.id ... ) ... ) {opensql}SELECT address.email_address @@ -378,11 +366,10 @@ of ORM entities:: >>> from sqlalchemy import and_, or_ >>> print( - ... select(Address.email_address). - ... where( + ... select(Address.email_address).where( ... and_( - ... or_(User.name == 'squidward', User.name == 'sandy'), - ... Address.user_id == User.id + ... or_(User.name == "squidward", User.name == "sandy"), + ... Address.user_id == User.id, ... ) ... ) ... ) @@ -396,9 +383,7 @@ popular method known as :meth:`_sql.Select.filter_by` which accepts keyword arguments that match to column keys or ORM attribute names. It will filter against the leftmost FROM clause or the last entity joined:: - >>> print( - ... select(User).filter_by(name='spongebob', fullname='Spongebob Squarepants') - ... ) + >>> print(select(User).filter_by(name="spongebob", fullname="Spongebob Squarepants")) {opensql}SELECT user_account.id, user_account.name, user_account.fullname FROM user_account WHERE user_account.name = :name_1 AND user_account.fullname = :fullname_1 @@ -440,8 +425,9 @@ method, which allows us to indicate the left and right side of the JOIN explicitly:: >>> print( - ... select(user_table.c.name, address_table.c.email_address). - ... join_from(user_table, address_table) + ... select(user_table.c.name, address_table.c.email_address).join_from( + ... user_table, address_table + ... ) ... 
) {opensql}SELECT user_account.name, address.email_address FROM user_account JOIN address ON user_account.id = address.user_id @@ -450,10 +436,7 @@ explicitly:: The other is the the :meth:`_sql.Select.join` method, which indicates only the right side of the JOIN, the left hand-side is inferred:: - >>> print( - ... select(user_table.c.name, address_table.c.email_address). - ... join(address_table) - ... ) + >>> print(select(user_table.c.name, address_table.c.email_address).join(address_table)) {opensql}SELECT user_account.name, address.email_address FROM user_account JOIN address ON user_account.id = address.user_id @@ -470,10 +453,7 @@ where we establish ``user_table`` as the first element in the FROM clause and :meth:`_sql.Select.join` to establish ``address_table`` as the second:: - >>> print( - ... select(address_table.c.email_address). - ... select_from(user_table).join(address_table) - ... ) + >>> print(select(address_table.c.email_address).select_from(user_table).join(address_table)) {opensql}SELECT address.email_address FROM user_account JOIN address ON user_account.id = address.user_id @@ -484,9 +464,7 @@ FROM clause. For example, to SELECT from the common SQL expression produce the SQL ``count()`` function:: >>> from sqlalchemy import func - >>> print ( - ... select(func.count('*')).select_from(user_table) - ... ) + >>> print(select(func.count("*")).select_from(user_table)) {opensql}SELECT count(:count_2) AS count_1 FROM user_account @@ -515,9 +493,9 @@ accept an additional argument for the ON clause, which is stated using the same SQL Expression mechanics as we saw about in :ref:`tutorial_select_where_clause`:: >>> print( - ... select(address_table.c.email_address). - ... select_from(user_table). - ... join(address_table, user_table.c.id == address_table.c.user_id) + ... select(address_table.c.email_address) + ... .select_from(user_table) + ... .join(address_table, user_table.c.id == address_table.c.user_id) ... 
) {opensql}SELECT address.email_address FROM user_account JOIN address ON user_account.id = address.user_id @@ -539,15 +517,11 @@ accept keyword arguments :paramref:`_sql.Select.join.isouter` and :paramref:`_sql.Select.join.full` which will render LEFT OUTER JOIN and FULL OUTER JOIN, respectively:: - >>> print( - ... select(user_table).join(address_table, isouter=True) - ... ) + >>> print(select(user_table).join(address_table, isouter=True)) {opensql}SELECT user_account.id, user_account.name, user_account.fullname FROM user_account LEFT OUTER JOIN address ON user_account.id = address.user_id{stop} - >>> print( - ... select(user_table).join(address_table, full=True) - ... ) + >>> print(select(user_table).join(address_table, full=True)) {opensql}SELECT user_account.id, user_account.name, user_account.fullname FROM user_account FULL OUTER JOIN address ON user_account.id = address.user_id{stop} @@ -644,10 +618,10 @@ than one address: >>> with engine.connect() as conn: ... result = conn.execute( - ... select(User.name, func.count(Address.id).label("count")). - ... join(Address). - ... group_by(User.name). - ... having(func.count(Address.id) > 1) + ... select(User.name, func.count(Address.id).label("count")) + ... .join(Address) + ... .group_by(User.name) + ... .having(func.count(Address.id) > 1) ... ) ... print(result.all()) {opensql}BEGIN (implicit) @@ -677,10 +651,11 @@ error if no match is found. The unary modifiers .. sourcecode:: pycon+sql >>> from sqlalchemy import func, desc - >>> stmt = select( - ... Address.user_id, - ... func.count(Address.id).label('num_addresses')).\ - ... group_by("user_id").order_by("user_id", desc("num_addresses")) + >>> stmt = ( + ... select(Address.user_id, func.count(Address.id).label("num_addresses")) + ... .group_by("user_id") + ... .order_by("user_id", desc("num_addresses")) + ... 
) >>> print(stmt) {opensql}SELECT address.user_id, count(address.id) AS num_addresses FROM address GROUP BY address.user_id ORDER BY address.user_id, num_addresses DESC @@ -707,8 +682,9 @@ below for example returns all unique pairs of user names:: >>> user_alias_1 = user_table.alias() >>> user_alias_2 = user_table.alias() >>> print( - ... select(user_alias_1.c.name, user_alias_2.c.name). - ... join_from(user_alias_1, user_alias_2, user_alias_1.c.id > user_alias_2.c.id) + ... select(user_alias_1.c.name, user_alias_2.c.name).join_from( + ... user_alias_1, user_alias_2, user_alias_1.c.id > user_alias_2.c.id + ... ) ... ) {opensql}SELECT user_account_1.name, user_account_2.name AS name_1 FROM user_account AS user_account_1 @@ -730,11 +706,11 @@ while maintaining ORM functionality. The SELECT below selects from the >>> address_alias_1 = aliased(Address) >>> address_alias_2 = aliased(Address) >>> print( - ... select(User). - ... join_from(User, address_alias_1). - ... where(address_alias_1.email_address == 'patrick@aol.com'). - ... join_from(User, address_alias_2). - ... where(address_alias_2.email_address == 'patrick@gmail.com') + ... select(User) + ... .join_from(User, address_alias_1) + ... .where(address_alias_1.email_address == "patrick@aol.com") + ... .join_from(User, address_alias_2) + ... .where(address_alias_2.email_address == "patrick@gmail.com") ... ) {opensql}SELECT user_account.id, user_account.name, user_account.fullname FROM user_account @@ -775,10 +751,11 @@ We can construct a :class:`_sql.Subquery` that will select an aggregate count of rows from the ``address`` table (aggregate functions and GROUP BY were introduced previously at :ref:`tutorial_group_by_w_aggregates`): - >>> subq = select( - ... func.count(address_table.c.id).label("count"), - ... address_table.c.user_id - ... ).group_by(address_table.c.user_id).subquery() + >>> subq = ( + ... select(func.count(address_table.c.id).label("count"), address_table.c.user_id) + ... 
.group_by(address_table.c.user_id) + ... .subquery() + ... ) Stringifying the subquery by itself without it being embedded inside of another :class:`_sql.Select` or other statement produces the plain SELECT statement @@ -804,11 +781,9 @@ With a selection of rows contained within the ``subq`` object, we can apply the object to a larger :class:`_sql.Select` that will join the data to the ``user_account`` table:: - >>> stmt = select( - ... user_table.c.name, - ... user_table.c.fullname, - ... subq.c.count - ... ).join_from(user_table, subq) + >>> stmt = select(user_table.c.name, user_table.c.fullname, subq.c.count).join_from( + ... user_table, subq + ... ) >>> print(stmt) {opensql}SELECT user_account.name, user_account.fullname, anon_1.count @@ -834,16 +809,15 @@ the invocation of the :meth:`_sql.Select.subquery` method to use element in the same way, but the SQL rendered is the very different common table expression syntax:: - >>> subq = select( - ... func.count(address_table.c.id).label("count"), - ... address_table.c.user_id - ... ).group_by(address_table.c.user_id).cte() + >>> subq = ( + ... select(func.count(address_table.c.id).label("count"), address_table.c.user_id) + ... .group_by(address_table.c.user_id) + ... .cte() + ... ) - >>> stmt = select( - ... user_table.c.name, - ... user_table.c.fullname, - ... subq.c.count - ... ).join_from(user_table, subq) + >>> stmt = select(user_table.c.name, user_table.c.fullname, subq.c.count).join_from( + ... user_table, subq + ... ) >>> print(stmt) {opensql}WITH anon_1 AS @@ -894,9 +868,13 @@ each ``Address`` object ultimately came from a subquery against the .. sourcecode:: python+sql - >>> subq = select(Address).where(~Address.email_address.like('%@aol.com')).subquery() + >>> subq = select(Address).where(~Address.email_address.like("%@aol.com")).subquery() >>> address_subq = aliased(Address, subq) - >>> stmt = select(User, address_subq).join_from(User, address_subq).order_by(User.id, address_subq.id) + >>> stmt = ( + ... 
select(User, address_subq) + ... .join_from(User, address_subq) + ... .order_by(User.id, address_subq.id) + ... ) >>> with Session(engine) as session: ... for user, address in session.execute(stmt): ... print(f"{user} {address}") @@ -919,9 +897,13 @@ Another example follows, which is exactly the same except it makes use of the .. sourcecode:: python+sql - >>> cte_obj = select(Address).where(~Address.email_address.like('%@aol.com')).cte() + >>> cte_obj = select(Address).where(~Address.email_address.like("%@aol.com")).cte() >>> address_cte = aliased(Address, cte_obj) - >>> stmt = select(User, address_cte).join_from(User, address_cte).order_by(User.id, address_cte.id) + >>> stmt = ( + ... select(User, address_cte) + ... .join_from(User, address_cte) + ... .order_by(User.id, address_cte.id) + ... ) >>> with Session(engine) as session: ... for user, address in session.execute(stmt): ... print(f"{user} {address}") @@ -968,9 +950,11 @@ subquery is indicated explicitly by making use of the :meth:`_sql.Select.scalar_ method as below. It's default string form when stringified by itself renders as an ordinary SELECT statement that is selecting from two tables:: - >>> subq = select(func.count(address_table.c.id)).\ - ... where(user_table.c.id == address_table.c.user_id).\ - ... scalar_subquery() + >>> subq = ( + ... select(func.count(address_table.c.id)) + ... .where(user_table.c.id == address_table.c.user_id) + ... .scalar_subquery() + ... ) >>> print(subq) {opensql}(SELECT count(address.id) AS count_1 FROM address, user_account @@ -1003,13 +987,13 @@ Simple correlated subqueries will usually do the right thing that's desired. However, in the case where the correlation is ambiguous, SQLAlchemy will let us know that more clarity is needed:: - >>> stmt = select( - ... user_table.c.name, - ... address_table.c.email_address, - ... subq.label("address_count") - ... ).\ - ... join_from(user_table, address_table).\ - ... 
order_by(user_table.c.id, address_table.c.id) + >>> stmt = ( + ... select( + ... user_table.c.name, address_table.c.email_address, subq.label("address_count") + ... ) + ... .join_from(user_table, address_table) + ... .order_by(user_table.c.id, address_table.c.id) + ... ) >>> print(stmt) Traceback (most recent call last): ... @@ -1021,9 +1005,12 @@ To specify that the ``user_table`` is the one we seek to correlate we specify this using the :meth:`_sql.ScalarSelect.correlate` or :meth:`_sql.ScalarSelect.correlate_except` methods:: - >>> subq = select(func.count(address_table.c.id)).\ - ... where(user_table.c.id == address_table.c.user_id).\ - ... scalar_subquery().correlate(user_table) + >>> subq = ( + ... select(func.count(address_table.c.id)) + ... .where(user_table.c.id == address_table.c.user_id) + ... .scalar_subquery() + ... .correlate(user_table) + ... ) The statement then can return the data for this column like any other: @@ -1034,10 +1021,10 @@ The statement then can return the data for this column like any other: ... select( ... user_table.c.name, ... address_table.c.email_address, - ... subq.label("address_count") - ... ). - ... join_from(user_table, address_table). - ... order_by(user_table.c.id, address_table.c.id) + ... subq.label("address_count"), + ... ) + ... .join_from(user_table, address_table) + ... .order_by(user_table.c.id, address_table.c.id) ... ) ... print(result.all()) {opensql}BEGIN (implicit) @@ -1078,21 +1065,19 @@ use of LATERAL, selecting the "user account / count of email address" data as was discussed in the previous section:: >>> subq = ( - ... select( - ... func.count(address_table.c.id).label("address_count"), - ... address_table.c.email_address, - ... address_table.c.user_id, - ... ). - ... where(user_table.c.id == address_table.c.user_id). - ... lateral() + ... select( + ... func.count(address_table.c.id).label("address_count"), + ... address_table.c.email_address, + ... address_table.c.user_id, + ... ) + ... 
.where(user_table.c.id == address_table.c.user_id) + ... .lateral() + ... ) + >>> stmt = ( + ... select(user_table.c.name, subq.c.address_count, subq.c.email_address) + ... .join_from(user_table, subq) + ... .order_by(user_table.c.id, subq.c.email_address) ... ) - >>> stmt = select( - ... user_table.c.name, - ... subq.c.address_count, - ... subq.c.email_address - ... ).\ - ... join_from(user_table, subq).\ - ... order_by(user_table.c.id, subq.c.email_address) >>> print(stmt) {opensql}SELECT user_account.name, anon_1.address_count, anon_1.email_address FROM user_account @@ -1143,8 +1128,8 @@ that it has fewer methods. The :class:`_sql.CompoundSelect` produced by :meth:`_engine.Connection.execute`:: >>> from sqlalchemy import union_all - >>> stmt1 = select(user_table).where(user_table.c.name == 'sandy') - >>> stmt2 = select(user_table).where(user_table.c.name == 'spongebob') + >>> stmt1 = select(user_table).where(user_table.c.name == "sandy") + >>> stmt2 = select(user_table).where(user_table.c.name == "spongebob") >>> u = union_all(stmt1, stmt2) >>> with engine.connect() as conn: ... result = conn.execute(u) @@ -1167,9 +1152,9 @@ collection that may be referred towards in an enclosing :func:`_sql.select`:: >>> u_subq = u.subquery() >>> stmt = ( - ... select(u_subq.c.name, address_table.c.email_address). - ... join_from(address_table, u_subq). - ... order_by(u_subq.c.name, address_table.c.email_address) + ... select(u_subq.c.name, address_table.c.email_address) + ... .join_from(address_table, u_subq) + ... .order_by(u_subq.c.name, address_table.c.email_address) ... ) >>> with engine.connect() as conn: ... 
result = conn.execute(stmt) @@ -1204,8 +1189,8 @@ object that represents the SELECT / UNION / etc statement we want to execute; this statement should be composed against the target ORM entities or their underlying mapped :class:`_schema.Table` objects:: - >>> stmt1 = select(User).where(User.name == 'sandy') - >>> stmt2 = select(User).where(User.name == 'spongebob') + >>> stmt1 = select(User).where(User.name == "sandy") + >>> stmt2 = select(User).where(User.name == "spongebob") >>> u = union_all(stmt1, stmt2) For a simple SELECT with UNION that is not already nested inside of a @@ -1279,15 +1264,13 @@ can return ``user_account`` rows that have more than one related row in .. sourcecode:: pycon+sql >>> subq = ( - ... select(func.count(address_table.c.id)). - ... where(user_table.c.id == address_table.c.user_id). - ... group_by(address_table.c.user_id). - ... having(func.count(address_table.c.id) > 1) + ... select(func.count(address_table.c.id)) + ... .where(user_table.c.id == address_table.c.user_id) + ... .group_by(address_table.c.user_id) + ... .having(func.count(address_table.c.id) > 1) ... ).exists() >>> with engine.connect() as conn: - ... result = conn.execute( - ... select(user_table.c.name).where(subq) - ... ) + ... result = conn.execute(select(user_table.c.name).where(subq)) ... print(result.all()) {opensql}BEGIN (implicit) SELECT user_account.name @@ -1309,13 +1292,10 @@ clause: .. sourcecode:: pycon+sql >>> subq = ( - ... select(address_table.c.id). - ... where(user_table.c.id == address_table.c.user_id) + ... select(address_table.c.id).where(user_table.c.id == address_table.c.user_id) ... ).exists() >>> with engine.connect() as conn: - ... result = conn.execute( - ... select(user_table.c.name).where(~subq) - ... ) + ... result = conn.execute(select(user_table.c.name).where(~subq)) ... print(result.all()) {opensql}BEGIN (implicit) SELECT user_account.name @@ -1571,11 +1551,15 @@ number the email addresses of individual users: .. 
sourcecode:: pycon+sql - >>> stmt = select( - ... func.row_number().over(partition_by=user_table.c.name), - ... user_table.c.name, - ... address_table.c.email_address - ... ).select_from(user_table).join(address_table) + >>> stmt = ( + ... select( + ... func.row_number().over(partition_by=user_table.c.name), + ... user_table.c.name, + ... address_table.c.email_address, + ... ) + ... .select_from(user_table) + ... .join(address_table) + ... ) >>> with engine.connect() as conn: # doctest:+SKIP ... result = conn.execute(stmt) ... print(result.all()) @@ -1593,10 +1577,15 @@ We also may make use of the ``ORDER BY`` clause using :paramref:`_functions.Func .. sourcecode:: pycon+sql - >>> stmt = select( - ... func.count().over(order_by=user_table.c.name), - ... user_table.c.name, - ... address_table.c.email_address).select_from(user_table).join(address_table) + >>> stmt = ( + ... select( + ... func.count().over(order_by=user_table.c.name), + ... user_table.c.name, + ... address_table.c.email_address, + ... ) + ... .select_from(user_table) + ... .join(address_table) + ... ) >>> with engine.connect() as conn: # doctest:+SKIP ... result = conn.execute(stmt) ... print(result.all()) @@ -1635,7 +1624,7 @@ method:: >>> print( ... func.unnest( - ... func.percentile_disc([0.25,0.5,0.75,1]).within_group(user_table.c.name) + ... func.percentile_disc([0.25, 0.5, 0.75, 1]).within_group(user_table.c.name) ... ) ... ) unnest(percentile_disc(:percentile_disc_1) WITHIN GROUP (ORDER BY user_account.name)) @@ -1644,10 +1633,16 @@ method:: particular subset of rows compared to the total range of rows returned, available using the :meth:`_functions.FunctionElement.filter` method:: - >>> stmt = select( - ... func.count(address_table.c.email_address).filter(user_table.c.name == 'sandy'), - ... func.count(address_table.c.email_address).filter(user_table.c.name == 'spongebob') - ... ).select_from(user_table).join(address_table) + >>> stmt = ( + ... select( + ... 
func.count(address_table.c.email_address).filter(user_table.c.name == "sandy"), + ... func.count(address_table.c.email_address).filter( + ... user_table.c.name == "spongebob" + ... ), + ... ) + ... .select_from(user_table) + ... .join(address_table) + ... ) >>> with engine.connect() as conn: # doctest:+SKIP ... result = conn.execute(stmt) ... print(result.all()) @@ -1811,11 +1806,7 @@ string into one of MySQL's JSON functions: >>> from sqlalchemy import JSON >>> from sqlalchemy import type_coerce >>> from sqlalchemy.dialects import mysql - >>> s = select( - ... type_coerce( - ... {'some_key': {'foo': 'bar'}}, JSON - ... )['some_key'] - ... ) + >>> s = select(type_coerce({"some_key": {"foo": "bar"}}, JSON)["some_key"]) >>> print(s.compile(dialect=mysql.dialect())) SELECT JSON_EXTRACT(%s, %s) AS anon_1 diff --git a/doc/build/tutorial/data_update.rst b/doc/build/tutorial/data_update.rst index 1091bccf64..78c2e60f63 100644 --- a/doc/build/tutorial/data_update.rst +++ b/doc/build/tutorial/data_update.rst @@ -56,8 +56,9 @@ A basic UPDATE looks like:: >>> from sqlalchemy import update >>> stmt = ( - ... update(user_table).where(user_table.c.name == 'patrick'). - ... values(fullname='Patrick the Star') + ... update(user_table) + ... .where(user_table.c.name == "patrick") + ... .values(fullname="Patrick the Star") ... ) >>> print(stmt) {opensql}UPDATE user_account SET fullname=:fullname WHERE user_account.name = :name_1 @@ -70,10 +71,7 @@ keyword arguments. UPDATE supports all the major SQL forms of UPDATE, including updates against expressions, where we can make use of :class:`_schema.Column` expressions:: - >>> stmt = ( - ... update(user_table). - ... values(fullname="Username: " + user_table.c.name) - ... 
) + >>> stmt = update(user_table).values(fullname="Username: " + user_table.c.name) >>> print(stmt) {opensql}UPDATE user_account SET fullname=(:name_1 || user_account.name) @@ -86,19 +84,19 @@ that literal values would normally go: >>> from sqlalchemy import bindparam >>> stmt = ( - ... update(user_table). - ... where(user_table.c.name == bindparam('oldname')). - ... values(name=bindparam('newname')) + ... update(user_table) + ... .where(user_table.c.name == bindparam("oldname")) + ... .values(name=bindparam("newname")) ... ) >>> with engine.begin() as conn: - ... conn.execute( - ... stmt, - ... [ - ... {'oldname':'jack', 'newname':'ed'}, - ... {'oldname':'wendy', 'newname':'mary'}, - ... {'oldname':'jim', 'newname':'jake'}, - ... ] - ... ) + ... conn.execute( + ... stmt, + ... [ + ... {"oldname": "jack", "newname": "ed"}, + ... {"oldname": "wendy", "newname": "mary"}, + ... {"oldname": "jim", "newname": "jake"}, + ... ], + ... ) {opensql}BEGIN (implicit) UPDATE user_account SET name=? WHERE user_account.name = ? [...] (('ed', 'jack'), ('mary', 'wendy'), ('jake', 'jim')) @@ -118,11 +116,11 @@ An UPDATE statement can make use of rows in other tables by using a anywhere a column expression might be placed:: >>> scalar_subq = ( - ... select(address_table.c.email_address). - ... where(address_table.c.user_id == user_table.c.id). - ... order_by(address_table.c.id). - ... limit(1). - ... scalar_subquery() + ... select(address_table.c.email_address) + ... .where(address_table.c.user_id == user_table.c.id) + ... .order_by(address_table.c.id) + ... .limit(1) + ... .scalar_subquery() ... ) >>> update_stmt = update(user_table).values(fullname=scalar_subq) >>> print(update_stmt) @@ -143,11 +141,11 @@ syntax will be generated implicitly when additional tables are located in the WHERE clause of the statement:: >>> update_stmt = ( - ... update(user_table). - ... where(user_table.c.id == address_table.c.user_id). - ... where(address_table.c.email_address == 'patrick@aol.com'). 
- ... values(fullname='Pat') - ... ) + ... update(user_table) + ... .where(user_table.c.id == address_table.c.user_id) + ... .where(address_table.c.email_address == "patrick@aol.com") + ... .values(fullname="Pat") + ... ) >>> print(update_stmt) {opensql}UPDATE user_account SET fullname=:fullname FROM address WHERE user_account.id = address.user_id AND address.email_address = :email_address_1 @@ -158,16 +156,13 @@ requires we refer to :class:`_schema.Table` objects in the VALUES clause in order to refer to additional tables:: >>> update_stmt = ( - ... update(user_table). - ... where(user_table.c.id == address_table.c.user_id). - ... where(address_table.c.email_address == 'patrick@aol.com'). - ... values( - ... { - ... user_table.c.fullname: "Pat", - ... address_table.c.email_address: "pat@aol.com" - ... } - ... ) - ... ) + ... update(user_table) + ... .where(user_table.c.id == address_table.c.user_id) + ... .where(address_table.c.email_address == "patrick@aol.com") + ... .values( + ... {user_table.c.fullname: "Pat", address_table.c.email_address: "pat@aol.com"} + ... ) + ... ) >>> from sqlalchemy.dialects import mysql >>> print(update_stmt.compile(dialect=mysql.dialect())) {opensql}UPDATE user_account, address @@ -185,12 +180,8 @@ of an UPDATE actually impacts the evaluation of each expression. For this use case, the :meth:`_sql.Update.ordered_values` method accepts a sequence of tuples so that this order may be controlled [2]_:: - >>> update_stmt = ( - ... update(some_table). - ... ordered_values( - ... (some_table.c.y, 20), - ... (some_table.c.x, some_table.c.y + 10) - ... ) + >>> update_stmt = update(some_table).ordered_values( + ... (some_table.c.y, 20), (some_table.c.x, some_table.c.y + 10) ... ) >>> print(update_stmt) {opensql}UPDATE some_table SET y=:y, x=(some_table.y + :y_1) @@ -220,7 +211,7 @@ allowing for a RETURNING variant on some database backends. 
:: >>> from sqlalchemy import delete - >>> stmt = delete(user_table).where(user_table.c.name == 'patrick') + >>> stmt = delete(user_table).where(user_table.c.name == "patrick") >>> print(stmt) {opensql}DELETE FROM user_account WHERE user_account.name = :name_1 @@ -235,10 +226,10 @@ subqueries in the WHERE clause as well as backend-specific multiple table syntaxes, such as ``DELETE FROM..USING`` on MySQL:: >>> delete_stmt = ( - ... delete(user_table). - ... where(user_table.c.id == address_table.c.user_id). - ... where(address_table.c.email_address == 'patrick@aol.com') - ... ) + ... delete(user_table) + ... .where(user_table.c.id == address_table.c.user_id) + ... .where(address_table.c.email_address == "patrick@aol.com") + ... ) >>> from sqlalchemy.dialects import mysql >>> print(delete_stmt.compile(dialect=mysql.dialect())) {opensql}DELETE FROM user_account USING user_account, address @@ -259,9 +250,9 @@ is available from the :attr:`_engine.CursorResult.rowcount` attribute: >>> with engine.begin() as conn: ... result = conn.execute( - ... update(user_table). - ... values(fullname="Patrick McStar"). - ... where(user_table.c.name == 'patrick') + ... update(user_table) + ... .values(fullname="Patrick McStar") + ... .where(user_table.c.name == "patrick") ... ) ... print(result.rowcount) {opensql}BEGIN (implicit) @@ -316,9 +307,10 @@ be iterated:: >>> update_stmt = ( - ... update(user_table).where(user_table.c.name == 'patrick'). - ... values(fullname='Patrick the Star'). - ... returning(user_table.c.id, user_table.c.name) + ... update(user_table) + ... .where(user_table.c.name == "patrick") + ... .values(fullname="Patrick the Star") + ... .returning(user_table.c.id, user_table.c.name) ... ) >>> print(update_stmt) {opensql}UPDATE user_account SET fullname=:fullname @@ -326,8 +318,9 @@ be iterated:: RETURNING user_account.id, user_account.name{stop} >>> delete_stmt = ( - ... delete(user_table).where(user_table.c.name == 'patrick'). - ... 
returning(user_table.c.id, user_table.c.name) + ... delete(user_table) + ... .where(user_table.c.name == "patrick") + ... .returning(user_table.c.id, user_table.c.name) ... ) >>> print(delete_stmt) {opensql}DELETE FROM user_account diff --git a/doc/build/tutorial/dbapi_transactions.rst b/doc/build/tutorial/dbapi_transactions.rst index 6492f5f0ec..cf93534e4f 100644 --- a/doc/build/tutorial/dbapi_transactions.rst +++ b/doc/build/tutorial/dbapi_transactions.rst @@ -107,7 +107,7 @@ where we acquired the :class:`_future.Connection` object: ... conn.execute(text("CREATE TABLE some_table (x int, y int)")) ... conn.execute( ... text("INSERT INTO some_table (x, y) VALUES (:x, :y)"), - ... [{"x": 1, "y": 1}, {"x": 2, "y": 4}] + ... [{"x": 1, "y": 1}, {"x": 2, "y": 4}], ... ) ... conn.commit() {opensql}BEGIN (implicit) @@ -145,7 +145,7 @@ may be referred towards as **begin once**: >>> with engine.begin() as conn: ... conn.execute( ... text("INSERT INTO some_table (x, y) VALUES (:x, :y)"), - ... [{"x": 6, "y": 8}, {"x": 9, "y": 10}] + ... [{"x": 6, "y": 8}, {"x": 9, "y": 10}], ... ) {opensql}BEGIN (implicit) INSERT INTO some_table (x, y) VALUES (?, ?) @@ -286,8 +286,8 @@ Below we illustrate a variety of ways to access rows. result = conn.execute(text("select x, y from some_table")) for dict_row in result.mappings(): - x = dict_row['x'] - y = dict_row['y'] + x = dict_row["x"] + y = dict_row["y"] .. @@ -316,12 +316,9 @@ construct accepts these using a colon format "``:y``". The actual value for .. sourcecode:: pycon+sql >>> with engine.connect() as conn: - ... result = conn.execute( - ... text("SELECT x, y FROM some_table WHERE y > :y"), - ... {"y": 2} - ... ) + ... result = conn.execute(text("SELECT x, y FROM some_table WHERE y > :y"), {"y": 2}) ... for row in result: - ... print(f"x: {row.x} y: {row.y}") + ... print(f"x: {row.x} y: {row.y}") {opensql}BEGIN (implicit) SELECT x, y FROM some_table WHERE y > ? [...] 
(2,) @@ -370,7 +367,7 @@ be invoked against each parameter set individually: >>> with engine.connect() as conn: ... conn.execute( ... text("INSERT INTO some_table (x, y) VALUES (:x, :y)"), - ... [{"x": 11, "y": 12}, {"x": 13, "y": 14}] + ... [{"x": 11, "y": 12}, {"x": 13, "y": 14}], ... ) ... conn.commit() {opensql}BEGIN (implicit) @@ -436,7 +433,7 @@ a context manager: >>> with Session(engine) as session: ... result = session.execute(stmt, {"y": 6}) ... for row in result: - ... print(f"x: {row.x} y: {row.y}") + ... print(f"x: {row.x} y: {row.y}") {opensql}BEGIN (implicit) SELECT x, y FROM some_table WHERE y > ? ORDER BY x, y [...] (6,){stop} @@ -462,7 +459,7 @@ our data: >>> with Session(engine) as session: ... result = session.execute( ... text("UPDATE some_table SET y=:y WHERE x=:x"), - ... [{"x": 9, "y":11}, {"x": 13, "y": 15}] + ... [{"x": 9, "y": 11}, {"x": 13, "y": 15}], ... ) ... session.commit() {opensql}BEGIN (implicit) diff --git a/doc/build/tutorial/metadata.rst b/doc/build/tutorial/metadata.rst index 6444ed692e..215d9fd8b8 100644 --- a/doc/build/tutorial/metadata.rst +++ b/doc/build/tutorial/metadata.rst @@ -76,9 +76,9 @@ that will be how we will refer to the table in application code:: >>> user_table = Table( ... "user_account", ... metadata_obj, - ... Column('id', Integer, primary_key=True), - ... Column('name', String(30)), - ... Column('fullname', String) + ... Column("id", Integer, primary_key=True), + ... Column("name", String(30)), + ... Column("fullname", String), ... ) We can observe that the above :class:`_schema.Table` construct looks a lot like @@ -151,9 +151,9 @@ table:: >>> address_table = Table( ... "address", ... metadata_obj, - ... Column('id', Integer, primary_key=True), - ... Column('user_id', ForeignKey('user_account.id'), nullable=False), - ... Column('email_address', String, nullable=False) + ... Column("id", Integer, primary_key=True), + ... Column("user_id", ForeignKey("user_account.id"), nullable=False), + ... 
Column("email_address", String, nullable=False), ... ) The table above also features a third kind of constraint, which in SQL is the @@ -297,6 +297,7 @@ known as the **declarative base**. We get a new declarative base from the :func:`_orm.declarative_base` function:: from sqlalchemy.orm import declarative_base + Base = declarative_base() .. @@ -313,7 +314,7 @@ for the ``user`` and ``address`` table in terms of new classes ``User`` and >>> from sqlalchemy.orm import relationship >>> class User(Base): - ... __tablename__ = 'user_account' + ... __tablename__ = "user_account" ... ... id = Column(Integer, primary_key=True) ... name = Column(String(30)) @@ -322,14 +323,14 @@ for the ``user`` and ``address`` table in terms of new classes ``User`` and ... addresses = relationship("Address", back_populates="user") ... ... def __repr__(self): - ... return f"User(id={self.id!r}, name={self.name!r}, fullname={self.fullname!r})" + ... return f"User(id={self.id!r}, name={self.name!r}, fullname={self.fullname!r})" >>> class Address(Base): - ... __tablename__ = 'address' + ... __tablename__ = "address" ... ... id = Column(Integer, primary_key=True) ... email_address = Column(String, nullable=False) - ... user_id = Column(Integer, ForeignKey('user_account.id')) + ... user_id = Column(Integer, ForeignKey("user_account.id")) ... ... user = relationship("User", back_populates="addresses") ... 
@@ -428,7 +429,6 @@ using :meth:`_schema.MetaData.create_all`:: # declarative base Base.metadata.create_all(engine) - Combining Core Table Declarations with ORM Declarative ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -446,6 +446,7 @@ than having the declarative process generate it:: mapper_registry = registry() Base = mapper_registry.generate_base() + class User(Base): __table__ = user_table @@ -454,6 +455,7 @@ than having the declarative process generate it:: def __repr__(self): return f"User({self.name!r}, {self.fullname!r})" + class Address(Base): __table__ = address_table diff --git a/doc/build/tutorial/orm_data_manipulation.rst b/doc/build/tutorial/orm_data_manipulation.rst index b0b67f53c4..e8bdb3d4c4 100644 --- a/doc/build/tutorial/orm_data_manipulation.rst +++ b/doc/build/tutorial/orm_data_manipulation.rst @@ -290,9 +290,7 @@ from this row and we will get our updated value back: .. sourcecode:: pycon+sql - >>> sandy_fullname = session.execute( - ... select(User.fullname).where(User.id == 2) - ... ).scalar_one() + >>> sandy_fullname = session.execute(select(User.fullname).where(User.id == 2)).scalar_one() {opensql}UPDATE user_account SET fullname=? WHERE user_account.id = ? [...] ('Sandy Squirrel', 2) SELECT user_account.fullname @@ -336,9 +334,9 @@ a value in the ``User.name`` column: .. sourcecode:: pycon+sql >>> session.execute( - ... update(User). - ... where(User.name == "sandy"). - ... values(fullname="Sandy Squirrel Extraordinaire") + ... update(User) + ... .where(User.name == "sandy") + ... .values(fullname="Sandy Squirrel Extraordinaire") ... ) {opensql}UPDATE user_account SET fullname=? WHERE user_account.name = ? [...] ('Sandy Squirrel Extraordinaire', 'sandy'){stop} @@ -525,7 +523,7 @@ and of course the database data is present again as well: .. 
sourcecode:: pycon+sql - {sql}>>> session.execute(select(User).where(User.name == 'patrick')).scalar_one() is patrick + {sql}>>> session.execute(select(User).where(User.name == "patrick")).scalar_one() is patrick SELECT user_account.id, user_account.name, user_account.fullname FROM user_account WHERE user_account.name = ? diff --git a/doc/build/tutorial/orm_related_objects.rst b/doc/build/tutorial/orm_related_objects.rst index 2eacc39e36..02ff2c1722 100644 --- a/doc/build/tutorial/orm_related_objects.rst +++ b/doc/build/tutorial/orm_related_objects.rst @@ -25,8 +25,10 @@ and other directives: .. sourcecode:: python from sqlalchemy.orm import relationship + + class User(Base): - __tablename__ = 'user_account' + __tablename__ = "user_account" # ... Column mappings @@ -34,13 +36,12 @@ and other directives: class Address(Base): - __tablename__ = 'address' + __tablename__ = "address" # ... Column mappings user = relationship("User", back_populates="addresses") - Above, the ``User`` class now has an attribute ``User.addresses`` and the ``Address`` class has an attribute ``Address.user``. The :func:`_orm.relationship` construct will be used to inspect the table @@ -69,7 +70,7 @@ We can start by illustrating what :func:`_orm.relationship` does to instances of objects. If we make a new ``User`` object, we can note that there is a Python list when we access the ``.addresses`` element:: - >>> u1 = User(name='pkrabs', fullname='Pearl Krabs') + >>> u1 = User(name="pkrabs", fullname="Pearl Krabs") >>> u1.addresses [] @@ -301,11 +302,7 @@ corresponding to the :func:`_orm.relationship` may be passed as the **single argument** to :meth:`_sql.Select.join`, where it serves to indicate both the right side of the join as well as the ON clause at once:: - >>> print( - ... select(Address.email_address). - ... select_from(User). - ... join(User.addresses) - ... 
) + >>> print(select(Address.email_address).select_from(User).join(User.addresses)) {opensql}SELECT address.email_address FROM user_account JOIN address ON user_account.id = address.user_id @@ -317,10 +314,7 @@ ON clause, it works because of the :class:`_schema.ForeignKeyConstraint` between the two mapped :class:`_schema.Table` objects, not because of the :func:`_orm.relationship` objects on the ``User`` and ``Address`` classes:: - >>> print( - ... select(Address.email_address). - ... join_from(User, Address) - ... ) + >>> print(select(Address.email_address).join_from(User, Address)) {opensql}SELECT address.email_address FROM user_account JOIN address ON user_account.id = address.user_id @@ -338,12 +332,12 @@ demonstrate we will construct the same join illustrated at :ref:`tutorial_orm_en using the :func:`_orm.relationship` attributes to join instead:: >>> print( - ... select(User). - ... join(User.addresses.of_type(address_alias_1)). - ... where(address_alias_1.email_address == 'patrick@aol.com'). - ... join(User.addresses.of_type(address_alias_2)). - ... where(address_alias_2.email_address == 'patrick@gmail.com') - ... ) + ... select(User) + ... .join(User.addresses.of_type(address_alias_1)) + ... .where(address_alias_1.email_address == "patrick@aol.com") + ... .join(User.addresses.of_type(address_alias_2)) + ... .where(address_alias_2.email_address == "patrick@gmail.com") + ... ) {opensql}SELECT user_account.id, user_account.name, user_account.fullname FROM user_account JOIN address AS address_1 ON user_account.id = address_1.user_id @@ -356,10 +350,7 @@ aliased entity, the attribute is available from the :func:`_orm.aliased` construct directly:: >>> user_alias_1 = aliased(User) - >>> print( - ... select(user_alias_1.name). - ... join(user_alias_1.addresses) - ... 
) + >>> print(select(user_alias_1.name).join(user_alias_1.addresses)) {opensql}SELECT user_account_1.name FROM user_account AS user_account_1 JOIN address ON user_account_1.id = address.user_id @@ -381,9 +372,8 @@ email addresses: .. sourcecode:: pycon+sql - >>> stmt = ( - ... select(User.fullname). - ... join(User.addresses.and_(Address.email_address == 'pearl.krabs@gmail.com')) + >>> stmt = select(User.fullname).join( + ... User.addresses.and_(Address.email_address == "pearl.krabs@gmail.com") ... ) >>> session.execute(stmt).all() {opensql}SELECT user_account.fullname @@ -411,9 +401,8 @@ an optional WHERE criteria to limit the rows matched by the subquery: .. sourcecode:: pycon+sql - >>> stmt = ( - ... select(User.fullname). - ... where(User.addresses.any(Address.email_address == 'pearl.krabs@gmail.com')) + >>> stmt = select(User.fullname).where( + ... User.addresses.any(Address.email_address == "pearl.krabs@gmail.com") ... ) >>> session.execute(stmt).all() {opensql}SELECT user_account.fullname @@ -431,10 +420,7 @@ for ``User`` entities that have no related ``Address`` rows: .. sourcecode:: pycon+sql - >>> stmt = ( - ... select(User.fullname). - ... where(~User.addresses.any()) - ... ) + >>> stmt = select(User.fullname).where(~User.addresses.any()) >>> session.execute(stmt).all() {opensql}SELECT user_account.fullname FROM user_account @@ -451,10 +437,7 @@ which belonged to "pearl": .. sourcecode:: pycon+sql - >>> stmt = ( - ... select(Address.email_address). - ... where(Address.user.has(User.name=="pkrabs")) - ... ) + >>> stmt = select(Address.email_address).where(Address.user.has(User.name == "pkrabs")) >>> session.execute(stmt).all() {opensql}SELECT address.email_address FROM address @@ -568,8 +551,10 @@ the :paramref:`_orm.relationship.lazy` option, e.g.: .. 
sourcecode:: python from sqlalchemy.orm import relationship + + class User(Base): - __tablename__ = 'user_account' + __tablename__ = "user_account" addresses = relationship("Address", back_populates="user", lazy="selectin") @@ -611,11 +596,11 @@ related ``Address`` objects: .. sourcecode:: pycon+sql >>> from sqlalchemy.orm import selectinload - >>> stmt = ( - ... select(User).options(selectinload(User.addresses)).order_by(User.id) - ... ) + >>> stmt = select(User).options(selectinload(User.addresses)).order_by(User.id) >>> for row in session.execute(stmt): - ... print(f"{row.User.name} ({', '.join(a.email_address for a in row.User.addresses)})") + ... print( + ... f"{row.User.name} ({', '.join(a.email_address for a in row.User.addresses)})" + ... ) {opensql}SELECT user_account.id, user_account.name, user_account.fullname FROM user_account ORDER BY user_account.id [...] () @@ -655,7 +640,9 @@ as below where we know that all ``Address`` objects have an associated >>> from sqlalchemy.orm import joinedload >>> stmt = ( - ... select(Address).options(joinedload(Address.user, innerjoin=True)).order_by(Address.id) + ... select(Address) + ... .options(joinedload(Address.user, innerjoin=True)) + ... .order_by(Address.id) ... ) >>> for row in session.execute(stmt): ... print(f"{row.Address.email_address} {row.Address.user.name}") @@ -731,10 +718,11 @@ example: >>> from sqlalchemy.orm import contains_eager >>> stmt = ( - ... select(Address). - ... join(Address.user). - ... where(User.name == 'pkrabs'). - ... options(contains_eager(Address.user)).order_by(Address.id) + ... select(Address) + ... .join(Address.user) + ... .where(User.name == "pkrabs") + ... .options(contains_eager(Address.user)) + ... .order_by(Address.id) ... ) >>> for row in session.execute(stmt): ... print(f"{row.Address.email_address} {row.Address.user.name}") @@ -752,10 +740,11 @@ rows. 
If we had applied :func:`_orm.joinedload` separately, we would get a SQL query that unnecessarily joins twice:: >>> stmt = ( - ... select(Address). - ... join(Address.user). - ... where(User.name == 'pkrabs'). - ... options(joinedload(Address.user)).order_by(Address.id) + ... select(Address) + ... .join(Address.user) + ... .where(User.name == "pkrabs") + ... .options(joinedload(Address.user)) + ... .order_by(Address.id) ... ) >>> print(stmt) # SELECT has a JOIN and LEFT OUTER JOIN unnecessarily {opensql}SELECT address.id, address.email_address, address.user_id, @@ -791,19 +780,19 @@ the email addresses with the ``sqlalchemy.org`` domain, we can apply >>> from sqlalchemy.orm import selectinload >>> stmt = ( - ... select(User). - ... options( - ... selectinload( - ... User.addresses.and_( - ... ~Address.email_address.endswith("sqlalchemy.org") - ... ) - ... ) - ... ). - ... order_by(User.id). - ... execution_options(populate_existing=True) + ... select(User) + ... .options( + ... selectinload( + ... User.addresses.and_(~Address.email_address.endswith("sqlalchemy.org")) + ... ) + ... ) + ... .order_by(User.id) + ... .execution_options(populate_existing=True) ... ) >>> for row in session.execute(stmt): - ... print(f"{row.User.name} ({', '.join(a.email_address for a in row.User.addresses)})") + ... print( + ... f"{row.User.name} ({', '.join(a.email_address for a in row.User.addresses)})" + ... ) {opensql}SELECT user_account.id, user_account.name, user_account.fullname FROM user_account ORDER BY user_account.id [...] () @@ -857,7 +846,7 @@ relationship will never try to emit SQL: .. sourcecode:: python class User(Base): - __tablename__ = 'user_account' + __tablename__ = "user_account" # ... Column mappings @@ -865,13 +854,12 @@ relationship will never try to emit SQL: class Address(Base): - __tablename__ = 'address' + __tablename__ = "address" # ... 
Column mappings user = relationship("User", back_populates="addresses", lazy="raise_on_sql") - Using such a mapping, the application is blocked from lazy loading, indicating that a particular query would need to specify a loader strategy: diff --git a/tools/format_docs_code.py b/tools/format_docs_code.py new file mode 100644 index 0000000000..88e9288bc3 --- /dev/null +++ b/tools/format_docs_code.py @@ -0,0 +1,395 @@ +from argparse import ArgumentParser +from argparse import RawDescriptionHelpFormatter +from collections.abc import Iterator +from pathlib import Path +import re + +from black import format_str +from black.const import DEFAULT_LINE_LENGTH +from black.files import parse_pyproject_toml +from black.mode import Mode +from black.mode import TargetVersion + + +home = Path(__file__).parent.parent + +_Block = list[ + tuple[ + str, + int, + str | None, + str | None, + str, + ] +] + + +def _format_block( + input_block: _Block, + exit_on_error: bool, + errors: list[tuple[int, str, Exception]], + is_doctest: bool, +) -> list[str]: + if not is_doctest: + # The first line may have additional padding. 
Remove then restore later + add_padding = start_space.match(input_block[0][4]).groups()[0] + skip = len(add_padding) + code = "\n".join( + c[skip:] if c.startswith(add_padding) else c + for *_, c in input_block + ) + else: + add_padding = None + code = "\n".join(c for *_, c in input_block) + + try: + formatted = format_str(code, mode=BLACK_MODE) + except Exception as e: + start_line = input_block[0][1] + errors.append((start_line, code, e)) + if is_doctest: + print( + "Could not format code block starting at " + f"line {start_line}:\n{code}\nError: {e}" + ) + if exit_on_error: + print("Exiting since --exit-on-error was passed") + raise + else: + print("Ignoring error") + elif VERBOSE: + print( + "Could not format code block starting at " + f"line {start_line}:\n---\n{code}\n---Error: {e}" + ) + return [line for line, *_ in input_block] + else: + formatted_code_lines = formatted.splitlines() + padding = input_block[0][2] + sql_prefix = input_block[0][3] or "" + + if is_doctest: + formatted_lines = [ + f"{padding}{sql_prefix}>>> {formatted_code_lines[0]}", + *( + f"{padding}...{' ' if fcl else ''}{fcl}" + for fcl in formatted_code_lines[1:] + ), + ] + else: + formatted_lines = [ + f"{padding}{add_padding}{sql_prefix}{formatted_code_lines[0]}", + *( + f"{padding}{add_padding}{fcl}" if fcl else fcl + for fcl in formatted_code_lines[1:] + ), + ] + if not input_block[-1][0] and formatted_lines[-1]: + # last line was empty and black removed it. 
restore it + formatted_lines.append("") + return formatted_lines + + +format_directive = re.compile(r"^\.\.\s*format\s*:\s*(on|off)\s*$") + +doctest_code_start = re.compile(r"^(\s+)({(?:opensql|sql|stop)})?>>>\s?(.+)") +doctest_code_continue = re.compile(r"^\s+\.\.\.\s?(\s*.*)") +sql_code_start = re.compile(r"^(\s+){(?:open)?sql}") +sql_code_stop = re.compile(r"^(\s+){stop}") + +start_code_section = re.compile( + r"^(((?!\.\.).+::)|(\.\.\s*sourcecode::(.*py.*)?)|(::))$" +) +start_space = re.compile(r"^(\s*)[^ ]?") + + +def format_file( + file: Path, exit_on_error: bool, check: bool, no_plain: bool +) -> tuple[bool, int]: + buffer = [] + if not check: + print(f"Running file {file} ..", end="") + original = file.read_text("utf-8") + doctest_block: _Block | None = None + plain_block: _Block | None = None + + plain_code_section = False + plain_padding = None + plain_padding_len = None + sql_section = False + + errors = [] + + disable_format = False + for line_no, line in enumerate(original.splitlines(), 1): + # start_code_section requires no spaces at the start + + if start_code_section.match(line.strip()): + if plain_block: + buffer.extend( + _format_block( + plain_block, exit_on_error, errors, is_doctest=False + ) + ) + plain_block = None + plain_code_section = True + assert not sql_section + plain_padding = start_space.match(line).groups()[0] + plain_padding_len = len(plain_padding) + buffer.append(line) + continue + elif ( + plain_code_section + and line.strip() + and not line.startswith(" " * (plain_padding_len + 1)) + ): + plain_code_section = sql_section = False + elif match := format_directive.match(line): + disable_format = match.groups()[0] == "off" + + if doctest_block: + assert not plain_block + if match := doctest_code_continue.match(line): + doctest_block.append( + (line, line_no, None, None, match.groups()[0]) + ) + continue + else: + buffer.extend( + _format_block( + doctest_block, exit_on_error, errors, is_doctest=True + ) + ) + doctest_block = None + 
elif plain_block: + if ( + plain_code_section + and not doctest_code_start.match(line) + and not sql_code_start.match(line) + ): + plain_block.append( + (line, line_no, None, None, line[plain_padding_len:]) + ) + continue + else: + buffer.extend( + _format_block( + plain_block, exit_on_error, errors, is_doctest=False + ) + ) + plain_block = None + + if line and (match := doctest_code_start.match(line)): + plain_code_section = sql_section = False + if plain_block: + buffer.extend( + _format_block( + plain_block, exit_on_error, errors, is_doctest=False + ) + ) + plain_block = None + padding, code = match.group(1, 3) + doctest_block = [(line, line_no, padding, match.group(2), code)] + elif ( + line + and plain_code_section + and (match := sql_code_start.match(line)) + ): + if plain_block: + buffer.extend( + _format_block( + plain_block, exit_on_error, errors, is_doctest=False + ) + ) + plain_block = None + + sql_section = True + buffer.append(line) + elif line and sql_section and (match := sql_code_stop.match(line)): + sql_section = False + line = line.replace("{stop}", "") + assert not doctest_block + # start of a plain block + if line.strip(): + plain_block = [ + ( + line, + line_no, + plain_padding, + "{stop}", + line[plain_padding_len:], + ) + ] + + elif ( + line + and not no_plain + and not disable_format + and plain_code_section + and not sql_section + ): + assert not doctest_block + # start of a plain block + plain_block = [ + (line, line_no, plain_padding, None, line[plain_padding_len:]) + ] + else: + buffer.append(line) + + if doctest_block: + buffer.extend( + _format_block( + doctest_block, exit_on_error, errors, is_doctest=True + ) + ) + if plain_block: + buffer.extend( + _format_block(plain_block, exit_on_error, errors, is_doctest=False) + ) + if buffer: + # if there is nothing in the buffer something strange happened so + # don't do anything + buffer.append("") + updated = "\n".join(buffer) + equal = original == updated + if not check: + print( + 
f"..done. {len(errors)} error(s).", + "No changes" if equal else "Changes detected", + ) + if not equal: + # write only if there are changes to write + file.write_text(updated, "utf-8", newline="\n") + else: + if not check: + print(".. Nothing to write") + equal = bool(original) is False + + if check: + if not equal: + print(f"File {file} would be formatted") + return equal, len(errors) + + +def iter_files(directory) -> Iterator[Path]: + yield from (home / directory).glob("./**/*.rst") + + +def main( + file: str | None, + directory: str, + exit_on_error: bool, + check: bool, + no_plain: bool, +): + if file is not None: + result = [format_file(Path(file), exit_on_error, check, no_plain)] + else: + result = [ + format_file(doc, exit_on_error, check, no_plain) + for doc in iter_files(directory) + ] + + if check: + formatting_error_counts = [e for _, e in result if e] + to_reformat = len([b for b, _ in result if not b]) + + if not to_reformat and not formatting_error_counts: + print("All files are correctly formatted") + exit(0) + else: + print( + f"{to_reformat} file(s) would be reformatted;", + ( + f"{sum(formatting_error_counts)} formatting errors " + f"reported in {len(formatting_error_counts)} files" + ) + if formatting_error_counts + else "no formatting errors reported", + ) + + # interim, until we fix all formatting errors + if not to_reformat: + exit(0) + exit(1) + + +if __name__ == "__main__": + parser = ArgumentParser( + description="""Formats code inside docs using black. Supports \ +doctest code blocks and also tries to format plain code blocks identified as \ +all indented blocks of at least 4 spaces, unless '--no-plain' is specified. + +Plain code blocks may lead to false positives. To disable formatting on a \ +file section the comment ``.. format: off`` disables formatting until \ +``.. format: on`` is encountered or the file ends. +Another alternative is to use less than 4 spaces to indent the code block. 
+""", + formatter_class=RawDescriptionHelpFormatter, + ) + parser.add_argument( + "-f", "--file", help="Format only this file instead of all docs" + ) + parser.add_argument( + "-d", + "--directory", + help="Find documents in this directory and its sub dirs", + default="doc/build", + ) + parser.add_argument( + "-c", + "--check", + help="Don't write the files back, just return the " + "status. Return code 0 means nothing would change. " + "Return code 1 means some files would be reformatted.", + action="store_true", + ) + parser.add_argument( + "-e", + "--exit-on-error", + help="Exit in case of black format error instead of ignoring it. " + "This option is only valid for doctest code blocks", + action="store_true", + ) + parser.add_argument( + "-l", + "--project-line-length", + help="Configure the line length to the project value instead " + "of using the black default of 88", + action="store_true", + ) + parser.add_argument( + "-v", + "--verbose", + help="Increase verbosity", + action="store_true", + ) + parser.add_argument( + "-n", + "--no-plain", + help="Disable plain code blocks formatting that's more difficult " + "to parse compared to doctest code blocks", + action="store_true", + ) + args = parser.parse_args() + + config = parse_pyproject_toml(home / "pyproject.toml") + BLACK_MODE = Mode( + target_versions=set( + TargetVersion[val.upper()] + for val in config.get("target_version", []) + if val != "py27" + ), + line_length=config.get("line_length", DEFAULT_LINE_LENGTH) + if args.project_line_length + else DEFAULT_LINE_LENGTH, + ) + VERBOSE = args.verbose + + main( + args.file, + args.directory, + args.exit_on_error, + args.check, + args.no_plain, + )