]> git.ipfire.org Git - thirdparty/sqlalchemy/sqlalchemy.git/commitdiff
Improvements to code formatter
authorFederico Caselli <cfederico87@gmail.com>
Fri, 30 Sep 2022 21:46:42 +0000 (23:46 +0200)
committerMike Bayer <mike_mp@zzzcomputing.com>
Sun, 2 Oct 2022 14:55:42 +0000 (10:55 -0400)
Change-Id: I75cf7143f3ed3bbc09aa8bc18edbce5c8af0f0be

44 files changed:
doc/build/changelog/changelog_04.rst
doc/build/changelog/changelog_08.rst
doc/build/changelog/changelog_09.rst
doc/build/changelog/migration_04.rst
doc/build/changelog/migration_05.rst
doc/build/changelog/migration_07.rst
doc/build/changelog/migration_08.rst
doc/build/changelog/migration_09.rst
doc/build/changelog/migration_10.rst
doc/build/changelog/migration_11.rst
doc/build/changelog/migration_12.rst
doc/build/changelog/migration_14.rst
doc/build/changelog/migration_20.rst
doc/build/changelog/whatsnew_20.rst
doc/build/core/connections.rst
doc/build/core/operators.rst
doc/build/core/pooling.rst
doc/build/dialects/postgresql.rst
doc/build/errors.rst
doc/build/faq/metadata_schema.rst
doc/build/faq/performance.rst
doc/build/faq/sessions.rst
doc/build/glossary.rst
doc/build/orm/collections.rst
doc/build/orm/declarative_tables.rst
doc/build/orm/extensions/asyncio.rst
doc/build/orm/mapping_api.rst
doc/build/orm/nonstandard_mappings.rst
doc/build/orm/persistence_techniques.rst
doc/build/orm/queryguide/_deferred_setup.rst
doc/build/orm/queryguide/_dml_setup.rst
doc/build/orm/queryguide/_inheritance_setup.rst
doc/build/orm/queryguide/_plain_setup.rst
doc/build/orm/queryguide/_single_inheritance.rst
doc/build/orm/queryguide/api.rst
doc/build/orm/queryguide/columns.rst
doc/build/orm/queryguide/inheritance.rst
doc/build/orm/queryguide/relationships.rst
doc/build/orm/quickstart.rst
doc/build/orm/session_state_management.rst
doc/build/orm/session_transaction.rst
doc/build/tutorial/dbapi_transactions.rst
doc/build/tutorial/metadata.rst
tools/format_docs_code.py

index 9261c1262bc1d42b21f80165c208dfb03396d469..fbaa5d9a8156aae69ce4d2e55935ab608f5f742f 100644 (file)
       outer joins are created for all joined-table inheriting
       mappers requested. Note that the auto-create of joins
       is not compatible with concrete table inheritance.
-      
+    
       The existing select_table flag on mapper() is now
       deprecated and is synonymous with
       with_polymorphic('*', select_table).  Note that the
       underlying "guts" of select_table have been
       completely removed and replaced with the newer,
       more flexible approach.
-      
+    
       The new approach also automatically allows eager loads
       to work for subclasses, if they are present, for
       example::
 
-        sess.query(Company).options(
-         eagerload_all(
-        ))
+        sess.query(Company).options(eagerload_all())
 
       to load Company objects, their employees, and the
       'machines' collection of employees who happen to be
index decf365593a6b804928bfc49b6a87d1f2ece2b95..4164e3f5877f065a1cc949e73c9a2f41aada6a50 100644 (file)
 
             del_ = delete(SomeMappedClass).where(SomeMappedClass.id == 5)
 
-            upd = update(SomeMappedClass).where(SomeMappedClass.id == 5).values(name='ed')
+            upd = update(SomeMappedClass).where(SomeMappedClass.id == 5).values(name="ed")
 
     .. change::
         :tags: bug, orm
       to the original, older use case for :meth:`_query.Query.select_from`, which is that
       of restating the mapped entity in terms of a different selectable::
 
-        session.query(User.name).\
-          select_from(user_table.select().where(user_table.c.id > 5))
+        session.query(User.name).select_from(user_table.select().where(user_table.c.id > 5))
 
       Which produces::
 
       original.   Allows symmetry when using :class:`_engine.Engine` and
       :class:`_engine.Connection` objects as context managers::
 
-        with conn.connect() as c: # leaves the Connection open
-          c.execute("...")
+        with conn.connect() as c:  # leaves the Connection open
+            c.execute("...")
 
         with engine.connect() as c:  # closes the Connection
-          c.execute("...")
+            c.execute("...")
 
     .. change::
         :tags: engine
index c9ec5f3a49ac1898b24988effd4abc2647f7a65c..d00e043326e8d4899b2891f73f564ab515f5d018 100644 (file)
         ad-hoc keyword arguments within the :attr:`.Index.kwargs` collection,
         after construction::
 
-            idx = Index('a', 'b')
-            idx.kwargs['mysql_someargument'] = True
+            idx = Index("a", "b")
+            idx.kwargs["mysql_someargument"] = True
 
         To suit the use case of allowing custom arguments at construction time,
         the :meth:`.DialectKWArgs.argument_for` method now allows this registration::
 
-            Index.argument_for('mysql', 'someargument', False)
+            Index.argument_for("mysql", "someargument", False)
 
-            idx = Index('a', 'b', mysql_someargument=True)
+            idx = Index("a", "b", mysql_someargument=True)
 
         .. seealso::
 
index 93a2b654fbc12a042817a6175258aa77dbd82c73..f68c449084adc4e51fe5a50545f136516c5d7ef5 100644 (file)
@@ -429,16 +429,24 @@ flush before each query.
 
 ::
 
-    mapper(Foo, foo_table, properties={
-        'bars':dynamic_loader(Bar, backref='foo', <other relation() opts>)
-    })
+    mapper(
+        Foo,
+        foo_table,
+        properties={
+            "bars": dynamic_loader(
+                Bar,
+                backref="foo",
+                # <other relation() opts>
+            )
+        },
+    )
 
     session = create_session(autoflush=True)
     foo = session.query(Foo).first()
 
-    foo.bars.append(Bar(name='lala'))
+    foo.bars.append(Bar(name="lala"))
 
-    for bar in foo.bars.filter(Bar.name=='lala'):
+    for bar in foo.bars.filter(Bar.name == "lala"):
         print(bar)
 
     session.commit()
@@ -452,13 +460,17 @@ columns as undeferred:
 
 ::
 
-    mapper(Class, table, properties={
-        'foo' : deferred(table.c.foo, group='group1'),
-        'bar' : deferred(table.c.bar, group='group1'),
-        'bat' : deferred(table.c.bat, group='group1'),
+    mapper(
+        Class,
+        table,
+        properties={
+            "foo": deferred(table.c.foo, group="group1"),
+            "bar": deferred(table.c.bar, group="group1"),
+            "bat": deferred(table.c.bat, group="group1"),
+        },
     )
 
-    session.query(Class).options(undefer_group('group1')).filter(...).all()
+    session.query(Class).options(undefer_group("group1")).filter(...).all()
 
 and ``eagerload_all()`` sets a chain of attributes to be
 eager in one pass:
@@ -785,15 +797,15 @@ deprecated. This means that
 
 ::
 
-    my_table.select(my_table.c.id.in_(1,2,3)
-    my_table.select(my_table.c.id.in_(*listOfIds)
+    my_table.select(my_table.c.id.in_(1, 2, 3))
+    my_table.select(my_table.c.id.in_(*listOfIds))
 
 should be changed to
 
 ::
 
-    my_table.select(my_table.c.id.in_([1,2,3])
-    my_table.select(my_table.c.id.in_(listOfIds)
+    my_table.select(my_table.c.id.in_([1, 2, 3]))
+    my_table.select(my_table.c.id.in_(listOfIds))
 
 Schema and Reflection
 =====================
index 39bb9cb053f57c6464762e6653fcd022178f029d..b8f6c0d5f8b2175d4fbeb31bc6a7db59d8217fc0 100644 (file)
@@ -86,10 +86,15 @@ Object Relational Mapping
 
   ::
 
-      subq = session.query(Keyword.id.label('keyword_id')).filter(Keyword.name.in_(['beans', 'carrots'])).subquery()
-      recipes = session.query(Recipe).filter(exists().
-         where(Recipe.id==recipe_keywords.c.recipe_id).
-         where(recipe_keywords.c.keyword_id==subq.c.keyword_id)
+      subq = (
+          session.query(Keyword.id.label("keyword_id"))
+          .filter(Keyword.name.in_(["beans", "carrots"]))
+          .subquery()
+      )
+      recipes = session.query(Recipe).filter(
+          exists()
+          .where(Recipe.id == recipe_keywords.c.recipe_id)
+          .where(recipe_keywords.c.keyword_id == subq.c.keyword_id)
       )
 
 * **Explicit ORM aliases are recommended for aliased joins**
@@ -439,7 +444,7 @@ Schema/Types
   ::
 
       class MyType(AdaptOldConvertMethods, TypeEngine):
-         # ...
+        ..
 
 * The ``quote`` flag on ``Column`` and ``Table`` as well as
   the ``quote_schema`` flag on ``Table`` now control quoting
@@ -477,10 +482,10 @@ Schema/Types
        dt = datetime.datetime(2008, 6, 27, 12, 0, 0, 125)  # 125 usec
 
        # old way
-       '2008-06-27 12:00:00.125'
+       "2008-06-27 12:00:00.125"
 
        # new way
-       '2008-06-27 12:00:00.000125'
+       "2008-06-27 12:00:00.000125"
 
   So if an existing SQLite file-based database intends to be
   used across 0.4 and 0.5, you either have to upgrade the
@@ -497,6 +502,7 @@ Schema/Types
   ::
 
        from sqlalchemy.databases.sqlite import DateTimeMixin
+
        DateTimeMixin.__legacy_microseconds__ = True
 
 Connection Pool no longer threadlocal by default
@@ -538,7 +544,7 @@ data-driven, it takes ``[args]``.
 
   ::
 
-         query.join('orders', 'items')
+         query.join("orders", "items")
          query.join(User.orders, Order.items)
 
 * the ``in_()`` method on columns and similar only accepts a
@@ -584,7 +590,8 @@ Removed
 
        class MyQuery(Query):
            def get(self, ident):
-               # ...
+               ...
+
 
        session = sessionmaker(query_cls=MyQuery)()
 
@@ -621,6 +628,7 @@ Removed
   ::
 
       from sqlalchemy.orm import aliased
+
       address_alias = aliased(Address)
       print(session.query(User, address_alias).join((address_alias, User.addresses)).all())
 
index 4763b9134c4c577058cf80a4b66f85eec30f84de..590da6812522fe7c7f34befcdcb3cae30f35c00e 100644 (file)
@@ -926,12 +926,13 @@ Using declarative, the scenario is this:
 ::
 
     class Parent(Base):
-        __tablename__ = 'parent'
+        __tablename__ = "parent"
         id = Column(Integer, primary_key=True)
 
+
     class Child(Parent):
-       __tablename__ = 'child'
-        id = Column(Integer, ForeignKey('parent.id'), primary_key=True)
+        __tablename__ = "child"
+        id = Column(Integer, ForeignKey("parent.id"), primary_key=True)
 
 Above, the attribute ``Child.id`` refers to both the
 ``child.id`` column as well as ``parent.id`` - this due to
@@ -958,15 +959,17 @@ local column:
 ::
 
     class Child(Parent):
-       __tablename__ = 'child'
-        id = Column(Integer, ForeignKey('parent.id'), primary_key=True)
-        some_related = relationship("SomeRelated",
-                        primaryjoin="Child.id==SomeRelated.child_id")
+        __tablename__ = "child"
+        id = Column(Integer, ForeignKey("parent.id"), primary_key=True)
+        some_related = relationship(
+            "SomeRelated", primaryjoin="Child.id==SomeRelated.child_id"
+        )
+
 
     class SomeRelated(Base):
-       __tablename__ = 'some_related'
+        __tablename__ = "some_related"
         id = Column(Integer, primary_key=True)
-        child_id = Column(Integer, ForeignKey('child.id'))
+        child_id = Column(Integer, ForeignKey("child.id"))
 
 Prior to 0.7 the ``Child.id`` expression would reference
 ``Parent.id``, and it would be necessary to map ``child.id``
index 9c5b381ee96454cebedc590b1b72ea794473da06..4a07518539b8aa3ac9ddf0369c2f1216958f8be2 100644 (file)
@@ -71,16 +71,17 @@ entities.  The new system includes these features:
 
 
         class Parent(Base):
-            __tablename__ = 'parent'
+            __tablename__ = "parent"
             id = Column(Integer, primary_key=True)
-            child_id_one = Column(Integer, ForeignKey('child.id'))
-            child_id_two = Column(Integer, ForeignKey('child.id'))
+            child_id_one = Column(Integer, ForeignKey("child.id"))
+            child_id_two = Column(Integer, ForeignKey("child.id"))
 
             child_one = relationship("Child", foreign_keys=child_id_one)
             child_two = relationship("Child", foreign_keys=child_id_two)
 
+
         class Child(Base):
-            __tablename__ = 'child'
+            __tablename__ = "child"
             id = Column(Integer, primary_key=True)
 
 * relationships against self-referential, composite foreign
index c5f4a31532be93dd0c60514aa070cd66545f81e2..93fb8f1e58e0f838af79db275d19a02b6981a7bc 100644 (file)
@@ -721,12 +721,9 @@ Only those elements in the path that actually need :meth:`.PropComparator.of_typ
 need to be set as a class-bound attribute, string-based names can be resumed
 afterwards::
 
-    session.query(Company).\
-        options(
-            subqueryload(Company.employees.of_type(Engineer)).
-            subqueryload("machines")
-            )
-        )
+    session.query(Company).options(
+        subqueryload(Company.employees.of_type(Engineer)).subqueryload("machines")
+    )
 
 **Old Way**
 
@@ -822,17 +819,16 @@ The :func:`_expression.text` construct gains new methods:
   to be set flexibly::
 
       # setup values
-      stmt = text("SELECT id, name FROM user "
-            "WHERE name=:name AND timestamp=:timestamp").\
-            bindparams(name="ed", timestamp=datetime(2012, 11, 10, 15, 12, 35))
+      stmt = text(
+          "SELECT id, name FROM user WHERE name=:name AND timestamp=:timestamp"
+      ).bindparams(name="ed", timestamp=datetime(2012, 11, 10, 15, 12, 35))
 
       # setup types and/or values
-      stmt = text("SELECT id, name FROM user "
-            "WHERE name=:name AND timestamp=:timestamp").\
-            bindparams(
-                bindparam("name", value="ed"),
-                bindparam("timestamp", type_=DateTime()
-            ).bindparam(timestamp=datetime(2012, 11, 10, 15, 12, 35))
+      stmt = (
+          text("SELECT id, name FROM user WHERE name=:name AND timestamp=:timestamp")
+          .bindparams(bindparam("name", value="ed"), bindparam("timestamp", type_=DateTime()))
+          .bindparam(timestamp=datetime(2012, 11, 10, 15, 12, 35))
+      )
 
 * :meth:`_expression.TextClause.columns` supersedes the ``typemap`` option
   of :func:`_expression.text`, returning a new construct :class:`.TextAsFrom`::
@@ -842,7 +838,8 @@ The :func:`_expression.text` construct gains new methods:
       stmt = stmt.alias()
 
       stmt = select([addresses]).select_from(
-                    addresses.join(stmt), addresses.c.user_id == stmt.c.id)
+          addresses.join(stmt), addresses.c.user_id == stmt.c.id
+      )
 
 
       # or into a cte():
@@ -850,7 +847,8 @@ The :func:`_expression.text` construct gains new methods:
       stmt = stmt.cte("x")
 
       stmt = select([addresses]).select_from(
-                    addresses.join(stmt), addresses.c.user_id == stmt.c.id)
+          addresses.join(stmt), addresses.c.user_id == stmt.c.id
+      )
 
 :ticket:`2877`
 
index 65019111119f01206019fa7b686f6b0d097c7237..ee77e5a6b92cbb846bd410c004be65c74aaf662e 100644 (file)
@@ -396,32 +396,29 @@ of inheritance-oriented scenarios, including:
 * Binding to a Mixin or Abstract Class::
 
         class MyClass(SomeMixin, Base):
-            __tablename__ = 'my_table'
+            __tablename__ = "my_table"
             # ...
 
-        session = Session(binds={SomeMixin: some_engine})
 
+        session = Session(binds={SomeMixin: some_engine})
 
 * Binding to inherited concrete subclasses individually based on table::
 
         class BaseClass(Base):
-            __tablename__ = 'base'
+            __tablename__ = "base"
 
             # ...
 
+
         class ConcreteSubClass(BaseClass):
-            __tablename__ = 'concrete'
+            __tablename__ = "concrete"
 
             # ...
 
-            __mapper_args__ = {'concrete': True}
-
+            __mapper_args__ = {"concrete": True}
 
-        session = Session(binds={
-            base_table: some_engine,
-            concrete_table: some_other_engine
-        })
 
+        session = Session(binds={base_table: some_engine, concrete_table: some_other_engine})
 
 :ticket:`3035`
 
@@ -457,10 +454,10 @@ These scenarios include:
   statement as well as for the SELECT used by the "fetch" strategy::
 
         session.query(User).filter(User.id == 15).update(
-                {"name": "foob"}, synchronize_session='fetch')
+            {"name": "foob"}, synchronize_session="fetch"
+        )
 
-        session.query(User).filter(User.id == 15).delete(
-                synchronize_session='fetch')
+        session.query(User).filter(User.id == 15).delete(synchronize_session="fetch")
 
 * Queries against individual columns::
 
@@ -470,9 +467,10 @@ These scenarios include:
   :obj:`.column_property`::
 
         class User(Base):
-            # ...
+            ...
+
+            score = column_property(func.coalesce(self.tables.users.c.name, None))
 
-            score = column_property(func.coalesce(self.tables.users.c.name, None)))
 
         session.query(func.max(User.score)).scalar()
 
index a9ede422312bf6cf6df7ca590c7edba99715b592..d90f49ec561ff93ff3c80402bea09b670d84881b 100644 (file)
@@ -845,11 +845,13 @@ are part of the "correlate" for the subquery.  Assuming the
 ``Person/Manager/Engineer->Company`` setup from the mapping documentation,
 using with_polymorphic::
 
-    sess.query(Person.name)
-                .filter(
-                    sess.query(Company.name).
-                    filter(Company.company_id == Person.company_id).
-                    correlate(Person).as_scalar() == "Elbonia, Inc.")
+    sess.query(Person.name).filter(
+        sess.query(Company.name)
+        .filter(Company.company_id == Person.company_id)
+        .correlate(Person)
+        .as_scalar()
+        == "Elbonia, Inc."
+    )
 
 The above query now produces::
 
@@ -885,11 +887,13 @@ from it first::
     # aliasing.
 
     paliased = aliased(Person)
-    sess.query(paliased.name)
-                .filter(
-                    sess.query(Company.name).
-                    filter(Company.company_id == paliased.company_id).
-                    correlate(paliased).as_scalar() == "Elbonia, Inc.")
+    sess.query(paliased.name).filter(
+        sess.query(Company.name)
+        .filter(Company.company_id == paliased.company_id)
+        .correlate(paliased)
+        .as_scalar()
+        == "Elbonia, Inc."
+    )
 
 The :func:`.aliased` construct guarantees that the "polymorphic selectable"
 is wrapped in a subquery.  By referring to it explicitly in the correlated
@@ -1130,6 +1134,7 @@ for specific exceptions::
 
         engine = create_engine("postgresql+psycopg2://")
 
+
         @event.listens_for(engine, "handle_error")
         def cancel_disconnect(ctx):
             if isinstance(ctx.original_exception, KeyboardInterrupt):
@@ -2421,12 +2426,10 @@ supported by PostgreSQL 9.5 in this area::
 
     from sqlalchemy.dialects.postgresql import insert
 
-    insert_stmt = insert(my_table). \\
-        values(id='some_id', data='some data to insert')
+    insert_stmt = insert(my_table).values(id="some_id", data="some data to insert")
 
     do_update_stmt = insert_stmt.on_conflict_do_update(
-        index_elements=[my_table.c.id],
-        set_=dict(data='some data to update')
+        index_elements=[my_table.c.id], set_=dict(data="some data to update")
     )
 
     conn.execute(do_update_stmt)
index eb4f076d132229fee7c78961a0a95b9387487fff..7b601a8d48c8ba2f32d9852a76e2a26505bdd7c6 100644 (file)
@@ -394,6 +394,7 @@ hybrid in-place, interfering with the definition on the superclass.
             def _set_name(self, value):
                 self.first_name = value
 
+
         class FirstNameOnly(Base):
             @hybrid_property
             def name(self):
@@ -829,8 +830,7 @@ new feature allows the related features of "select in" loading and
 "polymorphic in" loading to make use of the baked query extension
 to reduce call overhead::
 
-    stmt = select([table]).where(
-        table.c.col.in_(bindparam('foo', expanding=True))
+    stmt = select([table]).where(table.c.col.in_(bindparam("foo", expanding=True)))
     conn.execute(stmt, {"foo": [1, 2, 3]})
 
 The feature should be regarded as **experimental** within the 1.2 series.
index d1933566b4c291517da82b0061af0ec21c09f388..88a51c776fca611f133817bfc3db2436a3dc20e5 100644 (file)
@@ -504,6 +504,7 @@ mutually-dependent module imports, like this::
 
     @util.dependency_for("sqlalchemy.sql.dml")
     def insert(self, dml, *args, **kw):
+        ...
 
 Where the above function would be rewritten to no longer have the ``dml`` parameter
 on the outside.  This would confuse code-linting tools into seeing a missing parameter
@@ -1282,10 +1283,11 @@ including methods such as:
     with engine.connect() as conn:
         result = conn.execute(
             table.select().order_by(table.c.id),
-            execution_options={"stream_results": True}
+            execution_options={"stream_results": True},
         )
         for chunk in result.partitions(500):
             # process up to 500 records
+            ...
 
 :meth:`_engine.Result.columns` - allows slicing and reorganizing of rows:
 
@@ -1306,7 +1308,7 @@ first column by default but can also be selected:
 
     result = session.execute(select(User).order_by(User.id))
     for user_obj in result.scalars():
-        ...
+        ...
 
 :meth:`_engine.Result.mappings` - instead of named-tuple rows, returns
 dictionaries:
@@ -2154,8 +2156,9 @@ in any way::
 
         addresses = relationship(Address, backref=backref("user", viewonly=True))
 
+
     class Address(Base):
-        ...
+        ...
 
 
     u1 = session.query(User).filter_by(name="x").first()
index 781d530ad285e22fdd1ad8b0cbd0d36640beb52c..801235872379d6479fa5712069fb5729627ed73a 100644 (file)
@@ -321,24 +321,23 @@ the SQLAlchemy project itself, the approach taken is as follows:
         from sqlalchemy import exc
 
         # for warnings not included in regex-based filter below, just log
-        warnings.filterwarnings(
-          "always", category=exc.RemovedIn20Warning
-        )
+        warnings.filterwarnings("always", category=exc.RemovedIn20Warning)
 
         # for warnings related to execute() / scalar(), raise
         for msg in [
             r"The (?:Executable|Engine)\.(?:execute|scalar)\(\) function",
-            r"The current statement is being autocommitted using implicit "
-            "autocommit,",
+            r"The current statement is being autocommitted using implicit autocommit,",
             r"The connection.execute\(\) method in SQLAlchemy 2.0 will accept "
             "parameters as a single dictionary or a single sequence of "
             "dictionaries only.",
             r"The Connection.connect\(\) function/method is considered legacy",
             r".*DefaultGenerator.execute\(\)",
         ]:
-          warnings.filterwarnings(
-              "error", message=msg, category=exc.RemovedIn20Warning,
-          )
+            warnings.filterwarnings(
+                "error",
+                message=msg,
+                category=exc.RemovedIn20Warning,
+            )
 
 3. As each sub-category of warnings are resolved in the application, new
    warnings that are caught by the "always" filter can be added to the list
@@ -1245,9 +1244,7 @@ following the table, and may include additional notes not summarized here.
 
       - ::
 
-          session.execute(
-              select(User)
-          ).scalars().all()
+          session.execute(select(User)).scalars().all()
           # or
           session.scalars(select(User)).all()
 
@@ -1258,15 +1255,11 @@ following the table, and may include additional notes not summarized here.
 
     * - ::
 
-          session.query(User).\
-          filter_by(name='some user').one()
+          session.query(User).filter_by(name="some user").one()
 
       - ::
 
-          session.execute(
-              select(User).
-              filter_by(name="some user")
-          ).scalar_one()
+          session.execute(select(User).filter_by(name="some user")).scalar_one()
 
       - :ref:`migration_20_unify_select`
 
@@ -1274,17 +1267,11 @@ following the table, and may include additional notes not summarized here.
 
     * - ::
 
-          session.query(User).\
-          filter_by(name='some user').first()
-
+          session.query(User).filter_by(name="some user").first()
 
       - ::
 
-          session.scalars(
-            select(User).
-            filter_by(name="some user").
-            limit(1)
-          ).first()
+          session.scalars(select(User).filter_by(name="some user").limit(1)).first()
 
       - :ref:`migration_20_unify_select`
 
@@ -1292,34 +1279,22 @@ following the table, and may include additional notes not summarized here.
 
     * - ::
 
-            session.query(User).options(
-                joinedload(User.addresses)
-            ).all()
+            session.query(User).options(joinedload(User.addresses)).all()
 
       - ::
 
-            session.scalars(
-                select(User).
-                options(
-                  joinedload(User.addresses)
-                )
-            ).unique().all()
+            session.scalars(select(User).options(joinedload(User.addresses))).unique().all()
 
       - :ref:`joinedload_not_uniqued`
 
     * - ::
 
-          session.query(User).\
-              join(Address).\
-              filter(Address.email == 'e@sa.us').\
-              all()
+          session.query(User).join(Address).filter(Address.email == "e@sa.us").all()
 
       - ::
 
           session.execute(
-              select(User).
-              join(Address).
-              where(Address.email == 'e@sa.us')
+              select(User).join(Address).where(Address.email == "e@sa.us")
           ).scalars().all()
 
       - :ref:`migration_20_unify_select`
@@ -1328,37 +1303,27 @@ following the table, and may include additional notes not summarized here.
 
     * - ::
 
-          session.query(User).from_statement(
-              text("select * from users")
-          ).all()
+          session.query(User).from_statement(text("select * from users")).all()
 
       - ::
 
-          session.scalars(
-              select(User).
-              from_statement(
-                  text("select * from users")
-              )
-          ).all()
+          session.scalars(select(User).from_statement(text("select * from users"))).all()
 
       - :ref:`orm_queryguide_selecting_text`
 
     * - ::
 
-          session.query(User).\
-              join(User.addresses).\
-              options(
-                contains_eager(User.addresses)
-              ).\
-              populate_existing().all()
+          session.query(User).join(User.addresses).options(
+              contains_eager(User.addresses)
+          ).populate_existing().all()
 
       - ::
 
           session.execute(
-              select(User).
-              join(User.addresses).
-              options(contains_eager(User.addresses)).
-              execution_options(populate_existing=True)
+              select(User)
+              .join(User.addresses)
+              .options(contains_eager(User.addresses))
+              .execution_options(populate_existing=True)
           ).scalars().all()
 
       -
@@ -1370,21 +1335,17 @@ following the table, and may include additional notes not summarized here.
     *
       - ::
 
-          session.query(User).\
-              filter(User.name == 'foo').\
-              update(
-                  {"fullname": "Foo Bar"},
-                  synchronize_session="evaluate"
-              )
-
+          session.query(User).filter(User.name == "foo").update(
+              {"fullname": "Foo Bar"}, synchronize_session="evaluate"
+          )
 
       - ::
 
           session.execute(
-              update(User).
-              where(User.name == 'foo').
-              values(fullname="Foo Bar").
-              execution_options(synchronize_session="evaluate")
+              update(User)
+              .where(User.name == "foo")
+              .values(fullname="Foo Bar")
+              .execution_options(synchronize_session="evaluate")
           )
 
       - :ref:`orm_expression_update_delete`
@@ -1422,25 +1383,25 @@ Legacy code examples are illustrated below::
     session = Session(engine)
 
     # becomes legacy use case
-    user = session.query(User).filter_by(name='some user').one()
+    user = session.query(User).filter_by(name="some user").one()
 
     # becomes legacy use case
-    user = session.query(User).filter_by(name='some user').first()
+    user = session.query(User).filter_by(name="some user").first()
 
     # becomes legacy use case
     user = session.query(User).get(5)
 
     # becomes legacy use case
-    for user in session.query(User).join(User.addresses).filter(Address.email == 'some@email.com'):
-        # ...
+    for user in (
+        session.query(User).join(User.addresses).filter(Address.email == "some@email.com")
+    ):
+        ...
 
     # becomes legacy use case
     users = session.query(User).options(joinedload(User.addresses)).order_by(User.id).all()
 
     # becomes legacy use case
-    users = session.query(User).from_statement(
-        text("select * from users")
-    ).all()
+    users = session.query(User).from_statement(text("select * from users")).all()
 
     # etc
 
@@ -1484,15 +1445,13 @@ Below are some examples of how to migrate to :func:`_sql.select`::
 
     session = Session(engine)
 
-    user = session.execute(
-        select(User).filter_by(name="some user")
-    ).scalar_one()
+    user = session.execute(select(User).filter_by(name="some user")).scalar_one()
 
     # for first(), no LIMIT is applied automatically; add limit(1) if LIMIT
     # is desired on the query
-    user = session.execute(
-        select(User).filter_by(name="some user").limit(1)
-    ).scalars().first()
+    user = (
+        session.execute(select(User).filter_by(name="some user").limit(1)).scalars().first()
+    )
 
     # get() moves to the Session directly
     user = session.get(User, 5)
@@ -1500,18 +1459,22 @@ Below are some examples of how to migrate to :func:`_sql.select`::
     for user in session.execute(
         select(User).join(User.addresses).filter(Address.email == "some@email.case")
     ).scalars():
-        ...
+        ...
 
     # when using joinedload() against collections, use unique() on the result
-    users = session.execute(
-        select(User).options(joinedload(User.addresses)).order_by(User.id)
-    ).unique().all()
+    users = (
+        session.execute(select(User).options(joinedload(User.addresses)).order_by(User.id))
+        .unique()
+        .all()
+    )
 
     # select() has ORM-ish methods like from_statement() that only work
     # if the statement is against ORM entities
-    users = session.execute(
-        select(User).from_statement(text("select * from users"))
-    ).scalars().all()
+    users = (
+        session.execute(select(User).from_statement(text("select * from users")))
+        .scalars()
+        .all()
+    )
 
 **Discussion**
 
index c5ec3887cc65e1e702b6d265aa9de8b027ea2afe..7f1180a924c2b1ec769849a6aa7152bbea2fb941 100644 (file)
@@ -239,12 +239,13 @@ helper):
 
 
       class User(Base):
-          __tablename__ = 'user_account'
+          __tablename__ = "user_account"
 
           id: Mapped[int] = mapped_column(primary_key=True)
           name: Mapped[str]
           addresses: Mapped[List["Address"]] = relationship()
 
+
       class Address(Base):
           __tablename__ = "address"
 
@@ -252,7 +253,6 @@ helper):
           email_address: Mapped[str]
           user_id = mapped_column(ForeignKey("user_account.id"))
 
-
   With the above mapping, the attributes are typed and express themselves
   all the way from statement to result set::
 
@@ -617,10 +617,11 @@ of :class:`_types.String`, as below where use of an ``Annotated`` ``str`` called
   # declarative base with a type-level override, using a type that is
   # expected to be used in multiple places
   class Base(DeclarativeBase):
-      registry = registry(type_annotation_map={
-          str50: String(50),
-      })
-
+      registry = registry(
+          type_annotation_map={
+              str50: String(50),
+          }
+      )
 
 Second, Declarative will extract full
 :func:`_orm.mapped_column` definitions from the left hand type if
index 671d5045485d6bd6f1a92d94f483fce1c3407f43..0aee788def15b31c67581c7df5bc9edc883ec9e6 100644 (file)
@@ -140,15 +140,15 @@ each time the transaction is ended, and a new statement is
 emitted, a new transaction begins implicitly::
 
     with engine.connect() as connection:
-        connection.execute(<some statement>)
+        connection.execute("<some statement>")
         connection.commit()  # commits "some statement"
 
         # new transaction starts
-        connection.execute(<some other statement>)
+        connection.execute("<some other statement>")
         connection.rollback()  # rolls back "some other statement"
 
         # new transaction starts
-        connection.execute(<a third statement>)
+        connection.execute("<a third statement>")
         connection.commit()  # commits "a third statement"
 
 .. versionadded:: 2.0 "commit as you go" style is a new feature of
@@ -823,7 +823,7 @@ as the schema name is passed to these methods explicitly.
   to the :class:`_orm.Session`.  The :class:`_orm.Session` uses a new
   :class:`_engine.Connection` for each transaction::
 
-      schema_engine = engine.execution_options(schema_translate_map = { ... } )
+      schema_engine = engine.execution_options(schema_translate_map={...})
 
       session = Session(schema_engine)
 
@@ -1470,15 +1470,20 @@ Basic guidelines include:
 
         # **Don't** do this:
 
+
         def my_stmt(parameter, thing=False):
             stmt = lambda_stmt(lambda: select(table))
             stmt += (
-                lambda s: s.where(table.c.x > parameter) if thing
+                lambda s: s.where(table.c.x > parameter)
+                if thing
                 else s.where(table.c.y == parameter)
+            )
             return stmt
 
+
         # **Do** do this:
 
+
         def my_stmt(parameter, thing=False):
             stmt = lambda_stmt(lambda: select(table))
             if thing:
@@ -1501,10 +1506,10 @@ Basic guidelines include:
     >>> def my_stmt(x, y):
     ...     def get_x():
     ...         return x
-    ... 
+    ...
     ...     def get_y():
     ...         return y
-    ... 
+    ...
     ...     stmt = lambda_stmt(lambda: select(func.max(get_x(), get_y())))
     ...     return stmt
     >>> with engine.connect() as conn:
@@ -1524,10 +1529,10 @@ Basic guidelines include:
     >>> def my_stmt(x, y):
     ...     def get_x():
     ...         return x
-    ... 
+    ...
     ...     def get_y():
     ...         return y
-    ... 
+    ...
     ...     x_param, y_param = get_x(), get_y()
     ...     stmt = lambda_stmt(lambda: select(func.max(x_param, y_param)))
     ...     return stmt
@@ -2206,22 +2211,24 @@ to create a new dialect "foodialect://", the steps are as follows:
    which is typically a subclass of :class:`sqlalchemy.engine.default.DefaultDialect`.
    In this example let's say it's called ``FooDialect`` and its module is accessed
    via ``foodialect.dialect``.
-3. The entry point can be established in setup.py as follows::
+3. The entry point can be established in ``setup.cfg`` as follows:
 
-    entry_points = """
-          [sqlalchemy.dialects]
-          foodialect = foodialect.dialect:FooDialect
-          """
+   .. sourcecode:: ini
+
+          [options.entry_points]
+          sqlalchemy.dialects =
+              foodialect = foodialect.dialect:FooDialect
 
 If the dialect is providing support for a particular DBAPI on top of
 an existing SQLAlchemy-supported database, the name can be given
 including a database-qualification.  For example, if ``FooDialect``
-were in fact a MySQL dialect, the entry point could be established like this::
+were in fact a MySQL dialect, the entry point could be established like this:
+
+.. sourcecode:: ini
 
-    entry_points = """
-            [sqlalchemy.dialects]
-            mysql.foodialect = foodialect.dialect:FooDialect
-            """
+      [options.entry_points]
+      sqlalchemy.dialects =
+          mysql.foodialect = foodialect.dialect:FooDialect
 
 The above entrypoint would then be accessed as ``create_engine("mysql+foodialect://")``.
 
index 4f0c2a5af2729c8721a20c380401449ef25cd890..79a5c6f4dcae24f72dd3a778d0ef02152761c4da 100644 (file)
@@ -31,25 +31,25 @@ Operator Reference
     >>> from sqlalchemy.orm import relationship
     >>> class User(Base):
     ...     __tablename__ = "user_account"
-    ... 
+    ...
     ...     id = Column(Integer, primary_key=True)
     ...     name = Column(String(30))
     ...     fullname = Column(String)
-    ... 
+    ...
     ...     addresses = relationship("Address", back_populates="user")
-    ... 
+    ...
     ...     def __repr__(self):
     ...         return f"User(id={self.id!r}, name={self.name!r}, fullname={self.fullname!r})"
 
     >>> class Address(Base):
     ...     __tablename__ = "address"
-    ... 
+    ...
     ...     id = Column(Integer, primary_key=True)
     ...     email_address = Column(String, nullable=False)
     ...     user_id = Column(Integer, ForeignKey("user_account.id"))
-    ... 
+    ...
     ...     user = relationship("User", back_populates="addresses")
-    ... 
+    ...
     ...     def __repr__(self):
     ...         return f"Address(id={self.id!r}, email_address={self.email_address!r})"
     >>> conn = engine.connect()
index 7f6523bffd3bc7440c377989b34a36e0437da490..2b8a39c434d9876c2f765095f7e8029118d1ec87 100644 (file)
@@ -524,19 +524,21 @@ are three general approaches to this:
 
         engine = create_engine("mysql+mysqldb://user:pass@host/dbname")
 
+
         def run_in_process(some_data_record):
             with engine.connect() as conn:
                 conn.execute(text("..."))
 
+
         def initializer():
             """ensure the parent proc's database connections are not touched
-               in the new connection pool"""
+            in the new connection pool"""
             engine.dispose(close=False)
 
+
         with Pool(10, initializer=initializer) as p:
             p.map(run_in_process, data)
 
-
    .. versionadded:: 1.4.33  Added the :paramref:`.Engine.dispose.close`
       parameter to allow the replacement of a connection pool in a child
       process without interfering with the connections used by the parent
@@ -549,10 +551,12 @@ are three general approaches to this:
 
         engine = create_engine("mysql://user:pass@host/dbname")
 
+
         def run_in_process():
             with engine.connect() as conn:
                 conn.execute(text("..."))
 
+
         # before process starts, ensure engine.dispose() is called
         engine.dispose()
         p = Process(target=run_in_process)
index 0cb3984dde81da9847fbd7306134f510e23ab193..411037f87cf9648de8fda68732bcb3485355406a 100644 (file)
@@ -101,10 +101,10 @@ was needed in order to allow this combination to work, described below.
 E.g.::
 
     Table(
-        'mydata', metadata,
-        Column('id', Integer, primary_key=True),
-        Column('data', ArrayOfEnum(ENUM('a', 'b, 'c', name='myenum')))
-
+        "mydata",
+        metadata,
+        Column("id", Integer, primary_key=True),
+        Column("data", ArrayOfEnum(ENUM("a", "b", "c", name="myenum"))),
     )
 
 This type is not included as a built-in type as it would be incompatible
index bb49138dbda79ec4860a30ec6c3ae6e271a41ae5..994ecc41ade7b5cb15dc836a983d9cf943519402 100644 (file)
@@ -1187,7 +1187,7 @@ The above mapping will generate warnings::
 The relationships ``Child.parent`` and ``Parent.children`` appear to be in conflict.
 The solution is to apply :paramref:`_orm.relationship.back_populates`::
 
-  class Parent(Base):
+    class Parent(Base):
         __tablename__ = "parent"
         id = Column(Integer, primary_key=True)
         children = relationship("Child", back_populates="parent")
@@ -1525,8 +1525,9 @@ and associating the :class:`_engine.Engine` with the
     metadata_obj = MetaData(bind=engine)
     Base = declarative_base(metadata=metadata_obj)
 
+
     class MyClass(Base):
-        ...
+        ...
 
 
     session = Session()
@@ -1543,8 +1544,9 @@ engine::
     Session = sessionmaker(engine)
     Base = declarative_base()
 
+
     class MyClass(Base):
-        ...
+        ...
 
 
     session = Session()
@@ -1587,7 +1589,7 @@ The correct way to invoke statements is via
 the :meth:`_engine.Connection.execute` method of a :class:`_engine.Connection`::
 
     with engine.connect() as conn:
-    result = conn.execute(stmt)
+        result = conn.execute(stmt)
 
 When using the ORM, a similar facility is available via the :class:`.Session`::
 
index 7dcb8fa4177dde5c59d3f2755eaddbd509e96988..dfb154e41f9571d52a8a3dd15cf1828dbb212929 100644 (file)
@@ -62,7 +62,7 @@ This is available via the :attr:`_schema.MetaData.sorted_tables` function::
 
     metadata_obj = MetaData()
     # ... add Table objects to metadata
-    ti = metadata_obj.sorted_tables:
+    ti = metadata_obj.sorted_tables
     for t in ti:
         print(t)
 
index 1aa94e1c7f82f6d0aa507b8832a3b14bc5bdf6b2..3b494a7c4cda3e3bc18857ddb8943170f3a8f95a 100644 (file)
@@ -413,11 +413,11 @@ Common strategies to mitigate this include:
 
 * Use :class:`.Bundle` objects to organize column-based results::
 
-      u_b = Bundle('user', User.id, User.name)
-      a_b = Bundle('address', Address.id, Address.email)
+      u_b = Bundle("user", User.id, User.name)
+      a_b = Bundle("address", Address.id, Address.email)
 
       for user, address in session.execute(select(u_b, a_b).join(User.addresses)):
-          ...
+          ...
 
 * Use result caching - see :ref:`examples_caching` for an in-depth example
   of this.
index 2642ed5a8ecc2b706ac66b47ce493cd0de580058..e48ff0ec42c45fd7eaa22a50f03ae9291f646b03 100644 (file)
@@ -118,13 +118,13 @@ does not properly handle the exception.    For example::
 The usage of the :class:`.Session` should fit within a structure similar to this::
 
     try:
-        <use session>
+        <use session>
         session.commit()
     except:
-       session.rollback()
-       raise
+        session.rollback()
+        raise
     finally:
-       session.close()  # optional, depends on use case
+        session.close()  # optional, depends on use case
 
 Many things can cause a failure within the try/except besides flushes.
 Applications should ensure some system of "framing" is applied to ORM-oriented
index 3e5ce4d3c9c79fc0961734f58abc9ae7b4231155..9f73f0a7f442e4a23f0e85b2c7b048074389a387 100644 (file)
@@ -309,7 +309,7 @@ Glossary
         on mapped classes.   When a class is mapped as such::
 
             class MyClass(Base):
-                __tablename__ = 'foo'
+                __tablename__ = "foo"
 
                 id = Column(Integer, primary_key=True)
                 data = Column(String)
@@ -643,10 +643,12 @@ Glossary
         as an ORDER BY clause by calling upon the :meth:`_expression.Select.where`
         and :meth:`_expression.Select.order_by` methods::
 
-            stmt = select(user.c.name).\
-                        where(user.c.id > 5).\
-                        where(user.c.name.like('e%').\
-                        order_by(user.c.name)
+            stmt = (
+                select(user.c.name)
+                .where(user.c.id > 5)
+                .where(user.c.name.like("e%"))
+                .order_by(user.c.name)
+            )
 
         Each method call above returns a copy of the original
         :class:`_expression.Select` object with additional qualifiers
@@ -1126,16 +1128,17 @@ Glossary
         single department.  A SQLAlchemy mapping might look like::
 
             class Department(Base):
-                __tablename__ = 'department'
+                __tablename__ = "department"
                 id = Column(Integer, primary_key=True)
                 name = Column(String(30))
                 employees = relationship("Employee")
 
+
             class Employee(Base):
-                __tablename__ = 'employee'
+                __tablename__ = "employee"
                 id = Column(Integer, primary_key=True)
                 name = Column(String(30))
-                dep_id = Column(Integer, ForeignKey('department.id'))
+                dep_id = Column(Integer, ForeignKey("department.id"))
 
         .. seealso::
 
@@ -1177,15 +1180,16 @@ Glossary
         single department.  A SQLAlchemy mapping might look like::
 
             class Department(Base):
-                __tablename__ = 'department'
+                __tablename__ = "department"
                 id = Column(Integer, primary_key=True)
                 name = Column(String(30))
 
+
             class Employee(Base):
-                __tablename__ = 'employee'
+                __tablename__ = "employee"
                 id = Column(Integer, primary_key=True)
                 name = Column(String(30))
-                dep_id = Column(Integer, ForeignKey('department.id'))
+                dep_id = Column(Integer, ForeignKey("department.id"))
                 department = relationship("Department")
 
         .. seealso::
@@ -1210,16 +1214,17 @@ Glossary
         used in :term:`one to many` as follows::
 
             class Department(Base):
-                __tablename__ = 'department'
+                __tablename__ = "department"
                 id = Column(Integer, primary_key=True)
                 name = Column(String(30))
                 employees = relationship("Employee", backref="department")
 
+
             class Employee(Base):
-                __tablename__ = 'employee'
+                __tablename__ = "employee"
                 id = Column(Integer, primary_key=True)
                 name = Column(String(30))
-                dep_id = Column(Integer, ForeignKey('department.id'))
+                dep_id = Column(Integer, ForeignKey("department.id"))
 
         A backref can be applied to any relationship, including one to many,
         many to one, and :term:`many to many`.
@@ -1271,24 +1276,25 @@ Glossary
         specified using plain table metadata::
 
             class Employee(Base):
-                __tablename__ = 'employee'
+                __tablename__ = "employee"
 
                 id = Column(Integer, primary_key=True)
                 name = Column(String(30))
 
                 projects = relationship(
                     "Project",
-                    secondary=Table('employee_project', Base.metadata,
-                                Column("employee_id", Integer, ForeignKey('employee.id'),
-                                            primary_key=True),
-                                Column("project_id", Integer, ForeignKey('project.id'),
-                                            primary_key=True)
-                            ),
-                    backref="employees"
-                    )
+                    secondary=Table(
+                        "employee_project",
+                        Base.metadata,
+                        Column("employee_id", Integer, ForeignKey("employee.id"), primary_key=True),
+                        Column("project_id", Integer, ForeignKey("project.id"), primary_key=True),
+                    ),
+                    backref="employees",
+                )
+
 
             class Project(Base):
-                __tablename__ = 'project'
+                __tablename__ = "project"
 
                 id = Column(Integer, primary_key=True)
                 name = Column(String(30))
@@ -1384,30 +1390,29 @@ Glossary
         A SQLAlchemy declarative mapping for the above might look like::
 
             class Employee(Base):
-                __tablename__ = 'employee'
+                __tablename__ = "employee"
 
                 id = Column(Integer, primary_key=True)
                 name = Column(String(30))
 
 
             class Project(Base):
-                __tablename__ = 'project'
+                __tablename__ = "project"
 
                 id = Column(Integer, primary_key=True)
                 name = Column(String(30))
 
 
             class EmployeeProject(Base):
-                __tablename__ = 'employee_project'
+                __tablename__ = "employee_project"
 
-                employee_id = Column(Integer, ForeignKey('employee.id'), primary_key=True)
-                project_id = Column(Integer, ForeignKey('project.id'), primary_key=True)
+                employee_id = Column(Integer, ForeignKey("employee.id"), primary_key=True)
+                project_id = Column(Integer, ForeignKey("project.id"), primary_key=True)
                 role_name = Column(String(30))
 
                 project = relationship("Project", backref="project_employees")
                 employee = relationship("Employee", backref="employee_projects")
 
-
         Employees can be added to a project given a role name::
 
             proj = Project(name="Client A")
@@ -1415,10 +1420,12 @@ Glossary
             emp1 = Employee(name="emp1")
             emp2 = Employee(name="emp2")
 
-            proj.project_employees.extend([
-                EmployeeProject(employee=emp1, role_name="tech lead"),
-                EmployeeProject(employee=emp2, role_name="account executive")
-            ])
+            proj.project_employees.extend(
+                [
+                    EmployeeProject(employee=emp1, role_name="tech lead"),
+                    EmployeeProject(employee=emp2, role_name="account executive"),
+                ]
+            )
 
         .. seealso::
 
index 8f4c095ccf5c235316e0056cd392f4f4e53bf324..226870d67b20254b2c4fa5f18af7bff807d54b9e 100644 (file)
@@ -241,7 +241,6 @@ Or for a ``set``, illustrated in the same
            # use a List, Python 3.8 and earlier
            children: Mapped[List["Child"]] = relationship()
 
-
 When using mappings without the :class:`_orm.Mapped` annotation, such as when
 using :ref:`imperative mappings <orm_imperative_mapping>` or untyped
 Python code, as well as in a few special cases, the collection class for a
@@ -666,14 +665,16 @@ methods can be changed as well:
 
     from sqlalchemy.orm.collections import collection
 
+
     class MyList(list):
         @collection.remover
         def zark(self, item):
             # do something special...
+            ...
 
         @collection.iterator
         def hey_use_this_instead_for_iteration(self):
-            ...
+            ...
 
 There is no requirement to be list-, or set-like at all. Collection classes
 can be any shape, so long as they have the append, remove and iterate
index 9faba2f3a06374badad9185bb0d4a88c653c7f1e..d1d45178d1700ca34fd4edb9137f96e4815df86a 100644 (file)
@@ -252,11 +252,13 @@ The two qualities that :func:`_orm.mapped_column` derives from the
       from sqlalchemy.orm import Mapped
       from sqlalchemy.orm import mapped_column
 
+
       class Base(DeclarativeBase):
           pass
 
+
       class SomeClass(Base):
-          __tablename__ = 'some_table'
+          __tablename__ = "some_table"
 
           # primary_key=True, therefore will be NOT NULL
           id: Mapped[int] = mapped_column(primary_key=True)
index 3a81bf9bb3e22f9443159a299a21764f8afddcb2..ea7b9eedc7b1fad1545af9b3791116b41ba64b05 100644 (file)
@@ -349,9 +349,7 @@ Other guidelines include:
 
       user = await session.get(User, 42)
       addresses = (await session.scalars(user.addresses.statement)).all()
-      stmt = user.addresses.statement.where(
-          Address.email_address.startswith("patrick")
-      )
+      stmt = user.addresses.statement.where(Address.email_address.startswith("patrick"))
       addresses_filter = (await session.scalars(stmt)).all()
 
   .. seealso::
@@ -678,10 +676,12 @@ value will be invoked after being returned::
 
 
     @event.listens_for(engine.sync_engine, "connect")
-    def register_custom_types(dbapi_connection, ...):
+    def register_custom_types(dbapi_connection, *args):
         dbapi_connection.run_async(
             lambda connection: connection.set_type_codec(
-                "MyCustomType", encoder, decoder, ...
+                "MyCustomType",
+                encoder,
+                decoder,  # ...
             )
         )
 
index 72eea5cc659badfc6bbb935c4ea9258fdfa3c68d..921291a170687d279dc7d374fbadcb58f45b36dd 100644 (file)
@@ -55,18 +55,19 @@ Class Mapping API
                 @declared_attr.cascading
                 def id(cls):
                     if has_inherited_table(cls):
-                        return Column(
-                            ForeignKey('myclass.id'), primary_key=True
-                        )
+                        return Column(ForeignKey("myclass.id"), primary_key=True)
                     else:
                         return Column(Integer, primary_key=True)
 
+
             class MyClass(HasIdMixin, Base):
-                __tablename__ = 'myclass'
+                __tablename__ = "myclass"
                 # ...
 
+
             class MySubClass(MyClass):
-                ""
+                """"""
+
                 # ...
 
         The behavior of the above configuration is that ``MySubClass``
index d461b63c646cf62bef2db9e65bcf343bd8f2f8df..d71343e99fd7c84f9134c5faae1a388128c1d596 100644 (file)
@@ -110,9 +110,10 @@ may be used::
 
         from sqlalchemy import event
 
-        @event.listens_for(PtoQ, 'before_update')
+
+        @event.listens_for(PtoQ, "before_update")
         def receive_before_update(mapper, connection, target):
-           if target.some_required_attr_on_q is None:
+            if target.some_required_attr_on_q is None:
                 connection.execute(q_table.insert(), {"id": target.id})
 
     where above, a row is INSERTed into the ``q_table`` table by creating an
index 789cd739c8485410f795cea0f98c5792126365b9..abcd75804345521d3e57cf5f0d2f9a191057a8c6 100644 (file)
@@ -45,16 +45,17 @@ retrieved by the ORM as part of the object's primary key::
 
 
     class Foo(Base):
-        __tablename__ = 'foo'
+        __tablename__ = "foo"
         pk = mapped_column(Integer, primary_key=True)
         bar = mapped_column(Integer)
 
+
     e = create_engine("postgresql+psycopg2://scott:tiger@localhost/test", echo=True)
     Base.metadata.create_all(e)
 
     session = Session(e)
 
-    foo = Foo(pk=sql.select(sql.func.coalesce(sql.func.max(Foo.pk) + 1, 1))
+    foo = Foo(pk=sql.select(sql.func.coalesce(sql.func.max(Foo.pk) + 1, 1)))
     session.add(foo)
     session.commit()
 
@@ -632,31 +633,36 @@ connections::
     from sqlalchemy.orm import DeclarativeBase
     from sqlalchemy.orm import Session
 
+
     class BaseA(DeclarativeBase):
         pass
 
+
     class BaseB(DeclarativeBase):
         pass
 
+
     class User(BaseA):
-        # ...
+        ...
+
 
     class Address(BaseA):
-        ...
+        ...
 
 
     class GameInfo(BaseB):
-        # ...
+        ...
+
 
     class GameStats(BaseB):
-        ...
+        ...
 
 
     Session = sessionmaker()
 
     # all User/Address operations will be on engine 1, all
     # Game operations will be on engine 2
-    Session.configure(binds={BaseA:engine1, BaseB:engine2})
+    Session.configure(binds={BaseA: engine1, BaseB: engine2})
 
 Above, classes which descend from ``BaseA`` and ``BaseB`` will have their
 SQL operations routed to one of two engines based on which superclass
index a9bbaac8b847ec838f8b39a5fcf25a2a17937c5d..e75630c46ea134e8a3ef6aabf91e4bb49715cb64 100644 (file)
@@ -33,7 +33,7 @@ This page illustrates the mappings and fixture data used by the
     ...     name: Mapped[str]
     ...     fullname: Mapped[Optional[str]]
     ...     books: Mapped[List["Book"]] = relationship(back_populates="owner")
-    ... 
+    ...
     ...     def __repr__(self) -> str:
     ...         return f"User(id={self.id!r}, name={self.name!r}, fullname={self.fullname!r})"
     >>> class Book(Base):
@@ -44,7 +44,7 @@ This page illustrates the mappings and fixture data used by the
     ...     summary: Mapped[str] = mapped_column(Text)
     ...     cover_photo: Mapped[bytes] = mapped_column(LargeBinary)
     ...     owner: Mapped["User"] = relationship(back_populates="books")
-    ... 
+    ...
     ...     def __repr__(self) -> str:
     ...         return f"Book(id={self.id!r}, title={self.title!r})"
     >>> engine = create_engine("sqlite+pysqlite:///:memory:", echo=True)
index bae0cce3dc94484dabc61a8c03d0131547d2f43b..07f053980cd1b7960633487c7742853172747e12 100644 (file)
@@ -33,7 +33,7 @@ This page illustrates the mappings and fixture data used by the
     ...     fullname: Mapped[Optional[str]]
     ...     species: Mapped[Optional[str]]
     ...     addresses: Mapped[List["Address"]] = relationship(back_populates="user")
-    ... 
+    ...
     ...     def __repr__(self) -> str:
     ...         return f"User(name={self.name!r}, fullname={self.fullname!r})"
     >>> class Address(Base):
@@ -42,7 +42,7 @@ This page illustrates the mappings and fixture data used by the
     ...     user_id: Mapped[int] = mapped_column(ForeignKey("user_account.id"))
     ...     email_address: Mapped[str]
     ...     user: Mapped[User] = relationship(back_populates="addresses")
-    ... 
+    ...
     ...     def __repr__(self) -> str:
     ...         return f"Address(email_address={self.email_address!r})"
     >>> class LogRecord(Base):
@@ -51,7 +51,7 @@ This page illustrates the mappings and fixture data used by the
     ...     message: Mapped[str]
     ...     code: Mapped[str]
     ...     timestamp: Mapped[datetime.datetime]
-    ... 
+    ...
     ...     def __repr__(self):
     ...         return f"LogRecord({self.message!r}, {self.code!r}, {self.timestamp!r})"
 
@@ -60,10 +60,10 @@ This page illustrates the mappings and fixture data used by the
     ...     id: Mapped[int] = mapped_column(primary_key=True)
     ...     name: Mapped[str]
     ...     type: Mapped[str]
-    ... 
+    ...
     ...     def __repr__(self):
     ...         return f"{self.__class__.__name__}({self.name!r})"
-    ... 
+    ...
     ...     __mapper_args__ = {
     ...         "polymorphic_identity": "employee",
     ...         "polymorphic_on": "type",
@@ -72,10 +72,10 @@ This page illustrates the mappings and fixture data used by the
     ...     __tablename__ = "manager"
     ...     id: Mapped[int] = mapped_column(ForeignKey("employee.id"), primary_key=True)
     ...     manager_name: Mapped[str]
-    ... 
+    ...
     ...     def __repr__(self):
     ...         return f"{self.__class__.__name__}({self.name!r}, manager_name={self.manager_name!r})"
-    ... 
+    ...
     ...     __mapper_args__ = {
     ...         "polymorphic_identity": "manager",
     ...     }
@@ -83,10 +83,10 @@ This page illustrates the mappings and fixture data used by the
     ...     __tablename__ = "engineer"
     ...     id: Mapped[int] = mapped_column(ForeignKey("employee.id"), primary_key=True)
     ...     engineer_info: Mapped[str]
-    ... 
+    ...
     ...     def __repr__(self):
     ...         return f"{self.__class__.__name__}({self.name!r}, engineer_info={self.engineer_info!r})"
-    ... 
+    ...
     ...     __mapper_args__ = {
     ...         "polymorphic_identity": "engineer",
     ...     }
index addddda4e286c7d2dd00bb8b9bbe4216024fb2e7..c98a83035a0bf62088039fe1eee4bb5a5c9a9d43 100644 (file)
@@ -35,10 +35,10 @@ the :ref:`queryguide_toplevel`.
     ...     type: Mapped[str]
     ...     company_id: Mapped[int] = mapped_column(ForeignKey("company.id"))
     ...     company: Mapped[Company] = relationship(back_populates="employees")
-    ... 
+    ...
     ...     def __repr__(self):
     ...         return f"{self.__class__.__name__}({self.name!r})"
-    ... 
+    ...
     ...     __mapper_args__ = {
     ...         "polymorphic_identity": "employee",
     ...         "polymorphic_on": "type",
@@ -57,7 +57,7 @@ the :ref:`queryguide_toplevel`.
     ...     id: Mapped[int] = mapped_column(primary_key=True)
     ...     manager_id: Mapped[int] = mapped_column(ForeignKey("manager.id"))
     ...     document_name: Mapped[str]
-    ... 
+    ...
     ...     def __repr__(self):
     ...         return f"Paperwork({self.document_name!r})"
     >>>
index 7166fe9b8a9c7686e2768674abd275a0097d109e..af4e5b5c8ad39eebb8fb62806722346edf50f28b 100644 (file)
@@ -32,7 +32,7 @@ This page illustrates the mappings and fixture data used by the
     ...     fullname: Mapped[Optional[str]]
     ...     addresses: Mapped[List["Address"]] = relationship(back_populates="user")
     ...     orders: Mapped[List["Order"]] = relationship()
-    ... 
+    ...
     ...     def __repr__(self) -> str:
     ...         return f"User(id={self.id!r}, name={self.name!r}, fullname={self.fullname!r})"
     >>> class Address(Base):
@@ -41,7 +41,7 @@ This page illustrates the mappings and fixture data used by the
     ...     user_id: Mapped[int] = mapped_column(ForeignKey("user_account.id"))
     ...     email_address: Mapped[str]
     ...     user: Mapped[User] = relationship(back_populates="addresses")
-    ... 
+    ...
     ...     def __repr__(self) -> str:
     ...         return f"Address(id={self.id!r}, email_address={self.email_address!r})"
     >>> order_items_table = Table(
index 546f87a19366152de9c7657060f196f63076e184..158326e1e2debfdb8a45c0ae4e87d7afdd178b80 100644 (file)
@@ -27,10 +27,10 @@ the :ref:`queryguide_toplevel`.
     ...     id: Mapped[int] = mapped_column(primary_key=True)
     ...     name: Mapped[str]
     ...     type: Mapped[str]
-    ... 
+    ...
     ...     def __repr__(self):
     ...         return f"{self.__class__.__name__}({self.name!r})"
-    ... 
+    ...
     ...     __mapper_args__ = {
     ...         "polymorphic_identity": "employee",
     ...         "polymorphic_on": "type",
index f9a92e3316e8b10d947681e6ac74517bee921a0c..5b6209d3db3d5ac3f4ef6a43be73c8112341882b 100644 (file)
@@ -88,10 +88,10 @@ E.g. to refresh an instance while also refreshing a related set of objects:
 .. sourcecode:: python
 
     stmt = (
-        select(User).
-        where(User.name.in_(names)).
-        execution_options(populate_existing=True).
-        options(selectinload(User.addresses)
+        select(User)
+        .where(User.name.in_(names))
+        .execution_options(populate_existing=True)
+        .options(selectinload(User.addresses))
     )
     # will refresh all matching User objects as well as the related
     # Address objects
index 19538c698e834d19885b9b249fbe5cfaafd251cd..78b51b36c2d901d13a7a8d05f8fb6a3281bfb808 100644 (file)
@@ -331,7 +331,7 @@ unconditionally on every query. To configure, use the
     ...     title: Mapped[str]
     ...     summary: Mapped[str] = mapped_column(Text, deferred=True)
     ...     cover_photo: Mapped[bytes] = mapped_column(LargeBinary, deferred=True)
-    ... 
+    ...
     ...     def __repr__(self) -> str:
     ...         return f"Book(id={self.id!r}, title={self.title!r})"
 
@@ -492,7 +492,7 @@ undeferred::
     ...     cover_photo: Mapped[bytes] = mapped_column(
     ...         LargeBinary, deferred=True, deferred_group="book_attrs"
     ...     )
-    ... 
+    ...
     ...     def __repr__(self) -> str:
     ...         return f"Book(id={self.id!r}, title={self.title!r})"
 
@@ -573,7 +573,7 @@ will raise on access in all cases unless explicitly "undeferred" using
     ...     cover_photo: Mapped[bytes] = mapped_column(
     ...         LargeBinary, deferred=True, deferred_raiseload=True
     ...     )
-    ... 
+    ...
     ...     def __repr__(self) -> str:
     ...         return f"Book(id={self.id!r}, title={self.title!r})"
 
@@ -626,7 +626,7 @@ Loading Arbitrary SQL Expressions onto Objects
     ...     name: Mapped[str]
     ...     fullname: Mapped[Optional[str]]
     ...     books: Mapped[List["Book"]] = relationship(back_populates="owner")
-    ... 
+    ...
     ...     def __repr__(self) -> str:
     ...         return f"User(id={self.id!r}, name={self.name!r}, fullname={self.fullname!r})"
     >>> class Book(Base):
@@ -637,7 +637,7 @@ Loading Arbitrary SQL Expressions onto Objects
     ...     summary: Mapped[str] = mapped_column(Text)
     ...     cover_photo: Mapped[bytes] = mapped_column(LargeBinary)
     ...     owner: Mapped["User"] = relationship(back_populates="books")
-    ... 
+    ...
     ...     def __repr__(self) -> str:
     ...         return f"Book(id={self.id!r}, title={self.title!r})"
 
@@ -685,7 +685,7 @@ level :func:`_orm.query_expression` directive may produce this result.
     ...     title: Mapped[str]
     ...     summary: Mapped[str] = mapped_column(Text)
     ...     cover_photo: Mapped[bytes] = mapped_column(LargeBinary)
-    ... 
+    ...
     ...     def __repr__(self) -> str:
     ...         return f"Book(id={self.id!r}, title={self.title!r})"
 
@@ -705,7 +705,7 @@ normally produce ``None``::
     ...     name: Mapped[str]
     ...     fullname: Mapped[Optional[str]]
     ...     book_count: Mapped[int] = query_expression()
-    ... 
+    ...
     ...     def __repr__(self) -> str:
     ...         return f"User(id={self.id!r}, name={self.name!r}, fullname={self.fullname!r})"
 
index 4506f4ffc7ddd6b8fbbfdf0e396ca91066c4ffa4..ea5316cecd131899fdb0923944a756a52f13c7b0 100644 (file)
@@ -939,10 +939,10 @@ is below::
     ...     id: Mapped[int] = mapped_column(primary_key=True)
     ...     name: Mapped[str]
     ...     type: Mapped[str]
-    ... 
+    ...
     ...     def __repr__(self):
     ...         return f"{self.__class__.__name__}({self.name!r})"
-    ... 
+    ...
     ...     __mapper_args__ = {
     ...         "polymorphic_identity": "employee",
     ...         "polymorphic_on": "type",
index 4c8125f5a450fe7d29bd4c6626a4e0028cee4220..f2be63c89e2e8ebb623390cdf4fb1086d2ec3c33 100644 (file)
@@ -272,9 +272,12 @@ the :meth:`_orm.Load.options` method::
    :ref:`orm_queryguide_populate_existing` execution option::
 
       # change the options on Parent objects that were already loaded
-      stmt = select(Parent).execution_options(populate_existing=True).options(
-          lazyload(Parent.children).
-          lazyload(Child.subelements)).all()
+      stmt = (
+          select(Parent)
+          .execution_options(populate_existing=True)
+          .options(lazyload(Parent.children).lazyload(Child.subelements))
+          .all()
+      )
 
    If the objects loaded above are fully cleared from the :class:`.Session`,
    such as due to garbage collection or that :meth:`.Session.expunge_all`
@@ -1116,7 +1119,7 @@ the specific :func:`_orm.aliased` construct to be passed:
     stmt = (
        select(User).
        outerjoin(User.addresses.of_type(adalias)).
-       options(contains_eager(User.addresses.of_type(adalias))
+       options(contains_eager(User.addresses.of_type(adalias)))
     )
 
     # get results normally
index 6a20eb9771a5c307d06c7a5efa73c848c52f272f..039bf3d70393102158cb7c355b6c74ee5cc16d93 100644 (file)
@@ -40,27 +40,27 @@ real SQL tables that exist, or will exist, in a particular database::
 
     >>> class User(Base):
     ...     __tablename__ = "user_account"
-    ... 
+    ...
     ...     id: Mapped[int] = mapped_column(primary_key=True)
     ...     name: Mapped[str] = mapped_column(String(30))
     ...     fullname: Mapped[Optional[str]]
-    ... 
+    ...
     ...     addresses: Mapped[list["Address"]] = relationship(
     ...         back_populates="user", cascade="all, delete-orphan"
     ...     )
-    ... 
+    ...
     ...     def __repr__(self) -> str:
     ...         return f"User(id={self.id!r}, name={self.name!r}, fullname={self.fullname!r})"
 
     >>> class Address(Base):
     ...     __tablename__ = "address"
-    ... 
+    ...
     ...     id: Mapped[int] = mapped_column(primary_key=True)
     ...     email_address: Mapped[str]
     ...     user_id: Mapped[int] = mapped_column(ForeignKey("user_account.id"))
-    ... 
+    ...
     ...     user: Mapped["User"] = relationship(back_populates="addresses")
-    ... 
+    ...
     ...     def __repr__(self) -> str:
     ...         return f"Address(id={self.id!r}, email_address={self.email_address!r})"
 
@@ -198,7 +198,7 @@ is used:
     >>> from sqlalchemy.orm import Session
 
     >>> with Session(engine) as session:
-    ... 
+    ...
     ...     spongebob = User(
     ...         name="spongebob",
     ...         fullname="Spongebob Squarepants",
@@ -213,9 +213,9 @@ is used:
     ...         ],
     ...     )
     ...     patrick = User(name="patrick", fullname="Patrick Star")
-    ... 
+    ...
     ...     session.add_all([spongebob, sandy, patrick])
-    ... 
+    ...
     ...     session.commit()
     {opensql}BEGIN (implicit)
     INSERT INTO user_account (name, fullname) VALUES (?, ?), (?, ?), (?, ?) RETURNING id
index 7538fb4a1fe5ba04fff8a7b2f7fb7ab0e2c8ee67..5d1848812e4a313e9a4c3908b2df4b619db03bd8 100644 (file)
@@ -532,9 +532,9 @@ be that of a column-mapped attribute::
     will be refreshed with data from the database::
 
         stmt = (
-            select(User).
-            execution_options(populate_existing=True).
-            where((User.name.in_(['a', 'b', 'c']))
+            select(User)
+            .execution_options(populate_existing=True)
+            .where((User.name.in_(["a", "b", "c"])))
         )
         for user in session.execute(stmt).scalars():
             print(user)  # will be refreshed for those columns that came back from the query
index 164fea347a8cfb389c8345cdeb841f9dca16ec3b..e8dd484599e3e96022699a2b109f8dc5eabaf6a1 100644 (file)
@@ -60,13 +60,13 @@ or rolled back::
     session.commit()  # commits
 
     # will automatically begin again
-    result = session.execute(< some select statement >)
+    result = session.execute("< some select statement >")
     session.add_all([more_objects, ...])
     session.commit()  # commits
 
     session.add(still_another_object)
     session.flush()  # flush still_another_object
-    session.rollback()   # rolls back still_another_object
+    session.rollback()  # rolls back still_another_object
 
 The :class:`_orm.Session` itself features a :meth:`_orm.Session.close`
 method.  If the :class:`_orm.Session` is begun within a transaction that
@@ -120,9 +120,7 @@ Similarly, the :class:`_orm.sessionmaker` can be used in the same way::
 method to allow both operations to take place at once::
 
     with Session.begin() as session:
-        session.add(some_object):
-
-
+        session.add(some_object)
 
 .. _session_begin_nested:
 
@@ -530,12 +528,11 @@ used in a read-only fashion**, that is::
 
 
     with autocommit_session() as session:
-        some_objects = session.execute(<statement>)
-        some_other_objects = session.execute(<statement>)
+        some_objects = session.execute("<statement>")
+        some_other_objects = session.execute("<statement>")
 
     # closes connection
 
-
 Setting Isolation for Individual Sessions
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
index 58a4e3ab58a70aa8bc60646f185034f73fe31b8a..89b8f423d290f56edcd11d265c6deab8bd838470 100644 (file)
@@ -248,7 +248,7 @@ Below we illustrate a variety of ways to access rows.
       result = conn.execute(text("select x, y from some_table"))
 
       for x, y in result:
-          ...
+          ...
 
 * **Integer Index** - Tuples are Python sequences, so regular integer access is available too:
 
@@ -256,8 +256,8 @@ Below we illustrate a variety of ways to access rows.
 
       result = conn.execute(text("select x, y from some_table"))
 
-        for row in result:
-            x = row[0]
+      for row in result:
+          x = row[0]
 
 * **Attribute Name** - As these are Python named tuples, the tuples have dynamic attribute names
   matching the names of each column.  These names are normally the names that the
@@ -286,8 +286,8 @@ Below we illustrate a variety of ways to access rows.
       result = conn.execute(text("select x, y from some_table"))
 
       for dict_row in result.mappings():
-          x = dict_row['x']
-          y = dict_row['y']
+          x = dict_row["x"]
+          y = dict_row["y"]
 
   ..
 
index 1b569941185a84869b4bae3a8eef174ea7d57ba1..7816bae6770363ea0707b9e665ebe2e450183be8 100644 (file)
@@ -332,25 +332,25 @@ types::
 
     >>> class User(Base):
     ...     __tablename__ = "user_account"
-    ... 
+    ...
     ...     id: Mapped[int] = mapped_column(primary_key=True)
     ...     name: Mapped[str] = mapped_column(String(30))
     ...     fullname: Mapped[Optional[str]]
-    ... 
+    ...
     ...     addresses: Mapped[List["Address"]] = relationship(back_populates="user")
-    ... 
+    ...
     ...     def __repr__(self) -> str:
     ...         return f"User(id={self.id!r}, name={self.name!r}, fullname={self.fullname!r})"
 
     >>> class Address(Base):
     ...     __tablename__ = "address"
-    ... 
+    ...
     ...     id: Mapped[int] = mapped_column(primary_key=True)
     ...     email_address: Mapped[str]
     ...     user_id = mapped_column(ForeignKey("user_account.id"))
-    ... 
+    ...
     ...     user: Mapped[User] = relationship(back_populates="addresses")
-    ... 
+    ...
     ...     def __repr__(self) -> str:
     ...         return f"Address(id={self.id!r}, email_address={self.email_address!r})"
 
@@ -423,15 +423,15 @@ about these classes include:
     optional. Our mapping above can be written without annotations as::
 
         class User(Base):
-          __tablename__ = 'user_account'
+            __tablename__ = "user_account"
 
-          id = mapped_column(Integer, primary_key=True)
-          name = mapped_column(String(30), nullable=False)
-          fullname = mapped_column(String)
+            id = mapped_column(Integer, primary_key=True)
+            name = mapped_column(String(30), nullable=False)
+            fullname = mapped_column(String)
 
-          addresses = relationship("Address", back_populates="user")
+            addresses = relationship("Address", back_populates="user")
 
-          # ... definition continues
+            # ... definition continues
 
     The above class has an advantage over one that uses :class:`.Column`
     directly, in that the ``User`` class as well as instances of ``User``
index 04dc59d36ca62cfaf16e56650b659c8db3d5ce11..18ea5315e24c53888d3cc7b1fcd340cc912126b3 100644 (file)
@@ -4,11 +4,11 @@ from collections.abc import Iterator
 from pathlib import Path
 import re
 
-from black import DEFAULT_LINE_LENGTH
 from black import format_str
-from black import Mode
-from black import parse_pyproject_toml
-from black import TargetVersion
+from black.const import DEFAULT_LINE_LENGTH
+from black.files import parse_pyproject_toml
+from black.mode import Mode
+from black.mode import TargetVersion
 
 
 home = Path(__file__).parent.parent
@@ -17,14 +17,28 @@ _Block = list[tuple[str, int, str | None, str]]
 
 
 def _format_block(
-    input_block: _Block, exit_on_error: bool, is_doctest: bool
+    input_block: _Block,
+    exit_on_error: bool,
+    errors: list[tuple[int, str, Exception]],
+    is_doctest: bool,
 ) -> list[str]:
-    code = "\n".join(c for *_, c in input_block)
+    if not is_doctest:
+        # The first line may have additional padding. Remove then restore later
+        add_padding = start_space.match(input_block[0][3]).groups()[0]
+        skip = len(add_padding)
+        code = "\n".join(
+            c[skip:] if c.startswith(add_padding) else c
+            for *_, c in input_block
+        )
+    else:
+        add_padding = None
+        code = "\n".join(c for *_, c in input_block)
     try:
         formatted = format_str(code, mode=BLACK_MODE)
     except Exception as e:
+        start_line = input_block[0][1]
+        errors.append((start_line, code, e))
         if is_doctest:
-            start_line = input_block[0][1]
             print(
                 "Could not format code block starting at "
                 f"line {start_line}:\n{code}\nError: {e}"
@@ -35,7 +49,6 @@ def _format_block(
             else:
                 print("Ignoring error")
         elif VERBOSE:
-            start_line = input_block[0][1]
             print(
                 "Could not format code block starting at "
                 f"line {start_line}:\n---\n{code}\n---Error: {e}"
@@ -47,16 +60,14 @@ def _format_block(
         if is_doctest:
             formatted_lines = [
                 f"{padding}>>> {formatted_code_lines[0]}",
-                *(f"{padding}... {fcl}" for fcl in formatted_code_lines[1:]),
+                *(
+                    f"{padding}...{' ' if fcl else ''}{fcl}"
+                    for fcl in formatted_code_lines[1:]
+                ),
             ]
         else:
-            # The first line may have additional padding.
-            # If it does restore it
-            additionalPadding = re.match(
-                r"^(\s*)[^ ]?", input_block[0][3]
-            ).groups()[0]
             formatted_lines = [
-                f"{padding}{additionalPadding}{fcl}" if fcl else fcl
+                f"{padding}{add_padding}{fcl}" if fcl else fcl
                 for fcl in formatted_code_lines
             ]
             if not input_block[-1][0] and formatted_lines[-1]:
@@ -65,30 +76,57 @@ def _format_block(
         return formatted_lines
 
 
+format_directive = re.compile(r"^\.\.\s*format\s*:\s*(on|off)\s*$")
+
 doctest_code_start = re.compile(r"^(\s+)>>>\s?(.+)")
 doctest_code_continue = re.compile(r"^\s+\.\.\.\s?(\s*.*)")
-plain_indent = re.compile(r"^(\s{4})(\s*[^: ].*)")
-format_directive = re.compile(r"^\.\.\s*format\s*:\s*(on|off)\s*$")
-dont_format_under_directive = re.compile(r"^\.\. (?:toctree)::\s*$")
+
+start_code_section = re.compile(
+    r"^(((?!\.\.).+::)|(\.\.\s*sourcecode::(.*py.*)?)|(::))$"
+)
+start_space = re.compile(r"^(\s*)[^ ]?")
 
 
 def format_file(
     file: Path, exit_on_error: bool, check: bool, no_plain: bool
-) -> bool | None:
+) -> tuple[bool, int]:
     buffer = []
     if not check:
         print(f"Running file {file} ..", end="")
     original = file.read_text("utf-8")
     doctest_block: _Block | None = None
     plain_block: _Block | None = None
-    last_line = None
+
+    plain_code_section = False
+    plain_padding = None
+    plain_padding_len = None
+
+    errors = []
+
     disable_format = False
-    non_code_directive = False
     for line_no, line in enumerate(original.splitlines(), 1):
-        if match := format_directive.match(line):
+        # start_code_section requires no spaces at the start
+        if start_code_section.match(line.strip()):
+            if plain_block:
+                buffer.extend(
+                    _format_block(
+                        plain_block, exit_on_error, errors, is_doctest=False
+                    )
+                )
+                plain_block = None
+            plain_code_section = True
+            plain_padding = start_space.match(line).groups()[0]
+            plain_padding_len = len(plain_padding)
+            buffer.append(line)
+            continue
+        elif (
+            plain_code_section
+            and line.strip()
+            and not line.startswith(" " * (plain_padding_len + 1))
+        ):
+            plain_code_section = False
+        elif match := format_directive.match(line):
             disable_format = match.groups()[0] == "off"
-        elif match := dont_format_under_directive.match(line):
-            non_code_directive = True
 
         if doctest_block:
             assert not plain_block
@@ -98,65 +136,56 @@ def format_file(
             else:
                 buffer.extend(
                     _format_block(
-                        doctest_block, exit_on_error, is_doctest=True
+                        doctest_block, exit_on_error, errors, is_doctest=True
                     )
                 )
                 doctest_block = None
-
-        if plain_block:
-            assert not doctest_block
-            if not line:
-                plain_block.append((line, line_no, None, line))
-                continue
-            elif match := plain_indent.match(line):
-                plain_block.append((line, line_no, None, match.groups()[1]))
+        elif plain_block:
+            if plain_code_section and not doctest_code_start.match(line):
+                plain_block.append(
+                    (line, line_no, None, line[plain_padding_len:])
+                )
                 continue
             else:
-                if non_code_directive:
-                    buffer.extend(line for line, _, _, _ in plain_block)
-                else:
-                    buffer.extend(
-                        _format_block(
-                            plain_block, exit_on_error, is_doctest=False
-                        )
+                buffer.extend(
+                    _format_block(
+                        plain_block, exit_on_error, errors, is_doctest=False
                     )
+                )
                 plain_block = None
-                non_code_directive = False
 
-        if match := doctest_code_start.match(line):
+        if line and (match := doctest_code_start.match(line)):
+            plain_code_section = False
             if plain_block:
                 buffer.extend(
-                    _format_block(plain_block, exit_on_error, is_doctest=False)
+                    _format_block(
+                        plain_block, exit_on_error, errors, is_doctest=False
+                    )
                 )
                 plain_block = None
             padding, code = match.groups()
             doctest_block = [(line, line_no, padding, code)]
         elif (
-            not no_plain
-            and not disable_format
-            and not last_line
-            and (match := plain_indent.match(line))
+            line and not no_plain and not disable_format and plain_code_section
         ):
-            # print('start plain', line)
             assert not doctest_block
             # start of a plain block
-            padding, code = match.groups()
-            plain_block = [(line, line_no, padding, code)]
+            plain_block = [
+                (line, line_no, plain_padding, line[plain_padding_len:])
+            ]
         else:
             buffer.append(line)
-        last_line = line
 
     if doctest_block:
         buffer.extend(
-            _format_block(doctest_block, exit_on_error, is_doctest=True)
+            _format_block(
+                doctest_block, exit_on_error, errors, is_doctest=True
+            )
         )
     if plain_block:
-        if non_code_directive:
-            buffer.extend(line for line, _, _, _ in plain_block)
-        else:
-            buffer.extend(
-                _format_block(plain_block, exit_on_error, is_doctest=False)
-            )
+        buffer.extend(
+            _format_block(plain_block, exit_on_error, errors, is_doctest=False)
+        )
     if buffer:
         # if there is nothing in the buffer something strange happened so
         # don't do anything
@@ -164,7 +193,10 @@ def format_file(
         updated = "\n".join(buffer)
         equal = original == updated
         if not check:
-            print("..done. ", "No changes" if equal else "Changes detected")
+            print(
+                f"..done. {len(errors)} error(s).",
+                "No changes" if equal else "Changes detected",
+            )
             if not equal:
                 # write only if there are changes to write
                 file.write_text(updated, "utf-8", newline="\n")
@@ -176,9 +208,7 @@ def format_file(
     if check:
         if not equal:
             print(f"File {file} would be formatted")
-        return equal
-    else:
-        return None
+    return equal, len(errors)
 
 
 def iter_files(directory) -> Iterator[Path]:
@@ -201,11 +231,26 @@ def main(
         ]
 
     if check:
-        if all(result):
+        formatting_error_counts = [e for _, e in result if e]
+        to_reformat = len([b for b, _ in result if not b])
+
+        if not to_reformat and not formatting_error_counts:
             print("All files are correctly formatted")
             exit(0)
         else:
-            print("Some file would be reformated")
+            print(
+                f"{to_reformat} file(s) would be reformatted;",
+                (
+                    f"{sum(formatting_error_counts)} formatting errors "
+                    f"reported in {len(formatting_error_counts)} files"
+                )
+                if formatting_error_counts
+                else "no formatting errors reported",
+            )
+
+            # interim, until we fix all formatting errors
+            if not to_reformat:
+                exit(0)
             exit(1)