From: Federico Caselli Date: Fri, 30 Sep 2022 21:46:42 +0000 (+0200) Subject: Improvements to code formatter X-Git-Tag: rel_2_0_0b1~24 X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=dcbda9f1eac8bcb81c8b3b1624ffebf76627a138;p=thirdparty%2Fsqlalchemy%2Fsqlalchemy.git Improvements to code formatter Change-Id: I75cf7143f3ed3bbc09aa8bc18edbce5c8af0f0be --- diff --git a/doc/build/changelog/changelog_04.rst b/doc/build/changelog/changelog_04.rst index 9261c1262b..fbaa5d9a81 100644 --- a/doc/build/changelog/changelog_04.rst +++ b/doc/build/changelog/changelog_04.rst @@ -528,21 +528,19 @@ outer joins are created for all joined-table inheriting mappers requested. Note that the auto-create of joins is not compatible with concrete table inheritance. - + The existing select_table flag on mapper() is now deprecated and is synonymous with with_polymorphic('*', select_table). Note that the underlying "guts" of select_table have been completely removed and replaced with the newer, more flexible approach. - + The new approach also automatically allows eager loads to work for subclasses, if they are present, for example:: - sess.query(Company).options( - eagerload_all( - )) + sess.query(Company).options(eagerload_all()) to load Company objects, their employees, and the 'machines' collection of employees who happen to be diff --git a/doc/build/changelog/changelog_08.rst b/doc/build/changelog/changelog_08.rst index decf365593..4164e3f587 100644 --- a/doc/build/changelog/changelog_08.rst +++ b/doc/build/changelog/changelog_08.rst @@ -970,7 +970,7 @@ del_ = delete(SomeMappedClass).where(SomeMappedClass.id == 5) - upd = update(SomeMappedClass).where(SomeMappedClass.id == 5).values(name='ed') + upd = update(SomeMappedClass).where(SomeMappedClass.id == 5).values(name="ed") .. 
change:: :tags: bug, orm @@ -2079,8 +2079,7 @@ to the original, older use case for :meth:`_query.Query.select_from`, which is that of restating the mapped entity in terms of a different selectable:: - session.query(User.name).\ - select_from(user_table.select().where(user_table.c.id > 5)) + session.query(User.name).select_from(user_table.select().where(user_table.c.id > 5)) Which produces:: @@ -2281,11 +2280,11 @@ original. Allows symmetry when using :class:`_engine.Engine` and :class:`_engine.Connection` objects as context managers:: - with conn.connect() as c: # leaves the Connection open - c.execute("...") + with conn.connect() as c: # leaves the Connection open + c.execute("...") with engine.connect() as c: # closes the Connection - c.execute("...") + c.execute("...") .. change:: :tags: engine diff --git a/doc/build/changelog/changelog_09.rst b/doc/build/changelog/changelog_09.rst index c9ec5f3a49..d00e043326 100644 --- a/doc/build/changelog/changelog_09.rst +++ b/doc/build/changelog/changelog_09.rst @@ -1708,15 +1708,15 @@ ad-hoc keyword arguments within the :attr:`.Index.kwargs` collection, after construction:: - idx = Index('a', 'b') - idx.kwargs['mysql_someargument'] = True + idx = Index("a", "b") + idx.kwargs["mysql_someargument"] = True To suit the use case of allowing custom arguments at construction time, the :meth:`.DialectKWArgs.argument_for` method now allows this registration:: - Index.argument_for('mysql', 'someargument', False) + Index.argument_for("mysql", "someargument", False) - idx = Index('a', 'b', mysql_someargument=True) + idx = Index("a", "b", mysql_someargument=True) .. seealso:: diff --git a/doc/build/changelog/migration_04.rst b/doc/build/changelog/migration_04.rst index 93a2b654fb..f68c449084 100644 --- a/doc/build/changelog/migration_04.rst +++ b/doc/build/changelog/migration_04.rst @@ -429,16 +429,24 @@ flush before each query. 
:: - mapper(Foo, foo_table, properties={ - 'bars':dynamic_loader(Bar, backref='foo', ) - }) + mapper( + Foo, + foo_table, + properties={ + "bars": dynamic_loader( + Bar, + backref="foo", + # + ) + }, + ) session = create_session(autoflush=True) foo = session.query(Foo).first() - foo.bars.append(Bar(name='lala')) + foo.bars.append(Bar(name="lala")) - for bar in foo.bars.filter(Bar.name=='lala'): + for bar in foo.bars.filter(Bar.name == "lala"): print(bar) session.commit() @@ -452,13 +460,17 @@ columns as undeferred: :: - mapper(Class, table, properties={ - 'foo' : deferred(table.c.foo, group='group1'), - 'bar' : deferred(table.c.bar, group='group1'), - 'bat' : deferred(table.c.bat, group='group1'), + mapper( + Class, + table, + properties={ + "foo": deferred(table.c.foo, group="group1"), + "bar": deferred(table.c.bar, group="group1"), + "bat": deferred(table.c.bat, group="group1"), + }, ) - session.query(Class).options(undefer_group('group1')).filter(...).all() + session.query(Class).options(undefer_group("group1")).filter(...).all() and ``eagerload_all()`` sets a chain of attributes to be eager in one pass: @@ -785,15 +797,15 @@ deprecated. 
This means that :: - my_table.select(my_table.c.id.in_(1,2,3) - my_table.select(my_table.c.id.in_(*listOfIds) + my_table.select(my_table.c.id.in_(1, 2, 3)) + my_table.select(my_table.c.id.in_(*listOfIds)) should be changed to :: - my_table.select(my_table.c.id.in_([1,2,3]) - my_table.select(my_table.c.id.in_(listOfIds) + my_table.select(my_table.c.id.in_([1, 2, 3])) + my_table.select(my_table.c.id.in_(listOfIds)) Schema and Reflection ===================== diff --git a/doc/build/changelog/migration_05.rst b/doc/build/changelog/migration_05.rst index 39bb9cb053..b8f6c0d5f8 100644 --- a/doc/build/changelog/migration_05.rst +++ b/doc/build/changelog/migration_05.rst @@ -86,10 +86,15 @@ Object Relational Mapping :: - subq = session.query(Keyword.id.label('keyword_id')).filter(Keyword.name.in_(['beans', 'carrots'])).subquery() - recipes = session.query(Recipe).filter(exists(). - where(Recipe.id==recipe_keywords.c.recipe_id). - where(recipe_keywords.c.keyword_id==subq.c.keyword_id) + subq = ( + session.query(Keyword.id.label("keyword_id")) + .filter(Keyword.name.in_(["beans", "carrots"])) + .subquery() + ) + recipes = session.query(Recipe).filter( + exists() + .where(Recipe.id == recipe_keywords.c.recipe_id) + .where(recipe_keywords.c.keyword_id == subq.c.keyword_id) ) * **Explicit ORM aliases are recommended for aliased joins** @@ -439,7 +444,7 @@ Schema/Types :: class MyType(AdaptOldConvertMethods, TypeEngine): - # ... + .. 
* The ``quote`` flag on ``Column`` and ``Table`` as well as the ``quote_schema`` flag on ``Table`` now control quoting @@ -477,10 +482,10 @@ Schema/Types dt = datetime.datetime(2008, 6, 27, 12, 0, 0, 125) # 125 usec # old way - '2008-06-27 12:00:00.125' + "2008-06-27 12:00:00.125" # new way - '2008-06-27 12:00:00.000125' + "2008-06-27 12:00:00.000125" So if an existing SQLite file-based database intends to be used across 0.4 and 0.5, you either have to upgrade the @@ -497,6 +502,7 @@ Schema/Types :: from sqlalchemy.databases.sqlite import DateTimeMixin + DateTimeMixin.__legacy_microseconds__ = True Connection Pool no longer threadlocal by default @@ -538,7 +544,7 @@ data-driven, it takes ``[args]``. :: - query.join('orders', 'items') + query.join("orders", "items") query.join(User.orders, Order.items) * the ``in_()`` method on columns and similar only accepts a @@ -584,7 +590,8 @@ Removed class MyQuery(Query): def get(self, ident): - # ... + ... + session = sessionmaker(query_cls=MyQuery)() @@ -621,6 +628,7 @@ Removed :: from sqlalchemy.orm import aliased + address_alias = aliased(Address) print(session.query(User, address_alias).join((address_alias, User.addresses)).all()) diff --git a/doc/build/changelog/migration_07.rst b/doc/build/changelog/migration_07.rst index 4763b9134c..590da68125 100644 --- a/doc/build/changelog/migration_07.rst +++ b/doc/build/changelog/migration_07.rst @@ -926,12 +926,13 @@ Using declarative, the scenario is this: :: class Parent(Base): - __tablename__ = 'parent' + __tablename__ = "parent" id = Column(Integer, primary_key=True) + class Child(Parent): - __tablename__ = 'child' - id = Column(Integer, ForeignKey('parent.id'), primary_key=True) + __tablename__ = "child" + id = Column(Integer, ForeignKey("parent.id"), primary_key=True) Above, the attribute ``Child.id`` refers to both the ``child.id`` column as well as ``parent.id`` - this due to @@ -958,15 +959,17 @@ local column: :: class Child(Parent): - __tablename__ = 'child' - id = 
Column(Integer, ForeignKey('parent.id'), primary_key=True) - some_related = relationship("SomeRelated", - primaryjoin="Child.id==SomeRelated.child_id") + __tablename__ = "child" + id = Column(Integer, ForeignKey("parent.id"), primary_key=True) + some_related = relationship( + "SomeRelated", primaryjoin="Child.id==SomeRelated.child_id" + ) + class SomeRelated(Base): - __tablename__ = 'some_related' + __tablename__ = "some_related" id = Column(Integer, primary_key=True) - child_id = Column(Integer, ForeignKey('child.id')) + child_id = Column(Integer, ForeignKey("child.id")) Prior to 0.7 the ``Child.id`` expression would reference ``Parent.id``, and it would be necessary to map ``child.id`` diff --git a/doc/build/changelog/migration_08.rst b/doc/build/changelog/migration_08.rst index 9c5b381ee9..4a07518539 100644 --- a/doc/build/changelog/migration_08.rst +++ b/doc/build/changelog/migration_08.rst @@ -71,16 +71,17 @@ entities. The new system includes these features: class Parent(Base): - __tablename__ = 'parent' + __tablename__ = "parent" id = Column(Integer, primary_key=True) - child_id_one = Column(Integer, ForeignKey('child.id')) - child_id_two = Column(Integer, ForeignKey('child.id')) + child_id_one = Column(Integer, ForeignKey("child.id")) + child_id_two = Column(Integer, ForeignKey("child.id")) child_one = relationship("Child", foreign_keys=child_id_one) child_two = relationship("Child", foreign_keys=child_id_two) + class Child(Base): - __tablename__ = 'child' + __tablename__ = "child" id = Column(Integer, primary_key=True) * relationships against self-referential, composite foreign diff --git a/doc/build/changelog/migration_09.rst b/doc/build/changelog/migration_09.rst index c5f4a31532..93fb8f1e58 100644 --- a/doc/build/changelog/migration_09.rst +++ b/doc/build/changelog/migration_09.rst @@ -721,12 +721,9 @@ Only those elements in the path that actually need :meth:`.PropComparator.of_typ need to be set as a class-bound attribute, string-based names can be 
resumed afterwards:: - session.query(Company).\ - options( - subqueryload(Company.employees.of_type(Engineer)). - subqueryload("machines") - ) - ) + session.query(Company).options( + subqueryload(Company.employees.of_type(Engineer)).subqueryload("machines") + ) **Old Way** @@ -822,17 +819,16 @@ The :func:`_expression.text` construct gains new methods: to be set flexibly:: # setup values - stmt = text("SELECT id, name FROM user " - "WHERE name=:name AND timestamp=:timestamp").\ - bindparams(name="ed", timestamp=datetime(2012, 11, 10, 15, 12, 35)) + stmt = text( + "SELECT id, name FROM user WHERE name=:name AND timestamp=:timestamp" + ).bindparams(name="ed", timestamp=datetime(2012, 11, 10, 15, 12, 35)) # setup types and/or values - stmt = text("SELECT id, name FROM user " - "WHERE name=:name AND timestamp=:timestamp").\ - bindparams( - bindparam("name", value="ed"), - bindparam("timestamp", type_=DateTime() - ).bindparam(timestamp=datetime(2012, 11, 10, 15, 12, 35)) + stmt = ( + text("SELECT id, name FROM user WHERE name=:name AND timestamp=:timestamp") + .bindparams(bindparam("name", value="ed"), bindparam("timestamp", type_=DateTime())) + .bindparam(timestamp=datetime(2012, 11, 10, 15, 12, 35)) + ) * :meth:`_expression.TextClause.columns` supersedes the ``typemap`` option of :func:`_expression.text`, returning a new construct :class:`.TextAsFrom`:: @@ -842,7 +838,8 @@ The :func:`_expression.text` construct gains new methods: stmt = stmt.alias() stmt = select([addresses]).select_from( - addresses.join(stmt), addresses.c.user_id == stmt.c.id) + addresses.join(stmt), addresses.c.user_id == stmt.c.id + ) # or into a cte(): @@ -850,7 +847,8 @@ The :func:`_expression.text` construct gains new methods: stmt = stmt.cte("x") stmt = select([addresses]).select_from( - addresses.join(stmt), addresses.c.user_id == stmt.c.id) + addresses.join(stmt), addresses.c.user_id == stmt.c.id + ) :ticket:`2877` diff --git a/doc/build/changelog/migration_10.rst 
b/doc/build/changelog/migration_10.rst index 6501911111..ee77e5a6b9 100644 --- a/doc/build/changelog/migration_10.rst +++ b/doc/build/changelog/migration_10.rst @@ -396,32 +396,29 @@ of inheritance-oriented scenarios, including: * Binding to a Mixin or Abstract Class:: class MyClass(SomeMixin, Base): - __tablename__ = 'my_table' + __tablename__ = "my_table" # ... - session = Session(binds={SomeMixin: some_engine}) + session = Session(binds={SomeMixin: some_engine}) * Binding to inherited concrete subclasses individually based on table:: class BaseClass(Base): - __tablename__ = 'base' + __tablename__ = "base" # ... + class ConcreteSubClass(BaseClass): - __tablename__ = 'concrete' + __tablename__ = "concrete" # ... - __mapper_args__ = {'concrete': True} - + __mapper_args__ = {"concrete": True} - session = Session(binds={ - base_table: some_engine, - concrete_table: some_other_engine - }) + session = Session(binds={base_table: some_engine, concrete_table: some_other_engine}) :ticket:`3035` @@ -457,10 +454,10 @@ These scenarios include: statement as well as for the SELECT used by the "fetch" strategy:: session.query(User).filter(User.id == 15).update( - {"name": "foob"}, synchronize_session='fetch') + {"name": "foob"}, synchronize_session="fetch" + ) - session.query(User).filter(User.id == 15).delete( - synchronize_session='fetch') + session.query(User).filter(User.id == 15).delete(synchronize_session="fetch") * Queries against individual columns:: @@ -470,9 +467,10 @@ These scenarios include: :obj:`.column_property`:: class User(Base): - # ... + ... 
+ + score = column_property(func.coalesce(self.tables.users.c.name, None)) - score = column_property(func.coalesce(self.tables.users.c.name, None))) session.query(func.max(User.score)).scalar() diff --git a/doc/build/changelog/migration_11.rst b/doc/build/changelog/migration_11.rst index a9ede42231..d90f49ec56 100644 --- a/doc/build/changelog/migration_11.rst +++ b/doc/build/changelog/migration_11.rst @@ -845,11 +845,13 @@ are part of the "correlate" for the subquery. Assuming the ``Person/Manager/Engineer->Company`` setup from the mapping documentation, using with_polymorphic:: - sess.query(Person.name) - .filter( - sess.query(Company.name). - filter(Company.company_id == Person.company_id). - correlate(Person).as_scalar() == "Elbonia, Inc.") + sess.query(Person.name).filter( + sess.query(Company.name) + .filter(Company.company_id == Person.company_id) + .correlate(Person) + .as_scalar() + == "Elbonia, Inc." + ) The above query now produces:: @@ -885,11 +887,13 @@ from it first:: # aliasing. paliased = aliased(Person) - sess.query(paliased.name) - .filter( - sess.query(Company.name). - filter(Company.company_id == paliased.company_id). - correlate(paliased).as_scalar() == "Elbonia, Inc.") + sess.query(paliased.name).filter( + sess.query(Company.name) + .filter(Company.company_id == paliased.company_id) + .correlate(paliased) + .as_scalar() + == "Elbonia, Inc." + ) The :func:`.aliased` construct guarantees that the "polymorphic selectable" is wrapped in a subquery. By referring to it explicitly in the correlated @@ -1130,6 +1134,7 @@ for specific exceptions:: engine = create_engine("postgresql+psycopg2://") + @event.listens_for(engine, "handle_error") def cancel_disconnect(ctx): if isinstance(ctx.original_exception, KeyboardInterrupt): @@ -2421,12 +2426,10 @@ supported by PostgreSQL 9.5 in this area:: from sqlalchemy.dialects.postgresql import insert - insert_stmt = insert(my_table). 
\\ - values(id='some_id', data='some data to insert') + insert_stmt = insert(my_table).values(id="some_id", data="some data to insert") do_update_stmt = insert_stmt.on_conflict_do_update( - index_elements=[my_table.c.id], - set_=dict(data='some data to update') + index_elements=[my_table.c.id], set_=dict(data="some data to update") ) conn.execute(do_update_stmt) diff --git a/doc/build/changelog/migration_12.rst b/doc/build/changelog/migration_12.rst index eb4f076d13..7b601a8d48 100644 --- a/doc/build/changelog/migration_12.rst +++ b/doc/build/changelog/migration_12.rst @@ -394,6 +394,7 @@ hybrid in-place, interfering with the definition on the superclass. def _set_name(self, value): self.first_name = value + class FirstNameOnly(Base): @hybrid_property def name(self): @@ -829,8 +830,7 @@ new feature allows the related features of "select in" loading and "polymorphic in" loading to make use of the baked query extension to reduce call overhead:: - stmt = select([table]).where( - table.c.col.in_(bindparam('foo', expanding=True)) + stmt = select([table]).where(table.c.col.in_(bindparam("foo", expanding=True))) conn.execute(stmt, {"foo": [1, 2, 3]}) The feature should be regarded as **experimental** within the 1.2 series. diff --git a/doc/build/changelog/migration_14.rst b/doc/build/changelog/migration_14.rst index d1933566b4..88a51c776f 100644 --- a/doc/build/changelog/migration_14.rst +++ b/doc/build/changelog/migration_14.rst @@ -504,6 +504,7 @@ mutually-dependent module imports, like this:: @util.dependency_for("sqlalchemy.sql.dml") def insert(self, dml, *args, **kw): + ... Where the above function would be rewritten to no longer have the ``dml`` parameter on the outside. 
This would confuse code-linting tools into seeing a missing parameter @@ -1282,10 +1283,11 @@ including methods such as: with engine.connect() as conn: result = conn.execute( table.select().order_by(table.c.id), - execution_options={"stream_results": True} + execution_options={"stream_results": True}, ) for chunk in result.partitions(500): # process up to 500 records + ... :meth:`_engine.Result.columns` - allows slicing and reorganizing of rows: @@ -1306,7 +1308,7 @@ first column by default but can also be selected: result = session.execute(select(User).order_by(User.id)) for user_obj in result.scalars(): - # ... + ... :meth:`_engine.Result.mappings` - instead of named-tuple rows, returns dictionaries: @@ -2154,8 +2156,9 @@ in any way:: addresses = relationship(Address, backref=backref("user", viewonly=True)) + class Address(Base): - # ... + ... u1 = session.query(User).filter_by(name="x").first() diff --git a/doc/build/changelog/migration_20.rst b/doc/build/changelog/migration_20.rst index 781d530ad2..8012358723 100644 --- a/doc/build/changelog/migration_20.rst +++ b/doc/build/changelog/migration_20.rst @@ -321,24 +321,23 @@ the SQLAlchemy project itself, the approach taken is as follows: from sqlalchemy import exc # for warnings not included in regex-based filter below, just log - warnings.filterwarnings( - "always", category=exc.RemovedIn20Warning - ) + warnings.filterwarnings("always", category=exc.RemovedIn20Warning) # for warnings related to execute() / scalar(), raise for msg in [ r"The (?:Executable|Engine)\.(?:execute|scalar)\(\) function", - r"The current statement is being autocommitted using implicit " - "autocommit,", + r"The current statement is being autocommitted using implicit autocommit,", r"The connection.execute\(\) method in SQLAlchemy 2.0 will accept " "parameters as a single dictionary or a single sequence of " "dictionaries only.", r"The Connection.connect\(\) function/method is considered legacy", r".*DefaultGenerator.execute\(\)", ]: - 
warnings.filterwarnings( - "error", message=msg, category=exc.RemovedIn20Warning, - ) + warnings.filterwarnings( + "error", + message=msg, + category=exc.RemovedIn20Warning, + ) 3. As each sub-category of warnings are resolved in the application, new warnings that are caught by the "always" filter can be added to the list @@ -1245,9 +1244,7 @@ following the table, and may include additional notes not summarized here. - :: - session.execute( - select(User) - ).scalars().all() + session.execute(select(User)).scalars().all() # or session.scalars(select(User)).all() @@ -1258,15 +1255,11 @@ following the table, and may include additional notes not summarized here. * - :: - session.query(User).\ - filter_by(name='some user').one() + session.query(User).filter_by(name="some user").one() - :: - session.execute( - select(User). - filter_by(name="some user") - ).scalar_one() + session.execute(select(User).filter_by(name="some user")).scalar_one() - :ref:`migration_20_unify_select` @@ -1274,17 +1267,11 @@ following the table, and may include additional notes not summarized here. * - :: - session.query(User).\ - filter_by(name='some user').first() - + session.query(User).filter_by(name="some user").first() - :: - session.scalars( - select(User). - filter_by(name="some user"). - limit(1) - ).first() + session.scalars(select(User).filter_by(name="some user").limit(1)).first() - :ref:`migration_20_unify_select` @@ -1292,34 +1279,22 @@ following the table, and may include additional notes not summarized here. * - :: - session.query(User).options( - joinedload(User.addresses) - ).all() + session.query(User).options(joinedload(User.addresses)).all() - :: - session.scalars( - select(User). 
- options( - joinedload(User.addresses) - ) - ).unique().all() + session.scalars(select(User).options(joinedload(User.addresses))).unique().all() - :ref:`joinedload_not_uniqued` * - :: - session.query(User).\ - join(Address).\ - filter(Address.email == 'e@sa.us').\ - all() + session.query(User).join(Address).filter(Address.email == "e@sa.us").all() - :: session.execute( - select(User). - join(Address). - where(Address.email == 'e@sa.us') + select(User).join(Address).where(Address.email == "e@sa.us") ).scalars().all() - :ref:`migration_20_unify_select` @@ -1328,37 +1303,27 @@ following the table, and may include additional notes not summarized here. * - :: - session.query(User).from_statement( - text("select * from users") - ).all() + session.query(User).from_statement(text("select * from users")).all() - :: - session.scalars( - select(User). - from_statement( - text("select * from users") - ) - ).all() + session.scalars(select(User).from_statement(text("select * from users"))).all() - :ref:`orm_queryguide_selecting_text` * - :: - session.query(User).\ - join(User.addresses).\ - options( - contains_eager(User.addresses) - ).\ - populate_existing().all() + session.query(User).join(User.addresses).options( + contains_eager(User.addresses) + ).populate_existing().all() - :: session.execute( - select(User). - join(User.addresses). - options(contains_eager(User.addresses)). - execution_options(populate_existing=True) + select(User) + .join(User.addresses) + .options(contains_eager(User.addresses)) + .execution_options(populate_existing=True) ).scalars().all() - @@ -1370,21 +1335,17 @@ following the table, and may include additional notes not summarized here. * - :: - session.query(User).\ - filter(User.name == 'foo').\ - update( - {"fullname": "Foo Bar"}, - synchronize_session="evaluate" - ) - + session.query(User).filter(User.name == "foo").update( + {"fullname": "Foo Bar"}, synchronize_session="evaluate" + ) - :: session.execute( - update(User). 
- where(User.name == 'foo'). - values(fullname="Foo Bar"). - execution_options(synchronize_session="evaluate") + update(User) + .where(User.name == "foo") + .values(fullname="Foo Bar") + .execution_options(synchronize_session="evaluate") ) - :ref:`orm_expression_update_delete` @@ -1422,25 +1383,25 @@ Legacy code examples are illustrated below:: session = Session(engine) # becomes legacy use case - user = session.query(User).filter_by(name='some user').one() + user = session.query(User).filter_by(name="some user").one() # becomes legacy use case - user = session.query(User).filter_by(name='some user').first() + user = session.query(User).filter_by(name="some user").first() # becomes legacy use case user = session.query(User).get(5) # becomes legacy use case - for user in session.query(User).join(User.addresses).filter(Address.email == 'some@email.com'): - # ... + for user in ( + session.query(User).join(User.addresses).filter(Address.email == "some@email.com") + ): + ... # becomes legacy use case users = session.query(User).options(joinedload(User.addresses)).order_by(User.id).all() # becomes legacy use case - users = session.query(User).from_statement( - text("select * from users") - ).all() + users = session.query(User).from_statement(text("select * from users")).all() # etc @@ -1484,15 +1445,13 @@ Below are some examples of how to migrate to :func:`_sql.select`:: session = Session(engine) - user = session.execute( - select(User).filter_by(name="some user") - ).scalar_one() + user = session.execute(select(User).filter_by(name="some user")).scalar_one() # for first(), no LIMIT is applied automatically; add limit(1) if LIMIT # is desired on the query - user = session.execute( - select(User).filter_by(name="some user").limit(1) - ).scalars().first() + user = ( + session.execute(select(User).filter_by(name="some user").limit(1)).scalars().first() + ) # get() moves to the Session directly user = session.get(User, 5) @@ -1500,18 +1459,22 @@ Below are some examples of 
how to migrate to :func:`_sql.select`:: for user in session.execute( select(User).join(User.addresses).filter(Address.email == "some@email.case") ).scalars(): - # ... + ... # when using joinedload() against collections, use unique() on the result - users = session.execute( - select(User).options(joinedload(User.addresses)).order_by(User.id) - ).unique().all() + users = ( + session.execute(select(User).options(joinedload(User.addresses)).order_by(User.id)) + .unique() + .all() + ) # select() has ORM-ish methods like from_statement() that only work # if the statement is against ORM entities - users = session.execute( - select(User).from_statement(text("select * from users")) - ).scalars().all() + users = ( + session.execute(select(User).from_statement(text("select * from users"))) + .scalars() + .all() + ) **Discussion** diff --git a/doc/build/changelog/whatsnew_20.rst b/doc/build/changelog/whatsnew_20.rst index c5ec3887cc..7f1180a924 100644 --- a/doc/build/changelog/whatsnew_20.rst +++ b/doc/build/changelog/whatsnew_20.rst @@ -239,12 +239,13 @@ helper): class User(Base): - __tablename__ = 'user_account' + __tablename__ = "user_account" id: Mapped[int] = mapped_column(primary_key=True) name: Mapped[str] addresses: Mapped[List["Address"]] = relationship() + class Address(Base): __tablename__ = "address" @@ -252,7 +253,6 @@ helper): email_address: Mapped[str] user_id = mapped_column(ForeignKey("user_account.id")) - With the above mapping, the attributes are typed and express themselves all the way from statement to result set:: @@ -617,10 +617,11 @@ of :class:`_types.String`, as below where use of an ``Annotated`` ``str`` called # declarative base with a type-level override, using a type that is # expected to be used in multiple places class Base(DeclarativeBase): - registry = registry(type_annotation_map={ - str50: String(50), - }) - + registry = registry( + type_annotation_map={ + str50: String(50), + } + ) Second, Declarative will extract full 
:func:`_orm.mapped_column` definitions from the left hand type if diff --git a/doc/build/core/connections.rst b/doc/build/core/connections.rst index 671d504548..0aee788def 100644 --- a/doc/build/core/connections.rst +++ b/doc/build/core/connections.rst @@ -140,15 +140,15 @@ each time the transaction is ended, and a new statement is emitted, a new transaction begins implicitly:: with engine.connect() as connection: - connection.execute() + connection.execute("") connection.commit() # commits "some statement" # new transaction starts - connection.execute() + connection.execute("") connection.rollback() # rolls back "some other statement" # new transaction starts - connection.execute() + connection.execute("") connection.commit() # commits "a third statement" .. versionadded:: 2.0 "commit as you go" style is a new feature of @@ -823,7 +823,7 @@ as the schema name is passed to these methods explicitly. to the :class:`_orm.Session`. The :class:`_orm.Session` uses a new :class:`_engine.Connection` for each transaction:: - schema_engine = engine.execution_options(schema_translate_map = { ... } ) + schema_engine = engine.execution_options(schema_translate_map={...}) session = Session(schema_engine) @@ -1470,15 +1470,20 @@ Basic guidelines include: # **Don't** do this: + def my_stmt(parameter, thing=False): stmt = lambda_stmt(lambda: select(table)) stmt += ( - lambda s: s.where(table.c.x > parameter) if thing + lambda s: s.where(table.c.x > parameter) + if thing else s.where(table.c.y == parameter) + ) return stmt + # **Do** do this: + def my_stmt(parameter, thing=False): stmt = lambda_stmt(lambda: select(table)) if thing: @@ -1501,10 +1506,10 @@ Basic guidelines include: >>> def my_stmt(x, y): ... def get_x(): ... return x - ... + ... ... def get_y(): ... return y - ... + ... ... stmt = lambda_stmt(lambda: select(func.max(get_x(), get_y()))) ... return stmt >>> with engine.connect() as conn: @@ -1524,10 +1529,10 @@ Basic guidelines include: >>> def my_stmt(x, y): ... 
def get_x(): ... return x - ... + ... ... def get_y(): ... return y - ... + ... ... x_param, y_param = get_x(), get_y() ... stmt = lambda_stmt(lambda: select(func.max(x_param, y_param))) ... return stmt @@ -2206,22 +2211,24 @@ to create a new dialect "foodialect://", the steps are as follows: which is typically a subclass of :class:`sqlalchemy.engine.default.DefaultDialect`. In this example let's say it's called ``FooDialect`` and its module is accessed via ``foodialect.dialect``. -3. The entry point can be established in setup.py as follows:: +3. The entry point can be established in ``setup.cfg`` as follows: - entry_points = """ - [sqlalchemy.dialects] - foodialect = foodialect.dialect:FooDialect - """ + .. sourcecode:: ini + + [options.entry_points] + sqlalchemy.dialects = + foodialect = foodialect.dialect:FooDialect If the dialect is providing support for a particular DBAPI on top of an existing SQLAlchemy-supported database, the name can be given including a database-qualification. For example, if ``FooDialect`` -were in fact a MySQL dialect, the entry point could be established like this:: +were in fact a MySQL dialect, the entry point could be established like this: + +.. sourcecode:: ini - entry_points = """ - [sqlalchemy.dialects] - mysql.foodialect = foodialect.dialect:FooDialect - """ + [options.entry_points] + sqlalchemy.dialects = + mysql.foodialect = foodialect.dialect:FooDialect The above entrypoint would then be accessed as ``create_engine("mysql+foodialect://")``. diff --git a/doc/build/core/operators.rst b/doc/build/core/operators.rst index 4f0c2a5af2..79a5c6f4dc 100644 --- a/doc/build/core/operators.rst +++ b/doc/build/core/operators.rst @@ -31,25 +31,25 @@ Operator Reference >>> from sqlalchemy.orm import relationship >>> class User(Base): ... __tablename__ = "user_account" - ... + ... ... id = Column(Integer, primary_key=True) ... name = Column(String(30)) ... fullname = Column(String) - ... + ... ...
addresses = relationship("Address", back_populates="user") - ... + ... ... def __repr__(self): ... return f"User(id={self.id!r}, name={self.name!r}, fullname={self.fullname!r})" >>> class Address(Base): ... __tablename__ = "address" - ... + ... ... id = Column(Integer, primary_key=True) ... email_address = Column(String, nullable=False) ... user_id = Column(Integer, ForeignKey("user_account.id")) - ... + ... ... user = relationship("User", back_populates="addresses") - ... + ... ... def __repr__(self): ... return f"Address(id={self.id!r}, email_address={self.email_address!r})" >>> conn = engine.connect() diff --git a/doc/build/core/pooling.rst b/doc/build/core/pooling.rst index 7f6523bffd..2b8a39c434 100644 --- a/doc/build/core/pooling.rst +++ b/doc/build/core/pooling.rst @@ -524,19 +524,21 @@ are three general approaches to this: engine = create_engine("mysql+mysqldb://user:pass@host/dbname") + def run_in_process(some_data_record): with engine.connect() as conn: conn.execute(text("...")) + def initializer(): """ensure the parent proc's database connections are not touched - in the new connection pool""" + in the new connection pool""" engine.dispose(close=False) + with Pool(10, initializer=initializer) as p: p.map(run_in_process, data) - .. 
versionadded:: 1.4.33 Added the :paramref:`.Engine.dispose.close` parameter to allow the replacement of a connection pool in a child process without interfering with the connections used by the parent @@ -549,10 +551,12 @@ are three general approaches to this: engine = create_engine("mysql://user:pass@host/dbname") + def run_in_process(): with engine.connect() as conn: conn.execute(text("...")) + # before process starts, ensure engine.dispose() is called engine.dispose() p = Process(target=run_in_process) diff --git a/doc/build/dialects/postgresql.rst b/doc/build/dialects/postgresql.rst index 0cb3984dde..411037f87c 100644 --- a/doc/build/dialects/postgresql.rst +++ b/doc/build/dialects/postgresql.rst @@ -101,10 +101,10 @@ was needed in order to allow this combination to work, described below. E.g.:: Table( - 'mydata', metadata, - Column('id', Integer, primary_key=True), - Column('data', ArrayOfEnum(ENUM('a', 'b, 'c', name='myenum'))) - + "mydata", + metadata, + Column("id", Integer, primary_key=True), + Column("data", ArrayOfEnum(ENUM("a", "b", "c", name="myenum"))), ) This type is not included as a built-in type as it would be incompatible diff --git a/doc/build/errors.rst b/doc/build/errors.rst index bb49138dbd..994ecc41ad 100644 --- a/doc/build/errors.rst +++ b/doc/build/errors.rst @@ -1187,7 +1187,7 @@ The above mapping will generate warnings:: The relationships ``Child.parent`` and ``Parent.children`` appear to be in conflict. The solution is to apply :paramref:`_orm.relationship.back_populates`:: - class Parent(Base): + class Parent(Base): __tablename__ = "parent" id = Column(Integer, primary_key=True) children = relationship("Child", back_populates="parent") @@ -1525,8 +1525,9 @@ and associating the :class:`_engine.Engine` with the metadata_obj = MetaData(bind=engine) Base = declarative_base(metadata=metadata_obj) + class MyClass(Base): - # ... + ... 
session = Session() @@ -1543,8 +1544,9 @@ engine:: Session = sessionmaker(engine) Base = declarative_base() + class MyClass(Base): - # ... + ... session = Session() @@ -1587,7 +1589,7 @@ The correct way to invoke statements is via the :meth:`_engine.Connection.execute` method of a :class:`_engine.Connection`:: with engine.connect() as conn: - result = conn.execute(stmt) + result = conn.execute(stmt) When using the ORM, a similar facility is available via the :class:`.Session`:: diff --git a/doc/build/faq/metadata_schema.rst b/doc/build/faq/metadata_schema.rst index 7dcb8fa417..dfb154e41f 100644 --- a/doc/build/faq/metadata_schema.rst +++ b/doc/build/faq/metadata_schema.rst @@ -62,7 +62,7 @@ This is available via the :attr:`_schema.MetaData.sorted_tables` function:: metadata_obj = MetaData() # ... add Table objects to metadata - ti = metadata_obj.sorted_tables: + ti = metadata_obj.sorted_tables for t in ti: print(t) diff --git a/doc/build/faq/performance.rst b/doc/build/faq/performance.rst index 1aa94e1c7f..3b494a7c4c 100644 --- a/doc/build/faq/performance.rst +++ b/doc/build/faq/performance.rst @@ -413,11 +413,11 @@ Common strategies to mitigate this include: * Use :class:`.Bundle` objects to organize column-based results:: - u_b = Bundle('user', User.id, User.name) - a_b = Bundle('address', Address.id, Address.email) + u_b = Bundle("user", User.id, User.name) + a_b = Bundle("address", Address.id, Address.email) for user, address in session.execute(select(u_b, a_b).join(User.addresses)): - # ... + ... * Use result caching - see :ref:`examples_caching` for an in-depth example of this. diff --git a/doc/build/faq/sessions.rst b/doc/build/faq/sessions.rst index 2642ed5a8e..e48ff0ec42 100644 --- a/doc/build/faq/sessions.rst +++ b/doc/build/faq/sessions.rst @@ -118,13 +118,13 @@ does not properly handle the exception. 
For example:: The usage of the :class:`.Session` should fit within a structure similar to this:: try: - + # session.commit() except: - session.rollback() - raise + session.rollback() + raise finally: - session.close() # optional, depends on use case + session.close() # optional, depends on use case Many things can cause a failure within the try/except besides flushes. Applications should ensure some system of "framing" is applied to ORM-oriented diff --git a/doc/build/glossary.rst b/doc/build/glossary.rst index 3e5ce4d3c9..9f73f0a7f4 100644 --- a/doc/build/glossary.rst +++ b/doc/build/glossary.rst @@ -309,7 +309,7 @@ Glossary on mapped classes. When a class is mapped as such:: class MyClass(Base): - __tablename__ = 'foo' + __tablename__ = "foo" id = Column(Integer, primary_key=True) data = Column(String) @@ -643,10 +643,12 @@ Glossary as an ORDER BY clause by calling upon the :meth:`_expression.Select.where` and :meth:`_expression.Select.order_by` methods:: - stmt = select(user.c.name).\ - where(user.c.id > 5).\ - where(user.c.name.like('e%').\ - order_by(user.c.name) + stmt = ( + select(user.c.name) + .where(user.c.id > 5) + .where(user.c.name.like("e%")) + .order_by(user.c.name) + ) Each method call above returns a copy of the original :class:`_expression.Select` object with additional qualifiers @@ -1126,16 +1128,17 @@ Glossary single department. A SQLAlchemy mapping might look like:: class Department(Base): - __tablename__ = 'department' + __tablename__ = "department" id = Column(Integer, primary_key=True) name = Column(String(30)) employees = relationship("Employee") + class Employee(Base): - __tablename__ = 'employee' + __tablename__ = "employee" id = Column(Integer, primary_key=True) name = Column(String(30)) - dep_id = Column(Integer, ForeignKey('department.id')) + dep_id = Column(Integer, ForeignKey("department.id")) .. seealso:: @@ -1177,15 +1180,16 @@ Glossary single department. 
A SQLAlchemy mapping might look like:: class Department(Base): - __tablename__ = 'department' + __tablename__ = "department" id = Column(Integer, primary_key=True) name = Column(String(30)) + class Employee(Base): - __tablename__ = 'employee' + __tablename__ = "employee" id = Column(Integer, primary_key=True) name = Column(String(30)) - dep_id = Column(Integer, ForeignKey('department.id')) + dep_id = Column(Integer, ForeignKey("department.id")) department = relationship("Department") .. seealso:: @@ -1210,16 +1214,17 @@ Glossary used in :term:`one to many` as follows:: class Department(Base): - __tablename__ = 'department' + __tablename__ = "department" id = Column(Integer, primary_key=True) name = Column(String(30)) employees = relationship("Employee", backref="department") + class Employee(Base): - __tablename__ = 'employee' + __tablename__ = "employee" id = Column(Integer, primary_key=True) name = Column(String(30)) - dep_id = Column(Integer, ForeignKey('department.id')) + dep_id = Column(Integer, ForeignKey("department.id")) A backref can be applied to any relationship, including one to many, many to one, and :term:`many to many`. 
@@ -1271,24 +1276,25 @@ Glossary specified using plain table metadata:: class Employee(Base): - __tablename__ = 'employee' + __tablename__ = "employee" id = Column(Integer, primary_key=True) name = Column(String(30)) projects = relationship( "Project", - secondary=Table('employee_project', Base.metadata, - Column("employee_id", Integer, ForeignKey('employee.id'), - primary_key=True), - Column("project_id", Integer, ForeignKey('project.id'), - primary_key=True) - ), - backref="employees" - ) + secondary=Table( + "employee_project", + Base.metadata, + Column("employee_id", Integer, ForeignKey("employee.id"), primary_key=True), + Column("project_id", Integer, ForeignKey("project.id"), primary_key=True), + ), + backref="employees", + ) + class Project(Base): - __tablename__ = 'project' + __tablename__ = "project" id = Column(Integer, primary_key=True) name = Column(String(30)) @@ -1384,30 +1390,29 @@ Glossary A SQLAlchemy declarative mapping for the above might look like:: class Employee(Base): - __tablename__ = 'employee' + __tablename__ = "employee" id = Column(Integer, primary_key=True) name = Column(String(30)) class Project(Base): - __tablename__ = 'project' + __tablename__ = "project" id = Column(Integer, primary_key=True) name = Column(String(30)) class EmployeeProject(Base): - __tablename__ = 'employee_project' + __tablename__ = "employee_project" - employee_id = Column(Integer, ForeignKey('employee.id'), primary_key=True) - project_id = Column(Integer, ForeignKey('project.id'), primary_key=True) + employee_id = Column(Integer, ForeignKey("employee.id"), primary_key=True) + project_id = Column(Integer, ForeignKey("project.id"), primary_key=True) role_name = Column(String(30)) project = relationship("Project", backref="project_employees") employee = relationship("Employee", backref="employee_projects") - Employees can be added to a project given a role name:: proj = Project(name="Client A") @@ -1415,10 +1420,12 @@ Glossary emp1 = Employee(name="emp1") emp2 = 
Employee(name="emp2") - proj.project_employees.extend([ - EmployeeProject(employee=emp1, role_name="tech lead"), - EmployeeProject(employee=emp2, role_name="account executive") - ]) + proj.project_employees.extend( + [ + EmployeeProject(employee=emp1, role_name="tech lead"), + EmployeeProject(employee=emp2, role_name="account executive"), + ] + ) .. seealso:: diff --git a/doc/build/orm/collections.rst b/doc/build/orm/collections.rst index 8f4c095ccf..226870d67b 100644 --- a/doc/build/orm/collections.rst +++ b/doc/build/orm/collections.rst @@ -241,7 +241,6 @@ Or for a ``set``, illustrated in the same # use a List, Python 3.8 and earlier children: Mapped[List["Child"]] = relationship() - When using mappings without the :class:`_orm.Mapped` annotation, such as when using :ref:`imperative mappings ` or untyped Python code, as well as in a few special cases, the collection class for a @@ -666,14 +665,16 @@ methods can be changed as well: from sqlalchemy.orm.collections import collection + class MyList(list): @collection.remover def zark(self, item): # do something special... + ... @collection.iterator def hey_use_this_instead_for_iteration(self): - # ... + ... There is no requirement to be list-, or set-like at all. 
Collection classes can be any shape, so long as they have the append, remove and iterate diff --git a/doc/build/orm/declarative_tables.rst b/doc/build/orm/declarative_tables.rst index 9faba2f3a0..d1d45178d1 100644 --- a/doc/build/orm/declarative_tables.rst +++ b/doc/build/orm/declarative_tables.rst @@ -252,11 +252,13 @@ The two qualities that :func:`_orm.mapped_column` derives from the from sqlalchemy.orm import Mapped from sqlalchemy.orm import mapped_column + class Base(DeclarativeBase): pass + class SomeClass(Base): - __tablename__ = 'some_table' + __tablename__ = "some_table" # primary_key=True, therefore will be NOT NULL id: Mapped[int] = mapped_column(primary_key=True) diff --git a/doc/build/orm/extensions/asyncio.rst b/doc/build/orm/extensions/asyncio.rst index 3a81bf9bb3..ea7b9eedc7 100644 --- a/doc/build/orm/extensions/asyncio.rst +++ b/doc/build/orm/extensions/asyncio.rst @@ -349,9 +349,7 @@ Other guidelines include: user = await session.get(User, 42) addresses = (await session.scalars(user.addresses.statement)).all() - stmt = user.addresses.statement.where( - Address.email_address.startswith("patrick") - ) + stmt = user.addresses.statement.where(Address.email_address.startswith("patrick")) addresses_filter = (await session.scalars(stmt)).all() .. seealso:: @@ -678,10 +676,12 @@ value will be invoked after being returned:: @event.listens_for(engine.sync_engine, "connect") - def register_custom_types(dbapi_connection, ...): + def register_custom_types(dbapi_connection, *args): dbapi_connection.run_async( lambda connection: connection.set_type_codec( - "MyCustomType", encoder, decoder, ... + "MyCustomType", + encoder, + decoder, # ... 
) ) diff --git a/doc/build/orm/mapping_api.rst b/doc/build/orm/mapping_api.rst index 72eea5cc65..921291a170 100644 --- a/doc/build/orm/mapping_api.rst +++ b/doc/build/orm/mapping_api.rst @@ -55,18 +55,19 @@ Class Mapping API @declared_attr.cascading def id(cls): if has_inherited_table(cls): - return Column( - ForeignKey('myclass.id'), primary_key=True - ) + return Column(ForeignKey("myclass.id"), primary_key=True) else: return Column(Integer, primary_key=True) + class MyClass(HasIdMixin, Base): - __tablename__ = 'myclass' + __tablename__ = "myclass" # ... + class MySubClass(MyClass): - "" + """""" + # ... The behavior of the above configuration is that ``MySubClass`` diff --git a/doc/build/orm/nonstandard_mappings.rst b/doc/build/orm/nonstandard_mappings.rst index d461b63c64..d71343e99f 100644 --- a/doc/build/orm/nonstandard_mappings.rst +++ b/doc/build/orm/nonstandard_mappings.rst @@ -110,9 +110,10 @@ may be used:: from sqlalchemy import event - @event.listens_for(PtoQ, 'before_update') + + @event.listens_for(PtoQ, "before_update") def receive_before_update(mapper, connection, target): - if target.some_required_attr_on_q is None: + if target.some_required_attr_on_q is None: connection.execute(q_table.insert(), {"id": target.id}) where above, a row is INSERTed into the ``q_table`` table by creating an diff --git a/doc/build/orm/persistence_techniques.rst b/doc/build/orm/persistence_techniques.rst index 789cd739c8..abcd758043 100644 --- a/doc/build/orm/persistence_techniques.rst +++ b/doc/build/orm/persistence_techniques.rst @@ -45,16 +45,17 @@ retrieved by the ORM as part of the object's primary key:: class Foo(Base): - __tablename__ = 'foo' + __tablename__ = "foo" pk = mapped_column(Integer, primary_key=True) bar = mapped_column(Integer) + e = create_engine("postgresql+psycopg2://scott:tiger@localhost/test", echo=True) Base.metadata.create_all(e) session = Session(e) - foo = Foo(pk=sql.select(sql.func.coalesce(sql.func.max(Foo.pk) + 1, 1)) + foo = 
Foo(pk=sql.select(sql.func.coalesce(sql.func.max(Foo.pk) + 1, 1))) session.add(foo) session.commit() @@ -632,31 +633,36 @@ connections:: from sqlalchemy.orm import DeclarativeBase from sqlalchemy.orm import Session + class BaseA(DeclarativeBase): pass + class BaseB(DeclarativeBase): pass + class User(BaseA): - # ... + ... + class Address(BaseA): - # ... + ... class GameInfo(BaseB): - # ... + ... + class GameStats(BaseB): - # ... + ... Session = sessionmaker() # all User/Address operations will be on engine 1, all # Game operations will be on engine 2 - Session.configure(binds={BaseA:engine1, BaseB:engine2}) + Session.configure(binds={BaseA: engine1, BaseB: engine2}) Above, classes which descend from ``BaseA`` and ``BaseB`` will have their SQL operations routed to one of two engines based on which superclass diff --git a/doc/build/orm/queryguide/_deferred_setup.rst b/doc/build/orm/queryguide/_deferred_setup.rst index a9bbaac8b8..e75630c46e 100644 --- a/doc/build/orm/queryguide/_deferred_setup.rst +++ b/doc/build/orm/queryguide/_deferred_setup.rst @@ -33,7 +33,7 @@ This page illustrates the mappings and fixture data used by the ... name: Mapped[str] ... fullname: Mapped[Optional[str]] ... books: Mapped[List["Book"]] = relationship(back_populates="owner") - ... + ... ... def __repr__(self) -> str: ... return f"User(id={self.id!r}, name={self.name!r}, fullname={self.fullname!r})" >>> class Book(Base): @@ -44,7 +44,7 @@ This page illustrates the mappings and fixture data used by the ... summary: Mapped[str] = mapped_column(Text) ... cover_photo: Mapped[bytes] = mapped_column(LargeBinary) ... owner: Mapped["User"] = relationship(back_populates="books") - ... + ... ... def __repr__(self) -> str: ... 
return f"Book(id={self.id!r}, title={self.title!r})" >>> engine = create_engine("sqlite+pysqlite:///:memory:", echo=True) diff --git a/doc/build/orm/queryguide/_dml_setup.rst b/doc/build/orm/queryguide/_dml_setup.rst index bae0cce3dc..07f053980c 100644 --- a/doc/build/orm/queryguide/_dml_setup.rst +++ b/doc/build/orm/queryguide/_dml_setup.rst @@ -33,7 +33,7 @@ This page illustrates the mappings and fixture data used by the ... fullname: Mapped[Optional[str]] ... species: Mapped[Optional[str]] ... addresses: Mapped[List["Address"]] = relationship(back_populates="user") - ... + ... ... def __repr__(self) -> str: ... return f"User(name={self.name!r}, fullname={self.fullname!r})" >>> class Address(Base): @@ -42,7 +42,7 @@ This page illustrates the mappings and fixture data used by the ... user_id: Mapped[int] = mapped_column(ForeignKey("user_account.id")) ... email_address: Mapped[str] ... user: Mapped[User] = relationship(back_populates="addresses") - ... + ... ... def __repr__(self) -> str: ... return f"Address(email_address={self.email_address!r})" >>> class LogRecord(Base): @@ -51,7 +51,7 @@ This page illustrates the mappings and fixture data used by the ... message: Mapped[str] ... code: Mapped[str] ... timestamp: Mapped[datetime.datetime] - ... + ... ... def __repr__(self): ... return f"LogRecord({self.message!r}, {self.code!r}, {self.timestamp!r})" @@ -60,10 +60,10 @@ This page illustrates the mappings and fixture data used by the ... id: Mapped[int] = mapped_column(primary_key=True) ... name: Mapped[str] ... type: Mapped[str] - ... + ... ... def __repr__(self): ... return f"{self.__class__.__name__}({self.name!r})" - ... + ... ... __mapper_args__ = { ... "polymorphic_identity": "employee", ... "polymorphic_on": "type", @@ -72,10 +72,10 @@ This page illustrates the mappings and fixture data used by the ... __tablename__ = "manager" ... id: Mapped[int] = mapped_column(ForeignKey("employee.id"), primary_key=True) ... manager_name: Mapped[str] - ... + ... ... 
def __repr__(self): ... return f"{self.__class__.__name__}({self.name!r}, manager_name={self.manager_name!r})" - ... + ... ... __mapper_args__ = { ... "polymorphic_identity": "manager", ... } @@ -83,10 +83,10 @@ This page illustrates the mappings and fixture data used by the ... __tablename__ = "engineer" ... id: Mapped[int] = mapped_column(ForeignKey("employee.id"), primary_key=True) ... engineer_info: Mapped[str] - ... + ... ... def __repr__(self): ... return f"{self.__class__.__name__}({self.name!r}, engineer_info={self.engineer_info!r})" - ... + ... ... __mapper_args__ = { ... "polymorphic_identity": "engineer", ... } diff --git a/doc/build/orm/queryguide/_inheritance_setup.rst b/doc/build/orm/queryguide/_inheritance_setup.rst index addddda4e2..c98a83035a 100644 --- a/doc/build/orm/queryguide/_inheritance_setup.rst +++ b/doc/build/orm/queryguide/_inheritance_setup.rst @@ -35,10 +35,10 @@ the :ref:`queryguide_toplevel`. ... type: Mapped[str] ... company_id: Mapped[int] = mapped_column(ForeignKey("company.id")) ... company: Mapped[Company] = relationship(back_populates="employees") - ... + ... ... def __repr__(self): ... return f"{self.__class__.__name__}({self.name!r})" - ... + ... ... __mapper_args__ = { ... "polymorphic_identity": "employee", ... "polymorphic_on": "type", @@ -57,7 +57,7 @@ the :ref:`queryguide_toplevel`. ... id: Mapped[int] = mapped_column(primary_key=True) ... manager_id: Mapped[int] = mapped_column(ForeignKey("manager.id")) ... document_name: Mapped[str] - ... + ... ... def __repr__(self): ... return f"Paperwork({self.document_name!r})" >>> diff --git a/doc/build/orm/queryguide/_plain_setup.rst b/doc/build/orm/queryguide/_plain_setup.rst index 7166fe9b8a..af4e5b5c8a 100644 --- a/doc/build/orm/queryguide/_plain_setup.rst +++ b/doc/build/orm/queryguide/_plain_setup.rst @@ -32,7 +32,7 @@ This page illustrates the mappings and fixture data used by the ... fullname: Mapped[Optional[str]] ... 
addresses: Mapped[List["Address"]] = relationship(back_populates="user") ... orders: Mapped[List["Order"]] = relationship() - ... + ... ... def __repr__(self) -> str: ... return f"User(id={self.id!r}, name={self.name!r}, fullname={self.fullname!r})" >>> class Address(Base): @@ -41,7 +41,7 @@ This page illustrates the mappings and fixture data used by the ... user_id: Mapped[int] = mapped_column(ForeignKey("user_account.id")) ... email_address: Mapped[str] ... user: Mapped[User] = relationship(back_populates="addresses") - ... + ... ... def __repr__(self) -> str: ... return f"Address(id={self.id!r}, email_address={self.email_address!r})" >>> order_items_table = Table( diff --git a/doc/build/orm/queryguide/_single_inheritance.rst b/doc/build/orm/queryguide/_single_inheritance.rst index 546f87a193..158326e1e2 100644 --- a/doc/build/orm/queryguide/_single_inheritance.rst +++ b/doc/build/orm/queryguide/_single_inheritance.rst @@ -27,10 +27,10 @@ the :ref:`queryguide_toplevel`. ... id: Mapped[int] = mapped_column(primary_key=True) ... name: Mapped[str] ... type: Mapped[str] - ... + ... ... def __repr__(self): ... return f"{self.__class__.__name__}({self.name!r})" - ... + ... ... __mapper_args__ = { ... "polymorphic_identity": "employee", ... "polymorphic_on": "type", diff --git a/doc/build/orm/queryguide/api.rst b/doc/build/orm/queryguide/api.rst index f9a92e3316..5b6209d3db 100644 --- a/doc/build/orm/queryguide/api.rst +++ b/doc/build/orm/queryguide/api.rst @@ -88,10 +88,10 @@ E.g. to refresh an instance while also refreshing a related set of objects: .. sourcecode:: python stmt = ( - select(User). - where(User.name.in_(names)). - execution_options(populate_existing=True). 
- options(selectinload(User.addresses) + select(User) + .where(User.name.in_(names)) + .execution_options(populate_existing=True) + .options(selectinload(User.addresses)) ) # will refresh all matching User objects as well as the related # Address objects diff --git a/doc/build/orm/queryguide/columns.rst b/doc/build/orm/queryguide/columns.rst index 19538c698e..78b51b36c2 100644 --- a/doc/build/orm/queryguide/columns.rst +++ b/doc/build/orm/queryguide/columns.rst @@ -331,7 +331,7 @@ unconditionally on every query. To configure, use the ... title: Mapped[str] ... summary: Mapped[str] = mapped_column(Text, deferred=True) ... cover_photo: Mapped[bytes] = mapped_column(LargeBinary, deferred=True) - ... + ... ... def __repr__(self) -> str: ... return f"Book(id={self.id!r}, title={self.title!r})" @@ -492,7 +492,7 @@ undeferred:: ... cover_photo: Mapped[bytes] = mapped_column( ... LargeBinary, deferred=True, deferred_group="book_attrs" ... ) - ... + ... ... def __repr__(self) -> str: ... return f"Book(id={self.id!r}, title={self.title!r})" @@ -573,7 +573,7 @@ will raise on access in all cases unless explicitly "undeferred" using ... cover_photo: Mapped[bytes] = mapped_column( ... LargeBinary, deferred=True, deferred_raiseload=True ... ) - ... + ... ... def __repr__(self) -> str: ... return f"Book(id={self.id!r}, title={self.title!r})" @@ -626,7 +626,7 @@ Loading Arbitrary SQL Expressions onto Objects ... name: Mapped[str] ... fullname: Mapped[Optional[str]] ... books: Mapped[List["Book"]] = relationship(back_populates="owner") - ... + ... ... def __repr__(self) -> str: ... return f"User(id={self.id!r}, name={self.name!r}, fullname={self.fullname!r})" >>> class Book(Base): @@ -637,7 +637,7 @@ Loading Arbitrary SQL Expressions onto Objects ... summary: Mapped[str] = mapped_column(Text) ... cover_photo: Mapped[bytes] = mapped_column(LargeBinary) ... owner: Mapped["User"] = relationship(back_populates="books") - ... + ... ... def __repr__(self) -> str: ... 
return f"Book(id={self.id!r}, title={self.title!r})" @@ -685,7 +685,7 @@ level :func:`_orm.query_expression` directive may produce this result. ... title: Mapped[str] ... summary: Mapped[str] = mapped_column(Text) ... cover_photo: Mapped[bytes] = mapped_column(LargeBinary) - ... + ... ... def __repr__(self) -> str: ... return f"Book(id={self.id!r}, title={self.title!r})" @@ -705,7 +705,7 @@ normally produce ``None``:: ... name: Mapped[str] ... fullname: Mapped[Optional[str]] ... book_count: Mapped[int] = query_expression() - ... + ... ... def __repr__(self) -> str: ... return f"User(id={self.id!r}, name={self.name!r}, fullname={self.fullname!r})" diff --git a/doc/build/orm/queryguide/inheritance.rst b/doc/build/orm/queryguide/inheritance.rst index 4506f4ffc7..ea5316cecd 100644 --- a/doc/build/orm/queryguide/inheritance.rst +++ b/doc/build/orm/queryguide/inheritance.rst @@ -939,10 +939,10 @@ is below:: ... id: Mapped[int] = mapped_column(primary_key=True) ... name: Mapped[str] ... type: Mapped[str] - ... + ... ... def __repr__(self): ... return f"{self.__class__.__name__}({self.name!r})" - ... + ... ... __mapper_args__ = { ... "polymorphic_identity": "employee", ... "polymorphic_on": "type", diff --git a/doc/build/orm/queryguide/relationships.rst b/doc/build/orm/queryguide/relationships.rst index 4c8125f5a4..f2be63c89e 100644 --- a/doc/build/orm/queryguide/relationships.rst +++ b/doc/build/orm/queryguide/relationships.rst @@ -272,9 +272,12 @@ the :meth:`_orm.Load.options` method:: :ref:`orm_queryguide_populate_existing` execution option:: # change the options on Parent objects that were already loaded - stmt = select(Parent).execution_options(populate_existing=True).options( - lazyload(Parent.children). 
- lazyload(Child.subelements)).all() + stmt = ( + select(Parent) + .execution_options(populate_existing=True) + .options(lazyload(Parent.children).lazyload(Child.subelements)) + .all() + ) If the objects loaded above are fully cleared from the :class:`.Session`, such as due to garbage collection or that :meth:`.Session.expunge_all` @@ -1116,7 +1119,7 @@ the specific :func:`_orm.aliased` construct to be passed: stmt = ( select(User). outerjoin(User.addresses.of_type(adalias)). - options(contains_eager(User.addresses.of_type(adalias)) + options(contains_eager(User.addresses.of_type(adalias))) ) # get results normally diff --git a/doc/build/orm/quickstart.rst b/doc/build/orm/quickstart.rst index 6a20eb9771..039bf3d703 100644 --- a/doc/build/orm/quickstart.rst +++ b/doc/build/orm/quickstart.rst @@ -40,27 +40,27 @@ real SQL tables that exist, or will exist, in a particular database:: >>> class User(Base): ... __tablename__ = "user_account" - ... + ... ... id: Mapped[int] = mapped_column(primary_key=True) ... name: Mapped[str] = mapped_column(String(30)) ... fullname: Mapped[Optional[str]] - ... + ... ... addresses: Mapped[list["Address"]] = relationship( ... back_populates="user", cascade="all, delete-orphan" ... ) - ... + ... ... def __repr__(self) -> str: ... return f"User(id={self.id!r}, name={self.name!r}, fullname={self.fullname!r})" >>> class Address(Base): ... __tablename__ = "address" - ... + ... ... id: Mapped[int] = mapped_column(primary_key=True) ... email_address: Mapped[str] ... user_id: Mapped[int] = mapped_column(ForeignKey("user_account.id")) - ... + ... ... user: Mapped["User"] = relationship(back_populates="addresses") - ... + ... ... def __repr__(self) -> str: ... return f"Address(id={self.id!r}, email_address={self.email_address!r})" @@ -198,7 +198,7 @@ is used: >>> from sqlalchemy.orm import Session >>> with Session(engine) as session: - ... + ... ... spongebob = User( ... name="spongebob", ... 
fullname="Spongebob Squarepants", @@ -213,9 +213,9 @@ is used: ... ], ... ) ... patrick = User(name="patrick", fullname="Patrick Star") - ... + ... ... session.add_all([spongebob, sandy, patrick]) - ... + ... ... session.commit() {opensql}BEGIN (implicit) INSERT INTO user_account (name, fullname) VALUES (?, ?), (?, ?), (?, ?) RETURNING id diff --git a/doc/build/orm/session_state_management.rst b/doc/build/orm/session_state_management.rst index 7538fb4a1f..5d1848812e 100644 --- a/doc/build/orm/session_state_management.rst +++ b/doc/build/orm/session_state_management.rst @@ -532,9 +532,9 @@ be that of a column-mapped attribute:: will be refreshed with data from the database:: stmt = ( - select(User). - execution_options(populate_existing=True). - where((User.name.in_(['a', 'b', 'c'])) + select(User) + .execution_options(populate_existing=True) + .where((User.name.in_(["a", "b", "c"]))) ) for user in session.execute(stmt).scalars(): print(user) # will be refreshed for those columns that came back from the query diff --git a/doc/build/orm/session_transaction.rst b/doc/build/orm/session_transaction.rst index 164fea347a..e8dd484599 100644 --- a/doc/build/orm/session_transaction.rst +++ b/doc/build/orm/session_transaction.rst @@ -60,13 +60,13 @@ or rolled back:: session.commit() # commits # will automatically begin again - result = session.execute(< some select statement >) + result = session.execute("< some select statement >") session.add_all([more_objects, ...]) session.commit() # commits session.add(still_another_object) session.flush() # flush still_another_object - session.rollback() # rolls back still_another_object + session.rollback() # rolls back still_another_object The :class:`_orm.Session` itself features a :meth:`_orm.Session.close` method. 
If the :class:`_orm.Session` is begun within a transaction that @@ -120,9 +120,7 @@ Similarly, the :class:`_orm.sessionmaker` can be used in the same way:: method to allow both operations to take place at once:: with Session.begin() as session: - session.add(some_object): - - + session.add(some_object) .. _session_begin_nested: @@ -530,12 +528,11 @@ used in a read-only fashion**, that is:: with autocommit_session() as session: - some_objects = session.execute() - some_other_objects = session.execute() + some_objects = session.execute("") + some_other_objects = session.execute("") # closes connection - Setting Isolation for Individual Sessions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/build/tutorial/dbapi_transactions.rst b/doc/build/tutorial/dbapi_transactions.rst index 58a4e3ab58..89b8f423d2 100644 --- a/doc/build/tutorial/dbapi_transactions.rst +++ b/doc/build/tutorial/dbapi_transactions.rst @@ -248,7 +248,7 @@ Below we illustrate a variety of ways to access rows. result = conn.execute(text("select x, y from some_table")) for x, y in result: - # ... + ... * **Integer Index** - Tuples are Python sequences, so regular integer access is available too: @@ -256,8 +256,8 @@ Below we illustrate a variety of ways to access rows. result = conn.execute(text("select x, y from some_table")) - for row in result: - x = row[0] + for row in result: + x = row[0] * **Attribute Name** - As these are Python named tuples, the tuples have dynamic attribute names matching the names of each column. These names are normally the names that the @@ -286,8 +286,8 @@ Below we illustrate a variety of ways to access rows. result = conn.execute(text("select x, y from some_table")) for dict_row in result.mappings(): - x = dict_row['x'] - y = dict_row['y'] + x = dict_row["x"] + y = dict_row["y"] .. 
diff --git a/doc/build/tutorial/metadata.rst b/doc/build/tutorial/metadata.rst index 1b56994118..7816bae677 100644 --- a/doc/build/tutorial/metadata.rst +++ b/doc/build/tutorial/metadata.rst @@ -332,25 +332,25 @@ types:: >>> class User(Base): ... __tablename__ = "user_account" - ... + ... ... id: Mapped[int] = mapped_column(primary_key=True) ... name: Mapped[str] = mapped_column(String(30)) ... fullname: Mapped[Optional[str]] - ... + ... ... addresses: Mapped[List["Address"]] = relationship(back_populates="user") - ... + ... ... def __repr__(self) -> str: ... return f"User(id={self.id!r}, name={self.name!r}, fullname={self.fullname!r})" >>> class Address(Base): ... __tablename__ = "address" - ... + ... ... id: Mapped[int] = mapped_column(primary_key=True) ... email_address: Mapped[str] ... user_id = mapped_column(ForeignKey("user_account.id")) - ... + ... ... user: Mapped[User] = relationship(back_populates="addresses") - ... + ... ... def __repr__(self) -> str: ... return f"Address(id={self.id!r}, email_address={self.email_address!r})" @@ -423,15 +423,15 @@ about these classes include: optional. Our mapping above can be written without annotations as:: class User(Base): - __tablename__ = 'user_account' + __tablename__ = "user_account" - id = mapped_column(Integer, primary_key=True) - name = mapped_column(String(30), nullable=False) - fullname = mapped_column(String) + id = mapped_column(Integer, primary_key=True) + name = mapped_column(String(30), nullable=False) + fullname = mapped_column(String) - addresses = relationship("Address", back_populates="user") + addresses = relationship("Address", back_populates="user") - # ... definition continues + # ... 
definition continues The above class has an advantage over one that uses :class:`.Column` directly, in that the ``User`` class as well as instances of ``User`` diff --git a/tools/format_docs_code.py b/tools/format_docs_code.py index 04dc59d36c..18ea5315e2 100644 --- a/tools/format_docs_code.py +++ b/tools/format_docs_code.py @@ -4,11 +4,11 @@ from collections.abc import Iterator from pathlib import Path import re -from black import DEFAULT_LINE_LENGTH from black import format_str -from black import Mode -from black import parse_pyproject_toml -from black import TargetVersion +from black.const import DEFAULT_LINE_LENGTH +from black.files import parse_pyproject_toml +from black.mode import Mode +from black.mode import TargetVersion home = Path(__file__).parent.parent @@ -17,14 +17,28 @@ _Block = list[tuple[str, int, str | None, str]] def _format_block( - input_block: _Block, exit_on_error: bool, is_doctest: bool + input_block: _Block, + exit_on_error: bool, + errors: list[tuple[int, str, Exception]], + is_doctest: bool, ) -> list[str]: - code = "\n".join(c for *_, c in input_block) + if not is_doctest: + # The first line may have additional padding. 
Remove then restore later + add_padding = start_space.match(input_block[0][3]).groups()[0] + skip = len(add_padding) + code = "\n".join( + c[skip:] if c.startswith(add_padding) else c + for *_, c in input_block + ) + else: + add_padding = None + code = "\n".join(c for *_, c in input_block) try: formatted = format_str(code, mode=BLACK_MODE) except Exception as e: + start_line = input_block[0][1] + errors.append((start_line, code, e)) if is_doctest: - start_line = input_block[0][1] print( "Could not format code block starting at " f"line {start_line}:\n{code}\nError: {e}" @@ -35,7 +49,6 @@ def _format_block( else: print("Ignoring error") elif VERBOSE: - start_line = input_block[0][1] print( "Could not format code block starting at " f"line {start_line}:\n---\n{code}\n---Error: {e}" @@ -47,16 +60,14 @@ def _format_block( if is_doctest: formatted_lines = [ f"{padding}>>> {formatted_code_lines[0]}", - *(f"{padding}... {fcl}" for fcl in formatted_code_lines[1:]), + *( + f"{padding}...{' ' if fcl else ''}{fcl}" + for fcl in formatted_code_lines[1:] + ), ] else: - # The first line may have additional padding. - # If it does restore it - additionalPadding = re.match( - r"^(\s*)[^ ]?", input_block[0][3] - ).groups()[0] formatted_lines = [ - f"{padding}{additionalPadding}{fcl}" if fcl else fcl + f"{padding}{add_padding}{fcl}" if fcl else fcl for fcl in formatted_code_lines ] if not input_block[-1][0] and formatted_lines[-1]: @@ -65,30 +76,57 @@ def _format_block( return formatted_lines +format_directive = re.compile(r"^\.\.\s*format\s*:\s*(on|off)\s*$") + doctest_code_start = re.compile(r"^(\s+)>>>\s?(.+)") doctest_code_continue = re.compile(r"^\s+\.\.\.\s?(\s*.*)") -plain_indent = re.compile(r"^(\s{4})(\s*[^: ].*)") -format_directive = re.compile(r"^\.\.\s*format\s*:\s*(on|off)\s*$") -dont_format_under_directive = re.compile(r"^\.\. 
(?:toctree)::\s*$") + +start_code_section = re.compile( + r"^(((?!\.\.).+::)|(\.\.\s*sourcecode::(.*py.*)?)|(::))$" +) +start_space = re.compile(r"^(\s*)[^ ]?") def format_file( file: Path, exit_on_error: bool, check: bool, no_plain: bool -) -> bool | None: +) -> tuple[bool, int]: buffer = [] if not check: print(f"Running file {file} ..", end="") original = file.read_text("utf-8") doctest_block: _Block | None = None plain_block: _Block | None = None - last_line = None + + plain_code_section = False + plain_padding = None + plain_padding_len = None + + errors = [] + disable_format = False - non_code_directive = False for line_no, line in enumerate(original.splitlines(), 1): - if match := format_directive.match(line): + # start_code_section requires no spaces at the start + if start_code_section.match(line.strip()): + if plain_block: + buffer.extend( + _format_block( + plain_block, exit_on_error, errors, is_doctest=False + ) + ) + plain_block = None + plain_code_section = True + plain_padding = start_space.match(line).groups()[0] + plain_padding_len = len(plain_padding) + buffer.append(line) + continue + elif ( + plain_code_section + and line.strip() + and not line.startswith(" " * (plain_padding_len + 1)) + ): + plain_code_section = False + elif match := format_directive.match(line): disable_format = match.groups()[0] == "off" - elif match := dont_format_under_directive.match(line): - non_code_directive = True if doctest_block: assert not plain_block @@ -98,65 +136,56 @@ def format_file( else: buffer.extend( _format_block( - doctest_block, exit_on_error, is_doctest=True + doctest_block, exit_on_error, errors, is_doctest=True ) ) doctest_block = None - - if plain_block: - assert not doctest_block - if not line: - plain_block.append((line, line_no, None, line)) - continue - elif match := plain_indent.match(line): - plain_block.append((line, line_no, None, match.groups()[1])) + elif plain_block: + if plain_code_section and not doctest_code_start.match(line): + 
plain_block.append( + (line, line_no, None, line[plain_padding_len:]) + ) continue else: - if non_code_directive: - buffer.extend(line for line, _, _, _ in plain_block) - else: - buffer.extend( - _format_block( - plain_block, exit_on_error, is_doctest=False - ) + buffer.extend( + _format_block( + plain_block, exit_on_error, errors, is_doctest=False ) + ) plain_block = None - non_code_directive = False - if match := doctest_code_start.match(line): + if line and (match := doctest_code_start.match(line)): + plain_code_section = False if plain_block: buffer.extend( - _format_block(plain_block, exit_on_error, is_doctest=False) + _format_block( + plain_block, exit_on_error, errors, is_doctest=False + ) ) plain_block = None padding, code = match.groups() doctest_block = [(line, line_no, padding, code)] elif ( - not no_plain - and not disable_format - and not last_line - and (match := plain_indent.match(line)) + line and not no_plain and not disable_format and plain_code_section ): - # print('start plain', line) assert not doctest_block # start of a plain block - padding, code = match.groups() - plain_block = [(line, line_no, padding, code)] + plain_block = [ + (line, line_no, plain_padding, line[plain_padding_len:]) + ] else: buffer.append(line) - last_line = line if doctest_block: buffer.extend( - _format_block(doctest_block, exit_on_error, is_doctest=True) + _format_block( + doctest_block, exit_on_error, errors, is_doctest=True + ) ) if plain_block: - if non_code_directive: - buffer.extend(line for line, _, _, _ in plain_block) - else: - buffer.extend( - _format_block(plain_block, exit_on_error, is_doctest=False) - ) + buffer.extend( + _format_block(plain_block, exit_on_error, errors, is_doctest=False) + ) if buffer: # if there is nothing in the buffer something strange happened so # don't do anything @@ -164,7 +193,10 @@ def format_file( updated = "\n".join(buffer) equal = original == updated if not check: - print("..done. 
", "No changes" if equal else "Changes detected") + print( + f"..done. {len(errors)} error(s).", + "No changes" if equal else "Changes detected", + ) if not equal: # write only if there are changes to write file.write_text(updated, "utf-8", newline="\n") @@ -176,9 +208,7 @@ def format_file( if check: if not equal: print(f"File {file} would be formatted") - return equal - else: - return None + return equal, len(errors) def iter_files(directory) -> Iterator[Path]: @@ -201,11 +231,26 @@ def main( ] if check: - if all(result): + formatting_error_counts = [e for _, e in result if e] + to_reformat = len([b for b, _ in result if not b]) + + if not to_reformat and not formatting_error_counts: print("All files are correctly formatted") exit(0) else: - print("Some file would be reformated") + print( + f"{to_reformat} file(s) would be reformatted;", + ( + f"{sum(formatting_error_counts)} formatting errors " + f"reported in {len(formatting_error_counts)} files" + ) + if formatting_error_counts + else "no formatting errors reported", + ) + + # interim, until we fix all formatting errors + if not to_reformat: + exit(0) exit(1)