git.ipfire.org Git - thirdparty/sqlalchemy/sqlalchemy.git/commitdiff
update the format_docs_code to also work on python files
author: Federico Caselli <cfederico87@gmail.com>
        Sat, 30 Nov 2024 18:50:38 +0000 (19:50 +0100)
committer: Federico Caselli <cfederico87@gmail.com>
        Sat, 30 Nov 2024 20:10:21 +0000 (21:10 +0100)
Change-Id: I0a6c9610b3fd85365ed4c2c199e3cad87ee64022

124 files changed:
README.dialects.rst
README.unittests.rst
examples/adjacency_list/__init__.py
examples/dogpile_caching/__init__.py
examples/performance/__init__.py
examples/space_invaders/__init__.py
examples/versioned_history/__init__.py
examples/vertical/__init__.py
examples/vertical/dictlike-polymorphic.py
examples/vertical/dictlike.py
lib/sqlalchemy/dialects/mssql/aioodbc.py
lib/sqlalchemy/dialects/mssql/base.py
lib/sqlalchemy/dialects/mssql/json.py
lib/sqlalchemy/dialects/mssql/pyodbc.py
lib/sqlalchemy/dialects/mysql/aiomysql.py
lib/sqlalchemy/dialects/mysql/asyncmy.py
lib/sqlalchemy/dialects/mysql/base.py
lib/sqlalchemy/dialects/mysql/dml.py
lib/sqlalchemy/dialects/mysql/enumerated.py
lib/sqlalchemy/dialects/mysql/expression.py
lib/sqlalchemy/dialects/mysql/mysqldb.py
lib/sqlalchemy/dialects/mysql/pymysql.py
lib/sqlalchemy/dialects/mysql/pyodbc.py
lib/sqlalchemy/dialects/oracle/base.py
lib/sqlalchemy/dialects/oracle/cx_oracle.py
lib/sqlalchemy/dialects/oracle/oracledb.py
lib/sqlalchemy/dialects/postgresql/array.py
lib/sqlalchemy/dialects/postgresql/asyncpg.py
lib/sqlalchemy/dialects/postgresql/base.py
lib/sqlalchemy/dialects/postgresql/ext.py
lib/sqlalchemy/dialects/postgresql/hstore.py
lib/sqlalchemy/dialects/postgresql/json.py
lib/sqlalchemy/dialects/postgresql/named_types.py
lib/sqlalchemy/dialects/postgresql/pg8000.py
lib/sqlalchemy/dialects/postgresql/psycopg.py
lib/sqlalchemy/dialects/postgresql/psycopg2.py
lib/sqlalchemy/dialects/postgresql/types.py
lib/sqlalchemy/dialects/sqlite/aiosqlite.py
lib/sqlalchemy/dialects/sqlite/base.py
lib/sqlalchemy/dialects/sqlite/pysqlcipher.py
lib/sqlalchemy/dialects/sqlite/pysqlite.py
lib/sqlalchemy/engine/base.py
lib/sqlalchemy/engine/create.py
lib/sqlalchemy/engine/cursor.py
lib/sqlalchemy/engine/events.py
lib/sqlalchemy/engine/interfaces.py
lib/sqlalchemy/engine/mock.py
lib/sqlalchemy/engine/reflection.py
lib/sqlalchemy/engine/result.py
lib/sqlalchemy/engine/row.py
lib/sqlalchemy/engine/url.py
lib/sqlalchemy/event/api.py
lib/sqlalchemy/exc.py
lib/sqlalchemy/ext/associationproxy.py
lib/sqlalchemy/ext/asyncio/base.py
lib/sqlalchemy/ext/asyncio/engine.py
lib/sqlalchemy/ext/asyncio/scoping.py
lib/sqlalchemy/ext/asyncio/session.py
lib/sqlalchemy/ext/automap.py
lib/sqlalchemy/ext/baked.py
lib/sqlalchemy/ext/compiler.py
lib/sqlalchemy/ext/declarative/extensions.py
lib/sqlalchemy/ext/horizontal_shard.py
lib/sqlalchemy/ext/hybrid.py
lib/sqlalchemy/ext/indexable.py
lib/sqlalchemy/ext/mutable.py
lib/sqlalchemy/ext/mypy/apply.py
lib/sqlalchemy/ext/mypy/infer.py
lib/sqlalchemy/ext/orderinglist.py
lib/sqlalchemy/ext/serializer.py
lib/sqlalchemy/orm/_orm_constructors.py
lib/sqlalchemy/orm/attributes.py
lib/sqlalchemy/orm/collections.py
lib/sqlalchemy/orm/decl_api.py
lib/sqlalchemy/orm/events.py
lib/sqlalchemy/orm/interfaces.py
lib/sqlalchemy/orm/mapper.py
lib/sqlalchemy/orm/properties.py
lib/sqlalchemy/orm/query.py
lib/sqlalchemy/orm/relationships.py
lib/sqlalchemy/orm/scoping.py
lib/sqlalchemy/orm/session.py
lib/sqlalchemy/orm/strategy_options.py
lib/sqlalchemy/orm/util.py
lib/sqlalchemy/pool/events.py
lib/sqlalchemy/sql/_dml_constructors.py
lib/sqlalchemy/sql/_elements_constructors.py
lib/sqlalchemy/sql/_selectable_constructors.py
lib/sqlalchemy/sql/base.py
lib/sqlalchemy/sql/ddl.py
lib/sqlalchemy/sql/dml.py
lib/sqlalchemy/sql/elements.py
lib/sqlalchemy/sql/events.py
lib/sqlalchemy/sql/functions.py
lib/sqlalchemy/sql/lambdas.py
lib/sqlalchemy/sql/operators.py
lib/sqlalchemy/sql/schema.py
lib/sqlalchemy/sql/selectable.py
lib/sqlalchemy/sql/sqltypes.py
lib/sqlalchemy/sql/type_api.py
lib/sqlalchemy/sql/util.py
lib/sqlalchemy/sql/visitors.py
lib/sqlalchemy/testing/config.py
lib/sqlalchemy/testing/provision.py
lib/sqlalchemy/testing/requirements.py
lib/sqlalchemy/testing/util.py
lib/sqlalchemy/util/_collections.py
lib/sqlalchemy/util/deprecations.py
lib/sqlalchemy/util/langhelpers.py
reap_dbs.py
test/orm/declarative/test_tm_future_annotations.py
test/orm/inheritance/_poly_fixtures.py
test/orm/test_relationships.py
test/requirements.py
test/sql/test_cte.py
test/sql/test_from_linter.py
test/sql/test_functions.py
test/sql/test_quote.py
tools/cython_imports.py
tools/format_docs_code.py
tools/generate_proxy_methods.py
tools/generate_sql_functions.py
tools/generate_tuple_map_overloads.py
tools/trace_orm_adapter.py

index 810267a20cf982f362de16047c044ca2d4af247f..798ed21fbd3f78dcb226a014cc2aa43651ba8045 100644 (file)
@@ -26,7 +26,9 @@ compliance suite" should be viewed as the primary target for new dialects.
 Dialect Layout
 ===============
 
-The file structure of a dialect is typically similar to the following::
+The file structure of a dialect is typically similar to the following:
+
+.. sourcecode:: text
 
     sqlalchemy-<dialect>/
                          setup.py
@@ -52,9 +54,9 @@ Key aspects of this file layout include:
   dialect to be usable from create_engine(), e.g.::
 
         entry_points = {
-         'sqlalchemy.dialects': [
-              'access.pyodbc = sqlalchemy_access.pyodbc:AccessDialect_pyodbc',
-              ]
+            "sqlalchemy.dialects": [
+                "access.pyodbc = sqlalchemy_access.pyodbc:AccessDialect_pyodbc",
+            ]
         }
 
   Above, the entrypoint ``access.pyodbc`` allow URLs to be used such as::
@@ -63,7 +65,9 @@ Key aspects of this file layout include:
 
 * setup.cfg - this file contains the traditional contents such as
   [tool:pytest] directives, but also contains new directives that are used
-  by SQLAlchemy's testing framework.  E.g. for Access::
+  by SQLAlchemy's testing framework.  E.g. for Access:
+
+  .. sourcecode:: text
 
     [tool:pytest]
     addopts= --tb native -v -r fxX --maxfail=25 -p no:warnings
@@ -129,6 +133,7 @@ Key aspects of this file layout include:
 
       from sqlalchemy.testing import exclusions
 
+
       class Requirements(SuiteRequirements):
           @property
           def nullable_booleans(self):
@@ -148,7 +153,9 @@ Key aspects of this file layout include:
   The requirements system can also be used when running SQLAlchemy's
   primary test suite against the external dialect.  In this use case,
   a ``--dburi`` as well as a ``--requirements`` flag are passed to SQLAlchemy's
-  test runner so that exclusions specific to the dialect take place::
+  test runner so that exclusions specific to the dialect take place:
+
+  .. sourcecode:: text
 
     cd /path/to/sqlalchemy
     pytest -v \
@@ -175,6 +182,7 @@ Key aspects of this file layout include:
 
       from sqlalchemy.testing.suite import IntegerTest as _IntegerTest
 
+
       class IntegerTest(_IntegerTest):
 
           @testing.skip("access")
index 07b935037813109ea0cc858ef1089e4238c88597..66118f0c3fe5065c07ea7d3100c4978fd1ee84dc 100644 (file)
@@ -49,7 +49,7 @@ database options and test selection.
 
 A generic pytest run looks like::
 
-    pytest -n4
+    pytest -n4
 
 Above, the full test suite will run against SQLite, using four processes.
 If the "-n" flag is not used, the pytest-xdist is skipped and the tests will
index 65ce311e6def50ccd2062da647ebf2321c5c7a31..b029e421b9330e74d8776e0b784147a025cf6df8 100644 (file)
@@ -4,9 +4,9 @@ an adjacency list model.
 
 E.g.::
 
-    node = TreeNode('rootnode')
-    node.append('node1')
-    node.append('node3')
+    node = TreeNode("rootnode")
+    node.append("node1")
+    node.append("node3")
     session.add(node)
     session.commit()
 
index f8c1bb582bc65ddd894070ae5dc79f67cd773cee..7fd6dba7217071842a62d4d9747a7a95fc9a9006 100644 (file)
@@ -44,13 +44,13 @@ pull from cache.
 The demo scripts themselves, in order of complexity, are run as Python
 modules so that relative imports work::
 
-   python -m examples.dogpile_caching.helloworld
+   python -m examples.dogpile_caching.helloworld
 
-   python -m examples.dogpile_caching.relationship_caching
+   python -m examples.dogpile_caching.relationship_caching
 
-   python -m examples.dogpile_caching.advanced
+   python -m examples.dogpile_caching.advanced
 
-   python -m examples.dogpile_caching.local_session_caching
+   python -m examples.dogpile_caching.local_session_caching
 
 .. autosource::
     :files: environment.py, caching_query.py, model.py, fixture_data.py, \
index 34db251e5c755f48b9402719c471a5aa98d3ad4b..3854fdbea52448debc07bd08a52d26c2af1c69c6 100644 (file)
@@ -129,15 +129,15 @@ we can create a file ``test_loads.py``, with the following content::
 
 
     class Parent(Base):
-        __tablename__ = 'parent'
+        __tablename__ = "parent"
         id = Column(Integer, primary_key=True)
         children = relationship("Child")
 
 
     class Child(Base):
-        __tablename__ = 'child'
+        __tablename__ = "child"
         id = Column(Integer, primary_key=True)
-        parent_id = Column(Integer, ForeignKey('parent.id'))
+        parent_id = Column(Integer, ForeignKey("parent.id"))
 
 
     # Init with name of file, default number of items
@@ -152,10 +152,12 @@ we can create a file ``test_loads.py``, with the following content::
         Base.metadata.drop_all(engine)
         Base.metadata.create_all(engine)
         sess = Session(engine)
-        sess.add_all([
-            Parent(children=[Child() for j in range(100)])
-            for i in range(num)
-        ])
+        sess.add_all(
+            [
+                Parent(children=[Child() for j in range(100)])
+                for i in range(num)
+            ]
+        )
         sess.commit()
 
 
@@ -191,7 +193,8 @@ we can create a file ``test_loads.py``, with the following content::
         for parent in session.query(Parent).options(subqueryload("children")):
             parent.children
 
-    if __name__ == '__main__':
+
+    if __name__ == "__main__":
         Profiler.main()
 
 We can run our new script directly::
index 944f8bb466c4351e514a4c57056ac760c9dfe216..993d1e454312976146f31c6d4a68f4ee2c1ad1b4 100644 (file)
@@ -11,11 +11,11 @@ Runs in a textual console using ASCII art.
 
 To run::
 
-    python -m examples.space_invaders.space_invaders
+    python -m examples.space_invaders.space_invaders
 
 While it runs, watch the SQL output in the log::
 
-    tail -f space_invaders.log
+    tail -f space_invaders.log
 
 enjoy!
 
index 2fa281b8dd1fa62da89d7da058e169ece9a32b74..a872a63c0346292690be2b8aca63416c0f33d927 100644 (file)
@@ -9,18 +9,20 @@ as new rows in the same table, without using a separate history table.
 Usage is illustrated via a unit test module ``test_versioning.py``, which is
 run using SQLAlchemy's internal pytest plugin::
 
-    pytest test/base/test_examples.py
+    pytest test/base/test_examples.py
 
 
 A fragment of example usage, using declarative::
 
     from history_meta import Versioned, versioned_session
 
+
     class Base(DeclarativeBase):
         pass
 
+
     class SomeClass(Versioned, Base):
-        __tablename__ = 'sometable'
+        __tablename__ = "sometable"
 
         id = Column(Integer, primary_key=True)
         name = Column(String(50))
@@ -28,25 +30,25 @@ A fragment of example usage, using declarative::
         def __eq__(self, other):
             assert type(other) is SomeClass and other.id == self.id
 
+
     Session = sessionmaker(bind=engine)
     versioned_session(Session)
 
     sess = Session()
-    sc = SomeClass(name='sc1')
+    sc = SomeClass(name="sc1")
     sess.add(sc)
     sess.commit()
 
-    sc.name = 'sc1modified'
+    sc.name = "sc1modified"
     sess.commit()
 
     assert sc.version == 2
 
     SomeClassHistory = SomeClass.__history_mapper__.class_
 
-    assert sess.query(SomeClassHistory).\\
-                filter(SomeClassHistory.version == 1).\\
-                all() \\
-                == [SomeClassHistory(version=1, name='sc1')]
+    assert sess.query(SomeClassHistory).filter(
+        SomeClassHistory.version == 1
+    ).all() == [SomeClassHistory(version=1, name="sc1")]
 
 The ``Versioned`` mixin is designed to work with declarative.  To use
 the extension with classical mappers, the ``_history_mapper`` function
@@ -64,7 +66,7 @@ feature documented at :ref:`mapper_version_counter`.   To enable this feature,
 set the flag ``Versioned.use_mapper_versioning`` to True::
 
     class SomeClass(Versioned, Base):
-        __tablename__ = 'sometable'
+        __tablename__ = "sometable"
 
         use_mapper_versioning = True
 
index b0c00b664e75c614141081745525663713f33262..997510e1b07c85bb32ace5f591ac5228fe4371d9 100644 (file)
@@ -15,19 +15,20 @@ information stored in the attribute, such as integer, string, or date.
 
 Example::
 
-    shrew = Animal(u'shrew')
-    shrew[u'cuteness'] = 5
-    shrew[u'weasel-like'] = False
-    shrew[u'poisonous'] = True
+    shrew = Animal("shrew")
+    shrew["cuteness"] = 5
+    shrew["weasel-like"] = False
+    shrew["poisonous"] = True
 
     session.add(shrew)
     session.flush()
 
-    q = (session.query(Animal).
-         filter(Animal.facts.any(
-           and_(AnimalFact.key == u'weasel-like',
-                AnimalFact.value == True))))
-    print('weasel-like animals', q.all())
+    q = session.query(Animal).filter(
+        Animal.facts.any(
+            and_(AnimalFact.key == "weasel-like", AnimalFact.value == True)
+        )
+    )
+    print("weasel-like animals", q.all())
 
 .. autosource::
 
index 69f32cf4a8ed4ab6ac7cdc3ca0cc53ef3a8ad3b4..7de8fa80d9f7433580cbb7dc824775cb014b4caf 100644 (file)
@@ -3,15 +3,17 @@
 Builds upon the dictlike.py example to also add differently typed
 columns to the "fact" table, e.g.::
 
-  Table('properties', metadata
-        Column('owner_id', Integer, ForeignKey('owner.id'),
-               primary_key=True),
-        Column('key', UnicodeText),
-        Column('type', Unicode(16)),
-        Column('int_value', Integer),
-        Column('char_value', UnicodeText),
-        Column('bool_value', Boolean),
-        Column('decimal_value', Numeric(10,2)))
+  Table(
+      "properties",
+      metadata,
+      Column("owner_id", Integer, ForeignKey("owner.id"), primary_key=True),
+      Column("key", UnicodeText),
+      Column("type", Unicode(16)),
+      Column("int_value", Integer),
+      Column("char_value", UnicodeText),
+      Column("bool_value", Boolean),
+      Column("decimal_value", Numeric(10, 2)),
+  )
 
 For any given properties row, the value of the 'type' column will point to the
 '_value' column active for that row.
index f561499e8fdaaa8d4fa7bfe948d0f4f2bc99ef57..bd1701c89c6f30d613d98bbd8e58cabb2e90efb0 100644 (file)
@@ -6,24 +6,30 @@ that store free-form object properties as rows instead of columns.  For
 example, instead of::
 
   # A regular ("horizontal") table has columns for 'species' and 'size'
-  Table('animal', metadata,
-        Column('id', Integer, primary_key=True),
-        Column('species', Unicode),
-        Column('size', Unicode))
+  Table(
+      "animal",
+      metadata,
+      Column("id", Integer, primary_key=True),
+      Column("species", Unicode),
+      Column("size", Unicode),
+  )
 
 A vertical table models this as two tables: one table for the base or parent
 entity, and another related table holding key/value pairs::
 
-  Table('animal', metadata,
-        Column('id', Integer, primary_key=True))
+  Table("animal", metadata, Column("id", Integer, primary_key=True))
 
   # The properties table will have one row for a 'species' value, and
   # another row for the 'size' value.
-  Table('properties', metadata
-        Column('animal_id', Integer, ForeignKey('animal.id'),
-               primary_key=True),
-        Column('key', UnicodeText),
-        Column('value', UnicodeText))
+  Table(
+      "properties",
+      metadata,
+      Column(
+          "animal_id", Integer, ForeignKey("animal.id"), primary_key=True
+      ),
+      Column("key", UnicodeText),
+      Column("value", UnicodeText),
+  )
 
 Because the key/value pairs in a vertical scheme are not fixed in advance,
 accessing them like a Python dict can be very convenient.  The example below
index 65945d97559d490556d864cc13bb5e44d2b5ef16..518d7ce066975e647a24d9af556c224871bb79e0 100644 (file)
@@ -32,13 +32,12 @@ This dialect should normally be used only with the
 styles are otherwise equivalent to those documented in the pyodbc section::
 
     from sqlalchemy.ext.asyncio import create_async_engine
+
     engine = create_async_engine(
         "mssql+aioodbc://scott:tiger@mssql2017:1433/test?"
         "driver=ODBC+Driver+18+for+SQL+Server&TrustServerCertificate=yes"
     )
 
-
-
 """
 
 from __future__ import annotations
index a617acf3dea953da045cd20fdeb79b684e0a3e02..a9eeace3a784454df93e2e07e7c6c379a1282a7d 100644 (file)
@@ -39,9 +39,12 @@ considered to be the identity column - unless it is associated with a
     from sqlalchemy import Table, MetaData, Column, Integer
 
     m = MetaData()
-    t = Table('t', m,
-            Column('id', Integer, primary_key=True),
-            Column('x', Integer))
+    t = Table(
+        "t",
+        m,
+        Column("id", Integer, primary_key=True),
+        Column("x", Integer),
+    )
     m.create_all(engine)
 
 The above example will generate DDL as:
@@ -59,9 +62,12 @@ specify ``False`` for the :paramref:`_schema.Column.autoincrement` flag,
 on the first integer primary key column::
 
     m = MetaData()
-    t = Table('t', m,
-            Column('id', Integer, primary_key=True, autoincrement=False),
-            Column('x', Integer))
+    t = Table(
+        "t",
+        m,
+        Column("id", Integer, primary_key=True, autoincrement=False),
+        Column("x", Integer),
+    )
     m.create_all(engine)
 
 To add the ``IDENTITY`` keyword to a non-primary key column, specify
@@ -71,9 +77,12 @@ To add the ``IDENTITY`` keyword to a non-primary key column, specify
 is set to ``False`` on any integer primary key column::
 
     m = MetaData()
-    t = Table('t', m,
-            Column('id', Integer, primary_key=True, autoincrement=False),
-            Column('x', Integer, autoincrement=True))
+    t = Table(
+        "t",
+        m,
+        Column("id", Integer, primary_key=True, autoincrement=False),
+        Column("x", Integer, autoincrement=True),
+    )
     m.create_all(engine)
 
 .. versionchanged::  1.4   Added :class:`_schema.Identity` construct
@@ -136,14 +145,12 @@ parameters passed to the :class:`_schema.Identity` object::
     from sqlalchemy import Table, Integer, Column, Identity
 
     test = Table(
-        'test', metadata,
+        "test",
+        metadata,
         Column(
-            'id',
-            Integer,
-            primary_key=True,
-            Identity(start=100, increment=10)
+            "id", Integer, primary_key=True, Identity(start=100, increment=10)
         ),
-        Column('name', String(20))
+        Column("name", String(20)),
     )
 
 The CREATE TABLE for the above :class:`_schema.Table` object would be:
@@ -153,7 +160,7 @@ The CREATE TABLE for the above :class:`_schema.Table` object would be:
    CREATE TABLE test (
      id INTEGER NOT NULL IDENTITY(100,10) PRIMARY KEY,
      name VARCHAR(20) NULL,
-     )
+   )
 
 .. note::
 
@@ -186,6 +193,7 @@ type deployed to the SQL Server database can be specified as ``Numeric`` using
 
     Base = declarative_base()
 
+
     class TestTable(Base):
         __tablename__ = "test"
         id = Column(
@@ -211,8 +219,9 @@ integer values in Python 3), use :class:`_types.TypeDecorator` as follows::
 
     from sqlalchemy import TypeDecorator
 
+
     class NumericAsInteger(TypeDecorator):
-        '''normalize floating point return values into ints'''
+        "normalize floating point return values into ints"
 
         impl = Numeric(10, 0, asdecimal=False)
         cache_ok = True
@@ -222,6 +231,7 @@ integer values in Python 3), use :class:`_types.TypeDecorator` as follows::
                 value = int(value)
             return value
 
+
     class TestTable(Base):
         __tablename__ = "test"
         id = Column(
@@ -270,11 +280,11 @@ The process for fetching this value has several variants:
     fetched in order to receive the value.  Given a table as::
 
         t = Table(
-            't',
+            "t",
             metadata,
-            Column('id', Integer, primary_key=True),
-            Column('x', Integer),
-            implicit_returning=False
+            Column("id", Integer, primary_key=True),
+            Column("x", Integer),
+            implicit_returning=False,
         )
 
     an INSERT will look like:
@@ -300,12 +310,13 @@ statement proceeding, and ``SET IDENTITY_INSERT OFF`` subsequent to the
 execution.  Given this example::
 
     m = MetaData()
-    t = Table('t', m, Column('id', Integer, primary_key=True),
-                    Column('x', Integer))
+    t = Table(
+        "t", m, Column("id", Integer, primary_key=True), Column("x", Integer)
+    )
     m.create_all(engine)
 
     with engine.begin() as conn:
-        conn.execute(t.insert(), {'id': 1, 'x':1}, {'id':2, 'x':2})
+        conn.execute(t.insert(), {"id": 1, "x": 1}, {"id": 2, "x": 2})
 
 The above column will be created with IDENTITY, however the INSERT statement
 we emit is specifying explicit values.  In the echo output we can see
@@ -341,7 +352,11 @@ The :class:`.Sequence` object creates "real" sequences, i.e.,
     >>> from sqlalchemy import Sequence
     >>> from sqlalchemy.schema import CreateSequence
     >>> from sqlalchemy.dialects import mssql
-    >>> print(CreateSequence(Sequence("my_seq", start=1)).compile(dialect=mssql.dialect()))
+    >>> print(
+    ...     CreateSequence(Sequence("my_seq", start=1)).compile(
+    ...         dialect=mssql.dialect()
+    ...     )
+    ... )
     {printsql}CREATE SEQUENCE my_seq START WITH 1
 
 For integer primary key generation, SQL Server's ``IDENTITY`` construct should
@@ -375,12 +390,12 @@ more than one backend without using dialect-specific types.
 To build a SQL Server VARCHAR or NVARCHAR with MAX length, use None::
 
     my_table = Table(
-        'my_table', metadata,
-        Column('my_data', VARCHAR(None)),
-        Column('my_n_data', NVARCHAR(None))
+        "my_table",
+        metadata,
+        Column("my_data", VARCHAR(None)),
+        Column("my_n_data", NVARCHAR(None)),
     )
 
-
 Collation Support
 -----------------
 
@@ -388,10 +403,13 @@ Character collations are supported by the base string types,
 specified by the string argument "collation"::
 
     from sqlalchemy import VARCHAR
-    Column('login', VARCHAR(32, collation='Latin1_General_CI_AS'))
+
+    Column("login", VARCHAR(32, collation="Latin1_General_CI_AS"))
 
 When such a column is associated with a :class:`_schema.Table`, the
-CREATE TABLE statement for this column will yield::
+CREATE TABLE statement for this column will yield:
+
+.. sourcecode:: sql
 
     login VARCHAR(32) COLLATE Latin1_General_CI_AS NULL
 
@@ -411,7 +429,9 @@ versions when no OFFSET clause is present.  A statement such as::
 
     select(some_table).limit(5)
 
-will render similarly to::
+will render similarly to:
+
+.. sourcecode:: sql
 
     SELECT TOP 5 col1, col2.. FROM table
 
@@ -421,7 +441,9 @@ LIMIT and OFFSET, or just OFFSET alone, will be rendered using the
 
     select(some_table).order_by(some_table.c.col3).limit(5).offset(10)
 
-will render similarly to::
+will render similarly to:
+
+.. sourcecode:: sql
 
     SELECT anon_1.col1, anon_1.col2 FROM (SELECT col1, col2,
     ROW_NUMBER() OVER (ORDER BY col3) AS
@@ -474,16 +496,13 @@ each new connection.
 To set isolation level using :func:`_sa.create_engine`::
 
     engine = create_engine(
-        "mssql+pyodbc://scott:tiger@ms_2008",
-        isolation_level="REPEATABLE READ"
+        "mssql+pyodbc://scott:tiger@ms_2008", isolation_level="REPEATABLE READ"
     )
 
 To set using per-connection execution options::
 
     connection = engine.connect()
-    connection = connection.execution_options(
-        isolation_level="READ COMMITTED"
-    )
+    connection = connection.execution_options(isolation_level="READ COMMITTED")
 
 Valid values for ``isolation_level`` include:
 
@@ -533,7 +552,6 @@ will remain consistent with the state of the transaction::
 
     mssql_engine = create_engine(
         "mssql+pyodbc://scott:tiger^5HHH@mssql2017:1433/test?driver=ODBC+Driver+17+for+SQL+Server",
-
         # disable default reset-on-return scheme
         pool_reset_on_return=None,
     )
@@ -562,13 +580,17 @@ Nullability
 -----------
 MSSQL has support for three levels of column nullability. The default
 nullability allows nulls and is explicit in the CREATE TABLE
-construct::
+construct:
+
+.. sourcecode:: sql
 
     name VARCHAR(20) NULL
 
 If ``nullable=None`` is specified then no specification is made. In
 other words the database's configured default is used. This will
-render::
+render:
+
+.. sourcecode:: sql
 
     name VARCHAR(20)
 
@@ -624,8 +646,9 @@ behavior of this flag is as follows:
 * The flag can be set to either ``True`` or ``False`` when the dialect
   is created, typically via :func:`_sa.create_engine`::
 
-        eng = create_engine("mssql+pymssql://user:pass@host/db",
-                        deprecate_large_types=True)
+        eng = create_engine(
+            "mssql+pymssql://user:pass@host/db", deprecate_large_types=True
+        )
 
 * Complete control over whether the "old" or "new" types are rendered is
   available in all SQLAlchemy versions by using the UPPERCASE type objects
@@ -647,9 +670,10 @@ at once using the :paramref:`_schema.Table.schema` argument of
 :class:`_schema.Table`::
 
     Table(
-        "some_table", metadata,
+        "some_table",
+        metadata,
         Column("q", String(50)),
-        schema="mydatabase.dbo"
+        schema="mydatabase.dbo",
     )
 
 When performing operations such as table or component reflection, a schema
@@ -661,9 +685,10 @@ components will be quoted separately for case sensitive names and other
 special characters.   Given an argument as below::
 
     Table(
-        "some_table", metadata,
+        "some_table",
+        metadata,
         Column("q", String(50)),
-        schema="MyDataBase.dbo"
+        schema="MyDataBase.dbo",
     )
 
 The above schema would be rendered as ``[MyDataBase].dbo``, and also in
@@ -676,21 +701,22 @@ Below, the "owner" will be considered as ``MyDataBase.dbo`` and the
 "database" will be None::
 
     Table(
-        "some_table", metadata,
+        "some_table",
+        metadata,
         Column("q", String(50)),
-        schema="[MyDataBase.dbo]"
+        schema="[MyDataBase.dbo]",
     )
 
 To individually specify both database and owner name with special characters
 or embedded dots, use two sets of brackets::
 
     Table(
-        "some_table", metadata,
+        "some_table",
+        metadata,
         Column("q", String(50)),
-        schema="[MyDataBase.Period].[MyOwner.Dot]"
+        schema="[MyDataBase.Period].[MyOwner.Dot]",
     )
 
-
 .. versionchanged:: 1.2 the SQL Server dialect now treats brackets as
    identifier delimiters splitting the schema into separate database
    and owner tokens, to allow dots within either name itself.
@@ -705,10 +731,11 @@ schema-qualified table would be auto-aliased when used in a
 SELECT statement; given a table::
 
     account_table = Table(
-        'account', metadata,
-        Column('id', Integer, primary_key=True),
-        Column('info', String(100)),
-        schema="customer_schema"
+        "account",
+        metadata,
+        Column("id", Integer, primary_key=True),
+        Column("info", String(100)),
+        schema="customer_schema",
     )
 
 this legacy mode of rendering would assume that "customer_schema.account"
@@ -751,37 +778,55 @@ which renders the index as ``CREATE CLUSTERED INDEX my_index ON table (x)``.
 
 To generate a clustered primary key use::
 
-    Table('my_table', metadata,
-          Column('x', ...),
-          Column('y', ...),
-          PrimaryKeyConstraint("x", "y", mssql_clustered=True))
+    Table(
+        "my_table",
+        metadata,
+        Column("x", ...),
+        Column("y", ...),
+        PrimaryKeyConstraint("x", "y", mssql_clustered=True),
+    )
 
-which will render the table, for example, as::
+which will render the table, for example, as:
 
-  CREATE TABLE my_table (x INTEGER NOT NULL, y INTEGER NOT NULL,
-                         PRIMARY KEY CLUSTERED (x, y))
+.. sourcecode:: sql
+
+  CREATE TABLE my_table (
+    x INTEGER NOT NULL,
+    y INTEGER NOT NULL,
+    PRIMARY KEY CLUSTERED (x, y)
+  )
 
 Similarly, we can generate a clustered unique constraint using::
 
-    Table('my_table', metadata,
-          Column('x', ...),
-          Column('y', ...),
-          PrimaryKeyConstraint("x"),
-          UniqueConstraint("y", mssql_clustered=True),
-          )
+    Table(
+        "my_table",
+        metadata,
+        Column("x", ...),
+        Column("y", ...),
+        PrimaryKeyConstraint("x"),
+        UniqueConstraint("y", mssql_clustered=True),
+    )
 
 To explicitly request a non-clustered primary key (for example, when
 a separate clustered index is desired), use::
 
-    Table('my_table', metadata,
-          Column('x', ...),
-          Column('y', ...),
-          PrimaryKeyConstraint("x", "y", mssql_clustered=False))
+    Table(
+        "my_table",
+        metadata,
+        Column("x", ...),
+        Column("y", ...),
+        PrimaryKeyConstraint("x", "y", mssql_clustered=False),
+    )
 
-which will render the table, for example, as::
+which will render the table, for example, as:
+
+.. sourcecode:: sql
 
-  CREATE TABLE my_table (x INTEGER NOT NULL, y INTEGER NOT NULL,
-                         PRIMARY KEY NONCLUSTERED (x, y))
+  CREATE TABLE my_table (
+    x INTEGER NOT NULL,
+    y INTEGER NOT NULL,
+    PRIMARY KEY NONCLUSTERED (x, y)
+  )
 
 Columnstore Index Support
 -------------------------
@@ -819,7 +864,7 @@ INCLUDE
 The ``mssql_include`` option renders INCLUDE(colname) for the given string
 names::
 
-    Index("my_index", table.c.x, mssql_include=['y'])
+    Index("my_index", table.c.x, mssql_include=["y"])
 
 would render the index as ``CREATE INDEX my_index ON table (x) INCLUDE (y)``
 
@@ -874,18 +919,19 @@ To disable the usage of OUTPUT INSERTED on a per-table basis,
 specify ``implicit_returning=False`` for each :class:`_schema.Table`
 which has triggers::
 
-    Table('mytable', metadata,
-        Column('id', Integer, primary_key=True),
+    Table(
+        "mytable",
+        metadata,
+        Column("id", Integer, primary_key=True),
         # ...,
-        implicit_returning=False
+        implicit_returning=False,
     )
 
 Declarative form::
 
     class MyClass(Base):
         # ...
-        __table_args__ = {'implicit_returning':False}
-
+        __table_args__ = {"implicit_returning": False}
 
 .. _mssql_rowcount_versioning:
 
@@ -919,7 +965,9 @@ isolation mode that locks entire tables, and causes even mildly concurrent
 applications to have long held locks and frequent deadlocks.
 Enabling snapshot isolation for the database as a whole is recommended
 for modern levels of concurrency support.  This is accomplished via the
-following ALTER DATABASE commands executed at the SQL prompt::
+following ALTER DATABASE commands executed at the SQL prompt:
+
+.. sourcecode:: sql
 
     ALTER DATABASE MyDatabase SET ALLOW_SNAPSHOT_ISOLATION ON
 
index 18bea09d0f14c46895a970d5a4efb06f6fa9277d..305aef77d104a1f63bf84768e96d8abd193f966c 100644 (file)
@@ -54,9 +54,7 @@ class JSON(sqltypes.JSON):
        dictionary or list, the :meth:`_types.JSON.Comparator.as_json` accessor
        should be used::
 
-            stmt = select(
-                data_table.c.data["some key"].as_json()
-            ).where(
+            stmt = select(data_table.c.data["some key"].as_json()).where(
                 data_table.c.data["some key"].as_json() == {"sub": "structure"}
             )
 
@@ -67,9 +65,7 @@ class JSON(sqltypes.JSON):
        :meth:`_types.JSON.Comparator.as_integer`,
        :meth:`_types.JSON.Comparator.as_float`::
 
-            stmt = select(
-                data_table.c.data["some key"].as_string()
-            ).where(
+            stmt = select(data_table.c.data["some key"].as_string()).where(
                 data_table.c.data["some key"].as_string() == "some string"
             )
 
index 76ea046de99cec66ef66f46895710a8a7abcc3a7..421472c255287a1fa665ffe592866a505ab26745 100644 (file)
@@ -30,7 +30,9 @@ is configured on the client, a basic DSN-based connection looks like::
 
     engine = create_engine("mssql+pyodbc://scott:tiger@some_dsn")
 
-Which above, will pass the following connection string to PyODBC::
+Which above, will pass the following connection string to PyODBC:
+
+.. sourcecode:: text
 
     DSN=some_dsn;UID=scott;PWD=tiger
 
@@ -49,7 +51,9 @@ When using a hostname connection, the driver name must also be specified in the
 query parameters of the URL.  As these names usually have spaces in them, the
 name must be URL encoded which means using plus signs for spaces::
 
-    engine = create_engine("mssql+pyodbc://scott:tiger@myhost:port/databasename?driver=ODBC+Driver+17+for+SQL+Server")
+    engine = create_engine(
+        "mssql+pyodbc://scott:tiger@myhost:port/databasename?driver=ODBC+Driver+17+for+SQL+Server"
+    )
 
 The ``driver`` keyword is significant to the pyodbc dialect and must be
 specified in lowercase.
@@ -69,6 +73,7 @@ internally::
 The equivalent URL can be constructed using :class:`_sa.engine.URL`::
 
     from sqlalchemy.engine import URL
+
     connection_url = URL.create(
         "mssql+pyodbc",
         username="scott",
@@ -83,7 +88,6 @@ The equivalent URL can be constructed using :class:`_sa.engine.URL`::
         },
     )
 
-
 Pass through exact Pyodbc string
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -94,8 +98,11 @@ using the parameter ``odbc_connect``.  A :class:`_sa.engine.URL` object
 can help make this easier::
 
     from sqlalchemy.engine import URL
+
     connection_string = "DRIVER={SQL Server Native Client 10.0};SERVER=dagger;DATABASE=test;UID=user;PWD=password"
-    connection_url = URL.create("mssql+pyodbc", query={"odbc_connect": connection_string})
+    connection_url = URL.create(
+        "mssql+pyodbc", query={"odbc_connect": connection_string}
+    )
 
     engine = create_engine(connection_url)
 
@@ -127,7 +134,8 @@ database using Azure credentials::
     from sqlalchemy.engine.url import URL
     from azure import identity
 
-    SQL_COPT_SS_ACCESS_TOKEN = 1256  # Connection option for access tokens, as defined in msodbcsql.h
+    # Connection option for access tokens, as defined in msodbcsql.h
+    SQL_COPT_SS_ACCESS_TOKEN = 1256
     TOKEN_URL = "https://database.windows.net/"  # The token URL for any Azure SQL database
 
     connection_string = "mssql+pyodbc://@my-server.database.windows.net/myDb?driver=ODBC+Driver+17+for+SQL+Server"
@@ -136,14 +144,19 @@ database using Azure credentials::
 
     azure_credentials = identity.DefaultAzureCredential()
 
+
     @event.listens_for(engine, "do_connect")
     def provide_token(dialect, conn_rec, cargs, cparams):
         # remove the "Trusted_Connection" parameter that SQLAlchemy adds
         cargs[0] = cargs[0].replace(";Trusted_Connection=Yes", "")
 
         # create token credential
-        raw_token = azure_credentials.get_token(TOKEN_URL).token.encode("utf-16-le")
-        token_struct = struct.pack(f"<I{len(raw_token)}s", len(raw_token), raw_token)
+        raw_token = azure_credentials.get_token(TOKEN_URL).token.encode(
+            "utf-16-le"
+        )
+        token_struct = struct.pack(
+            f"<I{len(raw_token)}s", len(raw_token), raw_token
+        )
 
         # apply it to keyword arguments
         cparams["attrs_before"] = {SQL_COPT_SS_ACCESS_TOKEN: token_struct}
@@ -176,7 +189,9 @@ emit a ``.rollback()`` after an operation had a failure of some kind.
 This specific case can be handled by passing ``ignore_no_transaction_on_rollback=True`` to
 the SQL Server dialect via the :func:`_sa.create_engine` function as follows::
 
-    engine = create_engine(connection_url, ignore_no_transaction_on_rollback=True)
+    engine = create_engine(
+        connection_url, ignore_no_transaction_on_rollback=True
+    )
 
 Using the above parameter, the dialect will catch ``ProgrammingError``
 exceptions raised during ``connection.rollback()`` and emit a warning
@@ -236,7 +251,6 @@ behavior and pass long strings as varchar(max)/nvarchar(max) using the
         },
     )
 
-
 Pyodbc Pooling / connection close behavior
 ------------------------------------------
 
@@ -301,7 +315,8 @@ Server dialect supports this parameter by passing the
 
     engine = create_engine(
         "mssql+pyodbc://scott:tiger@mssql2017:1433/test?driver=ODBC+Driver+17+for+SQL+Server",
-        fast_executemany=True)
+        fast_executemany=True,
+    )
 
 .. versionchanged:: 2.0.9 - the ``fast_executemany`` parameter now has its
    intended effect of this PyODBC feature taking effect for all INSERT
index 315ea6df95a577de3a935d4dbaecc1f874c4821d..f66fae027942816ff3ff012326b401ae3aedfc83 100644 (file)
@@ -23,8 +23,10 @@ This dialect should normally be used only with the
 :func:`_asyncio.create_async_engine` engine creation function::
 
     from sqlalchemy.ext.asyncio import create_async_engine
-    engine = create_async_engine("mysql+aiomysql://user:pass@hostname/dbname?charset=utf8mb4")
 
+    engine = create_async_engine(
+        "mysql+aiomysql://user:pass@hostname/dbname?charset=utf8mb4"
+    )
 
 """  # noqa
 from .pymysql import MySQLDialect_pymysql
index 5fc36044dc86a1ccf6561c3f6776cc2bbc9de4a5..49a9a79ba81f2d85072826e9e97e5ab16241bd15 100644 (file)
@@ -21,8 +21,10 @@ This dialect should normally be used only with the
 :func:`_asyncio.create_async_engine` engine creation function::
 
     from sqlalchemy.ext.asyncio import create_async_engine
-    engine = create_async_engine("mysql+asyncmy://user:pass@hostname/dbname?charset=utf8mb4")
 
+    engine = create_async_engine(
+        "mysql+asyncmy://user:pass@hostname/dbname?charset=utf8mb4"
+    )
 
 """  # noqa
 from __future__ import annotations
index c834495759e9c3f5c04f878df243fe8456dfaf78..3e5a87756fc9d85fcd151c8a01c23fb7d8ada68a 100644 (file)
@@ -34,7 +34,9 @@ syntactical and behavioral differences that SQLAlchemy accommodates automaticall
 To connect to a MariaDB database, no changes to the database URL are required::
 
 
-    engine = create_engine("mysql+pymysql://user:pass@some_mariadb/dbname?charset=utf8mb4")
+    engine = create_engine(
+        "mysql+pymysql://user:pass@some_mariadb/dbname?charset=utf8mb4"
+    )
 
 Upon first connect, the SQLAlchemy dialect employs a
 server version detection scheme that determines if the
@@ -52,7 +54,9 @@ useful for the case where an application makes use of MariaDB-specific features
 and is not compatible with a MySQL database.    To use this mode of operation,
 replace the "mysql" token in the above URL with "mariadb"::
 
-    engine = create_engine("mariadb+pymysql://user:pass@some_mariadb/dbname?charset=utf8mb4")
+    engine = create_engine(
+        "mariadb+pymysql://user:pass@some_mariadb/dbname?charset=utf8mb4"
+    )
 
 The above engine, upon first connect, will raise an error if the server version
 detection detects that the backing database is not MariaDB.
@@ -98,7 +102,7 @@ the :paramref:`_sa.create_engine.pool_recycle` option which ensures that
 a connection will be discarded and replaced with a new one if it has been
 present in the pool for a fixed number of seconds::
 
-    engine = create_engine('mysql+mysqldb://...', pool_recycle=3600)
+    engine = create_engine("mysql+mysqldb://...", pool_recycle=3600)
 
 For more comprehensive disconnect detection of pooled connections, including
 accommodation of  server restarts and network issues, a pre-ping approach may
@@ -122,12 +126,14 @@ To accommodate the rendering of these arguments, specify the form
 ``ENGINE`` of ``InnoDB``, ``CHARSET`` of ``utf8mb4``, and ``KEY_BLOCK_SIZE``
 of ``1024``::
 
-  Table('mytable', metadata,
-        Column('data', String(32)),
-        mysql_engine='InnoDB',
-        mysql_charset='utf8mb4',
-        mysql_key_block_size="1024"
-       )
+  Table(
+      "mytable",
+      metadata,
+      Column("data", String(32)),
+      mysql_engine="InnoDB",
+      mysql_charset="utf8mb4",
+      mysql_key_block_size="1024",
+  )
 
 When supporting :ref:`mysql_mariadb_only_mode` mode, similar keys against
 the "mariadb" prefix must be included as well.  The values can of course
@@ -136,19 +142,17 @@ be maintained::
 
   # support both "mysql" and "mariadb-only" engine URLs
 
-  Table('mytable', metadata,
-        Column('data', String(32)),
-
-        mysql_engine='InnoDB',
-        mariadb_engine='InnoDB',
-
-        mysql_charset='utf8mb4',
-        mariadb_charset='utf8',
-
-        mysql_key_block_size="1024"
-        mariadb_key_block_size="1024"
-
-       )
+  Table(
+      "mytable",
+      metadata,
+      Column("data", String(32)),
+      mysql_engine="InnoDB",
+      mariadb_engine="InnoDB",
+      mysql_charset="utf8mb4",
+      mariadb_charset="utf8",
+      mysql_key_block_size="1024",
+      mariadb_key_block_size="1024",
+  )
 
 The MySQL / MariaDB dialects will normally transfer any keyword specified as
 ``mysql_keyword_name`` to be rendered as ``KEYWORD_NAME`` in the
@@ -214,16 +218,14 @@ techniques are used.
 To set isolation level using :func:`_sa.create_engine`::
 
     engine = create_engine(
-                    "mysql+mysqldb://scott:tiger@localhost/test",
-                    isolation_level="READ UNCOMMITTED"
-                )
+        "mysql+mysqldb://scott:tiger@localhost/test",
+        isolation_level="READ UNCOMMITTED",
+    )
 
 To set using per-connection execution options::
 
     connection = engine.connect()
-    connection = connection.execution_options(
-        isolation_level="READ COMMITTED"
-    )
+    connection = connection.execution_options(isolation_level="READ COMMITTED")
 
 Valid values for ``isolation_level`` include:
 
@@ -255,8 +257,8 @@ When creating tables, SQLAlchemy will automatically set ``AUTO_INCREMENT`` on
 the first :class:`.Integer` primary key column which is not marked as a
 foreign key::
 
-  >>> t = Table('mytable', metadata,
-  ...   Column('mytable_id', Integer, primary_key=True)
+  >>> t = Table(
+  ...     "mytable", metadata, Column("mytable_id", Integer, primary_key=True)
   ... )
   >>> t.create()
   CREATE TABLE mytable (
@@ -270,10 +272,12 @@ This flag
 can also be used to enable auto-increment on a secondary column in a
 multi-column key for some storage engines::
 
-  Table('mytable', metadata,
-        Column('gid', Integer, primary_key=True, autoincrement=False),
-        Column('id', Integer, primary_key=True)
-       )
+  Table(
+      "mytable",
+      metadata,
+      Column("gid", Integer, primary_key=True, autoincrement=False),
+      Column("id", Integer, primary_key=True),
+  )
 
 .. _mysql_ss_cursors:
 
@@ -291,7 +295,9 @@ Server side cursors are enabled on a per-statement basis by using the
 option::
 
     with engine.connect() as conn:
-        result = conn.execution_options(stream_results=True).execute(text("select * from table"))
+        result = conn.execution_options(stream_results=True).execute(
+            text("select * from table")
+        )
 
 Note that some kinds of SQL statements may not be supported with
 server side cursors; generally, only SQL statements that return rows should be
@@ -319,7 +325,8 @@ a connection.   This is typically delivered using the ``charset`` parameter
 in the URL, such as::
 
     e = create_engine(
-        "mysql+pymysql://scott:tiger@localhost/test?charset=utf8mb4")
+        "mysql+pymysql://scott:tiger@localhost/test?charset=utf8mb4"
+    )
 
 This charset is the **client character set** for the connection.  Some
 MySQL DBAPIs will default this to a value such as ``latin1``, and some
@@ -339,7 +346,8 @@ charset is preferred, if supported by both the database as well as the client
 DBAPI, as in::
 
     e = create_engine(
-        "mysql+pymysql://scott:tiger@localhost/test?charset=utf8mb4")
+        "mysql+pymysql://scott:tiger@localhost/test?charset=utf8mb4"
+    )
 
 All modern DBAPIs should support the ``utf8mb4`` charset.
 
@@ -361,7 +369,9 @@ Dealing with Binary Data Warnings and Unicode
 MySQL versions 5.6, 5.7 and later (not MariaDB at the time of this writing) now
 emit a warning when attempting to pass binary data to the database, while a
 character set encoding is also in place, when the binary data itself is not
-valid for that encoding::
+valid for that encoding:
+
+.. sourcecode:: text
 
     default.py:509: Warning: (1300, "Invalid utf8mb4 character string:
     'F9876A'")
@@ -371,7 +381,9 @@ This warning is due to the fact that the MySQL client library is attempting to
 interpret the binary string as a unicode object even if a datatype such
 as :class:`.LargeBinary` is in use.   To resolve this, the SQL statement requires
 a binary "character set introducer" be present before any non-NULL value
-that renders like this::
+that renders like this:
+
+.. sourcecode:: sql
 
     INSERT INTO table (data) VALUES (_binary %s)
 
@@ -381,12 +393,13 @@ string parameter ``binary_prefix=true`` to the URL to repair this warning::
 
     # mysqlclient
     engine = create_engine(
-        "mysql+mysqldb://scott:tiger@localhost/test?charset=utf8mb4&binary_prefix=true")
+        "mysql+mysqldb://scott:tiger@localhost/test?charset=utf8mb4&binary_prefix=true"
+    )
 
     # PyMySQL
     engine = create_engine(
-        "mysql+pymysql://scott:tiger@localhost/test?charset=utf8mb4&binary_prefix=true")
-
+        "mysql+pymysql://scott:tiger@localhost/test?charset=utf8mb4&binary_prefix=true"
+    )
 
 The ``binary_prefix`` flag may or may not be supported by other MySQL drivers.
 
@@ -429,7 +442,10 @@ the ``first_connect`` and ``connect`` events::
 
     from sqlalchemy import create_engine, event
 
-    eng = create_engine("mysql+mysqldb://scott:tiger@localhost/test", echo='debug')
+    eng = create_engine(
+        "mysql+mysqldb://scott:tiger@localhost/test", echo="debug"
+    )
+
 
     # `insert=True` will ensure this is the very first listener to run
     @event.listens_for(eng, "connect", insert=True)
@@ -437,6 +453,7 @@ the ``first_connect`` and ``connect`` events::
         cursor = dbapi_connection.cursor()
         cursor.execute("SET sql_mode = 'STRICT_ALL_TABLES'")
 
+
     conn = eng.connect()
 
 In the example illustrated above, the "connect" event will invoke the "SET"
@@ -453,8 +470,8 @@ MySQL / MariaDB SQL Extensions
 Many of the MySQL / MariaDB SQL extensions are handled through SQLAlchemy's generic
 function and operator support::
 
-  table.select(table.c.password==func.md5('plaintext'))
-  table.select(table.c.username.op('regexp')('^[a-d]'))
+  table.select(table.c.password == func.md5("plaintext"))
+  table.select(table.c.username.op("regexp")("^[a-d]"))
 
 And of course any valid SQL statement can be executed as a string as well.
 
@@ -467,7 +484,7 @@ available.
 * SELECT pragma, use :meth:`_expression.Select.prefix_with` and
   :meth:`_query.Query.prefix_with`::
 
-    select(...).prefix_with(['HIGH_PRIORITY', 'SQL_SMALL_RESULT'])
+    select(...).prefix_with(["HIGH_PRIORITY", "SQL_SMALL_RESULT"])
 
 * UPDATE with LIMIT::
 
@@ -483,14 +500,16 @@ available.
 
     select(...).with_hint(some_table, "USE INDEX xyz")
 
-* MATCH operator support::
+* MATCH
+  operator support::
+
+        from sqlalchemy.dialects.mysql import match
 
-    from sqlalchemy.dialects.mysql import match
-    select(...).where(match(col1, col2, against="some expr").in_boolean_mode())
+        select(...).where(match(col1, col2, against="some expr").in_boolean_mode())
 
-    .. seealso::
+  .. seealso::
 
-        :class:`_mysql.match`
+    :class:`_mysql.match`
 
 INSERT/DELETE...RETURNING
 -------------------------
@@ -507,17 +526,15 @@ To specify an explicit ``RETURNING`` clause, use the
 
     # INSERT..RETURNING
     result = connection.execute(
-        table.insert().
-        values(name='foo').
-        returning(table.c.col1, table.c.col2)
+        table.insert().values(name="foo").returning(table.c.col1, table.c.col2)
     )
     print(result.all())
 
     # DELETE..RETURNING
     result = connection.execute(
-        table.delete().
-        where(table.c.name=='foo').
-        returning(table.c.col1, table.c.col2)
+        table.delete()
+        .where(table.c.name == "foo")
+        .returning(table.c.col1, table.c.col2)
     )
     print(result.all())
 
@@ -544,12 +561,11 @@ the generative method :meth:`~.mysql.Insert.on_duplicate_key_update`:
     >>> from sqlalchemy.dialects.mysql import insert
 
     >>> insert_stmt = insert(my_table).values(
-    ...     id='some_existing_id',
-    ...     data='inserted value')
+    ...     id="some_existing_id", data="inserted value"
+    ... )
 
     >>> on_duplicate_key_stmt = insert_stmt.on_duplicate_key_update(
-    ...     data=insert_stmt.inserted.data,
-    ...     status='U'
+    ...     data=insert_stmt.inserted.data, status="U"
     ... )
     >>> print(on_duplicate_key_stmt)
     {printsql}INSERT INTO my_table (id, data) VALUES (%s, %s)
@@ -574,8 +590,8 @@ as values:
 .. sourcecode:: pycon+sql
 
     >>> insert_stmt = insert(my_table).values(
-    ...          id='some_existing_id',
-    ...          data='inserted value')
+    ...     id="some_existing_id", data="inserted value"
+    ... )
 
     >>> on_duplicate_key_stmt = insert_stmt.on_duplicate_key_update(
     ...     data="some data",
@@ -638,13 +654,11 @@ table:
 .. sourcecode:: pycon+sql
 
     >>> stmt = insert(my_table).values(
-    ...     id='some_id',
-    ...     data='inserted value',
-    ...     author='jlh')
+    ...     id="some_id", data="inserted value", author="jlh"
+    ... )
 
     >>> do_update_stmt = stmt.on_duplicate_key_update(
-    ...     data="updated value",
-    ...     author=stmt.inserted.author
+    ...     data="updated value", author=stmt.inserted.author
     ... )
 
     >>> print(do_update_stmt)
@@ -689,13 +703,13 @@ MySQL and MariaDB both provide an option to create index entries with a certain
 become part of the index. SQLAlchemy provides this feature via the
 ``mysql_length`` and/or ``mariadb_length`` parameters::
 
-    Index('my_index', my_table.c.data, mysql_length=10, mariadb_length=10)
+    Index("my_index", my_table.c.data, mysql_length=10, mariadb_length=10)
 
-    Index('a_b_idx', my_table.c.a, my_table.c.b, mysql_length={'a': 4,
-                                                               'b': 9})
+    Index("a_b_idx", my_table.c.a, my_table.c.b, mysql_length={"a": 4, "b": 9})
 
-    Index('a_b_idx', my_table.c.a, my_table.c.b, mariadb_length={'a': 4,
-                                                               'b': 9})
+    Index(
+        "a_b_idx", my_table.c.a, my_table.c.b, mariadb_length={"a": 4, "b": 9}
+    )
 
 Prefix lengths are given in characters for nonbinary string types and in bytes
 for binary string types. The value passed to the keyword argument *must* be
@@ -712,7 +726,7 @@ MySQL storage engines permit you to specify an index prefix when creating
 an index. SQLAlchemy provides this feature via the
 ``mysql_prefix`` parameter on :class:`.Index`::
 
-    Index('my_index', my_table.c.data, mysql_prefix='FULLTEXT')
+    Index("my_index", my_table.c.data, mysql_prefix="FULLTEXT")
 
 The value passed to the keyword argument will be simply passed through to the
 underlying CREATE INDEX, so it *must* be a valid index prefix for your MySQL
@@ -729,11 +743,13 @@ Some MySQL storage engines permit you to specify an index type when creating
 an index or primary key constraint. SQLAlchemy provides this feature via the
 ``mysql_using`` parameter on :class:`.Index`::
 
-    Index('my_index', my_table.c.data, mysql_using='hash', mariadb_using='hash')
+    Index(
+        "my_index", my_table.c.data, mysql_using="hash", mariadb_using="hash"
+    )
 
 As well as the ``mysql_using`` parameter on :class:`.PrimaryKeyConstraint`::
 
-    PrimaryKeyConstraint("data", mysql_using='hash', mariadb_using='hash')
+    PrimaryKeyConstraint("data", mysql_using="hash", mariadb_using="hash")
 
 The value passed to the keyword argument will be simply passed through to the
 underlying CREATE INDEX or PRIMARY KEY clause, so it *must* be a valid index
@@ -752,9 +768,12 @@ CREATE FULLTEXT INDEX in MySQL also supports a "WITH PARSER" option.  This
 is available using the keyword argument ``mysql_with_parser``::
 
     Index(
-        'my_index', my_table.c.data,
-        mysql_prefix='FULLTEXT', mysql_with_parser="ngram",
-        mariadb_prefix='FULLTEXT', mariadb_with_parser="ngram",
+        "my_index",
+        my_table.c.data,
+        mysql_prefix="FULLTEXT",
+        mysql_with_parser="ngram",
+        mariadb_prefix="FULLTEXT",
+        mariadb_with_parser="ngram",
     )
 
 .. versionadded:: 1.3
@@ -781,6 +800,7 @@ them ignored on a MySQL / MariaDB backend, use a custom compile rule::
     from sqlalchemy.ext.compiler import compiles
     from sqlalchemy.schema import ForeignKeyConstraint
 
+
     @compiles(ForeignKeyConstraint, "mysql", "mariadb")
     def process(element, compiler, **kw):
         element.deferrable = element.initially = None
@@ -802,10 +822,12 @@ very common ``MyISAM`` MySQL storage engine, the information loaded by table
 reflection will not include foreign keys.  For these tables, you may supply a
 :class:`~sqlalchemy.ForeignKeyConstraint` at reflection time::
 
-  Table('mytable', metadata,
-        ForeignKeyConstraint(['other_id'], ['othertable.other_id']),
-        autoload_with=engine
-       )
+  Table(
+      "mytable",
+      metadata,
+      ForeignKeyConstraint(["other_id"], ["othertable.other_id"]),
+      autoload_with=engine,
+  )
 
 .. seealso::
 
@@ -877,13 +899,15 @@ parameter and pass a textual clause that also includes the ON UPDATE clause::
     mytable = Table(
         "mytable",
         metadata,
-        Column('id', Integer, primary_key=True),
-        Column('data', String(50)),
+        Column("id", Integer, primary_key=True),
+        Column("data", String(50)),
         Column(
-            'last_updated',
+            "last_updated",
             TIMESTAMP,
-            server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP")
-        )
+            server_default=text(
+                "CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"
+            ),
+        ),
     )
 
 The same instructions apply to use of the :class:`_types.DateTime` and
@@ -894,34 +918,37 @@ The same instructions apply to use of the :class:`_types.DateTime` and
     mytable = Table(
         "mytable",
         metadata,
-        Column('id', Integer, primary_key=True),
-        Column('data', String(50)),
+        Column("id", Integer, primary_key=True),
+        Column("data", String(50)),
         Column(
-            'last_updated',
+            "last_updated",
             DateTime,
-            server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP")
-        )
+            server_default=text(
+                "CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"
+            ),
+        ),
     )
 
-
 Even though the :paramref:`_schema.Column.server_onupdate` feature does not
 generate this DDL, it still may be desirable to signal to the ORM that this
 updated value should be fetched.  This syntax looks like the following::
 
     from sqlalchemy.schema import FetchedValue
 
+
     class MyClass(Base):
-        __tablename__ = 'mytable'
+        __tablename__ = "mytable"
 
         id = Column(Integer, primary_key=True)
         data = Column(String(50))
         last_updated = Column(
             TIMESTAMP,
-            server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"),
-            server_onupdate=FetchedValue()
+            server_default=text(
+                "CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"
+            ),
+            server_onupdate=FetchedValue(),
         )
 
-
 .. _mysql_timestamp_null:
 
 TIMESTAMP Columns and NULL
@@ -931,7 +958,9 @@ MySQL historically enforces that a column which specifies the
 TIMESTAMP datatype implicitly includes a default value of
 CURRENT_TIMESTAMP, even though this is not stated, and additionally
 sets the column as NOT NULL, the opposite behavior vs. that of all
-other datatypes::
+other datatypes:
+
+.. sourcecode:: text
 
     mysql> CREATE TABLE ts_test (
         -> a INTEGER,
@@ -976,19 +1005,24 @@ SQLAlchemy also emits NOT NULL for TIMESTAMP columns that do specify
     from sqlalchemy.dialects.mysql import TIMESTAMP
 
     m = MetaData()
-    t = Table('ts_test', m,
-            Column('a', Integer),
-            Column('b', Integer, nullable=False),
-            Column('c', TIMESTAMP),
-            Column('d', TIMESTAMP, nullable=False)
-        )
+    t = Table(
+        "ts_test",
+        m,
+        Column("a", Integer),
+        Column("b", Integer, nullable=False),
+        Column("c", TIMESTAMP),
+        Column("d", TIMESTAMP, nullable=False),
+    )
 
 
     from sqlalchemy import create_engine
+
     e = create_engine("mysql+mysqldb://scott:tiger@localhost/test", echo=True)
     m.create_all(e)
 
-output::
+output:
+
+.. sourcecode:: sql
 
     CREATE TABLE ts_test (
         a INTEGER,
index e4005c267e48122084cd7cc33fc6a9a2d5a2245a..d9164317b0980a76dc534c4b220c63931dc5ea9a 100644 (file)
@@ -141,7 +141,11 @@ class Insert(StandardInsert):
          in :ref:`tutorial_parameter_ordered_updates`::
 
             insert().on_duplicate_key_update(
-                [("name", "some name"), ("value", "some value")])
+                [
+                    ("name", "some name"),
+                    ("value", "some value"),
+                ]
+            )
 
          .. versionchanged:: 1.3 parameters can be specified as a dictionary
             or list of 2-tuples; the latter form provides for parameter
index 96499d7bee2658da37f52950b620e5ea5e9f42cc..d3c10c0021b7afaafd7667928bd53fc95246b004 100644 (file)
@@ -28,7 +28,7 @@ class ENUM(sqltypes.NativeForEmulated, sqltypes.Enum, _StringType):
 
         E.g.::
 
-          Column('myenum', ENUM("foo", "bar", "baz"))
+          Column("myenum", ENUM("foo", "bar", "baz"))
 
         :param enums: The range of valid values for this ENUM.  Values in
           enums are not quoted, they will be escaped and surrounded by single
@@ -102,8 +102,7 @@ class SET(_StringType):
 
         E.g.::
 
-          Column('myset', SET("foo", "bar", "baz"))
-
+          Column("myset", SET("foo", "bar", "baz"))
 
         The list of potential values is required in the case that this
         set will be used to generate DDL for a table, or if the
index b81b58afc282a70e74d4421f081c2d694f533b68..8c21c748c96d0f209e9d62f669108ce1b8cd3e40 100644 (file)
@@ -38,7 +38,9 @@ class match(Generative, elements.BinaryExpression):
             .order_by(desc(match_expr))
         )
 
-    Would produce SQL resembling::
+    Would produce SQL resembling:
+
+    .. sourcecode:: sql
 
         SELECT id, firstname, lastname
         FROM user
index 0baf10f7056ea133e352b6d6a5fa712eb011bda2..6e7ccaa1525f9096de9dad55f40ab780143db9e6 100644 (file)
@@ -48,9 +48,9 @@ key "ssl", which may be specified using the
             "ssl": {
                 "ca": "/home/gord/client-ssl/ca.pem",
                 "cert": "/home/gord/client-ssl/client-cert.pem",
-                "key": "/home/gord/client-ssl/client-key.pem"
+                "key": "/home/gord/client-ssl/client-key.pem",
             }
-        }
+        },
     )
 
 For convenience, the following keys may also be specified inline within the URL
@@ -74,7 +74,9 @@ Using MySQLdb with Google Cloud SQL
 -----------------------------------
 
 Google Cloud SQL now recommends use of the MySQLdb dialect.  Connect
-using a URL like the following::
+using a URL like the following:
+
+.. sourcecode:: text
 
     mysql+mysqldb://root@/<dbname>?unix_socket=/cloudsql/<projectid>:<instancename>
 
index 830e4416c7968ed977c6f4bc1e6a30078ab252bd..ff62e4f0282b20ca54318522a9d772f1188ed9e9 100644 (file)
@@ -41,7 +41,6 @@ necessary to indicate ``ssl_check_hostname=false`` in PyMySQL::
         "&ssl_check_hostname=false"
     )
 
-
 MySQL-Python Compatibility
 --------------------------
 
index 428c8dfd38526c961e8d5ee43f2b4fc339c1e74a..9ad360bd99511418931daa0626ba946022f08157 100644 (file)
@@ -30,14 +30,15 @@ r"""
 Pass through exact pyodbc connection string::
 
     import urllib
+
     connection_string = (
-        'DRIVER=MySQL ODBC 8.0 ANSI Driver;'
-        'SERVER=localhost;'
-        'PORT=3307;'
-        'DATABASE=mydb;'
-        'UID=root;'
-        'PWD=(whatever);'
-        'charset=utf8mb4;'
+        "DRIVER=MySQL ODBC 8.0 ANSI Driver;"
+        "SERVER=localhost;"
+        "PORT=3307;"
+        "DATABASE=mydb;"
+        "UID=root;"
+        "PWD=(whatever);"
+        "charset=utf8mb4;"
     )
     params = urllib.parse.quote_plus(connection_string)
     connection_uri = "mysql+pyodbc:///?odbc_connect=%s" % params
index 35d9127b37bd97f15ab23d5b5a7b2f13d39f72be..6a8b035bd924fc9b2a23fd30b01577b35930930e 100644 (file)
@@ -29,9 +29,12 @@ Specifying GENERATED AS IDENTITY (Oracle Database 12 and above)
 Starting from version 12, Oracle Database can make use of identity columns
 using the :class:`_sql.Identity` to specify the autoincrementing behavior::
 
-    t = Table('mytable', metadata,
-        Column('id', Integer, Identity(start=3), primary_key=True),
-        Column(...), ...
+    t = Table(
+        "mytable",
+        metadata,
+        Column("id", Integer, Identity(start=3), primary_key=True),
+        Column(...),
+        ...,
     )
 
 The CREATE TABLE for the above :class:`_schema.Table` object would be:
@@ -68,16 +71,21 @@ which assume the usage of an autoincrement-capable database.  To specify
 sequences, use the sqlalchemy.schema.Sequence object which is passed to a
 Column construct::
 
-  t = Table('mytable', metadata,
-        Column('id', Integer, Sequence('id_seq', start=1), primary_key=True),
-        Column(...), ...
+  t = Table(
+      "mytable",
+      metadata,
+      Column("id", Integer, Sequence("id_seq", start=1), primary_key=True),
+      Column(...),
+      ...,
   )
 
 This step is also required when using table reflection, i.e. autoload_with=engine::
 
-  t = Table('mytable', metadata,
-        Column('id', Integer, Sequence('id_seq', start=1), primary_key=True),
-        autoload_with=engine
+  t = Table(
+      "mytable",
+      metadata,
+      Column("id", Integer, Sequence("id_seq", start=1), primary_key=True),
+      autoload_with=engine,
   )
 
 In addition to the standard options, Oracle Database supports the following
@@ -103,9 +111,7 @@ python-oracledb and cx_Oracle dialects.
 To set using per-connection execution options::
 
     connection = engine.connect()
-    connection = connection.execution_options(
-        isolation_level="AUTOCOMMIT"
-    )
+    connection = connection.execution_options(isolation_level="AUTOCOMMIT")
 
 For ``READ COMMITTED`` and ``SERIALIZABLE``, the Oracle Database dialects sets
 the level at the session level using ``ALTER SESSION``, which is reverted back
@@ -189,7 +195,8 @@ change and the value given will be used as is::
 
     engine = create_engine(
         "oracle+oracledb://scott:tiger@localhost:1521?service_name=freepdb1",
-        max_identifier_length=30)
+        max_identifier_length=30,
+    )
 
 If :paramref:`_sa.create_engine.max_identifier_length` is not set, the oracledb
 dialect internally uses the ``max_identifier_length`` attribute available on
@@ -239,13 +246,17 @@ identifier length::
     oracle_dialect = oracle.dialect(max_identifier_length=30)
     print(CreateIndex(ix).compile(dialect=oracle_dialect))
 
-With an identifier length of 30, the above CREATE INDEX looks like::
+With an identifier length of 30, the above CREATE INDEX looks like:
+
+.. sourcecode:: sql
 
     CREATE INDEX ix_some_column_name_1s_70cd ON t
     (some_column_name_1, some_column_name_2, some_column_name_3)
 
-However with length of 128, it becomes::
+However with length of 128, it becomes:
 
+.. sourcecode:: sql
+
     CREATE INDEX ix_some_column_name_1some_column_name_2some_column_name_3 ON t
     (some_column_name_1, some_column_name_2, some_column_name_3)
 
@@ -376,8 +387,9 @@ for tables indicated by synonyms, either in local or remote schemas or
 accessed over DBLINK, by passing the flag ``oracle_resolve_synonyms=True`` as
 a keyword argument to the :class:`_schema.Table` construct::
 
-    some_table = Table('some_table', autoload_with=some_engine,
-                                oracle_resolve_synonyms=True)
+    some_table = Table(
+        "some_table", autoload_with=some_engine, oracle_resolve_synonyms=True
+    )
 
 When this flag is set, the given name (such as ``some_table`` above) will be
 searched not just in the ``ALL_TABLES`` view, but also within the
@@ -422,10 +434,13 @@ Note the following caveats:
 
       from sqlalchemy import create_engine, inspect
 
-      engine = create_engine("oracle+oracledb://scott:tiger@localhost:1521?service_name=freepdb1")
+      engine = create_engine(
+          "oracle+oracledb://scott:tiger@localhost:1521?service_name=freepdb1"
+      )
       inspector = inspect(engine)
       all_check_constraints = inspector.get_check_constraints(
-          "some_table", include_all=True)
+          "some_table", include_all=True
+      )
 
 * in most cases, when reflecting a :class:`_schema.Table`, a UNIQUE constraint
   will **not** be available as a :class:`.UniqueConstraint` object, as Oracle
@@ -455,8 +470,9 @@ the ``exclude_tablespaces`` parameter::
 
     # exclude SYSAUX and SOME_TABLESPACE, but not SYSTEM
     e = create_engine(
-      "oracle+oracledb://scott:tiger@localhost:1521/?service_name=freepdb1",
-      exclude_tablespaces=["SYSAUX", "SOME_TABLESPACE"])
+        "oracle+oracledb://scott:tiger@localhost:1521/?service_name=freepdb1",
+        exclude_tablespaces=["SYSAUX", "SOME_TABLESPACE"],
+    )
 
 DateTime Compatibility
 ----------------------
@@ -481,30 +497,35 @@ dialects in conjunction with the :class:`_schema.Table` construct:
 * ``ON COMMIT``::
 
     Table(
-        "some_table", metadata, ...,
-        prefixes=['GLOBAL TEMPORARY'], oracle_on_commit='PRESERVE ROWS')
+        "some_table",
+        metadata,
+        ...,
+        prefixes=["GLOBAL TEMPORARY"],
+        oracle_on_commit="PRESERVE ROWS",
+    )
 
-* ``COMPRESS``::
+*
+  ``COMPRESS``::
 
-    Table('mytable', metadata, Column('data', String(32)),
-        oracle_compress=True)
+     Table(
+         "mytable", metadata, Column("data", String(32)), oracle_compress=True
+     )
 
-    Table('mytable', metadata, Column('data', String(32)),
-        oracle_compress=6)
+     Table("mytable", metadata, Column("data", String(32)), oracle_compress=6)
 
-   The ``oracle_compress`` parameter accepts either an integer compression
-   level, or ``True`` to use the default compression level.
+  The ``oracle_compress`` parameter accepts either an integer compression
+  level, or ``True`` to use the default compression level.
 
-* ``TABLESPACE``::
+*
+  ``TABLESPACE``::
 
-    Table('mytable', metadata, ...,
-        oracle_tablespace="EXAMPLE_TABLESPACE")
+     Table("mytable", metadata, ..., oracle_tablespace="EXAMPLE_TABLESPACE")
 
-    The ``oracle_tablespace`` parameter specifies the tablespace in which the
-    table is to be created. This is useful when you want to create a table in a
-    tablespace other than the default tablespace of the user.
+  The ``oracle_tablespace`` parameter specifies the tablespace in which the
+  table is to be created. This is useful when you want to create a table in a
+  tablespace other than the default tablespace of the user.
 
-    .. versionadded:: 2.0.37
+  .. versionadded:: 2.0.37
 
 .. _oracle_index_options:
 
@@ -517,7 +538,7 @@ Bitmap Indexes
 You can specify the ``oracle_bitmap`` parameter to create a bitmap index
 instead of a B-tree index::
 
-    Index('my_index', my_table.c.data, oracle_bitmap=True)
+    Index("my_index", my_table.c.data, oracle_bitmap=True)
 
 Bitmap indexes cannot be unique and cannot be compressed. SQLAlchemy will not
 check for such limitations, only the database will.
@@ -529,10 +550,15 @@ Oracle Database has a more efficient storage mode for indexes containing lots
 of repeated values. Use the ``oracle_compress`` parameter to turn on key
 compression::
 
-    Index('my_index', my_table.c.data, oracle_compress=True)
+    Index("my_index", my_table.c.data, oracle_compress=True)
 
-    Index('my_index', my_table.c.data1, my_table.c.data2, unique=True,
-           oracle_compress=1)
+    Index(
+        "my_index",
+        my_table.c.data1,
+        my_table.c.data2,
+        unique=True,
+        oracle_compress=1,
+    )
 
 The ``oracle_compress`` parameter accepts either an integer specifying the
 number of prefix columns to compress, or ``True`` to use the default (all
index babb916a60209ab58f0af1457ab7f3d7a38a4519..9b66d7ea783ebeec09a7c062c196f77b7da93a41 100644 (file)
@@ -32,7 +32,9 @@ Given a hostname, port and service name of the target database, for example
 from Oracle Database's Easy Connect syntax then connect in SQLAlchemy using the
 ``service_name`` query string parameter::
 
-    engine = create_engine("oracle+cx_oracle://scott:tiger@hostname:port?service_name=myservice&encoding=UTF-8&nencoding=UTF-8")
+    engine = create_engine(
+        "oracle+cx_oracle://scott:tiger@hostname:port?service_name=myservice&encoding=UTF-8&nencoding=UTF-8"
+    )
 
 Note that the default driver value for encoding and nencoding was changed to
 “UTF-8” in cx_Oracle 8.0 so these parameters can be omitted when using that
@@ -42,13 +44,14 @@ To use a full Easy Connect string, pass it as the ``dsn`` key value in a
 :paramref:`_sa.create_engine.connect_args` dictionary::
 
     import cx_Oracle
+
     e = create_engine(
         "oracle+cx_oracle://@",
         connect_args={
             "user": "scott",
             "password": "tiger",
-            "dsn": "hostname:port/myservice?transport_connect_timeout=30&expire_time=60"
-        }
+            "dsn": "hostname:port/myservice?transport_connect_timeout=30&expire_time=60",
+        },
     )
 
 Connections with tnsnames.ora or to Oracle Autonomous Database
@@ -57,7 +60,9 @@ Connections with tnsnames.ora or to Oracle Autonomous Database
 Alternatively, if no port, database name, or service name is provided, the
 dialect will use an Oracle Database DSN "connection string".  This takes the
 "hostname" portion of the URL as the data source name.  For example, if the
-``tnsnames.ora`` file contains a TNS Alias of ``myalias`` as below::
+``tnsnames.ora`` file contains a TNS Alias of ``myalias`` as below:
+
+.. sourcecode:: text
 
     myalias =
       (DESCRIPTION =
@@ -85,7 +90,9 @@ SID Connections
 To use Oracle Database's obsolete System Identifier connection syntax, the SID
 can be passed in a "database name" portion of the URL::
 
-    engine = create_engine("oracle+cx_oracle://scott:tiger@hostname:port/dbname")
+    engine = create_engine(
+        "oracle+cx_oracle://scott:tiger@hostname:port/dbname"
+    )
 
 Above, the DSN passed to cx_Oracle is created by ``cx_Oracle.makedsn()`` as
 follows::
@@ -107,7 +114,8 @@ particular symbols like ``SYSDBA`` are intercepted and converted to the correct
 symbol::
 
     e = create_engine(
-        "oracle+cx_oracle://user:pass@dsn?encoding=UTF-8&nencoding=UTF-8&mode=SYSDBA&events=true")
+        "oracle+cx_oracle://user:pass@dsn?encoding=UTF-8&nencoding=UTF-8&mode=SYSDBA&events=true"
+    )
 
 .. versionchanged:: 1.3 the cx_Oracle dialect now accepts all argument names
    within the URL string itself, to be passed to the cx_Oracle DBAPI.   As
@@ -120,14 +128,15 @@ string, use the :paramref:`_sa.create_engine.connect_args` dictionary.
 Any cx_Oracle parameter value and/or constant may be passed, such as::
 
     import cx_Oracle
+
     e = create_engine(
         "oracle+cx_oracle://user:pass@dsn",
         connect_args={
             "encoding": "UTF-8",
             "nencoding": "UTF-8",
             "mode": cx_Oracle.SYSDBA,
-            "events": True
-        }
+            "events": True,
+        },
     )
 
 Note that the default driver value for ``encoding`` and ``nencoding`` was
@@ -142,7 +151,8 @@ itself.  These options are always passed directly to :func:`_sa.create_engine`
 , such as::
 
     e = create_engine(
-        "oracle+cx_oracle://user:pass@dsn", coerce_to_decimal=False)
+        "oracle+cx_oracle://user:pass@dsn", coerce_to_decimal=False
+    )
 
 The parameters accepted by the cx_oracle dialect are as follows:
 
@@ -184,12 +194,20 @@ SQLAlchemy's pooling::
     from sqlalchemy.pool import NullPool
 
     pool = cx_Oracle.SessionPool(
-        user="scott", password="tiger", dsn="orclpdb",
-        min=1, max=4, increment=1, threaded=True,
-        encoding="UTF-8", nencoding="UTF-8"
+        user="scott",
+        password="tiger",
+        dsn="orclpdb",
+        min=1,
+        max=4,
+        increment=1,
+        threaded=True,
+        encoding="UTF-8",
+        nencoding="UTF-8",
     )
 
-    engine = create_engine("oracle+cx_oracle://", creator=pool.acquire, poolclass=NullPool)
+    engine = create_engine(
+        "oracle+cx_oracle://", creator=pool.acquire, poolclass=NullPool
+    )
 
 The above engine may then be used normally where cx_Oracle's pool handles
 connection pooling::
@@ -220,15 +238,27 @@ This can be achieved by wrapping ``pool.acquire()``::
     from sqlalchemy.pool import NullPool
 
     pool = cx_Oracle.SessionPool(
-        user="scott", password="tiger", dsn="orclpdb",
-        min=2, max=5, increment=1, threaded=True,
-        encoding="UTF-8", nencoding="UTF-8"
+        user="scott",
+        password="tiger",
+        dsn="orclpdb",
+        min=2,
+        max=5,
+        increment=1,
+        threaded=True,
+        encoding="UTF-8",
+        nencoding="UTF-8",
     )
 
+
     def creator():
-        return pool.acquire(cclass="MYCLASS", purity=cx_Oracle.ATTR_PURITY_SELF)
+        return pool.acquire(
+            cclass="MYCLASS", purity=cx_Oracle.ATTR_PURITY_SELF
+        )
+
 
-    engine = create_engine("oracle+cx_oracle://", creator=creator, poolclass=NullPool)
+    engine = create_engine(
+        "oracle+cx_oracle://", creator=creator, poolclass=NullPool
+    )
 
 The above engine may then be used normally where cx_Oracle handles session
 pooling and Oracle Database additionally uses DRCP::
@@ -261,7 +291,9 @@ The cx_Oracle driver also supported a programmatic alternative which is to pass
 the ``encoding`` and ``nencoding`` parameters directly to its ``.connect()``
 function.  These can be present in the URL as follows::
 
-    engine = create_engine("oracle+cx_oracle://scott:tiger@tnsalias?encoding=UTF-8&nencoding=UTF-8")
+    engine = create_engine(
+        "oracle+cx_oracle://scott:tiger@tnsalias?encoding=UTF-8&nencoding=UTF-8"
+    )
 
 For the meaning of the ``encoding`` and ``nencoding`` parameters, please
 consult
@@ -355,13 +387,16 @@ objects which have a ``.key`` and a ``.type`` attribute::
 
     engine = create_engine("oracle+cx_oracle://scott:tiger@host/xe")
 
+
     @event.listens_for(engine, "do_setinputsizes")
     def _log_setinputsizes(inputsizes, cursor, statement, parameters, context):
         for bindparam, dbapitype in inputsizes.items():
-                log.info(
-                    "Bound parameter name: %s  SQLAlchemy type: %r  "
-                    "DBAPI object: %s",
-                    bindparam.key, bindparam.type, dbapitype)
+            log.info(
+                "Bound parameter name: %s  SQLAlchemy type: %r DBAPI object: %s",
+                bindparam.key,
+                bindparam.type,
+                dbapitype,
+            )
 
 Example 2 - remove all bindings to CLOB
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -375,6 +410,7 @@ series.   This setting can be modified as follows::
 
     engine = create_engine("oracle+cx_oracle://scott:tiger@host/xe")
 
+
     @event.listens_for(engine, "do_setinputsizes")
     def _remove_clob(inputsizes, cursor, statement, parameters, context):
         for bindparam, dbapitype in list(inputsizes.items()):
index 79a90822b406ae47c28ce5f7c224af6e35668a8d..d85849ad9d0295b18672427a2b2bca1dc224a469 100644 (file)
@@ -31,19 +31,28 @@ selected depending on how the engine is created:
   automatically select the sync version::
 
     from sqlalchemy import create_engine
-    sync_engine = create_engine("oracle+oracledb://scott:tiger@localhost?service_name=FREEPDB1")
+
+    sync_engine = create_engine(
+        "oracle+oracledb://scott:tiger@localhost?service_name=FREEPDB1"
+    )
 
 * calling :func:`_asyncio.create_async_engine` with ``oracle+oracledb://...``
   will automatically select the async version::
 
     from sqlalchemy.ext.asyncio import create_async_engine
-    asyncio_engine = create_async_engine("oracle+oracledb://scott:tiger@localhost?service_name=FREEPDB1")
+
+    asyncio_engine = create_async_engine(
+        "oracle+oracledb://scott:tiger@localhost?service_name=FREEPDB1"
+    )
 
   The asyncio version of the dialect may also be specified explicitly using the
   ``oracledb_async`` suffix::
 
       from sqlalchemy.ext.asyncio import create_async_engine
-      asyncio_engine = create_async_engine("oracle+oracledb_async://scott:tiger@localhost?service_name=FREEPDB1")
+
+      asyncio_engine = create_async_engine(
+          "oracle+oracledb_async://scott:tiger@localhost?service_name=FREEPDB1"
+      )
 
 .. versionadded:: 2.0.25 added support for the async version of oracledb.
 
@@ -62,11 +71,14 @@ explicitly, or pass the parameter ``thick_mode=True`` to
 ``init_oracle_client()``, like the ``lib_dir`` path, a dict may be passed, for
 example::
 
-    engine = sa.create_engine("oracle+oracledb://...", thick_mode={
-        "lib_dir": "/path/to/oracle/client/lib",
-        "config_dir": "/path/to/network_config_file_directory",
-        "driver_name": "my-app : 1.0.0"
-    })
+    engine = sa.create_engine(
+        "oracle+oracledb://...",
+        thick_mode={
+            "lib_dir": "/path/to/oracle/client/lib",
+            "config_dir": "/path/to/network_config_file_directory",
+            "driver_name": "my-app : 1.0.0",
+        },
+    )
 
 Note that passing a ``lib_dir`` path should only be done on macOS or
 Windows. On Linux it does not behave as you might expect.
@@ -85,7 +97,9 @@ The dialect translates from a series of different URL forms.
 Given the hostname, port and service name of the target database, you can
 connect in SQLAlchemy using the ``service_name`` query string parameter::
 
-    engine = create_engine("oracle+oracledb://scott:tiger@hostname:port?service_name=myservice")
+    engine = create_engine(
+        "oracle+oracledb://scott:tiger@hostname:port?service_name=myservice"
+    )
 
 Connecting with Easy Connect strings
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -107,8 +121,8 @@ terminated by a firewall::
         connect_args={
             "user": "scott",
             "password": "tiger",
-            "dsn": "hostname:port/myservice?transport_connect_timeout=30&expire_time=60"
-        }
+            "dsn": "hostname:port/myservice?transport_connect_timeout=30&expire_time=60",
+        },
     )
 
 The Easy Connect syntax has been enhanced during the life of Oracle Database.
@@ -116,7 +130,9 @@ Review the documentation for your database version.  The current documentation
 is at `Understanding the Easy Connect Naming Method
 <https://www.oracle.com/pls/topic/lookup?ctx=dblatest&id=GUID-B0437826-43C1-49EC-A94D-B650B6A4A6EE>`_.
 
-The general syntax is similar to::
+The general syntax is similar to:
+
+.. sourcecode:: text
 
     [[protocol:]//]host[:port][/[service_name]][?parameter_name=value{&parameter_name=value}]
 
@@ -143,8 +159,8 @@ can be passed in ``connect_args``.  For example::
             "password": "tiger",
             "dsn": "hostname:port/myservice",
             "events": True,
-            "mode": oracledb.AUTH_MODE_SYSDBA
-        }
+            "mode": oracledb.AUTH_MODE_SYSDBA,
+        },
     )
 
 Connecting with tnsnames.ora TNS aliases
@@ -155,7 +171,9 @@ Oracle Database DSN "connection string".  This takes the "hostname" portion of
 the URL as the data source name.  For example, if the ``tnsnames.ora`` file
 contains a `TNS Alias
 <https://python-oracledb.readthedocs.io/en/latest/user_guide/connection_handling.html#tns-aliases-for-connection-strings>`_
-of ``myalias`` as below::
+of ``myalias`` as below:
+
+.. sourcecode:: text
 
     myalias =
       (DESCRIPTION =
@@ -187,32 +205,32 @@ summary, Thick mode users should configure file locations and set the wallet
 path in ``sqlnet.ora`` appropriately::
 
     e = create_engine(
-            "oracle+oracledb://@",
-            thick_mode={
-                 # directory containing tnsnames.ora and cwallet.so
-                 "config_dir": "/opt/oracle/wallet_dir",
-            },
-            connect_args={
-                "user": "scott",
-                "password": "tiger",
-                "dsn": "mydb_high"
-            }
-        )
+        "oracle+oracledb://@",
+        thick_mode={
+            # directory containing tnsnames.ora and cwallet.so
+            "config_dir": "/opt/oracle/wallet_dir",
+        },
+        connect_args={
+            "user": "scott",
+            "password": "tiger",
+            "dsn": "mydb_high",
+        },
+    )
 
 Thin mode users of mTLS should pass the appropriate directories and PEM wallet
 password when creating the engine, similar to::
 
     e = create_engine(
-            "oracle+oracledb://@",
-            connect_args={
-                "user": "scott",
-                "password": "tiger",
-                "dsn": "mydb_high",
-                "config_dir": "/opt/oracle/wallet_dir",       # directory containing tnsnames.ora
-                "wallet_location": "/opt/oracle/wallet_dir",  # directory containing ewallet.pem
-                "wallet_password": "top secret"               # password for the PEM file
-            }
-        )
+        "oracle+oracledb://@",
+        connect_args={
+            "user": "scott",
+            "password": "tiger",
+            "dsn": "mydb_high",
+            "config_dir": "/opt/oracle/wallet_dir",  # directory containing tnsnames.ora
+            "wallet_location": "/opt/oracle/wallet_dir",  # directory containing ewallet.pem
+            "wallet_password": "top secret",  # password for the PEM file
+        },
+    )
 
 Typically ``config_dir`` and ``wallet_location`` are the same directory, which
 is where the Oracle Autonomous Database wallet zip file was extracted.  Note
@@ -246,11 +264,19 @@ SQLAlchemy's pooling::
 
     # Uncomment to use the optional python-oracledb Thick mode.
     # Review the python-oracledb doc for the appropriate parameters
-    #oracledb.init_oracle_client(<your parameters>)
-
-    pool = oracledb.create_pool(user="scott", password="tiger", dsn="localhost:1521/freepdb1",
-                                min=1, max=4, increment=1)
-    engine = create_engine("oracle+oracledb://", creator=pool.acquire, poolclass=NullPool)
+    # oracledb.init_oracle_client(<your parameters>)
+
+    pool = oracledb.create_pool(
+        user="scott",
+        password="tiger",
+        dsn="localhost:1521/freepdb1",
+        min=1,
+        max=4,
+        increment=1,
+    )
+    engine = create_engine(
+        "oracle+oracledb://", creator=pool.acquire, poolclass=NullPool
+    )
 
 The above engine may then be used normally. Internally, python-oracledb handles
 connection pooling::
@@ -280,12 +306,21 @@ For example::
 
     # Uncomment to use the optional python-oracledb Thick mode.
     # Review the python-oracledb doc for the appropriate parameters
-    #oracledb.init_oracle_client(<your parameters>)
-
-    pool = oracledb.create_pool(user="scott", password="tiger", dsn="localhost:1521/freepdb1",
-                                min=1, max=4, increment=1,
-                                cclass="MYCLASS", purity=oracledb.PURITY_SELF)
-    engine = create_engine("oracle+oracledb://", creator=pool.acquire, poolclass=NullPool)
+    # oracledb.init_oracle_client(<your parameters>)
+
+    pool = oracledb.create_pool(
+        user="scott",
+        password="tiger",
+        dsn="localhost:1521/freepdb1",
+        min=1,
+        max=4,
+        increment=1,
+        cclass="MYCLASS",
+        purity=oracledb.PURITY_SELF,
+    )
+    engine = create_engine(
+        "oracle+oracledb://", creator=pool.acquire, poolclass=NullPool
+    )
 
 The above engine may then be used normally where python-oracledb handles
 application connection pooling and Oracle Database additionally uses DRCP::
@@ -303,16 +338,27 @@ connections, then wrap ``pool.acquire()``::
 
     # Uncomment to use python-oracledb Thick mode.
     # Review the python-oracledb doc for the appropriate parameters
-    #oracledb.init_oracle_client(<your parameters>)
+    # oracledb.init_oracle_client(<your parameters>)
+
+    pool = oracledb.create_pool(
+        user="scott",
+        password="tiger",
+        dsn="localhost:1521/freepdb1",
+        min=1,
+        max=4,
+        increment=1,
+        cclass="MYCLASS",
+        purity=oracledb.PURITY_SELF,
+    )
 
-    pool = oracledb.create_pool(user="scott", password="tiger", dsn="localhost:1521/freepdb1",
-                                min=1, max=4, increment=1,
-                                cclass="MYCLASS", purity=oracledb.PURITY_SELF)
 
     def creator():
         return pool.acquire(cclass="MYOTHERCLASS", purity=oracledb.PURITY_NEW)
 
-    engine = create_engine("oracle+oracledb://", creator=creator, poolclass=NullPool)
+
+    engine = create_engine(
+        "oracle+oracledb://", creator=creator, poolclass=NullPool
+    )
 
 Engine Options consumed by the SQLAlchemy oracledb dialect outside of the driver
 --------------------------------------------------------------------------------
@@ -321,8 +367,7 @@ There are also options that are consumed by the SQLAlchemy oracledb dialect
 itself.  These options are always passed directly to :func:`_sa.create_engine`,
 such as::
 
-    e = create_engine(
-        "oracle+oracledb://user:pass@tnsalias", arraysize=500)
+    e = create_engine("oracle+oracledb://user:pass@tnsalias", arraysize=500)
 
 The parameters accepted by the oracledb dialect are as follows:
 
@@ -433,15 +478,20 @@ objects which have a ``.key`` and a ``.type`` attribute::
 
     from sqlalchemy import create_engine, event
 
-    engine = create_engine("oracle+oracledb://scott:tiger@localhost:1521?service_name=freepdb1")
+    engine = create_engine(
+        "oracle+oracledb://scott:tiger@localhost:1521?service_name=freepdb1"
+    )
+
 
     @event.listens_for(engine, "do_setinputsizes")
     def _log_setinputsizes(inputsizes, cursor, statement, parameters, context):
         for bindparam, dbapitype in inputsizes.items():
-                log.info(
-                    "Bound parameter name: %s  SQLAlchemy type: %r  "
-                    "DBAPI object: %s",
-                    bindparam.key, bindparam.type, dbapitype)
+            log.info(
+                "Bound parameter name: %s  SQLAlchemy type: %r DBAPI object: %s",
+                bindparam.key,
+                bindparam.type,
+                dbapitype,
+            )
 
 Example 2 - remove all bindings to CLOB
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -454,7 +504,10 @@ follows::
     from sqlalchemy import create_engine, event
     from oracledb import CLOB
 
-    engine = create_engine("oracle+oracledb://scott:tiger@localhost:1521?service_name=freepdb1")
+    engine = create_engine(
+        "oracle+oracledb://scott:tiger@localhost:1521?service_name=freepdb1"
+    )
+
 
     @event.listens_for(engine, "do_setinputsizes")
     def _remove_clob(inputsizes, cursor, statement, parameters, context):
@@ -524,7 +577,9 @@ values which specify precision and scale as Python ``Decimal`` objects.  To
 disable this coercion to decimal for performance reasons, pass the flag
 ``coerce_to_decimal=False`` to :func:`_sa.create_engine`::
 
-    engine = create_engine("oracle+oracledb://scott:tiger@tnsalias", coerce_to_decimal=False)
+    engine = create_engine(
+        "oracle+oracledb://scott:tiger@tnsalias", coerce_to_decimal=False
+    )
 
 The ``coerce_to_decimal`` flag only impacts the results of plain string
 SQL statements that are not otherwise associated with a :class:`.Numeric`
index 1d63655ee05aa3ff1dc87f4a7d79f574c746f1d4..fcb98e6518325b0c8927d702d62ef1374329ab86 100644 (file)
@@ -54,11 +54,13 @@ class array(expression.ExpressionClauseList[_T]):
         from sqlalchemy.dialects import postgresql
         from sqlalchemy import select, func
 
-        stmt = select(array([1,2]) + array([3,4,5]))
+        stmt = select(array([1, 2]) + array([3, 4, 5]))
 
         print(stmt.compile(dialect=postgresql.dialect()))
 
-    Produces the SQL::
+    Produces the SQL:
+
+    .. sourcecode:: sql
 
         SELECT ARRAY[%(param_1)s, %(param_2)s] ||
-            ARRAY[%(param_3)s, %(param_4)s, %(param_5)s]) AS anon_1
+            ARRAY[%(param_3)s, %(param_4)s, %(param_5)s] AS anon_1
@@ -67,7 +69,7 @@ class array(expression.ExpressionClauseList[_T]):
     :class:`_types.ARRAY`.  The "inner" type of the array is inferred from
     the values present, unless the ``type_`` keyword argument is passed::
 
-        array(['foo', 'bar'], type_=CHAR)
+        array(["foo", "bar"], type_=CHAR)
 
     Multidimensional arrays are produced by nesting :class:`.array` constructs.
     The dimensionality of the final :class:`_types.ARRAY`
@@ -76,16 +78,21 @@ class array(expression.ExpressionClauseList[_T]):
     type::
 
         stmt = select(
-            array([
-                array([1, 2]), array([3, 4]), array([column('q'), column('x')])
-            ])
+            array(
+                [array([1, 2]), array([3, 4]), array([column("q"), column("x")])]
+            )
         )
         print(stmt.compile(dialect=postgresql.dialect()))
 
-    Produces::
+    Produces:
 
-        SELECT ARRAY[ARRAY[%(param_1)s, %(param_2)s],
-        ARRAY[%(param_3)s, %(param_4)s], ARRAY[q, x]] AS anon_1
+    .. sourcecode:: sql
+
+        SELECT ARRAY[
+            ARRAY[%(param_1)s, %(param_2)s],
+            ARRAY[%(param_3)s, %(param_4)s],
+            ARRAY[q, x]
+        ] AS anon_1
 
     .. versionadded:: 1.3.6 added support for multidimensional array literals
 
@@ -93,7 +100,7 @@ class array(expression.ExpressionClauseList[_T]):
 
         :class:`_postgresql.ARRAY`
 
-    """
+    """  # noqa: E501
 
     __visit_name__ = "array"
 
@@ -166,9 +173,11 @@ class ARRAY(sqltypes.ARRAY):
 
         from sqlalchemy.dialects import postgresql
 
-        mytable = Table("mytable", metadata,
-                Column("data", postgresql.ARRAY(Integer, dimensions=2))
-            )
+        mytable = Table(
+            "mytable",
+            metadata,
+            Column("data", postgresql.ARRAY(Integer, dimensions=2)),
+        )
 
     The :class:`_postgresql.ARRAY` type provides all operations defined on the
     core :class:`_types.ARRAY` type, including support for "dimensions",
@@ -204,6 +213,7 @@ class ARRAY(sqltypes.ARRAY):
             from sqlalchemy.dialects.postgresql import ARRAY
             from sqlalchemy.ext.mutable import MutableList
 
+
             class SomeOrmClass(Base):
                 # ...
 
@@ -236,7 +246,7 @@ class ARRAY(sqltypes.ARRAY):
 
         E.g.::
 
-          Column('myarray', ARRAY(Integer))
+          Column("myarray", ARRAY(Integer))
 
         Arguments are:
 
index a362c616e1d0d110cfa28137a13e1c860a955784..510530a37df806ba1e354533cc22fc997f69760c 100644 (file)
@@ -23,7 +23,10 @@ This dialect should normally be used only with the
 :func:`_asyncio.create_async_engine` engine creation function::
 
     from sqlalchemy.ext.asyncio import create_async_engine
-    engine = create_async_engine("postgresql+asyncpg://user:pass@hostname/dbname")
+
+    engine = create_async_engine(
+        "postgresql+asyncpg://user:pass@hostname/dbname"
+    )
 
 .. versionadded:: 1.4
 
@@ -78,11 +81,15 @@ asyncpg dialect, therefore is handled as a DBAPI argument, not a dialect
 argument)::
 
 
-    engine = create_async_engine("postgresql+asyncpg://user:pass@hostname/dbname?prepared_statement_cache_size=500")
+    engine = create_async_engine(
+        "postgresql+asyncpg://user:pass@hostname/dbname?prepared_statement_cache_size=500"
+    )
 
 To disable the prepared statement cache, use a value of zero::
 
-    engine = create_async_engine("postgresql+asyncpg://user:pass@hostname/dbname?prepared_statement_cache_size=0")
+    engine = create_async_engine(
+        "postgresql+asyncpg://user:pass@hostname/dbname?prepared_statement_cache_size=0"
+    )
 
 .. versionadded:: 1.4.0b2 Added ``prepared_statement_cache_size`` for asyncpg.
 
@@ -131,7 +138,7 @@ a prepared statement is prepared::
         "postgresql+asyncpg://user:pass@somepgbouncer/dbname",
         poolclass=NullPool,
         connect_args={
-            'prepared_statement_name_func': lambda:  f'__asyncpg_{uuid4()}__',
+            "prepared_statement_name_func": lambda: f"__asyncpg_{uuid4()}__",
         },
     )
 
index 44d6f1570c56a8a6279cc044dcb084641438b720..2a335c3d28d4e0b22c470becc4a1866d79b0fe61 100644 (file)
@@ -31,7 +31,7 @@ use the :func:`~sqlalchemy.schema.Sequence` construct::
         metadata,
         Column(
             "id", Integer, Sequence("some_id_seq", start=1), primary_key=True
-        )
+        ),
     )
 
 When SQLAlchemy issues a single INSERT statement, to fulfill the contract of
@@ -63,9 +63,9 @@ of SERIAL. The :class:`_schema.Identity` construct in a
         "data",
         metadata,
         Column(
-            'id', Integer, Identity(start=42, cycle=True), primary_key=True
+            "id", Integer, Identity(start=42, cycle=True), primary_key=True
         ),
-        Column('data', String)
+        Column("data", String),
     )
 
 The CREATE TABLE for the above :class:`_schema.Table` object would be:
@@ -92,23 +92,21 @@ The CREATE TABLE for the above :class:`_schema.Table` object would be:
        from sqlalchemy.ext.compiler import compiles
 
 
-       @compiles(CreateColumn, 'postgresql')
+       @compiles(CreateColumn, "postgresql")
        def use_identity(element, compiler, **kw):
            text = compiler.visit_create_column(element, **kw)
-           text = text.replace(
-               "SERIAL", "INT GENERATED BY DEFAULT AS IDENTITY"
-            )
+           text = text.replace("SERIAL", "INT GENERATED BY DEFAULT AS IDENTITY")
            return text
 
    Using the above, a table such as::
 
        t = Table(
-           't', m,
-           Column('id', Integer, primary_key=True),
-           Column('data', String)
+           "t", m, Column("id", Integer, primary_key=True), Column("data", String)
        )
 
-   Will generate on the backing database as::
+   Will generate on the backing database as:
+
+   .. sourcecode:: sql
 
        CREATE TABLE t (
            id INT GENERATED BY DEFAULT AS IDENTITY,
@@ -129,7 +127,9 @@ Server side cursors are enabled on a per-statement basis by using the
 option::
 
     with engine.connect() as conn:
-        result = conn.execution_options(stream_results=True).execute(text("select * from table"))
+        result = conn.execution_options(stream_results=True).execute(
+            text("select * from table")
+        )
 
 Note that some kinds of SQL statements may not be supported with
 server side cursors; generally, only SQL statements that return rows should be
@@ -168,17 +168,15 @@ To set isolation level using :func:`_sa.create_engine`::
 
     engine = create_engine(
         "postgresql+pg8000://scott:tiger@localhost/test",
-        isolation_level = "REPEATABLE READ"
+        isolation_level="REPEATABLE READ",
     )
 
 To set using per-connection execution options::
 
     with engine.connect() as conn:
-        conn = conn.execution_options(
-            isolation_level="REPEATABLE READ"
-        )
+        conn = conn.execution_options(isolation_level="REPEATABLE READ")
         with conn.begin():
-            # ... work with transaction
+            ...  # work with transaction
 
 There are also more options for isolation level configurations, such as
 "sub-engine" objects linked to a main :class:`_engine.Engine` which each apply
@@ -221,10 +219,10 @@ passing the ``"SERIALIZABLE"`` isolation level at the same time as setting
         conn = conn.execution_options(
             isolation_level="SERIALIZABLE",
             postgresql_readonly=True,
-            postgresql_deferrable=True
+            postgresql_deferrable=True,
         )
         with conn.begin():
-            #  ... work with transaction
+            ...  # work with transaction
 
 Note that some DBAPIs such as asyncpg only support "readonly" with
 SERIALIZABLE isolation.
@@ -269,7 +267,6 @@ will remain consistent with the state of the transaction::
 
     postgresql_engine = create_engine(
         "postgresql+psycopg2://scott:tiger@hostname/dbname",
-
         # disable default reset-on-return scheme
         pool_reset_on_return=None,
     )
@@ -316,6 +313,7 @@ at :ref:`schema_set_default_connections`::
 
     engine = create_engine("postgresql+psycopg2://scott:tiger@host/dbname")
 
+
     @event.listens_for(engine, "connect", insert=True)
     def set_search_path(dbapi_connection, connection_record):
         existing_autocommit = dbapi_connection.autocommit
@@ -334,9 +332,6 @@ be reverted when the DBAPI connection has a rollback.
 
   :ref:`schema_set_default_connections` - in the :ref:`metadata_toplevel` documentation
 
-
-
-
 .. _postgresql_schema_reflection:
 
 Remote-Schema Table Introspection and PostgreSQL search_path
@@ -360,7 +355,9 @@ In all cases, the first thing SQLAlchemy does when reflecting tables is
 to **determine the default schema for the current database connection**.
 It does this using the PostgreSQL ``current_schema()``
 function, illustrated below using a PostgreSQL client session (i.e. using
-the ``psql`` tool)::
+the ``psql`` tool):
+
+.. sourcecode:: sql
 
     test=> select current_schema();
     current_schema
@@ -374,7 +371,9 @@ is the name ``public``.
 However, if your database username **matches the name of a schema**, PostgreSQL's
 default is to then **use that name as the default schema**.  Below, we log in
 using the username ``scott``.  When we create a schema named ``scott``, **it
-implicitly changes the default schema**::
+implicitly changes the default schema**:
+
+.. sourcecode:: sql
 
     test=> select current_schema();
     current_schema
@@ -393,7 +392,9 @@ implicitly changes the default schema**::
 The behavior of ``current_schema()`` is derived from the
 `PostgreSQL search path
 <https://www.postgresql.org/docs/current/static/ddl-schemas.html#DDL-SCHEMAS-PATH>`_
-variable ``search_path``, which in modern PostgreSQL versions defaults to this::
+variable ``search_path``, which in modern PostgreSQL versions defaults to this:
+
+.. sourcecode:: sql
 
     test=> show search_path;
     search_path
@@ -419,7 +420,9 @@ PostgreSQL's own ``pg_get_constraintdef()`` builtin procedure.  This function
 returns a sample definition for a particular foreign key constraint,
 omitting the referenced schema name from that definition when the name is
 also in the PostgreSQL schema search path.  The interaction below
-illustrates this behavior::
+illustrates this behavior:
+
+.. sourcecode:: sql
 
     test=> CREATE TABLE test_schema.referred(id INTEGER PRIMARY KEY);
     CREATE TABLE
@@ -446,13 +449,17 @@ PG ``search_path`` and then asked ``pg_get_constraintdef()`` for the
 the function.
 
 On the other hand, if we set the search path back to the typical default
-of ``public``::
+of ``public``:
+
+.. sourcecode:: sql
 
     test=> SET search_path TO public;
     SET
 
 The same query against ``pg_get_constraintdef()`` now returns the fully
-schema-qualified name for us::
+schema-qualified name for us:
+
+.. sourcecode:: sql
 
     test=> SELECT pg_catalog.pg_get_constraintdef(r.oid, true) FROM
     test-> pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n
@@ -474,16 +481,14 @@ reflection process as follows::
     >>> with engine.connect() as conn:
     ...     conn.execute(text("SET search_path TO test_schema, public"))
     ...     metadata_obj = MetaData()
-    ...     referring = Table('referring', metadata_obj,
-    ...                       autoload_with=conn)
-    ...
+    ...     referring = Table("referring", metadata_obj, autoload_with=conn)
     <sqlalchemy.engine.result.CursorResult object at 0x101612ed0>
 
 The above process would deliver to the :attr:`_schema.MetaData.tables`
 collection
 ``referred`` table named **without** the schema::
 
-    >>> metadata_obj.tables['referred'].schema is None
+    >>> metadata_obj.tables["referred"].schema is None
     True
 
 To alter the behavior of reflection such that the referred schema is
@@ -495,15 +500,17 @@ dialect-specific argument to both :class:`_schema.Table` as well as
     >>> with engine.connect() as conn:
     ...     conn.execute(text("SET search_path TO test_schema, public"))
     ...     metadata_obj = MetaData()
-    ...     referring = Table('referring', metadata_obj,
-    ...                       autoload_with=conn,
-    ...                       postgresql_ignore_search_path=True)
-    ...
+    ...     referring = Table(
+    ...         "referring",
+    ...         metadata_obj,
+    ...         autoload_with=conn,
+    ...         postgresql_ignore_search_path=True,
+    ...     )
     <sqlalchemy.engine.result.CursorResult object at 0x1016126d0>
 
 We will now have ``test_schema.referred`` stored as schema-qualified::
 
-    >>> metadata_obj.tables['test_schema.referred'].schema
+    >>> metadata_obj.tables["test_schema.referred"].schema
     'test_schema'
 
 .. sidebar:: Best Practices for PostgreSQL Schema reflection
@@ -537,18 +544,26 @@ primary key identifiers.   To specify an explicit ``RETURNING`` clause,
 use the :meth:`._UpdateBase.returning` method on a per-statement basis::
 
     # INSERT..RETURNING
-    result = table.insert().returning(table.c.col1, table.c.col2).\
-        values(name='foo')
+    result = (
+        table.insert().returning(table.c.col1, table.c.col2).values(name="foo")
+    )
     print(result.fetchall())
 
     # UPDATE..RETURNING
-    result = table.update().returning(table.c.col1, table.c.col2).\
-        where(table.c.name=='foo').values(name='bar')
+    result = (
+        table.update()
+        .returning(table.c.col1, table.c.col2)
+        .where(table.c.name == "foo")
+        .values(name="bar")
+    )
     print(result.fetchall())
 
     # DELETE..RETURNING
-    result = table.delete().returning(table.c.col1, table.c.col2).\
-        where(table.c.name=='foo')
+    result = (
+        table.delete()
+        .returning(table.c.col1, table.c.col2)
+        .where(table.c.name == "foo")
+    )
     print(result.fetchall())
 
 .. _postgresql_insert_on_conflict:
@@ -578,19 +593,16 @@ and :meth:`~.postgresql.Insert.on_conflict_do_nothing`:
 
     >>> from sqlalchemy.dialects.postgresql import insert
     >>> insert_stmt = insert(my_table).values(
-    ...     id='some_existing_id',
-    ...     data='inserted value')
-    >>> do_nothing_stmt = insert_stmt.on_conflict_do_nothing(
-    ...     index_elements=['id']
+    ...     id="some_existing_id", data="inserted value"
     ... )
+    >>> do_nothing_stmt = insert_stmt.on_conflict_do_nothing(index_elements=["id"])
     >>> print(do_nothing_stmt)
     {printsql}INSERT INTO my_table (id, data) VALUES (%(id)s, %(data)s)
     ON CONFLICT (id) DO NOTHING
     {stop}
 
     >>> do_update_stmt = insert_stmt.on_conflict_do_update(
-    ...     constraint='pk_my_table',
-    ...     set_=dict(data='updated value')
+    ...     constraint="pk_my_table", set_=dict(data="updated value")
     ... )
     >>> print(do_update_stmt)
     {printsql}INSERT INTO my_table (id, data) VALUES (%(id)s, %(data)s)
@@ -616,8 +628,7 @@ named constraint or by column inference:
   .. sourcecode:: pycon+sql
 
     >>> do_update_stmt = insert_stmt.on_conflict_do_update(
-    ...     index_elements=['id'],
-    ...     set_=dict(data='updated value')
+    ...     index_elements=["id"], set_=dict(data="updated value")
     ... )
     >>> print(do_update_stmt)
     {printsql}INSERT INTO my_table (id, data) VALUES (%(id)s, %(data)s)
@@ -625,8 +636,7 @@ named constraint or by column inference:
     {stop}
 
     >>> do_update_stmt = insert_stmt.on_conflict_do_update(
-    ...     index_elements=[my_table.c.id],
-    ...     set_=dict(data='updated value')
+    ...     index_elements=[my_table.c.id], set_=dict(data="updated value")
     ... )
     >>> print(do_update_stmt)
     {printsql}INSERT INTO my_table (id, data) VALUES (%(id)s, %(data)s)
@@ -638,11 +648,11 @@ named constraint or by column inference:
 
   .. sourcecode:: pycon+sql
 
-    >>> stmt = insert(my_table).values(user_email='a@b.com', data='inserted data')
+    >>> stmt = insert(my_table).values(user_email="a@b.com", data="inserted data")
     >>> stmt = stmt.on_conflict_do_update(
     ...     index_elements=[my_table.c.user_email],
-    ...     index_where=my_table.c.user_email.like('%@gmail.com'),
-    ...     set_=dict(data=stmt.excluded.data)
+    ...     index_where=my_table.c.user_email.like("%@gmail.com"),
+    ...     set_=dict(data=stmt.excluded.data),
     ... )
     >>> print(stmt)
     {printsql}INSERT INTO my_table (data, user_email)
@@ -656,8 +666,7 @@ named constraint or by column inference:
   .. sourcecode:: pycon+sql
 
     >>> do_update_stmt = insert_stmt.on_conflict_do_update(
-    ...     constraint='my_table_idx_1',
-    ...     set_=dict(data='updated value')
+    ...     constraint="my_table_idx_1", set_=dict(data="updated value")
     ... )
     >>> print(do_update_stmt)
     {printsql}INSERT INTO my_table (id, data) VALUES (%(id)s, %(data)s)
@@ -665,8 +674,7 @@ named constraint or by column inference:
     {stop}
 
     >>> do_update_stmt = insert_stmt.on_conflict_do_update(
-    ...     constraint='my_table_pk',
-    ...     set_=dict(data='updated value')
+    ...     constraint="my_table_pk", set_=dict(data="updated value")
     ... )
     >>> print(do_update_stmt)
     {printsql}INSERT INTO my_table (id, data) VALUES (%(id)s, %(data)s)
@@ -688,8 +696,7 @@ named constraint or by column inference:
   .. sourcecode:: pycon+sql
 
     >>> do_update_stmt = insert_stmt.on_conflict_do_update(
-    ...     constraint=my_table.primary_key,
-    ...     set_=dict(data='updated value')
+    ...     constraint=my_table.primary_key, set_=dict(data="updated value")
     ... )
     >>> print(do_update_stmt)
     {printsql}INSERT INTO my_table (id, data) VALUES (%(id)s, %(data)s)
@@ -707,10 +714,9 @@ for UPDATE:
 
 .. sourcecode:: pycon+sql
 
-    >>> stmt = insert(my_table).values(id='some_id', data='inserted value')
+    >>> stmt = insert(my_table).values(id="some_id", data="inserted value")
     >>> do_update_stmt = stmt.on_conflict_do_update(
-    ...     index_elements=['id'],
-    ...     set_=dict(data='updated value')
+    ...     index_elements=["id"], set_=dict(data="updated value")
     ... )
     >>> print(do_update_stmt)
     {printsql}INSERT INTO my_table (id, data) VALUES (%(id)s, %(data)s)
@@ -739,13 +745,11 @@ table:
 .. sourcecode:: pycon+sql
 
     >>> stmt = insert(my_table).values(
-    ...     id='some_id',
-    ...     data='inserted value',
-    ...     author='jlh'
+    ...     id="some_id", data="inserted value", author="jlh"
     ... )
     >>> do_update_stmt = stmt.on_conflict_do_update(
-    ...     index_elements=['id'],
-    ...     set_=dict(data='updated value', author=stmt.excluded.author)
+    ...     index_elements=["id"],
+    ...     set_=dict(data="updated value", author=stmt.excluded.author),
     ... )
     >>> print(do_update_stmt)
     {printsql}INSERT INTO my_table (id, data, author)
@@ -762,14 +766,12 @@ parameter, which will limit those rows which receive an UPDATE:
 .. sourcecode:: pycon+sql
 
     >>> stmt = insert(my_table).values(
-    ...     id='some_id',
-    ...     data='inserted value',
-    ...     author='jlh'
+    ...     id="some_id", data="inserted value", author="jlh"
     ... )
     >>> on_update_stmt = stmt.on_conflict_do_update(
-    ...     index_elements=['id'],
-    ...     set_=dict(data='updated value', author=stmt.excluded.author),
-    ...     where=(my_table.c.status == 2)
+    ...     index_elements=["id"],
+    ...     set_=dict(data="updated value", author=stmt.excluded.author),
+    ...     where=(my_table.c.status == 2),
     ... )
     >>> print(on_update_stmt)
     {printsql}INSERT INTO my_table (id, data, author)
@@ -787,8 +789,8 @@ this is illustrated using the
 
 .. sourcecode:: pycon+sql
 
-    >>> stmt = insert(my_table).values(id='some_id', data='inserted value')
-    >>> stmt = stmt.on_conflict_do_nothing(index_elements=['id'])
+    >>> stmt = insert(my_table).values(id="some_id", data="inserted value")
+    >>> stmt = stmt.on_conflict_do_nothing(index_elements=["id"])
     >>> print(stmt)
     {printsql}INSERT INTO my_table (id, data) VALUES (%(id)s, %(data)s)
     ON CONFLICT (id) DO NOTHING
@@ -799,7 +801,7 @@ constraint violation which occurs:
 
 .. sourcecode:: pycon+sql
 
-    >>> stmt = insert(my_table).values(id='some_id', data='inserted value')
+    >>> stmt = insert(my_table).values(id="some_id", data="inserted value")
     >>> stmt = stmt.on_conflict_do_nothing()
     >>> print(stmt)
     {printsql}INSERT INTO my_table (id, data) VALUES (%(id)s, %(data)s)
@@ -830,7 +832,9 @@ On the PostgreSQL dialect, an expression like the following::
 
     select(sometable.c.text.match("search string"))
 
-would emit to the database::
+would emit to the database:
+
+.. sourcecode:: sql
 
     SELECT text @@ plainto_tsquery('search string') FROM table
 
@@ -846,11 +850,11 @@ with other backends.
 
         from sqlalchemy import func
 
-        select(
-            sometable.c.text.bool_op("@@")(func.to_tsquery("search string"))
-        )
+        select(sometable.c.text.bool_op("@@")(func.to_tsquery("search string")))
 
-   Which would emit::
+   Which would emit:
+
+   .. sourcecode:: sql
 
         SELECT text @@ to_tsquery('search string') FROM table
 
@@ -864,9 +868,7 @@ any boolean operator.
 
 For example, the query::
 
-    select(
-        func.to_tsquery('cat').bool_op("@>")(func.to_tsquery('cat & rat'))
-    )
+    select(func.to_tsquery("cat").bool_op("@>")(func.to_tsquery("cat & rat")))
 
 would generate:
 
@@ -879,9 +881,12 @@ The :class:`_postgresql.TSVECTOR` type can provide for explicit CAST::
 
     from sqlalchemy.dialects.postgresql import TSVECTOR
     from sqlalchemy import select, cast
+
     select(cast("some text", TSVECTOR))
 
-produces a statement equivalent to::
+produces a statement equivalent to:
+
+.. sourcecode:: sql
 
     SELECT CAST('some text' AS TSVECTOR) AS anon_1
 
@@ -909,10 +914,12 @@ When using :meth:`.Operators.match`, this additional parameter may be
 specified using the ``postgresql_regconfig`` parameter, such as::
 
     select(mytable.c.id).where(
-        mytable.c.title.match('somestring', postgresql_regconfig='english')
+        mytable.c.title.match("somestring", postgresql_regconfig="english")
     )
 
-Which would emit::
+Which would emit:
+
+.. sourcecode:: sql
 
     SELECT mytable.id FROM mytable
     WHERE mytable.title @@ plainto_tsquery('english', 'somestring')
@@ -926,7 +933,9 @@ When using other PostgreSQL search functions with :data:`.func`, the
         )
     )
 
-produces a statement equivalent to::
+produces a statement equivalent to:
+
+.. sourcecode:: sql
 
     SELECT mytable.id FROM mytable
     WHERE to_tsvector('english', mytable.title) @@
@@ -950,16 +959,16 @@ table in an inheritance hierarchy. This can be used to produce the
 syntaxes. It uses SQLAlchemy's hints mechanism::
 
     # SELECT ... FROM ONLY ...
-    result = table.select().with_hint(table, 'ONLY', 'postgresql')
+    result = table.select().with_hint(table, "ONLY", "postgresql")
     print(result.fetchall())
 
     # UPDATE ONLY ...
-    table.update(values=dict(foo='bar')).with_hint('ONLY',
-                                                   dialect_name='postgresql')
+    table.update(values=dict(foo="bar")).with_hint(
+        "ONLY", dialect_name="postgresql"
+    )
 
     # DELETE FROM ONLY ...
-    table.delete().with_hint('ONLY', dialect_name='postgresql')
-
+    table.delete().with_hint("ONLY", dialect_name="postgresql")
 
 .. _postgresql_indexes:
 
@@ -975,7 +984,7 @@ Covering Indexes
 The ``postgresql_include`` option renders INCLUDE(colname) for the given
 string names::
 
-    Index("my_index", table.c.x, postgresql_include=['y'])
+    Index("my_index", table.c.x, postgresql_include=["y"])
 
 would render the index as ``CREATE INDEX my_index ON table (x) INCLUDE (y)``
 
@@ -992,7 +1001,7 @@ Partial indexes add criterion to the index definition so that the index is
 applied to a subset of rows.   These can be specified on :class:`.Index`
 using the ``postgresql_where`` keyword argument::
 
-  Index('my_index', my_table.c.id, postgresql_where=my_table.c.value > 10)
+  Index("my_index", my_table.c.id, postgresql_where=my_table.c.value > 10)
 
 .. _postgresql_operator_classes:
 
@@ -1006,11 +1015,11 @@ The :class:`.Index` construct allows these to be specified via the
 ``postgresql_ops`` keyword argument::
 
     Index(
-        'my_index', my_table.c.id, my_table.c.data,
-        postgresql_ops={
-            'data': 'text_pattern_ops',
-            'id': 'int4_ops'
-        })
+        "my_index",
+        my_table.c.id,
+        my_table.c.data,
+        postgresql_ops={"data": "text_pattern_ops", "id": "int4_ops"},
+    )
 
 Note that the keys in the ``postgresql_ops`` dictionaries are the
 "key" name of the :class:`_schema.Column`, i.e. the name used to access it from
@@ -1022,12 +1031,11 @@ as a function call, then to apply to the column it must be given a label
 that is identified in the dictionary by name, e.g.::
 
     Index(
-        'my_index', my_table.c.id,
-        func.lower(my_table.c.data).label('data_lower'),
-        postgresql_ops={
-            'data_lower': 'text_pattern_ops',
-            'id': 'int4_ops'
-        })
+        "my_index",
+        my_table.c.id,
+        func.lower(my_table.c.data).label("data_lower"),
+        postgresql_ops={"data_lower": "text_pattern_ops", "id": "int4_ops"},
+    )
 
 Operator classes are also supported by the
 :class:`_postgresql.ExcludeConstraint` construct using the
@@ -1046,7 +1054,7 @@ as the ability for users to create their own (see
 https://www.postgresql.org/docs/current/static/indexes-types.html). These can be
 specified on :class:`.Index` using the ``postgresql_using`` keyword argument::
 
-    Index('my_index', my_table.c.data, postgresql_using='gin')
+    Index("my_index", my_table.c.data, postgresql_using="gin")
 
 The value passed to the keyword argument will be simply passed through to the
 underlying CREATE INDEX command, so it *must* be a valid index type for your
@@ -1062,13 +1070,13 @@ parameters available depend on the index method used by the index. Storage
 parameters can be specified on :class:`.Index` using the ``postgresql_with``
 keyword argument::
 
-    Index('my_index', my_table.c.data, postgresql_with={"fillfactor": 50})
+    Index("my_index", my_table.c.data, postgresql_with={"fillfactor": 50})
 
 PostgreSQL allows to define the tablespace in which to create the index.
 The tablespace can be specified on :class:`.Index` using the
 ``postgresql_tablespace`` keyword argument::
 
-    Index('my_index', my_table.c.data, postgresql_tablespace='my_tablespace')
+    Index("my_index", my_table.c.data, postgresql_tablespace="my_tablespace")
 
 Note that the same option is available on :class:`_schema.Table` as well.
 
@@ -1080,17 +1088,21 @@ Indexes with CONCURRENTLY
 The PostgreSQL index option CONCURRENTLY is supported by passing the
 flag ``postgresql_concurrently`` to the :class:`.Index` construct::
 
-    tbl = Table('testtbl', m, Column('data', Integer))
+    tbl = Table("testtbl", m, Column("data", Integer))
 
-    idx1 = Index('test_idx1', tbl.c.data, postgresql_concurrently=True)
+    idx1 = Index("test_idx1", tbl.c.data, postgresql_concurrently=True)
 
 The above index construct will render DDL for CREATE INDEX, assuming
-PostgreSQL 8.2 or higher is detected or for a connection-less dialect, as::
+PostgreSQL 8.2 or higher is detected or for a connection-less dialect, as:
+
+.. sourcecode:: sql
 
     CREATE INDEX CONCURRENTLY test_idx1 ON testtbl (data)
 
 For DROP INDEX, assuming PostgreSQL 9.2 or higher is detected or for
-a connection-less dialect, it will emit::
+a connection-less dialect, it will emit:
+
+.. sourcecode:: sql
 
     DROP INDEX CONCURRENTLY test_idx1
 
@@ -1100,14 +1112,11 @@ even for a single statement, a transaction is present, so to use this
 construct, the DBAPI's "autocommit" mode must be used::
 
     metadata = MetaData()
-    table = Table(
-        "foo", metadata,
-        Column("id", String))
-    index = Index(
-        "foo_idx", table.c.id, postgresql_concurrently=True)
+    table = Table("foo", metadata, Column("id", String))
+    index = Index("foo_idx", table.c.id, postgresql_concurrently=True)
 
     with engine.connect() as conn:
-        with conn.execution_options(isolation_level='AUTOCOMMIT'):
+        with conn.execution_options(isolation_level="AUTOCOMMIT"):
             table.create(conn)
 
 .. seealso::
@@ -1165,26 +1174,33 @@ dialect in conjunction with the :class:`_schema.Table` construct:
 
 * ``ON COMMIT``::
 
-    Table("some_table", metadata, ..., postgresql_on_commit='PRESERVE ROWS')
+    Table("some_table", metadata, ..., postgresql_on_commit="PRESERVE ROWS")
 
-* ``PARTITION BY``::
+*
+  ``PARTITION BY``::
 
-    Table("some_table", metadata, ...,
-          postgresql_partition_by='LIST (part_column)')
+    Table(
+        "some_table",
+        metadata,
+        ...,
+        postgresql_partition_by="LIST (part_column)",
+    )
 
-    .. versionadded:: 1.2.6
+  .. versionadded:: 1.2.6
 
-* ``TABLESPACE``::
+*
+  ``TABLESPACE``::
 
-    Table("some_table", metadata, ..., postgresql_tablespace='some_tablespace')
+    Table("some_table", metadata, ..., postgresql_tablespace="some_tablespace")
 
   The above option is also available on the :class:`.Index` construct.
 
-* ``USING``::
+*
+  ``USING``::
 
-    Table("some_table", metadata, ..., postgresql_using='heap')
+    Table("some_table", metadata, ..., postgresql_using="heap")
 
-    .. versionadded:: 2.0.26
+  .. versionadded:: 2.0.26
 
 * ``WITH OIDS``::
 
@@ -1225,7 +1241,7 @@ with selected constraint constructs:
                 "user",
                 ["user_id"],
                 ["id"],
-                postgresql_not_valid=True
+                postgresql_not_valid=True,
             )
 
   The keyword is ultimately accepted directly by the
@@ -1236,7 +1252,9 @@ with selected constraint constructs:
 
        CheckConstraint("some_field IS NOT NULL", postgresql_not_valid=True)
 
-       ForeignKeyConstraint(["some_id"], ["some_table.some_id"], postgresql_not_valid=True)
+       ForeignKeyConstraint(
+           ["some_id"], ["some_table.some_id"], postgresql_not_valid=True
+       )
 
   .. versionadded:: 1.4.32
 
@@ -1279,7 +1297,9 @@ Examples from PostgreSQL's reference documentation follow below:
   .. sourcecode:: pycon+sql
 
     >>> from sqlalchemy import select, func
-    >>> stmt = select(func.json_each('{"a":"foo", "b":"bar"}').table_valued("key", "value"))
+    >>> stmt = select(
+    ...     func.json_each('{"a":"foo", "b":"bar"}').table_valued("key", "value")
+    ... )
     >>> print(stmt)
     {printsql}SELECT anon_1.key, anon_1.value
     FROM json_each(:json_each_1) AS anon_1
@@ -1291,8 +1311,7 @@ Examples from PostgreSQL's reference documentation follow below:
     >>> from sqlalchemy import select, func, literal_column
     >>> stmt = select(
     ...     func.json_populate_record(
-    ...         literal_column("null::myrowtype"),
-    ...         '{"a":1,"b":2}'
+    ...         literal_column("null::myrowtype"), '{"a":1,"b":2}'
     ...     ).table_valued("a", "b", name="x")
     ... )
     >>> print(stmt)
@@ -1310,9 +1329,13 @@ Examples from PostgreSQL's reference documentation follow below:
 
     >>> from sqlalchemy import select, func, column, Integer, Text
     >>> stmt = select(
-    ...     func.json_to_record('{"a":1,"b":[1,2,3],"c":"bar"}').table_valued(
-    ...         column("a", Integer), column("b", Text), column("d", Text),
-    ...     ).render_derived(name="x", with_types=True)
+    ...     func.json_to_record('{"a":1,"b":[1,2,3],"c":"bar"}')
+    ...     .table_valued(
+    ...         column("a", Integer),
+    ...         column("b", Text),
+    ...         column("d", Text),
+    ...     )
+    ...     .render_derived(name="x", with_types=True)
     ... )
     >>> print(stmt)
     {printsql}SELECT x.a, x.b, x.d
@@ -1329,9 +1352,9 @@ Examples from PostgreSQL's reference documentation follow below:
 
     >>> from sqlalchemy import select, func
     >>> stmt = select(
-    ...     func.generate_series(4, 1, -1).
-    ...     table_valued("value", with_ordinality="ordinality").
-    ...     render_derived()
+    ...     func.generate_series(4, 1, -1)
+    ...     .table_valued("value", with_ordinality="ordinality")
+    ...     .render_derived()
     ... )
     >>> print(stmt)
     {printsql}SELECT anon_1.value, anon_1.ordinality
@@ -1360,7 +1383,9 @@ scalar value.  PostgreSQL functions such as ``json_array_elements()``,
   .. sourcecode:: pycon+sql
 
     >>> from sqlalchemy import select, func
-    >>> stmt = select(func.json_array_elements('["one", "two"]').column_valued("x"))
+    >>> stmt = select(
+    ...     func.json_array_elements('["one", "two"]').column_valued("x")
+    ... )
     >>> print(stmt)
     {printsql}SELECT x
     FROM json_array_elements(:json_array_elements_1) AS x
@@ -1384,7 +1409,7 @@ scalar value.  PostgreSQL functions such as ``json_array_elements()``,
 
     >>> from sqlalchemy import table, column, ARRAY, Integer
     >>> from sqlalchemy import select, func
-    >>> t = table("t", column('value', ARRAY(Integer)))
+    >>> t = table("t", column("value", ARRAY(Integer)))
     >>> stmt = select(func.unnest(t.c.value).column_valued("unnested_value"))
     >>> print(stmt)
     {printsql}SELECT unnested_value
@@ -1406,10 +1431,10 @@ Built-in support for rendering a ``ROW`` may be approximated using
 
     >>> from sqlalchemy import table, column, func, tuple_
     >>> t = table("t", column("id"), column("fk"))
-    >>> stmt = t.select().where(
-    ...     tuple_(t.c.id, t.c.fk) > (1,2)
-    ... ).where(
-    ...     func.ROW(t.c.id, t.c.fk) < func.ROW(3, 7)
+    >>> stmt = (
+    ...     t.select()
+    ...     .where(tuple_(t.c.id, t.c.fk) > (1, 2))
+    ...     .where(func.ROW(t.c.id, t.c.fk) < func.ROW(3, 7))
     ... )
     >>> print(stmt)
     {printsql}SELECT t.id, t.fk
@@ -1438,7 +1463,7 @@ itself:
 .. sourcecode:: pycon+sql
 
     >>> from sqlalchemy import table, column, func, select
-    >>> a = table( "a", column("id"), column("x"), column("y"))
+    >>> a = table("a", column("id"), column("x"), column("y"))
     >>> stmt = select(func.row_to_json(a.table_valued()))
     >>> print(stmt)
     {printsql}SELECT row_to_json(a) AS row_to_json_1
index 7fc08953fcc134e66a9016f289f2f340c4b32965..a760773e247323a3d86ee624d4b137e0f3217c0b 100644 (file)
@@ -35,22 +35,26 @@ class aggregate_order_by(expression.ColumnElement):
     E.g.::
 
         from sqlalchemy.dialects.postgresql import aggregate_order_by
+
         expr = func.array_agg(aggregate_order_by(table.c.a, table.c.b.desc()))
         stmt = select(expr)
 
-    would represent the expression::
+    would represent the expression:
+
+    .. sourcecode:: sql
 
         SELECT array_agg(a ORDER BY b DESC) FROM table;
 
     Similarly::
 
         expr = func.string_agg(
-            table.c.a,
-            aggregate_order_by(literal_column("','"), table.c.a)
+            table.c.a, aggregate_order_by(literal_column("','"), table.c.a)
         )
         stmt = select(expr)
 
-    Would represent::
+    Would represent:
+
+    .. sourcecode:: sql
 
         SELECT string_agg(a, ',' ORDER BY a) FROM table;
 
@@ -131,10 +135,10 @@ class ExcludeConstraint(ColumnCollectionConstraint):
         E.g.::
 
             const = ExcludeConstraint(
-                (Column('period'), '&&'),
-                (Column('group'), '='),
-                where=(Column('group') != 'some group'),
-                ops={'group': 'my_operator_class'}
+                (Column("period"), "&&"),
+                (Column("group"), "="),
+                where=(Column("group") != "some group"),
+                ops={"group": "my_operator_class"},
             )
 
         The constraint is normally embedded into the :class:`_schema.Table`
@@ -142,19 +146,20 @@ class ExcludeConstraint(ColumnCollectionConstraint):
         directly, or added later using :meth:`.append_constraint`::
 
             some_table = Table(
-                'some_table', metadata,
-                Column('id', Integer, primary_key=True),
-                Column('period', TSRANGE()),
-                Column('group', String)
+                "some_table",
+                metadata,
+                Column("id", Integer, primary_key=True),
+                Column("period", TSRANGE()),
+                Column("group", String),
             )
 
             some_table.append_constraint(
                 ExcludeConstraint(
-                    (some_table.c.period, '&&'),
-                    (some_table.c.group, '='),
-                    where=some_table.c.group != 'some group',
-                    name='some_table_excl_const',
-                    ops={'group': 'my_operator_class'}
+                    (some_table.c.period, "&&"),
+                    (some_table.c.group, "="),
+                    where=some_table.c.group != "some group",
+                    name="some_table_excl_const",
+                    ops={"group": "my_operator_class"},
                 )
             )
 
index 04c8cf16015cc733e17252952ae2a41210b54122..5a2d451316dc4d906cb5203c80ad0b5d792d1494 100644 (file)
@@ -28,28 +28,29 @@ class HSTORE(sqltypes.Indexable, sqltypes.Concatenable, sqltypes.TypeEngine):
 
     The :class:`.HSTORE` type stores dictionaries containing strings, e.g.::
 
-        data_table = Table('data_table', metadata,
-            Column('id', Integer, primary_key=True),
-            Column('data', HSTORE)
+        data_table = Table(
+            "data_table",
+            metadata,
+            Column("id", Integer, primary_key=True),
+            Column("data", HSTORE),
         )
 
         with engine.connect() as conn:
             conn.execute(
-                data_table.insert(),
-                data = {"key1": "value1", "key2": "value2"}
+                data_table.insert(), data={"key1": "value1", "key2": "value2"}
             )
 
     :class:`.HSTORE` provides for a wide range of operations, including:
 
     * Index operations::
 
-        data_table.c.data['some key'] == 'some value'
+        data_table.c.data["some key"] == "some value"
 
     * Containment operations::
 
-        data_table.c.data.has_key('some key')
+        data_table.c.data.has_key("some key")
 
-        data_table.c.data.has_all(['one', 'two', 'three'])
+        data_table.c.data.has_all(["one", "two", "three"])
 
     * Concatenation::
 
@@ -72,17 +73,19 @@ class HSTORE(sqltypes.Indexable, sqltypes.Concatenable, sqltypes.TypeEngine):
 
             from sqlalchemy.ext.mutable import MutableDict
 
+
             class MyClass(Base):
-                __tablename__ = 'data_table'
+                __tablename__ = "data_table"
 
                 id = Column(Integer, primary_key=True)
                 data = Column(MutableDict.as_mutable(HSTORE))
 
+
             my_object = session.query(MyClass).one()
 
             # in-place mutation, requires Mutable extension
             # in order for the ORM to detect
-            my_object.data['some_key'] = 'some value'
+            my_object.data["some_key"] = "some value"
 
             session.commit()
 
@@ -96,7 +99,7 @@ class HSTORE(sqltypes.Indexable, sqltypes.Concatenable, sqltypes.TypeEngine):
         :class:`.hstore` - render the PostgreSQL ``hstore()`` function.
 
 
-    """
+    """  # noqa: E501
 
     __visit_name__ = "HSTORE"
     hashable = False
@@ -221,12 +224,12 @@ class hstore(sqlfunc.GenericFunction):
 
         from sqlalchemy.dialects.postgresql import array, hstore
 
-        select(hstore('key1', 'value1'))
+        select(hstore("key1", "value1"))
 
         select(
             hstore(
-                array(['key1', 'key2', 'key3']),
-                array(['value1', 'value2', 'value3'])
+                array(["key1", "key2", "key3"]),
+                array(["value1", "value2", "value3"]),
             )
         )
 
index 914d8423d4b18afe56fdfa5f7946fd48807f7511..4e7c15ffe92ab456ada1e38efcf4807a5dc3324c 100644 (file)
@@ -90,14 +90,14 @@ class JSON(sqltypes.JSON):
 
     * Index operations (the ``->`` operator)::
 
-        data_table.c.data['some key']
+        data_table.c.data["some key"]
 
         data_table.c.data[5]
 
+    * Index operations returning text
+      (the ``->>`` operator)::
 
-    * Index operations returning text (the ``->>`` operator)::
-
-        data_table.c.data['some key'].astext == 'some value'
+        data_table.c.data["some key"].astext == "some value"
 
       Note that equivalent functionality is available via the
       :attr:`.JSON.Comparator.as_string` accessor.
@@ -105,18 +105,20 @@ class JSON(sqltypes.JSON):
     * Index operations with CAST
       (equivalent to ``CAST(col ->> ['some key'] AS <type>)``)::
 
-        data_table.c.data['some key'].astext.cast(Integer) == 5
+        data_table.c.data["some key"].astext.cast(Integer) == 5
 
       Note that equivalent functionality is available via the
       :attr:`.JSON.Comparator.as_integer` and similar accessors.
 
     * Path index operations (the ``#>`` operator)::
 
-        data_table.c.data[('key_1', 'key_2', 5, ..., 'key_n')]
+        data_table.c.data[("key_1", "key_2", 5, ..., "key_n")]
 
     * Path index operations returning text (the ``#>>`` operator)::
 
-        data_table.c.data[('key_1', 'key_2', 5, ..., 'key_n')].astext == 'some value'
+        data_table.c.data[
+            ("key_1", "key_2", 5, ..., "key_n")
+        ].astext == "some value"
 
     Index operations return an expression object whose type defaults to
     :class:`_types.JSON` by default,
@@ -128,10 +130,11 @@ class JSON(sqltypes.JSON):
     using psycopg2, the DBAPI only allows serializers at the per-cursor
     or per-connection level.   E.g.::
 
-        engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/test",
-                                json_serializer=my_serialize_fn,
-                                json_deserializer=my_deserialize_fn
-                        )
+        engine = create_engine(
+            "postgresql+psycopg2://scott:tiger@localhost/test",
+            json_serializer=my_serialize_fn,
+            json_deserializer=my_deserialize_fn,
+        )
 
     When using the psycopg2 dialect, the json_deserializer is registered
     against the database using ``psycopg2.extras.register_default_json``.
@@ -156,6 +159,7 @@ class JSON(sqltypes.JSON):
          be used to persist a NULL value::
 
              from sqlalchemy import null
+
              conn.execute(table.insert(), {"data": null()})
 
          .. seealso::
@@ -181,7 +185,7 @@ class JSON(sqltypes.JSON):
 
             E.g.::
 
-                select(data_table.c.data['some key'].astext)
+                select(data_table.c.data["some key"].astext)
 
             .. seealso::
 
@@ -208,15 +212,16 @@ class JSONB(JSON):
     The :class:`_postgresql.JSONB` type stores arbitrary JSONB format data,
     e.g.::
 
-        data_table = Table('data_table', metadata,
-            Column('id', Integer, primary_key=True),
-            Column('data', JSONB)
+        data_table = Table(
+            "data_table",
+            metadata,
+            Column("id", Integer, primary_key=True),
+            Column("data", JSONB),
         )
 
         with engine.connect() as conn:
             conn.execute(
-                data_table.insert(),
-                data = {"key1": "value1", "key2": "value2"}
+                data_table.insert(), data={"key1": "value1", "key2": "value2"}
             )
 
     The :class:`_postgresql.JSONB` type includes all operations provided by
index 16e5c867efc26bb4998db2c92c40aa4d5011a261..320de440f861c0bd006135643c43d0532e87de80 100644 (file)
@@ -185,8 +185,10 @@ class ENUM(NamedType, type_api.NativeForEmulated, sqltypes.Enum):
     :meth:`_schema.Table.drop`
     methods are called::
 
-        table = Table('sometable', metadata,
-            Column('some_enum', ENUM('a', 'b', 'c', name='myenum'))
+        table = Table(
+            "sometable",
+            metadata,
+            Column("some_enum", ENUM("a", "b", "c", name="myenum")),
         )
 
         table.create(engine)  # will emit CREATE ENUM and CREATE TABLE
@@ -197,21 +199,17 @@ class ENUM(NamedType, type_api.NativeForEmulated, sqltypes.Enum):
     :class:`_postgresql.ENUM` independently, and associate it with the
     :class:`_schema.MetaData` object itself::
 
-        my_enum = ENUM('a', 'b', 'c', name='myenum', metadata=metadata)
+        my_enum = ENUM("a", "b", "c", name="myenum", metadata=metadata)
 
-        t1 = Table('sometable_one', metadata,
-            Column('some_enum', myenum)
-        )
+        t1 = Table("sometable_one", metadata, Column("some_enum", myenum))
 
-        t2 = Table('sometable_two', metadata,
-            Column('some_enum', myenum)
-        )
+        t2 = Table("sometable_two", metadata, Column("some_enum", myenum))
 
     When this pattern is used, care must still be taken at the level
     of individual table creates.  Emitting CREATE TABLE without also
     specifying ``checkfirst=True`` will still cause issues::
 
-        t1.create(engine) # will fail: no such type 'myenum'
+        t1.create(engine)  # will fail: no such type 'myenum'
 
     If we specify ``checkfirst=True``, the individual table-level create
     operation will check for the ``ENUM`` and create if not exists::
@@ -387,14 +385,12 @@ class DOMAIN(NamedType, sqltypes.SchemaType):
     A domain is essentially a data type with optional constraints
     that restrict the allowed set of values. E.g.::
 
-        PositiveInt = DOMAIN(
-            "pos_int", Integer, check="VALUE > 0", not_null=True
-        )
+        PositiveInt = DOMAIN("pos_int", Integer, check="VALUE > 0", not_null=True)
 
         UsPostalCode = DOMAIN(
             "us_postal_code",
             Text,
-            check="VALUE ~ '^\d{5}$' OR VALUE ~ '^\d{5}-\d{4}$'"
+            check="VALUE ~ '^\d{5}$' OR VALUE ~ '^\d{5}-\d{4}$'",
         )
 
     See the `PostgreSQL documentation`__ for additional details
@@ -403,7 +399,7 @@ class DOMAIN(NamedType, sqltypes.SchemaType):
 
     .. versionadded:: 2.0
 
-    """
+    """  # noqa: E501
 
     DDLGenerator = DomainGenerator
     DDLDropper = DomainDropper
index 0151be0253daed70687a7e129dfc5f3261be9ca6..aa878c353e0ed1b81abedf36fbbde536f3e667ea 100644 (file)
@@ -27,19 +27,21 @@ PostgreSQL ``client_encoding`` parameter; by default this is the value in
 the ``postgresql.conf`` file, which often defaults to ``SQL_ASCII``.
 Typically, this can be changed to ``utf-8``, as a more useful default::
 
-    #client_encoding = sql_ascii # actually, defaults to database
-                                 # encoding
+    # client_encoding = sql_ascii # actually, defaults to database encoding
     client_encoding = utf8
 
 The ``client_encoding`` can be overridden for a session by executing the SQL:
 
-SET CLIENT_ENCODING TO 'utf8';
+.. sourcecode:: sql
+
+    SET CLIENT_ENCODING TO 'utf8';
 
 SQLAlchemy will execute this SQL on all new connections based on the value
 passed to :func:`_sa.create_engine` using the ``client_encoding`` parameter::
 
     engine = create_engine(
-        "postgresql+pg8000://user:pass@host/dbname", client_encoding='utf8')
+        "postgresql+pg8000://user:pass@host/dbname", client_encoding="utf8"
+    )
 
 .. _pg8000_ssl:
 
@@ -50,6 +52,7 @@ pg8000 accepts a Python ``SSLContext`` object which may be specified using the
 :paramref:`_sa.create_engine.connect_args` dictionary::
 
     import ssl
+
     ssl_context = ssl.create_default_context()
     engine = sa.create_engine(
         "postgresql+pg8000://scott:tiger@192.168.0.199/test",
@@ -61,6 +64,7 @@ or does not match the host name (as seen from the client), it may also be
 necessary to disable hostname checking::
 
     import ssl
+
     ssl_context = ssl.create_default_context()
     ssl_context.check_hostname = False
     ssl_context.verify_mode = ssl.CERT_NONE
index b8bff9f4559b1ffe97301f216bd1e20c39706567..60b684450014eb11ffc65643d9390a7207c70274 100644 (file)
@@ -29,20 +29,29 @@ selected depending on how the engine is created:
   automatically select the sync version, e.g.::
 
     from sqlalchemy import create_engine
-    sync_engine = create_engine("postgresql+psycopg://scott:tiger@localhost/test")
+
+    sync_engine = create_engine(
+        "postgresql+psycopg://scott:tiger@localhost/test"
+    )
 
 * calling :func:`_asyncio.create_async_engine` with
   ``postgresql+psycopg://...`` will automatically select the async version,
   e.g.::
 
     from sqlalchemy.ext.asyncio import create_async_engine
-    asyncio_engine = create_async_engine("postgresql+psycopg://scott:tiger@localhost/test")
+
+    asyncio_engine = create_async_engine(
+        "postgresql+psycopg://scott:tiger@localhost/test"
+    )
 
 The asyncio version of the dialect may also be specified explicitly using the
 ``psycopg_async`` suffix, as::
 
     from sqlalchemy.ext.asyncio import create_async_engine
-    asyncio_engine = create_async_engine("postgresql+psycopg_async://scott:tiger@localhost/test")
+
+    asyncio_engine = create_async_engine(
+        "postgresql+psycopg_async://scott:tiger@localhost/test"
+    )
 
 .. seealso::
 
index fc05aca9078de957ced74888ded691eb9511a108..d7efc2eb9744af4cf6a4c8df503b56ad2b451e2e 100644 (file)
@@ -88,7 +88,6 @@ connection URI::
         "postgresql+psycopg2://scott:tiger@192.168.0.199:5432/test?sslmode=require"
     )
 
-
 Unix Domain Connections
 ------------------------
 
@@ -103,13 +102,17 @@ in ``/tmp``, or whatever socket directory was specified when PostgreSQL
 was built.  This value can be overridden by passing a pathname to psycopg2,
 using ``host`` as an additional keyword argument::
 
-    create_engine("postgresql+psycopg2://user:password@/dbname?host=/var/lib/postgresql")
+    create_engine(
+        "postgresql+psycopg2://user:password@/dbname?host=/var/lib/postgresql"
+    )
 
 .. warning::  The format accepted here allows for a hostname in the main URL
    in addition to the "host" query string argument.  **When using this URL
    format, the initial host is silently ignored**.  That is, this URL::
 
-        engine = create_engine("postgresql+psycopg2://user:password@myhost1/dbname?host=myhost2")
+        engine = create_engine(
+            "postgresql+psycopg2://user:password@myhost1/dbname?host=myhost2"
+        )
 
    Above, the hostname ``myhost1`` is **silently ignored and discarded.**  The
    host which is connected is the ``myhost2`` host.
@@ -190,7 +193,7 @@ any or all elements of the connection string.
 For this form, the URL can be passed without any elements other than the
 initial scheme::
 
-    engine = create_engine('postgresql+psycopg2://')
+    engine = create_engine("postgresql+psycopg2://")
 
 In the above form, a blank "dsn" string is passed to the ``psycopg2.connect()``
 function which in turn represents an empty DSN passed to libpq.
@@ -264,8 +267,8 @@ used feature.  The use of this extension may be enabled using the
 
     engine = create_engine(
         "postgresql+psycopg2://scott:tiger@host/dbname",
-        executemany_mode='values_plus_batch')
-
+        executemany_mode="values_plus_batch",
+    )
 
 Possible options for ``executemany_mode`` include:
 
@@ -311,8 +314,10 @@ is below::
 
     engine = create_engine(
         "postgresql+psycopg2://scott:tiger@host/dbname",
-        executemany_mode='values_plus_batch',
-        insertmanyvalues_page_size=5000, executemany_batch_page_size=500)
+        executemany_mode="values_plus_batch",
+        insertmanyvalues_page_size=5000,
+        executemany_batch_page_size=500,
+    )
 
 .. seealso::
 
@@ -338,7 +343,9 @@ in the following ways:
   passed in the database URL; this parameter is consumed by the underlying
   ``libpq`` PostgreSQL client library::
 
-    engine = create_engine("postgresql+psycopg2://user:pass@host/dbname?client_encoding=utf8")
+    engine = create_engine(
+        "postgresql+psycopg2://user:pass@host/dbname?client_encoding=utf8"
+    )
 
   Alternatively, the above ``client_encoding`` value may be passed using
   :paramref:`_sa.create_engine.connect_args` for programmatic establishment with
@@ -346,7 +353,7 @@ in the following ways:
 
     engine = create_engine(
         "postgresql+psycopg2://user:pass@host/dbname",
-        connect_args={'client_encoding': 'utf8'}
+        connect_args={"client_encoding": "utf8"},
     )
 
 * For all PostgreSQL versions, psycopg2 supports a client-side encoding
@@ -355,8 +362,7 @@ in the following ways:
   ``client_encoding`` parameter passed to :func:`_sa.create_engine`::
 
       engine = create_engine(
-          "postgresql+psycopg2://user:pass@host/dbname",
-          client_encoding="utf8"
+          "postgresql+psycopg2://user:pass@host/dbname", client_encoding="utf8"
       )
 
   .. tip:: The above ``client_encoding`` parameter admittedly is very similar
@@ -375,11 +381,9 @@ in the following ways:
     # postgresql.conf file
 
     # client_encoding = sql_ascii # actually, defaults to database
-                                 # encoding
+    # encoding
     client_encoding = utf8
 
-
-
 Transactions
 ------------
 
@@ -426,15 +430,15 @@ is set to the ``logging.INFO`` level, notice messages will be logged::
 
     import logging
 
-    logging.getLogger('sqlalchemy.dialects.postgresql').setLevel(logging.INFO)
+    logging.getLogger("sqlalchemy.dialects.postgresql").setLevel(logging.INFO)
 
 Above, it is assumed that logging is configured externally.  If this is not
 the case, configuration such as ``logging.basicConfig()`` must be utilized::
 
     import logging
 
-    logging.basicConfig()   # log messages to stdout
-    logging.getLogger('sqlalchemy.dialects.postgresql').setLevel(logging.INFO)
+    logging.basicConfig()  # log messages to stdout
+    logging.getLogger("sqlalchemy.dialects.postgresql").setLevel(logging.INFO)
 
 .. seealso::
 
@@ -471,8 +475,10 @@ textual HSTORE expression.  If this behavior is not desired, disable the
 use of the hstore extension by setting ``use_native_hstore`` to ``False`` as
 follows::
 
-    engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/test",
-                use_native_hstore=False)
+    engine = create_engine(
+        "postgresql+psycopg2://scott:tiger@localhost/test",
+        use_native_hstore=False,
+    )
 
 The ``HSTORE`` type is **still supported** when the
 ``psycopg2.extensions.register_hstore()`` extension is not used.  It merely
index 2acf63bef61e481a53a55079333955edb768eed2..73f9d372ab23004d45c6d8129f153591fa839bcc 100644 (file)
@@ -94,12 +94,11 @@ class MONEY(sqltypes.TypeEngine[str]):
         from sqlalchemy import Dialect
         from sqlalchemy import TypeDecorator
 
+
         class NumericMoney(TypeDecorator):
             impl = MONEY
 
-            def process_result_value(
-                self, value: Any, dialect: Dialect
-            ) -> None:
+            def process_result_value(self, value: Any, dialect: Dialect) -> None:
                 if value is not None:
                     # adjust this for the currency and numeric
                     m = re.match(r"\$([\d.]+)", value)
@@ -114,6 +113,7 @@ class MONEY(sqltypes.TypeEngine[str]):
         from sqlalchemy import cast
         from sqlalchemy import TypeDecorator
 
+
         class NumericMoney(TypeDecorator):
             impl = MONEY
 
@@ -122,7 +122,7 @@ class MONEY(sqltypes.TypeEngine[str]):
 
     .. versionadded:: 1.2
 
-    """
+    """  # noqa: E501
 
     __visit_name__ = "MONEY"
 
index 14e677892d2b693cf43037a3ce24a8c21d67ec1d..208a72833bae6d0406d65741c88dc4624fc9ed06 100644 (file)
@@ -31,6 +31,7 @@ This dialect should normally be used only with the
 :func:`_asyncio.create_async_engine` engine creation function::
 
     from sqlalchemy.ext.asyncio import create_async_engine
+
     engine = create_async_engine("sqlite+aiosqlite:///filename")
 
 The URL passes through all arguments to the ``pysqlite`` driver, so all
@@ -58,12 +59,14 @@ The solution is similar to :ref:`pysqlite_serializable`. This is achieved by the
 
     engine = create_async_engine("sqlite+aiosqlite:///myfile.db")
 
+
     @event.listens_for(engine.sync_engine, "connect")
     def do_connect(dbapi_connection, connection_record):
         # disable aiosqlite's emitting of the BEGIN statement entirely.
         # also stops it from emitting COMMIT before any DDL.
         dbapi_connection.isolation_level = None
 
+
     @event.listens_for(engine.sync_engine, "begin")
     def do_begin(conn):
         # emit our own BEGIN
index 84bb8937e16f5e08480fcca4ec7efca90d862355..0e4c9694bbffafc2a75b059ae9703927671b82a8 100644 (file)
@@ -7,7 +7,7 @@
 # mypy: ignore-errors
 
 
-r"""
+r'''
 .. dialect:: sqlite
     :name: SQLite
     :normal_support: 3.12+
@@ -69,9 +69,12 @@ To specifically render the AUTOINCREMENT keyword on the primary key column
 when rendering DDL, add the flag ``sqlite_autoincrement=True`` to the Table
 construct::
 
-    Table('sometable', metadata,
-            Column('id', Integer, primary_key=True),
-            sqlite_autoincrement=True)
+    Table(
+        "sometable",
+        metadata,
+        Column("id", Integer, primary_key=True),
+        sqlite_autoincrement=True,
+    )
 
 Allowing autoincrement behavior SQLAlchemy types other than Integer/INTEGER
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -91,8 +94,13 @@ One approach to achieve this is to use :class:`.Integer` on SQLite
 only using :meth:`.TypeEngine.with_variant`::
 
     table = Table(
-        "my_table", metadata,
-        Column("id", BigInteger().with_variant(Integer, "sqlite"), primary_key=True)
+        "my_table",
+        metadata,
+        Column(
+            "id",
+            BigInteger().with_variant(Integer, "sqlite"),
+            primary_key=True,
+        ),
     )
 
 Another is to use a subclass of :class:`.BigInteger` that overrides its DDL
@@ -101,21 +109,23 @@ name to be ``INTEGER`` when compiled against SQLite::
     from sqlalchemy import BigInteger
     from sqlalchemy.ext.compiler import compiles
 
+
     class SLBigInteger(BigInteger):
         pass
 
-    @compiles(SLBigInteger, 'sqlite')
+
+    @compiles(SLBigInteger, "sqlite")
     def bi_c(element, compiler, **kw):
         return "INTEGER"
 
+
     @compiles(SLBigInteger)
     def bi_c(element, compiler, **kw):
         return compiler.visit_BIGINT(element, **kw)
 
 
     table = Table(
-        "my_table", metadata,
-        Column("id", SLBigInteger(), primary_key=True)
+        "my_table", metadata, Column("id", SLBigInteger(), primary_key=True)
     )
 
 .. seealso::
@@ -235,26 +245,24 @@ To specify an explicit ``RETURNING`` clause, use the
 
     # INSERT..RETURNING
     result = connection.execute(
-        table.insert().
-        values(name='foo').
-        returning(table.c.col1, table.c.col2)
+        table.insert().values(name="foo").returning(table.c.col1, table.c.col2)
     )
     print(result.all())
 
     # UPDATE..RETURNING
     result = connection.execute(
-        table.update().
-        where(table.c.name=='foo').
-        values(name='bar').
-        returning(table.c.col1, table.c.col2)
+        table.update()
+        .where(table.c.name == "foo")
+        .values(name="bar")
+        .returning(table.c.col1, table.c.col2)
     )
     print(result.all())
 
     # DELETE..RETURNING
     result = connection.execute(
-        table.delete().
-        where(table.c.name=='foo').
-        returning(table.c.col1, table.c.col2)
+        table.delete()
+        .where(table.c.name == "foo")
+        .returning(table.c.col1, table.c.col2)
     )
     print(result.all())
 
@@ -317,6 +325,7 @@ new connections through the usage of events::
     from sqlalchemy.engine import Engine
     from sqlalchemy import event
 
+
     @event.listens_for(Engine, "connect")
     def set_sqlite_pragma(dbapi_connection, connection_record):
         cursor = dbapi_connection.cursor()
@@ -379,13 +388,16 @@ ABORT, FAIL, IGNORE, and REPLACE.   For example, to add a UNIQUE constraint
 that specifies the IGNORE algorithm::
 
     some_table = Table(
-        'some_table', metadata,
-        Column('id', Integer, primary_key=True),
-        Column('data', Integer),
-        UniqueConstraint('id', 'data', sqlite_on_conflict='IGNORE')
+        "some_table",
+        metadata,
+        Column("id", Integer, primary_key=True),
+        Column("data", Integer),
+        UniqueConstraint("id", "data", sqlite_on_conflict="IGNORE"),
     )
 
-The above renders CREATE TABLE DDL as::
+The above renders CREATE TABLE DDL as:
+
+.. sourcecode:: sql
 
     CREATE TABLE some_table (
         id INTEGER NOT NULL,
@@ -402,13 +414,17 @@ be added to the :class:`_schema.Column` as well, which will be added to the
 UNIQUE constraint in the DDL::
 
     some_table = Table(
-        'some_table', metadata,
-        Column('id', Integer, primary_key=True),
-        Column('data', Integer, unique=True,
-               sqlite_on_conflict_unique='IGNORE')
+        "some_table",
+        metadata,
+        Column("id", Integer, primary_key=True),
+        Column(
+            "data", Integer, unique=True, sqlite_on_conflict_unique="IGNORE"
+        ),
     )
 
-rendering::
+rendering:
+
+.. sourcecode:: sql
 
     CREATE TABLE some_table (
         id INTEGER NOT NULL,
@@ -421,13 +437,17 @@ To apply the FAIL algorithm for a NOT NULL constraint,
 ``sqlite_on_conflict_not_null`` is used::
 
     some_table = Table(
-        'some_table', metadata,
-        Column('id', Integer, primary_key=True),
-        Column('data', Integer, nullable=False,
-               sqlite_on_conflict_not_null='FAIL')
+        "some_table",
+        metadata,
+        Column("id", Integer, primary_key=True),
+        Column(
+            "data", Integer, nullable=False, sqlite_on_conflict_not_null="FAIL"
+        ),
     )
 
-this renders the column inline ON CONFLICT phrase::
+this renders the column inline ON CONFLICT phrase:
+
+.. sourcecode:: sql
 
     CREATE TABLE some_table (
         id INTEGER NOT NULL,
@@ -439,13 +459,20 @@ this renders the column inline ON CONFLICT phrase::
 Similarly, for an inline primary key, use ``sqlite_on_conflict_primary_key``::
 
     some_table = Table(
-        'some_table', metadata,
-        Column('id', Integer, primary_key=True,
-               sqlite_on_conflict_primary_key='FAIL')
+        "some_table",
+        metadata,
+        Column(
+            "id",
+            Integer,
+            primary_key=True,
+            sqlite_on_conflict_primary_key="FAIL",
+        ),
     )
 
 SQLAlchemy renders the PRIMARY KEY constraint separately, so the conflict
-resolution algorithm is applied to the constraint itself::
+resolution algorithm is applied to the constraint itself:
+
+.. sourcecode:: sql
 
     CREATE TABLE some_table (
         id INTEGER NOT NULL,
@@ -455,7 +482,7 @@ resolution algorithm is applied to the constraint itself::
 .. _sqlite_on_conflict_insert:
 
 INSERT...ON CONFLICT (Upsert)
------------------------------------
+-----------------------------
 
 .. seealso:: This section describes the :term:`DML` version of "ON CONFLICT" for
    SQLite, which occurs within an INSERT statement.  For "ON CONFLICT" as
@@ -483,21 +510,18 @@ and :meth:`_sqlite.Insert.on_conflict_do_nothing`:
     >>> from sqlalchemy.dialects.sqlite import insert
 
     >>> insert_stmt = insert(my_table).values(
-    ...     id='some_existing_id',
-    ...     data='inserted value')
+    ...     id="some_existing_id", data="inserted value"
+    ... )
 
     >>> do_update_stmt = insert_stmt.on_conflict_do_update(
-    ...     index_elements=['id'],
-    ...     set_=dict(data='updated value')
+    ...     index_elements=["id"], set_=dict(data="updated value")
     ... )
 
     >>> print(do_update_stmt)
     {printsql}INSERT INTO my_table (id, data) VALUES (?, ?)
     ON CONFLICT (id) DO UPDATE SET data = ?{stop}
 
-    >>> do_nothing_stmt = insert_stmt.on_conflict_do_nothing(
-    ...     index_elements=['id']
-    ... )
+    >>> do_nothing_stmt = insert_stmt.on_conflict_do_nothing(index_elements=["id"])
 
     >>> print(do_nothing_stmt)
     {printsql}INSERT INTO my_table (id, data) VALUES (?, ?)
@@ -528,13 +552,13 @@ Both methods supply the "target" of the conflict using column inference:
 
   .. sourcecode:: pycon+sql
 
-        >>> stmt = insert(my_table).values(user_email='a@b.com', data='inserted data')
+        >>> stmt = insert(my_table).values(user_email="a@b.com", data="inserted data")
 
         >>> do_update_stmt = stmt.on_conflict_do_update(
         ...     index_elements=[my_table.c.user_email],
-        ...     index_where=my_table.c.user_email.like('%@gmail.com'),
-        ...     set_=dict(data=stmt.excluded.data)
-        ...     )
+        ...     index_where=my_table.c.user_email.like("%@gmail.com"),
+        ...     set_=dict(data=stmt.excluded.data),
+        ... )
 
         >>> print(do_update_stmt)
         {printsql}INSERT INTO my_table (data, user_email) VALUES (?, ?)
@@ -554,11 +578,10 @@ for UPDATE:
 
 .. sourcecode:: pycon+sql
 
-    >>> stmt = insert(my_table).values(id='some_id', data='inserted value')
+    >>> stmt = insert(my_table).values(id="some_id", data="inserted value")
 
     >>> do_update_stmt = stmt.on_conflict_do_update(
-    ...     index_elements=['id'],
-    ...     set_=dict(data='updated value')
+    ...     index_elements=["id"], set_=dict(data="updated value")
     ... )
 
     >>> print(do_update_stmt)
@@ -586,14 +609,12 @@ would have been inserted had the constraint not failed:
 .. sourcecode:: pycon+sql
 
     >>> stmt = insert(my_table).values(
-    ...     id='some_id',
-    ...     data='inserted value',
-    ...     author='jlh'
+    ...     id="some_id", data="inserted value", author="jlh"
     ... )
 
     >>> do_update_stmt = stmt.on_conflict_do_update(
-    ...     index_elements=['id'],
-    ...     set_=dict(data='updated value', author=stmt.excluded.author)
+    ...     index_elements=["id"],
+    ...     set_=dict(data="updated value", author=stmt.excluded.author),
     ... )
 
     >>> print(do_update_stmt)
@@ -610,15 +631,13 @@ parameter, which will limit those rows which receive an UPDATE:
 .. sourcecode:: pycon+sql
 
     >>> stmt = insert(my_table).values(
-    ...     id='some_id',
-    ...     data='inserted value',
-    ...     author='jlh'
+    ...     id="some_id", data="inserted value", author="jlh"
     ... )
 
     >>> on_update_stmt = stmt.on_conflict_do_update(
-    ...     index_elements=['id'],
-    ...     set_=dict(data='updated value', author=stmt.excluded.author),
-    ...     where=(my_table.c.status == 2)
+    ...     index_elements=["id"],
+    ...     set_=dict(data="updated value", author=stmt.excluded.author),
+    ...     where=(my_table.c.status == 2),
     ... )
     >>> print(on_update_stmt)
     {printsql}INSERT INTO my_table (id, data, author) VALUES (?, ?, ?)
@@ -635,8 +654,8 @@ using the :meth:`_sqlite.Insert.on_conflict_do_nothing` method:
 
 .. sourcecode:: pycon+sql
 
-    >>> stmt = insert(my_table).values(id='some_id', data='inserted value')
-    >>> stmt = stmt.on_conflict_do_nothing(index_elements=['id'])
+    >>> stmt = insert(my_table).values(id="some_id", data="inserted value")
+    >>> stmt = stmt.on_conflict_do_nothing(index_elements=["id"])
     >>> print(stmt)
     {printsql}INSERT INTO my_table (id, data) VALUES (?, ?) ON CONFLICT (id) DO NOTHING
 
@@ -647,7 +666,7 @@ occurs:
 
 .. sourcecode:: pycon+sql
 
-    >>> stmt = insert(my_table).values(id='some_id', data='inserted value')
+    >>> stmt = insert(my_table).values(id="some_id", data="inserted value")
     >>> stmt = stmt.on_conflict_do_nothing()
     >>> print(stmt)
     {printsql}INSERT INTO my_table (id, data) VALUES (?, ?) ON CONFLICT DO NOTHING
@@ -707,11 +726,16 @@ Partial Indexes
 A partial index, e.g. one which uses a WHERE clause, can be specified
 with the DDL system using the argument ``sqlite_where``::
 
-    tbl = Table('testtbl', m, Column('data', Integer))
-    idx = Index('test_idx1', tbl.c.data,
-                sqlite_where=and_(tbl.c.data > 5, tbl.c.data < 10))
+    tbl = Table("testtbl", m, Column("data", Integer))
+    idx = Index(
+        "test_idx1",
+        tbl.c.data,
+        sqlite_where=and_(tbl.c.data > 5, tbl.c.data < 10),
+    )
+
+The index will be rendered at create time as:
 
-The index will be rendered at create time as::
+.. sourcecode:: sql
 
     CREATE INDEX test_idx1 ON testtbl (data)
     WHERE data > 5 AND data < 10
@@ -731,7 +755,11 @@ The bug, entirely outside of SQLAlchemy, can be illustrated thusly::
 
     import sqlite3
 
-    assert sqlite3.sqlite_version_info < (3, 10, 0), "bug is fixed in this version"
+    assert sqlite3.sqlite_version_info < (
+        3,
+        10,
+        0,
+    ), "bug is fixed in this version"
 
     conn = sqlite3.connect(":memory:")
     cursor = conn.cursor()
@@ -741,17 +769,22 @@ The bug, entirely outside of SQLAlchemy, can be illustrated thusly::
     cursor.execute("insert into x (a, b) values (2, 2)")
 
     cursor.execute("select x.a, x.b from x")
-    assert [c[0] for c in cursor.description] == ['a', 'b']
+    assert [c[0] for c in cursor.description] == ["a", "b"]
 
-    cursor.execute('''
+    cursor.execute(
+        """
         select x.a, x.b from x where a=1
         union
         select x.a, x.b from x where a=2
-    ''')
-    assert [c[0] for c in cursor.description] == ['a', 'b'], \
-        [c[0] for c in cursor.description]
+        """
+    )
+    assert [c[0] for c in cursor.description] == ["a", "b"], [
+        c[0] for c in cursor.description
+    ]
 
-The second assertion fails::
+The second assertion fails:
+
+.. sourcecode:: text
 
     Traceback (most recent call last):
       File "test.py", line 19, in <module>
@@ -779,11 +812,13 @@ to filter these out::
     result = conn.exec_driver_sql("select x.a, x.b from x")
     assert result.keys() == ["a", "b"]
 
-    result = conn.exec_driver_sql('''
+    result = conn.exec_driver_sql(
+        """
         select x.a, x.b from x where a=1
         union
         select x.a, x.b from x where a=2
-    ''')
+        """
+    )
     assert result.keys() == ["a", "b"]
 
 Note that above, even though SQLAlchemy filters out the dots, *both
@@ -807,16 +842,20 @@ contain dots, and the functionality of :meth:`_engine.CursorResult.keys` and
 the ``sqlite_raw_colnames`` execution option may be provided, either on a
 per-:class:`_engine.Connection` basis::
 
-    result = conn.execution_options(sqlite_raw_colnames=True).exec_driver_sql('''
+    result = conn.execution_options(sqlite_raw_colnames=True).exec_driver_sql(
+        """
         select x.a, x.b from x where a=1
         union
         select x.a, x.b from x where a=2
-    ''')
+        """
+    )
     assert result.keys() == ["x.a", "x.b"]
 
 or on a per-:class:`_engine.Engine` basis::
 
-    engine = create_engine("sqlite://", execution_options={"sqlite_raw_colnames": True})
+    engine = create_engine(
+        "sqlite://", execution_options={"sqlite_raw_colnames": True}
+    )
 
 When using the per-:class:`_engine.Engine` execution option, note that
 **Core and ORM queries that use UNION may not function properly**.
@@ -865,7 +904,7 @@ passed to methods such as :meth:`_schema.MetaData.reflect` or
     `SQLite Internal Schema Objects <https://www.sqlite.org/fileformat2.html#intschema>`_ - in the SQLite
     documentation.
 
-"""  # noqa
+'''  # noqa
 from __future__ import annotations
 
 import datetime
@@ -979,7 +1018,9 @@ class DATETIME(_DateTimeMixin, sqltypes.DateTime):
 
         "%(year)04d-%(month)02d-%(day)02d %(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d"
 
-    e.g.::
+    e.g.:
+
+    .. sourcecode:: text
 
         2021-03-15 12:05:57.105542
 
@@ -995,9 +1036,11 @@ class DATETIME(_DateTimeMixin, sqltypes.DateTime):
         import re
         from sqlalchemy.dialects.sqlite import DATETIME
 
-        dt = DATETIME(storage_format="%(year)04d/%(month)02d/%(day)02d "
-                                     "%(hour)02d:%(minute)02d:%(second)02d",
-                      regexp=r"(\d+)/(\d+)/(\d+) (\d+)-(\d+)-(\d+)"
+        dt = DATETIME(
+            storage_format=(
+                "%(year)04d/%(month)02d/%(day)02d %(hour)02d:%(minute)02d:%(second)02d"
+            ),
+            regexp=r"(\d+)/(\d+)/(\d+) (\d+):(\d+):(\d+)",
         )
 
     :param storage_format: format string which will be applied to the dict
@@ -1087,7 +1130,9 @@ class DATE(_DateTimeMixin, sqltypes.Date):
 
         "%(year)04d-%(month)02d-%(day)02d"
 
-    e.g.::
+    e.g.:
+
+    .. sourcecode:: text
 
         2011-03-15
 
@@ -1105,9 +1150,9 @@ class DATE(_DateTimeMixin, sqltypes.Date):
         from sqlalchemy.dialects.sqlite import DATE
 
         d = DATE(
-                storage_format="%(month)02d/%(day)02d/%(year)04d",
-                regexp=re.compile("(?P<month>\d+)/(?P<day>\d+)/(?P<year>\d+)")
-            )
+            storage_format="%(month)02d/%(day)02d/%(year)04d",
+            regexp=re.compile("(?P<month>\d+)/(?P<day>\d+)/(?P<year>\d+)"),
+        )
 
     :param storage_format: format string which will be applied to the
      dict with keys year, month, and day.
@@ -1161,7 +1206,9 @@ class TIME(_DateTimeMixin, sqltypes.Time):
 
         "%(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d"
 
-    e.g.::
+    e.g.:
+
+    .. sourcecode:: text
 
         12:05:57.10558
 
@@ -1177,9 +1224,9 @@ class TIME(_DateTimeMixin, sqltypes.Time):
         import re
         from sqlalchemy.dialects.sqlite import TIME
 
-        t = TIME(storage_format="%(hour)02d-%(minute)02d-"
-                                "%(second)02d-%(microsecond)06d",
-                 regexp=re.compile("(\d+)-(\d+)-(\d+)-(?:-(\d+))?")
+        t = TIME(
+            storage_format="%(hour)02d-%(minute)02d-%(second)02d-%(microsecond)06d",
+            regexp=re.compile("(\d+)-(\d+)-(\d+)(?:-(\d+))?"),
         )
 
     :param storage_format: format string which will be applied to the dict
index 388a4dff817da0fcec6fe812a2e8202154c6ee77..58471ac90ecc792db113419b8459bbbfc0d90be2 100644 (file)
@@ -39,7 +39,7 @@ Current dialect selection logic is:
 
         e = create_engine(
             "sqlite+pysqlcipher://:password@/dbname.db",
-            module=sqlcipher_compatible_driver
+            module=sqlcipher_compatible_driver,
         )
 
 These drivers make use of the SQLCipher engine. This system essentially
@@ -55,12 +55,12 @@ The format of the connect string is in every way the same as that
 of the :mod:`~sqlalchemy.dialects.sqlite.pysqlite` driver, except that the
 "password" field is now accepted, which should contain a passphrase::
 
-    e = create_engine('sqlite+pysqlcipher://:testing@/foo.db')
+    e = create_engine("sqlite+pysqlcipher://:testing@/foo.db")
 
 For an absolute file path, two leading slashes should be used for the
 database name::
 
-    e = create_engine('sqlite+pysqlcipher://:testing@//path/to/foo.db')
+    e = create_engine("sqlite+pysqlcipher://:testing@//path/to/foo.db")
 
 A selection of additional encryption-related pragmas supported by SQLCipher
 as documented at https://www.zetetic.net/sqlcipher/sqlcipher-api/ can be passed
@@ -68,7 +68,9 @@ in the query string, and will result in that PRAGMA being called for each
 new connection.  Currently, ``cipher``, ``kdf_iter``
 ``cipher_page_size`` and ``cipher_use_hmac`` are supported::
 
-    e = create_engine('sqlite+pysqlcipher://:testing@/foo.db?cipher=aes-256-cfb&kdf_iter=64000')
+    e = create_engine(
+        "sqlite+pysqlcipher://:testing@/foo.db?cipher=aes-256-cfb&kdf_iter=64000"
+    )
 
 .. warning:: Previous versions of sqlalchemy did not take into consideration
    the encryption-related pragmas passed in the url string, that were silently
index ab6ce6dc436d616f797481dbd65c3198689447b8..0c854630089562ba3f563f4b4bdda8639e1cd904 100644 (file)
@@ -28,7 +28,9 @@ Connect Strings
 ---------------
 
 The file specification for the SQLite database is taken as the "database"
-portion of the URL.  Note that the format of a SQLAlchemy url is::
+portion of the URL.  Note that the format of a SQLAlchemy url is:
+
+.. sourcecode:: text
 
     driver://user:pass@host/database
 
@@ -37,28 +39,28 @@ the **right** of the third slash.   So connecting to a relative filepath
 looks like::
 
     # relative path
-    e = create_engine('sqlite:///path/to/database.db')
+    e = create_engine("sqlite:///path/to/database.db")
 
 An absolute path, which is denoted by starting with a slash, means you
 need **four** slashes::
 
     # absolute path
-    e = create_engine('sqlite:////path/to/database.db')
+    e = create_engine("sqlite:////path/to/database.db")
 
 To use a Windows path, regular drive specifications and backslashes can be
 used. Double backslashes are probably needed::
 
     # absolute path on Windows
-    e = create_engine('sqlite:///C:\\path\\to\\database.db')
+    e = create_engine("sqlite:///C:\\path\\to\\database.db")
 
 To use sqlite ``:memory:`` database specify it as the filename using
 ``sqlite:///:memory:``. It's also the default if no filepath is
 present, specifying only ``sqlite://`` and nothing else::
 
     # in-memory database (note three slashes)
-    e = create_engine('sqlite:///:memory:')
+    e = create_engine("sqlite:///:memory:")
     # also in-memory database
-    e2 = create_engine('sqlite://')
+    e2 = create_engine("sqlite://")
 
 .. _pysqlite_uri_connections:
 
@@ -98,7 +100,9 @@ Above, the pysqlite / sqlite3 DBAPI would be passed arguments as::
 
     sqlite3.connect(
         "file:path/to/database?mode=ro&nolock=1",
-        check_same_thread=True, timeout=10, uri=True
+        check_same_thread=True,
+        timeout=10,
+        uri=True,
     )
 
 Regarding future parameters added to either the Python or native drivers, new
@@ -144,8 +148,11 @@ as follows::
     def regexp(a, b):
         return re.search(a, b) is not None
 
+
     sqlite_connection.create_function(
-        "regexp", 2, regexp,
+        "regexp",
+        2,
+        regexp,
     )
 
 There is currently no support for regular expression flags as a separate
@@ -186,10 +193,12 @@ Keeping in mind that pysqlite's parsing option is not recommended,
 nor should be necessary, for use with SQLAlchemy, usage of PARSE_DECLTYPES
 can be forced if one configures "native_datetime=True" on create_engine()::
 
-    engine = create_engine('sqlite://',
-        connect_args={'detect_types':
-            sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES},
-        native_datetime=True
+    engine = create_engine(
+        "sqlite://",
+        connect_args={
+            "detect_types": sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES
+        },
+        native_datetime=True,
     )
 
 With this flag enabled, the DATE and TIMESTAMP types (but note - not the
@@ -244,6 +253,7 @@ Pooling may be disabled for a file based database by specifying the
 parameter::
 
     from sqlalchemy import NullPool
+
     engine = create_engine("sqlite:///myfile.db", poolclass=NullPool)
 
 It's been observed that the :class:`.NullPool` implementation incurs an
@@ -263,9 +273,12 @@ globally, and the ``check_same_thread`` flag can be passed to Pysqlite
 as ``False``::
 
     from sqlalchemy.pool import StaticPool
-    engine = create_engine('sqlite://',
-                        connect_args={'check_same_thread':False},
-                        poolclass=StaticPool)
+
+    engine = create_engine(
+        "sqlite://",
+        connect_args={"check_same_thread": False},
+        poolclass=StaticPool,
+    )
 
 Note that using a ``:memory:`` database in multiple threads requires a recent
 version of SQLite.
@@ -284,14 +297,14 @@ needed within multiple threads for this case::
 
     # maintain the same connection per thread
     from sqlalchemy.pool import SingletonThreadPool
-    engine = create_engine('sqlite:///mydb.db',
-                        poolclass=SingletonThreadPool)
+
+    engine = create_engine("sqlite:///mydb.db", poolclass=SingletonThreadPool)
 
 
     # maintain the same connection across all threads
     from sqlalchemy.pool import StaticPool
-    engine = create_engine('sqlite:///mydb.db',
-                        poolclass=StaticPool)
+
+    engine = create_engine("sqlite:///mydb.db", poolclass=StaticPool)
 
 Note that :class:`.SingletonThreadPool` should be configured for the number
 of threads that are to be used; beyond that number, connections will be
@@ -320,13 +333,14 @@ same column, use a custom type that will check each row individually::
     from sqlalchemy import String
     from sqlalchemy import TypeDecorator
 
+
     class MixedBinary(TypeDecorator):
         impl = String
         cache_ok = True
 
         def process_result_value(self, value, dialect):
             if isinstance(value, str):
-                value = bytes(value, 'utf-8')
+                value = bytes(value, "utf-8")
             elif value is not None:
                 value = bytes(value)
 
@@ -367,12 +381,14 @@ ourselves. This is achieved using two event listeners::
 
     engine = create_engine("sqlite:///myfile.db")
 
+
     @event.listens_for(engine, "connect")
     def do_connect(dbapi_connection, connection_record):
         # disable pysqlite's emitting of the BEGIN statement entirely.
         # also stops it from emitting COMMIT before any DDL.
         dbapi_connection.isolation_level = None
 
+
     @event.listens_for(engine, "begin")
     def do_begin(conn):
         # emit our own BEGIN
@@ -442,7 +458,6 @@ connection when it is created. That is accomplished with an event listener::
         with engine.connect() as conn:
             print(conn.scalar(text("SELECT UDF()")))
 
-
 """  # noqa
 
 import math
index 13ec3d639acf797ec5999361afef766ce7b0467d..72b455d45a38d73853d9e2e56f8d1d36436c6b59 100644 (file)
@@ -817,7 +817,6 @@ class Connection(ConnectionEventsTarget, inspection.Inspectable["Inspector"]):
                 with conn.begin() as trans:
                     conn.execute(table.insert(), {"username": "sandy"})
 
-
         The returned object is an instance of :class:`_engine.RootTransaction`.
         This object represents the "scope" of the transaction,
         which completes when either the :meth:`_engine.Transaction.rollback`
@@ -923,7 +922,7 @@ class Connection(ConnectionEventsTarget, inspection.Inspectable["Inspector"]):
                     trans.rollback()  # rollback to savepoint
 
                 # outer transaction continues
-                connection.execute( ... )
+                connection.execute(...)
 
         If :meth:`_engine.Connection.begin_nested` is called without first
         calling :meth:`_engine.Connection.begin` or
@@ -933,11 +932,11 @@ class Connection(ConnectionEventsTarget, inspection.Inspectable["Inspector"]):
 
             with engine.connect() as connection:  # begin() wasn't called
 
-                with connection.begin_nested(): will auto-"begin()" first
-                    connection.execute( ... )
+                with connection.begin_nested():  # will auto-"begin()" first
+                    connection.execute(...)
                 # savepoint is released
 
-                connection.execute( ... )
+                connection.execute(...)
 
                 # explicitly commit outer transaction
                 connection.commit()
@@ -1750,21 +1749,20 @@ class Connection(ConnectionEventsTarget, inspection.Inspectable["Inspector"]):
 
              conn.exec_driver_sql(
                  "INSERT INTO table (id, value) VALUES (%(id)s, %(value)s)",
-                 [{"id":1, "value":"v1"}, {"id":2, "value":"v2"}]
+                 [{"id": 1, "value": "v1"}, {"id": 2, "value": "v2"}],
              )
 
          Single dictionary::
 
              conn.exec_driver_sql(
                  "INSERT INTO table (id, value) VALUES (%(id)s, %(value)s)",
-                 dict(id=1, value="v1")
+                 dict(id=1, value="v1"),
              )
 
          Single tuple::
 
              conn.exec_driver_sql(
-                 "INSERT INTO table (id, value) VALUES (?, ?)",
-                 (1, 'v1')
+                 "INSERT INTO table (id, value) VALUES (?, ?)", (1, "v1")
              )
 
          .. note:: The :meth:`_engine.Connection.exec_driver_sql` method does
@@ -2524,6 +2522,7 @@ class Transaction(TransactionalContext):
     :class:`_engine.Connection`::
 
         from sqlalchemy import create_engine
+
         engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/test")
         connection = engine.connect()
         trans = connection.begin()
@@ -3101,10 +3100,10 @@ class Engine(
 
             shards = {"default": "base", "shard_1": "db1", "shard_2": "db2"}
 
+
             @event.listens_for(Engine, "before_cursor_execute")
-            def _switch_shard(conn, cursor, stmt,
-                    params, context, executemany):
-                shard_id = conn.get_execution_options().get('shard_id', "default")
+            def _switch_shard(conn, cursor, stmt, params, context, executemany):
+                shard_id = conn.get_execution_options().get("shard_id", "default")
                 current_shard = conn.info.get("current_shard", None)
 
                 if current_shard != shard_id:
@@ -3230,9 +3229,7 @@ class Engine(
         E.g.::
 
             with engine.begin() as conn:
-                conn.execute(
-                    text("insert into table (x, y, z) values (1, 2, 3)")
-                )
+                conn.execute(text("insert into table (x, y, z) values (1, 2, 3)"))
                 conn.execute(text("my_special_procedure(5)"))
 
         Upon successful operation, the :class:`.Transaction`
@@ -3248,7 +3245,7 @@ class Engine(
             :meth:`_engine.Connection.begin` - start a :class:`.Transaction`
             for a particular :class:`_engine.Connection`.
 
-        """
+        """  # noqa: E501
         with self.connect() as conn:
             with conn.begin():
                 yield conn
index 722a10ed0525265e748aa8602f8ae73c602b153d..dae72dfbdeff8d8cc39c5785acb8e21406aa71af 100644 (file)
@@ -133,8 +133,11 @@ def create_engine(url: Union[str, _url.URL], **kwargs: Any) -> Engine:
     and its underlying :class:`.Dialect` and :class:`_pool.Pool`
     constructs::
 
-        engine = create_engine("mysql+mysqldb://scott:tiger@hostname/dbname",
-                                    pool_recycle=3600, echo=True)
+        engine = create_engine(
+            "mysql+mysqldb://scott:tiger@hostname/dbname",
+            pool_recycle=3600,
+            echo=True,
+        )
 
     The string form of the URL is
     ``dialect[+driver]://user:password@host/dbname[?key=value..]``, where
index 491ef9e443d5f3871a8ed0c26b5b083415f43fdf..427f8aede21fc9f4945a653aa0700d71f71378e4 100644 (file)
@@ -1252,7 +1252,7 @@ class BufferedRowCursorFetchStrategy(CursorFetchStrategy):
 
             result = conn.execution_options(
                 stream_results=True, max_row_buffer=50
-                ).execute(text("select * from table"))
+            ).execute(text("select * from table"))
 
     .. versionadded:: 1.4 ``max_row_buffer`` may now exceed 1000 rows.
 
@@ -1858,11 +1858,9 @@ class CursorResult(Result[Unpack[_Ts]]):
 
             r1 = connection.execute(
                 users.insert().returning(
-                    users.c.user_name,
-                    users.c.user_id,
-                    sort_by_parameter_order=True
+                    users.c.user_name, users.c.user_id, sort_by_parameter_order=True
                 ),
-                user_values
+                user_values,
             )
 
             r2 = connection.execute(
@@ -1870,19 +1868,16 @@ class CursorResult(Result[Unpack[_Ts]]):
                     addresses.c.address_id,
                     addresses.c.address,
                     addresses.c.user_id,
-                    sort_by_parameter_order=True
+                    sort_by_parameter_order=True,
                 ),
-                address_values
+                address_values,
             )
 
             rows = r1.splice_horizontally(r2).all()
-            assert (
-                rows ==
-                [
-                    ("john", 1, 1, "foo@bar.com", 1),
-                    ("jack", 2, 2, "bar@bat.com", 2),
-                ]
-            )
+            assert rows == [
+                ("john", 1, 1, "foo@bar.com", 1),
+                ("jack", 2, 2, "bar@bat.com", 2),
+            ]
 
         .. versionadded:: 2.0
 
@@ -1891,7 +1886,7 @@ class CursorResult(Result[Unpack[_Ts]]):
             :meth:`.CursorResult.splice_vertically`
 
 
-        """
+        """  # noqa: E501
 
         clone = self._generate()
         total_rows = [
index 2273dd2c41a48396b0aa35d2ec026426bcf8173f..7b31138c52750e2d93939c8e804b0521ce528922 100644 (file)
@@ -56,19 +56,24 @@ class ConnectionEvents(event.Events[ConnectionEventsTarget]):
 
         from sqlalchemy import event, create_engine
 
-        def before_cursor_execute(conn, cursor, statement, parameters, context,
-                                                        executemany):
+
+        def before_cursor_execute(
+            conn, cursor, statement, parameters, context, executemany
+        ):
             log.info("Received statement: %s", statement)
 
-        engine = create_engine('postgresql+psycopg2://scott:tiger@localhost/test')
+
+        engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/test")
         event.listen(engine, "before_cursor_execute", before_cursor_execute)
 
     or with a specific :class:`_engine.Connection`::
 
         with engine.begin() as conn:
-            @event.listens_for(conn, 'before_cursor_execute')
-            def before_cursor_execute(conn, cursor, statement, parameters,
-                                            context, executemany):
+
+            @event.listens_for(conn, "before_cursor_execute")
+            def before_cursor_execute(
+                conn, cursor, statement, parameters, context, executemany
+            ):
                 log.info("Received statement: %s", statement)
 
     When the methods are called with a `statement` parameter, such as in
@@ -86,9 +91,11 @@ class ConnectionEvents(event.Events[ConnectionEventsTarget]):
         from sqlalchemy.engine import Engine
         from sqlalchemy import event
 
+
         @event.listens_for(Engine, "before_cursor_execute", retval=True)
-        def comment_sql_calls(conn, cursor, statement, parameters,
-                                            context, executemany):
+        def comment_sql_calls(
+            conn, cursor, statement, parameters, context, executemany
+        ):
             statement = statement + " -- some comment"
             return statement, parameters
 
@@ -318,8 +325,9 @@ class ConnectionEvents(event.Events[ConnectionEventsTarget]):
         returned as a two-tuple in this case::
 
             @event.listens_for(Engine, "before_cursor_execute", retval=True)
-            def before_cursor_execute(conn, cursor, statement,
-                            parameters, context, executemany):
+            def before_cursor_execute(
+                conn, cursor, statement, parameters, context, executemany
+            ):
                 # do something with statement, parameters
                 return statement, parameters
 
@@ -768,9 +776,9 @@ class DialectEvents(event.Events[Dialect]):
 
             @event.listens_for(Engine, "handle_error")
             def handle_exception(context):
-                if isinstance(context.original_exception,
-                    psycopg2.OperationalError) and \
-                    "failed" in str(context.original_exception):
+                if isinstance(
+                    context.original_exception, psycopg2.OperationalError
+                ) and "failed" in str(context.original_exception):
                     raise MySpecialException("failed operation")
 
         .. warning::  Because the
@@ -793,10 +801,13 @@ class DialectEvents(event.Events[Dialect]):
 
             @event.listens_for(Engine, "handle_error", retval=True)
             def handle_exception(context):
-                if context.chained_exception is not None and \
-                    "special" in context.chained_exception.message:
-                    return MySpecialException("failed",
-                        cause=context.chained_exception)
+                if (
+                    context.chained_exception is not None
+                    and "special" in context.chained_exception.message
+                ):
+                    return MySpecialException(
+                        "failed", cause=context.chained_exception
+                    )
 
         Handlers that return ``None`` may be used within the chain; when
         a handler returns ``None``, the previous exception instance,
@@ -838,7 +849,8 @@ class DialectEvents(event.Events[Dialect]):
 
             e = create_engine("postgresql+psycopg2://user@host/dbname")
 
-            @event.listens_for(e, 'do_connect')
+
+            @event.listens_for(e, "do_connect")
             def receive_do_connect(dialect, conn_rec, cargs, cparams):
                 cparams["password"] = "some_password"
 
@@ -847,7 +859,8 @@ class DialectEvents(event.Events[Dialect]):
 
             e = create_engine("postgresql+psycopg2://user@host/dbname")
 
-            @event.listens_for(e, 'do_connect')
+
+            @event.listens_for(e, "do_connect")
             def receive_do_connect(dialect, conn_rec, cargs, cparams):
                 return psycopg2.connect(*cargs, **cparams)
 
index e1e1b3ba5b89cf29d9bd92d3abd4d7bac97d2c00..e96881822ee996b61539d4c8dae2019bc1e4b570 100644 (file)
@@ -1061,11 +1061,7 @@ class Dialect(EventTarget):
     To implement, establish as a series of tuples, as in::
 
         construct_arguments = [
-            (schema.Index, {
-                "using": False,
-                "where": None,
-                "ops": None
-            })
+            (schema.Index, {"using": False, "where": None, "ops": None}),
         ]
 
     If the above construct is established on the PostgreSQL dialect,
@@ -2686,11 +2682,14 @@ class CreateEnginePlugin:
         from sqlalchemy.engine import CreateEnginePlugin
         from sqlalchemy import event
 
+
         class LogCursorEventsPlugin(CreateEnginePlugin):
             def __init__(self, url, kwargs):
                 # consume the parameter "log_cursor_logging_name" from the
                 # URL query
-                logging_name = url.query.get("log_cursor_logging_name", "log_cursor")
+                logging_name = url.query.get(
+                    "log_cursor_logging_name", "log_cursor"
+                )
 
                 self.log = logging.getLogger(logging_name)
 
@@ -2702,7 +2701,6 @@ class CreateEnginePlugin:
                 "attach an event listener after the new Engine is constructed"
                 event.listen(engine, "before_cursor_execute", self._log_event)
 
-
             def _log_event(
                 self,
                 conn,
@@ -2710,19 +2708,19 @@ class CreateEnginePlugin:
                 statement,
                 parameters,
                 context,
-                executemany):
+                executemany,
+            ):
 
                 self.log.info("Plugin logged cursor event: %s", statement)
 
-
-
     Plugins are registered using entry points in a similar way as that
     of dialects::
 
-        entry_points={
-            'sqlalchemy.plugins': [
-                'log_cursor_plugin = myapp.plugins:LogCursorEventsPlugin'
+        entry_points = {
+            "sqlalchemy.plugins": [
+                "log_cursor_plugin = myapp.plugins:LogCursorEventsPlugin"
             ]
+        }
 
     A plugin that uses the above names would be invoked from a database
     URL as in::
@@ -2739,15 +2737,16 @@ class CreateEnginePlugin:
     in the URL::
 
         engine = create_engine(
-          "mysql+pymysql://scott:tiger@localhost/test?"
-          "plugin=plugin_one&plugin=plugin_twp&plugin=plugin_three")
+            "mysql+pymysql://scott:tiger@localhost/test?"
+            "plugin=plugin_one&plugin=plugin_two&plugin=plugin_three"
+        )
 
     The plugin names may also be passed directly to :func:`_sa.create_engine`
     using the :paramref:`_sa.create_engine.plugins` argument::
 
         engine = create_engine(
-          "mysql+pymysql://scott:tiger@localhost/test",
-          plugins=["myplugin"])
+            "mysql+pymysql://scott:tiger@localhost/test", plugins=["myplugin"]
+        )
 
     .. versionadded:: 1.2.3  plugin names can also be specified
        to :func:`_sa.create_engine` as a list
@@ -2769,9 +2768,9 @@ class CreateEnginePlugin:
 
         class MyPlugin(CreateEnginePlugin):
             def __init__(self, url, kwargs):
-                self.my_argument_one = url.query['my_argument_one']
-                self.my_argument_two = url.query['my_argument_two']
-                self.my_argument_three = kwargs.pop('my_argument_three', None)
+                self.my_argument_one = url.query["my_argument_one"]
+                self.my_argument_two = url.query["my_argument_two"]
+                self.my_argument_three = kwargs.pop("my_argument_three", None)
 
             def update_url(self, url):
                 return url.difference_update_query(
@@ -2784,9 +2783,9 @@ class CreateEnginePlugin:
         from sqlalchemy import create_engine
 
         engine = create_engine(
-          "mysql+pymysql://scott:tiger@localhost/test?"
-          "plugin=myplugin&my_argument_one=foo&my_argument_two=bar",
-          my_argument_three='bat'
+            "mysql+pymysql://scott:tiger@localhost/test?"
+            "plugin=myplugin&my_argument_one=foo&my_argument_two=bar",
+            my_argument_three="bat",
         )
 
     .. versionchanged:: 1.4
@@ -2805,15 +2804,15 @@ class CreateEnginePlugin:
                 def __init__(self, url, kwargs):
                     if hasattr(CreateEnginePlugin, "update_url"):
                         # detect the 1.4 API
-                        self.my_argument_one = url.query['my_argument_one']
-                        self.my_argument_two = url.query['my_argument_two']
+                        self.my_argument_one = url.query["my_argument_one"]
+                        self.my_argument_two = url.query["my_argument_two"]
                     else:
                         # detect the 1.3 and earlier API - mutate the
                         # URL directly
-                        self.my_argument_one = url.query.pop('my_argument_one')
-                        self.my_argument_two = url.query.pop('my_argument_two')
+                        self.my_argument_one = url.query.pop("my_argument_one")
+                        self.my_argument_two = url.query.pop("my_argument_two")
 
-                    self.my_argument_three = kwargs.pop('my_argument_three', None)
+                    self.my_argument_three = kwargs.pop("my_argument_three", None)
 
                 def update_url(self, url):
                     # this method is only called in the 1.4 version
@@ -3384,11 +3383,14 @@ class AdaptedConnection:
 
             engine = create_async_engine(...)
 
+
             @event.listens_for(engine.sync_engine, "connect")
-            def register_custom_types(dbapi_connection, ...):
+            def register_custom_types(
+                dbapi_connection,  # ...
+            ):
                 dbapi_connection.run_async(
                     lambda connection: connection.set_type_codec(
-                        'MyCustomType', encoder, decoder, ...
+                        "MyCustomType", encoder, decoder, ...
                     )
                 )
 
index c9fa5eb31a78e95a53e40632644f41953c6a8eda..fc59521cd2622b2684f8f7127b247ad3837e58ea 100644 (file)
@@ -90,10 +90,12 @@ def create_mock_engine(
 
         from sqlalchemy import create_mock_engine
 
+
         def dump(sql, *multiparams, **params):
             print(sql.compile(dialect=engine.dialect))
 
-        engine = create_mock_engine('postgresql+psycopg2://', dump)
+
+        engine = create_mock_engine("postgresql+psycopg2://", dump)
         metadata.create_all(engine, checkfirst=False)
 
     :param url: A string URL which typically needs to contain only the
index a0d4a58f26e6f2453fcea79281e0adc22e6eafc7..a4364e1d550a5da8c7ab853fc75a5b2051fc9e34 100644 (file)
@@ -193,7 +193,8 @@ class Inspector(inspection.Inspectable["Inspector"]):
     or a :class:`_engine.Connection`::
 
         from sqlalchemy import inspect, create_engine
-        engine = create_engine('...')
+
+        engine = create_engine("...")
         insp = inspect(engine)
 
     Where above, the :class:`~sqlalchemy.engine.interfaces.Dialect` associated
@@ -1492,9 +1493,9 @@ class Inspector(inspection.Inspectable["Inspector"]):
             from sqlalchemy import create_engine, MetaData, Table
             from sqlalchemy import inspect
 
-            engine = create_engine('...')
+            engine = create_engine("...")
             meta = MetaData()
-            user_table = Table('user', meta)
+            user_table = Table("user", meta)
             insp = inspect(engine)
             insp.reflect_table(user_table, None)
 
index 7b7be4fdb44accf7e72b323014d148d73686bc34..e495a2619da07be6fe29b8aa68b116529d7960a9 100644 (file)
@@ -1103,17 +1103,15 @@ class Result(_WithKeys, ResultInternal[Row[Unpack[_Ts]]]):
             statement = select(table.c.x, table.c.y, table.c.z)
             result = connection.execute(statement)
 
-            for z, y in result.columns('z', 'y'):
-                # ...
-
+            for z, y in result.columns("z", "y"):
+                ...
 
         Example of using the column objects from the statement itself::
 
             for z, y in result.columns(
-                    statement.selected_columns.c.z,
-                    statement.selected_columns.c.y
+                statement.selected_columns.c.z, statement.selected_columns.c.y
             ):
-                ...
+                ...
 
         .. versionadded:: 1.4
 
index 893b9c5c0cc9d5f684355608434d12b52bf51496..dda2ecc7be95b1d35548c7f0f3c4701088b80e71 100644 (file)
@@ -354,12 +354,11 @@ class RowMapping(BaseRow, typing.Mapping["_KeyType", Any]):
     as iteration of keys, values, and items::
 
         for row in result:
-            if 'a' in row._mapping:
-                print("Column 'a': %s" % row._mapping['a'])
+            if "a" in row._mapping:
+                print("Column 'a': %s" % row._mapping["a"])
 
             print("Column b: %s" % row._mapping[table.c.b])
 
-
     .. versionadded:: 1.4 The :class:`.RowMapping` object replaces the
        mapping-like access previously provided by a database result row,
        which now seeks to behave mostly like a named tuple.
index 7775a2ed88d9d9d5a4100a9881d478ca02e81808..7eb08df61a24958d40846a2b34af518e992e0949 100644 (file)
@@ -122,7 +122,9 @@ class URL(NamedTuple):
        for keys and either strings or tuples of strings for values, e.g.::
 
             >>> from sqlalchemy.engine import make_url
-            >>> url = make_url("postgresql+psycopg2://user:pass@host/dbname?alt_host=host1&alt_host=host2&ssl_cipher=%2Fpath%2Fto%2Fcrt")
+            >>> url = make_url(
+            ...     "postgresql+psycopg2://user:pass@host/dbname?alt_host=host1&alt_host=host2&ssl_cipher=%2Fpath%2Fto%2Fcrt"
+            ... )
             >>> url.query
             immutabledict({'alt_host': ('host1', 'host2'), 'ssl_cipher': '/path/to/crt'})
 
@@ -371,7 +373,9 @@ class URL(NamedTuple):
 
             >>> from sqlalchemy.engine import make_url
             >>> url = make_url("postgresql+psycopg2://user:pass@host/dbname")
-            >>> url = url.update_query_string("alt_host=host1&alt_host=host2&ssl_cipher=%2Fpath%2Fto%2Fcrt")
+            >>> url = url.update_query_string(
+            ...     "alt_host=host1&alt_host=host2&ssl_cipher=%2Fpath%2Fto%2Fcrt"
+            ... )
             >>> str(url)
             'postgresql+psycopg2://user:pass@host/dbname?alt_host=host1&alt_host=host2&ssl_cipher=%2Fpath%2Fto%2Fcrt'
 
@@ -407,7 +411,13 @@ class URL(NamedTuple):
 
             >>> from sqlalchemy.engine import make_url
             >>> url = make_url("postgresql+psycopg2://user:pass@host/dbname")
-            >>> url = url.update_query_pairs([("alt_host", "host1"), ("alt_host", "host2"), ("ssl_cipher", "/path/to/crt")])
+            >>> url = url.update_query_pairs(
+            ...     [
+            ...         ("alt_host", "host1"),
+            ...         ("alt_host", "host2"),
+            ...         ("ssl_cipher", "/path/to/crt"),
+            ...     ]
+            ... )
             >>> str(url)
             'postgresql+psycopg2://user:pass@host/dbname?alt_host=host1&alt_host=host2&ssl_cipher=%2Fpath%2Fto%2Fcrt'
 
@@ -489,7 +499,9 @@ class URL(NamedTuple):
 
             >>> from sqlalchemy.engine import make_url
             >>> url = make_url("postgresql+psycopg2://user:pass@host/dbname")
-            >>> url = url.update_query_dict({"alt_host": ["host1", "host2"], "ssl_cipher": "/path/to/crt"})
+            >>> url = url.update_query_dict(
+            ...     {"alt_host": ["host1", "host2"], "ssl_cipher": "/path/to/crt"}
+            ... )
             >>> str(url)
             'postgresql+psycopg2://user:pass@host/dbname?alt_host=host1&alt_host=host2&ssl_cipher=%2Fpath%2Fto%2Fcrt'
 
@@ -527,14 +539,14 @@ class URL(NamedTuple):
 
         E.g.::
 
-            url = url.difference_update_query(['foo', 'bar'])
+            url = url.difference_update_query(["foo", "bar"])
 
         Equivalent to using :meth:`_engine.URL.set` as follows::
 
             url = url.set(
                 query={
                     key: url.query[key]
-                    for key in set(url.query).difference(['foo', 'bar'])
+                    for key in set(url.query).difference(["foo", "bar"])
                 }
             )
 
@@ -583,7 +595,9 @@ class URL(NamedTuple):
 
 
             >>> from sqlalchemy.engine import make_url
-            >>> url = make_url("postgresql+psycopg2://user:pass@host/dbname?alt_host=host1&alt_host=host2&ssl_cipher=%2Fpath%2Fto%2Fcrt")
+            >>> url = make_url(
+            ...     "postgresql+psycopg2://user:pass@host/dbname?alt_host=host1&alt_host=host2&ssl_cipher=%2Fpath%2Fto%2Fcrt"
+            ... )
             >>> url.query
             immutabledict({'alt_host': ('host1', 'host2'), 'ssl_cipher': '/path/to/crt'})
             >>> url.normalized_query
index 230ec6986678565f3e71ea89920b8c22e0f871eb..f528d74f69fc8114228479a284979a8dea796021 100644 (file)
@@ -51,15 +51,14 @@ def listen(
         from sqlalchemy import event
         from sqlalchemy.schema import UniqueConstraint
 
+
         def unique_constraint_name(const, table):
-            const.name = "uq_%s_%s" % (
-                table.name,
-                list(const.columns)[0].name
-            )
+            const.name = "uq_%s_%s" % (table.name, list(const.columns)[0].name)
+
+
         event.listen(
-                UniqueConstraint,
-                "after_parent_attach",
-                unique_constraint_name)
+            UniqueConstraint, "after_parent_attach", unique_constraint_name
+        )
 
     :param bool insert: The default behavior for event handlers is to append
       the decorated user defined function to an internal list of registered
@@ -139,12 +138,10 @@ def listens_for(
         from sqlalchemy import event
         from sqlalchemy.schema import UniqueConstraint
 
+
         @event.listens_for(UniqueConstraint, "after_parent_attach")
         def unique_constraint_name(const, table):
-            const.name = "uq_%s_%s" % (
-                table.name,
-                list(const.columns)[0].name
-            )
+            const.name = "uq_%s_%s" % (table.name, list(const.columns)[0].name)
 
     A given function can also be invoked for only the first invocation
     of the event using the ``once`` argument::
@@ -153,7 +150,6 @@ def listens_for(
         def on_config():
             do_config()
 
-
     .. warning:: The ``once`` argument does not imply automatic de-registration
        of the listener function after it has been invoked a first time; a
        listener entry will remain associated with the target object.
@@ -189,6 +185,7 @@ def remove(target: Any, identifier: str, fn: Callable[..., Any]) -> None:
         def my_listener_function(*arg):
             pass
 
+
         # ... it's removed like this
         event.remove(SomeMappedClass, "before_insert", my_listener_function)
 
index 7d7eff3606cc477017a8bfc62f999faa79698a6e..ced87df4b2d28b2b8192820f953051b146268272 100644 (file)
@@ -432,14 +432,16 @@ class DontWrapMixin:
 
         from sqlalchemy.exc import DontWrapMixin
 
+
         class MyCustomException(Exception, DontWrapMixin):
             pass
 
+
         class MySpecialType(TypeDecorator):
             impl = String
 
             def process_bind_param(self, value, dialect):
-                if value == 'invalid':
+                if value == "invalid":
                     raise MyCustomException("invalid!")
 
     """
index 5b033f735da46d5753fabd5c96d51f8186e462f7..52ba46b4d7a49911697e9cc11292a52792849608 100644 (file)
@@ -458,7 +458,7 @@ class AssociationProxy(
             class User(Base):
                 # ...
 
-                keywords = association_proxy('kws', 'keyword')
+                keywords = association_proxy("kws", "keyword")
 
         If we access this :class:`.AssociationProxy` from
         :attr:`_orm.Mapper.all_orm_descriptors`, and we want to view the
@@ -778,9 +778,9 @@ class AssociationProxyInstance(SQLORMOperations[_T]):
         :attr:`.AssociationProxyInstance.remote_attr` attributes separately::
 
             stmt = (
-                select(Parent).
-                join(Parent.proxied.local_attr).
-                join(Parent.proxied.remote_attr)
+                select(Parent)
+                .join(Parent.proxied.local_attr)
+                .join(Parent.proxied.remote_attr)
             )
 
         A future release may seek to provide a more succinct join pattern
index 9899364d1ff09a32765efe2410aaffe09dd09ed6..e534424c0f48865d31d04821386c1ba180b146f6 100644 (file)
@@ -224,7 +224,9 @@ def asyncstartablecontext(
     ``@contextlib.asynccontextmanager`` supports, and the usage pattern
     is different as well.
 
-    Typical usage::
+    Typical usage:
+
+    .. sourcecode:: text
 
         @asyncstartablecontext
         async def some_async_generator(<arguments>):
index 0b572d426a274e6d13d329fff30c33c916e08635..68de8112d03a967a30b2bc78f9e3b4985bc26420 100644 (file)
@@ -201,6 +201,7 @@ class AsyncConnection(
     method of :class:`_asyncio.AsyncEngine`::
 
         from sqlalchemy.ext.asyncio import create_async_engine
+
         engine = create_async_engine("postgresql+asyncpg://user:pass@host/dbname")
 
         async with engine.connect() as conn:
@@ -548,7 +549,7 @@ class AsyncConnection(
 
         E.g.::
 
-            result = await conn.stream(stmt):
+            result = await conn.stream(stmt)
             async for row in result:
                 print(f"{row}")
 
@@ -825,7 +826,7 @@ class AsyncConnection(
         *arg: _P.args,
         **kw: _P.kwargs,
     ) -> _T:
-        """Invoke the given synchronous (i.e. not async) callable,
+        '''Invoke the given synchronous (i.e. not async) callable,
         passing a synchronous-style :class:`_engine.Connection` as the first
         argument.
 
@@ -835,26 +836,26 @@ class AsyncConnection(
         E.g.::
 
             def do_something_with_core(conn: Connection, arg1: int, arg2: str) -> str:
-                '''A synchronous function that does not require awaiting
+                """A synchronous function that does not require awaiting
 
                 :param conn: a Core SQLAlchemy Connection, used synchronously
 
                 :return: an optional return value is supported
 
-                '''
-                conn.execute(
-                    some_table.insert().values(int_col=arg1, str_col=arg2)
-                )
+                """
+                conn.execute(some_table.insert().values(int_col=arg1, str_col=arg2))
                 return "success"
 
 
             async def do_something_async(async_engine: AsyncEngine) -> None:
-                '''an async function that uses awaiting'''
+                """an async function that uses awaiting"""
 
                 async with async_engine.begin() as async_conn:
                     # run do_something_with_core() with a sync-style
                     # Connection, proxied into an awaitable
-                    return_code = await async_conn.run_sync(do_something_with_core, 5, "strval")
+                    return_code = await async_conn.run_sync(
+                        do_something_with_core, 5, "strval"
+                    )
                     print(return_code)
 
         This method maintains the asyncio event loop all the way through
@@ -885,7 +886,7 @@ class AsyncConnection(
 
             :ref:`session_run_sync`
 
-        """  # noqa: E501
+        '''  # noqa: E501
 
         return await greenlet_spawn(
             fn, self._proxied, *arg, _require_await=False, **kw
@@ -1004,6 +1005,7 @@ class AsyncEngine(ProxyComparable[Engine], AsyncConnectable):
     :func:`_asyncio.create_async_engine` function::
 
         from sqlalchemy.ext.asyncio import create_async_engine
+
         engine = create_async_engine("postgresql+asyncpg://user:pass@host/dbname")
 
     .. versionadded:: 1.4
@@ -1060,7 +1062,6 @@ class AsyncEngine(ProxyComparable[Engine], AsyncConnectable):
                 )
                 await conn.execute(text("my_special_procedure(5)"))
 
-
         """
         conn = self.connect()
 
index 39731c47fb88093bc8f32d8561b4c7d772f8d516..952e7e3f8cee5791397f453a5b4a571b95870baa 100644 (file)
@@ -368,7 +368,7 @@ class async_scoped_session(Generic[_AS]):
         object is entered::
 
             async with async_session.begin():
-                # .. ORM transaction is begun
+                ...  # ORM transaction is begun
 
         Note that database IO will not normally occur when the session-level
         transaction is begun, as database transactions begin on an
@@ -812,28 +812,28 @@ class async_scoped_session(Generic[_AS]):
 
             # construct async engines w/ async drivers
             engines = {
-                'leader':create_async_engine("sqlite+aiosqlite:///leader.db"),
-                'other':create_async_engine("sqlite+aiosqlite:///other.db"),
-                'follower1':create_async_engine("sqlite+aiosqlite:///follower1.db"),
-                'follower2':create_async_engine("sqlite+aiosqlite:///follower2.db"),
+                "leader": create_async_engine("sqlite+aiosqlite:///leader.db"),
+                "other": create_async_engine("sqlite+aiosqlite:///other.db"),
+                "follower1": create_async_engine("sqlite+aiosqlite:///follower1.db"),
+                "follower2": create_async_engine("sqlite+aiosqlite:///follower2.db"),
             }
 
+
             class RoutingSession(Session):
                 def get_bind(self, mapper=None, clause=None, **kw):
                     # within get_bind(), return sync engines
                     if mapper and issubclass(mapper.class_, MyOtherClass):
-                        return engines['other'].sync_engine
+                        return engines["other"].sync_engine
                     elif self._flushing or isinstance(clause, (Update, Delete)):
-                        return engines['leader'].sync_engine
+                        return engines["leader"].sync_engine
                     else:
                         return engines[
-                            random.choice(['follower1','follower2'])
+                            random.choice(["follower1", "follower2"])
                         ].sync_engine
 
+
             # apply to AsyncSession using sync_session_class
-            AsyncSessionMaker = async_sessionmaker(
-                sync_session_class=RoutingSession
-            )
+            AsyncSessionMaker = async_sessionmaker(sync_session_class=RoutingSession)
 
         The :meth:`_orm.Session.get_bind` method is called in a non-asyncio,
         implicitly non-blocking context in the same manner as ORM event hooks
index 99094ef8589d6e1f3b53730bd8b930c99d712c3b..022de0d8d03875afe29c85de056a314415e3b9c0 100644 (file)
@@ -344,7 +344,7 @@ class AsyncSession(ReversibleProxy[Session]):
         *arg: _P.args,
         **kw: _P.kwargs,
     ) -> _T:
-        """Invoke the given synchronous (i.e. not async) callable,
+        '''Invoke the given synchronous (i.e. not async) callable,
         passing a synchronous-style :class:`_orm.Session` as the first
         argument.
 
@@ -354,25 +354,27 @@ class AsyncSession(ReversibleProxy[Session]):
         E.g.::
 
             def some_business_method(session: Session, param: str) -> str:
-                '''A synchronous function that does not require awaiting
+                """A synchronous function that does not require awaiting
 
                 :param session: a SQLAlchemy Session, used synchronously
 
                 :return: an optional return value is supported
 
-                '''
+                """
                 session.add(MyObject(param=param))
                 session.flush()
                 return "success"
 
 
             async def do_something_async(async_engine: AsyncEngine) -> None:
-                '''an async function that uses awaiting'''
+                """an async function that uses awaiting"""
 
                 with AsyncSession(async_engine) as async_session:
                     # run some_business_method() with a sync-style
                     # Session, proxied into an awaitable
-                    return_code = await async_session.run_sync(some_business_method, param="param1")
+                    return_code = await async_session.run_sync(
+                        some_business_method, param="param1"
+                    )
                     print(return_code)
 
         This method maintains the asyncio event loop all the way through
@@ -394,7 +396,7 @@ class AsyncSession(ReversibleProxy[Session]):
             :meth:`.AsyncConnection.run_sync`
 
             :ref:`session_run_sync`
-        """  # noqa: E501
+        '''  # noqa: E501
 
         return await greenlet_spawn(
             fn, self.sync_session, *arg, _require_await=False, **kw
@@ -880,28 +882,28 @@ class AsyncSession(ReversibleProxy[Session]):
 
             # construct async engines w/ async drivers
             engines = {
-                'leader':create_async_engine("sqlite+aiosqlite:///leader.db"),
-                'other':create_async_engine("sqlite+aiosqlite:///other.db"),
-                'follower1':create_async_engine("sqlite+aiosqlite:///follower1.db"),
-                'follower2':create_async_engine("sqlite+aiosqlite:///follower2.db"),
+                "leader": create_async_engine("sqlite+aiosqlite:///leader.db"),
+                "other": create_async_engine("sqlite+aiosqlite:///other.db"),
+                "follower1": create_async_engine("sqlite+aiosqlite:///follower1.db"),
+                "follower2": create_async_engine("sqlite+aiosqlite:///follower2.db"),
             }
 
+
             class RoutingSession(Session):
                 def get_bind(self, mapper=None, clause=None, **kw):
                     # within get_bind(), return sync engines
                     if mapper and issubclass(mapper.class_, MyOtherClass):
-                        return engines['other'].sync_engine
+                        return engines["other"].sync_engine
                     elif self._flushing or isinstance(clause, (Update, Delete)):
-                        return engines['leader'].sync_engine
+                        return engines["leader"].sync_engine
                     else:
                         return engines[
-                            random.choice(['follower1','follower2'])
+                            random.choice(["follower1", "follower2"])
                         ].sync_engine
 
+
             # apply to AsyncSession using sync_session_class
-            AsyncSessionMaker = async_sessionmaker(
-                sync_session_class=RoutingSession
-            )
+            AsyncSessionMaker = async_sessionmaker(sync_session_class=RoutingSession)
 
         The :meth:`_orm.Session.get_bind` method is called in a non-asyncio,
         implicitly non-blocking context in the same manner as ORM event hooks
@@ -957,7 +959,7 @@ class AsyncSession(ReversibleProxy[Session]):
         object is entered::
 
             async with async_session.begin():
-                # .. ORM transaction is begun
+                ...  # ORM transaction is begun
 
         Note that database IO will not normally occur when the session-level
         transaction is begun, as database transactions begin on an
@@ -1634,16 +1636,22 @@ class async_sessionmaker(Generic[_AS]):
         from sqlalchemy.ext.asyncio import AsyncSession
         from sqlalchemy.ext.asyncio import async_sessionmaker
 
-        async def run_some_sql(async_session: async_sessionmaker[AsyncSession]) -> None:
+
+        async def run_some_sql(
+            async_session: async_sessionmaker[AsyncSession],
+        ) -> None:
             async with async_session() as session:
                 session.add(SomeObject(data="object"))
                 session.add(SomeOtherObject(name="other object"))
                 await session.commit()
 
+
         async def main() -> None:
             # an AsyncEngine, which the AsyncSession will use for connection
             # resources
-            engine = create_async_engine('postgresql+asyncpg://scott:tiger@localhost/')
+            engine = create_async_engine(
+                "postgresql+asyncpg://scott:tiger@localhost/"
+            )
 
             # create a reusable factory for new AsyncSession instances
             async_session = async_sessionmaker(engine)
@@ -1742,7 +1750,6 @@ class async_sessionmaker(Generic[_AS]):
 
                 # commits transaction, closes session
 
-
         """
 
         session = self()
@@ -1775,7 +1782,7 @@ class async_sessionmaker(Generic[_AS]):
 
             AsyncSession = async_sessionmaker(some_engine)
 
-            AsyncSession.configure(bind=create_async_engine('sqlite+aiosqlite://'))
+            AsyncSession.configure(bind=create_async_engine("sqlite+aiosqlite://"))
         """  # noqa E501
 
         self.kw.update(new_kw)
index 07d49f17c86c826eac98a465ecb71ccf1ec742be..74b36b62e1149882d7e1ee420439a2e1a4128271 100644 (file)
@@ -192,8 +192,12 @@ the schema name ``default`` is used if no schema is present::
     Base = automap_base()
 
     Base.prepare(e, modulename_for_table=module_name_for_table)
-    Base.prepare(e, schema="test_schema", modulename_for_table=module_name_for_table)
-    Base.prepare(e, schema="test_schema_2", modulename_for_table=module_name_for_table)
+    Base.prepare(
+        e, schema="test_schema", modulename_for_table=module_name_for_table
+    )
+    Base.prepare(
+        e, schema="test_schema_2", modulename_for_table=module_name_for_table
+    )
 
 The same named-classes are organized into a hierarchical collection available
 at :attr:`.AutomapBase.by_module`.  This collection is traversed using the
@@ -550,7 +554,9 @@ SQLAlchemy can guess::
         id = Column(Integer, ForeignKey("employee.id"), primary_key=True)
         favorite_employee_id = Column(Integer, ForeignKey("employee.id"))
 
-        favorite_employee = relationship(Employee, foreign_keys=favorite_employee_id)
+        favorite_employee = relationship(
+            Employee, foreign_keys=favorite_employee_id
+        )
 
         __mapper_args__ = {
             "polymorphic_identity": "engineer",
@@ -587,12 +593,16 @@ and will emit an error on mapping.
 
 We can resolve this conflict by using an underscore as follows::
 
-    def name_for_scalar_relationship(base, local_cls, referred_cls, constraint):
+    def name_for_scalar_relationship(
+        base, local_cls, referred_cls, constraint
+    ):
         name = referred_cls.__name__.lower()
         local_table = local_cls.__table__
         if name in local_table.columns:
             newname = name + "_"
-            warnings.warn("Already detected name %s present.  using %s" % (name, newname))
+            warnings.warn(
+                "Already detected name %s present.  using %s" % (name, newname)
+            )
             return newname
         return name
 
index 60f7ae664471aedb3cb95b3156393ee29d53080c..c9dd63a87f8c2496d528b8e641933c47d995a107 100644 (file)
@@ -258,23 +258,19 @@ class BakedQuery:
         is passed to the lambda::
 
             sub_bq = self.bakery(lambda s: s.query(User.name))
-            sub_bq += lambda q: q.filter(
-                User.id == Address.user_id).correlate(Address)
+            sub_bq += lambda q: q.filter(User.id == Address.user_id).correlate(Address)
 
             main_bq = self.bakery(lambda s: s.query(Address))
-            main_bq += lambda q: q.filter(
-                sub_bq.to_query(q).exists())
+            main_bq += lambda q: q.filter(sub_bq.to_query(q).exists())
 
         In the case where the subquery is used in the first callable against
         a :class:`.Session`, the :class:`.Session` is also accepted::
 
             sub_bq = self.bakery(lambda s: s.query(User.name))
-            sub_bq += lambda q: q.filter(
-                User.id == Address.user_id).correlate(Address)
+            sub_bq += lambda q: q.filter(User.id == Address.user_id).correlate(Address)
 
             main_bq = self.bakery(
-                lambda s: s.query(
-                Address.id, sub_bq.to_query(q).scalar_subquery())
+                lambda s: s.query(Address.id, sub_bq.to_query(q).scalar_subquery())
             )
 
         :param query_or_session: a :class:`_query.Query` object or a class
@@ -285,7 +281,7 @@ class BakedQuery:
          .. versionadded:: 1.3
 
 
-        """
+        """  # noqa: E501
 
         if isinstance(query_or_session, Session):
             session = query_or_session
index 9d4be255c0d41d3f0775e1b9817eec79f6898826..199329d5b45bd0c22a0db9d658f11d8262c0410b 100644 (file)
@@ -17,9 +17,11 @@ more callables defining its compilation::
     from sqlalchemy.ext.compiler import compiles
     from sqlalchemy.sql.expression import ColumnClause
 
+
     class MyColumn(ColumnClause):
         inherit_cache = True
 
+
     @compiles(MyColumn)
     def compile_mycolumn(element, compiler, **kw):
         return "[%s]" % element.name
@@ -31,10 +33,12 @@ when the object is compiled to a string::
 
     from sqlalchemy import select
 
-    s = select(MyColumn('x'), MyColumn('y'))
+    s = select(MyColumn("x"), MyColumn("y"))
     print(str(s))
 
-Produces::
+Produces:
+
+.. sourcecode:: sql
 
     SELECT [x], [y]
 
@@ -46,6 +50,7 @@ invoked for the dialect in use::
 
     from sqlalchemy.schema import DDLElement
 
+
     class AlterColumn(DDLElement):
         inherit_cache = False
 
@@ -53,14 +58,18 @@ invoked for the dialect in use::
             self.column = column
             self.cmd = cmd
 
+
     @compiles(AlterColumn)
     def visit_alter_column(element, compiler, **kw):
         return "ALTER COLUMN %s ..." % element.column.name
 
-    @compiles(AlterColumn, 'postgresql')
+
+    @compiles(AlterColumn, "postgresql")
     def visit_alter_column(element, compiler, **kw):
-        return "ALTER TABLE %s ALTER COLUMN %s ..." % (element.table.name,
-                                                       element.column.name)
+        return "ALTER TABLE %s ALTER COLUMN %s ..." % (
+            element.table.name,
+            element.column.name,
+        )
 
The second ``visit_alter_column`` will be invoked when any ``postgresql``
dialect is used.
@@ -80,6 +89,7 @@ method which can be used for compilation of embedded attributes::
 
     from sqlalchemy.sql.expression import Executable, ClauseElement
 
+
     class InsertFromSelect(Executable, ClauseElement):
         inherit_cache = False
 
@@ -87,20 +97,27 @@ method which can be used for compilation of embedded attributes::
             self.table = table
             self.select = select
 
+
     @compiles(InsertFromSelect)
     def visit_insert_from_select(element, compiler, **kw):
         return "INSERT INTO %s (%s)" % (
             compiler.process(element.table, asfrom=True, **kw),
-            compiler.process(element.select, **kw)
+            compiler.process(element.select, **kw),
         )
 
-    insert = InsertFromSelect(t1, select(t1).where(t1.c.x>5))
+
+    insert = InsertFromSelect(t1, select(t1).where(t1.c.x > 5))
     print(insert)
 
-Produces::
+Produces (formatted for readability):
+
+.. sourcecode:: sql
 
-    "INSERT INTO mytable (SELECT mytable.x, mytable.y, mytable.z
-                          FROM mytable WHERE mytable.x > :x_1)"
+    INSERT INTO mytable (
+        SELECT mytable.x, mytable.y, mytable.z
+        FROM mytable
+        WHERE mytable.x > :x_1
+    )
 
 .. note::
 
@@ -120,11 +137,10 @@ below where we generate a CHECK constraint that embeds a SQL expression::
 
     @compiles(MyConstraint)
     def compile_my_constraint(constraint, ddlcompiler, **kw):
-        kw['literal_binds'] = True
+        kw["literal_binds"] = True
         return "CONSTRAINT %s CHECK (%s)" % (
             constraint.name,
-            ddlcompiler.sql_compiler.process(
-                constraint.expression, **kw)
+            ddlcompiler.sql_compiler.process(constraint.expression, **kw),
         )
 
 Above, we add an additional flag to the process step as called by
@@ -152,6 +168,7 @@ an endless loop.   Such as, to add "prefix" to all insert statements::
 
     from sqlalchemy.sql.expression import Insert
 
+
     @compiles(Insert)
     def prefix_inserts(insert, compiler, **kw):
         return compiler.visit_insert(insert.prefix_with("some prefix"), **kw)
@@ -167,17 +184,16 @@ Changing Compilation of Types
 ``compiler`` works for types, too, such as below where we implement the
 MS-SQL specific 'max' keyword for ``String``/``VARCHAR``::
 
-    @compiles(String, 'mssql')
-    @compiles(VARCHAR, 'mssql')
+    @compiles(String, "mssql")
+    @compiles(VARCHAR, "mssql")
     def compile_varchar(element, compiler, **kw):
-        if element.length == 'max':
+        if element.length == "max":
             return "VARCHAR('max')"
         else:
             return compiler.visit_VARCHAR(element, **kw)
 
-    foo = Table('foo', metadata,
-        Column('data', VARCHAR('max'))
-    )
+
+    foo = Table("foo", metadata, Column("data", VARCHAR("max")))
 
 Subclassing Guidelines
 ======================
@@ -215,19 +231,23 @@ A synopsis is as follows:
 
       from sqlalchemy.sql.expression import FunctionElement
 
+
       class coalesce(FunctionElement):
-          name = 'coalesce'
+          name = "coalesce"
           inherit_cache = True
 
+
       @compiles(coalesce)
       def compile(element, compiler, **kw):
           return "coalesce(%s)" % compiler.process(element.clauses, **kw)
 
-      @compiles(coalesce, 'oracle')
+
+      @compiles(coalesce, "oracle")
       def compile(element, compiler, **kw):
           if len(element.clauses) > 2:
-              raise TypeError("coalesce only supports two arguments on "
-                              "Oracle Database")
+              raise TypeError(
+                  "coalesce only supports two arguments on " "Oracle Database"
+              )
           return "nvl(%s)" % compiler.process(element.clauses, **kw)
 
 * :class:`.ExecutableDDLElement` - The root of all DDL expressions,
@@ -281,6 +301,7 @@ for example to the "synopsis" example indicated previously::
     class MyColumn(ColumnClause):
         inherit_cache = True
 
+
     @compiles(MyColumn)
     def compile_mycolumn(element, compiler, **kw):
         return "[%s]" % element.name
@@ -319,11 +340,12 @@ caching::
             self.table = table
             self.select = select
 
+
     @compiles(InsertFromSelect)
     def visit_insert_from_select(element, compiler, **kw):
         return "INSERT INTO %s (%s)" % (
             compiler.process(element.table, asfrom=True, **kw),
-            compiler.process(element.select, **kw)
+            compiler.process(element.select, **kw),
         )
 
 While it is also possible that the above ``InsertFromSelect`` could be made to
@@ -359,28 +381,32 @@ For PostgreSQL and Microsoft SQL Server::
     from sqlalchemy.ext.compiler import compiles
     from sqlalchemy.types import DateTime
 
+
     class utcnow(expression.FunctionElement):
         type = DateTime()
         inherit_cache = True
 
-    @compiles(utcnow, 'postgresql')
+
+    @compiles(utcnow, "postgresql")
     def pg_utcnow(element, compiler, **kw):
         return "TIMEZONE('utc', CURRENT_TIMESTAMP)"
 
-    @compiles(utcnow, 'mssql')
+
+    @compiles(utcnow, "mssql")
     def ms_utcnow(element, compiler, **kw):
         return "GETUTCDATE()"
 
 Example usage::
 
-    from sqlalchemy import (
-                Table, Column, Integer, String, DateTime, MetaData
-            )
+    from sqlalchemy import Table, Column, Integer, String, DateTime, MetaData
+
     metadata = MetaData()
-    event = Table("event", metadata,
+    event = Table(
+        "event",
+        metadata,
         Column("id", Integer, primary_key=True),
         Column("description", String(50), nullable=False),
-        Column("timestamp", DateTime, server_default=utcnow())
+        Column("timestamp", DateTime, server_default=utcnow()),
     )
 
 "GREATEST" function
@@ -395,30 +421,30 @@ accommodates two arguments::
     from sqlalchemy.ext.compiler import compiles
     from sqlalchemy.types import Numeric
 
+
     class greatest(expression.FunctionElement):
         type = Numeric()
-        name = 'greatest'
+        name = "greatest"
         inherit_cache = True
 
+
     @compiles(greatest)
     def default_greatest(element, compiler, **kw):
         return compiler.visit_function(element)
 
-    @compiles(greatest, 'sqlite')
-    @compiles(greatest, 'mssql')
-    @compiles(greatest, 'oracle')
+
+    @compiles(greatest, "sqlite")
+    @compiles(greatest, "mssql")
+    @compiles(greatest, "oracle")
     def case_greatest(element, compiler, **kw):
         arg1, arg2 = list(element.clauses)
         return compiler.process(case((arg1 > arg2, arg1), else_=arg2), **kw)
 
 Example usage::
 
-    Session.query(Account).\
-            filter(
-                greatest(
-                    Account.checking_balance,
-                    Account.savings_balance) > 10000
-            )
+    Session.query(Account).filter(
+        greatest(Account.checking_balance, Account.savings_balance) > 10000
+    )
 
 "false" expression
 ------------------
@@ -429,16 +455,19 @@ don't have a "false" constant::
     from sqlalchemy.sql import expression
     from sqlalchemy.ext.compiler import compiles
 
+
     class sql_false(expression.ColumnElement):
         inherit_cache = True
 
+
     @compiles(sql_false)
     def default_false(element, compiler, **kw):
         return "false"
 
-    @compiles(sql_false, 'mssql')
-    @compiles(sql_false, 'mysql')
-    @compiles(sql_false, 'oracle')
+
+    @compiles(sql_false, "mssql")
+    @compiles(sql_false, "mysql")
+    @compiles(sql_false, "oracle")
     def int_false(element, compiler, **kw):
         return "0"
 
@@ -448,7 +477,7 @@ Example usage::
 
     exp = union_all(
         select(users.c.name, sql_false().label("enrolled")),
-        select(customers.c.name, customers.c.enrolled)
+        select(customers.c.name, customers.c.enrolled),
     )
 
 """
index c0f7e3405803726aa54f17700206f5ac34c1a867..4be4262d0df1d890f2e06f5cd0812bd21c50539c 100644 (file)
@@ -50,23 +50,26 @@ class ConcreteBase:
 
         from sqlalchemy.ext.declarative import ConcreteBase
 
+
         class Employee(ConcreteBase, Base):
-            __tablename__ = 'employee'
+            __tablename__ = "employee"
             employee_id = Column(Integer, primary_key=True)
             name = Column(String(50))
             __mapper_args__ = {
-                            'polymorphic_identity':'employee',
-                            'concrete':True}
+                "polymorphic_identity": "employee",
+                "concrete": True,
+            }
+
 
         class Manager(Employee):
-            __tablename__ = 'manager'
+            __tablename__ = "manager"
             employee_id = Column(Integer, primary_key=True)
             name = Column(String(50))
             manager_data = Column(String(40))
             __mapper_args__ = {
-                            'polymorphic_identity':'manager',
-                            'concrete':True}
-
+                "polymorphic_identity": "manager",
+                "concrete": True,
+            }
 
     The name of the discriminator column used by :func:`.polymorphic_union`
     defaults to the name ``type``.  To suit the use case of a mapping where an
@@ -75,7 +78,7 @@ class ConcreteBase:
     ``_concrete_discriminator_name`` attribute::
 
         class Employee(ConcreteBase, Base):
-            _concrete_discriminator_name = '_concrete_discriminator'
+            _concrete_discriminator_name = "_concrete_discriminator"
 
     .. versionadded:: 1.3.19 Added the ``_concrete_discriminator_name``
        attribute to :class:`_declarative.ConcreteBase` so that the
@@ -168,23 +171,27 @@ class AbstractConcreteBase(ConcreteBase):
         from sqlalchemy.orm import DeclarativeBase
         from sqlalchemy.ext.declarative import AbstractConcreteBase
 
+
         class Base(DeclarativeBase):
             pass
 
+
         class Employee(AbstractConcreteBase, Base):
             pass
 
+
         class Manager(Employee):
-            __tablename__ = 'manager'
+            __tablename__ = "manager"
             employee_id = Column(Integer, primary_key=True)
             name = Column(String(50))
             manager_data = Column(String(40))
 
             __mapper_args__ = {
-                'polymorphic_identity':'manager',
-                'concrete':True
+                "polymorphic_identity": "manager",
+                "concrete": True,
             }
 
+
         Base.registry.configure()
 
     The abstract base class is handled by declarative in a special way;
@@ -200,10 +207,12 @@ class AbstractConcreteBase(ConcreteBase):
 
         from sqlalchemy.ext.declarative import AbstractConcreteBase
 
+
         class Company(Base):
-            __tablename__ = 'company'
+            __tablename__ = "company"
             id = Column(Integer, primary_key=True)
 
+
         class Employee(AbstractConcreteBase, Base):
             strict_attrs = True
 
@@ -211,31 +220,31 @@ class AbstractConcreteBase(ConcreteBase):
 
             @declared_attr
             def company_id(cls):
-                return Column(ForeignKey('company.id'))
+                return Column(ForeignKey("company.id"))
 
             @declared_attr
             def company(cls):
                 return relationship("Company")
 
+
         class Manager(Employee):
-            __tablename__ = 'manager'
+            __tablename__ = "manager"
 
             name = Column(String(50))
             manager_data = Column(String(40))
 
             __mapper_args__ = {
-                'polymorphic_identity':'manager',
-                'concrete':True
+                "polymorphic_identity": "manager",
+                "concrete": True,
             }
 
+
         Base.registry.configure()
 
     When we make use of our mappings however, both ``Manager`` and
     ``Employee`` will have an independently usable ``.company`` attribute::
 
-        session.execute(
-            select(Employee).filter(Employee.company.has(id=5))
-        )
+        session.execute(select(Employee).filter(Employee.company.has(id=5)))
 
     :param strict_attrs: when specified on the base class, "strict" attribute
      mode is enabled which attempts to limit ORM mapped attributes on the
@@ -366,10 +375,12 @@ class DeferredReflection:
 
         from sqlalchemy.ext.declarative import declarative_base
         from sqlalchemy.ext.declarative import DeferredReflection
+
         Base = declarative_base()
 
+
         class MyClass(DeferredReflection, Base):
-            __tablename__ = 'mytable'
+            __tablename__ = "mytable"
 
     Above, ``MyClass`` is not yet mapped.   After a series of
     classes have been defined in the above fashion, all tables
@@ -391,17 +402,22 @@ class DeferredReflection:
         class ReflectedOne(DeferredReflection, Base):
             __abstract__ = True
 
+
         class ReflectedTwo(DeferredReflection, Base):
             __abstract__ = True
 
+
         class MyClass(ReflectedOne):
-            __tablename__ = 'mytable'
+            __tablename__ = "mytable"
+
 
         class MyOtherClass(ReflectedOne):
-            __tablename__ = 'myothertable'
+            __tablename__ = "myothertable"
+
 
         class YetAnotherClass(ReflectedTwo):
-            __tablename__ = 'yetanothertable'
+            __tablename__ = "yetanothertable"
+
 
         # ... etc.
 
index 53a8f5ae7cdd103a134fc7721c8cc81bf95d158c..87e767bcd6b72c67813ce9581aa58202492fe85c 100644 (file)
@@ -128,12 +128,9 @@ class ShardedQuery(Query[_T]):
         The shard_id can be passed for a 2.0 style execution to the
         bind_arguments dictionary of :meth:`.Session.execute`::
 
-            results = session.execute(
-                stmt,
-                bind_arguments={"shard_id": "my_shard"}
-            )
+            results = session.execute(stmt, bind_arguments={"shard_id": "my_shard"})
 
-        """
+        """  # noqa: E501
         return self.execution_options(_sa_shard_id=shard_id)
 
 
@@ -385,9 +382,9 @@ class set_shard_id(ORMOption):
     the :meth:`_sql.Executable.options` method of any executable statement::
 
         stmt = (
-            select(MyObject).
-            where(MyObject.name == 'some name').
-            options(set_shard_id("shard1"))
+            select(MyObject)
+            .where(MyObject.name == "some name")
+            .options(set_shard_id("shard1"))
         )
 
     Above, the statement when invoked will limit to the "shard1" shard
index 8de6128f20d2f977dd484e84fb09512301c03272..dd39b7777eca9b0043fd67eeea08589eb90e1b57 100644 (file)
@@ -34,8 +34,9 @@ may receive the class directly, depending on context::
     class Base(DeclarativeBase):
         pass
 
+
     class Interval(Base):
-        __tablename__ = 'interval'
+        __tablename__ = "interval"
 
         id: Mapped[int] = mapped_column(primary_key=True)
         start: Mapped[int]
@@ -57,7 +58,6 @@ may receive the class directly, depending on context::
         def intersects(self, other: Interval) -> bool:
             return self.contains(other.start) | self.contains(other.end)
 
-
 Above, the ``length`` property returns the difference between the
 ``end`` and ``start`` attributes.  With an instance of ``Interval``,
 this subtraction occurs in Python, using normal Python descriptor
@@ -150,6 +150,7 @@ the absolute value function::
     from sqlalchemy import func
     from sqlalchemy import type_coerce
 
+
     class Interval(Base):
         # ...
 
@@ -214,6 +215,7 @@ example below that illustrates the use of :meth:`.hybrid_property.setter` and
 
     # correct use, however is not accepted by pep-484 tooling
 
+
     class Interval(Base):
         # ...
 
@@ -256,6 +258,7 @@ a single decorator under one name::
 
     # correct use which is also accepted by pep-484 tooling
 
+
     class Interval(Base):
         # ...
 
@@ -330,6 +333,7 @@ expression is used as the column that's the target of the SET.  If our
 ``Interval.start``, this could be substituted directly::
 
     from sqlalchemy import update
+
     stmt = update(Interval).values({Interval.start_point: 10})
 
 However, when using a composite hybrid like ``Interval.length``, this
@@ -340,6 +344,7 @@ A handler that works similarly to our setter would be::
 
     from typing import List, Tuple, Any
 
+
     class Interval(Base):
         # ...
 
@@ -352,10 +357,10 @@ A handler that works similarly to our setter would be::
             self.end = self.start + value
 
         @length.inplace.update_expression
-        def _length_update_expression(cls, value: Any) -> List[Tuple[Any, Any]]:
-            return [
-                (cls.end, cls.start + value)
-            ]
+        def _length_update_expression(
+            cls, value: Any
+        ) -> List[Tuple[Any, Any]]:
+            return [(cls.end, cls.start + value)]
 
 Above, if we use ``Interval.length`` in an UPDATE expression, we get
 a hybrid SET expression:
@@ -412,15 +417,16 @@ mapping which relates a ``User`` to a ``SavingsAccount``::
 
 
     class SavingsAccount(Base):
-        __tablename__ = 'account'
+        __tablename__ = "account"
         id: Mapped[int] = mapped_column(primary_key=True)
-        user_id: Mapped[int] = mapped_column(ForeignKey('user.id'))
+        user_id: Mapped[int] = mapped_column(ForeignKey("user.id"))
         balance: Mapped[Decimal] = mapped_column(Numeric(15, 5))
 
         owner: Mapped[User] = relationship(back_populates="accounts")
 
+
     class User(Base):
-        __tablename__ = 'user'
+        __tablename__ = "user"
         id: Mapped[int] = mapped_column(primary_key=True)
         name: Mapped[str] = mapped_column(String(100))
 
@@ -448,7 +454,10 @@ mapping which relates a ``User`` to a ``SavingsAccount``::
         @balance.inplace.expression
         @classmethod
         def _balance_expression(cls) -> SQLColumnExpression[Optional[Decimal]]:
-            return cast("SQLColumnExpression[Optional[Decimal]]", SavingsAccount.balance)
+            return cast(
+                "SQLColumnExpression[Optional[Decimal]]",
+                SavingsAccount.balance,
+            )
 
 The above hybrid property ``balance`` works with the first
 ``SavingsAccount`` entry in the list of accounts for this user.   The
@@ -471,8 +480,11 @@ be used in an appropriate context such that an appropriate join to
 .. sourcecode:: pycon+sql
 
     >>> from sqlalchemy import select
-    >>> print(select(User, User.balance).
-    ...       join(User.accounts).filter(User.balance > 5000))
+    >>> print(
+    ...     select(User, User.balance)
+    ...     .join(User.accounts)
+    ...     .filter(User.balance > 5000)
+    ... )
     {printsql}SELECT "user".id AS user_id, "user".name AS user_name,
     account.balance AS account_balance
     FROM "user" JOIN account ON "user".id = account.user_id
@@ -487,8 +499,11 @@ would use an outer join:
 
     >>> from sqlalchemy import select
     >>> from sqlalchemy import or_
-    >>> print (select(User, User.balance).outerjoin(User.accounts).
-    ...         filter(or_(User.balance < 5000, User.balance == None)))
+    >>> print(
+    ...     select(User, User.balance)
+    ...     .outerjoin(User.accounts)
+    ...     .filter(or_(User.balance < 5000, User.balance == None))
+    ... )
     {printsql}SELECT "user".id AS user_id, "user".name AS user_name,
     account.balance AS account_balance
     FROM "user" LEFT OUTER JOIN account ON "user".id = account.user_id
@@ -528,15 +543,16 @@ we can adjust our ``SavingsAccount`` example to aggregate the balances for
 
 
     class SavingsAccount(Base):
-        __tablename__ = 'account'
+        __tablename__ = "account"
         id: Mapped[int] = mapped_column(primary_key=True)
-        user_id: Mapped[int] = mapped_column(ForeignKey('user.id'))
+        user_id: Mapped[int] = mapped_column(ForeignKey("user.id"))
         balance: Mapped[Decimal] = mapped_column(Numeric(15, 5))
 
         owner: Mapped[User] = relationship(back_populates="accounts")
 
+
     class User(Base):
-        __tablename__ = 'user'
+        __tablename__ = "user"
         id: Mapped[int] = mapped_column(primary_key=True)
         name: Mapped[str] = mapped_column(String(100))
 
@@ -546,7 +562,9 @@ we can adjust our ``SavingsAccount`` example to aggregate the balances for
 
         @hybrid_property
         def balance(self) -> Decimal:
-            return sum((acc.balance for acc in self.accounts), start=Decimal("0"))
+            return sum(
+                (acc.balance for acc in self.accounts), start=Decimal("0")
+            )
 
         @balance.inplace.expression
         @classmethod
@@ -557,7 +575,6 @@ we can adjust our ``SavingsAccount`` example to aggregate the balances for
                 .label("total_balance")
             )
 
-
 The above recipe will give us the ``balance`` column which renders
 a correlated SELECT:
 
@@ -604,6 +621,7 @@ named ``word_insensitive``::
     from sqlalchemy.orm import Mapped
     from sqlalchemy.orm import mapped_column
 
+
     class Base(DeclarativeBase):
         pass
 
@@ -612,8 +630,9 @@ named ``word_insensitive``::
         def __eq__(self, other: Any) -> ColumnElement[bool]:  # type: ignore[override]  # noqa: E501
             return func.lower(self.__clause_element__()) == func.lower(other)
 
+
     class SearchWord(Base):
-        __tablename__ = 'searchword'
+        __tablename__ = "searchword"
 
         id: Mapped[int] = mapped_column(primary_key=True)
         word: Mapped[str]
@@ -675,6 +694,7 @@ how the standard Python ``@property`` object works::
         def _name_setter(self, value: str) -> None:
             self.first_name = value
 
+
     class FirstNameLastName(FirstNameOnly):
         # ...
 
@@ -684,11 +704,11 @@ how the standard Python ``@property`` object works::
         # of FirstNameOnly.name that is local to FirstNameLastName
         @FirstNameOnly.name.getter
         def name(self) -> str:
-            return self.first_name + ' ' + self.last_name
+            return self.first_name + " " + self.last_name
 
         @name.inplace.setter
         def _name_setter(self, value: str) -> None:
-            self.first_name, self.last_name = value.split(' ', 1)
+            self.first_name, self.last_name = value.split(" ", 1)
 
 Above, the ``FirstNameLastName`` class refers to the hybrid from
 ``FirstNameOnly.name`` to repurpose its getter and setter for the subclass.
@@ -709,8 +729,7 @@ reference the instrumented attribute back to the hybrid object::
         @FirstNameOnly.name.overrides.expression
         @classmethod
         def name(cls):
-            return func.concat(cls.first_name, ' ', cls.last_name)
-
+            return func.concat(cls.first_name, " ", cls.last_name)
 
 Hybrid Value Objects
 --------------------
@@ -751,7 +770,7 @@ Replacing the previous ``CaseInsensitiveComparator`` class with a new
         def __str__(self):
             return self.word
 
-        key = 'word'
+        key = "word"
         "Label to apply to Query tuple results"
 
 Above, the ``CaseInsensitiveWord`` object represents ``self.word``, which may
@@ -762,7 +781,7 @@ SQL side or Python side. Our ``SearchWord`` class can now deliver the
 ``CaseInsensitiveWord`` object unconditionally from a single hybrid call::
 
     class SearchWord(Base):
-        __tablename__ = 'searchword'
+        __tablename__ = "searchword"
         id: Mapped[int] = mapped_column(primary_key=True)
         word: Mapped[str]
 
@@ -983,6 +1002,7 @@ class hybrid_method(interfaces.InspectionAttrInfo, Generic[_P, _R]):
 
             from sqlalchemy.ext.hybrid import hybrid_method
 
+
             class SomeClass:
                 @hybrid_method
                 def value(self, x, y):
@@ -1080,6 +1100,7 @@ class hybrid_property(interfaces.InspectionAttrInfo, ORMDescriptor[_T]):
 
             from sqlalchemy.ext.hybrid import hybrid_property
 
+
             class SomeClass:
                 @hybrid_property
                 def value(self):
@@ -1158,6 +1179,7 @@ class hybrid_property(interfaces.InspectionAttrInfo, ORMDescriptor[_T]):
                 def foobar(self):
                     return self._foobar
 
+
             class SubClass(SuperClass):
                 # ...
 
@@ -1367,10 +1389,7 @@ class hybrid_property(interfaces.InspectionAttrInfo, ORMDescriptor[_T]):
                 @fullname.update_expression
                 def fullname(cls, value):
                     fname, lname = value.split(" ", 1)
-                    return [
-                        (cls.first_name, fname),
-                        (cls.last_name, lname)
-                    ]
+                    return [(cls.first_name, fname), (cls.last_name, lname)]
 
         .. versionadded:: 1.2
 
index 3c419308a696b497da5d929b424b8c9638387228..e79f613f27488b68336a6bcb648f1fb4dea2c83d 100644 (file)
@@ -36,19 +36,19 @@ as a dedicated attribute which behaves like a standalone column::
 
     Base = declarative_base()
 
+
     class Person(Base):
-        __tablename__ = 'person'
+        __tablename__ = "person"
 
         id = Column(Integer, primary_key=True)
         data = Column(JSON)
 
-        name = index_property('data', 'name')
-
+        name = index_property("data", "name")
 
 Above, the ``name`` attribute now behaves like a mapped column.   We
 can compose a new ``Person`` and set the value of ``name``::
 
-    >>> person = Person(name='Alchemist')
+    >>> person = Person(name="Alchemist")
 
 The value is now accessible::
 
@@ -59,11 +59,11 @@ Behind the scenes, the JSON field was initialized to a new blank dictionary
 and the field was set::
 
     >>> person.data
-    {"name": "Alchemist'}
+    {'name': 'Alchemist'}
 
 The field is mutable in place::
 
-    >>> person.name = 'Renamed'
+    >>> person.name = "Renamed"
     >>> person.name
     'Renamed'
     >>> person.data
@@ -87,18 +87,17 @@ A missing key will produce ``AttributeError``::
 
     >>> person = Person()
     >>> person.name
-    ...
     AttributeError: 'name'
 
 Unless you set a default value::
 
     >>> class Person(Base):
-    >>>     __tablename__ = 'person'
-    >>>
-    >>>     id = Column(Integer, primary_key=True)
-    >>>     data = Column(JSON)
-    >>>
-    >>>     name = index_property('data', 'name', default=None)  # See default
+    ...     __tablename__ = "person"
+    ...
+    ...     id = Column(Integer, primary_key=True)
+    ...     data = Column(JSON)
+    ...
+    ...     name = index_property("data", "name", default=None)  # See default
 
     >>> person = Person()
     >>> print(person.name)
@@ -111,11 +110,11 @@ an indexed SQL criteria::
 
     >>> from sqlalchemy.orm import Session
     >>> session = Session()
-    >>> query = session.query(Person).filter(Person.name == 'Alchemist')
+    >>> query = session.query(Person).filter(Person.name == "Alchemist")
 
 The above query is equivalent to::
 
-    >>> query = session.query(Person).filter(Person.data['name'] == 'Alchemist')
+    >>> query = session.query(Person).filter(Person.data["name"] == "Alchemist")
 
 Multiple :class:`.index_property` objects can be chained to produce
 multiple levels of indexing::
@@ -126,22 +125,25 @@ multiple levels of indexing::
 
     Base = declarative_base()
 
+
     class Person(Base):
-        __tablename__ = 'person'
+        __tablename__ = "person"
 
         id = Column(Integer, primary_key=True)
         data = Column(JSON)
 
-        birthday = index_property('data', 'birthday')
-        year = index_property('birthday', 'year')
-        month = index_property('birthday', 'month')
-        day = index_property('birthday', 'day')
+        birthday = index_property("data", "birthday")
+        year = index_property("birthday", "year")
+        month = index_property("birthday", "month")
+        day = index_property("birthday", "day")
 
 Above, a query such as::
 
-    q = session.query(Person).filter(Person.year == '1980')
+    q = session.query(Person).filter(Person.year == "1980")
 
-On a PostgreSQL backend, the above query will render as::
+On a PostgreSQL backend, the above query will render as:
+
+.. sourcecode:: sql
 
     SELECT person.id, person.data
     FROM person
@@ -198,13 +200,14 @@ version of :class:`_postgresql.JSON`::
 
     Base = declarative_base()
 
+
     class Person(Base):
-        __tablename__ = 'person'
+        __tablename__ = "person"
 
         id = Column(Integer, primary_key=True)
         data = Column(JSON)
 
-        age = pg_json_property('data', 'age', Integer)
+        age = pg_json_property("data", "age", Integer)
 
 The ``age`` attribute at the instance level works as before; however
 when rendering SQL, PostgreSQL's ``->>`` operator will be used
@@ -212,7 +215,9 @@ for indexed access, instead of the usual index operator of ``->``::
 
     >>> query = session.query(Person).filter(Person.age < 20)
 
-The above query will render::
+The above query will render:
+
+.. sourcecode:: sql
 
     SELECT person.id, person.data
     FROM person
index 8f58749f9468407b311671a09df5a45e2c39ce77..398351dacdd717d94e08a8815e4580cb956014ae 100644 (file)
@@ -21,6 +21,7 @@ JSON strings before being persisted::
     from sqlalchemy.types import TypeDecorator, VARCHAR
     import json
 
+
     class JSONEncodedDict(TypeDecorator):
         "Represents an immutable structure as a json-encoded string."
 
@@ -48,6 +49,7 @@ the :class:`.Mutable` mixin to a plain Python dictionary::
 
     from sqlalchemy.ext.mutable import Mutable
 
+
     class MutableDict(Mutable, dict):
         @classmethod
         def coerce(cls, key, value):
@@ -101,9 +103,11 @@ attribute. Such as, with classical table metadata::
 
     from sqlalchemy import Table, Column, Integer
 
-    my_data = Table('my_data', metadata,
-        Column('id', Integer, primary_key=True),
-        Column('data', MutableDict.as_mutable(JSONEncodedDict))
+    my_data = Table(
+        "my_data",
+        metadata,
+        Column("id", Integer, primary_key=True),
+        Column("data", MutableDict.as_mutable(JSONEncodedDict)),
     )
 
 Above, :meth:`~.Mutable.as_mutable` returns an instance of ``JSONEncodedDict``
@@ -115,13 +119,17 @@ mapping against the ``my_data`` table::
     from sqlalchemy.orm import Mapped
     from sqlalchemy.orm import mapped_column
 
+
     class Base(DeclarativeBase):
         pass
 
+
     class MyDataClass(Base):
-        __tablename__ = 'my_data'
+        __tablename__ = "my_data"
         id: Mapped[int] = mapped_column(primary_key=True)
-        data: Mapped[dict[str, str]] = mapped_column(MutableDict.as_mutable(JSONEncodedDict))
+        data: Mapped[dict[str, str]] = mapped_column(
+            MutableDict.as_mutable(JSONEncodedDict)
+        )
 
 The ``MyDataClass.data`` member will now be notified of in place changes
 to its value.
@@ -132,11 +140,11 @@ will flag the attribute as "dirty" on the parent object::
     >>> from sqlalchemy.orm import Session
 
     >>> sess = Session(some_engine)
-    >>> m1 = MyDataClass(data={'value1':'foo'})
+    >>> m1 = MyDataClass(data={"value1": "foo"})
     >>> sess.add(m1)
     >>> sess.commit()
 
-    >>> m1.data['value1'] = 'bar'
+    >>> m1.data["value1"] = "bar"
     >>> assert m1 in sess.dirty
     True
 
@@ -153,15 +161,16 @@ the need to declare it individually::
 
     MutableDict.associate_with(JSONEncodedDict)
 
+
     class Base(DeclarativeBase):
         pass
 
+
     class MyDataClass(Base):
-        __tablename__ = 'my_data'
+        __tablename__ = "my_data"
         id: Mapped[int] = mapped_column(primary_key=True)
         data: Mapped[dict[str, str]] = mapped_column(JSONEncodedDict)
 
-
 Supporting Pickling
 --------------------
 
@@ -180,7 +189,7 @@ stream::
     class MyMutableType(Mutable):
         def __getstate__(self):
             d = self.__dict__.copy()
-            d.pop('_parents', None)
+            d.pop("_parents", None)
             return d
 
 With our dictionary example, we need to return the contents of the dict itself
@@ -213,13 +222,18 @@ from within the mutable extension::
     from sqlalchemy.orm import mapped_column
     from sqlalchemy import event
 
+
     class Base(DeclarativeBase):
         pass
 
+
     class MyDataClass(Base):
-        __tablename__ = 'my_data'
+        __tablename__ = "my_data"
         id: Mapped[int] = mapped_column(primary_key=True)
-        data: Mapped[dict[str, str]] = mapped_column(MutableDict.as_mutable(JSONEncodedDict))
+        data: Mapped[dict[str, str]] = mapped_column(
+            MutableDict.as_mutable(JSONEncodedDict)
+        )
+
 
     @event.listens_for(MyDataClass.data, "modified")
     def modified_json(instance, initiator):
@@ -247,6 +261,7 @@ class introduced in :ref:`mapper_composite` to include
     import dataclasses
     from sqlalchemy.ext.mutable import MutableComposite
 
+
     @dataclasses.dataclass
     class Point(MutableComposite):
         x: int
@@ -261,7 +276,6 @@ class introduced in :ref:`mapper_composite` to include
             # alert all parents to the change
             self.changed()
 
-
 The :class:`.MutableComposite` class makes use of class mapping events to
 automatically establish listeners for any usage of :func:`_orm.composite` that
 specifies our ``Point`` type. Below, when ``Point`` is mapped to the ``Vertex``
@@ -271,6 +285,7 @@ objects to each of the ``Vertex.start`` and ``Vertex.end`` attributes::
     from sqlalchemy.orm import DeclarativeBase, Mapped
     from sqlalchemy.orm import composite, mapped_column
 
+
     class Base(DeclarativeBase):
         pass
 
@@ -280,8 +295,12 @@ objects to each of the ``Vertex.start`` and ``Vertex.end`` attributes::
 
         id: Mapped[int] = mapped_column(primary_key=True)
 
-        start: Mapped[Point] = composite(mapped_column("x1"), mapped_column("y1"))
-        end: Mapped[Point] = composite(mapped_column("x2"), mapped_column("y2"))
+        start: Mapped[Point] = composite(
+            mapped_column("x1"), mapped_column("y1")
+        )
+        end: Mapped[Point] = composite(
+            mapped_column("x2"), mapped_column("y2")
+        )
 
         def __repr__(self):
             return f"Vertex(start={self.start}, end={self.end})"
@@ -648,9 +667,11 @@ class Mutable(MutableBase):
         The type is returned, unconditionally as an instance, so that
         :meth:`.as_mutable` can be used inline::
 
-            Table('mytable', metadata,
-                Column('id', Integer, primary_key=True),
-                Column('data', MyMutableType.as_mutable(PickleType))
+            Table(
+                "mytable",
+                metadata,
+                Column("id", Integer, primary_key=True),
+                Column("data", MyMutableType.as_mutable(PickleType)),
             )
 
         Note that the returned type is always an instance, even if a class
index eb9019453d55f6d02383b4dcd49da5708de4c4c2..84eb9772491a02cf694ca1245e0be53847d555f0 100644 (file)
@@ -199,11 +199,15 @@ def apply_type_to_mapped_statement(
 
     To one that describes the final Python behavior to Mypy::
 
+    ... format: off
+
         class User(Base):
             # ...
 
             attrname : Mapped[Optional[int]] = <meaningless temp node>
 
+    ... format: on
+
     """
     left_node = lvalue.node
     assert isinstance(left_node, Var)
index 09b3c443ab06e6ec7fe27769fe3a7dcdbfaa9dd7..8826672f72e74b71b1a299d8d62b7aaea3111ca3 100644 (file)
@@ -385,9 +385,9 @@ def _infer_type_from_decl_column(
         class MyClass:
             # ...
 
-            a : Mapped[int]
+            a: Mapped[int]
 
-            b : Mapped[str]
+            b: Mapped[str]
 
             c: Mapped[int]
 
index 1a12cf38c6981da4c92b667ff7064de75041cef1..ae904b0fc6c7e08edb03ae667d2f0eef91555360 100644 (file)
@@ -26,18 +26,20 @@ displayed in order based on the value of the ``position`` column in the
 
     Base = declarative_base()
 
+
     class Slide(Base):
-        __tablename__ = 'slide'
+        __tablename__ = "slide"
 
         id = Column(Integer, primary_key=True)
         name = Column(String)
 
         bullets = relationship("Bullet", order_by="Bullet.position")
 
+
     class Bullet(Base):
-        __tablename__ = 'bullet'
+        __tablename__ = "bullet"
         id = Column(Integer, primary_key=True)
-        slide_id = Column(Integer, ForeignKey('slide.id'))
+        slide_id = Column(Integer, ForeignKey("slide.id"))
         position = Column(Integer)
         text = Column(String)
 
@@ -57,19 +59,24 @@ constructed using the :func:`.ordering_list` factory::
 
     Base = declarative_base()
 
+
     class Slide(Base):
-        __tablename__ = 'slide'
+        __tablename__ = "slide"
 
         id = Column(Integer, primary_key=True)
         name = Column(String)
 
-        bullets = relationship("Bullet", order_by="Bullet.position",
-                                collection_class=ordering_list('position'))
+        bullets = relationship(
+            "Bullet",
+            order_by="Bullet.position",
+            collection_class=ordering_list("position"),
+        )
+
 
     class Bullet(Base):
-        __tablename__ = 'bullet'
+        __tablename__ = "bullet"
         id = Column(Integer, primary_key=True)
-        slide_id = Column(Integer, ForeignKey('slide.id'))
+        slide_id = Column(Integer, ForeignKey("slide.id"))
         position = Column(Integer)
         text = Column(String)
 
@@ -151,14 +158,18 @@ def ordering_list(
 
         from sqlalchemy.ext.orderinglist import ordering_list
 
+
         class Slide(Base):
-            __tablename__ = 'slide'
+            __tablename__ = "slide"
 
             id = Column(Integer, primary_key=True)
             name = Column(String)
 
-            bullets = relationship("Bullet", order_by="Bullet.position",
-                                    collection_class=ordering_list('position'))
+            bullets = relationship(
+                "Bullet",
+                order_by="Bullet.position",
+                collection_class=ordering_list("position"),
+            )
 
     :param attr:
       Name of the mapped attribute to use for storage and retrieval of
index 130d2537474e2f7c245b368df34c5d03962db90f..9cbc61a1c36d58c88da679fd6707b372201adb90 100644 (file)
@@ -28,13 +28,17 @@ when it is deserialized.
 Usage is nearly the same as that of the standard Python pickle module::
 
     from sqlalchemy.ext.serializer import loads, dumps
+
     metadata = MetaData(bind=some_engine)
     Session = scoped_session(sessionmaker())
 
     # ... define mappers
 
-    query = Session.query(MyClass).
-        filter(MyClass.somedata=='foo').order_by(MyClass.sortkey)
+    query = (
+        Session.query(MyClass)
+        .filter(MyClass.somedata == "foo")
+        .order_by(MyClass.sortkey)
+    )
 
     # pickle the query
     serialized = dumps(query)
@@ -42,7 +46,7 @@ Usage is nearly the same as that of the standard Python pickle module::
     # unpickle.  Pass in metadata + scoped_session
     query2 = loads(serialized, metadata, Session)
 
-    print query2.all()
+    print(query2.all())
 
 Similar restrictions as when using raw pickle apply; mapped classes must be
 themselves be pickleable, meaning they are importable from a module-level
index baebc25740d7e40e0e1fc6b17959a0898664d55b..3ee2009cc12d2514fe716b39c8988ef4808afd6e 100644 (file)
@@ -829,7 +829,7 @@ def with_loader_criteria(
 
         stmt = select(User).options(
             selectinload(User.addresses),
-            with_loader_criteria(Address, Address.email_address != 'foo'))
+            with_loader_criteria(Address, Address.email_address != "foo"),
         )
 
     Above, the "selectinload" for ``User.addresses`` will apply the
@@ -839,8 +839,10 @@ def with_loader_criteria(
     ON clause of the join, in this example using :term:`1.x style`
     queries::
 
-        q = session.query(User).outerjoin(User.addresses).options(
-            with_loader_criteria(Address, Address.email_address != 'foo'))
+        q = (
+            session.query(User)
+            .outerjoin(User.addresses)
+            .options(with_loader_criteria(Address, Address.email_address != "foo"))
         )
 
     The primary purpose of :func:`_orm.with_loader_criteria` is to use
@@ -853,6 +855,7 @@ def with_loader_criteria(
 
         session = Session(bind=engine)
 
+
         @event.listens_for("do_orm_execute", session)
         def _add_filtering_criteria(execute_state):
 
@@ -864,8 +867,8 @@ def with_loader_criteria(
                 execute_state.statement = execute_state.statement.options(
                     with_loader_criteria(
                         SecurityRole,
-                        lambda cls: cls.role.in_(['some_role']),
-                        include_aliases=True
+                        lambda cls: cls.role.in_(["some_role"]),
+                        include_aliases=True,
                     )
                 )
 
@@ -902,16 +905,19 @@ def with_loader_criteria(
        ``A -> A.bs -> B``, the given :func:`_orm.with_loader_criteria`
        option will affect the way in which the JOIN is rendered::
 
-            stmt = select(A).join(A.bs).options(
-                contains_eager(A.bs),
-                with_loader_criteria(B, B.flag == 1)
+            stmt = (
+                select(A)
+                .join(A.bs)
+                .options(contains_eager(A.bs), with_loader_criteria(B, B.flag == 1))
             )
 
        Above, the given :func:`_orm.with_loader_criteria` option will
        affect the ON clause of the JOIN that is specified by
        ``.join(A.bs)``, so is applied as expected. The
        :func:`_orm.contains_eager` option has the effect that columns from
-       ``B`` are added to the columns clause::
+       ``B`` are added to the columns clause:
+
+       .. sourcecode:: sql
 
             SELECT
                 b.id, b.a_id, b.data, b.flag,
@@ -977,7 +983,7 @@ def with_loader_criteria(
 
      .. versionadded:: 1.4.0b2
 
-    """
+    """  # noqa: E501
     return LoaderCriteriaOption(
         entity_or_base,
         where_criteria,
@@ -1904,14 +1910,13 @@ def synonym(
     e.g.::
 
         class MyClass(Base):
-            __tablename__ = 'my_table'
+            __tablename__ = "my_table"
 
             id = Column(Integer, primary_key=True)
             job_status = Column(String(50))
 
             status = synonym("job_status")
 
-
     :param name: the name of the existing mapped property.  This
       can refer to the string name ORM-mapped attribute
       configured on the class, including column-bound attributes
@@ -1939,11 +1944,13 @@ def synonym(
       :paramref:`.synonym.descriptor` parameter::
 
         my_table = Table(
-            "my_table", metadata,
-            Column('id', Integer, primary_key=True),
-            Column('job_status', String(50))
+            "my_table",
+            metadata,
+            Column("id", Integer, primary_key=True),
+            Column("job_status", String(50)),
         )
 
+
         class MyClass:
             @property
             def _job_status_descriptor(self):
@@ -1951,11 +1958,15 @@ def synonym(
 
 
         mapper(
-            MyClass, my_table, properties={
+            MyClass,
+            my_table,
+            properties={
                 "job_status": synonym(
-                    "_job_status", map_column=True,
-                    descriptor=MyClass._job_status_descriptor)
-            }
+                    "_job_status",
+                    map_column=True,
+                    descriptor=MyClass._job_status_descriptor,
+                )
+            },
         )
 
       Above, the attribute named ``_job_status`` is automatically
@@ -2105,8 +2116,7 @@ def backref(name: str, **kwargs: Any) -> ORMBackrefArgument:
 
     E.g.::
 
-        'items':relationship(
-            SomeItem, backref=backref('parent', lazy='subquery'))
+        "items": relationship(SomeItem, backref=backref("parent", lazy="subquery"))
 
     The :paramref:`_orm.relationship.backref` parameter is generally
     considered to be legacy; for modern applications, using
@@ -2118,7 +2128,7 @@ def backref(name: str, **kwargs: Any) -> ORMBackrefArgument:
 
         :ref:`relationships_backref` - background on backrefs
 
-    """
+    """  # noqa: E501
 
     return (name, kwargs)
 
@@ -2379,17 +2389,21 @@ def aliased(
      aggregate functions::
 
         class UnitPrice(Base):
-            __tablename__ = 'unit_price'
+            __tablename__ = "unit_price"
             ...
             unit_id = Column(Integer)
             price = Column(Numeric)
 
-        aggregated_unit_price = Session.query(
-                                    func.sum(UnitPrice.price).label('price')
-                                ).group_by(UnitPrice.unit_id).subquery()
 
-        aggregated_unit_price = aliased(UnitPrice,
-                    alias=aggregated_unit_price, adapt_on_names=True)
+        aggregated_unit_price = (
+            Session.query(func.sum(UnitPrice.price).label("price"))
+            .group_by(UnitPrice.unit_id)
+            .subquery()
+        )
+
+        aggregated_unit_price = aliased(
+            UnitPrice, alias=aggregated_unit_price, adapt_on_names=True
+        )
 
      Above, functions on ``aggregated_unit_price`` which refer to
      ``.price`` will return the
@@ -2535,16 +2549,21 @@ def join(
     :meth:`_sql.Select.select_from` method, as in::
 
         from sqlalchemy.orm import join
-        stmt = select(User).\
-            select_from(join(User, Address, User.addresses)).\
-            filter(Address.email_address=='foo@bar.com')
+
+        stmt = (
+            select(User)
+            .select_from(join(User, Address, User.addresses))
+            .filter(Address.email_address == "foo@bar.com")
+        )
 
     In modern SQLAlchemy the above join can be written more
     succinctly as::
 
-        stmt = select(User).\
-                join(User.addresses).\
-                filter(Address.email_address=='foo@bar.com')
+        stmt = (
+            select(User)
+            .join(User.addresses)
+            .filter(Address.email_address == "foo@bar.com")
+        )
 
     .. warning:: using :func:`_orm.join` directly may not work properly
        with modern ORM options such as :func:`_orm.with_loader_criteria`.
index de02141bda20b44f4733d8ffc17a61e3c654c948..d65597238bff6b4b96e8558470de12fc01e8749b 100644 (file)
@@ -2663,7 +2663,7 @@ def init_collection(obj: object, key: str) -> CollectionAdapter:
     This function is used to provide direct access to collection internals
     for a previously unloaded attribute.  e.g.::
 
-        collection_adapter = init_collection(someobject, 'elements')
+        collection_adapter = init_collection(someobject, "elements")
         for elem in values:
             collection_adapter.append_without_event(elem)
 
index ace7542c12d23ec02cc0c58e03bf2f77469d64f1..c7c1e927e09379a8e20322c6b06789c0e5297389 100644 (file)
@@ -21,6 +21,8 @@ provided.  One is a bundle of generic decorators that map function arguments
 and return values to events::
 
   from sqlalchemy.orm.collections import collection
+
+
   class MyClass:
       # ...
 
@@ -32,7 +34,6 @@ and return values to events::
       def pop(self):
           return self.data.pop()
 
-
 The second approach is a bundle of targeted decorators that wrap appropriate
 append and remove notifiers around the mutation methods present in the
 standard Python ``list``, ``set`` and ``dict`` interfaces.  These could be
@@ -73,10 +74,11 @@ generally not needed.  Odds are, the extension method will delegate to a
 method that's already instrumented.  For example::
 
   class QueueIsh(list):
-     def push(self, item):
-         self.append(item)
-     def shift(self):
-         return self.pop(0)
+      def push(self, item):
+          self.append(item)
+
+      def shift(self):
+          return self.pop(0)
 
 There's no need to decorate these methods.  ``append`` and ``pop`` are already
 instrumented as part of the ``list`` interface.  Decorating them would fire
@@ -195,9 +197,10 @@ class collection:
     The recipe decorators all require parens, even those that take no
     arguments::
 
-        @collection.adds('entity')
+        @collection.adds("entity")
         def insert(self, position, entity): ...
 
+
         @collection.removes_return()
         def popitem(self): ...
 
@@ -217,11 +220,13 @@ class collection:
             @collection.appender
             def add(self, append): ...
 
+
             # or, equivalently
             @collection.appender
             @collection.adds(1)
             def add(self, append): ...
 
+
             # for mapping type, an 'append' may kick out a previous value
             # that occupies that slot.  consider d['a'] = 'foo'- any previous
             # value in d['a'] is discarded.
@@ -261,10 +266,11 @@ class collection:
             @collection.remover
             def zap(self, entity): ...
 
+
             # or, equivalently
             @collection.remover
             @collection.removes_return()
-            def zap(self): ...
+            def zap(self): ...
 
         If the value to remove is not present in the collection, you may
         raise an exception or return None to ignore the error.
@@ -364,7 +370,8 @@ class collection:
             @collection.adds(1)
             def push(self, item): ...
 
-            @collection.adds('entity')
+
+            @collection.adds("entity")
             def do_stuff(self, thing, entity=None): ...
 
         """
index 421a8c675a7ce16ecb0c804b4dc81575794612a5..71270c6b4eb3a409964905fb274ffdb1c426fdbe 100644 (file)
@@ -207,7 +207,7 @@ def synonym_for(
     :paramref:`.orm.synonym.descriptor` parameter::
 
         class MyClass(Base):
-            __tablename__ = 'my_table'
+            __tablename__ = "my_table"
 
             id = Column(Integer, primary_key=True)
             _job_status = Column("job_status", String(50))
@@ -373,20 +373,21 @@ class declared_attr(interfaces._MappedAttribute[_T], _declared_attr_common):
     for subclasses::
 
         class Employee(Base):
-            __tablename__ = 'employee'
+            __tablename__ = "employee"
 
             id: Mapped[int] = mapped_column(primary_key=True)
             type: Mapped[str] = mapped_column(String(50))
 
             @declared_attr.directive
             def __mapper_args__(cls) -> Dict[str, Any]:
-                if cls.__name__ == 'Employee':
+                if cls.__name__ == "Employee":
                     return {
-                            "polymorphic_on":cls.type,
-                            "polymorphic_identity":"Employee"
+                        "polymorphic_on": cls.type,
+                        "polymorphic_identity": "Employee",
                     }
                 else:
-                    return {"polymorphic_identity":cls.__name__}
+                    return {"polymorphic_identity": cls.__name__}
+
 
         class Engineer(Employee):
             pass
@@ -485,6 +486,7 @@ def declarative_mixin(cls: Type[_T]) -> Type[_T]:
         from sqlalchemy.orm import declared_attr
         from sqlalchemy.orm import declarative_mixin
 
+
         @declarative_mixin
         class MyMixin:
 
@@ -492,10 +494,11 @@ def declarative_mixin(cls: Type[_T]) -> Type[_T]:
             def __tablename__(cls):
                 return cls.__name__.lower()
 
-            __table_args__ = {'mysql_engine': 'InnoDB'}
-            __mapper_args__= {'always_refresh': True}
+            __table_args__ = {"mysql_engine": "InnoDB"}
+            __mapper_args__ = {"always_refresh": True}
+
+            id = Column(Integer, primary_key=True)
 
-            id =  Column(Integer, primary_key=True)
 
         class MyModel(MyMixin, Base):
             name = Column(String(1000))
@@ -638,10 +641,10 @@ class DeclarativeBase(
 
         from sqlalchemy.orm import DeclarativeBase
 
+
         class Base(DeclarativeBase):
             pass
 
-
     The above ``Base`` class is now usable as the base for new declarative
     mappings.  The superclass makes use of the ``__init_subclass__()``
     method to set up new classes and metaclasses aren't used.
@@ -664,11 +667,12 @@ class DeclarativeBase(
         bigint = Annotated[int, "bigint"]
         my_metadata = MetaData()
 
+
         class Base(DeclarativeBase):
             metadata = my_metadata
             type_annotation_map = {
                 str: String().with_variant(String(255), "mysql", "mariadb"),
-                bigint: BigInteger()
+                bigint: BigInteger(),
             }
 
     Class-level attributes which may be specified include:
@@ -1480,6 +1484,7 @@ class registry:
 
             Base = mapper_registry.generate_base()
 
+
             class MyClass(Base):
                 __tablename__ = "my_table"
                 id = Column(Integer, primary_key=True)
@@ -1492,6 +1497,7 @@ class registry:
 
             mapper_registry = registry()
 
+
             class Base(metaclass=DeclarativeMeta):
                 __abstract__ = True
                 registry = mapper_registry
@@ -1659,9 +1665,10 @@ class registry:
 
             mapper_registry = registry()
 
+
             @mapper_registry.mapped
             class Foo:
-                __tablename__ = 'some_table'
+                __tablename__ = "some_table"
 
                 id = Column(Integer, primary_key=True)
                 name = Column(String)
@@ -1701,15 +1708,17 @@ class registry:
 
             mapper_registry = registry()
 
+
             @mapper_registry.as_declarative_base()
             class Base:
                 @declared_attr
                 def __tablename__(cls):
                     return cls.__name__.lower()
+
                 id = Column(Integer, primary_key=True)
 
-            class MyMappedClass(Base):
-                # ...
+
+            class MyMappedClass(Base): ...
 
         All keyword arguments passed to
         :meth:`_orm.registry.as_declarative_base` are passed
@@ -1739,12 +1748,14 @@ class registry:
 
             mapper_registry = registry()
 
+
             class Foo:
-                __tablename__ = 'some_table'
+                __tablename__ = "some_table"
 
                 id = Column(Integer, primary_key=True)
                 name = Column(String)
 
+
             mapper = mapper_registry.map_declaratively(Foo)
 
         This function is more conveniently invoked indirectly via either the
@@ -1797,12 +1808,14 @@ class registry:
             my_table = Table(
                 "my_table",
                 mapper_registry.metadata,
-                Column('id', Integer, primary_key=True)
+                Column("id", Integer, primary_key=True),
             )
 
+
             class MyClass:
                 pass
 
+
             mapper_registry.map_imperatively(MyClass, my_table)
 
         See the section :ref:`orm_imperative_mapping` for complete background
@@ -1849,15 +1862,17 @@ def as_declarative(**kw: Any) -> Callable[[Type[_T]], Type[_T]]:
 
         from sqlalchemy.orm import as_declarative
 
+
         @as_declarative()
         class Base:
             @declared_attr
             def __tablename__(cls):
                 return cls.__name__.lower()
+
             id = Column(Integer, primary_key=True)
 
-        class MyMappedClass(Base):
-            # ...
+
+        class MyMappedClass(Base): ...
 
     .. seealso::
 
index 534637a48c58d00e5a59292bf8d2c1de71c2efda..c58a4cbace189d6b6651465ecb6b1ba15f9e49ac 100644 (file)
@@ -207,10 +207,12 @@ class InstanceEvents(event.Events[ClassManager[Any]]):
 
         from sqlalchemy import event
 
+
         def my_load_listener(target, context):
             print("on load!")
 
-        event.listen(SomeClass, 'load', my_load_listener)
+
+        event.listen(SomeClass, "load", my_load_listener)
 
     Available targets include:
 
@@ -456,8 +458,7 @@ class InstanceEvents(event.Events[ClassManager[Any]]):
             the existing loading context is maintained for the object after the
             event is called::
 
-                @event.listens_for(
-                    SomeClass, "load", restore_load_context=True)
+                @event.listens_for(SomeClass, "load", restore_load_context=True)
                 def on_load(instance, context):
                     instance.some_unloaded_attribute
 
@@ -492,7 +493,7 @@ class InstanceEvents(event.Events[ClassManager[Any]]):
 
             :meth:`.SessionEvents.loaded_as_persistent`
 
-        """
+        """  # noqa: E501
 
     def refresh(
         self, target: _O, context: QueryContext, attrs: Optional[Iterable[str]]
@@ -739,6 +740,7 @@ class MapperEvents(event.Events[mapperlib.Mapper[Any]]):
 
         from sqlalchemy import event
 
+
         def my_before_insert_listener(mapper, connection, target):
             # execute a stored procedure upon INSERT,
             # apply the value to the row to be inserted
@@ -746,10 +748,10 @@ class MapperEvents(event.Events[mapperlib.Mapper[Any]]):
                 text("select my_special_function(%d)" % target.special_number)
             ).scalar()
 
+
         # associate the listener function with SomeClass,
         # to execute during the "before_insert" hook
-        event.listen(
-            SomeClass, 'before_insert', my_before_insert_listener)
+        event.listen(SomeClass, "before_insert", my_before_insert_listener)
 
     Available targets include:
 
@@ -915,9 +917,10 @@ class MapperEvents(event.Events[mapperlib.Mapper[Any]]):
 
             Base = declarative_base()
 
+
             @event.listens_for(Base, "instrument_class", propagate=True)
             def on_new_class(mapper, cls_):
-                " ... "
+                "..."
 
         :param mapper: the :class:`_orm.Mapper` which is the target
          of this event.
@@ -996,13 +999,16 @@ class MapperEvents(event.Events[mapperlib.Mapper[Any]]):
 
             DontConfigureBase = declarative_base()
 
+
             @event.listens_for(
                 DontConfigureBase,
-                "before_mapper_configured", retval=True, propagate=True)
+                "before_mapper_configured",
+                retval=True,
+                propagate=True,
+            )
             def dont_configure(mapper, cls):
                 return EXT_SKIP
 
-
         .. seealso::
 
             :meth:`.MapperEvents.before_configured`
@@ -1084,9 +1090,9 @@ class MapperEvents(event.Events[mapperlib.Mapper[Any]]):
 
             from sqlalchemy.orm import Mapper
 
+
             @event.listens_for(Mapper, "before_configured")
-            def go():
-                ...
+            def go(): ...
 
         Contrast this event to :meth:`.MapperEvents.after_configured`,
         which is invoked after the series of mappers has been configured,
@@ -1104,10 +1110,9 @@ class MapperEvents(event.Events[mapperlib.Mapper[Any]]):
 
             from sqlalchemy.orm import mapper
 
-            @event.listens_for(mapper, "before_configured", once=True)
-            def go():
-                ...
 
+            @event.listens_for(mapper, "before_configured", once=True)
+            def go(): ...
 
         .. seealso::
 
@@ -1144,9 +1149,9 @@ class MapperEvents(event.Events[mapperlib.Mapper[Any]]):
 
             from sqlalchemy.orm import Mapper
 
+
             @event.listens_for(Mapper, "after_configured")
-            def go():
-                # ...
+            def go(): ...
 
         Theoretically this event is called once per
         application, but is actually called any time new mappers
@@ -1158,9 +1163,9 @@ class MapperEvents(event.Events[mapperlib.Mapper[Any]]):
 
             from sqlalchemy.orm import mapper
 
+
             @event.listens_for(mapper, "after_configured", once=True)
-            def go():
-                # ...
+            def go(): ...
 
         .. seealso::
 
@@ -1547,9 +1552,11 @@ class SessionEvents(event.Events[Session]):
         from sqlalchemy import event
         from sqlalchemy.orm import sessionmaker
 
+
         def my_before_commit(session):
             print("before commit!")
 
+
         Session = sessionmaker()
 
         event.listen(Session, "before_commit", my_before_commit)
@@ -1769,7 +1776,7 @@ class SessionEvents(event.Events[Session]):
                 @event.listens_for(session, "after_transaction_create")
                 def after_transaction_create(session, transaction):
                     if transaction.parent is None:
-                        # work with top-level transaction
+                        ...  # work with top-level transaction
 
          To detect if the :class:`.SessionTransaction` is a SAVEPOINT, use the
          :attr:`.SessionTransaction.nested` attribute::
@@ -1777,8 +1784,7 @@ class SessionEvents(event.Events[Session]):
                 @event.listens_for(session, "after_transaction_create")
                 def after_transaction_create(session, transaction):
                     if transaction.nested:
-                        # work with SAVEPOINT transaction
-
+                        ...  # work with SAVEPOINT transaction
 
         .. seealso::
 
@@ -1810,7 +1816,7 @@ class SessionEvents(event.Events[Session]):
                 @event.listens_for(session, "after_transaction_create")
                 def after_transaction_end(session, transaction):
                     if transaction.parent is None:
-                        # work with top-level transaction
+                        ...  # work with top-level transaction
 
          To detect if the :class:`.SessionTransaction` is a SAVEPOINT, use the
          :attr:`.SessionTransaction.nested` attribute::
@@ -1818,8 +1824,7 @@ class SessionEvents(event.Events[Session]):
                 @event.listens_for(session, "after_transaction_create")
                 def after_transaction_end(session, transaction):
                     if transaction.nested:
-                        # work with SAVEPOINT transaction
-
+                        ...  # work with SAVEPOINT transaction
 
         .. seealso::
 
@@ -2425,11 +2430,11 @@ class AttributeEvents(event.Events[QueryableAttribute[Any]]):
 
         from sqlalchemy import event
 
-        @event.listens_for(MyClass.collection, 'append', propagate=True)
+
+        @event.listens_for(MyClass.collection, "append", propagate=True)
         def my_append_listener(target, value, initiator):
             print("received append event for target: %s" % target)
 
-
     Listeners have the option to return a possibly modified version of the
     value, when the :paramref:`.AttributeEvents.retval` flag is passed to
     :func:`.event.listen` or :func:`.event.listens_for`, such as below,
@@ -2438,11 +2443,12 @@ class AttributeEvents(event.Events[QueryableAttribute[Any]]):
         def validate_phone(target, value, oldvalue, initiator):
             "Strip non-numeric characters from a phone number"
 
-            return re.sub(r'\D', '', value)
+            return re.sub(r"\D", "", value)
+
 
         # setup listener on UserContact.phone attribute, instructing
         # it to use the return value
-        listen(UserContact.phone, 'set', validate_phone, retval=True)
+        listen(UserContact.phone, "set", validate_phone, retval=True)
 
     A validation function like the above can also raise an exception
     such as :exc:`ValueError` to halt the operation.
@@ -2452,7 +2458,7 @@ class AttributeEvents(event.Events[QueryableAttribute[Any]]):
     as when using mapper inheritance patterns::
 
 
-        @event.listens_for(MySuperClass.attr, 'set', propagate=True)
+        @event.listens_for(MySuperClass.attr, "set", propagate=True)
         def receive_set(target, value, initiator):
             print("value set: %s" % target)
 
@@ -2685,10 +2691,12 @@ class AttributeEvents(event.Events[QueryableAttribute[Any]]):
 
             from sqlalchemy.orm.attributes import OP_BULK_REPLACE
 
+
             @event.listens_for(SomeObject.collection, "bulk_replace")
             def process_collection(target, values, initiator):
                 values[:] = [_make_value(value) for value in values]
 
+
             @event.listens_for(SomeObject.collection, "append", retval=True)
             def process_collection(target, value, initiator):
                 # make sure bulk_replace didn't already do it
@@ -2836,16 +2844,18 @@ class AttributeEvents(event.Events[QueryableAttribute[Any]]):
 
             SOME_CONSTANT = 3.1415926
 
+
             class MyClass(Base):
                 # ...
 
                 some_attribute = Column(Numeric, default=SOME_CONSTANT)
 
+
             @event.listens_for(
-                MyClass.some_attribute, "init_scalar",
-                retval=True, propagate=True)
+                MyClass.some_attribute, "init_scalar", retval=True, propagate=True
+            )
             def _init_some_attribute(target, dict_, value):
-                dict_['some_attribute'] = SOME_CONSTANT
+                dict_["some_attribute"] = SOME_CONSTANT
                 return SOME_CONSTANT
 
         Above, we initialize the attribute ``MyClass.some_attribute`` to the
@@ -2881,9 +2891,10 @@ class AttributeEvents(event.Events[QueryableAttribute[Any]]):
 
             SOME_CONSTANT = 3.1415926
 
+
             @event.listens_for(
-                MyClass.some_attribute, "init_scalar",
-                retval=True, propagate=True)
+                MyClass.some_attribute, "init_scalar", retval=True, propagate=True
+            )
             def _init_some_attribute(target, dict_, value):
                 # will also fire off attribute set events
                 target.some_attribute = SOME_CONSTANT
@@ -2920,7 +2931,7 @@ class AttributeEvents(event.Events[QueryableAttribute[Any]]):
             :ref:`examples_instrumentation` - see the
             ``active_column_defaults.py`` example.
 
-        """
+        """  # noqa: E501
 
     def init_collection(
         self,
@@ -3058,8 +3069,8 @@ class QueryEvents(event.Events[Query[Any]]):
             @event.listens_for(Query, "before_compile", retval=True)
             def no_deleted(query):
                 for desc in query.column_descriptions:
-                    if desc['type'] is User:
-                        entity = desc['entity']
+                    if desc["type"] is User:
+                        entity = desc["entity"]
                         query = query.filter(entity.deleted == False)
                 return query
 
@@ -3075,12 +3086,11 @@ class QueryEvents(event.Events[Query[Any]]):
         re-establish the query being cached, apply the event adding the
         ``bake_ok`` flag::
 
-            @event.listens_for(
-                Query, "before_compile", retval=True, bake_ok=True)
+            @event.listens_for(Query, "before_compile", retval=True, bake_ok=True)
             def my_event(query):
                 for desc in query.column_descriptions:
-                    if desc['type'] is User:
-                        entity = desc['entity']
+                    if desc["type"] is User:
+                        entity = desc["entity"]
                         query = query.filter(entity.deleted == False)
                 return query
 
@@ -3101,7 +3111,7 @@ class QueryEvents(event.Events[Query[Any]]):
 
             :ref:`baked_with_before_compile`
 
-        """
+        """  # noqa: E501
 
     def before_compile_update(
         self, query: Query[Any], update_context: BulkUpdate
@@ -3121,12 +3131,12 @@ class QueryEvents(event.Events[Query[Any]]):
             @event.listens_for(Query, "before_compile_update", retval=True)
             def no_deleted(query, update_context):
                 for desc in query.column_descriptions:
-                    if desc['type'] is User:
-                        entity = desc['entity']
+                    if desc["type"] is User:
+                        entity = desc["entity"]
                         query = query.filter(entity.deleted == False)
 
-                        update_context.values['timestamp'] = (
-                            datetime.datetime.now(datetime.UTC)
+                        update_context.values["timestamp"] = datetime.datetime.now(
+                            datetime.UTC
                         )
                 return query
 
@@ -3155,7 +3165,7 @@ class QueryEvents(event.Events[Query[Any]]):
             :meth:`.QueryEvents.before_compile_delete`
 
 
-        """
+        """  # noqa: E501
 
     def before_compile_delete(
         self, query: Query[Any], delete_context: BulkDelete
@@ -3174,8 +3184,8 @@ class QueryEvents(event.Events[Query[Any]]):
             @event.listens_for(Query, "before_compile_delete", retval=True)
             def no_deleted(query, delete_context):
                 for desc in query.column_descriptions:
-                    if desc['type'] is User:
-                        entity = desc['entity']
+                    if desc["type"] is User:
+                        entity = desc["entity"]
                         query = query.filter(entity.deleted == False)
                 return query
 
index 4ad141288900b9ebe4b877952ca572bc6db6f423..22290450f2f460481cd33f8b9e16fb291e5bfb83 100644 (file)
@@ -688,27 +688,37 @@ class PropComparator(SQLORMOperations[_T_co], Generic[_T_co], ColumnOperators):
 
         # definition of custom PropComparator subclasses
 
-        from sqlalchemy.orm.properties import \
-                                ColumnProperty,\
-                                Composite,\
-                                Relationship
+        from sqlalchemy.orm.properties import (
+            ColumnProperty,
+            Composite,
+            Relationship,
+        )
+
 
         class MyColumnComparator(ColumnProperty.Comparator):
             def __eq__(self, other):
                 return self.__clause_element__() == other
 
+
         class MyRelationshipComparator(Relationship.Comparator):
             def any(self, expression):
                 "define the 'any' operation"
                 # ...
 
+
         class MyCompositeComparator(Composite.Comparator):
             def __gt__(self, other):
                 "redefine the 'greater than' operation"
 
-                return sql.and_(*[a>b for a, b in
-                                  zip(self.__clause_element__().clauses,
-                                      other.__composite_values__())])
+                return sql.and_(
+                    *[
+                        a > b
+                        for a, b in zip(
+                            self.__clause_element__().clauses,
+                            other.__composite_values__(),
+                        )
+                    ]
+                )
 
 
         # application of custom PropComparator subclasses
@@ -716,17 +726,22 @@ class PropComparator(SQLORMOperations[_T_co], Generic[_T_co], ColumnOperators):
         from sqlalchemy.orm import column_property, relationship, composite
         from sqlalchemy import Column, String
 
+
         class SomeMappedClass(Base):
-            some_column = column_property(Column("some_column", String),
-                                comparator_factory=MyColumnComparator)
+            some_column = column_property(
+                Column("some_column", String),
+                comparator_factory=MyColumnComparator,
+            )
 
-            some_relationship = relationship(SomeOtherClass,
-                                comparator_factory=MyRelationshipComparator)
+            some_relationship = relationship(
+                SomeOtherClass, comparator_factory=MyRelationshipComparator
+            )
 
             some_composite = composite(
-                    Column("a", String), Column("b", String),
-                    comparator_factory=MyCompositeComparator
-                )
+                Column("a", String),
+                Column("b", String),
+                comparator_factory=MyCompositeComparator,
+            )
 
     Note that for column-level operator redefinition, it's usually
     simpler to define the operators at the Core level, using the
@@ -868,8 +883,9 @@ class PropComparator(SQLORMOperations[_T_co], Generic[_T_co], ColumnOperators):
 
         e.g.::
 
-            query.join(Company.employees.of_type(Engineer)).\
-               filter(Engineer.name=='foo')
+            query.join(Company.employees.of_type(Engineer)).filter(
+                Engineer.name == "foo"
+            )
 
         :param \class_: a class or mapper indicating that criterion will be
             against this specific subclass.
@@ -895,11 +911,11 @@ class PropComparator(SQLORMOperations[_T_co], Generic[_T_co], ColumnOperators):
 
 
             stmt = select(User).join(
-                User.addresses.and_(Address.email_address != 'foo')
+                User.addresses.and_(Address.email_address != "foo")
             )
 
             stmt = select(User).options(
-                joinedload(User.addresses.and_(Address.email_address != 'foo'))
+                joinedload(User.addresses.and_(Address.email_address != "foo"))
             )
 
         .. versionadded:: 1.4
index d22878e7d792aeb3b2b1041aa32180e72e48bcda..deac38a39b26fcdf293c060cfce3b2b2556362f3 100644 (file)
@@ -331,7 +331,7 @@ class Mapper(
 
                 class User(Base):
                     __table__ = user_table
-                    __mapper_args__ = {'column_prefix':'_'}
+                    __mapper_args__ = {"column_prefix": "_"}
 
            The above mapping will assign the ``user_id``, ``user_name``, and
            ``password`` columns to attributes named ``_user_id``,
@@ -547,14 +547,14 @@ class Mapper(
           base-most mapped :class:`.Table`::
 
             class Employee(Base):
-                __tablename__ = 'employee'
+                __tablename__ = "employee"
 
                 id: Mapped[int] = mapped_column(primary_key=True)
                 discriminator: Mapped[str] = mapped_column(String(50))
 
                 __mapper_args__ = {
-                    "polymorphic_on":discriminator,
-                    "polymorphic_identity":"employee"
+                    "polymorphic_on": discriminator,
+                    "polymorphic_identity": "employee",
                 }
 
           It may also be specified
@@ -563,17 +563,18 @@ class Mapper(
           approach::
 
             class Employee(Base):
-                __tablename__ = 'employee'
+                __tablename__ = "employee"
 
                 id: Mapped[int] = mapped_column(primary_key=True)
                 discriminator: Mapped[str] = mapped_column(String(50))
 
                 __mapper_args__ = {
-                    "polymorphic_on":case(
+                    "polymorphic_on": case(
                         (discriminator == "EN", "engineer"),
                         (discriminator == "MA", "manager"),
-                        else_="employee"),
-                    "polymorphic_identity":"employee"
+                        else_="employee",
+                    ),
+                    "polymorphic_identity": "employee",
                 }
 
           It may also refer to any attribute using its string name,
@@ -581,14 +582,14 @@ class Mapper(
           configurations::
 
                 class Employee(Base):
-                    __tablename__ = 'employee'
+                    __tablename__ = "employee"
 
                     id: Mapped[int] = mapped_column(primary_key=True)
                     discriminator: Mapped[str]
 
                     __mapper_args__ = {
                         "polymorphic_on": "discriminator",
-                        "polymorphic_identity": "employee"
+                        "polymorphic_identity": "employee",
                     }
 
           When setting ``polymorphic_on`` to reference an
@@ -605,6 +606,7 @@ class Mapper(
                 from sqlalchemy import event
                 from sqlalchemy.orm import object_mapper
 
+
                 @event.listens_for(Employee, "init", propagate=True)
                 def set_identity(instance, *arg, **kw):
                     mapper = object_mapper(instance)
@@ -3261,14 +3263,9 @@ class Mapper(
         The resulting structure is a dictionary of columns mapped
         to lists of equivalent columns, e.g.::
 
-            {
-                tablea.col1:
-                    {tableb.col1, tablec.col1},
-                tablea.col2:
-                    {tabled.col2}
-            }
+            {tablea.col1: {tableb.col1, tablec.col1}, tablea.col2: {tabled.col2}}
 
-        """
+        """  # noqa: E501
         result: _EquivalentColumnMap = {}
 
         def visit_binary(binary):
@@ -3741,14 +3738,15 @@ class Mapper(
 
         given::
 
-            class A:
-                ...
+            class A: ...
+
 
             class B(A):
                 __mapper_args__ = {"polymorphic_load": "selectin"}
 
-            class C(B):
-                ...
+
+            class C(B): ...
+
 
             class D(B):
                 __mapper_args__ = {"polymorphic_load": "selectin"}
index 4b17c0c5d3640ba19ccbf4d8c280f666a21cb444..0792c1d1c675e74f2184b274dd1b27f221c63c96 100644 (file)
@@ -280,8 +280,8 @@ class ColumnProperty(
 
                 name = Column(String(64))
                 extension = Column(String(8))
-                filename = column_property(name + '.' + extension)
-                path = column_property('C:/' + filename.expression)
+                filename = column_property(name + "." + extension)
+                path = column_property("C:/" + filename.expression)
 
         .. seealso::
 
index 8f58143e6145e5d26b19dc272c4255b84a8c6032..84bb856d78e2c219b486cda0a71485278a676103 100644 (file)
@@ -685,41 +685,38 @@ class Query(
 
             from sqlalchemy.orm import aliased
 
+
             class Part(Base):
-                __tablename__ = 'part'
+                __tablename__ = "part"
                 part = Column(String, primary_key=True)
                 sub_part = Column(String, primary_key=True)
                 quantity = Column(Integer)
 
-            included_parts = session.query(
-                            Part.sub_part,
-                            Part.part,
-                            Part.quantity).\
-                                filter(Part.part=="our part").\
-                                cte(name="included_parts", recursive=True)
+
+            included_parts = (
+                session.query(Part.sub_part, Part.part, Part.quantity)
+                .filter(Part.part == "our part")
+                .cte(name="included_parts", recursive=True)
+            )
 
             incl_alias = aliased(included_parts, name="pr")
             parts_alias = aliased(Part, name="p")
             included_parts = included_parts.union_all(
                 session.query(
-                    parts_alias.sub_part,
-                    parts_alias.part,
-                    parts_alias.quantity).\
-                        filter(parts_alias.part==incl_alias.c.sub_part)
-                )
+                    parts_alias.sub_part, parts_alias.part, parts_alias.quantity
+                ).filter(parts_alias.part == incl_alias.c.sub_part)
+            )
 
             q = session.query(
-                    included_parts.c.sub_part,
-                    func.sum(included_parts.c.quantity).
-                        label('total_quantity')
-                ).\
-                group_by(included_parts.c.sub_part)
+                included_parts.c.sub_part,
+                func.sum(included_parts.c.quantity).label("total_quantity"),
+            ).group_by(included_parts.c.sub_part)
 
         .. seealso::
 
             :meth:`_sql.Select.cte` - v2 equivalent method.
 
-        """
+        """  # noqa: E501
         return (
             self.enable_eagerloads(False)
             ._get_select_statement_only()
@@ -954,9 +951,7 @@ class Query(
            :attr:`_query.Query.statement` using :meth:`.Session.execute`::
 
                 result = session.execute(
-                    query
-                    .set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
-                    .statement
+                    query.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL).statement
                 )
 
         .. versionadded:: 1.4
@@ -1065,8 +1060,7 @@ class Query(
 
             some_object = session.query(VersionedFoo).get((5, 10))
 
-            some_object = session.query(VersionedFoo).get(
-                {"id": 5, "version_id": 10})
+            some_object = session.query(VersionedFoo).get({"id": 5, "version_id": 10})
 
         :meth:`_query.Query.get` is special in that it provides direct
         access to the identity map of the owning :class:`.Session`.
@@ -1132,7 +1126,7 @@ class Query(
 
         :return: The object instance, or ``None``.
 
-        """
+        """  # noqa: E501
         self._no_criterion_assertion("get", order_by=False, distinct=False)
 
         # we still implement _get_impl() so that baked query can override
@@ -1584,19 +1578,22 @@ class Query(
 
             # Users, filtered on some arbitrary criterion
             # and then ordered by related email address
-            q = session.query(User).\
-                        join(User.address).\
-                        filter(User.name.like('%ed%')).\
-                        order_by(Address.email)
+            q = (
+                session.query(User)
+                .join(User.address)
+                .filter(User.name.like("%ed%"))
+                .order_by(Address.email)
+            )
 
             # given *only* User.id==5, Address.email, and 'q', what
             # would the *next* User in the result be ?
-            subq = q.with_entities(Address.email).\
-                        order_by(None).\
-                        filter(User.id==5).\
-                        subquery()
-            q = q.join((subq, subq.c.email < Address.email)).\
-                        limit(1)
+            subq = (
+                q.with_entities(Address.email)
+                .order_by(None)
+                .filter(User.id == 5)
+                .subquery()
+            )
+            q = q.join((subq, subq.c.email < Address.email)).limit(1)
 
         .. seealso::
 
@@ -1692,9 +1689,11 @@ class Query(
             def filter_something(criterion):
                 def transform(q):
                     return q.filter(criterion)
+
                 return transform
 
-            q = q.with_transformation(filter_something(x==5))
+
+            q = q.with_transformation(filter_something(x == 5))
 
         This allows ad-hoc recipes to be created for :class:`_query.Query`
         objects.
@@ -1812,9 +1811,15 @@ class Query(
 
         E.g.::
 
-            q = sess.query(User).populate_existing().with_for_update(nowait=True, of=User)
+            q = (
+                sess.query(User)
+                .populate_existing()
+                .with_for_update(nowait=True, of=User)
+            )
+
+        The above query on a PostgreSQL backend will render like:
 
-        The above query on a PostgreSQL backend will render like::
+        .. sourcecode:: sql
 
             SELECT users.id AS users_id FROM users FOR UPDATE OF users NOWAIT
 
@@ -1892,14 +1897,13 @@ class Query(
 
         e.g.::
 
-            session.query(MyClass).filter(MyClass.name == 'some name')
+            session.query(MyClass).filter(MyClass.name == "some name")
 
         Multiple criteria may be specified as comma separated; the effect
         is that they will be joined together using the :func:`.and_`
         function::
 
-            session.query(MyClass).\
-                filter(MyClass.name == 'some name', MyClass.id > 5)
+            session.query(MyClass).filter(MyClass.name == "some name", MyClass.id > 5)
 
         The criterion is any SQL expression object applicable to the
         WHERE clause of a select.   String expressions are coerced
@@ -1912,7 +1916,7 @@ class Query(
 
             :meth:`_sql.Select.where` - v2 equivalent method.
 
-        """
+        """  # noqa: E501
         for crit in list(criterion):
             crit = coercions.expect(
                 roles.WhereHavingRole, crit, apply_propagate_attrs=self
@@ -1980,14 +1984,13 @@ class Query(
 
         e.g.::
 
-            session.query(MyClass).filter_by(name = 'some name')
+            session.query(MyClass).filter_by(name="some name")
 
         Multiple criteria may be specified as comma separated; the effect
         is that they will be joined together using the :func:`.and_`
         function::
 
-            session.query(MyClass).\
-                filter_by(name = 'some name', id = 5)
+            session.query(MyClass).filter_by(name="some name", id=5)
 
         The keyword expressions are extracted from the primary
         entity of the query, or the last entity that was the
@@ -2116,10 +2119,12 @@ class Query(
         HAVING criterion makes it possible to use filters on aggregate
         functions like COUNT, SUM, AVG, MAX, and MIN, eg.::
 
-            q = session.query(User.id).\
-                        join(User.addresses).\
-                        group_by(User.id).\
-                        having(func.count(Address.id) > 2)
+            q = (
+                session.query(User.id)
+                .join(User.addresses)
+                .group_by(User.id)
+                .having(func.count(Address.id) > 2)
+            )
 
         .. seealso::
 
@@ -2143,8 +2148,8 @@ class Query(
 
         e.g.::
 
-            q1 = sess.query(SomeClass).filter(SomeClass.foo=='bar')
-            q2 = sess.query(SomeClass).filter(SomeClass.bar=='foo')
+            q1 = sess.query(SomeClass).filter(SomeClass.foo == "bar")
+            q2 = sess.query(SomeClass).filter(SomeClass.bar == "foo")
 
             q3 = q1.union(q2)
 
@@ -2153,7 +2158,9 @@ class Query(
 
             x.union(y).union(z).all()
 
-        will nest on each ``union()``, and produces::
+        will nest on each ``union()``, and produces:
+
+        .. sourcecode:: sql
 
             SELECT * FROM (SELECT * FROM (SELECT * FROM X UNION
                             SELECT * FROM y) UNION SELECT * FROM Z)
@@ -2162,7 +2169,9 @@ class Query(
 
             x.union(y, z).all()
 
-        produces::
+        produces:
+
+        .. sourcecode:: sql
 
             SELECT * FROM (SELECT * FROM X UNION SELECT * FROM y UNION
                             SELECT * FROM Z)
@@ -2274,7 +2283,9 @@ class Query(
             q = session.query(User).join(User.addresses)
 
         Where above, the call to :meth:`_query.Query.join` along
-        ``User.addresses`` will result in SQL approximately equivalent to::
+        ``User.addresses`` will result in SQL approximately equivalent to:
+
+        .. sourcecode:: sql
 
             SELECT user.id, user.name
             FROM user JOIN address ON user.id = address.user_id
@@ -2287,10 +2298,12 @@ class Query(
         calls may be used.  The relationship-bound attribute implies both
         the left and right side of the join at once::
 
-            q = session.query(User).\
-                    join(User.orders).\
-                    join(Order.items).\
-                    join(Item.keywords)
+            q = (
+                session.query(User)
+                .join(User.orders)
+                .join(Order.items)
+                .join(Item.keywords)
+            )
 
         .. note:: as seen in the above example, **the order in which each
            call to the join() method occurs is important**.    Query would not,
@@ -2329,7 +2342,7 @@ class Query(
         as the ON clause to be passed explicitly.    A example that includes
         a SQL expression as the ON clause is as follows::
 
-            q = session.query(User).join(Address, User.id==Address.user_id)
+            q = session.query(User).join(Address, User.id == Address.user_id)
 
         The above form may also use a relationship-bound attribute as the
         ON clause as well::
@@ -2344,11 +2357,13 @@ class Query(
             a1 = aliased(Address)
             a2 = aliased(Address)
 
-            q = session.query(User).\
-                    join(a1, User.addresses).\
-                    join(a2, User.addresses).\
-                    filter(a1.email_address=='ed@foo.com').\
-                    filter(a2.email_address=='ed@bar.com')
+            q = (
+                session.query(User)
+                .join(a1, User.addresses)
+                .join(a2, User.addresses)
+                .filter(a1.email_address == "ed@foo.com")
+                .filter(a2.email_address == "ed@bar.com")
+            )
 
         The relationship-bound calling form can also specify a target entity
         using the :meth:`_orm.PropComparator.of_type` method; a query
@@ -2357,11 +2372,13 @@ class Query(
             a1 = aliased(Address)
             a2 = aliased(Address)
 
-            q = session.query(User).\
-                    join(User.addresses.of_type(a1)).\
-                    join(User.addresses.of_type(a2)).\
-                    filter(a1.email_address == 'ed@foo.com').\
-                    filter(a2.email_address == 'ed@bar.com')
+            q = (
+                session.query(User)
+                .join(User.addresses.of_type(a1))
+                .join(User.addresses.of_type(a2))
+                .filter(a1.email_address == "ed@foo.com")
+                .filter(a2.email_address == "ed@bar.com")
+            )
 
         **Augmenting Built-in ON Clauses**
 
@@ -2372,7 +2389,7 @@ class Query(
         with the default criteria using AND::
 
             q = session.query(User).join(
-                User.addresses.and_(Address.email_address != 'foo@bar.com')
+                User.addresses.and_(Address.email_address != "foo@bar.com")
             )
 
         .. versionadded:: 1.4
@@ -2385,29 +2402,28 @@ class Query(
         appropriate ``.subquery()`` method in order to make a subquery
         out of a query::
 
-            subq = session.query(Address).\
-                filter(Address.email_address == 'ed@foo.com').\
-                subquery()
+            subq = (
+                session.query(Address)
+                .filter(Address.email_address == "ed@foo.com")
+                .subquery()
+            )
 
 
-            q = session.query(User).join(
-                subq, User.id == subq.c.user_id
-            )
+            q = session.query(User).join(subq, User.id == subq.c.user_id)
 
         Joining to a subquery in terms of a specific relationship and/or
         target entity may be achieved by linking the subquery to the
         entity using :func:`_orm.aliased`::
 
-            subq = session.query(Address).\
-                filter(Address.email_address == 'ed@foo.com').\
-                subquery()
+            subq = (
+                session.query(Address)
+                .filter(Address.email_address == "ed@foo.com")
+                .subquery()
+            )
 
             address_subq = aliased(Address, subq)
 
-            q = session.query(User).join(
-                User.addresses.of_type(address_subq)
-            )
-
+            q = session.query(User).join(User.addresses.of_type(address_subq))
 
         **Controlling what to Join From**
 
@@ -2415,11 +2431,16 @@ class Query(
         :class:`_query.Query` is not in line with what we want to join from,
         the :meth:`_query.Query.select_from` method may be used::
 
-            q = session.query(Address).select_from(User).\
-                            join(User.addresses).\
-                            filter(User.name == 'ed')
+            q = (
+                session.query(Address)
+                .select_from(User)
+                .join(User.addresses)
+                .filter(User.name == "ed")
+            )
+
+        Which will produce SQL similar to:
 
-        Which will produce SQL similar to::
+        .. sourcecode:: sql
 
             SELECT address.* FROM user
                 JOIN address ON user.id=address.user_id
@@ -2523,11 +2544,16 @@ class Query(
 
         A typical example::
 
-            q = session.query(Address).select_from(User).\
-                join(User.addresses).\
-                filter(User.name == 'ed')
+            q = (
+                session.query(Address)
+                .select_from(User)
+                .join(User.addresses)
+                .filter(User.name == "ed")
+            )
 
-        Which produces SQL equivalent to::
+        Which produces SQL equivalent to:
+
+        .. sourcecode:: sql
 
             SELECT address.* FROM user
             JOIN address ON user.id=address.user_id
@@ -2887,7 +2913,7 @@ class Query(
 
         Format is a list of dictionaries::
 
-            user_alias = aliased(User, name='user2')
+            user_alias = aliased(User, name="user2")
             q = sess.query(User, User.id, user_alias)
 
             # this expression:
@@ -2896,26 +2922,26 @@ class Query(
             # would return:
             [
                 {
-                    'name':'User',
-                    'type':User,
-                    'aliased':False,
-                    'expr':User,
-                    'entity': User
+                    "name": "User",
+                    "type": User,
+                    "aliased": False,
+                    "expr": User,
+                    "entity": User,
                 },
                 {
-                    'name':'id',
-                    'type':Integer(),
-                    'aliased':False,
-                    'expr':User.id,
-                    'entity': User
+                    "name": "id",
+                    "type": Integer(),
+                    "aliased": False,
+                    "expr": User.id,
+                    "entity": User,
                 },
                 {
-                    'name':'user2',
-                    'type':User,
-                    'aliased':True,
-                    'expr':user_alias,
-                    'entity': user_alias
-                }
+                    "name": "user2",
+                    "type": User,
+                    "aliased": True,
+                    "expr": user_alias,
+                    "entity": user_alias,
+                },
             ]
 
         .. seealso::
@@ -3024,10 +3050,12 @@ class Query(
 
         e.g.::
 
-            q = session.query(User).filter(User.name == 'fred')
+            q = session.query(User).filter(User.name == "fred")
             session.query(q.exists())
 
-        Producing SQL similar to::
+        Producing SQL similar to:
+
+        .. sourcecode:: sql
 
             SELECT EXISTS (
                 SELECT 1 FROM users WHERE users.name = :name_1
@@ -3076,7 +3104,9 @@ class Query(
         r"""Return a count of rows this the SQL formed by this :class:`Query`
         would return.
 
-        This generates the SQL for this Query as follows::
+        This generates the SQL for this Query as follows:
+
+        .. sourcecode:: sql
 
             SELECT count(1) AS count_1 FROM (
                 SELECT <rest of query follows...>
@@ -3116,8 +3146,7 @@ class Query(
 
             # return count of user "id" grouped
             # by "name"
-            session.query(func.count(User.id)).\
-                    group_by(User.name)
+            session.query(func.count(User.id)).group_by(User.name)
 
             from sqlalchemy import distinct
 
@@ -3143,11 +3172,11 @@ class Query(
 
         E.g.::
 
-            sess.query(User).filter(User.age == 25).\
-                delete(synchronize_session=False)
+            sess.query(User).filter(User.age == 25).delete(synchronize_session=False)
 
-            sess.query(User).filter(User.age == 25).\
-                delete(synchronize_session='evaluate')
+            sess.query(User).filter(User.age == 25).delete(
+                synchronize_session="evaluate"
+            )
 
         .. warning::
 
@@ -3167,7 +3196,7 @@ class Query(
 
             :ref:`orm_expression_update_delete`
 
-        """
+        """  # noqa: E501
 
         bulk_del = BulkDelete(self)
         if self.dispatch.before_compile_delete:
@@ -3205,11 +3234,13 @@ class Query(
 
         E.g.::
 
-            sess.query(User).filter(User.age == 25).\
-                update({User.age: User.age - 10}, synchronize_session=False)
+            sess.query(User).filter(User.age == 25).update(
+                {User.age: User.age - 10}, synchronize_session=False
+            )
 
-            sess.query(User).filter(User.age == 25).\
-                update({"age": User.age - 10}, synchronize_session='evaluate')
+            sess.query(User).filter(User.age == 25).update(
+                {"age": User.age - 10}, synchronize_session="evaluate"
+            )
 
         .. warning::
 
index 3a9c4d3ad84cc99fe4e48907e0d5a3ff908b8234..02be1d3432a6f874b8dc004d61fb0059bf7bb199 100644 (file)
@@ -748,12 +748,16 @@ class RelationshipProperty(
         def __eq__(self, other: Any) -> ColumnElement[bool]:  # type: ignore[override]  # noqa: E501
             """Implement the ``==`` operator.
 
-            In a many-to-one context, such as::
+            In a many-to-one context, such as:
+
+            .. sourcecode:: text
 
               MyClass.some_prop == <some object>
 
             this will typically produce a
-            clause such as::
+            clause such as:
+
+            .. sourcecode:: text
 
               mytable.related_id == <some id>
 
@@ -916,11 +920,12 @@ class RelationshipProperty(
             An expression like::
 
                 session.query(MyClass).filter(
-                    MyClass.somereference.any(SomeRelated.x==2)
+                    MyClass.somereference.any(SomeRelated.x == 2)
                 )
 
+            Will produce a query like:
 
-            Will produce a query like::
+            .. sourcecode:: sql
 
                 SELECT * FROM my_table WHERE
                 EXISTS (SELECT 1 FROM related WHERE related.my_id=my_table.id
@@ -934,11 +939,11 @@ class RelationshipProperty(
             :meth:`~.Relationship.Comparator.any` is particularly
             useful for testing for empty collections::
 
-                session.query(MyClass).filter(
-                    ~MyClass.somereference.any()
-                )
+                session.query(MyClass).filter(~MyClass.somereference.any())
+
+            will produce:
 
-            will produce::
+            .. sourcecode:: sql
 
                 SELECT * FROM my_table WHERE
                 NOT (EXISTS (SELECT 1 FROM related WHERE
@@ -969,11 +974,12 @@ class RelationshipProperty(
             An expression like::
 
                 session.query(MyClass).filter(
-                    MyClass.somereference.has(SomeRelated.x==2)
+                    MyClass.somereference.has(SomeRelated.x == 2)
                 )
 
+            Will produce a query like:
 
-            Will produce a query like::
+            .. sourcecode:: sql
 
                 SELECT * FROM my_table WHERE
                 EXISTS (SELECT 1 FROM related WHERE
@@ -1012,7 +1018,9 @@ class RelationshipProperty(
 
                 MyClass.contains(other)
 
-            Produces a clause like::
+            Produces a clause like:
+
+            .. sourcecode:: sql
 
                 mytable.id == <some id>
 
@@ -1032,7 +1040,9 @@ class RelationshipProperty(
 
                 query(MyClass).filter(MyClass.contains(other))
 
-            Produces a query like::
+            Produces a query like:
+
+            .. sourcecode:: sql
 
                 SELECT * FROM my_table, my_association_table AS
                 my_association_table_1 WHERE
@@ -1128,11 +1138,15 @@ class RelationshipProperty(
         def __ne__(self, other: Any) -> ColumnElement[bool]:  # type: ignore[override]  # noqa: E501
             """Implement the ``!=`` operator.
 
-            In a many-to-one context, such as::
+            In a many-to-one context, such as:
+
+            .. sourcecode:: text
 
               MyClass.some_prop != <some object>
 
-            This will typically produce a clause such as::
+            This will typically produce a clause such as:
+
+            .. sourcecode:: sql
 
               mytable.related_id != <some id>
 
index d333f174a51feae684cde90853e9ad76825fb801..26c8521227d4950040d23abf1b725a905f1fdac9 100644 (file)
@@ -285,11 +285,13 @@ class scoped_session(Generic[_S]):
 
             Session = scoped_session(sessionmaker())
 
+
             class MyClass:
                 query: QueryPropertyDescriptor = Session.query_property()
 
+
             # after mappers are defined
-            result = MyClass.query.filter(MyClass.name=='foo').all()
+            result = MyClass.query.filter(MyClass.name == "foo").all()
 
         Produces instances of the session's configured query class by
         default.  To override and use a custom implementation, provide
@@ -735,9 +737,8 @@ class scoped_session(Generic[_S]):
         E.g.::
 
             from sqlalchemy import select
-            result = session.execute(
-                select(User).where(User.id == 5)
-            )
+
+            result = session.execute(select(User).where(User.id == 5))
 
         The API contract of :meth:`_orm.Session.execute` is similar to that
         of :meth:`_engine.Connection.execute`, the :term:`2.0 style` version
@@ -967,10 +968,7 @@ class scoped_session(Generic[_S]):
 
             some_object = session.get(VersionedFoo, (5, 10))
 
-            some_object = session.get(
-                VersionedFoo,
-                {"id": 5, "version_id": 10}
-            )
+            some_object = session.get(VersionedFoo, {"id": 5, "version_id": 10})
 
         .. versionadded:: 1.4 Added :meth:`_orm.Session.get`, which is moved
            from the now legacy :meth:`_orm.Query.get` method.
index 50e7e1cf68cc90bcb7c6aa259c8d3ee190416d36..2befa8f43d0e3ba3c6dbf58dacec9a821cab048a 100644 (file)
@@ -1571,12 +1571,16 @@ class Session(_SessionClassMethods, EventTarget):
            operation.    The complete heuristics for resolution are
            described at :meth:`.Session.get_bind`.  Usage looks like::
 
-            Session = sessionmaker(binds={
-                SomeMappedClass: create_engine('postgresql+psycopg2://engine1'),
-                SomeDeclarativeBase: create_engine('postgresql+psycopg2://engine2'),
-                some_mapper: create_engine('postgresql+psycopg2://engine3'),
-                some_table: create_engine('postgresql+psycopg2://engine4'),
-                })
+            Session = sessionmaker(
+                binds={
+                    SomeMappedClass: create_engine("postgresql+psycopg2://engine1"),
+                    SomeDeclarativeBase: create_engine(
+                        "postgresql+psycopg2://engine2"
+                    ),
+                    some_mapper: create_engine("postgresql+psycopg2://engine3"),
+                    some_table: create_engine("postgresql+psycopg2://engine4"),
+                }
+            )
 
            .. seealso::
 
@@ -2322,9 +2326,8 @@ class Session(_SessionClassMethods, EventTarget):
         E.g.::
 
             from sqlalchemy import select
-            result = session.execute(
-                select(User).where(User.id == 5)
-            )
+
+            result = session.execute(select(User).where(User.id == 5))
 
         The API contract of :meth:`_orm.Session.execute` is similar to that
         of :meth:`_engine.Connection.execute`, the :term:`2.0 style` version
@@ -2984,7 +2987,7 @@ class Session(_SessionClassMethods, EventTarget):
 
         e.g.::
 
-            obj = session._identity_lookup(inspect(SomeClass), (1, ))
+            obj = session._identity_lookup(inspect(SomeClass), (1,))
 
         :param mapper: mapper in use
         :param primary_key_identity: the primary key we are searching for, as
@@ -3612,10 +3615,7 @@ class Session(_SessionClassMethods, EventTarget):
 
             some_object = session.get(VersionedFoo, (5, 10))
 
-            some_object = session.get(
-                VersionedFoo,
-                {"id": 5, "version_id": 10}
-            )
+            some_object = session.get(VersionedFoo, {"id": 5, "version_id": 10})
 
         .. versionadded:: 1.4 Added :meth:`_orm.Session.get`, which is moved
            from the now legacy :meth:`_orm.Query.get` method.
@@ -3704,7 +3704,7 @@ class Session(_SessionClassMethods, EventTarget):
 
         :return: The object instance, or ``None``.
 
-        """
+        """  # noqa: E501
         return self._get_impl(
             entity,
             ident,
@@ -4957,7 +4957,7 @@ class sessionmaker(_SessionClassMethods, Generic[_S]):
 
         # an Engine, which the Session will use for connection
         # resources
-        engine = create_engine('postgresql+psycopg2://scott:tiger@localhost/')
+        engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/")
 
         Session = sessionmaker(engine)
 
@@ -5010,7 +5010,7 @@ class sessionmaker(_SessionClassMethods, Generic[_S]):
 
         with engine.connect() as connection:
             with Session(bind=connection) as session:
-                # work with session
+                ...  # work with session
 
     The class also includes a method :meth:`_orm.sessionmaker.configure`, which
     can be used to specify additional keyword arguments to the factory, which
@@ -5025,7 +5025,7 @@ class sessionmaker(_SessionClassMethods, Generic[_S]):
 
         # ... later, when an engine URL is read from a configuration
         # file or other events allow the engine to be created
-        engine = create_engine('sqlite:///foo.db')
+        engine = create_engine("sqlite:///foo.db")
         Session.configure(bind=engine)
 
         sess = Session()
@@ -5163,7 +5163,7 @@ class sessionmaker(_SessionClassMethods, Generic[_S]):
 
             Session = sessionmaker()
 
-            Session.configure(bind=create_engine('sqlite://'))
+            Session.configure(bind=create_engine("sqlite://"))
         """
         self.kw.update(new_kw)
 
index 2ecbe246290f1d89536493ac85b1d387f4d1c2d8..c2f46e7ab4cf85becdedcd03627473ca8fbc4a25 100644 (file)
@@ -109,9 +109,7 @@ class _AbstractLoad(traversals.GenerativeOnTraversal, LoaderOption):
         The option is used in conjunction with an explicit join that loads
         the desired rows, i.e.::
 
-            sess.query(Order).join(Order.user).options(
-                contains_eager(Order.user)
-            )
+            sess.query(Order).join(Order.user).options(contains_eager(Order.user))
 
         The above query would join from the ``Order`` entity to its related
         ``User`` entity, and the returned ``Order`` objects would have the
@@ -257,15 +255,11 @@ class _AbstractLoad(traversals.GenerativeOnTraversal, LoaderOption):
             select(User).options(joinedload(User.orders))
 
             # joined-load Order.items and then Item.keywords
-            select(Order).options(
-                joinedload(Order.items).joinedload(Item.keywords)
-            )
+            select(Order).options(joinedload(Order.items).joinedload(Item.keywords))
 
             # lazily load Order.items, but when Items are loaded,
             # joined-load the keywords collection
-            select(Order).options(
-                lazyload(Order.items).joinedload(Item.keywords)
-            )
+            select(Order).options(lazyload(Order.items).joinedload(Item.keywords))
 
         :param innerjoin: if ``True``, indicates that the joined eager load
          should use an inner join instead of the default of left outer join::
@@ -276,9 +270,7 @@ class _AbstractLoad(traversals.GenerativeOnTraversal, LoaderOption):
         OUTER and others INNER, right-nested joins are used to link them::
 
             select(A).options(
-                joinedload(A.bs, innerjoin=False).joinedload(
-                    B.cs, innerjoin=True
-                )
+                joinedload(A.bs, innerjoin=False).joinedload(B.cs, innerjoin=True)
             )
 
         The above query, linking A.bs via "outer" join and B.cs via "inner"
@@ -293,10 +285,7 @@ class _AbstractLoad(traversals.GenerativeOnTraversal, LoaderOption):
         will render as LEFT OUTER JOIN.  For example, supposing ``A.bs``
         is an outerjoin::
 
-            select(A).options(
-                joinedload(A.bs).joinedload(B.cs, innerjoin="unnested")
-            )
-
+            select(A).options(joinedload(A.bs).joinedload(B.cs, innerjoin="unnested"))
 
         The above join will render as "a LEFT OUTER JOIN b LEFT OUTER JOIN c",
         rather than as "a LEFT OUTER JOIN (b JOIN c)".
@@ -326,7 +315,7 @@ class _AbstractLoad(traversals.GenerativeOnTraversal, LoaderOption):
 
             :ref:`joined_eager_loading`
 
-        """
+        """  # noqa: E501
         loader = self._set_relationship_strategy(
             attr,
             {"lazy": "joined"},
@@ -357,10 +346,7 @@ class _AbstractLoad(traversals.GenerativeOnTraversal, LoaderOption):
 
             # lazily load Order.items, but when Items are loaded,
             # subquery-load the keywords collection
-            select(Order).options(
-                lazyload(Order.items).subqueryload(Item.keywords)
-            )
-
+            select(Order).options(lazyload(Order.items).subqueryload(Item.keywords))
 
         .. seealso::
 
@@ -394,9 +380,7 @@ class _AbstractLoad(traversals.GenerativeOnTraversal, LoaderOption):
 
             # lazily load Order.items, but when Items are loaded,
             # selectin-load the keywords collection
-            select(Order).options(
-                lazyload(Order.items).selectinload(Item.keywords)
-            )
+            select(Order).options(lazyload(Order.items).selectinload(Item.keywords))
 
         :param recursion_depth: optional int; when set to a positive integer
          in conjunction with a self-referential relationship,
@@ -609,8 +593,7 @@ class _AbstractLoad(traversals.GenerativeOnTraversal, LoaderOption):
             from sqlalchemy.orm import defer
 
             session.query(MyClass).options(
-                defer(MyClass.attribute_one),
-                defer(MyClass.attribute_two)
+                defer(MyClass.attribute_one), defer(MyClass.attribute_two)
             )
 
         To specify a deferred load of an attribute on a related class,
@@ -630,7 +613,7 @@ class _AbstractLoad(traversals.GenerativeOnTraversal, LoaderOption):
                 defaultload(MyClass.someattr).options(
                     defer(RelatedClass.some_column),
                     defer(RelatedClass.some_other_column),
-                    defer(RelatedClass.another_column)
+                    defer(RelatedClass.another_column),
                 )
             )
 
@@ -676,14 +659,10 @@ class _AbstractLoad(traversals.GenerativeOnTraversal, LoaderOption):
             )
 
             # undefer all columns specific to a single class using Load + *
-            session.query(MyClass, MyOtherClass).options(
-                Load(MyClass).undefer("*")
-            )
+            session.query(MyClass, MyOtherClass).options(Load(MyClass).undefer("*"))
 
             # undefer a column on a related object
-            select(MyClass).options(
-                defaultload(MyClass.items).undefer(MyClass.text)
-            )
+            select(MyClass).options(defaultload(MyClass.items).undefer(MyClass.text))
 
         :param key: Attribute to be undeferred.
 
@@ -696,7 +675,7 @@ class _AbstractLoad(traversals.GenerativeOnTraversal, LoaderOption):
 
             :func:`_orm.undefer_group`
 
-        """
+        """  # noqa: E501
         return self._set_column_strategy(
             (key,), {"deferred": False, "instrument": True}
         )
@@ -1218,13 +1197,11 @@ class Load(_AbstractLoad):
 
             query = session.query(Author)
             query = query.options(
-                        joinedload(Author.book).options(
-                            load_only(Book.summary, Book.excerpt),
-                            joinedload(Book.citations).options(
-                                joinedload(Citation.author)
-                            )
-                        )
-                    )
+                joinedload(Author.book).options(
+                    load_only(Book.summary, Book.excerpt),
+                    joinedload(Book.citations).options(joinedload(Citation.author)),
+                )
+            )
 
         :param \*opts: A series of loader option objects (ultimately
          :class:`_orm.Load` objects) which should be applied to the path
@@ -1668,13 +1645,17 @@ class _LoadElement(
         loads, and adjusts the given path to be relative to the
         current_path.
 
-        E.g. given a loader path and current path::
+        E.g. given a loader path and current path:
+
+        .. sourcecode:: text
 
             lp: User -> orders -> Order -> items -> Item -> keywords -> Keyword
 
             cp: User -> orders -> Order -> items
 
-        The adjusted path would be::
+        The adjusted path would be:
+
+        .. sourcecode:: text
 
             Item -> keywords -> Keyword
 
@@ -2155,11 +2136,11 @@ class _TokenStrategyLoad(_LoadElement):
 
     e.g.::
 
-        raiseload('*')
-        Load(User).lazyload('*')
-        defer('*')
+        raiseload("*")
+        Load(User).lazyload("*")
+        defer("*")
         load_only(User.name, User.email)  # will create a defer('*')
-        joinedload(User.addresses).raiseload('*')
+        joinedload(User.addresses).raiseload("*")
 
     """
 
index 670f99f73d377495e58d58f2db077e6db628abcb..6ae46c0c307edf8811ca4b0350b323bce50995e6 100644 (file)
@@ -475,9 +475,7 @@ def identity_key(
 
       E.g.::
 
-        >>> row = engine.execute(\
-            text("select * from table where a=1 and b=2")\
-            ).first()
+        >>> row = engine.execute(text("select * from table where a=1 and b=2")).first()
         >>> identity_key(MyClass, row=row)
         (<class '__main__.MyClass'>, (1, 2), None)
 
@@ -488,7 +486,7 @@ def identity_key(
 
         .. versionadded:: 1.2 added identity_token
 
-    """
+    """  # noqa: E501
     if class_ is not None:
         mapper = class_mapper(class_)
         if row is None:
@@ -666,9 +664,9 @@ class AliasedClass(
 
         # find all pairs of users with the same name
         user_alias = aliased(User)
-        session.query(User, user_alias).\
-                        join((user_alias, User.id > user_alias.id)).\
-                        filter(User.name == user_alias.name)
+        session.query(User, user_alias).join(
+            (user_alias, User.id > user_alias.id)
+        ).filter(User.name == user_alias.name)
 
     :class:`.AliasedClass` is also capable of mapping an existing mapped
     class to an entirely new selectable, provided this selectable is column-
@@ -692,6 +690,7 @@ class AliasedClass(
     using :func:`_sa.inspect`::
 
         from sqlalchemy import inspect
+
         my_alias = aliased(MyClass)
         insp = inspect(my_alias)
 
@@ -1601,8 +1600,7 @@ class Bundle(
 
             bn = Bundle("mybundle", MyClass.x, MyClass.y)
 
-            for row in session.query(bn).filter(
-                    bn.c.x == 5).filter(bn.c.y == 4):
+            for row in session.query(bn).filter(bn.c.x == 5).filter(bn.c.y == 4):
                 print(row.mybundle.x, row.mybundle.y)
 
         :param name: name of the bundle.
@@ -1611,7 +1609,7 @@ class Bundle(
          can be returned as a "single entity" outside of any enclosing tuple
          in the same manner as a mapped entity.
 
-        """
+        """  # noqa: E501
         self.name = self._label = name
         coerced_exprs = [
             coercions.expect(
@@ -1666,19 +1664,19 @@ class Bundle(
 
         Nesting of bundles is also supported::
 
-            b1 = Bundle("b1",
-                    Bundle('b2', MyClass.a, MyClass.b),
-                    Bundle('b3', MyClass.x, MyClass.y)
-                )
+            b1 = Bundle(
+                "b1",
+                Bundle("b2", MyClass.a, MyClass.b),
+                Bundle("b3", MyClass.x, MyClass.y),
+            )
 
-            q = sess.query(b1).filter(
-                b1.c.b2.c.a == 5).filter(b1.c.b3.c.y == 9)
+            q = sess.query(b1).filter(b1.c.b2.c.a == 5).filter(b1.c.b3.c.y == 9)
 
     .. seealso::
 
         :attr:`.Bundle.c`
 
-    """
+    """  # noqa: E501
 
     c: ReadOnlyColumnCollection[str, KeyedColumnElement[Any]]
     """An alias for :attr:`.Bundle.columns`."""
@@ -1744,25 +1742,24 @@ class Bundle(
 
             from sqlalchemy.orm import Bundle
 
+
             class DictBundle(Bundle):
                 def create_row_processor(self, query, procs, labels):
-                    'Override create_row_processor to return values as
-                    dictionaries'
+                    "Override create_row_processor to return values as dictionaries"
 
                     def proc(row):
-                        return dict(
-                            zip(labels, (proc(row) for proc in procs))
-                        )
+                        return dict(zip(labels, (proc(row) for proc in procs)))
+
                     return proc
 
         A result from the above :class:`_orm.Bundle` will return dictionary
         values::
 
-            bn = DictBundle('mybundle', MyClass.data1, MyClass.data2)
-            for row in session.execute(select(bn)).where(bn.c.data1 == 'd1'):
-                print(row.mybundle['data1'], row.mybundle['data2'])
+            bn = DictBundle("mybundle", MyClass.data1, MyClass.data2)
+            for row in session.execute(select(bn)).where(bn.c.data1 == "d1"):
+                print(row.mybundle["data1"], row.mybundle["data2"])
 
-        """
+        """  # noqa: E501
         keyed_tuple = result_tuple(labels, [() for l in labels])
 
         def proc(row: Row[Unpack[TupleAny]]) -> Any:
@@ -1988,7 +1985,6 @@ def with_parent(
 
         stmt = select(Address).where(with_parent(some_user, User.addresses))
 
-
     The SQL rendered is the same as that rendered when a lazy loader
     would fire off from the given parent on that attribute, meaning
     that the appropriate state is taken from the parent object in
@@ -2001,9 +1997,7 @@ def with_parent(
 
         a1 = aliased(Address)
         a2 = aliased(Address)
-        stmt = select(a1, a2).where(
-            with_parent(u1, User.addresses.of_type(a2))
-        )
+        stmt = select(a1, a2).where(with_parent(u1, User.addresses.of_type(a2)))
 
     The above use is equivalent to using the
     :func:`_orm.with_parent.from_entity` argument::
@@ -2028,7 +2022,7 @@ def with_parent(
 
       .. versionadded:: 1.2
 
-    """
+    """  # noqa: E501
     prop_t: RelationshipProperty[Any]
 
     if isinstance(prop, str):
@@ -2122,14 +2116,13 @@ def _entity_corresponds_to_use_path_impl(
         someoption(A).someoption(C.d)  # -> fn(A, C) -> False
 
         a1 = aliased(A)
-        someoption(a1).someoption(A.b) # -> fn(a1, A) -> False
-        someoption(a1).someoption(a1.b) # -> fn(a1, a1) -> True
+        someoption(a1).someoption(A.b)  # -> fn(a1, A) -> False
+        someoption(a1).someoption(a1.b)  # -> fn(a1, a1) -> True
 
         wp = with_polymorphic(A, [A1, A2])
         someoption(wp).someoption(A1.foo)  # -> fn(wp, A1) -> False
         someoption(wp).someoption(wp.A1.foo)  # -> fn(wp, wp.A1) -> True
 
-
     """
     if insp_is_aliased_class(given):
         return (
index 4b4f4e47851116e139470d024b9899a37c0dcbfa..b54fad125b185c3f1d1edb375c2134f960bab7ec 100644 (file)
@@ -35,10 +35,12 @@ class PoolEvents(event.Events[Pool]):
 
         from sqlalchemy import event
 
+
         def my_on_checkout(dbapi_conn, connection_rec, connection_proxy):
             "handle an on checkout event"
 
-        event.listen(Pool, 'checkout', my_on_checkout)
+
+        event.listen(Pool, "checkout", my_on_checkout)
 
     In addition to accepting the :class:`_pool.Pool` class and
     :class:`_pool.Pool` instances, :class:`_events.PoolEvents` also accepts
@@ -49,7 +51,7 @@ class PoolEvents(event.Events[Pool]):
         engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/test")
 
         # will associate with engine.pool
-        event.listen(engine, 'checkout', my_on_checkout)
+        event.listen(engine, "checkout", my_on_checkout)
 
     """  # noqa: E501
 
index a7ead521f86e1af0d18533c2e8d3f226359cd4a5..3afe70e3afc10109a6968fa83cb31042fcc6f9bc 100644 (file)
@@ -24,10 +24,7 @@ def insert(table: _DMLTableArgument) -> Insert:
 
         from sqlalchemy import insert
 
-        stmt = (
-            insert(user_table).
-            values(name='username', fullname='Full Username')
-        )
+        stmt = insert(user_table).values(name="username", fullname="Full Username")
 
     Similar functionality is available via the
     :meth:`_expression.TableClause.insert` method on
@@ -78,7 +75,7 @@ def insert(table: _DMLTableArgument) -> Insert:
 
         :ref:`tutorial_core_insert` - in the :ref:`unified_tutorial`
 
-    """
+    """  # noqa: E501
     return Insert(table)
 
 
@@ -90,9 +87,7 @@ def update(table: _DMLTableArgument) -> Update:
         from sqlalchemy import update
 
         stmt = (
-            update(user_table).
-            where(user_table.c.id == 5).
-            values(name='user #5')
+            update(user_table).where(user_table.c.id == 5).values(name="user #5")
         )
 
     Similar functionality is available via the
@@ -109,7 +104,7 @@ def update(table: _DMLTableArgument) -> Update:
         :ref:`tutorial_core_update_delete` - in the :ref:`unified_tutorial`
 
 
-    """
+    """  # noqa: E501
     return Update(table)
 
 
@@ -120,10 +115,7 @@ def delete(table: _DMLTableArgument) -> Delete:
 
         from sqlalchemy import delete
 
-        stmt = (
-            delete(user_table).
-            where(user_table.c.id == 5)
-        )
+        stmt = delete(user_table).where(user_table.c.id == 5)
 
     Similar functionality is available via the
     :meth:`_expression.TableClause.delete` method on
index 55e92dd0c4fdaa63290c9acf42724f58836df2df..121386781e9a99e8ff60a58720e31cc75ced179a 100644 (file)
@@ -125,11 +125,8 @@ def and_(  # type: ignore[empty-body]
         from sqlalchemy import and_
 
         stmt = select(users_table).where(
-                        and_(
-                            users_table.c.name == 'wendy',
-                            users_table.c.enrolled == True
-                        )
-                    )
+            and_(users_table.c.name == "wendy", users_table.c.enrolled == True)
+        )
 
     The :func:`.and_` conjunction is also available using the
     Python ``&`` operator (though note that compound expressions
@@ -137,9 +134,8 @@ def and_(  # type: ignore[empty-body]
     operator precedence behavior)::
 
         stmt = select(users_table).where(
-                        (users_table.c.name == 'wendy') &
-                        (users_table.c.enrolled == True)
-                    )
+            (users_table.c.name == "wendy") & (users_table.c.enrolled == True)
+        )
 
     The :func:`.and_` operation is also implicit in some cases;
     the :meth:`_expression.Select.where`
@@ -147,9 +143,11 @@ def and_(  # type: ignore[empty-body]
     times against a statement, which will have the effect of each
     clause being combined using :func:`.and_`::
 
-        stmt = select(users_table).\
-                where(users_table.c.name == 'wendy').\
-                where(users_table.c.enrolled == True)
+        stmt = (
+            select(users_table)
+            .where(users_table.c.name == "wendy")
+            .where(users_table.c.enrolled == True)
+        )
 
     The :func:`.and_` construct must be given at least one positional
     argument in order to be valid; a :func:`.and_` construct with no
@@ -159,6 +157,7 @@ def and_(  # type: ignore[empty-body]
     specified::
 
         from sqlalchemy import true
+
         criteria = and_(true(), *expressions)
 
     The above expression will compile to SQL as the expression ``true``
@@ -190,11 +189,8 @@ if not TYPE_CHECKING:
             from sqlalchemy import and_
 
             stmt = select(users_table).where(
-                            and_(
-                                users_table.c.name == 'wendy',
-                                users_table.c.enrolled == True
-                            )
-                        )
+                and_(users_table.c.name == "wendy", users_table.c.enrolled == True)
+            )
 
         The :func:`.and_` conjunction is also available using the
         Python ``&`` operator (though note that compound expressions
@@ -202,9 +198,8 @@ if not TYPE_CHECKING:
         operator precedence behavior)::
 
             stmt = select(users_table).where(
-                            (users_table.c.name == 'wendy') &
-                            (users_table.c.enrolled == True)
-                        )
+                (users_table.c.name == "wendy") & (users_table.c.enrolled == True)
+            )
 
         The :func:`.and_` operation is also implicit in some cases;
         the :meth:`_expression.Select.where`
@@ -212,9 +207,11 @@ if not TYPE_CHECKING:
         times against a statement, which will have the effect of each
         clause being combined using :func:`.and_`::
 
-            stmt = select(users_table).\
-                    where(users_table.c.name == 'wendy').\
-                    where(users_table.c.enrolled == True)
+            stmt = (
+                select(users_table)
+                .where(users_table.c.name == "wendy")
+                .where(users_table.c.enrolled == True)
+            )
 
         The :func:`.and_` construct must be given at least one positional
         argument in order to be valid; a :func:`.and_` construct with no
@@ -224,6 +221,7 @@ if not TYPE_CHECKING:
         specified::
 
             from sqlalchemy import true
+
             criteria = and_(true(), *expressions)
 
         The above expression will compile to SQL as the expression ``true``
@@ -241,7 +239,7 @@ if not TYPE_CHECKING:
 
             :func:`.or_`
 
-        """
+        """  # noqa: E501
         return BooleanClauseList.and_(*clauses)
 
 
@@ -307,9 +305,12 @@ def asc(
     e.g.::
 
         from sqlalchemy import asc
+
         stmt = select(users_table).order_by(asc(users_table.c.name))
 
-    will produce SQL as::
+    will produce SQL as:
+
+    .. sourcecode:: sql
 
         SELECT id, name FROM user ORDER BY name ASC
 
@@ -346,9 +347,11 @@ def collate(
 
     e.g.::
 
-        collate(mycolumn, 'utf8_bin')
+        collate(mycolumn, "utf8_bin")
+
+    produces:
 
-    produces::
+    .. sourcecode:: sql
 
         mycolumn COLLATE utf8_bin
 
@@ -373,9 +376,12 @@ def between(
     E.g.::
 
         from sqlalchemy import between
+
         stmt = select(users_table).where(between(users_table.c.id, 5, 7))
 
-    Would produce SQL resembling::
+    Would produce SQL resembling:
+
+    .. sourcecode:: sql
 
         SELECT id, name FROM user WHERE id BETWEEN :id_1 AND :id_2
 
@@ -497,7 +503,9 @@ def bindparam(
             users_table.c.name == bindparam("username")
         )
 
-    The above statement, when rendered, will produce SQL similar to::
+    The above statement, when rendered, will produce SQL similar to:
+
+    .. sourcecode:: sql
 
         SELECT id, name FROM user WHERE name = :username
 
@@ -532,7 +540,7 @@ def bindparam(
     coerced into fixed :func:`.bindparam` constructs.  For example, given
     a comparison operation such as::
 
-        expr = users_table.c.name == 'Wendy'
+        expr = users_table.c.name == "Wendy"
 
     The above expression will produce a :class:`.BinaryExpression`
     construct, where the left side is the :class:`_schema.Column` object
@@ -540,9 +548,11 @@ def bindparam(
     :class:`.BindParameter` representing the literal value::
 
         print(repr(expr.right))
-        BindParameter('%(4327771088 name)s', 'Wendy', type_=String())
+        BindParameter("%(4327771088 name)s", "Wendy", type_=String())
 
-    The expression above will render SQL such as::
+    The expression above will render SQL such as:
+
+    .. sourcecode:: sql
 
         user.name = :name_1
 
@@ -551,10 +561,12 @@ def bindparam(
     along where it is later used within statement execution.  If we
     invoke a statement like the following::
 
-        stmt = select(users_table).where(users_table.c.name == 'Wendy')
+        stmt = select(users_table).where(users_table.c.name == "Wendy")
         result = connection.execute(stmt)
 
-    We would see SQL logging output as::
+    We would see SQL logging output as:
+
+    .. sourcecode:: sql
 
         SELECT "user".id, "user".name
         FROM "user"
@@ -574,7 +586,9 @@ def bindparam(
         stmt = users_table.insert()
         result = connection.execute(stmt, {"name": "Wendy"})
 
-    The above will produce SQL output as::
+    The above will produce SQL output as:
+
+    .. sourcecode:: sql
 
         INSERT INTO "user" (name) VALUES (%(name)s)
         {'name': 'Wendy'}
@@ -738,16 +752,17 @@ def case(
 
         from sqlalchemy import case
 
-        stmt = select(users_table).\
-                    where(
-                        case(
-                            (users_table.c.name == 'wendy', 'W'),
-                            (users_table.c.name == 'jack', 'J'),
-                            else_='E'
-                        )
-                    )
+        stmt = select(users_table).where(
+            case(
+                (users_table.c.name == "wendy", "W"),
+                (users_table.c.name == "jack", "J"),
+                else_="E",
+            )
+        )
+
+    The above statement will produce SQL resembling:
 
-    The above statement will produce SQL resembling::
+    .. sourcecode:: sql
 
         SELECT id, name FROM user
         WHERE CASE
@@ -765,14 +780,9 @@ def case(
     compared against keyed to result expressions.  The statement below is
     equivalent to the preceding statement::
 
-        stmt = select(users_table).\
-                    where(
-                        case(
-                            {"wendy": "W", "jack": "J"},
-                            value=users_table.c.name,
-                            else_='E'
-                        )
-                    )
+        stmt = select(users_table).where(
+            case({"wendy": "W", "jack": "J"}, value=users_table.c.name, else_="E")
+        )
 
     The values which are accepted as result values in
     :paramref:`.case.whens` as well as with :paramref:`.case.else_` are
@@ -787,20 +797,16 @@ def case(
         from sqlalchemy import case, literal_column
 
         case(
-            (
-                orderline.c.qty > 100,
-                literal_column("'greaterthan100'")
-            ),
-            (
-                orderline.c.qty > 10,
-                literal_column("'greaterthan10'")
-            ),
-            else_=literal_column("'lessthan10'")
+            (orderline.c.qty > 100, literal_column("'greaterthan100'")),
+            (orderline.c.qty > 10, literal_column("'greaterthan10'")),
+            else_=literal_column("'lessthan10'"),
         )
 
     The above will render the given constants without using bound
     parameters for the result values (but still for the comparison
-    values), as in::
+    values), as in:
+
+    .. sourcecode:: sql
 
         CASE
             WHEN (orderline.qty > :qty_1) THEN 'greaterthan100'
@@ -821,8 +827,8 @@ def case(
      resulting value, e.g.::
 
         case(
-            (users_table.c.name == 'wendy', 'W'),
-            (users_table.c.name == 'jack', 'J')
+            (users_table.c.name == "wendy", "W"),
+            (users_table.c.name == "jack", "J"),
         )
 
      In the second form, it accepts a Python dictionary of comparison
@@ -830,10 +836,7 @@ def case(
      :paramref:`.case.value` to be present, and values will be compared
      using the ``==`` operator, e.g.::
 
-        case(
-            {"wendy": "W", "jack": "J"},
-            value=users_table.c.name
-        )
+        case({"wendy": "W", "jack": "J"}, value=users_table.c.name)
 
     :param value: An optional SQL expression which will be used as a
       fixed "comparison point" for candidate values within a dictionary
@@ -846,7 +849,7 @@ def case(
       expressions evaluate to true.
 
 
-    """
+    """  # noqa: E501
     return Case(*whens, value=value, else_=else_)
 
 
@@ -864,7 +867,9 @@ def cast(
 
         stmt = select(cast(product_table.c.unit_price, Numeric(10, 4)))
 
-    The above statement will produce SQL resembling::
+    The above statement will produce SQL resembling:
+
+    .. sourcecode:: sql
 
         SELECT CAST(unit_price AS NUMERIC(10, 4)) FROM product
 
@@ -933,11 +938,11 @@ def try_cast(
 
         from sqlalchemy import select, try_cast, Numeric
 
-        stmt = select(
-            try_cast(product_table.c.unit_price, Numeric(10, 4))
-        )
+        stmt = select(try_cast(product_table.c.unit_price, Numeric(10, 4)))
 
-    The above would render on Microsoft SQL Server as::
+    The above would render on Microsoft SQL Server as:
+
+    .. sourcecode:: sql
 
         SELECT TRY_CAST (product_table.unit_price AS NUMERIC(10, 4))
         FROM product_table
@@ -968,7 +973,9 @@ def column(
         id, name = column("id"), column("name")
         stmt = select(id, name).select_from("user")
 
-    The above statement would produce SQL like::
+    The above statement would produce SQL like:
+
+    .. sourcecode:: sql
 
         SELECT id, name FROM user
 
@@ -1004,13 +1011,14 @@ def column(
 
         from sqlalchemy import table, column, select
 
-        user = table("user",
-                column("id"),
-                column("name"),
-                column("description"),
+        user = table(
+            "user",
+            column("id"),
+            column("name"),
+            column("description"),
         )
 
-        stmt = select(user.c.description).where(user.c.name == 'wendy')
+        stmt = select(user.c.description).where(user.c.name == "wendy")
 
     A :func:`_expression.column` / :func:`.table`
     construct like that illustrated
@@ -1057,7 +1065,9 @@ def desc(
 
         stmt = select(users_table).order_by(desc(users_table.c.name))
 
-    will produce SQL as::
+    will produce SQL as:
+
+    .. sourcecode:: sql
 
         SELECT id, name FROM user ORDER BY name DESC
 
@@ -1096,9 +1106,12 @@ def distinct(expr: _ColumnExpressionArgument[_T]) -> UnaryExpression[_T]:
     an aggregate function, as in::
 
         from sqlalchemy import distinct, func
+
         stmt = select(users_table.c.id, func.count(distinct(users_table.c.name)))
 
-    The above would produce an statement resembling::
+    The above would produce a statement resembling:
+
+    .. sourcecode:: sql
 
         SELECT user.id, count(DISTINCT user.name) FROM user
 
@@ -1170,9 +1183,10 @@ def extract(field: str, expr: _ColumnExpressionArgument[Any]) -> Extract:
         from sqlalchemy import extract
         from sqlalchemy import table, column
 
-        logged_table = table("user",
-                column("id"),
-                column("date_created"),
+        logged_table = table(
+            "user",
+            column("id"),
+            column("date_created"),
         )
 
         stmt = select(logged_table.c.id).where(
@@ -1184,9 +1198,9 @@ def extract(field: str, expr: _ColumnExpressionArgument[Any]) -> Extract:
 
     Similarly, one can also select an extracted component::
 
-        stmt = select(
-            extract("YEAR", logged_table.c.date_created)
-        ).where(logged_table.c.id == 1)
+        stmt = select(extract("YEAR", logged_table.c.date_created)).where(
+            logged_table.c.id == 1
+        )
 
     The implementation of ``EXTRACT`` may vary across database backends.
     Users are reminded to consult their database documentation.
@@ -1245,7 +1259,8 @@ def funcfilter(
     E.g.::
 
         from sqlalchemy import funcfilter
-        funcfilter(func.count(1), MyClass.name == 'some name')
+
+        funcfilter(func.count(1), MyClass.name == "some name")
 
     Would produce "COUNT(1) FILTER (WHERE myclass.name = 'some name')".
 
@@ -1302,10 +1317,11 @@ def nulls_first(column: _ColumnExpressionArgument[_T]) -> UnaryExpression[_T]:
 
         from sqlalchemy import desc, nulls_first
 
-        stmt = select(users_table).order_by(
-            nulls_first(desc(users_table.c.name)))
+        stmt = select(users_table).order_by(nulls_first(desc(users_table.c.name)))
 
-    The SQL expression from the above would resemble::
+    The SQL expression from the above would resemble:
+
+    .. sourcecode:: sql
 
         SELECT id, name FROM user ORDER BY name DESC NULLS FIRST
 
@@ -1316,7 +1332,8 @@ def nulls_first(column: _ColumnExpressionArgument[_T]) -> UnaryExpression[_T]:
     function version, as in::
 
         stmt = select(users_table).order_by(
-            users_table.c.name.desc().nulls_first())
+            users_table.c.name.desc().nulls_first()
+        )
 
     .. versionchanged:: 1.4 :func:`.nulls_first` is renamed from
         :func:`.nullsfirst` in previous releases.
@@ -1332,7 +1349,7 @@ def nulls_first(column: _ColumnExpressionArgument[_T]) -> UnaryExpression[_T]:
 
         :meth:`_expression.Select.order_by`
 
-    """
+    """  # noqa: E501
     return UnaryExpression._create_nulls_first(column)
 
 
@@ -1346,10 +1363,11 @@ def nulls_last(column: _ColumnExpressionArgument[_T]) -> UnaryExpression[_T]:
 
         from sqlalchemy import desc, nulls_last
 
-        stmt = select(users_table).order_by(
-            nulls_last(desc(users_table.c.name)))
+        stmt = select(users_table).order_by(nulls_last(desc(users_table.c.name)))
 
-    The SQL expression from the above would resemble::
+    The SQL expression from the above would resemble:
+
+    .. sourcecode:: sql
 
         SELECT id, name FROM user ORDER BY name DESC NULLS LAST
 
@@ -1359,8 +1377,7 @@ def nulls_last(column: _ColumnExpressionArgument[_T]) -> UnaryExpression[_T]:
     rather than as its standalone
     function version, as in::
 
-        stmt = select(users_table).order_by(
-            users_table.c.name.desc().nulls_last())
+        stmt = select(users_table).order_by(users_table.c.name.desc().nulls_last())
 
     .. versionchanged:: 1.4 :func:`.nulls_last` is renamed from
         :func:`.nullslast` in previous releases.
@@ -1376,7 +1393,7 @@ def nulls_last(column: _ColumnExpressionArgument[_T]) -> UnaryExpression[_T]:
 
         :meth:`_expression.Select.order_by`
 
-    """
+    """  # noqa: E501
     return UnaryExpression._create_nulls_last(column)
 
 
@@ -1391,11 +1408,8 @@ def or_(  # type: ignore[empty-body]
         from sqlalchemy import or_
 
         stmt = select(users_table).where(
-                        or_(
-                            users_table.c.name == 'wendy',
-                            users_table.c.name == 'jack'
-                        )
-                    )
+            or_(users_table.c.name == "wendy", users_table.c.name == "jack")
+        )
 
     The :func:`.or_` conjunction is also available using the
     Python ``|`` operator (though note that compound expressions
@@ -1403,9 +1417,8 @@ def or_(  # type: ignore[empty-body]
     operator precedence behavior)::
 
         stmt = select(users_table).where(
-                        (users_table.c.name == 'wendy') |
-                        (users_table.c.name == 'jack')
-                    )
+            (users_table.c.name == "wendy") | (users_table.c.name == "jack")
+        )
 
     The :func:`.or_` construct must be given at least one positional
     argument in order to be valid; a :func:`.or_` construct with no
@@ -1415,6 +1428,7 @@ def or_(  # type: ignore[empty-body]
     specified::
 
         from sqlalchemy import false
+
         or_criteria = or_(false(), *expressions)
 
     The above expression will compile to SQL as the expression ``false``
@@ -1446,11 +1460,8 @@ if not TYPE_CHECKING:
             from sqlalchemy import or_
 
             stmt = select(users_table).where(
-                            or_(
-                                users_table.c.name == 'wendy',
-                                users_table.c.name == 'jack'
-                            )
-                        )
+                or_(users_table.c.name == "wendy", users_table.c.name == "jack")
+            )
 
         The :func:`.or_` conjunction is also available using the
         Python ``|`` operator (though note that compound expressions
@@ -1458,9 +1469,8 @@ if not TYPE_CHECKING:
         operator precedence behavior)::
 
             stmt = select(users_table).where(
-                            (users_table.c.name == 'wendy') |
-                            (users_table.c.name == 'jack')
-                        )
+                (users_table.c.name == "wendy") | (users_table.c.name == "jack")
+            )
 
         The :func:`.or_` construct must be given at least one positional
         argument in order to be valid; a :func:`.or_` construct with no
@@ -1470,6 +1480,7 @@ if not TYPE_CHECKING:
         specified::
 
             from sqlalchemy import false
+
             or_criteria = or_(false(), *expressions)
 
         The above expression will compile to SQL as the expression ``false``
@@ -1487,7 +1498,7 @@ if not TYPE_CHECKING:
 
             :func:`.and_`
 
-        """
+        """  # noqa: E501
         return BooleanClauseList.or_(*clauses)
 
 
@@ -1508,7 +1519,9 @@ def over(
 
         func.row_number().over(order_by=mytable.c.some_column)
 
-    Would produce::
+    Would produce:
+
+    .. sourcecode:: sql
 
         ROW_NUMBER() OVER(ORDER BY some_column)
 
@@ -1517,10 +1530,11 @@ def over(
     mutually-exclusive parameters each accept a 2-tuple, which contains
     a combination of integers and None::
 
-        func.row_number().over(
-            order_by=my_table.c.some_column, range_=(None, 0))
+        func.row_number().over(order_by=my_table.c.some_column, range_=(None, 0))
+
+    The above would produce:
 
-    The above would produce::
+    .. sourcecode:: sql
 
         ROW_NUMBER() OVER(ORDER BY some_column
         RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)
@@ -1531,19 +1545,19 @@ def over(
 
     * RANGE BETWEEN 5 PRECEDING AND 10 FOLLOWING::
 
-        func.row_number().over(order_by='x', range_=(-5, 10))
+        func.row_number().over(order_by="x", range_=(-5, 10))
 
     * ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW::
 
-        func.row_number().over(order_by='x', rows=(None, 0))
+        func.row_number().over(order_by="x", rows=(None, 0))
 
     * RANGE BETWEEN 2 PRECEDING AND UNBOUNDED FOLLOWING::
 
-        func.row_number().over(order_by='x', range_=(-2, None))
+        func.row_number().over(order_by="x", range_=(-2, None))
 
     * RANGE BETWEEN 1 FOLLOWING AND 3 FOLLOWING::
 
-        func.row_number().over(order_by='x', range_=(1, 3))
+        func.row_number().over(order_by="x", range_=(1, 3))
 
     :param element: a :class:`.FunctionElement`, :class:`.WithinGroup`,
      or other compatible construct.
@@ -1572,7 +1586,7 @@ def over(
 
         :func:`_expression.within_group`
 
-    """
+    """  # noqa: E501
     return Over(element, partition_by, order_by, range_, rows)
 
 
@@ -1621,9 +1635,11 @@ def text(text: str) -> TextClause:
     method allows
     specification of return columns including names and types::
 
-        t = text("SELECT * FROM users WHERE id=:user_id").\
-                bindparams(user_id=7).\
-                columns(id=Integer, name=String)
+        t = (
+            text("SELECT * FROM users WHERE id=:user_id")
+            .bindparams(user_id=7)
+            .columns(id=Integer, name=String)
+        )
 
         for id, name in connection.execute(t):
             print(id, name)
@@ -1705,9 +1721,7 @@ def tuple_(
 
         from sqlalchemy import tuple_
 
-        tuple_(table.c.col1, table.c.col2).in_(
-            [(1, 2), (5, 12), (10, 19)]
-        )
+        tuple_(table.c.col1, table.c.col2).in_([(1, 2), (5, 12), (10, 19)])
 
     .. versionchanged:: 1.3.6 Added support for SQLite IN tuples.
 
@@ -1757,10 +1771,9 @@ def type_coerce(
     :meth:`_expression.ColumnElement.label`::
 
         stmt = select(
-            type_coerce(log_table.date_string, StringDateTime()).label('date')
+            type_coerce(log_table.date_string, StringDateTime()).label("date")
         )
 
-
     A type that features bound-value handling will also have that behavior
     take effect when literal values or :func:`.bindparam` constructs are
     passed to :func:`.type_coerce` as targets.
@@ -1821,11 +1834,10 @@ def within_group(
     the :meth:`.FunctionElement.within_group` method, e.g.::
 
         from sqlalchemy import within_group
+
         stmt = select(
             department.c.id,
-            func.percentile_cont(0.5).within_group(
-                department.c.salary.desc()
-            )
+            func.percentile_cont(0.5).within_group(department.c.salary.desc()),
         )
 
     The above statement would produce SQL similar to
index 1737597172362ce91d6cfa4dfad704bc6a3ee48e..18bdc63eacd063e3a2e9bd9b60858326c0d9ccc1 100644 (file)
@@ -157,16 +157,16 @@ def exists(
     :meth:`_sql.SelectBase.exists` method::
 
         exists_criteria = (
-            select(table2.c.col2).
-            where(table1.c.col1 == table2.c.col2).
-            exists()
+            select(table2.c.col2).where(table1.c.col1 == table2.c.col2).exists()
         )
 
     The EXISTS criteria is then used inside of an enclosing SELECT::
 
         stmt = select(table1.c.col1).where(exists_criteria)
 
-    The above statement will then be of the form::
+    The above statement will then be of the form:
+
+    .. sourcecode:: sql
 
         SELECT col1 FROM table1 WHERE EXISTS
         (SELECT table2.col2 FROM table2 WHERE table2.col2 = table1.col1)
@@ -227,11 +227,14 @@ def join(
 
     E.g.::
 
-        j = join(user_table, address_table,
-                 user_table.c.id == address_table.c.user_id)
+        j = join(
+            user_table, address_table, user_table.c.id == address_table.c.user_id
+        )
         stmt = select(user_table).select_from(j)
 
-    would emit SQL along the lines of::
+    would emit SQL along the lines of:
+
+    .. sourcecode:: sql
 
         SELECT user.id, user.name FROM user
         JOIN address ON user.id = address.user_id
@@ -265,7 +268,7 @@ def join(
 
         :class:`_expression.Join` - the type of object produced.
 
-    """
+    """  # noqa: E501
 
     return Join(left, right, onclause, isouter, full)
 
@@ -541,13 +544,14 @@ def tablesample(
         from sqlalchemy import func
 
         selectable = people.tablesample(
-                    func.bernoulli(1),
-                    name='alias',
-                    seed=func.random())
+            func.bernoulli(1), name="alias", seed=func.random()
+        )
         stmt = select(selectable.c.people_id)
 
     Assuming ``people`` with a column ``people_id``, the above
-    statement would render as::
+    statement would render as:
+
+    .. sourcecode:: sql
 
         SELECT alias.people_id FROM
         people AS alias TABLESAMPLE bernoulli(:bernoulli_1)
@@ -625,12 +629,10 @@ def values(
         from sqlalchemy import values
 
         value_expr = values(
-            column('id', Integer),
-            column('name', String),
-            name="my_values"
-        ).data(
-            [(1, 'name1'), (2, 'name2'), (3, 'name3')]
-        )
+            column("id", Integer),
+            column("name", String),
+            name="my_values",
+        ).data([(1, "name1"), (2, "name2"), (3, "name3")])
 
     :param \*columns: column expressions, typically composed using
      :func:`_expression.column` objects.
index 970d0dd754f5d477e7944767fe1b4a7a17fc35d4..17c0a92df28b90befe4c602b20dc37315de51d51 100644 (file)
@@ -479,7 +479,7 @@ class DialectKWArgs:
 
             Index.argument_for("mydialect", "length", None)
 
-            some_index = Index('a', 'b', mydialect_length=5)
+            some_index = Index("a", "b", mydialect_length=5)
 
         The :meth:`.DialectKWArgs.argument_for` method is a per-argument
         way adding extra arguments to the
@@ -568,7 +568,7 @@ class DialectKWArgs:
         and ``<argument_name>``.  For example, the ``postgresql_where``
         argument would be locatable as::
 
-            arg = my_object.dialect_options['postgresql']['where']
+            arg = my_object.dialect_options["postgresql"]["where"]
 
         .. versionadded:: 0.9.2
 
@@ -916,11 +916,7 @@ class Options(metaclass=_MetaOptions):
                 execution_options,
             ) = QueryContext.default_load_options.from_execution_options(
                 "_sa_orm_load_options",
-                {
-                    "populate_existing",
-                    "autoflush",
-                    "yield_per"
-                },
+                {"populate_existing", "autoflush", "yield_per"},
                 execution_options,
                 statement._execution_options,
             )
@@ -1224,6 +1220,7 @@ class Executable(roles.StatementRole):
 
              from sqlalchemy import event
 
+
              @event.listens_for(some_engine, "before_execute")
              def _process_opt(conn, statement, multiparams, params, execution_options):
                  "run a SQL function before invoking a statement"
@@ -1475,14 +1472,14 @@ class ColumnCollection(Generic[_COLKEY, _COL_co]):
     mean either two columns with the same key, in which case the column
     returned by key  access is **arbitrary**::
 
-        >>> x1, x2 = Column('x', Integer), Column('x', Integer)
+        >>> x1, x2 = Column("x", Integer), Column("x", Integer)
         >>> cc = ColumnCollection(columns=[(x1.name, x1), (x2.name, x2)])
         >>> list(cc)
         [Column('x', Integer(), table=None),
          Column('x', Integer(), table=None)]
-        >>> cc['x'] is x1
+        >>> cc["x"] is x1
         False
-        >>> cc['x'] is x2
+        >>> cc["x"] is x2
         True
 
     Or it can also mean the same column multiple times.   These cases are
@@ -2033,8 +2030,8 @@ class DedupeColumnCollection(ColumnCollection[str, _NAMEDCOL]):
 
         e.g.::
 
-            t = Table('sometable', metadata, Column('col1', Integer))
-            t.columns.replace(Column('col1', Integer, key='columnone'))
+            t = Table("sometable", metadata, Column("col1", Integer))
+            t.columns.replace(Column("col1", Integer, key="columnone"))
 
         will remove the original 'col1' from the collection, and add
         the new column under the name 'columnname'.
index ff7838e6dadf0e7a999f1ba26bb6f7bff13ac7df..c524f896f957e37962820c03b4d0b861a54a0841 100644 (file)
@@ -155,8 +155,8 @@ class ExecutableDDLElement(roles.DDLRole, Executable, BaseDDLElement):
 
         event.listen(
             users,
-            'after_create',
-            AddConstraint(constraint).execute_if(dialect='postgresql')
+            "after_create",
+            AddConstraint(constraint).execute_if(dialect="postgresql"),
         )
 
     .. seealso::
@@ -231,20 +231,20 @@ class ExecutableDDLElement(roles.DDLRole, Executable, BaseDDLElement):
         Used to provide a wrapper for event listening::
 
             event.listen(
-                        metadata,
-                        'before_create',
-                        DDL("my_ddl").execute_if(dialect='postgresql')
-                    )
+                metadata,
+                "before_create",
+                DDL("my_ddl").execute_if(dialect="postgresql"),
+            )
 
         :param dialect: May be a string or tuple of strings.
           If a string, it will be compared to the name of the
           executing database dialect::
 
-            DDL('something').execute_if(dialect='postgresql')
+            DDL("something").execute_if(dialect="postgresql")
 
           If a tuple, specifies multiple dialect names::
 
-            DDL('something').execute_if(dialect=('postgresql', 'mysql'))
+            DDL("something").execute_if(dialect=("postgresql", "mysql"))
 
         :param callable\_: A callable, which will be invoked with
           three positional arguments as well as optional keyword
@@ -342,17 +342,19 @@ class DDL(ExecutableDDLElement):
 
       from sqlalchemy import event, DDL
 
-      tbl = Table('users', metadata, Column('uid', Integer))
-      event.listen(tbl, 'before_create', DDL('DROP TRIGGER users_trigger'))
+      tbl = Table("users", metadata, Column("uid", Integer))
+      event.listen(tbl, "before_create", DDL("DROP TRIGGER users_trigger"))
 
-      spow = DDL('ALTER TABLE %(table)s SET secretpowers TRUE')
-      event.listen(tbl, 'after_create', spow.execute_if(dialect='somedb'))
+      spow = DDL("ALTER TABLE %(table)s SET secretpowers TRUE")
+      event.listen(tbl, "after_create", spow.execute_if(dialect="somedb"))
 
-      drop_spow = DDL('ALTER TABLE users SET secretpowers FALSE')
+      drop_spow = DDL("ALTER TABLE users SET secretpowers FALSE")
       connection.execute(drop_spow)
 
     When operating on Table events, the following ``statement``
-    string substitutions are available::
+    string substitutions are available:
+
+    .. sourcecode:: text
 
       %(table)s  - the Table name, with any required quoting applied
       %(schema)s - the schema name, with any required quoting applied
@@ -568,6 +570,7 @@ class CreateColumn(BaseDDLElement):
         from sqlalchemy import schema
         from sqlalchemy.ext.compiler import compiles
 
+
         @compiles(schema.CreateColumn)
         def compile(element, compiler, **kw):
             column = element.element
@@ -576,9 +579,9 @@ class CreateColumn(BaseDDLElement):
                 return compiler.visit_create_column(element, **kw)
 
             text = "%s SPECIAL DIRECTIVE %s" % (
-                    column.name,
-                    compiler.type_compiler.process(column.type)
-                )
+                column.name,
+                compiler.type_compiler.process(column.type),
+            )
             default = compiler.get_column_default_string(column)
             if default is not None:
                 text += " DEFAULT " + default
@@ -588,8 +591,8 @@ class CreateColumn(BaseDDLElement):
 
             if column.constraints:
                 text += " ".join(
-                            compiler.process(const)
-                            for const in column.constraints)
+                    compiler.process(const) for const in column.constraints
+                )
             return text
 
     The above construct can be applied to a :class:`_schema.Table`
@@ -600,17 +603,21 @@ class CreateColumn(BaseDDLElement):
 
         metadata = MetaData()
 
-        table = Table('mytable', MetaData(),
-                Column('x', Integer, info={"special":True}, primary_key=True),
-                Column('y', String(50)),
-                Column('z', String(20), info={"special":True})
-            )
+        table = Table(
+            "mytable",
+            MetaData(),
+            Column("x", Integer, info={"special": True}, primary_key=True),
+            Column("y", String(50)),
+            Column("z", String(20), info={"special": True}),
+        )
 
         metadata.create_all(conn)
 
     Above, the directives we've added to the :attr:`_schema.Column.info`
     collection
-    will be detected by our custom compilation scheme::
+    will be detected by our custom compilation scheme:
+
+    .. sourcecode:: sql
 
         CREATE TABLE mytable (
                 x SPECIAL DIRECTIVE INTEGER NOT NULL,
@@ -635,18 +642,21 @@ class CreateColumn(BaseDDLElement):
 
         from sqlalchemy.schema import CreateColumn
 
+
         @compiles(CreateColumn, "postgresql")
         def skip_xmin(element, compiler, **kw):
-            if element.element.name == 'xmin':
+            if element.element.name == "xmin":
                 return None
             else:
                 return compiler.visit_create_column(element, **kw)
 
 
-        my_table = Table('mytable', metadata,
-                    Column('id', Integer, primary_key=True),
-                    Column('xmin', Integer)
-                )
+        my_table = Table(
+            "mytable",
+            metadata,
+            Column("id", Integer, primary_key=True),
+            Column("xmin", Integer),
+        )
 
     Above, a :class:`.CreateTable` construct will generate a ``CREATE TABLE``
     which only includes the ``id`` column in the string; the ``xmin`` column
index e934028297ea5f36e68e900fe89c564907920631..4fa4c67f00c248cedf87a30ac645c611063bf176 100644 (file)
@@ -529,11 +529,11 @@ class UpdateBase(
 
         E.g.::
 
-            stmt = table.insert().values(data='newdata').return_defaults()
+            stmt = table.insert().values(data="newdata").return_defaults()
 
             result = connection.execute(stmt)
 
-            server_created_at = result.returned_defaults['created_at']
+            server_created_at = result.returned_defaults["created_at"]
 
         When used against an UPDATE statement
         :meth:`.UpdateBase.return_defaults` instead looks for columns that
@@ -1036,7 +1036,7 @@ class ValuesBase(UpdateBase):
 
                 users.insert().values(name="some name")
 
-                users.update().where(users.c.id==5).values(name="some name")
+                users.update().where(users.c.id == 5).values(name="some name")
 
         :param \*args: As an alternative to passing key/value parameters,
          a dictionary, tuple, or list of dictionaries or tuples can be passed
@@ -1066,13 +1066,17 @@ class ValuesBase(UpdateBase):
          this syntax is supported on backends such as SQLite, PostgreSQL,
          MySQL, but not necessarily others::
 
-            users.insert().values([
-                                {"name": "some name"},
-                                {"name": "some other name"},
-                                {"name": "yet another name"},
-                            ])
+            users.insert().values(
+                [
+                    {"name": "some name"},
+                    {"name": "some other name"},
+                    {"name": "yet another name"},
+                ]
+            )
+
+         The above form would render a multiple VALUES statement similar to:
 
-         The above form would render a multiple VALUES statement similar to::
+         .. sourcecode:: sql
 
                 INSERT INTO users (name) VALUES
                                 (:name_1),
@@ -1250,7 +1254,7 @@ class Insert(ValuesBase):
         e.g.::
 
             sel = select(table1.c.a, table1.c.b).where(table1.c.c > 5)
-            ins = table2.insert().from_select(['a', 'b'], sel)
+            ins = table2.insert().from_select(["a", "b"], sel)
 
         :param names: a sequence of string column names or
          :class:`_schema.Column`
@@ -1550,9 +1554,7 @@ class Update(DMLWhereBase, ValuesBase):
 
         E.g.::
 
-            stmt = table.update().ordered_values(
-                ("name", "ed"), ("ident", "foo")
-            )
+            stmt = table.update().ordered_values(("name", "ed"), ("ident", "foo"))
 
         .. seealso::
 
@@ -1565,7 +1567,7 @@ class Update(DMLWhereBase, ValuesBase):
            :paramref:`_expression.update.preserve_parameter_order`
            parameter, which will be removed in SQLAlchemy 2.0.
 
-        """
+        """  # noqa: E501
         if self._values:
             raise exc.ArgumentError(
                 "This statement already has values present"
index 78278315576a5636fb3eea380ddabf8b6961cd2d..4ca8ec4b43f18b35cd8b7152e8e84d6e0eedb26b 100644 (file)
@@ -283,7 +283,7 @@ class CompilerElement(Visitable):
 
                 from sqlalchemy.sql import table, column, select
 
-                t = table('t', column('x'))
+                t = table("t", column("x"))
 
                 s = select(t).where(t.c.x == 5)
 
@@ -588,10 +588,10 @@ class ClauseElement(
         :func:`_expression.bindparam`
         elements replaced with values taken from the given dictionary::
 
-          >>> clause = column('x') + bindparam('foo')
+          >>> clause = column("x") + bindparam("foo")
           >>> print(clause.compile().params)
           {'foo':None}
-          >>> print(clause.params({'foo':7}).compile().params)
+          >>> print(clause.params({"foo": 7}).compile().params)
           {'foo':7}
 
         """
@@ -1290,9 +1290,9 @@ class ColumnElement(
     .. sourcecode:: pycon+sql
 
         >>> from sqlalchemy.sql import column
-        >>> column('a') + column('b')
+        >>> column("a") + column("b")
         <sqlalchemy.sql.expression.BinaryExpression object at 0x101029dd0>
-        >>> print(column('a') + column('b'))
+        >>> print(column("a") + column("b"))
         {printsql}a + b
 
     .. seealso::
@@ -1381,7 +1381,9 @@ class ColumnElement(
         SQL.
 
         Concretely, this is the "name" of a column or a label in a
-        SELECT statement; ``<columnname>`` and ``<labelname>`` below::
+        SELECT statement; ``<columnname>`` and ``<labelname>`` below:
+
+        .. sourcecode:: sql
 
             SELECT <columnmame> FROM table
 
@@ -2242,7 +2244,6 @@ class TextClause(
         t = text("SELECT * FROM users")
         result = connection.execute(t)
 
-
     The :class:`_expression.TextClause` construct is produced using the
     :func:`_expression.text`
     function; see that function for full documentation.
@@ -2319,16 +2320,19 @@ class TextClause(
         Given a text construct such as::
 
             from sqlalchemy import text
-            stmt = text("SELECT id, name FROM user WHERE name=:name "
-                        "AND timestamp=:timestamp")
+
+            stmt = text(
+                "SELECT id, name FROM user WHERE name=:name AND timestamp=:timestamp"
+            )
 
         the :meth:`_expression.TextClause.bindparams`
         method can be used to establish
         the initial value of ``:name`` and ``:timestamp``,
         using simple keyword arguments::
 
-            stmt = stmt.bindparams(name='jack',
-                        timestamp=datetime.datetime(2012, 10, 8, 15, 12, 5))
+            stmt = stmt.bindparams(
+                name="jack", timestamp=datetime.datetime(2012, 10, 8, 15, 12, 5)
+            )
 
         Where above, new :class:`.BindParameter` objects
         will be generated with the names ``name`` and ``timestamp``, and
@@ -2343,10 +2347,11 @@ class TextClause(
         argument, then an optional value and type::
 
             from sqlalchemy import bindparam
+
             stmt = stmt.bindparams(
-                            bindparam('name', value='jack', type_=String),
-                            bindparam('timestamp', type_=DateTime)
-                        )
+                bindparam("name", value="jack", type_=String),
+                bindparam("timestamp", type_=DateTime),
+            )
 
         Above, we specified the type of :class:`.DateTime` for the
         ``timestamp`` bind, and the type of :class:`.String` for the ``name``
@@ -2356,8 +2361,9 @@ class TextClause(
         Additional bound parameters can be supplied at statement execution
         time, e.g.::
 
-            result = connection.execute(stmt,
-                        timestamp=datetime.datetime(2012, 10, 8, 15, 12, 5))
+            result = connection.execute(
+                stmt, timestamp=datetime.datetime(2012, 10, 8, 15, 12, 5)
+            )
 
         The :meth:`_expression.TextClause.bindparams`
         method can be called repeatedly,
@@ -2367,15 +2373,15 @@ class TextClause(
         first with typing information, and a
         second time with value information, and it will be combined::
 
-            stmt = text("SELECT id, name FROM user WHERE name=:name "
-                        "AND timestamp=:timestamp")
+            stmt = text(
+                "SELECT id, name FROM user WHERE name=:name "
+                "AND timestamp=:timestamp"
+            )
             stmt = stmt.bindparams(
-                bindparam('name', type_=String),
-                bindparam('timestamp', type_=DateTime)
+                bindparam("name", type_=String), bindparam("timestamp", type_=DateTime)
             )
             stmt = stmt.bindparams(
-                name='jack',
-                timestamp=datetime.datetime(2012, 10, 8, 15, 12, 5)
+                name="jack", timestamp=datetime.datetime(2012, 10, 8, 15, 12, 5)
             )
 
         The :meth:`_expression.TextClause.bindparams`
@@ -2389,18 +2395,17 @@ class TextClause(
         object::
 
             stmt1 = text("select id from table where name=:name").bindparams(
-                bindparam("name", value='name1', unique=True)
+                bindparam("name", value="name1", unique=True)
             )
             stmt2 = text("select id from table where name=:name").bindparams(
-                bindparam("name", value='name2', unique=True)
+                bindparam("name", value="name2", unique=True)
             )
 
-            union = union_all(
-                stmt1.columns(column("id")),
-                stmt2.columns(column("id"))
-            )
+            union = union_all(stmt1.columns(column("id")), stmt2.columns(column("id")))
+
+        The above statement will render as:
 
-        The above statement will render as::
+        .. sourcecode:: sql
 
             select id from table where name=:name_1
             UNION ALL select id from table where name=:name_2
@@ -2410,7 +2415,7 @@ class TextClause(
            :func:`_expression.text`
            constructs.
 
-        """
+        """  # noqa: E501
         self._bindparams = new_params = self._bindparams.copy()
 
         for bind in binds:
@@ -2464,12 +2469,13 @@ class TextClause(
             from sqlalchemy.sql import column, text
 
             stmt = text("SELECT id, name FROM some_table")
-            stmt = stmt.columns(column('id'), column('name')).subquery('st')
+            stmt = stmt.columns(column("id"), column("name")).subquery("st")
 
-            stmt = select(mytable).\
-                    select_from(
-                        mytable.join(stmt, mytable.c.name == stmt.c.name)
-                    ).where(stmt.c.id > 5)
+            stmt = (
+                select(mytable)
+                .select_from(mytable.join(stmt, mytable.c.name == stmt.c.name))
+                .where(stmt.c.id > 5)
+            )
 
         Above, we pass a series of :func:`_expression.column` elements to the
         :meth:`_expression.TextClause.columns` method positionally.  These
@@ -2490,10 +2496,10 @@ class TextClause(
 
             stmt = text("SELECT id, name, timestamp FROM some_table")
             stmt = stmt.columns(
-                        column('id', Integer),
-                        column('name', Unicode),
-                        column('timestamp', DateTime)
-                    )
+                column("id", Integer),
+                column("name", Unicode),
+                column("timestamp", DateTime),
+            )
 
             for id, name, timestamp in connection.execute(stmt):
                 print(id, name, timestamp)
@@ -2502,11 +2508,7 @@ class TextClause(
         types alone may be used, if only type conversion is needed::
 
             stmt = text("SELECT id, name, timestamp FROM some_table")
-            stmt = stmt.columns(
-                        id=Integer,
-                        name=Unicode,
-                        timestamp=DateTime
-                    )
+            stmt = stmt.columns(id=Integer, name=Unicode, timestamp=DateTime)
 
             for id, name, timestamp in connection.execute(stmt):
                 print(id, name, timestamp)
@@ -2520,26 +2522,31 @@ class TextClause(
         the result set will match to those columns positionally, meaning the
         name or origin of the column in the textual SQL doesn't matter::
 
-            stmt = text("SELECT users.id, addresses.id, users.id, "
-                 "users.name, addresses.email_address AS email "
-                 "FROM users JOIN addresses ON users.id=addresses.user_id "
-                 "WHERE users.id = 1").columns(
-                    User.id,
-                    Address.id,
-                    Address.user_id,
-                    User.name,
-                    Address.email_address
-                 )
+            stmt = text(
+                "SELECT users.id, addresses.id, users.id, "
+                "users.name, addresses.email_address AS email "
+                "FROM users JOIN addresses ON users.id=addresses.user_id "
+                "WHERE users.id = 1"
+            ).columns(
+                User.id,
+                Address.id,
+                Address.user_id,
+                User.name,
+                Address.email_address,
+            )
 
-            query = session.query(User).from_statement(stmt).options(
-                contains_eager(User.addresses))
+            query = (
+                session.query(User)
+                .from_statement(stmt)
+                .options(contains_eager(User.addresses))
+            )
 
         The :meth:`_expression.TextClause.columns` method provides a direct
         route to calling :meth:`_expression.FromClause.subquery` as well as
         :meth:`_expression.SelectBase.cte`
         against a textual SELECT statement::
 
-            stmt = stmt.columns(id=Integer, name=String).cte('st')
+            stmt = stmt.columns(id=Integer, name=String).cte("st")
 
             stmt = select(sometable).where(sometable.c.id == stmt.c.id)
 
@@ -3284,14 +3291,13 @@ class Case(ColumnElement[_T]):
 
         from sqlalchemy import case
 
-        stmt = select(users_table).\
-                    where(
-                        case(
-                            (users_table.c.name == 'wendy', 'W'),
-                            (users_table.c.name == 'jack', 'J'),
-                            else_='E'
-                        )
-                    )
+        stmt = select(users_table).where(
+            case(
+                (users_table.c.name == "wendy", "W"),
+                (users_table.c.name == "jack", "J"),
+                else_="E",
+            )
+        )
 
     Details on :class:`.Case` usage is at :func:`.case`.
 
@@ -3829,9 +3835,9 @@ class BinaryExpression(OperatorExpression[_T]):
     .. sourcecode:: pycon+sql
 
         >>> from sqlalchemy.sql import column
-        >>> column('a') + column('b')
+        >>> column("a") + column("b")
         <sqlalchemy.sql.expression.BinaryExpression object at 0x101029dd0>
-        >>> print(column('a') + column('b'))
+        >>> print(column("a") + column("b"))
         {printsql}a + b
 
     """
@@ -3920,7 +3926,7 @@ class BinaryExpression(OperatorExpression[_T]):
         The rationale here is so that ColumnElement objects can be hashable.
         What?  Well, suppose you do this::
 
-            c1, c2 = column('x'), column('y')
+            c1, c2 = column("x"), column("y")
             s1 = set([c1, c2])
 
         We do that **a lot**, columns inside of sets is an extremely basic
@@ -4505,12 +4511,13 @@ class FunctionFilter(Generative, ColumnElement[_T]):
 
         The expression::
 
-            func.rank().filter(MyClass.y > 5).over(order_by='x')
+            func.rank().filter(MyClass.y > 5).over(order_by="x")
 
         is shorthand for::
 
             from sqlalchemy import over, funcfilter
-            over(funcfilter(func.rank(), MyClass.y > 5), order_by='x')
+
+            over(funcfilter(func.rank(), MyClass.y > 5), order_by="x")
 
         See :func:`_expression.over` for a full description.
 
@@ -4872,7 +4879,9 @@ class ColumnClause(
         id, name = column("id"), column("name")
         stmt = select(id, name).select_from("user")
 
-    The above statement would produce SQL like::
+    The above statement would produce SQL like:
+
+    .. sourcecode:: sql
 
         SELECT id, name FROM user
 
@@ -5427,11 +5436,12 @@ class conv(_truncated_label):
     E.g. when we create a :class:`.Constraint` using a naming convention
     as follows::
 
-        m = MetaData(naming_convention={
-            "ck": "ck_%(table_name)s_%(constraint_name)s"
-        })
-        t = Table('t', m, Column('x', Integer),
-                        CheckConstraint('x > 5', name='x5'))
+        m = MetaData(
+            naming_convention={"ck": "ck_%(table_name)s_%(constraint_name)s"}
+        )
+        t = Table(
+            "t", m, Column("x", Integer), CheckConstraint("x > 5", name="x5")
+        )
 
     The name of the above constraint will be rendered as ``"ck_t_x5"``.
     That is, the existing name ``x5`` is used in the naming convention as the
@@ -5444,11 +5454,15 @@ class conv(_truncated_label):
     use this explicitly as follows::
 
 
-        m = MetaData(naming_convention={
-            "ck": "ck_%(table_name)s_%(constraint_name)s"
-        })
-        t = Table('t', m, Column('x', Integer),
-                        CheckConstraint('x > 5', name=conv('ck_t_x5')))
+        m = MetaData(
+            naming_convention={"ck": "ck_%(table_name)s_%(constraint_name)s"}
+        )
+        t = Table(
+            "t",
+            m,
+            Column("x", Integer),
+            CheckConstraint("x > 5", name=conv("ck_t_x5")),
+        )
 
     Where above, the :func:`_schema.conv` marker indicates that the constraint
     name here is final, and the name will render as ``"ck_t_x5"`` and not
index 1a6a9a6a7d0f41f851ee9fcf8132d4424825e3da..e9d19f337d02ecd5b59d8578c14bcb602a0ddff3 100644 (file)
@@ -63,13 +63,14 @@ class DDLEvents(event.Events[SchemaEventTarget]):
         from sqlalchemy import Table, Column, Metadata, Integer
 
         m = MetaData()
-        some_table = Table('some_table', m, Column('data', Integer))
+        some_table = Table("some_table", m, Column("data", Integer))
+
 
         @event.listens_for(some_table, "after_create")
         def after_create(target, connection, **kw):
-            connection.execute(text(
-                "ALTER TABLE %s SET name=foo_%s" % (target.name, target.name)
-            ))
+            connection.execute(
+                text("ALTER TABLE %s SET name=foo_%s" % (target.name, target.name))
+            )
 
 
         some_engine = create_engine("postgresql://scott:tiger@host/test")
@@ -127,10 +128,11 @@ class DDLEvents(event.Events[SchemaEventTarget]):
     as listener callables::
 
         from sqlalchemy import DDL
+
         event.listen(
             some_table,
             "after_create",
-            DDL("ALTER TABLE %(table)s SET name=foo_%(table)s")
+            DDL("ALTER TABLE %(table)s SET name=foo_%(table)s"),
         )
 
     **Event Propagation to MetaData Copies**
@@ -149,7 +151,7 @@ class DDLEvents(event.Events[SchemaEventTarget]):
             some_table,
             "after_create",
             DDL("ALTER TABLE %(table)s SET name=foo_%(table)s"),
-            propagate=True
+            propagate=True,
         )
 
         new_metadata = MetaData()
@@ -169,7 +171,7 @@ class DDLEvents(event.Events[SchemaEventTarget]):
 
         :ref:`schema_ddl_sequences`
 
-    """
+    """  # noqa: E501
 
     _target_class_doc = "SomeSchemaClassOrObject"
     _dispatch_target = SchemaEventTarget
@@ -358,16 +360,17 @@ class DDLEvents(event.Events[SchemaEventTarget]):
 
             metadata = MetaData()
 
-            @event.listens_for(metadata, 'column_reflect')
+
+            @event.listens_for(metadata, "column_reflect")
             def receive_column_reflect(inspector, table, column_info):
                 # receives for all Table objects that are reflected
                 # under this MetaData
+                ...
 
 
             # will use the above event hook
             my_table = Table("my_table", metadata, autoload_with=some_engine)
 
-
         .. versionadded:: 1.4.0b2 The :meth:`_events.DDLEvents.column_reflect`
            hook may now be applied to a :class:`_schema.MetaData` object as
            well as the :class:`_schema.MetaData` class itself where it will
@@ -379,9 +382,11 @@ class DDLEvents(event.Events[SchemaEventTarget]):
 
             from sqlalchemy import Table
 
-            @event.listens_for(Table, 'column_reflect')
+
+            @event.listens_for(Table, "column_reflect")
             def receive_column_reflect(inspector, table, column_info):
                 # receives for all Table objects that are reflected
+                ...
 
         It can also be applied to a specific :class:`_schema.Table` at the
         point that one is being reflected using the
@@ -390,9 +395,7 @@ class DDLEvents(event.Events[SchemaEventTarget]):
             t1 = Table(
                 "my_table",
                 autoload_with=some_engine,
-                listeners=[
-                    ('column_reflect', receive_column_reflect)
-                ]
+                listeners=[("column_reflect", receive_column_reflect)],
             )
 
         The dictionary of column information as returned by the
index 3ebf5c0a1ef566d01d74bd13997d83030c02241d..7160922cc6c6b7b69a782dd99d42239bf0c9c556 100644 (file)
@@ -246,9 +246,8 @@ class FunctionElement(Executable, ColumnElement[_T], FromClause, Generative):
 
         .. sourcecode:: pycon+sql
 
-            >>> fn = (
-            ...     func.generate_series(1, 5).
-            ...     table_valued("value", "start", "stop", "step")
+            >>> fn = func.generate_series(1, 5).table_valued(
+            ...     "value", "start", "stop", "step"
             ... )
 
             >>> print(select(fn))
@@ -265,7 +264,9 @@ class FunctionElement(Executable, ColumnElement[_T], FromClause, Generative):
 
         .. sourcecode:: pycon+sql
 
-            >>> fn = func.generate_series(4, 1, -1).table_valued("gen", with_ordinality="ordinality")
+            >>> fn = func.generate_series(4, 1, -1).table_valued(
+            ...     "gen", with_ordinality="ordinality"
+            ... )
             >>> print(select(fn))
             {printsql}SELECT anon_1.gen, anon_1.ordinality
             FROM generate_series(:generate_series_1, :generate_series_2, :generate_series_3) WITH ORDINALITY AS anon_1
@@ -377,7 +378,7 @@ class FunctionElement(Executable, ColumnElement[_T], FromClause, Generative):
         .. sourcecode:: pycon+sql
 
             >>> from sqlalchemy import column, select, func
-            >>> stmt = select(column('x'), column('y')).select_from(func.myfunction())
+            >>> stmt = select(column("x"), column("y")).select_from(func.myfunction())
             >>> print(stmt)
             {printsql}SELECT x, y FROM myfunction()
 
@@ -442,12 +443,13 @@ class FunctionElement(Executable, ColumnElement[_T], FromClause, Generative):
 
         The expression::
 
-            func.row_number().over(order_by='x')
+            func.row_number().over(order_by="x")
 
         is shorthand for::
 
             from sqlalchemy import over
-            over(func.row_number(), order_by='x')
+
+            over(func.row_number(), order_by="x")
 
         See :func:`_expression.over` for a full description.
 
@@ -511,6 +513,7 @@ class FunctionElement(Executable, ColumnElement[_T], FromClause, Generative):
         is shorthand for::
 
             from sqlalchemy import funcfilter
+
             funcfilter(func.count(1), True)
 
         .. seealso::
@@ -567,7 +570,7 @@ class FunctionElement(Executable, ColumnElement[_T], FromClause, Generative):
         An ORM example is as follows::
 
             class Venue(Base):
-                __tablename__ = 'venue'
+                __tablename__ = "venue"
                 id = Column(Integer, primary_key=True)
                 name = Column(String)
 
@@ -575,9 +578,10 @@ class FunctionElement(Executable, ColumnElement[_T], FromClause, Generative):
                     "Venue",
                     primaryjoin=func.instr(
                         remote(foreign(name)), name + "/"
-                    ).as_comparison(1, 2) == 1,
+                    ).as_comparison(1, 2)
+                    == 1,
                     viewonly=True,
-                    order_by=name
+                    order_by=name,
                 )
 
         Above, the "Venue" class can load descendant "Venue" objects by
@@ -881,8 +885,11 @@ class _FunctionGenerator:
 
     .. sourcecode:: pycon+sql
 
-        >>> print(func.my_string(u'hi', type_=Unicode) + ' ' +
-        ...       func.my_string(u'there', type_=Unicode))
+        >>> print(
+        ...     func.my_string("hi", type_=Unicode)
+        ...     + " "
+        ...     + func.my_string("there", type_=Unicode)
+        ... )
         {printsql}my_string(:my_string_1) || :my_string_2 || my_string(:my_string_3)
 
     The object returned by a :data:`.func` call is usually an instance of
@@ -1367,10 +1374,12 @@ class GenericFunction(Function[_T]):
         from sqlalchemy.sql.functions import GenericFunction
         from sqlalchemy.types import DateTime
 
+
         class as_utc(GenericFunction):
             type = DateTime()
             inherit_cache = True
 
+
         print(select(func.as_utc()))
 
     User-defined generic functions can be organized into
@@ -1418,6 +1427,7 @@ class GenericFunction(Function[_T]):
 
         from sqlalchemy.sql import quoted_name
 
+
         class GeoBuffer(GenericFunction):
             type = Geometry()
             package = "geo"
@@ -1657,7 +1667,7 @@ class concat(GenericFunction[str]):
 
     .. sourcecode:: pycon+sql
 
-        >>> print(select(func.concat('a', 'b')))
+        >>> print(select(func.concat("a", "b")))
         {printsql}SELECT concat(:concat_2, :concat_3) AS concat_1
 
     String concatenation in SQLAlchemy is more commonly available using the
@@ -1705,11 +1715,13 @@ class count(GenericFunction[int]):
         from sqlalchemy import select
         from sqlalchemy import table, column
 
-        my_table = table('some_table', column('id'))
+        my_table = table("some_table", column("id"))
 
         stmt = select(func.count()).select_from(my_table)
 
-    Executing ``stmt`` would emit::
+    Executing ``stmt`` would emit:
+
+    .. sourcecode:: sql
 
         SELECT count(*) AS count_1
         FROM some_table
@@ -2009,9 +2021,7 @@ class grouping_sets(GenericFunction[_T]):
         from sqlalchemy import tuple_
 
         stmt = select(
-            func.sum(table.c.value),
-            table.c.col_1, table.c.col_2,
-            table.c.col_3
+            func.sum(table.c.value), table.c.col_1, table.c.col_2, table.c.col_3
         ).group_by(
             func.grouping_sets(
                 tuple_(table.c.col_1, table.c.col_2),
@@ -2019,10 +2029,9 @@ class grouping_sets(GenericFunction[_T]):
             )
         )
 
-
     .. versionadded:: 1.2
 
-    """
+    """  # noqa: E501
 
     _has_args = True
     inherit_cache = True
index 2657b2c243d7acb34b6dab812084ffe8023b9d4d..061da29707cee793154eb9ac8db80b1375bfb4a7 100644 (file)
@@ -518,7 +518,6 @@ class StatementLambdaElement(
 
         stmt += lambda s: s.where(table.c.col == parameter)
 
-
     .. versionadded:: 1.4
 
     .. seealso::
@@ -558,9 +557,7 @@ class StatementLambdaElement(
             ...     stmt = lambda_stmt(
             ...         lambda: select(table.c.x, table.c.y),
             ...     )
-            ...     stmt = stmt.add_criteria(
-            ...         lambda: table.c.x > parameter
-            ...     )
+            ...     stmt = stmt.add_criteria(lambda: table.c.x > parameter)
             ...     return stmt
 
         The :meth:`_sql.StatementLambdaElement.add_criteria` method is
@@ -571,18 +568,15 @@ class StatementLambdaElement(
             >>> def my_stmt(self, foo):
             ...     stmt = lambda_stmt(
             ...         lambda: select(func.max(foo.x, foo.y)),
-            ...         track_closure_variables=False
-            ...     )
-            ...     stmt = stmt.add_criteria(
-            ...         lambda: self.where_criteria,
-            ...         track_on=[self]
+            ...         track_closure_variables=False,
             ...     )
+            ...     stmt = stmt.add_criteria(lambda: self.where_criteria, track_on=[self])
             ...     return stmt
 
         See :func:`_sql.lambda_stmt` for a description of the parameters
         accepted.
 
-        """
+        """  # noqa: E501
 
         opts = self.opts + dict(
             enable_tracking=enable_tracking,
index dc3fe63554068889909a8f3f14cfc383d4dd50c9..c97f03badb5471d05b3223037cea2e95f01cabeb 100644 (file)
@@ -148,6 +148,7 @@ class Operators:
         is equivalent to::
 
             from sqlalchemy import and_
+
             and_(a, b)
 
         Care should be taken when using ``&`` regarding
@@ -172,6 +173,7 @@ class Operators:
         is equivalent to::
 
             from sqlalchemy import or_
+
             or_(a, b)
 
         Care should be taken when using ``|`` regarding
@@ -196,6 +198,7 @@ class Operators:
         is equivalent to::
 
             from sqlalchemy import not_
+
             not_(a)
 
         """
@@ -224,7 +227,7 @@ class Operators:
         This function can also be used to make bitwise operators explicit. For
         example::
 
-          somecolumn.op('&')(0xff)
+          somecolumn.op("&")(0xFF)
 
         is a bitwise AND of the value in ``somecolumn``.
 
@@ -275,7 +278,7 @@ class Operators:
 
          e.g.::
 
-            >>> expr = column('x').op('+', python_impl=lambda a, b: a + b)('y')
+            >>> expr = column("x").op("+", python_impl=lambda a, b: a + b)("y")
 
          The operator for the above expression will also work for non-SQL
          left and right objects::
@@ -389,10 +392,9 @@ class custom_op(OperatorType, Generic[_T]):
         from sqlalchemy.sql import operators
         from sqlalchemy import Numeric
 
-        unary = UnaryExpression(table.c.somecolumn,
-                modifier=operators.custom_op("!"),
-                type_=Numeric)
-
+        unary = UnaryExpression(
+            table.c.somecolumn, modifier=operators.custom_op("!"), type_=Numeric
+        )
 
     .. seealso::
 
@@ -400,7 +402,7 @@ class custom_op(OperatorType, Generic[_T]):
 
         :meth:`.Operators.bool_op`
 
-    """
+    """  # noqa: E501
 
     __name__ = "custom_op"
 
@@ -698,14 +700,15 @@ class ColumnOperators(Operators):
     ) -> ColumnOperators:
         r"""Implement the ``like`` operator.
 
-        In a column context, produces the expression::
+        In a column context, produces the expression:
+
+        .. sourcecode:: sql
 
             a LIKE other
 
         E.g.::
 
-            stmt = select(sometable).\
-                where(sometable.c.column.like("%foobar%"))
+            stmt = select(sometable).where(sometable.c.column.like("%foobar%"))
 
         :param other: expression to be compared
         :param escape: optional escape character, renders the ``ESCAPE``
@@ -725,18 +728,21 @@ class ColumnOperators(Operators):
     ) -> ColumnOperators:
         r"""Implement the ``ilike`` operator, e.g. case insensitive LIKE.
 
-        In a column context, produces an expression either of the form::
+        In a column context, produces an expression either of the form:
+
+        .. sourcecode:: sql
 
             lower(a) LIKE lower(other)
 
-        Or on backends that support the ILIKE operator::
+        Or on backends that support the ILIKE operator:
+
+        .. sourcecode:: sql
 
             a ILIKE other
 
         E.g.::
 
-            stmt = select(sometable).\
-                where(sometable.c.column.ilike("%foobar%"))
+            stmt = select(sometable).where(sometable.c.column.ilike("%foobar%"))
 
         :param other: expression to be compared
         :param escape: optional escape character, renders the ``ESCAPE``
@@ -748,7 +754,7 @@ class ColumnOperators(Operators):
 
             :meth:`.ColumnOperators.like`
 
-        """
+        """  # noqa: E501
         return self.operate(ilike_op, other, escape=escape)
 
     def bitwise_xor(self, other: Any) -> ColumnOperators:
@@ -842,12 +848,15 @@ class ColumnOperators(Operators):
 
         The given parameter ``other`` may be:
 
-        * A list of literal values, e.g.::
+        * A list of literal values,
+          e.g.::
 
             stmt.where(column.in_([1, 2, 3]))
 
           In this calling form, the list of items is converted to a set of
-          bound parameters the same length as the list given::
+          bound parameters the same length as the list given:
+
+          .. sourcecode:: sql
 
             WHERE COL IN (?, ?, ?)
 
@@ -855,16 +864,20 @@ class ColumnOperators(Operators):
           :func:`.tuple_` containing multiple expressions::
 
             from sqlalchemy import tuple_
+
             stmt.where(tuple_(col1, col2).in_([(1, 10), (2, 20), (3, 30)]))
 
-        * An empty list, e.g.::
+        * An empty list,
+          e.g.::
 
             stmt.where(column.in_([]))
 
           In this calling form, the expression renders an "empty set"
           expression.  These expressions are tailored to individual backends
           and are generally trying to get an empty SELECT statement as a
-          subquery.  Such as on SQLite, the expression is::
+          subquery.  Such as on SQLite, the expression is:
+
+          .. sourcecode:: sql
 
             WHERE col IN (SELECT 1 FROM (SELECT 1) WHERE 1!=1)
 
@@ -874,10 +887,12 @@ class ColumnOperators(Operators):
         * A bound parameter, e.g. :func:`.bindparam`, may be used if it
           includes the :paramref:`.bindparam.expanding` flag::
 
-            stmt.where(column.in_(bindparam('value', expanding=True)))
+            stmt.where(column.in_(bindparam("value", expanding=True)))
 
           In this calling form, the expression renders a special non-SQL
-          placeholder expression that looks like::
+          placeholder expression that looks like:
+
+          .. sourcecode:: sql
 
             WHERE COL IN ([EXPANDING_value])
 
@@ -887,7 +902,9 @@ class ColumnOperators(Operators):
 
             connection.execute(stmt, {"value": [1, 2, 3]})
 
-          The database would be passed a bound parameter for each value::
+          The database would be passed a bound parameter for each value:
+
+          .. sourcecode:: sql
 
             WHERE COL IN (?, ?, ?)
 
@@ -895,7 +912,9 @@ class ColumnOperators(Operators):
 
           If an empty list is passed, a special "empty list" expression,
           which is specific to the database in use, is rendered.  On
-          SQLite this would be::
+          SQLite this would be:
+
+          .. sourcecode:: sql
 
             WHERE COL IN (SELECT 1 FROM (SELECT 1) WHERE 1!=1)
 
@@ -906,13 +925,12 @@ class ColumnOperators(Operators):
           correlated scalar select::
 
             stmt.where(
-                column.in_(
-                    select(othertable.c.y).
-                    where(table.c.x == othertable.c.x)
-                )
+                column.in_(select(othertable.c.y).where(table.c.x == othertable.c.x))
             )
 
-          In this calling form, :meth:`.ColumnOperators.in_` renders as given::
+          In this calling form, :meth:`.ColumnOperators.in_` renders as given:
+
+          .. sourcecode:: sql
 
             WHERE COL IN (SELECT othertable.y
             FROM othertable WHERE othertable.x = table.x)
@@ -921,7 +939,7 @@ class ColumnOperators(Operators):
          construct, or a :func:`.bindparam` construct that includes the
          :paramref:`.bindparam.expanding` flag set to True.
 
-        """
+        """  # noqa: E501
         return self.operate(in_op, other)
 
     def not_in(self, other: Any) -> ColumnOperators:
@@ -1065,14 +1083,15 @@ class ColumnOperators(Operators):
         r"""Implement the ``startswith`` operator.
 
         Produces a LIKE expression that tests against a match for the start
-        of a string value::
+        of a string value:
+
+        .. sourcecode:: sql
 
             column LIKE <other> || '%'
 
         E.g.::
 
-            stmt = select(sometable).\
-                where(sometable.c.column.startswith("foobar"))
+            stmt = select(sometable).where(sometable.c.column.startswith("foobar"))
 
         Since the operator uses ``LIKE``, wildcard characters
         ``"%"`` and ``"_"`` that are present inside the <other> expression
@@ -1101,7 +1120,9 @@ class ColumnOperators(Operators):
 
             somecolumn.startswith("foo%bar", autoescape=True)
 
-          Will render as::
+          Will render as:
+
+          .. sourcecode:: sql
 
             somecolumn LIKE :param || '%' ESCAPE '/'
 
@@ -1117,7 +1138,9 @@ class ColumnOperators(Operators):
 
             somecolumn.startswith("foo/%bar", escape="^")
 
-          Will render as::
+          Will render as:
+
+          .. sourcecode:: sql
 
             somecolumn LIKE :param || '%' ESCAPE '^'
 
@@ -1137,7 +1160,7 @@ class ColumnOperators(Operators):
 
             :meth:`.ColumnOperators.like`
 
-        """
+        """  # noqa: E501
         return self.operate(
             startswith_op, other, escape=escape, autoescape=autoescape
         )
@@ -1152,14 +1175,15 @@ class ColumnOperators(Operators):
         version of :meth:`.ColumnOperators.startswith`.
 
         Produces a LIKE expression that tests against an insensitive
-        match for the start of a string value::
+        match for the start of a string value:
+
+        .. sourcecode:: sql
 
             lower(column) LIKE lower(<other>) || '%'
 
         E.g.::
 
-            stmt = select(sometable).\
-                where(sometable.c.column.istartswith("foobar"))
+            stmt = select(sometable).where(sometable.c.column.istartswith("foobar"))
 
         Since the operator uses ``LIKE``, wildcard characters
         ``"%"`` and ``"_"`` that are present inside the <other> expression
@@ -1188,7 +1212,9 @@ class ColumnOperators(Operators):
 
             somecolumn.istartswith("foo%bar", autoescape=True)
 
-          Will render as::
+          Will render as:
+
+          .. sourcecode:: sql
 
             lower(somecolumn) LIKE lower(:param) || '%' ESCAPE '/'
 
@@ -1204,7 +1230,9 @@ class ColumnOperators(Operators):
 
             somecolumn.istartswith("foo/%bar", escape="^")
 
-          Will render as::
+          Will render as:
+
+          .. sourcecode:: sql
 
             lower(somecolumn) LIKE lower(:param) || '%' ESCAPE '^'
 
@@ -1219,7 +1247,7 @@ class ColumnOperators(Operators):
         .. seealso::
 
             :meth:`.ColumnOperators.startswith`
-        """
+        """  # noqa: E501
         return self.operate(
             istartswith_op, other, escape=escape, autoescape=autoescape
         )
@@ -1233,14 +1261,15 @@ class ColumnOperators(Operators):
         r"""Implement the 'endswith' operator.
 
         Produces a LIKE expression that tests against a match for the end
-        of a string value::
+        of a string value:
+
+        .. sourcecode:: sql
 
             column LIKE '%' || <other>
 
         E.g.::
 
-            stmt = select(sometable).\
-                where(sometable.c.column.endswith("foobar"))
+            stmt = select(sometable).where(sometable.c.column.endswith("foobar"))
 
         Since the operator uses ``LIKE``, wildcard characters
         ``"%"`` and ``"_"`` that are present inside the <other> expression
@@ -1269,7 +1298,9 @@ class ColumnOperators(Operators):
 
             somecolumn.endswith("foo%bar", autoescape=True)
 
-          Will render as::
+          Will render as:
+
+          .. sourcecode:: sql
 
             somecolumn LIKE '%' || :param ESCAPE '/'
 
@@ -1285,7 +1316,9 @@ class ColumnOperators(Operators):
 
             somecolumn.endswith("foo/%bar", escape="^")
 
-          Will render as::
+          Will render as:
+
+          .. sourcecode:: sql
 
             somecolumn LIKE '%' || :param ESCAPE '^'
 
@@ -1305,7 +1338,7 @@ class ColumnOperators(Operators):
 
             :meth:`.ColumnOperators.like`
 
-        """
+        """  # noqa: E501
         return self.operate(
             endswith_op, other, escape=escape, autoescape=autoescape
         )
@@ -1320,14 +1353,15 @@ class ColumnOperators(Operators):
         version of :meth:`.ColumnOperators.endswith`.
 
         Produces a LIKE expression that tests against an insensitive match
-        for the end of a string value::
+        for the end of a string value:
+
+        .. sourcecode:: sql
 
             lower(column) LIKE '%' || lower(<other>)
 
         E.g.::
 
-            stmt = select(sometable).\
-                where(sometable.c.column.iendswith("foobar"))
+            stmt = select(sometable).where(sometable.c.column.iendswith("foobar"))
 
         Since the operator uses ``LIKE``, wildcard characters
         ``"%"`` and ``"_"`` that are present inside the <other> expression
@@ -1356,7 +1390,9 @@ class ColumnOperators(Operators):
 
             somecolumn.iendswith("foo%bar", autoescape=True)
 
-          Will render as::
+          Will render as:
+
+          .. sourcecode:: sql
 
             lower(somecolumn) LIKE '%' || lower(:param) ESCAPE '/'
 
@@ -1372,7 +1408,9 @@ class ColumnOperators(Operators):
 
             somecolumn.iendswith("foo/%bar", escape="^")
 
-          Will render as::
+          Will render as:
+
+          .. sourcecode:: sql
 
             lower(somecolumn) LIKE '%' || lower(:param) ESCAPE '^'
 
@@ -1387,7 +1425,7 @@ class ColumnOperators(Operators):
         .. seealso::
 
             :meth:`.ColumnOperators.endswith`
-        """
+        """  # noqa: E501
         return self.operate(
             iendswith_op, other, escape=escape, autoescape=autoescape
         )
@@ -1396,14 +1434,15 @@ class ColumnOperators(Operators):
         r"""Implement the 'contains' operator.
 
         Produces a LIKE expression that tests against a match for the middle
-        of a string value::
+        of a string value:
+
+        .. sourcecode:: sql
 
             column LIKE '%' || <other> || '%'
 
         E.g.::
 
-            stmt = select(sometable).\
-                where(sometable.c.column.contains("foobar"))
+            stmt = select(sometable).where(sometable.c.column.contains("foobar"))
 
         Since the operator uses ``LIKE``, wildcard characters
         ``"%"`` and ``"_"`` that are present inside the <other> expression
@@ -1432,7 +1471,9 @@ class ColumnOperators(Operators):
 
             somecolumn.contains("foo%bar", autoescape=True)
 
-          Will render as::
+          Will render as:
+
+          .. sourcecode:: sql
 
             somecolumn LIKE '%' || :param || '%' ESCAPE '/'
 
@@ -1448,7 +1489,9 @@ class ColumnOperators(Operators):
 
             somecolumn.contains("foo/%bar", escape="^")
 
-          Will render as::
+          Will render as:
+
+          .. sourcecode:: sql
 
             somecolumn LIKE '%' || :param || '%' ESCAPE '^'
 
@@ -1469,7 +1512,7 @@ class ColumnOperators(Operators):
             :meth:`.ColumnOperators.like`
 
 
-        """
+        """  # noqa: E501
         return self.operate(contains_op, other, **kw)
 
     def icontains(self, other: Any, **kw: Any) -> ColumnOperators:
@@ -1477,14 +1520,15 @@ class ColumnOperators(Operators):
         version of :meth:`.ColumnOperators.contains`.
 
         Produces a LIKE expression that tests against an insensitive match
-        for the middle of a string value::
+        for the middle of a string value:
+
+        .. sourcecode:: sql
 
             lower(column) LIKE '%' || lower(<other>) || '%'
 
         E.g.::
 
-            stmt = select(sometable).\
-                where(sometable.c.column.icontains("foobar"))
+            stmt = select(sometable).where(sometable.c.column.icontains("foobar"))
 
         Since the operator uses ``LIKE``, wildcard characters
         ``"%"`` and ``"_"`` that are present inside the <other> expression
@@ -1513,7 +1557,9 @@ class ColumnOperators(Operators):
 
             somecolumn.icontains("foo%bar", autoescape=True)
 
-          Will render as::
+          Will render as:
+
+          .. sourcecode:: sql
 
             lower(somecolumn) LIKE '%' || lower(:param) || '%' ESCAPE '/'
 
@@ -1529,7 +1575,9 @@ class ColumnOperators(Operators):
 
             somecolumn.icontains("foo/%bar", escape="^")
 
-          Will render as::
+          Will render as:
+
+          .. sourcecode:: sql
 
             lower(somecolumn) LIKE '%' || lower(:param) || '%' ESCAPE '^'
 
@@ -1545,7 +1593,7 @@ class ColumnOperators(Operators):
 
             :meth:`.ColumnOperators.contains`
 
-        """
+        """  # noqa: E501
         return self.operate(icontains_op, other, **kw)
 
     def match(self, other: Any, **kwargs: Any) -> ColumnOperators:
@@ -1586,7 +1634,7 @@ class ColumnOperators(Operators):
         E.g.::
 
             stmt = select(table.c.some_column).where(
-                table.c.some_column.regexp_match('^(b|c)')
+                table.c.some_column.regexp_match("^(b|c)")
             )
 
         :meth:`_sql.ColumnOperators.regexp_match` attempts to resolve to
@@ -1644,11 +1692,7 @@ class ColumnOperators(Operators):
         E.g.::
 
             stmt = select(
-                table.c.some_column.regexp_replace(
-                    'b(..)',
-                    'X\1Y',
-                    flags='g'
-                )
+                table.c.some_column.regexp_replace("b(..)", "X\1Y", flags="g")
             )
 
         :meth:`_sql.ColumnOperators.regexp_replace` attempts to resolve to
index b8f9075bdc8c64a0dc73bef715dd26d956c7d9ea..6539e303fa98b1977412c9f3da5abc89a0a5b617 100644 (file)
@@ -321,9 +321,10 @@ class Table(
     e.g.::
 
         mytable = Table(
-            "mytable", metadata,
-            Column('mytable_id', Integer, primary_key=True),
-            Column('value', String(50))
+            "mytable",
+            metadata,
+            Column("mytable_id", Integer, primary_key=True),
+            Column("value", String(50)),
         )
 
     The :class:`_schema.Table`
@@ -633,11 +634,13 @@ class Table(
             :class:`_schema.Column`
             named "y"::
 
-                Table("mytable", metadata,
-                            Column('y', Integer),
-                            extend_existing=True,
-                            autoload_with=engine
-                        )
+                Table(
+                    "mytable",
+                    metadata,
+                    Column("y", Integer),
+                    extend_existing=True,
+                    autoload_with=engine,
+                )
 
             .. seealso::
 
@@ -734,12 +737,12 @@ class Table(
                     "handle the column reflection event"
                     # ...
 
+
                 t = Table(
-                    'sometable',
+                    "sometable",
                     autoload_with=engine,
-                    listeners=[
-                        ('column_reflect', listen_for_reflect)
-                    ])
+                    listeners=[("column_reflect", listen_for_reflect)],
+                )
 
             .. seealso::
 
@@ -1346,7 +1349,7 @@ class Table(
 
             m1 = MetaData()
 
-            user = Table('user', m1, Column('id', Integer, primary_key=True))
+            user = Table("user", m1, Column("id", Integer, primary_key=True))
 
             m2 = MetaData()
             user_copy = user.to_metadata(m2)
@@ -1370,7 +1373,7 @@ class Table(
          unless
          set explicitly::
 
-            m2 = MetaData(schema='newschema')
+            m2 = MetaData(schema="newschema")
 
             # user_copy_one will have "newschema" as the schema name
             user_copy_one = user.to_metadata(m2, schema=None)
@@ -1397,15 +1400,16 @@ class Table(
 
          E.g.::
 
-                def referred_schema_fn(table, to_schema,
-                                                constraint, referred_schema):
-                    if referred_schema == 'base_tables':
+                def referred_schema_fn(table, to_schema, constraint, referred_schema):
+                    if referred_schema == "base_tables":
                         return referred_schema
                     else:
                         return to_schema
 
-                new_table = table.to_metadata(m2, schema="alt_schema",
-                                        referred_schema_fn=referred_schema_fn)
+
+                new_table = table.to_metadata(
+                    m2, schema="alt_schema", referred_schema_fn=referred_schema_fn
+                )
 
         :param name: optional string name indicating the target table name.
          If not specified or None, the table name is retained.  This allows
@@ -1413,7 +1417,7 @@ class Table(
          :class:`_schema.MetaData` target
          with a new name.
 
-        """
+        """  # noqa: E501
         if name is None:
             name = self.name
 
@@ -1559,10 +1563,10 @@ class Column(DialectKWArgs, SchemaItem, ColumnClause[_T]):
           as well, e.g.::
 
             # use a type with arguments
-            Column('data', String(50))
+            Column("data", String(50))
 
             # use no arguments
-            Column('level', Integer)
+            Column("level", Integer)
 
           The ``type`` argument may be the second positional argument
           or specified by keyword.
@@ -1664,8 +1668,12 @@ class Column(DialectKWArgs, SchemaItem, ColumnClause[_T]):
 
                 # turn on autoincrement for this column despite
                 # the ForeignKey()
-                Column('id', ForeignKey('other.id'),
-                            primary_key=True, autoincrement='ignore_fk')
+                Column(
+                    "id",
+                    ForeignKey("other.id"),
+                    primary_key=True,
+                    autoincrement="ignore_fk",
+                )
 
           It is typically not desirable to have "autoincrement" enabled on a
           column that refers to another via foreign key, as such a column is
@@ -1782,7 +1790,7 @@ class Column(DialectKWArgs, SchemaItem, ColumnClause[_T]):
                     "some_table",
                     metadata,
                     Column("x", Integer),
-                    Index("ix_some_table_x", "x")
+                    Index("ix_some_table_x", "x"),
                 )
 
             To add the :paramref:`_schema.Index.unique` flag to the
@@ -1864,14 +1872,22 @@ class Column(DialectKWArgs, SchemaItem, ColumnClause[_T]):
 
             String types will be emitted as-is, surrounded by single quotes::
 
-                Column('x', Text, server_default="val")
+                Column("x", Text, server_default="val")
+
+            will render:
+
+            .. sourcecode:: sql
 
                 x TEXT DEFAULT 'val'
 
             A :func:`~sqlalchemy.sql.expression.text` expression will be
             rendered as-is, without quotes::
 
-                Column('y', DateTime, server_default=text('NOW()'))
+                Column("y", DateTime, server_default=text("NOW()"))
+
+            will render:
+
+            .. sourcecode:: sql
 
                 y DATETIME DEFAULT NOW()
 
@@ -1886,20 +1902,21 @@ class Column(DialectKWArgs, SchemaItem, ColumnClause[_T]):
                 from sqlalchemy.dialects.postgresql import array
 
                 engine = create_engine(
-                    'postgresql+psycopg2://scott:tiger@localhost/mydatabase'
+                    "postgresql+psycopg2://scott:tiger@localhost/mydatabase"
                 )
                 metadata_obj = MetaData()
                 tbl = Table(
-                        "foo",
-                        metadata_obj,
-                        Column("bar",
-                               ARRAY(Text),
-                               server_default=array(["biz", "bang", "bash"])
-                               )
+                    "foo",
+                    metadata_obj,
+                    Column(
+                        "bar", ARRAY(Text), server_default=array(["biz", "bang", "bash"])
+                    ),
                 )
                 metadata_obj.create_all(engine)
 
-            The above results in a table created with the following SQL::
+            The above results in a table created with the following SQL:
+
+            .. sourcecode:: sql
 
                 CREATE TABLE foo (
                     bar TEXT[] DEFAULT ARRAY['biz', 'bang', 'bash']
@@ -1964,12 +1981,7 @@ class Column(DialectKWArgs, SchemaItem, ColumnClause[_T]):
             :class:`_schema.UniqueConstraint` construct explicitly at the
             level of the :class:`_schema.Table` construct itself::
 
-                Table(
-                    "some_table",
-                    metadata,
-                    Column("x", Integer),
-                    UniqueConstraint("x")
-                )
+                Table("some_table", metadata, Column("x", Integer), UniqueConstraint("x"))
 
             The :paramref:`_schema.UniqueConstraint.name` parameter
             of the unique constraint object is left at its default value
@@ -2740,8 +2752,10 @@ class ForeignKey(DialectKWArgs, SchemaItem):
     object,
     e.g.::
 
-        t = Table("remote_table", metadata,
-            Column("remote_id", ForeignKey("main_table.id"))
+        t = Table(
+            "remote_table",
+            metadata,
+            Column("remote_id", ForeignKey("main_table.id")),
         )
 
     Note that ``ForeignKey`` is only a marker object that defines
@@ -3419,12 +3433,11 @@ class ColumnDefault(DefaultGenerator, ABC):
 
     For example, the following::
 
-        Column('foo', Integer, default=50)
+        Column("foo", Integer, default=50)
 
     Is equivalent to::
 
-        Column('foo', Integer, ColumnDefault(50))
-
+        Column("foo", Integer, ColumnDefault(50))
 
     """
 
@@ -3746,9 +3759,14 @@ class Sequence(HasSchemaAttr, IdentityOptions, DefaultGenerator):
     The :class:`.Sequence` is typically associated with a primary key column::
 
         some_table = Table(
-            'some_table', metadata,
-            Column('id', Integer, Sequence('some_table_seq', start=1),
-            primary_key=True)
+            "some_table",
+            metadata,
+            Column(
+                "id",
+                Integer,
+                Sequence("some_table_seq", start=1),
+                primary_key=True,
+            ),
         )
 
     When CREATE TABLE is emitted for the above :class:`_schema.Table`, if the
@@ -4026,7 +4044,7 @@ class FetchedValue(SchemaEventTarget):
 
     E.g.::
 
-        Column('foo', Integer, FetchedValue())
+        Column("foo", Integer, FetchedValue())
 
     Would indicate that some trigger or default generator
     will create a new value for the ``foo`` column during an
@@ -4092,11 +4110,11 @@ class DefaultClause(FetchedValue):
 
     For example, the following::
 
-        Column('foo', Integer, server_default="50")
+        Column("foo", Integer, server_default="50")
 
     Is equivalent to::
 
-        Column('foo', Integer, DefaultClause("50"))
+        Column("foo", Integer, DefaultClause("50"))
 
     """
 
@@ -4928,11 +4946,13 @@ class PrimaryKeyConstraint(ColumnCollectionConstraint):
     :class:`_schema.Column` objects corresponding to those marked with
     the :paramref:`_schema.Column.primary_key` flag::
 
-        >>> my_table = Table('mytable', metadata,
-        ...                 Column('id', Integer, primary_key=True),
-        ...                 Column('version_id', Integer, primary_key=True),
-        ...                 Column('data', String(50))
-        ...     )
+        >>> my_table = Table(
+        ...     "mytable",
+        ...     metadata,
+        ...     Column("id", Integer, primary_key=True),
+        ...     Column("version_id", Integer, primary_key=True),
+        ...     Column("data", String(50)),
+        ... )
         >>> my_table.primary_key
         PrimaryKeyConstraint(
             Column('id', Integer(), table=<mytable>,
@@ -4946,13 +4966,14 @@ class PrimaryKeyConstraint(ColumnCollectionConstraint):
     the "name" of the constraint can also be specified, as well as other
     options which may be recognized by dialects::
 
-        my_table = Table('mytable', metadata,
-                    Column('id', Integer),
-                    Column('version_id', Integer),
-                    Column('data', String(50)),
-                    PrimaryKeyConstraint('id', 'version_id',
-                                         name='mytable_pk')
-                )
+        my_table = Table(
+            "mytable",
+            metadata,
+            Column("id", Integer),
+            Column("version_id", Integer),
+            Column("data", String(50)),
+            PrimaryKeyConstraint("id", "version_id", name="mytable_pk"),
+        )
 
     The two styles of column-specification should generally not be mixed.
     An warning is emitted if the columns present in the
@@ -4970,13 +4991,14 @@ class PrimaryKeyConstraint(ColumnCollectionConstraint):
     primary key column collection from the :class:`_schema.Table` based on the
     flags::
 
-        my_table = Table('mytable', metadata,
-                    Column('id', Integer, primary_key=True),
-                    Column('version_id', Integer, primary_key=True),
-                    Column('data', String(50)),
-                    PrimaryKeyConstraint(name='mytable_pk',
-                                         mssql_clustered=True)
-                )
+        my_table = Table(
+            "mytable",
+            metadata,
+            Column("id", Integer, primary_key=True),
+            Column("version_id", Integer, primary_key=True),
+            Column("data", String(50)),
+            PrimaryKeyConstraint(name="mytable_pk", mssql_clustered=True),
+        )
 
     """
 
@@ -5182,19 +5204,21 @@ class Index(
 
     E.g.::
 
-        sometable = Table("sometable", metadata,
-                        Column("name", String(50)),
-                        Column("address", String(100))
-                    )
+        sometable = Table(
+            "sometable",
+            metadata,
+            Column("name", String(50)),
+            Column("address", String(100)),
+        )
 
         Index("some_index", sometable.c.name)
 
     For a no-frills, single column index, adding
     :class:`_schema.Column` also supports ``index=True``::
 
-        sometable = Table("sometable", metadata,
-                        Column("name", String(50), index=True)
-                    )
+        sometable = Table(
+            "sometable", metadata, Column("name", String(50), index=True)
+        )
 
     For a composite index, multiple columns can be specified::
 
@@ -5213,22 +5237,26 @@ class Index(
     the names
     of the indexed columns can be specified as strings::
 
-        Table("sometable", metadata,
-                        Column("name", String(50)),
-                        Column("address", String(100)),
-                        Index("some_index", "name", "address")
-                )
+        Table(
+            "sometable",
+            metadata,
+            Column("name", String(50)),
+            Column("address", String(100)),
+            Index("some_index", "name", "address"),
+        )
 
     To support functional or expression-based indexes in this form, the
     :func:`_expression.text` construct may be used::
 
         from sqlalchemy import text
 
-        Table("sometable", metadata,
-                        Column("name", String(50)),
-                        Column("address", String(100)),
-                        Index("some_index", text("lower(name)"))
-                )
+        Table(
+            "sometable",
+            metadata,
+            Column("name", String(50)),
+            Column("address", String(100)),
+            Index("some_index", text("lower(name)")),
+        )
 
     .. seealso::
 
@@ -5986,9 +6014,11 @@ class Computed(FetchedValue, SchemaItem):
 
         from sqlalchemy import Computed
 
-        Table('square', metadata_obj,
-            Column('side', Float, nullable=False),
-            Column('area', Float, Computed('side * side'))
+        Table(
+            "square",
+            metadata_obj,
+            Column("side", Float, nullable=False),
+            Column("area", Float, Computed("side * side")),
         )
 
     See the linked documentation below for complete details.
@@ -6093,9 +6123,11 @@ class Identity(IdentityOptions, FetchedValue, SchemaItem):
 
         from sqlalchemy import Identity
 
-        Table('foo', metadata_obj,
-            Column('id', Integer, Identity())
-            Column('description', Text),
+        Table(
+            "foo",
+            metadata_obj,
+            Column("id", Integer, Identity()),
+            Column("description", Text),
         )
 
     See the linked documentation below for complete details.
index 46ed0be33470e1ce68c543dca1866ba424428653..b761943dc9dccf8a2731c218e521647edc08f0c7 100644 (file)
@@ -397,8 +397,7 @@ class HasPrefixes:
             stmt = table.insert().prefix_with("LOW_PRIORITY", dialect="mysql")
 
             # MySQL 5.7 optimizer hints
-            stmt = select(table).prefix_with(
-                "/*+ BKA(t1) */", dialect="mysql")
+            stmt = select(table).prefix_with("/*+ BKA(t1) */", dialect="mysql")
 
         Multiple prefixes can be specified by multiple calls
         to :meth:`_expression.HasPrefixes.prefix_with`.
@@ -445,8 +444,13 @@ class HasSuffixes:
 
         E.g.::
 
-            stmt = select(col1, col2).cte().suffix_with(
-                "cycle empno set y_cycle to 1 default 0", dialect="oracle")
+            stmt = (
+                select(col1, col2)
+                .cte()
+                .suffix_with(
+                    "cycle empno set y_cycle to 1 default 0", dialect="oracle"
+                )
+            )
 
         Multiple suffixes can be specified by multiple calls
         to :meth:`_expression.HasSuffixes.suffix_with`.
@@ -545,20 +549,21 @@ class HasHints:
         the table or alias. E.g. when using Oracle Database, the
         following::
 
-            select(mytable).\
-                with_hint(mytable, "index(%(name)s ix_mytable)")
+            select(mytable).with_hint(mytable, "index(%(name)s ix_mytable)")
 
-        Would render SQL as::
+        Would render SQL as:
+
+        .. sourcecode:: sql
 
             select /*+ index(mytable ix_mytable) */ ... from mytable
 
         The ``dialect_name`` option will limit the rendering of a particular
         hint to a particular backend. Such as, to add hints for both Oracle
-        Database and Sybase simultaneously::
+        Database and MSSql simultaneously::
 
-            select(mytable).\
-                with_hint(mytable, "index(%(name)s ix_mytable)", 'oracle').\
-                with_hint(mytable, "WITH INDEX ix_mytable", 'mssql')
+            select(mytable).with_hint(
+                mytable, "index(%(name)s ix_mytable)", "oracle"
+            ).with_hint(mytable, "WITH INDEX ix_mytable", "mssql")
 
         .. seealso::
 
@@ -670,11 +675,14 @@ class FromClause(roles.AnonymizedFromClauseRole, Selectable):
 
             from sqlalchemy import join
 
-            j = user_table.join(address_table,
-                            user_table.c.id == address_table.c.user_id)
+            j = user_table.join(
+                address_table, user_table.c.id == address_table.c.user_id
+            )
             stmt = select(user_table).select_from(j)
 
-        would emit SQL along the lines of::
+        would emit SQL along the lines of:
+
+        .. sourcecode:: sql
 
             SELECT user.id, user.name FROM user
             JOIN address ON user.id = address.user_id
@@ -720,15 +728,15 @@ class FromClause(roles.AnonymizedFromClauseRole, Selectable):
 
             from sqlalchemy import outerjoin
 
-            j = user_table.outerjoin(address_table,
-                            user_table.c.id == address_table.c.user_id)
+            j = user_table.outerjoin(
+                address_table, user_table.c.id == address_table.c.user_id
+            )
 
         The above is equivalent to::
 
             j = user_table.join(
-                address_table,
-                user_table.c.id == address_table.c.user_id,
-                isouter=True)
+                address_table, user_table.c.id == address_table.c.user_id, isouter=True
+            )
 
         :param right: the right side of the join; this is any
          :class:`_expression.FromClause` object such as a
@@ -750,7 +758,7 @@ class FromClause(roles.AnonymizedFromClauseRole, Selectable):
 
             :class:`_expression.Join`
 
-        """
+        """  # noqa: E501
 
         return Join(self, right, onclause, True, full)
 
@@ -761,7 +769,7 @@ class FromClause(roles.AnonymizedFromClauseRole, Selectable):
 
         E.g.::
 
-            a2 = some_table.alias('a2')
+            a2 = some_table.alias("a2")
 
         The above code creates an :class:`_expression.Alias`
         object which can be used
@@ -898,7 +906,7 @@ class FromClause(roles.AnonymizedFromClauseRole, Selectable):
         This is the namespace that is used to resolve "filter_by()" type
         expressions, such as::
 
-            stmt.filter_by(address='some address')
+            stmt.filter_by(address="some address")
 
         It defaults to the ``.c`` collection, however internally it can
         be overridden using the "entity_namespace" annotation to deliver
@@ -1081,7 +1089,11 @@ class SelectLabelStyle(Enum):
         >>> from sqlalchemy import table, column, select, true, LABEL_STYLE_NONE
         >>> table1 = table("table1", column("columna"), column("columnb"))
         >>> table2 = table("table2", column("columna"), column("columnc"))
-        >>> print(select(table1, table2).join(table2, true()).set_label_style(LABEL_STYLE_NONE))
+        >>> print(
+        ...     select(table1, table2)
+        ...     .join(table2, true())
+        ...     .set_label_style(LABEL_STYLE_NONE)
+        ... )
         {printsql}SELECT table1.columna, table1.columnb, table2.columna, table2.columnc
         FROM table1 JOIN table2 ON true
 
@@ -1103,10 +1115,20 @@ class SelectLabelStyle(Enum):
 
     .. sourcecode:: pycon+sql
 
-        >>> from sqlalchemy import table, column, select, true, LABEL_STYLE_TABLENAME_PLUS_COL
+        >>> from sqlalchemy import (
+        ...     table,
+        ...     column,
+        ...     select,
+        ...     true,
+        ...     LABEL_STYLE_TABLENAME_PLUS_COL,
+        ... )
         >>> table1 = table("table1", column("columna"), column("columnb"))
         >>> table2 = table("table2", column("columna"), column("columnc"))
-        >>> print(select(table1, table2).join(table2, true()).set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL))
+        >>> print(
+        ...     select(table1, table2)
+        ...     .join(table2, true())
+        ...     .set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
+        ... )
         {printsql}SELECT table1.columna AS table1_columna, table1.columnb AS table1_columnb, table2.columna AS table2_columna, table2.columnc AS table2_columnc
         FROM table1 JOIN table2 ON true
 
@@ -1132,10 +1154,20 @@ class SelectLabelStyle(Enum):
 
     .. sourcecode:: pycon+sql
 
-        >>> from sqlalchemy import table, column, select, true, LABEL_STYLE_DISAMBIGUATE_ONLY
+        >>> from sqlalchemy import (
+        ...     table,
+        ...     column,
+        ...     select,
+        ...     true,
+        ...     LABEL_STYLE_DISAMBIGUATE_ONLY,
+        ... )
         >>> table1 = table("table1", column("columna"), column("columnb"))
         >>> table2 = table("table2", column("columna"), column("columnc"))
-        >>> print(select(table1, table2).join(table2, true()).set_label_style(LABEL_STYLE_DISAMBIGUATE_ONLY))
+        >>> print(
+        ...     select(table1, table2)
+        ...     .join(table2, true())
+        ...     .set_label_style(LABEL_STYLE_DISAMBIGUATE_ONLY)
+        ... )
         {printsql}SELECT table1.columna, table1.columnb, table2.columna AS columna_1, table2.columnc
         FROM table1 JOIN table2 ON true
 
@@ -1533,7 +1565,9 @@ class Join(roles.DMLTableRole, FromClause):
 
             stmt = stmt.select()
 
-        The above will produce a SQL string resembling::
+        The above will produce a SQL string resembling:
+
+        .. sourcecode:: sql
 
             SELECT table_a.id, table_a.col, table_b.id, table_b.a_id
             FROM table_a JOIN table_b ON table_a.id = table_b.a_id
@@ -1767,7 +1801,9 @@ class TableValuedAlias(LateralFromClause, Alias):
     .. sourcecode:: pycon+sql
 
         >>> from sqlalchemy import select, func
-        >>> fn = func.json_array_elements_text('["one", "two", "three"]').table_valued("value")
+        >>> fn = func.json_array_elements_text('["one", "two", "three"]').table_valued(
+        ...     "value"
+        ... )
         >>> print(select(fn.c.value))
         {printsql}SELECT anon_1.value
         FROM json_array_elements_text(:json_array_elements_text_1) AS anon_1
@@ -1886,8 +1922,9 @@ class TableValuedAlias(LateralFromClause, Alias):
 
             >>> print(
             ...     select(
-            ...         func.unnest(array(["one", "two", "three"])).
-                        table_valued("x", with_ordinality="o").render_derived()
+            ...         func.unnest(array(["one", "two", "three"]))
+            ...         .table_valued("x", with_ordinality="o")
+            ...         .render_derived()
             ...     )
             ... )
             {printsql}SELECT anon_1.x, anon_1.o
@@ -1901,9 +1938,7 @@ class TableValuedAlias(LateralFromClause, Alias):
 
             >>> print(
             ...     select(
-            ...         func.json_to_recordset(
-            ...             '[{"a":1,"b":"foo"},{"a":"2","c":"bar"}]'
-            ...         )
+            ...         func.json_to_recordset('[{"a":1,"b":"foo"},{"a":"2","c":"bar"}]')
             ...         .table_valued(column("a", Integer), column("b", String))
             ...         .render_derived(with_types=True)
             ...     )
@@ -2460,16 +2495,20 @@ class HasCTE(roles.HasCTERole, SelectsRows):
         E.g.::
 
             from sqlalchemy import table, column, select
-            t = table('t', column('c1'), column('c2'))
+
+            t = table("t", column("c1"), column("c2"))
 
             ins = t.insert().values({"c1": "x", "c2": "y"}).cte()
 
             stmt = select(t).add_cte(ins)
 
-        Would render::
+        Would render:
+
+        .. sourcecode:: sql
 
-            WITH anon_1 AS
-            (INSERT INTO t (c1, c2) VALUES (:param_1, :param_2))
+            WITH anon_1 AS (
+                INSERT INTO t (c1, c2) VALUES (:param_1, :param_2)
+            )
             SELECT t.c1, t.c2
             FROM t
 
@@ -2485,9 +2524,7 @@ class HasCTE(roles.HasCTERole, SelectsRows):
 
             t = table("t", column("c1"), column("c2"))
 
-            delete_statement_cte = (
-                t.delete().where(t.c.c1 < 1).cte("deletions")
-            )
+            delete_statement_cte = t.delete().where(t.c.c1 < 1).cte("deletions")
 
             insert_stmt = insert(t).values({"c1": 1, "c2": 2})
             update_statement = insert_stmt.on_conflict_do_update(
@@ -2500,10 +2537,13 @@ class HasCTE(roles.HasCTERole, SelectsRows):
 
             print(update_statement)
 
-        The above statement renders as::
+        The above statement renders as:
+
+        .. sourcecode:: sql
 
-            WITH deletions AS
-            (DELETE FROM t WHERE t.c1 < %(c1_1)s)
+            WITH deletions AS (
+                DELETE FROM t WHERE t.c1 < %(c1_1)s
+            )
             INSERT INTO t (c1, c2) VALUES (%(c1)s, %(c2)s)
             ON CONFLICT (c1) DO UPDATE SET c1 = excluded.c1, c2 = excluded.c2
 
@@ -2527,10 +2567,8 @@ class HasCTE(roles.HasCTERole, SelectsRows):
             :paramref:`.HasCTE.cte.nesting`
 
 
-        """
-        opt = _CTEOpts(
-            nest_here,
-        )
+        """  # noqa: E501
+        opt = _CTEOpts(nest_here)
         for cte in ctes:
             cte = coercions.expect(roles.IsCTERole, cte)
             self._independent_ctes += (cte,)
@@ -2598,95 +2636,123 @@ class HasCTE(roles.HasCTERole, SelectsRows):
 
         Example 1, non recursive::
 
-            from sqlalchemy import (Table, Column, String, Integer,
-                                    MetaData, select, func)
+            from sqlalchemy import (
+                Table,
+                Column,
+                String,
+                Integer,
+                MetaData,
+                select,
+                func,
+            )
 
             metadata = MetaData()
 
-            orders = Table('orders', metadata,
-                Column('region', String),
-                Column('amount', Integer),
-                Column('product', String),
-                Column('quantity', Integer)
+            orders = Table(
+                "orders",
+                metadata,
+                Column("region", String),
+                Column("amount", Integer),
+                Column("product", String),
+                Column("quantity", Integer),
             )
 
-            regional_sales = select(
-                                orders.c.region,
-                                func.sum(orders.c.amount).label('total_sales')
-                            ).group_by(orders.c.region).cte("regional_sales")
+            regional_sales = (
+                select(orders.c.region, func.sum(orders.c.amount).label("total_sales"))
+                .group_by(orders.c.region)
+                .cte("regional_sales")
+            )
 
 
-            top_regions = select(regional_sales.c.region).\
-                    where(
-                        regional_sales.c.total_sales >
-                        select(
-                            func.sum(regional_sales.c.total_sales) / 10
-                        )
-                    ).cte("top_regions")
+            top_regions = (
+                select(regional_sales.c.region)
+                .where(
+                    regional_sales.c.total_sales
+                    > select(func.sum(regional_sales.c.total_sales) / 10)
+                )
+                .cte("top_regions")
+            )
 
-            statement = select(
-                        orders.c.region,
-                        orders.c.product,
-                        func.sum(orders.c.quantity).label("product_units"),
-                        func.sum(orders.c.amount).label("product_sales")
-                ).where(orders.c.region.in_(
-                    select(top_regions.c.region)
-                )).group_by(orders.c.region, orders.c.product)
+            statement = (
+                select(
+                    orders.c.region,
+                    orders.c.product,
+                    func.sum(orders.c.quantity).label("product_units"),
+                    func.sum(orders.c.amount).label("product_sales"),
+                )
+                .where(orders.c.region.in_(select(top_regions.c.region)))
+                .group_by(orders.c.region, orders.c.product)
+            )
 
             result = conn.execute(statement).fetchall()
 
         Example 2, WITH RECURSIVE::
 
-            from sqlalchemy import (Table, Column, String, Integer,
-                                    MetaData, select, func)
+            from sqlalchemy import (
+                Table,
+                Column,
+                String,
+                Integer,
+                MetaData,
+                select,
+                func,
+            )
 
             metadata = MetaData()
 
-            parts = Table('parts', metadata,
-                Column('part', String),
-                Column('sub_part', String),
-                Column('quantity', Integer),
+            parts = Table(
+                "parts",
+                metadata,
+                Column("part", String),
+                Column("sub_part", String),
+                Column("quantity", Integer),
             )
 
-            included_parts = select(\
-                parts.c.sub_part, parts.c.part, parts.c.quantity\
-                ).\
-                where(parts.c.part=='our part').\
-                cte(recursive=True)
+            included_parts = (
+                select(parts.c.sub_part, parts.c.part, parts.c.quantity)
+                .where(parts.c.part == "our part")
+                .cte(recursive=True)
+            )
 
 
             incl_alias = included_parts.alias()
             parts_alias = parts.alias()
             included_parts = included_parts.union_all(
                 select(
-                    parts_alias.c.sub_part,
-                    parts_alias.c.part,
-                    parts_alias.c.quantity
-                ).\
-                where(parts_alias.c.part==incl_alias.c.sub_part)
+                    parts_alias.c.sub_part, parts_alias.c.part, parts_alias.c.quantity
+                ).where(parts_alias.c.part == incl_alias.c.sub_part)
             )
 
             statement = select(
-                        included_parts.c.sub_part,
-                        func.sum(included_parts.c.quantity).
-                          label('total_quantity')
-                    ).\
-                    group_by(included_parts.c.sub_part)
+                included_parts.c.sub_part,
+                func.sum(included_parts.c.quantity).label("total_quantity"),
+            ).group_by(included_parts.c.sub_part)
 
             result = conn.execute(statement).fetchall()
 
         Example 3, an upsert using UPDATE and INSERT with CTEs::
 
             from datetime import date
-            from sqlalchemy import (MetaData, Table, Column, Integer,
-                                    Date, select, literal, and_, exists)
+            from sqlalchemy import (
+                MetaData,
+                Table,
+                Column,
+                Integer,
+                Date,
+                select,
+                literal,
+                and_,
+                exists,
+            )
 
             metadata = MetaData()
 
-            visitors = Table('visitors', metadata,
-                Column('product_id', Integer, primary_key=True),
-                Column('date', Date, primary_key=True),
-                Column('count', Integer),
+            visitors = Table(
+                "visitors",
+                metadata,
+                Column("product_id", Integer, primary_key=True),
+                Column("date", Date, primary_key=True),
+                Column("count", Integer),
             )
 
             # add 5 visitors for the product_id == 1
@@ -2696,31 +2762,31 @@ class HasCTE(roles.HasCTERole, SelectsRows):
 
             update_cte = (
                 visitors.update()
-                .where(and_(visitors.c.product_id == product_id,
-                            visitors.c.date == day))
+                .where(
+                    and_(visitors.c.product_id == product_id, visitors.c.date == day)
+                )
                 .values(count=visitors.c.count + count)
                 .returning(literal(1))
-                .cte('update_cte')
+                .cte("update_cte")
             )
 
             upsert = visitors.insert().from_select(
                 [visitors.c.product_id, visitors.c.date, visitors.c.count],
-                select(literal(product_id), literal(day), literal(count))
-                    .where(~exists(update_cte.select()))
+                select(literal(product_id), literal(day), literal(count)).where(
+                    ~exists(update_cte.select())
+                ),
             )
 
             connection.execute(upsert)
 
         Example 4, Nesting CTE (SQLAlchemy 1.4.24 and above)::
 
-            value_a = select(
-                literal("root").label("n")
-            ).cte("value_a")
+            value_a = select(literal("root").label("n")).cte("value_a")
 
             # A nested CTE with the same name as the root one
-            value_a_nested = select(
-                literal("nesting").label("n")
-            ).cte("value_a", nesting=True)
+            value_a_nested = select(literal("nesting").label("n")).cte(
+                "value_a", nesting=True
+            )
 
             # Nesting CTEs takes ascendency locally
             # over the CTEs at a higher level
@@ -2729,7 +2795,9 @@ class HasCTE(roles.HasCTERole, SelectsRows):
             value_ab = select(value_a.c.n.label("a"), value_b.c.n.label("b"))
 
         The above query will render the second CTE nested inside the first,
-        shown with inline parameters below as::
+        shown with inline parameters below as:
+
+        .. sourcecode:: sql
 
             WITH
                 value_a AS
@@ -2744,21 +2812,17 @@ class HasCTE(roles.HasCTERole, SelectsRows):
         The same CTE can be set up using the :meth:`.HasCTE.add_cte` method
         as follows (SQLAlchemy 2.0 and above)::
 
-            value_a = select(
-                literal("root").label("n")
-            ).cte("value_a")
+            value_a = select(literal("root").label("n")).cte("value_a")
 
             # A nested CTE with the same name as the root one
-            value_a_nested = select(
-                literal("nesting").label("n")
-            ).cte("value_a")
+            value_a_nested = select(literal("nesting").label("n")).cte("value_a")
 
             # Nesting CTEs takes ascendency locally
             # over the CTEs at a higher level
             value_b = (
-                select(value_a_nested.c.n).
-                add_cte(value_a_nested, nest_here=True).
-                cte("value_b")
+                select(value_a_nested.c.n)
+                .add_cte(value_a_nested, nest_here=True)
+                .cte("value_b")
             )
 
             value_ab = select(value_a.c.n.label("a"), value_b.c.n.label("b"))
@@ -2773,9 +2837,7 @@ class HasCTE(roles.HasCTERole, SelectsRows):
                 Column("right", Integer),
             )
 
-            root_node = select(literal(1).label("node")).cte(
-                "nodes", recursive=True
-            )
+            root_node = select(literal(1).label("node")).cte("nodes", recursive=True)
 
             left_edge = select(edge.c.left).join(
                 root_node, edge.c.right == root_node.c.node
@@ -2788,7 +2850,9 @@ class HasCTE(roles.HasCTERole, SelectsRows):
 
             subgraph = select(subgraph_cte)
 
-        The above query will render 2 UNIONs inside the recursive CTE::
+        The above query will render 2 UNIONs inside the recursive CTE:
+
+        .. sourcecode:: sql
 
             WITH RECURSIVE nodes(node) AS (
                     SELECT 1 AS node
@@ -2806,7 +2870,7 @@ class HasCTE(roles.HasCTERole, SelectsRows):
             :meth:`_orm.Query.cte` - ORM version of
             :meth:`_expression.HasCTE.cte`.
 
-        """
+        """  # noqa: E501
         return CTE._construct(
             self, name=name, recursive=recursive, nesting=nesting
         )
@@ -2963,10 +3027,11 @@ class TableClause(roles.DMLTableRole, Immutable, NamedFromClause):
 
         from sqlalchemy import table, column
 
-        user = table("user",
-                column("id"),
-                column("name"),
-                column("description"),
+        user = table(
+            "user",
+            column("id"),
+            column("name"),
+            column("description"),
         )
 
     The :class:`_expression.TableClause` construct serves as the base for
@@ -3072,7 +3137,7 @@ class TableClause(roles.DMLTableRole, Immutable, NamedFromClause):
 
         E.g.::
 
-            table.insert().values(name='foo')
+            table.insert().values(name="foo")
 
         See :func:`_expression.insert` for argument and usage information.
 
@@ -3087,7 +3152,7 @@ class TableClause(roles.DMLTableRole, Immutable, NamedFromClause):
 
         E.g.::
 
-            table.update().where(table.c.id==7).values(name='foo')
+            table.update().where(table.c.id == 7).values(name="foo")
 
         See :func:`_expression.update` for argument and usage information.
 
@@ -3103,7 +3168,7 @@ class TableClause(roles.DMLTableRole, Immutable, NamedFromClause):
 
         E.g.::
 
-            table.delete().where(table.c.id==7)
+            table.delete().where(table.c.id == 7)
 
         See :func:`_expression.delete` for argument and usage information.
 
@@ -3291,7 +3356,7 @@ class Values(roles.InElementRole, Generative, LateralFromClause):
 
         E.g.::
 
-            my_values = my_values.data([(1, 'value 1'), (2, 'value2')])
+            my_values = my_values.data([(1, "value 1"), (2, "value2")])
 
         :param values: a sequence (i.e. list) of tuples that map to the
          column expressions given in the :class:`_expression.Values`
@@ -3597,7 +3662,9 @@ class SelectBase(
 
             stmt = select(table.c.id, table.c.name)
 
-        The above statement might look like::
+        The above statement might look like:
+
+        .. sourcecode:: sql
 
             SELECT table.id, table.name FROM table
 
@@ -3608,7 +3675,9 @@ class SelectBase(
             subq = stmt.subquery()
             new_stmt = select(subq)
 
-        The above renders as::
+        The above renders as:
+
+        .. sourcecode:: sql
 
             SELECT anon_1.id, anon_1.name
             FROM (SELECT table.id, table.name FROM table) AS anon_1
@@ -3803,12 +3872,16 @@ class GenerativeSelect(SelectBase, Generative):
             stmt = select(table).with_for_update(nowait=True)
 
         On a database like PostgreSQL or Oracle Database, the above would
-        render a statement like::
+        render a statement like:
+
+        .. sourcecode:: sql
 
             SELECT table.a, table.b FROM table FOR UPDATE NOWAIT
 
         on other backends, the ``nowait`` option is ignored and instead
-        would produce::
+        would produce:
+
+        .. sourcecode:: sql
 
             SELECT table.a, table.b FROM table FOR UPDATE
 
@@ -4227,8 +4300,7 @@ class GenerativeSelect(SelectBase, Generative):
 
         e.g.::
 
-            stmt = select(table.c.name, func.max(table.c.stat)).\
-            group_by(table.c.name)
+            stmt = select(table.c.name, func.max(table.c.stat)).group_by(table.c.name)
 
         :param \*clauses: a series of :class:`_expression.ColumnElement`
          constructs
@@ -4241,7 +4313,7 @@ class GenerativeSelect(SelectBase, Generative):
 
             :ref:`tutorial_order_by_label` - in the :ref:`unified_tutorial`
 
-        """
+        """  # noqa: E501
 
         if not clauses and __first is None:
             self._group_by_clauses = ()
@@ -5322,11 +5394,17 @@ class Select(
 
         E.g.::
 
-            stmt = select(user_table).join(address_table, user_table.c.id == address_table.c.user_id)
+            stmt = select(user_table).join(
+                address_table, user_table.c.id == address_table.c.user_id
+            )
 
-        The above statement generates SQL similar to::
+        The above statement generates SQL similar to:
 
-            SELECT user.id, user.name FROM user JOIN address ON user.id = address.user_id
+        .. sourcecode:: sql
+
+            SELECT user.id, user.name
+            FROM user
+            JOIN address ON user.id = address.user_id
 
         .. versionchanged:: 1.4 :meth:`_expression.Select.join` now creates
            a :class:`_sql.Join` object between a :class:`_sql.FromClause`
@@ -5430,7 +5508,9 @@ class Select(
                 user_table, address_table, user_table.c.id == address_table.c.user_id
             )
 
-        The above statement generates SQL similar to::
+        The above statement generates SQL similar to:
+
+        .. sourcecode:: sql
 
             SELECT user.id, user.name, address.id, address.email, address.user_id
             FROM user JOIN address ON user.id = address.user_id
@@ -6049,9 +6129,12 @@ class Select(
         E.g.::
 
             from sqlalchemy import select
+
             stmt = select(users_table.c.id, users_table.c.name).distinct()
 
-        The above would produce an statement resembling::
+        The above would produce a statement resembling:
+
+        .. sourcecode:: sql
 
             SELECT DISTINCT user.id, user.name FROM user
 
@@ -6087,12 +6170,11 @@ class Select(
 
         E.g.::
 
-            table1 = table('t1', column('a'))
-            table2 = table('t2', column('b'))
-            s = select(table1.c.a).\
-                select_from(
-                    table1.join(table2, table1.c.a==table2.c.b)
-                )
+            table1 = table("t1", column("a"))
+            table2 = table("t2", column("b"))
+            s = select(table1.c.a).select_from(
+                table1.join(table2, table1.c.a == table2.c.b)
+            )
 
         The "from" list is a unique set on the identity of each element,
         so adding an already present :class:`_schema.Table`
@@ -6111,7 +6193,7 @@ class Select(
         if desired, in the case that the FROM clause cannot be fully
         derived from the columns clause::
 
-            select(func.count('*')).select_from(table1)
+            select(func.count("*")).select_from(table1)
 
         """
 
@@ -6264,8 +6346,8 @@ class Select(
         :class:`_expression.ColumnElement` objects are directly present as they
         were given, e.g.::
 
-            col1 = column('q', Integer)
-            col2 = column('p', Integer)
+            col1 = column("q", Integer)
+            col2 = column("p", Integer)
             stmt = select(col1, col2)
 
         Above, ``stmt.selected_columns`` would be a collection that contains
@@ -6280,7 +6362,8 @@ class Select(
         criteria, e.g.::
 
             def filter_on_id(my_select, id):
-                return my_select.where(my_select.selected_columns['id'] == id)
+                return my_select.where(my_select.selected_columns["id"] == id)
+
 
             stmt = select(MyModel)
 
@@ -6736,7 +6819,9 @@ class Exists(UnaryExpression[bool]):
 
             stmt = exists(some_table.c.id).where(some_table.c.id == 5).select()
 
-        This will produce a statement resembling::
+        This will produce a statement resembling:
+
+        .. sourcecode:: sql
 
             SELECT EXISTS (SELECT id FROM some_table WHERE some_table = :param) AS anon_1
 
index 95d94a27deca2179b16e4566ffd3648f59638744..281079fcacbd09b3df1750d59ce17ad61b55bb0a 100644 (file)
@@ -203,7 +203,7 @@ class String(Concatenable, TypeEngine[str]):
           .. sourcecode:: pycon+sql
 
             >>> from sqlalchemy import cast, select, String
-            >>> print(select(cast('some string', String(collation='utf8'))))
+            >>> print(select(cast("some string", String(collation="utf8"))))
             {printsql}SELECT CAST(:param_1 AS VARCHAR COLLATE utf8) AS anon_1
 
           .. note::
@@ -652,7 +652,7 @@ class Float(Numeric[_N]):
 
                     Column(
                         "float_data",
-                        Float(5).with_variant(oracle.FLOAT(binary_precision=16), "oracle")
+                        Float(5).with_variant(oracle.FLOAT(binary_precision=16), "oracle"),
                     )
 
         :param asdecimal: the same flag as that of :class:`.Numeric`, but
@@ -1227,15 +1227,14 @@ class Enum(String, SchemaType, Emulated, TypeEngine[Union[str, enum.Enum]]):
         import enum
         from sqlalchemy import Enum
 
+
         class MyEnum(enum.Enum):
             one = 1
             two = 2
             three = 3
 
-        t = Table(
-            'data', MetaData(),
-            Column('value', Enum(MyEnum))
-        )
+
+        t = Table("data", MetaData(), Column("value", Enum(MyEnum)))
 
         connection.execute(t.insert(), {"value": MyEnum.two})
         assert connection.scalar(t.select()) is MyEnum.two
@@ -2176,15 +2175,16 @@ class JSON(Indexable, TypeEngine[Any]):
 
     The :class:`_types.JSON` type stores arbitrary JSON format data, e.g.::
 
-        data_table = Table('data_table', metadata,
-            Column('id', Integer, primary_key=True),
-            Column('data', JSON)
+        data_table = Table(
+            "data_table",
+            metadata,
+            Column("id", Integer, primary_key=True),
+            Column("data", JSON),
         )
 
         with engine.connect() as conn:
             conn.execute(
-                data_table.insert(),
-                {"data": {"key1": "value1", "key2": "value2"}}
+                data_table.insert(), {"data": {"key1": "value1", "key2": "value2"}}
             )
 
     **JSON-Specific Expression Operators**
@@ -2194,7 +2194,7 @@ class JSON(Indexable, TypeEngine[Any]):
 
     * Keyed index operations::
 
-        data_table.c.data['some key']
+        data_table.c.data["some key"]
 
     * Integer index operations::
 
@@ -2202,7 +2202,7 @@ class JSON(Indexable, TypeEngine[Any]):
 
     * Path index operations::
 
-        data_table.c.data[('key_1', 'key_2', 5, ..., 'key_n')]
+        data_table.c.data[("key_1", "key_2", 5, ..., "key_n")]
 
     * Data casters for specific JSON element types, subsequent to an index
       or path operation being invoked::
@@ -2257,13 +2257,12 @@ class JSON(Indexable, TypeEngine[Any]):
 
            from sqlalchemy import cast, type_coerce
            from sqlalchemy import String, JSON
-           cast(
-               data_table.c.data['some_key'], String
-           ) == type_coerce(55, JSON)
+
+           cast(data_table.c.data["some_key"], String) == type_coerce(55, JSON)
 
         The above case now works directly as::
 
-            data_table.c.data['some_key'].as_integer() == 5
+            data_table.c.data["some_key"].as_integer() == 5
 
         For details on the previous comparison approach within the 1.3.x
         series, see the documentation for SQLAlchemy 1.2 or the included HTML
@@ -2294,6 +2293,7 @@ class JSON(Indexable, TypeEngine[Any]):
     should be SQL NULL as opposed to JSON ``"null"``::
 
         from sqlalchemy import null
+
         conn.execute(table.insert(), {"json_value": null()})
 
     To insert or select against a value that is JSON ``"null"``, use the
@@ -2326,7 +2326,8 @@ class JSON(Indexable, TypeEngine[Any]):
 
         engine = create_engine(
             "sqlite://",
-            json_serializer=lambda obj: json.dumps(obj, ensure_ascii=False))
+            json_serializer=lambda obj: json.dumps(obj, ensure_ascii=False),
+        )
 
     .. versionchanged:: 1.3.7
 
@@ -2344,7 +2345,7 @@ class JSON(Indexable, TypeEngine[Any]):
 
         :class:`sqlalchemy.dialects.sqlite.JSON`
 
-    """
+    """  # noqa: E501
 
     __visit_name__ = "JSON"
 
@@ -2378,8 +2379,7 @@ class JSON(Indexable, TypeEngine[Any]):
     transparent method is to use :func:`_expression.text`::
 
         Table(
-            'my_table', metadata,
-            Column('json_data', JSON, default=text("'null'"))
+            "my_table", metadata, Column("json_data", JSON, default=text("'null'"))
         )
 
     While it is possible to use :attr:`_types.JSON.NULL` in this context, the
@@ -2391,7 +2391,7 @@ class JSON(Indexable, TypeEngine[Any]):
     generated defaults.
 
 
-    """
+    """  # noqa: E501
 
     def __init__(self, none_as_null: bool = False):
         """Construct a :class:`_types.JSON` type.
@@ -2404,6 +2404,7 @@ class JSON(Indexable, TypeEngine[Any]):
          as SQL NULL::
 
              from sqlalchemy import null
+
              conn.execute(table.insert(), {"data": null()})
 
          .. note::
@@ -2545,15 +2546,13 @@ class JSON(Indexable, TypeEngine[Any]):
 
             e.g.::
 
-                stmt = select(
-                    mytable.c.json_column['some_data'].as_boolean()
-                ).where(
-                    mytable.c.json_column['some_data'].as_boolean() == True
+                stmt = select(mytable.c.json_column["some_data"].as_boolean()).where(
+                    mytable.c.json_column["some_data"].as_boolean() == True
                 )
 
             .. versionadded:: 1.3.11
 
-            """
+            """  # noqa: E501
             return self._binary_w_type(Boolean(), "as_boolean")
 
         def as_string(self):
@@ -2564,16 +2563,13 @@ class JSON(Indexable, TypeEngine[Any]):
 
             e.g.::
 
-                stmt = select(
-                    mytable.c.json_column['some_data'].as_string()
-                ).where(
-                    mytable.c.json_column['some_data'].as_string() ==
-                    'some string'
+                stmt = select(mytable.c.json_column["some_data"].as_string()).where(
+                    mytable.c.json_column["some_data"].as_string() == "some string"
                 )
 
             .. versionadded:: 1.3.11
 
-            """
+            """  # noqa: E501
             return self._binary_w_type(Unicode(), "as_string")
 
         def as_integer(self):
@@ -2584,15 +2580,13 @@ class JSON(Indexable, TypeEngine[Any]):
 
             e.g.::
 
-                stmt = select(
-                    mytable.c.json_column['some_data'].as_integer()
-                ).where(
-                    mytable.c.json_column['some_data'].as_integer() == 5
+                stmt = select(mytable.c.json_column["some_data"].as_integer()).where(
+                    mytable.c.json_column["some_data"].as_integer() == 5
                 )
 
             .. versionadded:: 1.3.11
 
-            """
+            """  # noqa: E501
             return self._binary_w_type(Integer(), "as_integer")
 
         def as_float(self):
@@ -2603,15 +2597,13 @@ class JSON(Indexable, TypeEngine[Any]):
 
             e.g.::
 
-                stmt = select(
-                    mytable.c.json_column['some_data'].as_float()
-                ).where(
-                    mytable.c.json_column['some_data'].as_float() == 29.75
+                stmt = select(mytable.c.json_column["some_data"].as_float()).where(
+                    mytable.c.json_column["some_data"].as_float() == 29.75
                 )
 
             .. versionadded:: 1.3.11
 
-            """
+            """  # noqa: E501
             return self._binary_w_type(Float(), "as_float")
 
         def as_numeric(self, precision, scale, asdecimal=True):
@@ -2622,16 +2614,13 @@ class JSON(Indexable, TypeEngine[Any]):
 
             e.g.::
 
-                stmt = select(
-                    mytable.c.json_column['some_data'].as_numeric(10, 6)
-                ).where(
-                    mytable.c.
-                    json_column['some_data'].as_numeric(10, 6) == 29.75
+                stmt = select(mytable.c.json_column["some_data"].as_numeric(10, 6)).where(
+                    mytable.c.json_column["some_data"].as_numeric(10, 6) == 29.75
                 )
 
             .. versionadded:: 1.4.0b2
 
-            """
+            """  # noqa: E501
             return self._binary_w_type(
                 Numeric(precision, scale, asdecimal=asdecimal), "as_numeric"
             )
@@ -2644,7 +2633,7 @@ class JSON(Indexable, TypeEngine[Any]):
 
             e.g.::
 
-                stmt = select(mytable.c.json_column['some_data'].as_json())
+                stmt = select(mytable.c.json_column["some_data"].as_json())
 
             This is typically the default behavior of indexed elements in any
             case.
@@ -2762,26 +2751,21 @@ class ARRAY(
     An :class:`_types.ARRAY` type is constructed given the "type"
     of element::
 
-        mytable = Table("mytable", metadata,
-                Column("data", ARRAY(Integer))
-            )
+        mytable = Table("mytable", metadata, Column("data", ARRAY(Integer)))
 
     The above type represents an N-dimensional array,
     meaning a supporting backend such as PostgreSQL will interpret values
     with any number of dimensions automatically.   To produce an INSERT
     construct that passes in a 1-dimensional array of integers::
 
-        connection.execute(
-                mytable.insert(),
-                {"data": [1,2,3]}
-        )
+        connection.execute(mytable.insert(), {"data": [1, 2, 3]})
 
     The :class:`_types.ARRAY` type can be constructed given a fixed number
     of dimensions::
 
-        mytable = Table("mytable", metadata,
-                Column("data", ARRAY(Integer, dimensions=2))
-            )
+        mytable = Table(
+            "mytable", metadata, Column("data", ARRAY(Integer, dimensions=2))
+        )
 
     Sending a number of dimensions is optional, but recommended if the
     datatype is to represent arrays of more than one dimension.  This number
@@ -2815,10 +2799,9 @@ class ARRAY(
     as well as UPDATE statements when the :meth:`_expression.Update.values`
     method is used::
 
-        mytable.update().values({
-            mytable.c.data[5]: 7,
-            mytable.c.data[2:7]: [1, 2, 3]
-        })
+        mytable.update().values(
+            {mytable.c.data[5]: 7, mytable.c.data[2:7]: [1, 2, 3]}
+        )
 
     Indexed access is one-based by default;
     for zero-based index conversion, set :paramref:`_types.ARRAY.zero_indexes`.
@@ -2840,6 +2823,7 @@ class ARRAY(
             from sqlalchemy import ARRAY
             from sqlalchemy.ext.mutable import MutableList
 
+
             class SomeOrmClass(Base):
                 # ...
 
@@ -2878,7 +2862,7 @@ class ARRAY(
 
         E.g.::
 
-          Column('myarray', ARRAY(Integer))
+          Column("myarray", ARRAY(Integer))
 
         Arguments are:
 
@@ -2987,9 +2971,7 @@ class ARRAY(
                 from sqlalchemy.sql import operators
 
                 conn.execute(
-                    select(table.c.data).where(
-                            table.c.data.any(7, operator=operators.lt)
-                        )
+                    select(table.c.data).where(table.c.data.any(7, operator=operators.lt))
                 )
 
             :param other: expression to be compared
@@ -3003,7 +2985,7 @@ class ARRAY(
 
                 :meth:`.types.ARRAY.Comparator.all`
 
-            """
+            """  # noqa: E501
             elements = util.preloaded.sql_elements
             operator = operator if operator else operators.eq
 
@@ -3036,9 +3018,7 @@ class ARRAY(
                 from sqlalchemy.sql import operators
 
                 conn.execute(
-                    select(table.c.data).where(
-                            table.c.data.all(7, operator=operators.lt)
-                        )
+                    select(table.c.data).where(table.c.data.all(7, operator=operators.lt))
                 )
 
             :param other: expression to be compared
@@ -3052,7 +3032,7 @@ class ARRAY(
 
                 :meth:`.types.ARRAY.Comparator.any`
 
-            """
+            """  # noqa: E501
             elements = util.preloaded.sql_elements
             operator = operator if operator else operators.eq
 
@@ -3541,14 +3521,13 @@ class Uuid(Emulated, TypeEngine[_UUID_RETURN]):
         t = Table(
             "t",
             metadata_obj,
-            Column('uuid_data', Uuid, primary_key=True),
-            Column("other_data", String)
+            Column("uuid_data", Uuid, primary_key=True),
+            Column("other_data", String),
         )
 
         with engine.begin() as conn:
             conn.execute(
-                t.insert(),
-                {"uuid_data": uuid.uuid4(), "other_data", "some data"}
+                t.insert(), {"uuid_data": uuid.uuid4(), "other_data": "some data"}
             )
 
     To have the :class:`_sqltypes.Uuid` datatype work with string-based
@@ -3562,7 +3541,7 @@ class Uuid(Emulated, TypeEngine[_UUID_RETURN]):
         :class:`_sqltypes.UUID` - represents exactly the ``UUID`` datatype
         without any backend-agnostic behaviors.
 
-    """
+    """  # noqa: E501
 
     __visit_name__ = "uuid"
 
index 228020ec20e4b8a7f09cedc6ff121d94da871105..bf38be341174cd93923f7af1803658b541190445 100644 (file)
@@ -311,11 +311,13 @@ class TypeEngine(Visitable, Generic[_T]):
         E.g.::
 
                 Table(
-                    'some_table', metadata,
+                    "some_table",
+                    metadata,
                     Column(
                         String(50).evaluates_none(),
                         nullable=True,
-                        server_default='no value')
+                        server_default="no value",
+                    ),
                 )
 
         The ORM uses this flag to indicate that a positive value of ``None``
@@ -641,7 +643,7 @@ class TypeEngine(Visitable, Generic[_T]):
             string_type = String()
 
             string_type = string_type.with_variant(
-                mysql.VARCHAR(collation='foo'), 'mysql', 'mariadb'
+                mysql.VARCHAR(collation="foo"), "mysql", "mariadb"
             )
 
         The variant mapping indicates that when this type is
@@ -1128,7 +1130,7 @@ class ExternalType(TypeEngineMixin):
     """
 
     cache_ok: Optional[bool] = None
-    """Indicate if statements using this :class:`.ExternalType` are "safe to
+    '''Indicate if statements using this :class:`.ExternalType` are "safe to
     cache".
 
     The default value ``None`` will emit a warning and then not allow caching
@@ -1169,12 +1171,12 @@ class ExternalType(TypeEngineMixin):
     series of tuples.   Given a previously un-cacheable type as::
 
         class LookupType(UserDefinedType):
-            '''a custom type that accepts a dictionary as a parameter.
+            """a custom type that accepts a dictionary as a parameter.
 
             this is the non-cacheable version, as "self.lookup" is not
             hashable.
 
-            '''
+            """
 
             def __init__(self, lookup):
                 self.lookup = lookup
@@ -1182,8 +1184,7 @@ class ExternalType(TypeEngineMixin):
             def get_col_spec(self, **kw):
                 return "VARCHAR(255)"
 
-            def bind_processor(self, dialect):
-                # ...  works with "self.lookup" ...
+            def bind_processor(self, dialect): ...  # works with "self.lookup" ...
 
     Where "lookup" is a dictionary.  The type will not be able to generate
     a cache key::
@@ -1219,7 +1220,7 @@ class ExternalType(TypeEngineMixin):
     to the ".lookup" attribute::
 
         class LookupType(UserDefinedType):
-            '''a custom type that accepts a dictionary as a parameter.
+            """a custom type that accepts a dictionary as a parameter.
 
             The dictionary is stored both as itself in a private variable,
             and published in a public variable as a sorted tuple of tuples,
@@ -1227,7 +1228,7 @@ class ExternalType(TypeEngineMixin):
             two equivalent dictionaries.  Note it assumes the keys and
             values of the dictionary are themselves hashable.
 
-            '''
+            """
 
             cache_ok = True
 
@@ -1236,15 +1237,12 @@ class ExternalType(TypeEngineMixin):
 
                 # assume keys/values of "lookup" are hashable; otherwise
                 # they would also need to be converted in some way here
-                self.lookup = tuple(
-                    (key, lookup[key]) for key in sorted(lookup)
-                )
+                self.lookup = tuple((key, lookup[key]) for key in sorted(lookup))
 
             def get_col_spec(self, **kw):
                 return "VARCHAR(255)"
 
-            def bind_processor(self, dialect):
-                # ...  works with "self._lookup" ...
+            def bind_processor(self, dialect): ...  # works with "self._lookup" ...
 
     Where above, the cache key for ``LookupType({"a": 10, "b": 20})`` will be::
 
@@ -1262,7 +1260,7 @@ class ExternalType(TypeEngineMixin):
 
         :ref:`sql_caching`
 
-    """  # noqa: E501
+    '''  # noqa: E501
 
     @util.non_memoized_property
     def _static_cache_key(
@@ -1304,10 +1302,11 @@ class UserDefinedType(
 
       import sqlalchemy.types as types
 
+
       class MyType(types.UserDefinedType):
           cache_ok = True
 
-          def __init__(self, precision = 8):
+          def __init__(self, precision=8):
               self.precision = precision
 
           def get_col_spec(self, **kw):
@@ -1316,19 +1315,23 @@ class UserDefinedType(
           def bind_processor(self, dialect):
               def process(value):
                   return value
+
               return process
 
           def result_processor(self, dialect, coltype):
               def process(value):
                   return value
+
               return process
 
     Once the type is made, it's immediately usable::
 
-      table = Table('foo', metadata_obj,
-          Column('id', Integer, primary_key=True),
-          Column('data', MyType(16))
-          )
+      table = Table(
+          "foo",
+          metadata_obj,
+          Column("id", Integer, primary_key=True),
+          Column("data", MyType(16)),
+      )
 
     The ``get_col_spec()`` method will in most cases receive a keyword
     argument ``type_expression`` which refers to the owning expression
@@ -1493,7 +1496,7 @@ class NativeForEmulated(TypeEngineMixin):
 
 
 class TypeDecorator(SchemaEventTarget, ExternalType, TypeEngine[_T]):
-    """Allows the creation of types which add additional functionality
+    '''Allows the creation of types which add additional functionality
     to an existing type.
 
     This method is preferred to direct subclassing of SQLAlchemy's
@@ -1504,10 +1507,11 @@ class TypeDecorator(SchemaEventTarget, ExternalType, TypeEngine[_T]):
 
       import sqlalchemy.types as types
 
+
       class MyType(types.TypeDecorator):
-          '''Prefixes Unicode values with "PREFIX:" on the way in and
+          """Prefixes Unicode values with "PREFIX:" on the way in and
           strips it off on the way out.
-          '''
+          """
 
           impl = types.Unicode
 
@@ -1599,6 +1603,7 @@ class TypeDecorator(SchemaEventTarget, ExternalType, TypeEngine[_T]):
             from sqlalchemy import JSON
             from sqlalchemy import TypeDecorator
 
+
             class MyJsonType(TypeDecorator):
                 impl = JSON
 
@@ -1619,6 +1624,7 @@ class TypeDecorator(SchemaEventTarget, ExternalType, TypeEngine[_T]):
             from sqlalchemy import ARRAY
             from sqlalchemy import TypeDecorator
 
+
             class MyArrayType(TypeDecorator):
                 impl = ARRAY
 
@@ -1627,8 +1633,7 @@ class TypeDecorator(SchemaEventTarget, ExternalType, TypeEngine[_T]):
                 def coerce_compared_value(self, op, value):
                     return self.impl.coerce_compared_value(op, value)
 
-
-    """
+    '''
 
     __visit_name__ = "type_decorator"
 
index 737ee6822d194f1a38b4423435ff2b6707800f8e..d7252f899ef233ab7f35821a5a9e2059cc920754 100644 (file)
@@ -107,7 +107,7 @@ def join_condition(
 
     would produce an expression along the lines of::
 
-        tablea.c.id==tableb.c.tablea_id
+        tablea.c.id == tableb.c.tablea_id
 
     The join is determined based on the foreign key relationships
     between the two selectables.   If there are multiple ways
@@ -269,7 +269,7 @@ def visit_binary_product(
 
     The function is of the form::
 
-        def my_fn(binary, left, right)
+        def my_fn(binary, left, right): ...
 
     For each binary expression located which has a
     comparison operator, the product of "left" and
@@ -278,12 +278,11 @@ def visit_binary_product(
 
     Hence an expression like::
 
-        and_(
-            (a + b) == q + func.sum(e + f),
-            j == r
-        )
+        and_((a + b) == q + func.sum(e + f), j == r)
+
+    would have the traversal:
 
-    would have the traversal::
+    .. sourcecode:: text
 
         a <eq> q
         a <eq> e
@@ -529,9 +528,7 @@ def bind_values(clause):
 
     E.g.::
 
-        >>> expr = and_(
-        ...    table.c.foo==5, table.c.foo==7
-        ... )
+        >>> expr = and_(table.c.foo == 5, table.c.foo == 7)
         >>> bind_values(expr)
         [5, 7]
     """
@@ -1044,20 +1041,24 @@ class ClauseAdapter(visitors.ReplacingExternalTraversal):
 
     E.g.::
 
-      table1 = Table('sometable', metadata,
-          Column('col1', Integer),
-          Column('col2', Integer)
-          )
-      table2 = Table('someothertable', metadata,
-          Column('col1', Integer),
-          Column('col2', Integer)
-          )
+      table1 = Table(
+          "sometable",
+          metadata,
+          Column("col1", Integer),
+          Column("col2", Integer),
+      )
+      table2 = Table(
+          "someothertable",
+          metadata,
+          Column("col1", Integer),
+          Column("col2", Integer),
+      )
 
       condition = table1.c.col1 == table2.c.col1
 
     make an alias of table1::
 
-      s = table1.alias('foo')
+      s = table1.alias("foo")
 
     calling ``ClauseAdapter(s).traverse(condition)`` converts
     condition to read::
index 3e7c24eaff4bd8aa78a74f9072b0374a86f4c960..2c7202c2989cb30a05652b46a09e4cbe97bf460a 100644 (file)
@@ -924,11 +924,13 @@ def traverse(
 
         from sqlalchemy.sql import visitors
 
-        stmt = select(some_table).where(some_table.c.foo == 'bar')
+        stmt = select(some_table).where(some_table.c.foo == "bar")
+
 
         def visit_bindparam(bind_param):
             print("found bound value: %s" % bind_param.value)
 
+
         visitors.traverse(stmt, {}, {"bindparam": visit_bindparam})
 
     The iteration of objects uses the :func:`.visitors.iterate` function,
index f2292224e80276301a64ff5e4e5296acdf2c3951..2555073c2801841cd56604f60bf7832129fc34d5 100644 (file)
@@ -121,7 +121,9 @@ def combinations(
      passed, each argument combination is turned into a pytest.param() object,
      mapping the elements of the argument tuple to produce an id based on a
      character value in the same position within the string template using the
-     following scheme::
+     following scheme:
+
+     .. sourcecode:: text
 
         i - the given argument is a string that is part of the id only, don't
             pass it as an argument
@@ -145,7 +147,7 @@ def combinations(
             (operator.ne, "ne"),
             (operator.gt, "gt"),
             (operator.lt, "lt"),
-            id_="na"
+            id_="na",
         )
         def test_operator(self, opfunc, name):
             pass
@@ -227,14 +229,9 @@ def variation(argname_or_fn, cases=None):
 
         @testing.variation("querytyp", ["select", "subquery", "legacy_query"])
         @testing.variation("lazy", ["select", "raise", "raise_on_sql"])
-        def test_thing(
-            self,
-            querytyp,
-            lazy,
-            decl_base
-        ):
+        def test_thing(self, querytyp, lazy, decl_base):
             class Thing(decl_base):
-                __tablename__ = 'thing'
+                __tablename__ = "thing"
 
                 # use name directly
                 rel = relationship("Rel", lazy=lazy.name)
@@ -249,7 +246,6 @@ def variation(argname_or_fn, cases=None):
             else:
                 querytyp.fail()
 
-
     The variable provided is a slots object of boolean variables, as well
     as the name of the case itself under the attribute ".name"
 
index b57ec1afb52632521ab636ebcb2f3d2676d47214..080551222b179188860cbecb83bf2c31a6c5be51 100644 (file)
@@ -108,7 +108,9 @@ def generate_db_urls(db_urls, extra_drivers):
     """Generate a set of URLs to test given configured URLs plus additional
     driver names.
 
-    Given::
+    Given:
+
+    .. sourcecode:: text
 
         --dburi postgresql://db1  \
         --dburi postgresql://db2  \
@@ -116,7 +118,9 @@ def generate_db_urls(db_urls, extra_drivers):
         --dbdriver=psycopg2 --dbdriver=asyncpg
 
     Noting that the default postgresql driver is psycopg2,  the output
-    would be::
+    would be:
+
+    .. sourcecode:: text
 
         postgresql+psycopg2://db1
         postgresql+asyncpg://db1
@@ -130,7 +134,9 @@ def generate_db_urls(db_urls, extra_drivers):
     we want to keep it in that dburi.
 
     Driver specific query options can be specified by added them to the
-    driver name. For example, to a sample option the asyncpg::
+    driver name. For example, to a sample option the asyncpg:
+
+    .. sourcecode:: text
 
         --dburi postgresql://db1  \
         --dbdriver=asyncpg?some_option=a_value
index b1d3d0f085a46c93cf4837049bb15c9a1b061f92..539d0233b526837c3af1a6e5077fa936382f77d7 100644 (file)
@@ -91,7 +91,9 @@ class SuiteRequirements(Requirements):
 
     @property
     def table_value_constructor(self):
-        """Database / dialect supports a query like::
+        """Database / dialect supports a query like:
+
+        .. sourcecode:: sql
 
              SELECT * FROM VALUES ( (c1, c2), (c1, c2), ...)
              AS some_table(col1, col2)
@@ -992,7 +994,9 @@ class SuiteRequirements(Requirements):
     @property
     def binary_literals(self):
         """target backend supports simple binary literals, e.g. an
-        expression like::
+        expression like:
+
+        .. sourcecode:: sql
 
             SELECT CAST('foo' AS BINARY)
 
@@ -1173,9 +1177,7 @@ class SuiteRequirements(Requirements):
 
             expr = decimal.Decimal("15.7563")
 
-            value = e.scalar(
-                select(literal(expr))
-            )
+            value = e.scalar(select(literal(expr)))
 
             assert value == expr
 
@@ -1343,7 +1345,9 @@ class SuiteRequirements(Requirements):
         present in a subquery in the WHERE clause.
 
         This is an ANSI-standard syntax that apparently MySQL can't handle,
-        such as::
+        such as:
+
+        .. sourcecode:: sql
 
             UPDATE documents SET flag=1 WHERE documents.title IN
                 (SELECT max(documents.title) AS title
@@ -1376,7 +1380,11 @@ class SuiteRequirements(Requirements):
         """target database supports ordering by a column from a SELECT
         inside of a UNION
 
-        E.g.  (SELECT id, ...) UNION (SELECT id, ...) ORDER BY id
+        E.g.:
+
+        .. sourcecode:: sql
+
+            (SELECT id, ...) UNION (SELECT id, ...) ORDER BY id
 
         """
         return exclusions.open()
@@ -1386,7 +1394,9 @@ class SuiteRequirements(Requirements):
         """target backend supports ORDER BY a column label within an
         expression.
 
-        Basically this::
+        Basically this:
+
+        .. sourcecode:: sql
 
             select data as foo from test order by foo || 'bar'
 
index f6fad11d0e2a7869e22e0eb100639e32555f485d..d2f8f5b618471639d0e9cb775ea4ff4c07b6e1a6 100644 (file)
@@ -254,18 +254,19 @@ def flag_combinations(*combinations):
             dict(lazy=False, passive=True),
             dict(lazy=False, passive=True, raiseload=True),
         )
-
+        def test_fn(lazy, passive, raiseload): ...
 
     would result in::
 
         @testing.combinations(
-            ('', False, False, False),
-            ('lazy', True, False, False),
-            ('lazy_passive', True, True, False),
-            ('lazy_passive', True, True, True),
-            id_='iaaa',
-            argnames='lazy,passive,raiseload'
+            ("", False, False, False),
+            ("lazy", True, False, False),
+            ("lazy_passive", True, True, False),
+            ("lazy_passive", True, True, True),
+            id_="iaaa",
+            argnames="lazy,passive,raiseload",
         )
+        def test_fn(lazy, passive, raiseload): ...
 
     """
 
index 34b435e05f7da8f757dd47cb6128d2bb067fc47f..719817acd4d4405e10392d66d2a7c22862975344 100644 (file)
@@ -62,8 +62,8 @@ def merge_lists_w_ordering(a: List[Any], b: List[Any]) -> List[Any]:
 
     Example::
 
-        >>> a = ['__tablename__', 'id', 'x', 'created_at']
-        >>> b = ['id', 'name', 'data', 'y', 'created_at']
+        >>> a = ["__tablename__", "id", "x", "created_at"]
+        >>> b = ["id", "name", "data", "y", "created_at"]
         >>> merge_lists_w_ordering(a, b)
         ['__tablename__', 'id', 'name', 'data', 'y', 'x', 'created_at']
 
index 3034715b5e661f6bce51bb3ae9ece84d64208389..3a59a8a4bcd4e9db1bf82dc8dd3d77f3ef194388 100644 (file)
@@ -205,10 +205,10 @@ def deprecated_params(**specs: Tuple[str, str]) -> Callable[[_F], _F]:
             weak_identity_map=(
                 "0.7",
                 "the :paramref:`.Session.weak_identity_map parameter "
-                "is deprecated."
+                "is deprecated.",
             )
-
         )
+        def some_function(**kwargs): ...
 
     """
 
index 82cfca8c557bbee4827200925d1585bd881f3b17..4f0e17420ada563c810c4cb6f0a59c7f88f28f03 100644 (file)
@@ -656,7 +656,9 @@ def format_argspec_init(method, grouped=True):
     """format_argspec_plus with considerations for typical __init__ methods
 
     Wraps format_argspec_plus with error handling strategies for typical
-    __init__ cases::
+    __init__ cases:
+
+    .. sourcecode:: text
 
       object.__init__ -> (self)
       other unreflectable (usually C) -> (self, *args, **kwargs)
@@ -711,7 +713,9 @@ def create_proxy_methods(
 def getargspec_init(method):
     """inspect.getargspec with considerations for typical __init__ methods
 
-    Wraps inspect.getargspec with error handling for typical __init__ cases::
+    Wraps inspect.getargspec with error handling for typical __init__ cases:
+
+    .. sourcecode:: text
 
       object.__init__ -> (self)
       other unreflectable (usually C) -> (self, *args, **kwargs)
@@ -1585,9 +1589,9 @@ class hybridmethod(Generic[_T]):
 class symbol(int):
     """A constant symbol.
 
-    >>> symbol('foo') is symbol('foo')
+    >>> symbol("foo") is symbol("foo")
     True
-    >>> symbol('foo')
+    >>> symbol("foo")
     <symbol 'foo>
 
     A slight refinement of the MAGICCOOKIE=object() pattern.  The primary
index 11a09ab67fb7204908e8f923eb5baad2af693f40..c6d2616e6dab394df0c7c9fec0f35e2c9e1d7b45 100644 (file)
@@ -10,6 +10,7 @@ running a kill of all detected sessions does not seem to release the
 database in process.
 
 """
+
 import logging
 import sys
 
index e3b5df0ad48ca1eaa3042853905a1d30b166413b..c34d54169e8fcb1bab4f388eb99fe572c1564306 100644 (file)
@@ -96,11 +96,11 @@ class MappedColumnTest(_MappedColumnTest):
 
             ll = list
 
+
             def make_class() -> None:
 
                 x: ll[int] = [1, 2, 3]
 
-
         """  # noqa: E501
 
         class Foo(decl_base):
index 5b5989c9205dae306afcb82200b59a405ec0f377..d0f8e680d0dee7cf371a677ca5605a89d77863c5 100644 (file)
@@ -469,19 +469,20 @@ class GeometryFixtureBase(fixtures.DeclarativeMappedTest):
     e.g.::
 
         self._fixture_from_geometry(
-            "a": {
-                "subclasses": {
-                    "b": {"polymorphic_load": "selectin"},
-                    "c": {
-                        "subclasses": {
-                            "d": {
-                                "polymorphic_load": "inlne", "single": True
-                            },
-                            "e": {
-                                "polymorphic_load": "inline", "single": True
+            {
+                "a": {
+                    "subclasses": {
+                        "b": {"polymorphic_load": "selectin"},
+                        "c": {
+                            "subclasses": {
+                                "d": {"polymorphic_load": "inlne", "single": True},
+                                "e": {
+                                    "polymorphic_load": "inline",
+                                    "single": True,
+                                },
                             },
+                            "polymorphic_load": "selectin",
                         },
-                        "polymorphic_load": "selectin",
                     }
                 }
             }
@@ -490,42 +491,41 @@ class GeometryFixtureBase(fixtures.DeclarativeMappedTest):
     would provide the equivalent of::
 
         class a(Base):
-            __tablename__ = 'a'
+            __tablename__ = "a"
 
             id = Column(Integer, primary_key=True)
             a_data = Column(String(50))
             type = Column(String(50))
-            __mapper_args__ = {
-                "polymorphic_on": type,
-                "polymorphic_identity": "a"
-            }
+            __mapper_args__ = {"polymorphic_on": type, "polymorphic_identity": "a"}
+
 
         class b(a):
-            __tablename__ = 'b'
+            __tablename__ = "b"
 
-            id = Column(ForeignKey('a.id'), primary_key=True)
+            id = Column(ForeignKey("a.id"), primary_key=True)
             b_data = Column(String(50))
 
             __mapper_args__ = {
                 "polymorphic_identity": "b",
-                "polymorphic_load": "selectin"
+                "polymorphic_load": "selectin",
             }
 
             # ...
 
+
         class c(a):
-            __tablename__ = 'c'
+            __tablename__ = "c"
 
-        class d(c):
-            # ...
 
-        class e(c):
-            # ...
+        class d(c): ...
+
+
+        class e(c): ...
 
     Declarative is used so that we get extra behaviors of declarative,
     such as single-inheritance column masking.
 
-    """
+    """  # noqa: E501
 
     run_create_tables = "each"
     run_define_tables = "each"
index a783fad3e8a5b3b661c80d721ad93ec7a1e22fc8..0d4211656a30cb4b023be86c8df7db9423302199 100644 (file)
@@ -433,7 +433,9 @@ class DirectSelfRefFKTest(fixtures.MappedTest, AssertsCompiledSQL):
     that points to itself, e.g. within a SQL function or similar.
     The test is against a materialized path setup.
 
-    this is an **extremely** unusual case::
+    this is an **extremely** unusual case:
+
+    .. sourcecode:: text
 
         Entity
         ------
@@ -1024,7 +1026,9 @@ class CompositeSelfRefFKTest(fixtures.MappedTest, AssertsCompiledSQL):
     the relationship(), one col points
     to itself in the same table.
 
-    this is a very unusual case::
+    this is a very unusual case:
+
+    .. sourcecode:: text
 
         company         employee
         ----------      ----------
index a5f4ee11ec826cd022ca9995be720c6fbb7f29d8..6eb8accc3ddb8901255b3cf6e56cfc9c7372d86e 100644 (file)
@@ -301,7 +301,9 @@ class DefaultRequirements(SuiteRequirements):
     @property
     def binary_literals(self):
         """target backend supports simple binary literals, e.g. an
-        expression like::
+        expression like:
+
+        .. sourcecode:: sql
 
             SELECT CAST('foo' AS BINARY)
 
@@ -522,7 +524,9 @@ class DefaultRequirements(SuiteRequirements):
         present in a subquery in the WHERE clause.
 
         This is an ANSI-standard syntax that apparently MySQL can't handle,
-        such as::
+        such as:
+
+        .. sourcecode:: sql
 
             UPDATE documents SET flag=1 WHERE documents.title IN
                 (SELECT max(documents.title) AS title
@@ -1472,9 +1476,7 @@ class DefaultRequirements(SuiteRequirements):
 
             expr = decimal.Decimal("15.7563")
 
-            value = e.scalar(
-                select(literal(expr))
-            )
+            value = e.scalar(select(literal(expr)))
 
             assert value == expr
 
index ef7eac51e3d2d3e4c7835682b68a5e3f8997679a..383f2adaabd0748e5a78ebc054ef828426691d07 100644 (file)
@@ -296,7 +296,9 @@ class CTETest(fixtures.TestBase, AssertsCompiledSQL):
     def test_recursive_union_no_alias_two(self):
         """
 
-        pg's example::
+        pg's example:
+
+        .. sourcecode:: sql
 
             WITH RECURSIVE t(n) AS (
                 VALUES (1)
index 139499d941e76006bda0721e25db1702d2527bc1..6608c51073bb2c67902bda7c7a8a20a13788e8c7 100644 (file)
@@ -97,7 +97,7 @@ class TestFindUnmatchingFroms(fixtures.TablesTest):
     @testing.combinations(("lateral",), ("cartesian",), ("join",))
     def test_lateral_subqueries(self, control):
         """
-        ::
+        .. sourcecode:: sql
 
             test=> create table a (id integer);
             CREATE TABLE
index b7e82391c16fe2c371c82b220bd3aa0a7c364ba6..163df0a0d71b9aca353a89bdc9967a0297047a07 100644 (file)
@@ -1626,8 +1626,7 @@ class TableValuedCompileTest(fixtures.TestBase, AssertsCompiledSQL):
 
     def test_alias_column(self):
         """
-
-        ::
+        .. sourcecode:: sql
 
             SELECT x, y
             FROM
@@ -1658,8 +1657,7 @@ class TableValuedCompileTest(fixtures.TestBase, AssertsCompiledSQL):
 
     def test_column_valued_two(self):
         """
-
-        ::
+        .. sourcecode:: sql
 
             SELECT x, y
             FROM
@@ -1774,7 +1772,7 @@ class TableValuedCompileTest(fixtures.TestBase, AssertsCompiledSQL):
 
     def test_function_alias(self):
         """
-        ::
+        .. sourcecode:: sql
 
             SELECT result_elem -> 'Field' as field
             FROM "check" AS check_, json_array_elements(
index f3bc8e494813f9a3a5400833513b966f0a177804..58a64e5c3815e4f9eae06be92331ad4adb475d92 100644 (file)
@@ -195,7 +195,9 @@ class QuoteTest(fixtures.TestBase, AssertsCompiledSQL):
         """test the quoting of labels.
 
         If labels aren't quoted, a query in postgresql in particular will
-        fail since it produces::
+        fail since it produces:
+
+        .. sourcecode:: sql
 
             SELECT
                 LaLa.lowercase, LaLa."UPPERCASE", LaLa."MixedCase", LaLa."ASC"
index 4e7a425da554156de6333cd60f93a815922f88ca..7e73dd0be3588a5a3f533ba66796071bac481430 100644 (file)
@@ -60,7 +60,7 @@ def run_file(cmd: code_writer_cmd, file: Path):
 
 def run(cmd: code_writer_cmd):
     i = 0
-    for file in sa_path.glob(f"**/*_cy.py"):
+    for file in sa_path.glob("**/*_cy.py"):
         run_file(cmd, file)
         i += 1
     cmd.write_status(f"\nDone. Processed {i} files.")
index 8d24a9163af8b4413269c698b4ee26c91b99f010..3a06ac9f2735b48d047a18c69b0a7c9c6b51af74 100644 (file)
@@ -13,6 +13,7 @@ from argparse import ArgumentParser
 from argparse import RawDescriptionHelpFormatter
 from collections.abc import Iterator
 from functools import partial
+from itertools import chain
 from pathlib import Path
 import re
 from typing import NamedTuple
@@ -25,7 +26,12 @@ from black.mode import TargetVersion
 
 
 home = Path(__file__).parent.parent
-ignore_paths = (re.compile(r"changelog/unreleased_\d{2}"),)
+ignore_paths = (
+    re.compile(r"changelog/unreleased_\d{2}"),
+    re.compile(r"README\.unittests\.rst"),
+    re.compile(r"\.tox"),
+    re.compile(r"build"),
+)
 
 
 class BlockLine(NamedTuple):
@@ -45,6 +51,7 @@ def _format_block(
     errors: list[tuple[int, str, Exception]],
     is_doctest: bool,
     file: str,
+    is_python_file: bool,
 ) -> list[str]:
     if not is_doctest:
         # The first line may have additional padding. Remove then restore later
@@ -58,8 +65,9 @@ def _format_block(
         add_padding = None
         code = "\n".join(l.code for l in input_block)
 
+    mode = PYTHON_BLACK_MODE if is_python_file else RST_BLACK_MODE
     try:
-        formatted = format_str(code, mode=BLACK_MODE)
+        formatted = format_str(code, mode=mode)
     except Exception as e:
         start_line = input_block[0].line_no
         first_error = not errors
@@ -119,6 +127,7 @@ start_code_section = re.compile(
     r"^(((?!\.\.).+::)|(\.\.\s*sourcecode::(.*py.*)?)|(::))$"
 )
 start_space = re.compile(r"^(\s*)[^ ]?")
+not_python_line = re.compile(r"^\s+[$:]")
 
 
 def format_file(
@@ -131,6 +140,8 @@ def format_file(
     doctest_block: _Block | None = None
     plain_block: _Block | None = None
 
+    is_python_file = file.suffix == ".py"
+
     plain_code_section = False
     plain_padding = None
     plain_padding_len = None
@@ -144,6 +155,7 @@ def format_file(
         errors=errors,
         is_doctest=True,
         file=str(file),
+        is_python_file=is_python_file,
     )
 
     def doctest_format():
@@ -158,6 +170,7 @@ def format_file(
         errors=errors,
         is_doctest=False,
         file=str(file),
+        is_python_file=is_python_file,
     )
 
     def plain_format():
@@ -246,6 +259,14 @@ def format_file(
                         ]
                         continue
                 buffer.append(line)
+            elif (
+                is_python_file
+                and not plain_block
+                and not_python_line.match(line)
+            ):
+                # not a python block. ignore it
+                plain_code_section = False
+                buffer.append(line)
             else:
                 # start of a plain block
                 assert not doctest_block
@@ -288,9 +309,12 @@ def format_file(
 
 
 def iter_files(directory: str) -> Iterator[Path]:
+    dir_path = home / directory
     yield from (
         file
-        for file in (home / directory).glob("./**/*.rst")
+        for file in chain(
+            dir_path.glob("./**/*.rst"), dir_path.glob("./**/*.py")
+        )
         if not any(pattern.search(file.as_posix()) for pattern in ignore_paths)
     )
 
@@ -352,7 +376,7 @@ Use --report-doctest to ignore errors on plain code blocks.
         "-d",
         "--directory",
         help="Find documents in this directory and its sub dirs",
-        default="doc/build",
+        default=".",
     )
     parser.add_argument(
         "-c",
@@ -372,7 +396,8 @@ Use --report-doctest to ignore errors on plain code blocks.
         "-l",
         "--project-line-length",
         help="Configure the line length to the project value instead "
-        "of using the black default of 88",
+        "of using the black default of 88. Python files always use the"
+        "project line length",
         action="store_true",
     )
     parser.add_argument(
@@ -385,18 +410,25 @@ Use --report-doctest to ignore errors on plain code blocks.
     args = parser.parse_args()
 
     config = parse_pyproject_toml(home / "pyproject.toml")
-    BLACK_MODE = Mode(
-        target_versions={
-            TargetVersion[val.upper()]
-            for val in config.get("target_version", [])
-            if val != "py27"
-        },
+    target_versions = {
+        TargetVersion[val.upper()]
+        for val in config.get("target_version", [])
+        if val != "py27"
+    }
+
+    RST_BLACK_MODE = Mode(
+        target_versions=target_versions,
         line_length=(
             config.get("line_length", DEFAULT_LINE_LENGTH)
             if args.project_line_length
             else DEFAULT_LINE_LENGTH
         ),
     )
+    PYTHON_BLACK_MODE = Mode(
+        target_versions=target_versions,
+        # Remove a few char to account for normal indent
+        line_length=(config.get("line_length", 4) - 4 or DEFAULT_LINE_LENGTH),
+    )
     REPORT_ONLY_DOCTEST = args.report_doctest
 
     main(args.file, args.directory, args.exit_on_error, args.check)
index 31832ae8bfadec72306daa6e193f44477481a894..b9f9d572b00517e19778cbc1f28ead136cbc4c45 100644 (file)
@@ -370,11 +370,14 @@ def process_module(modname: str, filename: str, cmd: code_writer_cmd) -> str:
     # use tempfile in same path as the module, or at least in the
     # current working directory, so that black / zimports use
     # local pyproject.toml
-    with NamedTemporaryFile(
-        mode="w",
-        delete=False,
-        suffix=".py",
-    ) as buf, open(filename) as orig_py:
+    with (
+        NamedTemporaryFile(
+            mode="w",
+            delete=False,
+            suffix=".py",
+        ) as buf,
+        open(filename) as orig_py,
+    ):
         in_block = False
         current_clsname = None
         for line in orig_py:
index b777ae406a28d628fca2b53b96e25d142edec246..dc68b40f0a195e72ebe3763fae1d88daead34091 100644 (file)
@@ -27,11 +27,14 @@ def _fns_in_deterministic_order():
 
 
 def process_functions(filename: str, cmd: code_writer_cmd) -> str:
-    with NamedTemporaryFile(
-        mode="w",
-        delete=False,
-        suffix=".py",
-    ) as buf, open(filename) as orig_py:
+    with (
+        NamedTemporaryFile(
+            mode="w",
+            delete=False,
+            suffix=".py",
+        ) as buf,
+        open(filename) as orig_py,
+    ):
         indent = ""
         in_block = False
 
index a7a2eb5f4308182b63caef4734289f27680ae7a7..a2b38b7c47f7404d0641d28fe43fd35dafc034ee 100644 (file)
@@ -44,11 +44,14 @@ def process_module(
     # current working directory, so that black / zimports use
     # local pyproject.toml
     found = 0
-    with NamedTemporaryFile(
-        mode="w",
-        delete=False,
-        suffix=".py",
-    ) as buf, open(filename) as orig_py:
+    with (
+        NamedTemporaryFile(
+            mode="w",
+            delete=False,
+            suffix=".py",
+        ) as buf,
+        open(filename) as orig_py,
+    ):
         indent = ""
         in_block = False
         current_fnname = given_fnname = None
index 966705690de4e83326b36df646a199df336fd8e3..72bb08cc484d9c72d058adf5f129fc2e12ccd90f 100644 (file)
@@ -3,22 +3,22 @@ Debug ORMAdapter calls within ORM runs.
 
 Demos::
 
-    python tools/trace_orm_adapter.py -m pytest \
+    python tools/trace_orm_adapter.py -m pytest \
         test/orm/inheritance/test_polymorphic_rel.py::PolymorphicAliasedJoinsTest::test_primary_eager_aliasing_joinedload
 
-    python tools/trace_orm_adapter.py -m pytest \
+    python tools/trace_orm_adapter.py -m pytest \
         test/orm/test_eager_relations.py::LazyLoadOptSpecificityTest::test_pathed_joinedload_aliased_abs_bcs
 
-    python tools/trace_orm_adapter.py my_test_script.py
+    python tools/trace_orm_adapter.py my_test_script.py
 
 
 The above two tests should spit out a ton of debug output.  If a test or program
 has no debug output at all, that's a good thing!  it means ORMAdapter isn't
 used for that case.
 
-You can then set a breakpoint at the end of any adapt step:
+You can then set a breakpoint at the end of any adapt step::
 
-    python tools/trace_orm_adapter.py -d 10 -m pytest -s \
+    python tools/trace_orm_adapter.py -d 10 -m pytest -s \
         test/orm/test_eager_relations.py::LazyLoadOptSpecificityTest::test_pathed_joinedload_aliased_abs_bcs