command run is "pyupgrade --py37-plus --keep-runtime-typing --keep-percent-format <files...>"
pyupgrade also changes assert_ to assertTrue; that rewrite was reverted,
since assertTrue does not exist in the SQLAlchemy test fixtures
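For reference, a minimal before/after sketch of the rewrites this run
applies (the Node/TaggedNode classes are hypothetical, not taken from
this diff):

    class Node:
        def __init__(self, id):
            self.id = id
            # set() around a list literal becomes a set literal:
            self.tags = set(["a", "b"])  # -> {"a", "b"}
            # dict() around a generator becomes a dict comprehension:
            self.sizes = dict((t, len(t)) for t in self.tags)
            # -> {t: len(t) for t in self.tags}

        def __repr__(self):
            # str.format() with trivial placeholders becomes an f-string:
            return "Node(id={})".format(self.id)  # -> f"Node(id={self.id})"

    class TaggedNode(Node):
        def __init__(self, id):
            # two-argument super() becomes zero-argument super():
            super(TaggedNode, self).__init__(id)  # -> super().__init__(id)

--keep-percent-format leaves "%s"-style percent formatting untouched
(visible in the hunks below where percent formatting survives), and
--keep-runtime-typing disables the PEP 585 / PEP 604 annotation
rewrites. The unquoted annotations in the hunks below rely on PEP 563
("from __future__ import annotations") being in effect in those modules.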
Change-Id: Ie1ed2675c7b11d893d78e028aad0d1576baebb55
# only "A" is referenced explicitly. Using "collections",
# we deal with a dict of key/sets of integers directly.
- session.add_all([A(collections={"1": set([1, 2, 3])})])
+ session.add_all([A(collections={"1": {1, 2, 3}})])
session.commit()
a1 = session.query(A).first()
return len(self.path.split(".")) - 1
def __repr__(self):
- return "Node(id={})".format(self.id)
+ return f"Node(id={self.id})"
def __str__(self):
root_depth = self.depth
# reduce some verbosity when we make a new object
kw.setdefault("start", current_time() - datetime.timedelta(days=3))
kw.setdefault("end", current_time() + datetime.timedelta(days=3))
- super(VersionedStartEnd, self).__init__(**kw)
+ super().__init__(**kw)
def new_version(self, session):
dbapi: ModuleType
def __init__(self, use_setinputsizes: bool = False, **kw: Any):
- super(PyODBCConnector, self).__init__(**kw)
+ super().__init__(**kw)
if use_setinputsizes:
self.bind_typing = interfaces.BindTyping.SETINPUTSIZES
token = "{%s}" % token.replace("}", "}}")
return token
- keys = dict((k, check_quote(v)) for k, v in keys.items())
+ keys = {k: check_quote(v) for k, v in keys.items()}
dsn_connection = "dsn" in keys or (
"host" in keys and "database" not in keys
MS_2005_VERSION = (9,)
MS_2000_VERSION = (8,)
-RESERVED_WORDS = set(
- [
- "add",
- "all",
- "alter",
- "and",
- "any",
- "as",
- "asc",
- "authorization",
- "backup",
- "begin",
- "between",
- "break",
- "browse",
- "bulk",
- "by",
- "cascade",
- "case",
- "check",
- "checkpoint",
- "close",
- "clustered",
- "coalesce",
- "collate",
- "column",
- "commit",
- "compute",
- "constraint",
- "contains",
- "containstable",
- "continue",
- "convert",
- "create",
- "cross",
- "current",
- "current_date",
- "current_time",
- "current_timestamp",
- "current_user",
- "cursor",
- "database",
- "dbcc",
- "deallocate",
- "declare",
- "default",
- "delete",
- "deny",
- "desc",
- "disk",
- "distinct",
- "distributed",
- "double",
- "drop",
- "dump",
- "else",
- "end",
- "errlvl",
- "escape",
- "except",
- "exec",
- "execute",
- "exists",
- "exit",
- "external",
- "fetch",
- "file",
- "fillfactor",
- "for",
- "foreign",
- "freetext",
- "freetexttable",
- "from",
- "full",
- "function",
- "goto",
- "grant",
- "group",
- "having",
- "holdlock",
- "identity",
- "identity_insert",
- "identitycol",
- "if",
- "in",
- "index",
- "inner",
- "insert",
- "intersect",
- "into",
- "is",
- "join",
- "key",
- "kill",
- "left",
- "like",
- "lineno",
- "load",
- "merge",
- "national",
- "nocheck",
- "nonclustered",
- "not",
- "null",
- "nullif",
- "of",
- "off",
- "offsets",
- "on",
- "open",
- "opendatasource",
- "openquery",
- "openrowset",
- "openxml",
- "option",
- "or",
- "order",
- "outer",
- "over",
- "percent",
- "pivot",
- "plan",
- "precision",
- "primary",
- "print",
- "proc",
- "procedure",
- "public",
- "raiserror",
- "read",
- "readtext",
- "reconfigure",
- "references",
- "replication",
- "restore",
- "restrict",
- "return",
- "revert",
- "revoke",
- "right",
- "rollback",
- "rowcount",
- "rowguidcol",
- "rule",
- "save",
- "schema",
- "securityaudit",
- "select",
- "session_user",
- "set",
- "setuser",
- "shutdown",
- "some",
- "statistics",
- "system_user",
- "table",
- "tablesample",
- "textsize",
- "then",
- "to",
- "top",
- "tran",
- "transaction",
- "trigger",
- "truncate",
- "tsequal",
- "union",
- "unique",
- "unpivot",
- "update",
- "updatetext",
- "use",
- "user",
- "values",
- "varying",
- "view",
- "waitfor",
- "when",
- "where",
- "while",
- "with",
- "writetext",
- ]
-)
+RESERVED_WORDS = {
+ "add",
+ "all",
+ "alter",
+ "and",
+ "any",
+ "as",
+ "asc",
+ "authorization",
+ "backup",
+ "begin",
+ "between",
+ "break",
+ "browse",
+ "bulk",
+ "by",
+ "cascade",
+ "case",
+ "check",
+ "checkpoint",
+ "close",
+ "clustered",
+ "coalesce",
+ "collate",
+ "column",
+ "commit",
+ "compute",
+ "constraint",
+ "contains",
+ "containstable",
+ "continue",
+ "convert",
+ "create",
+ "cross",
+ "current",
+ "current_date",
+ "current_time",
+ "current_timestamp",
+ "current_user",
+ "cursor",
+ "database",
+ "dbcc",
+ "deallocate",
+ "declare",
+ "default",
+ "delete",
+ "deny",
+ "desc",
+ "disk",
+ "distinct",
+ "distributed",
+ "double",
+ "drop",
+ "dump",
+ "else",
+ "end",
+ "errlvl",
+ "escape",
+ "except",
+ "exec",
+ "execute",
+ "exists",
+ "exit",
+ "external",
+ "fetch",
+ "file",
+ "fillfactor",
+ "for",
+ "foreign",
+ "freetext",
+ "freetexttable",
+ "from",
+ "full",
+ "function",
+ "goto",
+ "grant",
+ "group",
+ "having",
+ "holdlock",
+ "identity",
+ "identity_insert",
+ "identitycol",
+ "if",
+ "in",
+ "index",
+ "inner",
+ "insert",
+ "intersect",
+ "into",
+ "is",
+ "join",
+ "key",
+ "kill",
+ "left",
+ "like",
+ "lineno",
+ "load",
+ "merge",
+ "national",
+ "nocheck",
+ "nonclustered",
+ "not",
+ "null",
+ "nullif",
+ "of",
+ "off",
+ "offsets",
+ "on",
+ "open",
+ "opendatasource",
+ "openquery",
+ "openrowset",
+ "openxml",
+ "option",
+ "or",
+ "order",
+ "outer",
+ "over",
+ "percent",
+ "pivot",
+ "plan",
+ "precision",
+ "primary",
+ "print",
+ "proc",
+ "procedure",
+ "public",
+ "raiserror",
+ "read",
+ "readtext",
+ "reconfigure",
+ "references",
+ "replication",
+ "restore",
+ "restrict",
+ "return",
+ "revert",
+ "revoke",
+ "right",
+ "rollback",
+ "rowcount",
+ "rowguidcol",
+ "rule",
+ "save",
+ "schema",
+ "securityaudit",
+ "select",
+ "session_user",
+ "set",
+ "setuser",
+ "shutdown",
+ "some",
+ "statistics",
+ "system_user",
+ "table",
+ "tablesample",
+ "textsize",
+ "then",
+ "to",
+ "top",
+ "tran",
+ "transaction",
+ "trigger",
+ "truncate",
+ "tsequal",
+ "union",
+ "unique",
+ "unpivot",
+ "update",
+ "updatetext",
+ "use",
+ "user",
+ "values",
+ "varying",
+ "view",
+ "waitfor",
+ "when",
+ "where",
+ "while",
+ "with",
+ "writetext",
+}
class REAL(sqltypes.REAL):
# it is only accepted as the word "REAL" in DDL; the numeric
# precision value is not allowed to be present
kw.setdefault("precision", 24)
- super(REAL, self).__init__(**kw)
+ super().__init__(**kw)
class TINYINT(sqltypes.Integer):
class TIME(sqltypes.TIME):
def __init__(self, precision=None, **kwargs):
self.precision = precision
- super(TIME, self).__init__()
+ super().__init__()
__zero_date = datetime.date(1900, 1, 1)
__visit_name__ = "DATETIME2"
def __init__(self, precision=None, **kw):
- super(DATETIME2, self).__init__(**kw)
+ super().__init__(**kw)
self.precision = precision
__visit_name__ = "DATETIMEOFFSET"
def __init__(self, precision=None, **kw):
- super(DATETIMEOFFSET, self).__init__(**kw)
+ super().__init__(**kw)
self.precision = precision
self.convert_int = convert_int
def result_processor(self, dialect, coltype):
- super_ = super(TIMESTAMP, self).result_processor(dialect, coltype)
+ super_ = super().result_processor(dialect, coltype)
if self.convert_int:
def process(value):
raise ValueError(
"length must be None or 'max' when setting filestream"
)
- super(VARBINARY, self).__init__(length=length)
+ super().__init__(length=length)
class IMAGE(sqltypes.LargeBinary):
@overload
def __init__(
- self: "UNIQUEIDENTIFIER[_python_UUID]", as_uuid: Literal[True] = ...
+ self: UNIQUEIDENTIFIER[_python_UUID], as_uuid: Literal[True] = ...
):
...
@overload
- def __init__(self: "UNIQUEIDENTIFIER[str]", as_uuid: Literal[False] = ...):
+ def __init__(self: UNIQUEIDENTIFIER[str], as_uuid: Literal[False] = ...):
...
def __init__(self, as_uuid: bool = True):
and column.default.optional
):
return None
- return super(MSExecutionContext, self).get_insert_default(column)
+ return super().get_insert_default(column)
class MSSQLCompiler(compiler.SQLCompiler):
def __init__(self, *args, **kwargs):
self.tablealiases = {}
- super(MSSQLCompiler, self).__init__(*args, **kwargs)
+ super().__init__(*args, **kwargs)
def _with_legacy_schema_aliasing(fn):
def decorate(self, *arg, **kw):
def get_select_precolumns(self, select, **kw):
"""MS-SQL puts TOP, it's version of LIMIT here"""
- s = super(MSSQLCompiler, self).get_select_precolumns(select, **kw)
+ s = super().get_select_precolumns(select, **kw)
if select._has_row_limiting_clause and self._use_top(select):
# ODBC drivers and possibly others
@_with_legacy_schema_aliasing
def visit_table(self, table, mssql_aliased=False, iscrud=False, **kwargs):
if mssql_aliased is table or iscrud:
- return super(MSSQLCompiler, self).visit_table(table, **kwargs)
+ return super().visit_table(table, **kwargs)
# alias schema-qualified tables
alias = self._schema_aliased_table(table)
if alias is not None:
return self.process(alias, mssql_aliased=table, **kwargs)
else:
- return super(MSSQLCompiler, self).visit_table(table, **kwargs)
+ return super().visit_table(table, **kwargs)
@_with_legacy_schema_aliasing
def visit_alias(self, alias, **kw):
# translate for schema-qualified table aliases
kw["mssql_aliased"] = alias.element
- return super(MSSQLCompiler, self).visit_alias(alias, **kw)
+ return super().visit_alias(alias, **kw)
@_with_legacy_schema_aliasing
def visit_column(self, column, add_to_result_map=None, **kw):
column.type,
)
- return super(MSSQLCompiler, self).visit_column(converted, **kw)
+ return super().visit_column(converted, **kw)
- return super(MSSQLCompiler, self).visit_column(
+ return super().visit_column(
column, add_to_result_map=add_to_result_map, **kw
)
),
**kwargs,
)
- return super(MSSQLCompiler, self).visit_binary(binary, **kwargs)
+ return super().visit_binary(binary, **kwargs)
def returning_clause(
self, stmt, returning_cols, *, populate_result_map, **kw
if isinstance(column, expression.Function):
return column.label(None)
else:
- return super(MSSQLCompiler, self).label_select_column(
- select, column, asfrom
- )
+ return super().label_select_column(select, column, asfrom)
def for_update_clause(self, select, **kw):
# "FOR UPDATE" is only allowed on "DECLARE CURSOR" which
# SQL Server wants single quotes around the date string.
return "'" + str(value) + "'"
else:
- return super(MSSQLStrictCompiler, self).render_literal_value(
- value, type_
- )
+ return super().render_literal_value(value, type_)
class MSDDLCompiler(compiler.DDLCompiler):
schema_name = schema if schema else self.dialect.default_schema_name
return (
"execute sp_addextendedproperty 'MS_Description', "
- "{0}, 'schema', {1}, 'table', {2}".format(
+ "{}, 'schema', {}, 'table', {}".format(
self.sql_compiler.render_literal_value(
create.element.comment, sqltypes.NVARCHAR()
),
schema_name = schema if schema else self.dialect.default_schema_name
return (
"execute sp_dropextendedproperty 'MS_Description', 'schema', "
- "{0}, 'table', {1}".format(
+ "{}, 'table', {}".format(
self.preparer.quote_schema(schema_name),
self.preparer.format_table(drop.element, use_schema=False),
)
schema_name = schema if schema else self.dialect.default_schema_name
return (
"execute sp_addextendedproperty 'MS_Description', "
- "{0}, 'schema', {1}, 'table', {2}, 'column', {3}".format(
+ "{}, 'schema', {}, 'table', {}, 'column', {}".format(
self.sql_compiler.render_literal_value(
create.element.comment, sqltypes.NVARCHAR()
),
schema_name = schema if schema else self.dialect.default_schema_name
return (
"execute sp_dropextendedproperty 'MS_Description', 'schema', "
- "{0}, 'table', {1}, 'column', {2}".format(
+ "{}, 'table', {}, 'column', {}".format(
self.preparer.quote_schema(schema_name),
self.preparer.format_table(
drop.element.table, use_schema=False
if create.element.data_type is not None:
data_type = create.element.data_type
prefix = " AS %s" % self.type_compiler.process(data_type)
- return super(MSDDLCompiler, self).visit_create_sequence(
- create, prefix=prefix, **kw
- )
+ return super().visit_create_sequence(create, prefix=prefix, **kw)
def visit_identity_column(self, identity, **kw):
text = " IDENTITY"
reserved_words = RESERVED_WORDS
def __init__(self, dialect):
- super(MSIdentifierPreparer, self).__init__(
+ super().__init__(
dialect,
initial_quote="[",
final_quote="]",
)
self.legacy_schema_aliasing = legacy_schema_aliasing
- super(MSDialect, self).__init__(**opts)
+ super().__init__(**opts)
self._json_serializer = json_serializer
self._json_deserializer = json_deserializer
def do_savepoint(self, connection, name):
# give the DBAPI a push
connection.exec_driver_sql("IF @@TRANCOUNT = 0 BEGIN TRANSACTION")
- super(MSDialect, self).do_savepoint(connection, name)
+ super().do_savepoint(connection, name)
def do_release_savepoint(self, connection, name):
# SQL Server does not support RELEASE SAVEPOINT
def do_rollback(self, dbapi_connection):
try:
- super(MSDialect, self).do_rollback(dbapi_connection)
+ super().do_rollback(dbapi_connection)
except self.dbapi.ProgrammingError as e:
if self.ignore_no_transaction_on_rollback and re.match(
r".*\b111214\b", str(e)
else:
raise
- _isolation_lookup = set(
- [
- "SERIALIZABLE",
- "READ UNCOMMITTED",
- "READ COMMITTED",
- "REPEATABLE READ",
- "SNAPSHOT",
- ]
- )
+ _isolation_lookup = {
+ "SERIALIZABLE",
+ "READ UNCOMMITTED",
+ "READ COMMITTED",
+ "REPEATABLE READ",
+ "SNAPSHOT",
+ }
def get_isolation_level_values(self, dbapi_connection):
return list(self._isolation_lookup)
"SQL Server version."
)
- view_name = "sys.{}".format(row[0])
+ view_name = f"sys.{row[0]}"
cursor.execute(
"""
cursor.close()
def initialize(self, connection):
- super(MSDialect, self).initialize(connection)
+ super().initialize(connection)
self._setup_version_attributes()
self._setup_supports_nvarchar_max(connection)
connection.scalar(
# U filters on user tables only.
text("SELECT object_id(:table_name, 'U')"),
- {"table_name": "tempdb.dbo.[{}]".format(tablename)},
+ {"table_name": f"tempdb.dbo.[{tablename}]"},
)
)
else:
class MSIdentifierPreparer_pymssql(MSIdentifierPreparer):
def __init__(self, dialect):
- super(MSIdentifierPreparer_pymssql, self).__init__(dialect)
+ super().__init__(dialect)
# pymssql has the very unusual behavior that it uses pyformat
# yet does not require that percent signs be doubled
self._double_percents = False
dbapi_connection.autocommit(True)
else:
dbapi_connection.autocommit(False)
- super(MSDialect_pymssql, self).set_isolation_level(
- dbapi_connection, level
- )
+ super().set_isolation_level(dbapi_connection, level)
dialect = MSDialect_pymssql
def bind_processor(self, dialect):
- super_process = super(_ms_numeric_pyodbc, self).bind_processor(dialect)
+ super_process = super().bind_processor(dialect)
if not dialect._need_decimal_fix:
return super_process
"""
- super(MSExecutionContext_pyodbc, self).pre_exec()
+ super().pre_exec()
# don't embed the scope_identity select into an
# "INSERT .. DEFAULT VALUES"
self._lastrowid = int(row[0])
else:
- super(MSExecutionContext_pyodbc, self).post_exec()
+ super().post_exec()
class MSDialect_pyodbc(PyODBCConnector, MSDialect):
use_setinputsizes=True,
**params,
):
- super(MSDialect_pyodbc, self).__init__(
- use_setinputsizes=use_setinputsizes, **params
- )
+ super().__init__(use_setinputsizes=use_setinputsizes, **params)
self.use_scope_identity = (
self.use_scope_identity
and self.dbapi
# SQL Server docs indicate this function isn't present prior to
# 2008. Before we had the VARCHAR cast above, pyodbc would also
# fail on this query.
- return super(MSDialect_pyodbc, self)._get_server_version_info(
- connection
- )
+ return super()._get_server_version_info(connection)
else:
version = []
r = re.compile(r"[.\-]")
return tuple(version)
def on_connect(self):
- super_ = super(MSDialect_pyodbc, self).on_connect()
+ super_ = super().on_connect()
def on_connect(conn):
if super_ is not None:
def do_executemany(self, cursor, statement, parameters, context=None):
if self.fast_executemany:
cursor.fast_executemany = True
- super(MSDialect_pyodbc, self).do_executemany(
- cursor, statement, parameters, context=context
- )
+ super().do_executemany(cursor, statement, parameters, context=context)
def is_disconnect(self, e, connection, cursor):
if isinstance(e, self.dbapi.Error):
"10054",
}:
return True
- return super(MSDialect_pyodbc, self).is_disconnect(
- e, connection, cursor
- )
+ return super().is_disconnect(e, connection, cursor)
dialect = MSDialect_pyodbc
return pool.AsyncAdaptedQueuePool
def create_connect_args(self, url):
- return super(MySQLDialect_aiomysql, self).create_connect_args(
+ return super().create_connect_args(
url, _translate_args=dict(username="user", database="db")
)
def is_disconnect(self, e, connection, cursor):
- if super(MySQLDialect_aiomysql, self).is_disconnect(
- e, connection, cursor
- ):
+ if super().is_disconnect(e, connection, cursor):
return True
else:
str_e = str(e).lower()
return pool.AsyncAdaptedQueuePool
def create_connect_args(self, url):
- return super(MySQLDialect_asyncmy, self).create_connect_args(
+ return super().create_connect_args(
url, _translate_args=dict(username="user", database="db")
)
def is_disconnect(self, e, connection, cursor):
- if super(MySQLDialect_asyncmy, self).is_disconnect(
- e, connection, cursor
- ):
+ if super().is_disconnect(e, connection, cursor):
return True
else:
str_e = str(e).lower()
name_text = self.preparer.quote(column.name)
clauses.append("%s = %s" % (name_text, value_text))
- non_matching = set(on_duplicate.update) - set(c.key for c in cols)
+ non_matching = set(on_duplicate.update) - {c.key for c in cols}
if non_matching:
util.warn(
"Additional column names not matching "
return "CAST(%s AS %s)" % (self.process(cast.clause, **kw), type_)
def render_literal_value(self, value, type_):
- value = super(MySQLCompiler, self).render_literal_value(value, type_)
+ value = super().render_literal_value(value, type_)
if self.dialect._backslash_escapes:
value = value.replace("\\", "\\\\")
return value
)
return select._distinct.upper() + " "
- return super(MySQLCompiler, self).get_select_precolumns(select, **kw)
+ return super().get_select_precolumns(select, **kw)
def visit_join(self, join, asfrom=False, from_linter=None, **kwargs):
if from_linter:
table_opts = []
- opts = dict(
- (k[len(self.dialect.name) + 1 :].upper(), v)
+ opts = {
+ k[len(self.dialect.name) + 1 :].upper(): v
for k, v in table.kwargs.items()
if k.startswith("%s_" % self.dialect.name)
- )
+ }
if table.comment is not None:
opts["COMMENT"] = table.comment
return text
def visit_primary_key_constraint(self, constraint):
- text = super(MySQLDDLCompiler, self).visit_primary_key_constraint(
- constraint
- )
+ text = super().visit_primary_key_constraint(constraint)
using = constraint.dialect_options["mysql"]["using"]
if using:
text += " USING %s" % (self.preparer.quote(using))
def visit_enum(self, type_, **kw):
if not type_.native_enum:
- return super(MySQLTypeCompiler, self).visit_enum(type_)
+ return super().visit_enum(type_)
else:
return self._visit_enumerated_values("ENUM", type_, type_.enums)
else:
quote = '"'
- super(MySQLIdentifierPreparer, self).__init__(
- dialect, initial_quote=quote, escape_quote=quote
- )
+ super().__init__(dialect, initial_quote=quote, escape_quote=quote)
def _quote_free_identifiers(self, *ids):
"""Unilaterally identifier-quote any number of strings."""
if elem == "":
return elem
else:
- return super(ENUM, self)._object_value_for_elem(elem)
+ return super()._object_value_for_elem(elem)
def __repr__(self):
return util.generic_repr(
"setting retrieve_as_bitwise=True"
)
if self.retrieve_as_bitwise:
- self._bitmap = dict(
- (value, 2**idx) for idx, value in enumerate(self.values)
- )
+ self._bitmap = {
+ value: 2**idx for idx, value in enumerate(self.values)
+ }
self._bitmap.update(
(2**idx, value) for idx, value in enumerate(self.values)
)
length = max([len(v) for v in values] + [0])
kw.setdefault("length", length)
- super(SET, self).__init__(**kw)
+ super().__init__(**kw)
def column_expression(self, colexpr):
if self.retrieve_as_bitwise:
return None
else:
- super_convert = super(SET, self).result_processor(dialect, coltype)
+ super_convert = super().result_processor(dialect, coltype)
def process(value):
if isinstance(value, str):
return process
def bind_processor(self, dialect):
- super_convert = super(SET, self).bind_processor(dialect)
+ super_convert = super().bind_processor(dialect)
if self.retrieve_as_bitwise:
def process(value):
if kw:
raise exc.ArgumentError("unknown arguments: %s" % (", ".join(kw)))
- super(match, self).__init__(
- left, against, operators.match_op, modifiers=flags
- )
+ super().__init__(left, against, operators.match_op, modifiers=flags)
@_generative
def in_boolean_mode(self: Selfmatch) -> Selfmatch:
return (99, 99, 99)
def __init__(self, **kwargs):
- super(MySQLDialect_mariadbconnector, self).__init__(**kwargs)
+ super().__init__(**kwargs)
self.paramstyle = "qmark"
if self.dbapi is not None:
if self._dbapi_version < mariadb_cpy_minimum_version:
return __import__("mariadb")
def is_disconnect(self, e, connection, cursor):
- if super(MySQLDialect_mariadbconnector, self).is_disconnect(
- e, connection, cursor
- ):
+ if super().is_disconnect(e, connection, cursor):
return True
elif isinstance(e, self.dbapi.Error):
str_e = str(e).lower()
connection.autocommit = True
else:
connection.autocommit = False
- super(MySQLDialect_mariadbconnector, self).set_isolation_level(
- connection, level
- )
+ super().set_isolation_level(connection, level)
def do_begin_twophase(self, connection, xid):
connection.execute(
def _compat_fetchone(self, rp, charset=None):
return rp.fetchone()
- _isolation_lookup = set(
- [
- "SERIALIZABLE",
- "READ UNCOMMITTED",
- "READ COMMITTED",
- "REPEATABLE READ",
- "AUTOCOMMIT",
- ]
- )
+ _isolation_lookup = {
+ "SERIALIZABLE",
+ "READ UNCOMMITTED",
+ "READ COMMITTED",
+ "REPEATABLE READ",
+ "AUTOCOMMIT",
+ }
def _set_isolation_level(self, connection, level):
if level == "AUTOCOMMIT":
connection.autocommit = True
else:
connection.autocommit = False
- super(MySQLDialect_mysqlconnector, self)._set_isolation_level(
- connection, level
- )
+ super()._set_isolation_level(connection, level)
dialect = MySQLDialect_mysqlconnector
preparer = MySQLIdentifierPreparer
def __init__(self, **kwargs):
- super(MySQLDialect_mysqldb, self).__init__(**kwargs)
+ super().__init__(**kwargs)
self._mysql_dbapi_version = (
self._parse_dbapi_version(self.dbapi.__version__)
if self.dbapi is not None and hasattr(self.dbapi, "__version__")
return __import__("MySQLdb")
def on_connect(self):
- super_ = super(MySQLDialect_mysqldb, self).on_connect()
+ super_ = super().on_connect()
def on_connect(conn):
if super_ is not None:
]
else:
additional_tests = []
- return super(MySQLDialect_mysqldb, self)._check_unicode_returns(
- connection, additional_tests
- )
+ return super()._check_unicode_returns(connection, additional_tests)
def create_connect_args(self, url, _translate_args=None):
if _translate_args is None:
dbapi_connection.autocommit(True)
else:
dbapi_connection.autocommit(False)
- super(MySQLDialect_mysqldb, self).set_isolation_level(
- dbapi_connection, level
- )
+ super().set_isolation_level(dbapi_connection, level)
dialect = MySQLDialect_mysqldb
def create_connect_args(self, url, _translate_args=None):
if _translate_args is None:
_translate_args = dict(username="user")
- return super(MySQLDialect_pymysql, self).create_connect_args(
+ return super().create_connect_args(
url, _translate_args=_translate_args
)
def is_disconnect(self, e, connection, cursor):
- if super(MySQLDialect_pymysql, self).is_disconnect(
- e, connection, cursor
- ):
+ if super().is_disconnect(e, connection, cursor):
return True
elif isinstance(e, self.dbapi.Error):
str_e = str(e).lower()
return None
def on_connect(self):
- super_ = super(MySQLDialect_pyodbc, self).on_connect()
+ super_ = super().on_connect()
def on_connect(conn):
if super_ is not None:
buffer = []
for row in columns:
- (name, col_type, nullable, default, extra) = [
+ (name, col_type, nullable, default, extra) = (
row[i] for i in (0, 1, 2, 4, 5)
- ]
+ )
line = [" "]
line.append(self.preparer.quote_identifier(name))
def __init__(self, unsigned=False, zerofill=False, **kw):
self.unsigned = unsigned
self.zerofill = zerofill
- super(_NumericType, self).__init__(**kw)
+ super().__init__(**kw)
def __repr__(self):
return util.generic_repr(
"You must specify both precision and scale or omit "
"both altogether."
)
- super(_FloatType, self).__init__(
- precision=precision, asdecimal=asdecimal, **kw
- )
+ super().__init__(precision=precision, asdecimal=asdecimal, **kw)
self.scale = scale
def __repr__(self):
class _IntegerType(_NumericType, sqltypes.Integer):
def __init__(self, display_width=None, **kw):
self.display_width = display_width
- super(_IntegerType, self).__init__(**kw)
+ super().__init__(**kw)
def __repr__(self):
return util.generic_repr(
self.unicode = unicode
self.binary = binary
self.national = national
- super(_StringType, self).__init__(**kw)
+ super().__init__(**kw)
def __repr__(self):
return util.generic_repr(
numeric.
"""
- super(NUMERIC, self).__init__(
+ super().__init__(
precision=precision, scale=scale, asdecimal=asdecimal, **kw
)
numeric.
"""
- super(DECIMAL, self).__init__(
+ super().__init__(
precision=precision, scale=scale, asdecimal=asdecimal, **kw
)
numeric.
"""
- super(DOUBLE, self).__init__(
+ super().__init__(
precision=precision, scale=scale, asdecimal=asdecimal, **kw
)
numeric.
"""
- super(REAL, self).__init__(
+ super().__init__(
precision=precision, scale=scale, asdecimal=asdecimal, **kw
)
numeric.
"""
- super(FLOAT, self).__init__(
+ super().__init__(
precision=precision, scale=scale, asdecimal=asdecimal, **kw
)
numeric.
"""
- super(INTEGER, self).__init__(display_width=display_width, **kw)
+ super().__init__(display_width=display_width, **kw)
class BIGINT(_IntegerType, sqltypes.BIGINT):
numeric.
"""
- super(BIGINT, self).__init__(display_width=display_width, **kw)
+ super().__init__(display_width=display_width, **kw)
class MEDIUMINT(_IntegerType):
numeric.
"""
- super(MEDIUMINT, self).__init__(display_width=display_width, **kw)
+ super().__init__(display_width=display_width, **kw)
class TINYINT(_IntegerType):
numeric.
"""
- super(TINYINT, self).__init__(display_width=display_width, **kw)
+ super().__init__(display_width=display_width, **kw)
class SMALLINT(_IntegerType, sqltypes.SMALLINT):
numeric.
"""
- super(SMALLINT, self).__init__(display_width=display_width, **kw)
+ super().__init__(display_width=display_width, **kw)
class BIT(sqltypes.TypeEngine):
MySQL Connector/Python.
"""
- super(TIME, self).__init__(timezone=timezone)
+ super().__init__(timezone=timezone)
self.fsp = fsp
def result_processor(self, dialect, coltype):
MySQL Connector/Python.
"""
- super(TIMESTAMP, self).__init__(timezone=timezone)
+ super().__init__(timezone=timezone)
self.fsp = fsp
MySQL Connector/Python.
"""
- super(DATETIME, self).__init__(timezone=timezone)
+ super().__init__(timezone=timezone)
self.fsp = fsp
only the collation of character data.
"""
- super(TEXT, self).__init__(length=length, **kw)
+ super().__init__(length=length, **kw)
class TINYTEXT(_StringType):
only the collation of character data.
"""
- super(TINYTEXT, self).__init__(**kwargs)
+ super().__init__(**kwargs)
class MEDIUMTEXT(_StringType):
only the collation of character data.
"""
- super(MEDIUMTEXT, self).__init__(**kwargs)
+ super().__init__(**kwargs)
class LONGTEXT(_StringType):
only the collation of character data.
"""
- super(LONGTEXT, self).__init__(**kwargs)
+ super().__init__(**kwargs)
class VARCHAR(_StringType, sqltypes.VARCHAR):
only the collation of character data.
"""
- super(VARCHAR, self).__init__(length=length, **kwargs)
+ super().__init__(length=length, **kwargs)
class CHAR(_StringType, sqltypes.CHAR):
compatible with the national character set.
"""
- super(CHAR, self).__init__(length=length, **kwargs)
+ super().__init__(length=length, **kwargs)
@classmethod
def _adapt_string_for_cast(self, type_):
"""
kwargs["national"] = True
- super(NVARCHAR, self).__init__(length=length, **kwargs)
+ super().__init__(length=length, **kwargs)
class NCHAR(_StringType, sqltypes.NCHAR):
"""
kwargs["national"] = True
- super(NCHAR, self).__init__(length=length, **kwargs)
+ super().__init__(length=length, **kwargs)
class TINYBLOB(sqltypes._Binary):
def __init__(self, *args, **kwargs):
self.__wheres = {}
- super(OracleCompiler, self).__init__(*args, **kwargs)
+ super().__init__(*args, **kwargs)
def visit_mod_binary(self, binary, operator, **kw):
return "mod(%s, %s)" % (
return ""
def visit_function(self, func, **kw):
- text = super(OracleCompiler, self).visit_function(func, **kw)
+ text = super().visit_function(func, **kw)
if kw.get("asfrom", False):
text = "TABLE (%s)" % func
return text
def visit_table_valued_column(self, element, **kw):
- text = super(OracleCompiler, self).visit_table_valued_column(
- element, **kw
- )
+ text = super().visit_table_valued_column(element, **kw)
text = "COLUMN_VALUE " + text
return text
return "".join(table_opts)
def get_identity_options(self, identity_options):
- text = super(OracleDDLCompiler, self).get_identity_options(
- identity_options
- )
+ text = super().get_identity_options(identity_options)
text = text.replace("NO MINVALUE", "NOMINVALUE")
text = text.replace("NO MAXVALUE", "NOMAXVALUE")
text = text.replace("NO CYCLE", "NOCYCLE")
def format_savepoint(self, savepoint):
name = savepoint.ident.lstrip("_")
- return super(OracleIdentifierPreparer, self).format_savepoint(
- savepoint, name
- )
+ return super().format_savepoint(savepoint, name)
class OracleExecutionContext(default.DefaultExecutionContext):
) = enable_offset_fetch
def initialize(self, connection):
- super(OracleDialect, self).initialize(connection)
+ super().initialize(connection)
# Oracle 8i has RETURNING:
# https://docs.oracle.com/cd/A87860_01/doc/index.htm
if not dialect.auto_convert_lobs:
return None
else:
- return super(_OracleBinary, self).result_processor(
- dialect, coltype
- )
+ return super().result_processor(dialect, coltype)
class _OracleInterval(oracle.INTERVAL):
if asdecimal is None:
asdecimal = bool(scale and scale > 0)
- super(NUMBER, self).__init__(
- precision=precision, scale=scale, asdecimal=asdecimal
- )
+ super().__init__(precision=precision, scale=scale, asdecimal=asdecimal)
def adapt(self, impltype):
- ret = super(NUMBER, self).adapt(impltype)
+ ret = super().adapt(impltype)
# leave a hint for the DBAPI handler
ret._is_oracle_number = True
return ret
if dialect._has_native_hstore:
return None
else:
- return super(_PsycopgHStore, self).bind_processor(dialect)
+ return super().bind_processor(dialect)
def result_processor(self, dialect, coltype):
if dialect._has_native_hstore:
return None
else:
- return super(_PsycopgHStore, self).result_processor(
- dialect, coltype
- )
+ return super().result_processor(dialect, coltype)
class _PsycopgARRAY(PGARRAY):
def __init__(self, clauses, **kw):
type_arg = kw.pop("type_", None)
- super(array, self).__init__(operators.comma_op, *clauses, **kw)
+ super().__init__(operators.comma_op, *clauses, **kw)
self._type_tuple = [arg.type for arg in self.clauses]
__slots__ = ("_rowbuffer",)
def __init__(self, adapt_connection):
- super(AsyncAdapt_asyncpg_ss_cursor, self).__init__(adapt_connection)
+ super().__init__(adapt_connection)
self._rowbuffer = None
def close(self):
class InvalidCachedStatementError(NotSupportedError):
def __init__(self, message):
- super(
- AsyncAdapt_asyncpg_dbapi.InvalidCachedStatementError, self
- ).__init__(
+ super().__init__(
message + " (SQLAlchemy asyncpg dialect will now invalidate "
"all prepared caches in response to this exception)",
)
"""
- super_connect = super(PGDialect_asyncpg, self).on_connect()
+ super_connect = super().on_connect()
def connect(conn):
conn.await_(self.setup_asyncpg_json_codec(conn))
IDX_USING = re.compile(r"^(?:btree|hash|gist|gin|[\w_]+)$", re.I)
-RESERVED_WORDS = set(
- [
- "all",
- "analyse",
- "analyze",
- "and",
- "any",
- "array",
- "as",
- "asc",
- "asymmetric",
- "both",
- "case",
- "cast",
- "check",
- "collate",
- "column",
- "constraint",
- "create",
- "current_catalog",
- "current_date",
- "current_role",
- "current_time",
- "current_timestamp",
- "current_user",
- "default",
- "deferrable",
- "desc",
- "distinct",
- "do",
- "else",
- "end",
- "except",
- "false",
- "fetch",
- "for",
- "foreign",
- "from",
- "grant",
- "group",
- "having",
- "in",
- "initially",
- "intersect",
- "into",
- "leading",
- "limit",
- "localtime",
- "localtimestamp",
- "new",
- "not",
- "null",
- "of",
- "off",
- "offset",
- "old",
- "on",
- "only",
- "or",
- "order",
- "placing",
- "primary",
- "references",
- "returning",
- "select",
- "session_user",
- "some",
- "symmetric",
- "table",
- "then",
- "to",
- "trailing",
- "true",
- "union",
- "unique",
- "user",
- "using",
- "variadic",
- "when",
- "where",
- "window",
- "with",
- "authorization",
- "between",
- "binary",
- "cross",
- "current_schema",
- "freeze",
- "full",
- "ilike",
- "inner",
- "is",
- "isnull",
- "join",
- "left",
- "like",
- "natural",
- "notnull",
- "outer",
- "over",
- "overlaps",
- "right",
- "similar",
- "verbose",
- ]
-)
+RESERVED_WORDS = {
+ "all",
+ "analyse",
+ "analyze",
+ "and",
+ "any",
+ "array",
+ "as",
+ "asc",
+ "asymmetric",
+ "both",
+ "case",
+ "cast",
+ "check",
+ "collate",
+ "column",
+ "constraint",
+ "create",
+ "current_catalog",
+ "current_date",
+ "current_role",
+ "current_time",
+ "current_timestamp",
+ "current_user",
+ "default",
+ "deferrable",
+ "desc",
+ "distinct",
+ "do",
+ "else",
+ "end",
+ "except",
+ "false",
+ "fetch",
+ "for",
+ "foreign",
+ "from",
+ "grant",
+ "group",
+ "having",
+ "in",
+ "initially",
+ "intersect",
+ "into",
+ "leading",
+ "limit",
+ "localtime",
+ "localtimestamp",
+ "new",
+ "not",
+ "null",
+ "of",
+ "off",
+ "offset",
+ "old",
+ "on",
+ "only",
+ "or",
+ "order",
+ "placing",
+ "primary",
+ "references",
+ "returning",
+ "select",
+ "session_user",
+ "some",
+ "symmetric",
+ "table",
+ "then",
+ "to",
+ "trailing",
+ "true",
+ "union",
+ "unique",
+ "user",
+ "using",
+ "variadic",
+ "when",
+ "where",
+ "window",
+ "with",
+ "authorization",
+ "between",
+ "binary",
+ "cross",
+ "current_schema",
+ "freeze",
+ "full",
+ "ilike",
+ "inner",
+ "is",
+ "isnull",
+ "join",
+ "left",
+ "like",
+ "natural",
+ "notnull",
+ "outer",
+ "over",
+ "overlaps",
+ "right",
+ "similar",
+ "verbose",
+}
colspecs = {
sqltypes.ARRAY: _array.ARRAY,
)
def render_literal_value(self, value, type_):
- value = super(PGCompiler, self).render_literal_value(value, type_)
+ value = super().render_literal_value(value, type_)
if self.dialect._backslash_escapes:
value = value.replace("\\", "\\\\")
"create_constraint=False on this Enum datatype."
)
- text = super(PGDDLCompiler, self).visit_check_constraint(constraint)
+ text = super().visit_check_constraint(constraint)
text += self._define_constraint_validity(constraint)
return text
def visit_foreign_key_constraint(self, constraint):
- text = super(PGDDLCompiler, self).visit_foreign_key_constraint(
- constraint
- )
+ text = super().visit_foreign_key_constraint(constraint)
text += self._define_constraint_validity(constraint)
return text
create.element.data_type
)
- return super(PGDDLCompiler, self).visit_create_sequence(
- create, prefix=prefix, **kw
- )
+ return super().visit_create_sequence(create, prefix=prefix, **kw)
def _can_comment_on_constraint(self, ddl_instance):
constraint = ddl_instance.element
def visit_enum(self, type_, **kw):
if not type_.native_enum or not self.dialect.supports_native_enum:
- return super(PGTypeCompiler, self).visit_enum(type_, **kw)
+ return super().visit_enum(type_, **kw)
else:
return self.visit_ENUM(type_, **kw)
return self._execute_scalar(exc, column.type)
- return super(PGExecutionContext, self).get_insert_default(column)
+ return super().get_insert_default(column)
class PGReadOnlyConnectionCharacteristic(
self._json_serializer = json_serializer
def initialize(self, connection):
- super(PGDialect, self).initialize(connection)
+ super().initialize(connection)
# https://www.postgresql.org/docs/9.3/static/release-9-2.html#AEN116689
self.supports_smallserial = self.server_version_info >= (9, 2)
set_=None,
where=None,
):
- super(OnConflictDoUpdate, self).__init__(
+ super().__init__(
constraint=constraint,
index_elements=index_elements,
index_where=index_where,
self.ops = kw.get("ops", {})
def _set_parent(self, table, **kw):
- super(ExcludeConstraint, self)._set_parent(table)
+ super()._set_parent(table)
self._render_exprs = [
(
.. versionadded:: 1.1
"""
- super(JSON, self).__init__(none_as_null=none_as_null)
+ super().__init__(none_as_null=none_as_null)
if astext_type is not None:
self.astext_type = astext_type
"""Base for named types."""
__abstract__ = True
- DDLGenerator: Type["NamedTypeGenerator"]
- DDLDropper: Type["NamedTypeDropper"]
+ DDLGenerator: Type[NamedTypeGenerator]
+ DDLDropper: Type[NamedTypeDropper]
create_type: bool
def create(self, bind, checkfirst=True, **kw):
)
def initialize(self, connection):
- super(PGDialect_psycopg2, self).initialize(connection)
+ super().initialize(connection)
self._has_native_hstore = (
self.use_native_hstore
and self._hstore_oids(connection.connection.dbapi_connection)
.. versionadded:: 1.4
"""
- super(TIMESTAMP, self).__init__(timezone=timezone)
+ super().__init__(timezone=timezone)
self.precision = precision
.. versionadded:: 1.4
"""
- super(TIME, self).__init__(timezone=timezone)
+ super().__init__(timezone=timezone)
self.precision = precision
class _SQliteJson(JSON):
def result_processor(self, dialect, coltype):
- default_processor = super(_SQliteJson, self).result_processor(
- dialect, coltype
- )
+ default_processor = super().result_processor(dialect, coltype)
def process(value):
try:
_storage_format = None
def __init__(self, storage_format=None, regexp=None, **kw):
- super(_DateTimeMixin, self).__init__(**kw)
+ super().__init__(**kw)
if regexp is not None:
self._reg = re.compile(regexp)
if storage_format is not None:
kw["storage_format"] = self._storage_format
if self._reg:
kw["regexp"] = self._reg
- return super(_DateTimeMixin, self).adapt(cls, **kw)
+ return super().adapt(cls, **kw)
def literal_processor(self, dialect):
bp = self.bind_processor(dialect)
def __init__(self, *args, **kwargs):
truncate_microseconds = kwargs.pop("truncate_microseconds", False)
- super(DATETIME, self).__init__(*args, **kwargs)
+ super().__init__(*args, **kwargs)
if truncate_microseconds:
assert "storage_format" not in kwargs, (
"You can specify only "
def __init__(self, *args, **kwargs):
truncate_microseconds = kwargs.pop("truncate_microseconds", False)
- super(TIME, self).__init__(*args, **kwargs)
+ super().__init__(*args, **kwargs)
if truncate_microseconds:
assert "storage_format" not in kwargs, (
"You can specify only "
def visit_cast(self, cast, **kwargs):
if self.dialect.supports_cast:
- return super(SQLiteCompiler, self).visit_cast(cast, **kwargs)
+ return super().visit_cast(cast, **kwargs)
else:
return self.process(cast.clause, **kwargs)
):
return None
- text = super(SQLiteDDLCompiler, self).visit_primary_key_constraint(
- constraint
- )
+ text = super().visit_primary_key_constraint(constraint)
on_conflict_clause = constraint.dialect_options["sqlite"][
"on_conflict"
return text
def visit_unique_constraint(self, constraint):
- text = super(SQLiteDDLCompiler, self).visit_unique_constraint(
- constraint
- )
+ text = super().visit_unique_constraint(constraint)
on_conflict_clause = constraint.dialect_options["sqlite"][
"on_conflict"
return text
def visit_check_constraint(self, constraint):
- text = super(SQLiteDDLCompiler, self).visit_check_constraint(
- constraint
- )
+ text = super().visit_check_constraint(constraint)
on_conflict_clause = constraint.dialect_options["sqlite"][
"on_conflict"
return text
def visit_column_check_constraint(self, constraint):
- text = super(SQLiteDDLCompiler, self).visit_column_check_constraint(
- constraint
- )
+ text = super().visit_column_check_constraint(constraint)
if constraint.dialect_options["sqlite"]["on_conflict"] is not None:
raise exc.CompileError(
if local_table.schema != remote_table.schema:
return None
else:
- return super(SQLiteDDLCompiler, self).visit_foreign_key_constraint(
- constraint
- )
+ return super().visit_foreign_key_constraint(constraint)
def define_constraint_remote_table(self, constraint, table, preparer):
"""Format the remote table clause of a CREATE CONSTRAINT clause."""
not isinstance(type_, _DateTimeMixin)
or type_.format_is_text_affinity
):
- return super(SQLiteTypeCompiler, self).visit_DATETIME(type_)
+ return super().visit_DATETIME(type_)
else:
return "DATETIME_CHAR"
not isinstance(type_, _DateTimeMixin)
or type_.format_is_text_affinity
):
- return super(SQLiteTypeCompiler, self).visit_DATE(type_)
+ return super().visit_DATE(type_)
else:
return "DATE_CHAR"
not isinstance(type_, _DateTimeMixin)
or type_.format_is_text_affinity
):
- return super(SQLiteTypeCompiler, self).visit_TIME(type_)
+ return super().visit_TIME(type_)
else:
return "TIME_CHAR"
class SQLiteIdentifierPreparer(compiler.IdentifierPreparer):
- reserved_words = set(
- [
- "add",
- "after",
- "all",
- "alter",
- "analyze",
- "and",
- "as",
- "asc",
- "attach",
- "autoincrement",
- "before",
- "begin",
- "between",
- "by",
- "cascade",
- "case",
- "cast",
- "check",
- "collate",
- "column",
- "commit",
- "conflict",
- "constraint",
- "create",
- "cross",
- "current_date",
- "current_time",
- "current_timestamp",
- "database",
- "default",
- "deferrable",
- "deferred",
- "delete",
- "desc",
- "detach",
- "distinct",
- "drop",
- "each",
- "else",
- "end",
- "escape",
- "except",
- "exclusive",
- "exists",
- "explain",
- "false",
- "fail",
- "for",
- "foreign",
- "from",
- "full",
- "glob",
- "group",
- "having",
- "if",
- "ignore",
- "immediate",
- "in",
- "index",
- "indexed",
- "initially",
- "inner",
- "insert",
- "instead",
- "intersect",
- "into",
- "is",
- "isnull",
- "join",
- "key",
- "left",
- "like",
- "limit",
- "match",
- "natural",
- "not",
- "notnull",
- "null",
- "of",
- "offset",
- "on",
- "or",
- "order",
- "outer",
- "plan",
- "pragma",
- "primary",
- "query",
- "raise",
- "references",
- "reindex",
- "rename",
- "replace",
- "restrict",
- "right",
- "rollback",
- "row",
- "select",
- "set",
- "table",
- "temp",
- "temporary",
- "then",
- "to",
- "transaction",
- "trigger",
- "true",
- "union",
- "unique",
- "update",
- "using",
- "vacuum",
- "values",
- "view",
- "virtual",
- "when",
- "where",
- ]
- )
+ reserved_words = {
+ "add",
+ "after",
+ "all",
+ "alter",
+ "analyze",
+ "and",
+ "as",
+ "asc",
+ "attach",
+ "autoincrement",
+ "before",
+ "begin",
+ "between",
+ "by",
+ "cascade",
+ "case",
+ "cast",
+ "check",
+ "collate",
+ "column",
+ "commit",
+ "conflict",
+ "constraint",
+ "create",
+ "cross",
+ "current_date",
+ "current_time",
+ "current_timestamp",
+ "database",
+ "default",
+ "deferrable",
+ "deferred",
+ "delete",
+ "desc",
+ "detach",
+ "distinct",
+ "drop",
+ "each",
+ "else",
+ "end",
+ "escape",
+ "except",
+ "exclusive",
+ "exists",
+ "explain",
+ "false",
+ "fail",
+ "for",
+ "foreign",
+ "from",
+ "full",
+ "glob",
+ "group",
+ "having",
+ "if",
+ "ignore",
+ "immediate",
+ "in",
+ "index",
+ "indexed",
+ "initially",
+ "inner",
+ "insert",
+ "instead",
+ "intersect",
+ "into",
+ "is",
+ "isnull",
+ "join",
+ "key",
+ "left",
+ "like",
+ "limit",
+ "match",
+ "natural",
+ "not",
+ "notnull",
+ "null",
+ "of",
+ "offset",
+ "on",
+ "or",
+ "order",
+ "outer",
+ "plan",
+ "pragma",
+ "primary",
+ "query",
+ "raise",
+ "references",
+ "reindex",
+ "rename",
+ "replace",
+ "restrict",
+ "right",
+ "rollback",
+ "row",
+ "select",
+ "set",
+ "table",
+ "temp",
+ "temporary",
+ "then",
+ "to",
+ "transaction",
+ "trigger",
+ "true",
+ "union",
+ "unique",
+ "update",
+ "using",
+ "vacuum",
+ "values",
+ "view",
+ "virtual",
+ "when",
+ "where",
+ }
class SQLiteExecutionContext(default.DefaultExecutionContext):
# the names as well. SQLite saves the DDL in whatever format
# it was typed in as, so we need to be liberal here.
- keys_by_signature = dict(
- (
- fk_sig(
- fk["constrained_columns"],
- fk["referred_table"],
- fk["referred_columns"],
- ),
- fk,
- )
+ keys_by_signature = {
+ fk_sig(
+ fk["constrained_columns"],
+ fk["referred_table"],
+ fk["referred_columns"],
+ ): fk
for fk in fks.values()
- )
+ }
table_data = self._get_table_sql(connection, table_name, schema=schema)
set_=None,
where=None,
):
- super(OnConflictDoUpdate, self).__init__(
+ super().__init__(
index_elements=index_elements,
index_where=index_where,
)
return pool.SingletonThreadPool
def on_connect_url(self, url):
- super_on_connect = super(
- SQLiteDialect_pysqlcipher, self
- ).on_connect_url(url)
+ super_on_connect = super().on_connect_url(url)
# pull the info we need from the URL early. Even though URL
# is immutable, we don't want any in-place changes to the URL
def create_connect_args(self, url):
plain_url = url._replace(password=None)
plain_url = plain_url.difference_update_query(self.pragmas)
- return super(SQLiteDialect_pysqlcipher, self).create_connect_args(
- plain_url
- )
+ return super().create_connect_args(plain_url)
dialect = SQLiteDialect_pysqlcipher
dbapi_connection.isolation_level = None
else:
dbapi_connection.isolation_level = ""
- return super(SQLiteDialect_pysqlite, self).set_isolation_level(
- dbapi_connection, level
- )
+ return super().set_isolation_level(dbapi_connection, level)
def on_connect(self):
def regexp(a, b):
def __init__(self, connection: Connection, xid: Any):
self._is_prepared = False
self.xid = xid
- super(TwoPhaseTransaction, self).__init__(connection)
+ super().__init__(connection)
def prepare(self) -> None:
"""Prepare this :class:`.TwoPhaseTransaction`.
"is deprecated and will be removed in a future release. ",
),
)
-def create_engine(url: Union[str, "_url.URL"], **kwargs: Any) -> Engine:
+def create_engine(url: Union[str, _url.URL], **kwargs: Any) -> Engine:
"""Create a new :class:`_engine.Engine` instance.
The standard calling form is to send the :ref:`URL <database_urls>` as the
"""
- options = dict(
- (key[len(prefix) :], configuration[key])
+ options = {
+ key[len(prefix) :]: configuration[key]
for key in configuration
if key.startswith(prefix)
- )
+ }
options["_coerce_config"] = True
options.update(kwargs)
url = options.pop("url")
def soft_close(self, result, dbapi_cursor):
self._rowbuffer.clear()
- super(BufferedRowCursorFetchStrategy, self).soft_close(
- result, dbapi_cursor
- )
+ super().soft_close(result, dbapi_cursor)
def hard_close(self, result, dbapi_cursor):
self._rowbuffer.clear()
- super(BufferedRowCursorFetchStrategy, self).hard_close(
- result, dbapi_cursor
- )
+ super().hard_close(result, dbapi_cursor)
def fetchone(self, result, dbapi_cursor, hard_close=False):
if not self._rowbuffer:
def soft_close(self, result, dbapi_cursor):
self._rowbuffer.clear()
- super(FullyBufferedCursorFetchStrategy, self).soft_close(
- result, dbapi_cursor
- )
+ super().soft_close(result, dbapi_cursor)
def hard_close(self, result, dbapi_cursor):
self._rowbuffer.clear()
- super(FullyBufferedCursorFetchStrategy, self).hard_close(
- result, dbapi_cursor
- )
+ super().hard_close(result, dbapi_cursor)
def fetchone(self, result, dbapi_cursor, hard_close=False):
if self._rowbuffer:
supports_sane_rowcount = True
supports_sane_multi_rowcount = True
- colspecs: MutableMapping[
- Type["TypeEngine[Any]"], Type["TypeEngine[Any]"]
- ] = {}
+ colspecs: MutableMapping[Type[TypeEngine[Any]], Type[TypeEngine[Any]]] = {}
default_paramstyle = "named"
supports_default_values = False
use_server_side = self.execution_options.get(
"stream_results", True
) and (
- (
- self.compiled
- and isinstance(
- self.compiled.statement, expression.Selectable
- )
- or (
- (
- not self.compiled
- or isinstance(
- self.compiled.statement, expression.TextClause
- )
+ self.compiled
+ and isinstance(self.compiled.statement, expression.Selectable)
+ or (
+ (
+ not self.compiled
+ or isinstance(
+ self.compiled.statement, expression.TextClause
)
- and self.unicode_statement
- and SERVER_SIDE_CURSOR_RE.match(self.unicode_statement)
)
+ and self.unicode_statement
+ and SERVER_SIDE_CURSOR_RE.match(self.unicode_statement)
)
)
else:
]
)
else:
- parameters = dict(
- (
- key,
- processors[key](compiled_params[key]) # type: ignore
- if key in processors
- else compiled_params[key],
- )
+ parameters = {
+ key: processors[key](compiled_params[key]) # type: ignore
+ if key in processors
+ else compiled_params[key]
for key in compiled_params
- )
+ }
return self._execute_scalar(
str(compiled), type_, parameters=parameters
)
# create_engine() -> isolation_level currently goes here
_on_connect_isolation_level: Optional[IsolationLevel]
- execution_ctx_cls: Type["ExecutionContext"]
+ execution_ctx_cls: Type[ExecutionContext]
"""a :class:`.ExecutionContext` class used to handle statement execution"""
execute_sequence_format: Union[
"""target database, when given a CTE with an INSERT statement, needs
the CTE to be below the INSERT"""
- colspecs: MutableMapping[Type["TypeEngine[Any]"], Type["TypeEngine[Any]"]]
+ colspecs: MutableMapping[Type[TypeEngine[Any]], Type[TypeEngine[Any]]]
"""A dictionary of TypeEngine classes from sqlalchemy.types mapped
to subclasses that are specific to the dialect class. This
dictionary is class-level only and is not accessed from the
_bind_typing_render_casts: bool
- _type_memos: MutableMapping[TypeEngine[Any], "_TypeMemoDict"]
+ _type_memos: MutableMapping[TypeEngine[Any], _TypeMemoDict]
def _builtin_onconnect(self) -> Optional[_ListenerFnType]:
raise NotImplementedError()
- def create_connect_args(self, url: "URL") -> ConnectArgsType:
+ def create_connect_args(self, url: URL) -> ConnectArgsType:
"""Build DB-API compatible connection arguments.
Given a :class:`.URL` object, returns a tuple
raise NotImplementedError()
@classmethod
- def type_descriptor(cls, typeobj: "TypeEngine[_T]") -> "TypeEngine[_T]":
+ def type_descriptor(cls, typeobj: TypeEngine[_T]) -> TypeEngine[_T]:
"""Transform a generic type to a dialect-specific type.
Dialect classes will usually use the
self,
cursor: DBAPICursor,
statement: str,
- context: Optional["ExecutionContext"] = None,
+ context: Optional[ExecutionContext] = None,
) -> None:
"""Provide an implementation of ``cursor.execute(statement)``.
"""
raise NotImplementedError()
- def on_connect_url(self, url: "URL") -> Optional[Callable[[Any], Any]]:
+ def on_connect_url(self, url: URL) -> Optional[Callable[[Any], Any]]:
"""return a callable which sets up a newly created DBAPI connection.
This method is a new hook that supersedes the
"""
@classmethod
- def engine_created(cls, engine: "Engine") -> None:
+ def engine_created(cls, engine: Engine) -> None:
"""A convenience hook called before returning the final
:class:`_engine.Engine`.
schema_fkeys = self.get_multi_foreign_keys(schname, **kw)
tnames.extend(schema_fkeys)
for (_, tname), fkeys in schema_fkeys.items():
- fknames_for_table[(schname, tname)] = set(
- [fk["name"] for fk in fkeys]
- )
+ fknames_for_table[(schname, tname)] = {
+ fk["name"] for fk in fkeys
+ }
for fkey in fkeys:
if (
tname != fkey["referred_table"]
# intended for reflection, e.g. oracle_resolve_synonyms.
# these are unconditionally passed to related Table
# objects
- reflection_options = dict(
- (k, table.dialect_kwargs.get(k))
+ reflection_options = {
+ k: table.dialect_kwargs.get(k)
for k in dialect.reflection_options
if k in table.dialect_kwargs
- )
+ }
table_key = (schema, table_name)
if _reflect_info is None or table_key not in _reflect_info.columns:
coltype = col_d["type"]
- col_kw = dict(
- (k, col_d[k]) # type: ignore[literal-required]
+ col_kw = {
+ k: col_d[k] # type: ignore[literal-required]
for k in [
"nullable",
"autoincrement",
"comment",
]
if k in col_d
- )
+ }
if "dialect_options" in col_d:
col_kw.update(col_d["dialect_options"])
return self
def _soft_close(self, hard: bool = False, **kw: Any) -> None:
- super(ChunkedIteratorResult, self)._soft_close(hard=hard, **kw)
+ super()._soft_close(hard=hard, **kw)
self.chunks = lambda size: [] # type: ignore
def _fetchmany_impl(
self, cursor_metadata: ResultMetaData, results: Sequence[Result[_TP]]
):
self._results = results
- super(MergedResult, self).__init__(
+ super().__init__(
cursor_metadata,
itertools.chain.from_iterable(
r._raw_row_iterator() for r in results
def _wrap_fn_for_legacy(
- dispatch_collection: "_ClsLevelDispatch[_ET]",
+ dispatch_collection: _ClsLevelDispatch[_ET],
fn: _ListenerFnType,
argspec: FullArgSpec,
) -> _ListenerFnType:
def _standard_listen_example(
- dispatch_collection: "_ClsLevelDispatch[_ET]",
+ dispatch_collection: _ClsLevelDispatch[_ET],
sample_target: Any,
fn: _ListenerFnType,
) -> str:
def _legacy_listen_examples(
- dispatch_collection: "_ClsLevelDispatch[_ET]",
+ dispatch_collection: _ClsLevelDispatch[_ET],
sample_target: str,
fn: _ListenerFnType,
) -> str:
def _version_signature_changes(
- parent_dispatch_cls: Type["_HasEventsDispatch[_ET]"],
- dispatch_collection: "_ClsLevelDispatch[_ET]",
+ parent_dispatch_cls: Type[_HasEventsDispatch[_ET]],
+ dispatch_collection: _ClsLevelDispatch[_ET],
) -> str:
since, args, conv = dispatch_collection.legacy_signatures[0]
return (
def _augment_fn_docs(
- dispatch_collection: "_ClsLevelDispatch[_ET]",
- parent_dispatch_cls: Type["_HasEventsDispatch[_ET]"],
+ dispatch_collection: _ClsLevelDispatch[_ET],
+ parent_dispatch_cls: Type[_HasEventsDispatch[_ET]],
fn: _ListenerFnType,
) -> str:
header = (
code = kw.pop("code", None)
if code is not None:
self.code = code
- super(HasDescriptionCode, self).__init__(*arg, **kw)
+ super().__init__(*arg, **kw)
def _code_str(self) -> str:
if not self.code:
)
def __str__(self) -> str:
- message = super(HasDescriptionCode, self).__str__()
+ message = super().__str__()
if self.code:
message = "%s %s" % (message, self._code_str())
return message
"""
def __init__(self, target: Any):
- super(ObjectNotExecutableError, self).__init__(
- "Not an executable object: %r" % target
- )
+ super().__init__("Not an executable object: %r" % target)
self.target = target
def __reduce__(self) -> Union[str, Tuple[Any, ...]]:
element_type: Type[ClauseElement],
message: Optional[str] = None,
):
- super(UnsupportedCompilationError, self).__init__(
+ super().__init__(
"Compiler %r can't render element of type %s%s"
% (compiler, element_type, ": %s" % message if message else "")
)
dbapi_base_err: Type[Exception],
hide_parameters: bool = False,
connection_invalidated: bool = False,
- dialect: Optional["Dialect"] = None,
+ dialect: Optional[Dialect] = None,
ismulti: Optional[bool] = None,
) -> StatementError:
...
dbapi_base_err: Type[Exception],
hide_parameters: bool = False,
connection_invalidated: bool = False,
- dialect: Optional["Dialect"] = None,
+ dialect: Optional[Dialect] = None,
ismulti: Optional[bool] = None,
) -> DontWrapMixin:
...
dbapi_base_err: Type[Exception],
hide_parameters: bool = False,
connection_invalidated: bool = False,
- dialect: Optional["Dialect"] = None,
+ dialect: Optional[Dialect] = None,
ismulti: Optional[bool] = None,
) -> BaseException:
...
dbapi_base_err: Type[Exception],
hide_parameters: bool = False,
connection_invalidated: bool = False,
- dialect: Optional["Dialect"] = None,
+ dialect: Optional[Dialect] = None,
ismulti: Optional[bool] = None,
) -> Union[BaseException, DontWrapMixin]:
# Don't ever wrap these, just return them directly as if
def __str__(self) -> str:
return (
- super(Base20DeprecationWarning, self).__str__()
+ super().__str__()
+ " (Background on SQLAlchemy 2.0 at: https://sqlalche.me/e/b8d9)"
)
if obj is None:
return self
else:
- return super(AmbiguousAssociationProxyInstance, self).get(obj)
+ return super().get(obj)
def __eq__(self, obj: object) -> NoReturn:
self._ambiguous()
cls.metadata.reflect(autoload_with, **opts)
with _CONFIGURE_MUTEX:
- table_to_map_config = dict(
- (m.local_table, m)
+ table_to_map_config = {
+ m.local_table: m
for m in _DeferredMapperConfig.classes_for_base(
cls, sort=False
)
- )
+ }
many_to_many = []
# None present in ident - turn those comparisons
# into "IS NULL"
if None in primary_key_identity:
- nones = set(
- [
- _get_params[col].key
- for col, value in zip(
- mapper.primary_key, primary_key_identity
- )
- if value is None
- ]
- )
+ nones = {
+ _get_params[col].key
+ for col, value in zip(
+ mapper.primary_key, primary_key_identity
+ )
+ if value is None
+ }
_lcl_get_clause = sql_util.adapt_criterion_to_null(
_lcl_get_clause, nones
)
setup, tuple(elem is None for elem in primary_key_identity)
)
- params = dict(
- [
- (_get_params[primary_key].key, id_val)
- for id_val, primary_key in zip(
- primary_key_identity, mapper.primary_key
- )
- ]
- )
+ params = {
+ _get_params[primary_key].key: id_val
+ for id_val, primary_key in zip(
+ primary_key_identity, mapper.primary_key
+ )
+ }
result = list(bq.for_session(self.session).params(**params))
l = len(result)
class ShardedQuery(Query):
def __init__(self, *args, **kwargs):
- super(ShardedQuery, self).__init__(*args, **kwargs)
+ super().__init__(*args, **kwargs)
self.id_chooser = self.session.id_chooser
self.query_chooser = self.session.query_chooser
self.execute_chooser = self.session.execute_chooser
"""
query_chooser = kwargs.pop("query_chooser", None)
- super(ShardedSession, self).__init__(query_cls=query_cls, **kwargs)
+ super().__init__(query_cls=query_cls, **kwargs)
event.listen(
self, "do_orm_execute", execute_and_instances, retval=True
"""
if identity_token is not None:
- return super(ShardedSession, self)._identity_lookup(
+ return super()._identity_lookup(
mapper,
primary_key_identity,
identity_token=identity_token,
if lazy_loaded_from:
q = q._set_lazyload_from(lazy_loaded_from)
for shard_id in self.id_chooser(q, primary_key_identity):
- obj = super(ShardedSession, self)._identity_lookup(
+ obj = super()._identity_lookup(
mapper,
primary_key_identity,
identity_token=shard_id,
"""
if mutable:
- super(index_property, self).__init__(
- self.fget, self.fset, self.fdel, self.expr
- )
+ super().__init__(self.fget, self.fset, self.fdel, self.expr)
else:
- super(index_property, self).__init__(
- self.fget, None, None, self.expr
- )
+ super().__init__(self.fget, None, None, self.expr)
self.attr_name = attr_name
self.index = index
self.default = default
return factories
def unregister(self, class_):
- super(ExtendedInstrumentationRegistry, self).unregister(class_)
+ super().unregister(class_)
if class_ in self._manager_finders:
del self._manager_finders[class_]
del self._state_finders[class_]
self._adapted.instrument_attribute(self.class_, key, inst)
def post_configure_attribute(self, key):
- super(_ClassInstrumentationAdapter, self).post_configure_attribute(key)
+ super().post_configure_attribute(key)
self._adapted.post_configure_attribute(self.class_, key, self[key])
def install_descriptor(self, key, inst):
):
break
else:
- util.fail(api, "Can't find mapped attribute {}".format(name), cls)
+ util.fail(api, f"Can't find mapped attribute {name}", cls)
return None
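str.format() calls with inline arguments become f-strings (PEP 498), which evaluate to the same string at runtime:

    name = "path"
    # before: "Can't find mapped attribute {}".format(name)
    msg = f"Can't find mapped attribute {name}"
    assert msg == "Can't find mapped attribute path"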
if stmt.type is None:
info: TypeInfo,
data: JsonDict,
api: SemanticAnalyzerPluginInterface,
- ) -> "SQLAlchemyAttribute":
+ ) -> SQLAlchemyAttribute:
data = data.copy()
typ = deserialize_and_fixup_type(data.pop("type"), api)
return cls(typ=typ, info=info, **data)
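Quoted forward references in annotations (e.g. "SQLAlchemyAttribute" above) lose their quotes; pyupgrade only does this where annotations are not evaluated eagerly, presumably because these modules use from __future__ import annotations. The Optional[...]/Union[...] spellings themselves are left alone, consistent with keeping runtime typing intact. A sketch, assuming that future import:

    from __future__ import annotations

    class Node:
        # the quotes around the forward reference are no longer needed:
        def child(self) -> Node:
            return Node()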
and isinstance(stmt.expr[0], NameExpr)
and stmt.expr[0].fullname == "typing.TYPE_CHECKING"
):
- for substmt in stmt.body[0].body:
- yield substmt
+ yield from stmt.body[0].body
else:
yield stmt
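Explicit delegation loops collapse to PEP 380 yield from, which forwards each item (and, for full generators, send()/throw() as well) to the sub-iterable:

    def flatten_body(stmt_body):
        # before:
        #     for substmt in stmt_body:
        #         yield substmt
        yield from stmt_body

    assert list(flatten_body(["a", "b"])) == ["a", "b"]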
count_from: Optional[int] = None,
ordering_func: Optional[OrderingFunc] = None,
reorder_on_append: bool = False,
-) -> Callable[[], "OrderingList"]:
+) -> Callable[[], OrderingList]:
"""Prepares an :class:`OrderingList` factory for use in mapper definitions.
Returns an object suitable for use as an argument to a Mapper
self._set_order_value(entity, should_be)
def append(self, entity):
- super(OrderingList, self).append(entity)
+ super().append(entity)
self._order_entity(len(self) - 1, entity, self.reorder_on_append)
def _raw_append(self, entity):
"""Append without any ordering behavior."""
- super(OrderingList, self).append(entity)
+ super().append(entity)
_raw_append = collection.adds(1)(_raw_append)
def insert(self, index, entity):
- super(OrderingList, self).insert(index, entity)
+ super().insert(index, entity)
self._reorder()
def remove(self, entity):
- super(OrderingList, self).remove(entity)
+ super().remove(entity)
adapter = collection_adapter(self)
if adapter and adapter._referenced_by_owner:
self._reorder()
def pop(self, index=-1):
- entity = super(OrderingList, self).pop(index)
+ entity = super().pop(index)
self._reorder()
return entity
self.__setitem__(i, entity[i])
else:
self._order_entity(index, entity, True)
- super(OrderingList, self).__setitem__(index, entity)
+ super().__setitem__(index, entity)
def __delitem__(self, index):
- super(OrderingList, self).__delitem__(index)
+ super().__delitem__(index)
self._reorder()
def __setslice__(self, start, end, values):
- super(OrderingList, self).__setslice__(start, end, values)
+ super().__setslice__(start, end, values)
self._reorder()
def __delslice__(self, start, end):
- super(OrderingList, self).__delslice__(start, end)
+ super().__delslice__(start, end)
self._reorder()
def __reduce__(self):
logger.addHandler(handler)
-_logged_classes: Set[Type["Identified"]] = set()
+_logged_classes: Set[Type[Identified]] = set()
-def _qual_logger_name_for_cls(cls: Type["Identified"]) -> str:
+def _qual_logger_name_for_cls(cls: Type[Identified]) -> str:
return (
getattr(cls, "_sqla_logger_namespace", None)
or cls.__module__ + "." + cls.__name__
__slots__ = "_replace_token", "_append_token", "_remove_token"
def __init__(self, *arg, **kw):
- super(ScalarAttributeImpl, self).__init__(*arg, **kw)
+ super().__init__(*arg, **kw)
self._replace_token = self._append_token = AttributeEventToken(
self, OP_REPLACE
)
compare_function=None,
**kwargs,
):
- super(CollectionAttributeImpl, self).__init__(
+ super().__init__(
class_,
key,
callable_,
search_keys = {mapper._version_id_prop.key}.union(search_keys)
def _changed_dict(mapper, state):
- return dict(
- (k, v)
+ return {
+ k: v
for k, v in state.dict.items()
if k in state.committed_state or k in search_keys
- )
+ }
if isstates:
if update_changed_only:
value_evaluators[key] = _evaluator
evaluated_keys = list(value_evaluators.keys())
- attrib = set(k for k, v in resolved_keys_as_propnames)
+ attrib = {k for k, v in resolved_keys_as_propnames}
states = set()
for obj, state, dict_ in matched_objects:
on_remove: Optional[Callable[[], None]] = None,
):
self.on_remove = on_remove
- self.contents = set(
- [weakref.ref(item, self._remove_item) for item in classes]
- )
+ self.contents = {
+ weakref.ref(item, self._remove_item) for item in classes
+ }
_registries.add(self)
def remove_item(self, cls: Type[Any]) -> None:
# protect against class registration race condition against
# asynchronous garbage collection calling _remove_item,
# [ticket:3208]
- modules = set(
- [
- cls.__module__
- for cls in [ref() for ref in self.contents]
- if cls is not None
- ]
- )
+ modules = {
+ cls.__module__
+ for cls in [ref() for ref in self.contents]
+ if cls is not None
+ }
if item.__module__ in modules:
util.warn(
"This declarative base already contains a class with the "
return self
def get_children(self, **kw):
- for elem in itertools.chain.from_iterable(
+ yield from itertools.chain.from_iterable(
element._from_objects for element in self._raw_columns
- ):
- yield elem
- for elem in super(FromStatement, self).get_children(**kw):
- yield elem
+ )
+ yield from super().get_children(**kw)
@property
def _all_selected_columns(self):
):
ens = element._annotations["entity_namespace"]
if not ens.is_mapper and not ens.is_aliased_class:
- for elem in _select_iterables([element]):
- yield elem
+ yield from _select_iterables([element])
else:
- for elem in _select_iterables(ens._all_column_expressions):
- yield elem
+ yield from _select_iterables(ens._all_column_expressions)
else:
- for elem in _select_iterables([element]):
- yield elem
+ yield from _select_iterables([element])
@classmethod
def get_columns_clause_froms(cls, statement):
table: Optional[FromClause],
mapper_kw: _MapperKwArgs,
):
- super(_ImperativeMapperConfig, self).__init__(
- registry, cls_, mapper_kw
- )
+ super().__init__(registry, cls_, mapper_kw)
self.local_table = self.set_cls_attribute("__table__", table)
self.clsdict_view = (
util.immutabledict(dict_) if dict_ else util.EMPTY_DICT
)
- super(_ClassScanMapperConfig, self).__init__(registry, cls_, mapper_kw)
+ super().__init__(registry, cls_, mapper_kw)
self.registry = registry
self.persist_selectable = None
inherited_table = inherited_mapper.local_table
if "exclude_properties" not in mapper_args:
- mapper_args["exclude_properties"] = exclude_properties = set(
- [
- c.key
- for c in inherited_table.c
- if c not in inherited_mapper._columntoproperty
- ]
- ).union(inherited_mapper.exclude_properties or ())
+ mapper_args["exclude_properties"] = exclude_properties = {
+ c.key
+ for c in inherited_table.c
+ if c not in inherited_mapper._columntoproperty
+ }.union(inherited_mapper.exclude_properties or ())
exclude_properties.difference_update(
[c.key for c in self.declared_columns]
)
if not sort:
return classes_for_base
- all_m_by_cls = dict((m.cls, m) for m in classes_for_base)
+ all_m_by_cls = {m.cls: m for m in classes_for_base}
tuples: List[Tuple[_DeferredMapperConfig, _DeferredMapperConfig]] = []
for m_cls in all_m_by_cls:
def map(self, mapper_kw: _MapperKwArgs = util.EMPTY_DICT) -> Mapper[Any]:
self._configs.pop(self._cls, None)
- return super(_DeferredMapperConfig, self).map(mapper_kw)
+ return super().map(mapper_kw)
def _add_attribute(
@classmethod
def _clear(cls):
- super(InstrumentationEvents, cls)._clear()
+ super()._clear()
instrumentation._instrumentation_factory.dispatch._clear()
def class_instrument(self, cls):
@classmethod
def _clear(cls):
- super(InstanceEvents, cls)._clear()
+ super()._clear()
_InstanceEventsHold._clear()
def first_init(self, manager, cls):
@classmethod
def _clear(cls):
- super(MapperEvents, cls)._clear()
+ super()._clear()
_MapperEventsHold._clear()
def instrument_class(self, mapper, class_):
if mgr is not None and mgr is not self:
yield mgr
if recursive:
- for m in mgr.subclass_managers(True):
- yield m
+ yield from mgr.subclass_managers(True)
def post_configure_attribute(self, key):
_instrumentation_factory.dispatch.attribute_instrument(
# None present in ident - turn those comparisons
# into "IS NULL"
if None in primary_key_identity:
- nones = set(
- [
- _get_params[col].key
- for col, value in zip(
- mapper.primary_key, primary_key_identity
- )
- if value is None
- ]
- )
+ nones = {
+ _get_params[col].key
+ for col, value in zip(mapper.primary_key, primary_key_identity)
+ if value is None
+ }
_get_clause = sql_util.adapt_criterion_to_null(_get_clause, nones)
sql_util._deep_annotate(_get_clause, {"_orm_adapt": True}),
)
- params = dict(
- [
- (_get_params[primary_key].key, id_val)
- for id_val, primary_key in zip(
- primary_key_identity, mapper.primary_key
- )
- ]
- )
+ params = {
+ _get_params[primary_key].key: id_val
+ for id_val, primary_key in zip(
+ primary_key_identity, mapper.primary_key
+ )
+ }
else:
params = None
def attribute_keyed_dict(
attr_name: str, *, ignore_unpopulated_attribute: bool = False
-) -> Type["KeyFuncDict"]:
+) -> Type[KeyFuncDict]:
"""A dictionary-based collection type with attribute-based keying.
.. versionchanged:: 2.0 Renamed :data:`.attribute_mapped_collection` to
keyfunc: Callable[[Any], _KT],
*,
ignore_unpopulated_attribute: bool = False,
-) -> Type["KeyFuncDict[_KT, Any]"]:
+) -> Type[KeyFuncDict[_KT, Any]]:
"""A dictionary-based collection type with arbitrary keying.
.. versionchanged:: 2.0 Renamed :data:`.mapped_collection` to
def _unconfigured_mappers() -> Iterator[Mapper[Any]]:
for reg in _all_registries():
- for mapper in reg._mappers_to_configure():
- yield mapper
+ yield from reg._mappers_to_configure()
_already_compiling = False
with_polymorphic: Optional[
Tuple[
- Union[Literal["*"], Sequence[Union["Mapper[Any]", Type[Any]]]],
- Optional["FromClause"],
+ Union[Literal["*"], Sequence[Union[Mapper[Any], Type[Any]]]],
+ Optional[FromClause],
]
]
@HasMemoized_ro_memoized_attribute
def _insert_cols_evaluating_none(self):
- return dict(
- (
- table,
- frozenset(
- col for col in columns if col.type.should_evaluate_none
- ),
+ return {
+ table: frozenset(
+ col for col in columns if col.type.should_evaluate_none
)
for table, columns in self._cols_by_table.items()
- )
+ }
@HasMemoized.memoized_attribute
def _insert_cols_as_none(self):
- return dict(
- (
- table,
- frozenset(
- col.key
- for col in columns
- if not col.primary_key
- and not col.server_default
- and not col.default
- and not col.type.should_evaluate_none
- ),
+ return {
+ table: frozenset(
+ col.key
+ for col in columns
+ if not col.primary_key
+ and not col.server_default
+ and not col.default
+ and not col.type.should_evaluate_none
)
for table, columns in self._cols_by_table.items()
- )
+ }
@HasMemoized.memoized_attribute
def _propkey_to_col(self):
- return dict(
- (
- table,
- dict(
- (self._columntoproperty[col].key, col) for col in columns
- ),
- )
+ return {
+ table: {self._columntoproperty[col].key: col for col in columns}
for table, columns in self._cols_by_table.items()
- )
+ }
@HasMemoized.memoized_attribute
def _pk_keys_by_table(self):
- return dict(
- (table, frozenset([col.key for col in pks]))
+ return {
+ table: frozenset([col.key for col in pks])
for table, pks in self._pks_by_table.items()
- )
+ }
@HasMemoized.memoized_attribute
def _pk_attr_keys_by_table(self):
- return dict(
- (
- table,
- frozenset([self._columntoproperty[col].key for col in pks]),
- )
+ return {
+ table: frozenset([self._columntoproperty[col].key for col in pks])
for table, pks in self._pks_by_table.items()
- )
+ }
@HasMemoized.memoized_attribute
def _server_default_cols(
self,
) -> Mapping[FromClause, FrozenSet[Column[Any]]]:
- return dict(
- (
- table,
- frozenset(
- [
- col
- for col in cast("Iterable[Column[Any]]", columns)
- if col.server_default is not None
- or (
- col.default is not None
- and col.default.is_clause_element
- )
- ]
- ),
+ return {
+ table: frozenset(
+ [
+ col
+ for col in cast("Iterable[Column[Any]]", columns)
+ if col.server_default is not None
+ or (
+ col.default is not None
+ and col.default.is_clause_element
+ )
+ ]
)
for table, columns in self._cols_by_table.items()
- )
+ }
@HasMemoized.memoized_attribute
def _server_onupdate_default_cols(
self,
) -> Mapping[FromClause, FrozenSet[Column[Any]]]:
- return dict(
- (
- table,
- frozenset(
- [
- col
- for col in cast("Iterable[Column[Any]]", columns)
- if col.server_onupdate is not None
- or (
- col.onupdate is not None
- and col.onupdate.is_clause_element
- )
- ]
- ),
+ return {
+ table: frozenset(
+ [
+ col
+ for col in cast("Iterable[Column[Any]]", columns)
+ if col.server_onupdate is not None
+ or (
+ col.onupdate is not None
+ and col.onupdate.is_clause_element
+ )
+ ]
)
for table, columns in self._cols_by_table.items()
- )
+ }
@HasMemoized.memoized_attribute
def _server_default_col_keys(self) -> Mapping[FromClause, FrozenSet[str]]:
if bulk:
# keys here are mapped attribute keys, so
# look at mapper attribute keys for pk
- params = dict(
- (propkey_to_col[propkey].key, state_dict[propkey])
+ params = {
+ propkey_to_col[propkey].key: state_dict[propkey]
for propkey in set(propkey_to_col)
.intersection(state_dict)
.difference(mapper._pk_attr_keys_by_table[table])
- )
+ }
has_all_defaults = True
else:
params = {}
if bulk:
# keys here are mapped attribute keys, so
# look at mapper attribute keys for pk
- pk_params = dict(
- (propkey_to_col[propkey]._label, state_dict.get(propkey))
+ pk_params = {
+ propkey_to_col[propkey]._label: state_dict.get(propkey)
for propkey in set(propkey_to_col).intersection(
mapper._pk_attr_keys_by_table[table]
)
- )
+ }
else:
pk_params = {}
for col in pks:
def _sort_states(mapper, states):
pending = set(states)
- persistent = set(s for s in pending if s.key is not None)
+ persistent = {s for s in pending if s.key is not None}
pending.difference_update(persistent)
try:
doc: Optional[str] = None,
_instrument: bool = True,
):
- super(ColumnProperty, self).__init__(
- attribute_options=attribute_options
- )
+ super().__init__(attribute_options=attribute_options)
columns = (column,) + additional_columns
self.columns = [
coercions.expect(roles.LabeledColumnExprRole, c) for c in columns
column.name = key
@property
- def mapper_property_to_assign(self) -> Optional["MapperProperty[_T]"]:
+ def mapper_property_to_assign(self) -> Optional[MapperProperty[_T]]:
return self
@property
return self.column.name
@property
- def mapper_property_to_assign(self) -> Optional["MapperProperty[_T]"]:
+ def mapper_property_to_assign(self) -> Optional[MapperProperty[_T]]:
if self.deferred:
return ColumnProperty(
self.column,
values: Dict[_DMLColumnArgument, Any],
update_kwargs: Optional[Dict[Any, Any]],
):
- super(BulkUpdate, self).__init__(query)
+ super().__init__(query)
self.values = values
self.update_kwargs = update_kwargs
# 2. columns that are FK but are not remote (e.g. local)
# suggest manytoone.
- manytoone_local = set(
- [
- c
- for c in self._gather_columns_with_annotation(
- self.primaryjoin, "foreign"
- )
- if "remote" not in c._annotations
- ]
- )
+ manytoone_local = {
+ c
+ for c in self._gather_columns_with_annotation(
+ self.primaryjoin, "foreign"
+ )
+ if "remote" not in c._annotations
+ }
# 3. if both collections are present, remove columns that
# refer to themselves. This is for the case of
self, clause: ColumnElement[Any], *annotation: Iterable[str]
) -> Set[ColumnElement[Any]]:
annotation_set = set(annotation)
- return set(
- [
- cast(ColumnElement[Any], col)
- for col in visitors.iterate(clause, {})
- if annotation_set.issubset(col._annotations)
- ]
- )
+ return {
+ cast(ColumnElement[Any], col)
+ for col in visitors.iterate(clause, {})
+ if annotation_set.issubset(col._annotations)
+ }
def join_targets(
self,
__slots__ = ("columns",)
def __init__(self, parent, strategy_key):
- super(UninstrumentedColumnLoader, self).__init__(parent, strategy_key)
+ super().__init__(parent, strategy_key)
self.columns = self.parent_property.columns
def setup_query(
__slots__ = "columns", "is_composite"
def __init__(self, parent, strategy_key):
- super(ColumnLoader, self).__init__(parent, strategy_key)
+ super().__init__(parent, strategy_key)
self.columns = self.parent_property.columns
self.is_composite = hasattr(self.parent_property, "composite_class")
@properties.ColumnProperty.strategy_for(query_expression=True)
class ExpressionColumnLoader(ColumnLoader):
def __init__(self, parent, strategy_key):
- super(ExpressionColumnLoader, self).__init__(parent, strategy_key)
+ super().__init__(parent, strategy_key)
# compare to the "default" expression that is mapped in
# the column. If it's sql.null, we don't need to render
__slots__ = "columns", "group", "raiseload"
def __init__(self, parent, strategy_key):
- super(DeferredColumnLoader, self).__init__(parent, strategy_key)
+ super().__init__(parent, strategy_key)
if hasattr(self.parent_property, "composite_class"):
raise NotImplementedError(
"Deferred loading for composite " "types not implemented yet"
__slots__ = "mapper", "target", "uselist", "entity"
def __init__(self, parent, strategy_key):
- super(AbstractRelationshipLoader, self).__init__(parent, strategy_key)
+ super().__init__(parent, strategy_key)
self.mapper = self.parent_property.mapper
self.entity = self.parent_property.entity
self.target = self.parent_property.target
def __init__(
self, parent: RelationshipProperty[Any], strategy_key: Tuple[Any, ...]
):
- super(LazyLoader, self).__init__(parent, strategy_key)
+ super().__init__(parent, strategy_key)
self._raise_always = self.strategy_opts["lazy"] == "raise"
self._raise_on_sql = self.strategy_opts["lazy"] == "raise_on_sql"
__slots__ = ("join_depth",)
def __init__(self, parent, strategy_key):
- super(SubqueryLoader, self).__init__(parent, strategy_key)
+ super().__init__(parent, strategy_key)
self.join_depth = self.parent_property.join_depth
def init_class_attribute(self, mapper):
elif distinct_target_key is None:
# if target_cols refer to a non-primary key or only
# part of a composite primary key, set the q as distinct
- for t in set(c.table for c in target_cols):
+ for t in {c.table for c in target_cols}:
if not set(target_cols).issuperset(t.primary_key):
q._distinct = True
break
__slots__ = "join_depth", "_aliased_class_pool"
def __init__(self, parent, strategy_key):
- super(JoinedLoader, self).__init__(parent, strategy_key)
+ super().__init__(parent, strategy_key)
self.join_depth = self.parent_property.join_depth
self._aliased_class_pool = []
_chunksize = 500
def __init__(self, parent, strategy_key):
- super(SelectInLoader, self).__init__(parent, strategy_key)
+ super().__init__(parent, strategy_key)
self.join_depth = self.parent_property.join_depth
is_m2o = self.parent_property.direction is interfaces.MANYTOONE
),
]
- _of_type: Union["Mapper[Any]", "AliasedInsp[Any]", None]
+ _of_type: Union[Mapper[Any], AliasedInsp[Any], None]
_path_with_polymorphic_path: Optional[PathRegistry]
is_class_strategy = False
if cycles:
# if yes, break the per-mapper actions into
# per-state actions
- convert = dict(
- (rec, set(rec.per_state_flush_actions(self))) for rec in cycles
- )
+ convert = {
+ rec: set(rec.per_state_flush_actions(self)) for rec in cycles
+ }
# rewrite the existing dependencies to point to
# the per-state actions for those per-mapper actions
for dep in convert[edge[1]]:
self.dependencies.add((edge[0], dep))
- return set(
- [a for a in self.postsort_actions.values() if not a.disabled]
- ).difference(cycles)
+ return {
+ a for a in self.postsort_actions.values() if not a.disabled
+ }.difference(cycles)
def execute(self) -> None:
postsort_actions = self._generate_actions()
return
states = set(self.states)
- isdel = set(
+ isdel = {
s for (s, (isdelete, listonly)) in self.states.items() if isdelete
- )
+ }
other = states.difference(isdel)
if isdel:
self.session._remove_newly_deleted(isdel)
our_classes = util.to_set(
mp.class_ for mp in self.with_polymorphic_mappers
)
- new_classes = set([mp.class_ for mp in other.with_polymorphic_mappers])
+ new_classes = {mp.class_ for mp in other.with_polymorphic_mappers}
if our_classes == new_classes:
return other
else:
def _all_mappers(self) -> Iterator[Mapper[Any]]:
if self.entity:
- for mp_ent in self.entity.mapper.self_and_descendants:
- yield mp_ent
+ yield from self.entity.mapper.self_and_descendants
else:
assert self.root_entity
stack = list(self.root_entity.__subclasses__())
inspection.inspect(subclass, raiseerr=False),
)
if ent:
- for mp in ent.mapper.self_and_descendants:
- yield mp
+ yield from ent.mapper.self_and_descendants
else:
stack.extend(subclass.__subclasses__())
name: str,
element: _ColumnExpressionArgument[_T],
type_: Optional[_TypeEngineArgument[_T]] = None,
-) -> "Label[_T]":
+) -> Label[_T]:
"""Return a :class:`Label` object for the
given :class:`_expression.ColumnElement`.
"""
all_overlap = set(_expand_cloned(a)).intersection(_expand_cloned(b))
- return set(
- elem for elem in a if all_overlap.intersection(elem._cloned_set)
- )
+ return {elem for elem in a if all_overlap.intersection(elem._cloned_set)}
def _cloned_difference(a, b):
all_overlap = set(_expand_cloned(a)).intersection(_expand_cloned(b))
- return set(
+ return {
elem for elem in a if not all_overlap.intersection(elem._cloned_set)
- )
+ }
class _DialectArgView(MutableMapping[str, Any]):
else:
advice = None
- return super(ExpressionElementImpl, self)._raise_for_expected(
+ return super()._raise_for_expected(
element, argname=argname, resolved=resolved, advice=advice, **kw
)
if isinstance(resolved, roles.ExpressionElementRole):
return resolved.label(None)
else:
- new = super(LabeledColumnExprImpl, self)._implicit_coercions(
+ new = super()._implicit_coercions(
element, resolved, argname=argname, **kw
)
if isinstance(new, roles.ExpressionElementRole):
f"{', '.join(repr(e) for e in element)})?"
)
- return super(ColumnsClauseImpl, self)._raise_for_expected(
+ return super()._raise_for_expected(
element, argname=argname, resolved=resolved, advice=advice, **kw
)
)
else:
advice = None
- return super(CompoundElementImpl, self)._raise_for_expected(
+ return super()._raise_for_expected(
element, argname=argname, resolved=resolved, advice=advice, **kw
)
_FromHintsType = Dict["FromClause", str]
-RESERVED_WORDS = set(
- [
- "all",
- "analyse",
- "analyze",
- "and",
- "any",
- "array",
- "as",
- "asc",
- "asymmetric",
- "authorization",
- "between",
- "binary",
- "both",
- "case",
- "cast",
- "check",
- "collate",
- "column",
- "constraint",
- "create",
- "cross",
- "current_date",
- "current_role",
- "current_time",
- "current_timestamp",
- "current_user",
- "default",
- "deferrable",
- "desc",
- "distinct",
- "do",
- "else",
- "end",
- "except",
- "false",
- "for",
- "foreign",
- "freeze",
- "from",
- "full",
- "grant",
- "group",
- "having",
- "ilike",
- "in",
- "initially",
- "inner",
- "intersect",
- "into",
- "is",
- "isnull",
- "join",
- "leading",
- "left",
- "like",
- "limit",
- "localtime",
- "localtimestamp",
- "natural",
- "new",
- "not",
- "notnull",
- "null",
- "off",
- "offset",
- "old",
- "on",
- "only",
- "or",
- "order",
- "outer",
- "overlaps",
- "placing",
- "primary",
- "references",
- "right",
- "select",
- "session_user",
- "set",
- "similar",
- "some",
- "symmetric",
- "table",
- "then",
- "to",
- "trailing",
- "true",
- "union",
- "unique",
- "user",
- "using",
- "verbose",
- "when",
- "where",
- ]
-)
+RESERVED_WORDS = {
+ "all",
+ "analyse",
+ "analyze",
+ "and",
+ "any",
+ "array",
+ "as",
+ "asc",
+ "asymmetric",
+ "authorization",
+ "between",
+ "binary",
+ "both",
+ "case",
+ "cast",
+ "check",
+ "collate",
+ "column",
+ "constraint",
+ "create",
+ "cross",
+ "current_date",
+ "current_role",
+ "current_time",
+ "current_timestamp",
+ "current_user",
+ "default",
+ "deferrable",
+ "desc",
+ "distinct",
+ "do",
+ "else",
+ "end",
+ "except",
+ "false",
+ "for",
+ "foreign",
+ "freeze",
+ "from",
+ "full",
+ "grant",
+ "group",
+ "having",
+ "ilike",
+ "in",
+ "initially",
+ "inner",
+ "intersect",
+ "into",
+ "is",
+ "isnull",
+ "join",
+ "leading",
+ "left",
+ "like",
+ "limit",
+ "localtime",
+ "localtimestamp",
+ "natural",
+ "new",
+ "not",
+ "notnull",
+ "null",
+ "off",
+ "offset",
+ "old",
+ "on",
+ "only",
+ "or",
+ "order",
+ "outer",
+ "overlaps",
+ "placing",
+ "primary",
+ "references",
+ "right",
+ "select",
+ "session_user",
+ "set",
+ "similar",
+ "some",
+ "symmetric",
+ "table",
+ "then",
+ "to",
+ "trailing",
+ "true",
+ "union",
+ "unique",
+ "user",
+ "using",
+ "verbose",
+ "when",
+ "where",
+}
LEGAL_CHARACTERS = re.compile(r"^[A-Z0-9_$]+$", re.I)
LEGAL_CHARACTERS_PLUS_SPACE = re.compile(r"^[A-Z0-9_ $]+$", re.I)
"between each element to resolve."
)
froms_str = ", ".join(
- '"{elem}"'.format(elem=self.froms[from_])
- for from_ in froms
+ f'"{self.froms[from_]}"' for from_ in froms
)
message = template.format(
froms=froms_str, start=self.froms[start_with]
# mypy is not able to see the two value types as the above Union,
# it just sees "object". don't know how to resolve
- return dict(
- (
- key,
- value,
- ) # type: ignore
+ return {
+ key: value # type: ignore
for key, value in (
(
self.bind_names[bindparam],
for bindparam in self.bind_names
)
if value is not None
- )
+ }
def is_subquery(self):
return len(self.stack) > 1
def _setup_select_hints(
self, select: Select[Any]
) -> Tuple[str, _FromHintsType]:
- byfrom = dict(
- [
- (
- from_,
- hinttext
- % {"name": from_._compiler_dispatch(self, ashint=True)},
- )
- for (from_, dialect), hinttext in select._hints.items()
- if dialect in ("*", self.dialect.name)
- ]
- )
+ byfrom = {
+ from_: hinttext
+ % {"name": from_._compiler_dispatch(self, ashint=True)}
+ for (from_, dialect), hinttext in select._hints.items()
+ if dialect in ("*", self.dialect.name)
+ }
hint_text = self.get_select_hint_text(byfrom)
return hint_text, byfrom
)
def _setup_crud_hints(self, stmt, table_text):
- dialect_hints = dict(
- [
- (table, hint_text)
- for (table, dialect), hint_text in stmt._hints.items()
- if dialect in ("*", self.dialect.name)
- ]
- )
+ dialect_hints = {
+ table: hint_text
+ for (table, dialect), hint_text in stmt._hints.items()
+ if dialect in ("*", self.dialect.name)
+ }
if stmt.table in dialect_hints:
table_text = self.format_from_hint_text(
table_text, stmt.table, dialect_hints[stmt.table], True
if not isinstance(compiler, StrSQLCompiler):
return compiler.process(element)
- return super(StrSQLCompiler, self).visit_unsupported_compilation(
- element, err
- )
+ return super().visit_unsupported_compilation(element, err)
def visit_getitem_binary(self, binary, operator, **kw):
return "%s[%s]" % (
@util.memoized_property
def _r_identifiers(self):
- initial, final, escaped_final = [
+ initial, final, escaped_final = (
re.escape(s)
for s in (
self.initial_quote,
self.final_quote,
self._escape_identifier(self.final_quote),
)
- ]
+ )
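Tuple unpacking assigns from any iterable, so the list comprehension on the right-hand side becomes a generator expression and the throwaway list disappears. An illustrative equivalent:

    import re

    # before: a, b = [re.escape(s) for s in (".", "*")]
    a, b = (re.escape(s) for s in (".", "*"))
    assert (a, b) == ("\\.", "\\*")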
r = re.compile(
r"(?:"
r"(?:%(initial)s((?:%(escaped)s|[^%(final)s])+)%(final)s"
parameters = {}
elif stmt_parameter_tuples:
assert spd is not None
- parameters = dict(
- (_column_as_key(key), REQUIRED)
+ parameters = {
+ _column_as_key(key): REQUIRED
for key in compiler.column_keys
if key not in spd
- )
+ }
else:
- parameters = dict(
- (_column_as_key(key), REQUIRED) for key in compiler.column_keys
- )
+ parameters = {
+ _column_as_key(key): REQUIRED for key in compiler.column_keys
+ }
# create a list of column assignment clauses as tuples
values: List[_CrudParamElement] = []
values,
kw,
):
- normalized_params = dict(
- (coercions.expect(roles.DMLColumnRole, c), param)
+ normalized_params = {
+ coercions.expect(roles.DMLColumnRole, c): param
for c, param in stmt_parameter_tuples
- )
+ }
include_table = compile_state.include_table_with_column_exprs
"""
_ddl_if: Optional[DDLIf] = None
- target: Optional["SchemaItem"] = None
+ target: Optional[SchemaItem] = None
def _execute_on_connection(
self, connection, distilled_params, execution_options
def sort_tables(
- tables: Iterable["Table"],
- skip_fn: Optional[Callable[["ForeignKeyConstraint"], bool]] = None,
- extra_dependencies: Optional[
- typing_Sequence[Tuple["Table", "Table"]]
- ] = None,
-) -> List["Table"]:
+ tables: Iterable[Table],
+ skip_fn: Optional[Callable[[ForeignKeyConstraint], bool]] = None,
+ extra_dependencies: Optional[typing_Sequence[Tuple[Table, Table]]] = None,
+) -> List[Table]:
"""Sort a collection of :class:`_schema.Table` objects based on
dependency.
)
def __init__(self, table: _DMLTableArgument):
- super(Insert, self).__init__(table)
+ super().__init__(table)
@_generative
def inline(self: SelfInsert) -> SelfInsert:
)
def __init__(self, table: _DMLTableArgument):
- super(Update, self).__init__(table)
+ super().__init__(table)
@_generative
def ordered_values(
if not self.clauses:
return self
else:
- return super(BooleanClauseList, self).self_group(against=against)
+ return super().self_group(against=against)
and_ = BooleanClauseList.and_
]
self.type = sqltypes.TupleType(*[arg.type for arg in init_clauses])
- super(Tuple, self).__init__(*init_clauses)
+ super().__init__(*init_clauses)
@property
def _select_iterable(self) -> _SelectIterable:
if typing.TYPE_CHECKING:
def __invert__(
- self: "BinaryExpression[_T]",
- ) -> "BinaryExpression[_T]":
+ self: BinaryExpression[_T],
+ ) -> BinaryExpression[_T]:
...
@util.ro_non_memoized_property
modifiers=self.modifiers,
)
else:
- return super(BinaryExpression, self)._negate()
+ return super()._negate()
class Slice(ColumnElement[Any]):
if self.table is not None:
return self.table.entity_namespace
else:
- return super(ColumnClause, self).entity_namespace
+ return super().entity_namespace
def _clone(self, detect_subquery_cols=False, **kw):
if (
new = table.c.corresponding_column(self)
return new
- return super(ColumnClause, self)._clone(**kw)
+ return super()._clone(**kw)
@HasMemoized_ro_memoized_attribute
def _from_objects(self) -> List[FromClause]:
self.__dict__.pop(attr)
def _with_annotations(self, values):
- clone = super(AnnotatedColumnElement, self)._with_annotations(values)
+ clone = super()._with_annotations(values)
clone.__dict__.pop("comparator", None)
return clone
def __new__(cls, value: str, quote: Optional[bool] = None) -> Any:
quote = getattr(value, "quote", quote)
# return super(_truncated_label, cls).__new__(cls, value, quote, True)
- return super(_truncated_label, cls).__new__(cls, value, quote)
+ return super().__new__(cls, value, quote)
def __reduce__(self) -> Any:
return self.__class__, (str(self), self.quote)
@property
def _proxy_key(self):
- return super(FunctionElement, self)._proxy_key or getattr(
- self, "name", None
- )
+ return super()._proxy_key or getattr(self, "name", None)
def _execute_on_connection(
self,
):
return Grouping(self)
else:
- return super(FunctionElement, self).self_group(against=against)
+ return super().self_group(against=against)
@property
def entity_namespace(self):
]
kwargs.setdefault("type_", _type_from_args(fn_args))
kwargs["_parsed_args"] = fn_args
- super(ReturnTypeFromArgs, self).__init__(*fn_args, **kwargs)
+ super().__init__(*fn_args, **kwargs)
class coalesce(ReturnTypeFromArgs[_T]):
def __init__(self, expression=None, **kwargs):
if expression is None:
expression = literal_column("*")
- super(count, self).__init__(expression, **kwargs)
+ super().__init__(expression, **kwargs)
class current_date(AnsiFunction[datetime.date]):
type_from_args, dimensions=1
)
kwargs["_parsed_args"] = fn_args
- super(array_agg, self).__init__(*fn_args, **kwargs)
+ super().__init__(*fn_args, **kwargs)
class OrderedSetAgg(GenericFunction[_T]):
lambda_args: Tuple[Any, ...] = (),
):
self.lambda_args = lambda_args
- super(DeferredLambdaElement, self).__init__(fn, role, opts)
+ super().__init__(fn, role, opts)
def _invoke_user_fn(self, fn, *arg):
return fn(*self.lambda_args)
def _copy_internals(
self, clone=_clone, deferred_copy_internals=None, **kw
):
- super(DeferredLambdaElement, self)._copy_internals(
+ super()._copy_internals(
clone=clone,
deferred_copy_internals=deferred_copy_internals, # **kw
opts=kw,
def __call__(
self,
- left: "Operators",
+ left: Operators,
right: Optional[Any] = None,
*other: Any,
**kwargs: Any,
- ) -> "Operators":
+ ) -> Operators:
...
precedence: int = 0,
is_comparison: bool = False,
return_type: Optional[
- Union[Type["TypeEngine[Any]"], "TypeEngine[Any]"]
+ Union[Type[TypeEngine[Any]], TypeEngine[Any]]
] = None,
python_impl: Optional[Callable[..., Any]] = None,
) -> Callable[[Any], Operators]:
precedence: int = 0,
is_comparison: bool = False,
return_type: Optional[
- Union[Type["TypeEngine[_T]"], "TypeEngine[_T]"]
+ Union[Type[TypeEngine[_T]], TypeEngine[_T]]
] = None,
natural_self_precedent: bool = False,
eager_grouping: bool = False,
:attr:`_schema.Table.indexes`
"""
- return set(
+ return {
fkc.constraint
for fkc in self.foreign_keys
if fkc.constraint is not None
- )
+ }
def _init_existing(self, *args: Any, **kwargs: Any) -> None:
autoload_with = kwargs.pop("autoload_with", None)
# name = None is expected to be an interim state
# note this use case is legacy now that ORM declarative has a
# dedicated "column" construct local to the ORM
- super(Column, self).__init__(name, type_) # type: ignore
+ super().__init__(name, type_) # type: ignore
self.key = key if key is not None else name # type: ignore
self.primary_key = primary_key
def _set_parent(self, parent: SchemaEventTarget, **kw: Any) -> None:
column = parent
assert isinstance(column, Column)
- super(Sequence, self)._set_parent(column)
+ super()._set_parent(column)
column._on_table_attach(self._set_table)
def _copy(self) -> Sequence:
_reflected: bool = False,
) -> None:
util.assert_arg_type(arg, (str, ClauseElement, TextClause), "arg")
- super(DefaultClause, self).__init__(for_update)
+ super().__init__(for_update)
self.arg = arg
self.reflected = _reflected
# issue #3411 - don't do the per-column auto-attach if some of the
# columns are specified as strings.
- has_string_cols = set(
+ has_string_cols = {
c for c in self._pending_colargs if c is not None
- ).difference(col_objs)
+ }.difference(col_objs)
if not has_string_cols:
def _col_attached(column: Column[Any], table: Table) -> None:
return self.elements[0].column.table
def _validate_dest_table(self, table: Table) -> None:
- table_keys = set([elem._table_key() for elem in self.elements])
+ table_keys = {elem._table_key() for elem in self.elements}
if None not in table_keys and len(table_keys) > 1:
elem0, elem1 = sorted(table_keys)[0:2]
raise exc.ArgumentError(
**dialect_kw: Any,
) -> None:
self._implicit_generated = _implicit_generated
- super(PrimaryKeyConstraint, self).__init__(
+ super().__init__(
*columns,
name=name,
deferrable=deferrable,
def _set_parent(self, parent: SchemaEventTarget, **kw: Any) -> None:
table = parent
assert isinstance(table, Table)
- super(PrimaryKeyConstraint, self)._set_parent(table)
+ super()._set_parent(table)
if table.primary_key is not self:
table.constraints.discard(table.primary_key)
for fk in removed.foreign_keys:
fk._remove_from_metadata(self)
if self._schemas:
- self._schemas = set(
- [
- t.schema
- for t in self.tables.values()
- if t.schema is not None
- ]
- )
+ self._schemas = {
+ t.schema for t in self.tables.values() if t.schema is not None
+ }
def __getstate__(self) -> Dict[str, Any]:
return {
# run normal _copy_internals. the clones for
# left and right will come from the clone function's
# cache
- super(Join, self)._copy_internals(clone=clone, **kw)
+ super()._copy_internals(clone=clone, **kw)
self._reset_memoizations()
def _refresh_for_new_column(self, column: ColumnElement[Any]) -> None:
- super(Join, self)._refresh_for_new_column(column)
+ super()._refresh_for_new_column(column)
self.left._refresh_for_new_column(column)
self.right._refresh_for_new_column(column)
# "consider_as_foreign_keys".
if consider_as_foreign_keys:
for const in list(constraints):
- if set(f.parent for f in const.elements) != set(
+ if {f.parent for f in const.elements} != set(
consider_as_foreign_keys
):
del constraints[const]
# if still multiple constraints, but
# they all refer to the exact same end result, use it.
if len(constraints) > 1:
- dedupe = set(tuple(crit) for crit in constraints.values())
+ dedupe = {tuple(crit) for crit in constraints.values()}
if len(dedupe) == 1:
key = list(constraints)[0]
constraints = {key: constraints[key]}
self.name = name
def _refresh_for_new_column(self, column: ColumnElement[Any]) -> None:
- super(AliasedReturnsRows, self)._refresh_for_new_column(column)
+ super()._refresh_for_new_column(column)
self.element._refresh_for_new_column(column)
def _populate_column_collection(self):
) -> None:
existing_element = self.element
- super(AliasedReturnsRows, self)._copy_internals(clone=clone, **kw)
+ super()._copy_internals(clone=clone, **kw)
# the element clone is usually against a Table that returns the
# same object. don't reset exported .c. collections and other
table_value_type=None,
joins_implicitly=False,
):
- super(TableValuedAlias, self)._init(selectable, name=name)
+ super()._init(selectable, name=name)
self.joins_implicitly = joins_implicitly
self._tableval_type = (
self.sampling = sampling
self.seed = seed
- super(TableSample, self)._init(selectable, name=name)
+ super()._init(selectable, name=name)
def _get_method(self):
return self.sampling
self._prefixes = _prefixes
if _suffixes:
self._suffixes = _suffixes
- super(CTE, self)._init(selectable, name=name)
+ super()._init(selectable, name=name)
def _populate_column_collection(self):
if self._cte_alias is not None:
return None
def __init__(self, name: str, *columns: ColumnClause[Any], **kw: Any):
- super(TableClause, self).__init__()
+ super().__init__()
self.name = name
self._columns = DedupeColumnCollection()
self.primary_key = ColumnSet() # type: ignore
name: Optional[str] = None,
literal_binds: bool = False,
):
- super(Values, self).__init__()
+ super().__init__()
self._column_args = columns
if name is None:
self._unnamed = True
# TODO: this is hacky and slow
hacky_subquery = self.statement.subquery()
hacky_subquery.named_with_column = False
- d = dict((c.key, c) for c in hacky_subquery.c)
+ d = {c.key: c for c in hacky_subquery.c}
return d, d, d
)
def _refresh_for_new_column(self, column):
- super(CompoundSelect, self)._refresh_for_new_column(column)
+ super()._refresh_for_new_column(column)
for select in self.selects:
select._refresh_for_new_column(column)
Dict[str, ColumnElement[Any]],
Dict[str, ColumnElement[Any]],
]:
- with_cols: Dict[str, ColumnElement[Any]] = dict(
- (c._tq_label or c.key, c) # type: ignore
+ with_cols: Dict[str, ColumnElement[Any]] = {
+ c._tq_label or c.key: c # type: ignore
for c in self.statement._all_selected_columns
if c._allow_label_resolve
- )
- only_froms: Dict[str, ColumnElement[Any]] = dict(
- (c.key, c) # type: ignore
+ }
+ only_froms: Dict[str, ColumnElement[Any]] = {
+ c.key: c # type: ignore
for c in _select_iterables(self.froms)
if c._allow_label_resolve
- )
+ }
only_cols: Dict[str, ColumnElement[Any]] = with_cols.copy()
for key, value in only_froms.items():
with_cols.setdefault(key, value)
# 2. copy FROM collections, adding in joins that we've created.
existing_from_obj = [clone(f, **kw) for f in self._from_obj]
add_froms = (
- set(f for f in new_froms.values() if isinstance(f, Join))
+ {f for f in new_froms.values() if isinstance(f, Join)}
.difference(all_the_froms)
.difference(existing_from_obj)
)
# correlate_except, setup_joins, these clone normally. For
# column-expression oriented things like raw_columns, where_criteria,
# order by, we get this from the new froms.
- super(Select, self)._copy_internals(
- clone=clone, omit_attrs=("_from_obj",), **kw
- )
+ super()._copy_internals(clone=clone, omit_attrs=("_from_obj",), **kw)
self._reset_memoizations()
def get_children(self, **kw: Any) -> Iterable[ClauseElement]:
return itertools.chain(
- super(Select, self).get_children(
+ super().get_children(
omit_attrs=("_from_obj", "_correlate", "_correlate_except"),
**kw,
),
):
return operators.concat_op, self.expr.type
else:
- return super(Concatenable.Comparator, self)._adapt_expression(
- op, other_comparator
- )
+ return super()._adapt_expression(op, other_comparator)
comparator_factory: _ComparatorFactory[Any] = Comparator
Parameters are the same as that of :class:`.String`.
"""
- super(Unicode, self).__init__(length=length, **kwargs)
+ super().__init__(length=length, **kwargs)
class UnicodeText(Text):
Parameters are the same as that of :class:`_expression.TextClause`.
"""
- super(UnicodeText, self).__init__(length=length, **kwargs)
+ super().__init__(length=length, **kwargs)
class Integer(HasExpressionLookup, TypeEngine[int]):
if isinstance(value, str):
return self
else:
- return super(_Binary, self).coerce_compared_value(op, value)
+ return super().coerce_compared_value(op, value)
def get_dbapi_type(self, dbapi):
return dbapi.BINARY
self._valid_lookup[None] = self._object_lookup[None] = None
- super(Enum, self).__init__(length=length)
+ super().__init__(length=length)
if self.enum_class:
kw.setdefault("name", self.enum_class.__name__.lower())
op: OperatorType,
other_comparator: TypeEngine.Comparator[Any],
) -> Tuple[OperatorType, TypeEngine[Any]]:
- op, typ = super(Enum.Comparator, self)._adapt_expression(
- op, other_comparator
- )
+ op, typ = super()._adapt_expression(op, other_comparator)
if op is operators.concat_op:
typ = String(self.type.length)
return op, typ
def adapt(self, impltype, **kw):
kw["_enums"] = self._enums_argument
kw["_disable_warnings"] = True
- return super(Enum, self).adapt(impltype, **kw)
+ return super().adapt(impltype, **kw)
def _should_create_constraint(self, compiler, **kw):
if not self._is_impl_for_variant(compiler.dialect, kw):
assert e.table is table
def literal_processor(self, dialect):
- parent_processor = super(Enum, self).literal_processor(dialect)
+ parent_processor = super().literal_processor(dialect)
def process(value):
value = self._db_value_for_elem(value)
return process
def bind_processor(self, dialect):
- parent_processor = super(Enum, self).bind_processor(dialect)
+ parent_processor = super().bind_processor(dialect)
def process(value):
value = self._db_value_for_elem(value)
return process
def result_processor(self, dialect, coltype):
- parent_processor = super(Enum, self).result_processor(dialect, coltype)
+ parent_processor = super().result_processor(dialect, coltype)
def process(value):
if parent_processor:
if self.enum_class:
return self.enum_class
else:
- return super(Enum, self).python_type
+ return super().python_type
class PickleType(TypeDecorator[object]):
self.protocol = protocol
self.pickler = pickler or pickle
self.comparator = comparator
- super(PickleType, self).__init__()
+ super().__init__()
if impl:
# custom impl is not necessarily a LargeBinary subclass.
support a "day precision" parameter, i.e. Oracle.
"""
- super(Interval, self).__init__()
+ super().__init__()
self.native = native
self.second_precision = second_precision
self.day_precision = day_precision
def _set_parent_with_dispatch(self, parent):
"""Support SchemaEventTarget"""
- super(ARRAY, self)._set_parent_with_dispatch(parent, outer=True)
+ super()._set_parent_with_dispatch(parent, outer=True)
if isinstance(self.item_type, SchemaEventTarget):
self.item_type._set_parent_with_dispatch(parent)
"""
- super(TIMESTAMP, self).__init__(timezone=timezone)
+ super().__init__(timezone=timezone)
def get_dbapi_type(self, dbapi):
return dbapi.TIMESTAMP
@overload
def __init__(
- self: "Uuid[_python_UUID]",
+ self: Uuid[_python_UUID],
as_uuid: Literal[True] = ...,
native_uuid: bool = ...,
):
@overload
def __init__(
- self: "Uuid[str]",
+ self: Uuid[str],
as_uuid: Literal[False] = ...,
native_uuid: bool = ...,
):
__visit_name__ = "UUID"
@overload
- def __init__(self: "UUID[_python_UUID]", as_uuid: Literal[True] = ...):
+ def __init__(self: UUID[_python_UUID], as_uuid: Literal[True] = ...):
...
@overload
- def __init__(self: "UUID[str]", as_uuid: Literal[False] = ...):
+ def __init__(self: UUID[str], as_uuid: Literal[False] = ...):
...
def __init__(self, as_uuid: bool = True):
def visit_string_clauseelement_dict(
self, attrname, parent, element, clone=_clone, **kw
):
- return dict(
- (key, clone(value, **kw)) for key, value in element.items()
- )
+ return {key: clone(value, **kw) for key, value in element.items()}
def visit_setup_join_tuple(
self, attrname, parent, element, clone=_clone, **kw
def _is_native_for_emulated(
typ: Type[Union[TypeEngine[Any], TypeEngineMixin]],
-) -> TypeGuard["Type[NativeForEmulated]"]:
+) -> TypeGuard[Type[NativeForEmulated]]:
return hasattr(typ, "adapt_emulated_to_native")
if TYPE_CHECKING:
assert isinstance(self.expr.type, TypeDecorator)
kwargs["_python_is_types"] = self.expr.type.coerce_to_is_types
- return super(TypeDecorator.Comparator, self).operate(
- op, *other, **kwargs
- )
+ return super().operate(op, *other, **kwargs)
def reverse_operate(
self, op: OperatorType, other: Any, **kwargs: Any
if TYPE_CHECKING:
assert isinstance(self.expr.type, TypeDecorator)
kwargs["_python_is_types"] = self.expr.type.coerce_to_is_types
- return super(TypeDecorator.Comparator, self).reverse_operate(
- op, other, **kwargs
- )
+ return super().reverse_operate(op, other, **kwargs)
@property
def comparator_factory( # type: ignore # mypy properties bug
if isinstance(element, ColumnClause):
yield element
for elem in element.get_children():
- for e in visit(elem):
- yield e
+ yield from visit(elem)
list(visit(expr))
visit = None # type: ignore # remove gc cycles
in the collist.
"""
- cols_already_present = set(
- [
- col.element if col._order_by_label_element is not None else col
- for col in collist
- ]
- )
+ cols_already_present = {
+ col.element if col._order_by_label_element is not None else col
+ for col in collist
+ }
to_look_for = list(chain(*[unwrap_order_by(o) for o in order_by]))
def tables_from_leftmost(clause: FromClause) -> Iterator[FromClause]:
if isinstance(clause, Join):
- for t in tables_from_leftmost(clause.left):
- yield t
- for t in tables_from_leftmost(clause.right):
- yield t
+ yield from tables_from_leftmost(clause.left)
+ yield from tables_from_leftmost(clause.right)
elif isinstance(clause, FromGrouping):
- for t in tables_from_leftmost(clause.element):
- yield t
+ yield from tables_from_leftmost(clause.element)
else:
yield clause
__slots__ = ("row",)
- def __init__(self, row: "Row[Any]", max_chars: int = 300):
+ def __init__(self, row: Row[Any], max_chars: int = 300):
self.row = row
self.max_chars = max_chars
)
return text
- def _repr_param_tuple(self, params: "Sequence[Any]") -> str:
+ def _repr_param_tuple(self, params: Sequence[Any]) -> str:
trunc = self.trunc
(
return received_stmt == stmt
def _received_statement(self, execute_observed):
- received_stmt, received_params = super(
- DialectSQL, self
- )._received_statement(execute_observed)
+ received_stmt, received_params = super()._received_statement(
+ execute_observed
+ )
# TODO: why do we need this part?
for real_stmt in execute_observed.statements:
if self.rules and not self.rules[0].is_consumed:
self.rules[0].no_more_statements()
elif self.rules:
- super(EachOf, self).no_more_statements()
+ super().no_more_statements()
class Conditional(EachOf):
def __init__(self, condition, rules, else_rules):
if condition:
- super(Conditional, self).__init__(*rules)
+ super().__init__(*rules)
else:
- super(Conditional, self).__init__(*else_rules)
+ super().__init__(*else_rules)
class Or(AllOf):
@typing.overload
def testing_engine(
- url: Optional["URL"] = None,
+ url: Optional[URL] = None,
options: Optional[Dict[str, Any]] = None,
asyncio: Literal[False] = False,
transfer_staticpool: bool = False,
-) -> "Engine":
+) -> Engine:
...
@typing.overload
def testing_engine(
- url: Optional["URL"] = None,
+ url: Optional[URL] = None,
options: Optional[Dict[str, Any]] = None,
asyncio: Literal[True] = True,
transfer_staticpool: bool = False,
-) -> "AsyncEngine":
+) -> AsyncEngine:
...
for fail in self.fails:
if fail(config):
print(
- (
- "%s failed as expected (%s): %s "
- % (name, fail._as_string(config), ex)
- )
+ "%s failed as expected (%s): %s "
+ % (name, fail._as_string(config), ex)
)
break
else:
# sets up cls.Basic which is helpful for things like composite
# classes
- super(DeclarativeMappedTest, cls)._with_register_classes(fn)
+ super()._with_register_classes(fn)
if cls._tables_metadata.tables and cls.run_create_tables:
cls._tables_metadata.create_all(config.db)
required=False,
help=None, # noqa
):
- super(CallableAction, self).__init__(
+ super().__init__(
option_strings=option_strings,
dest=dest,
nargs=0,
and not item.getparent(pytest.Class).name.startswith("_")
]
- test_classes = set(item.getparent(pytest.Class) for item in items)
+ test_classes = {item.getparent(pytest.Class) for item in items}
def collect(element):
for inst_or_fn in element.collect():
def _read(self):
try:
profile_f = open(self.fname)
- except IOError:
+ except OSError:
return
for lineno, line in enumerate(profile_f):
line = line.strip()
profile_f.close()
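IOError has been a plain alias of OSError since Python 3.3 (PEP 3151), so catching OSError is the canonical modern spelling and changes nothing at runtime:

    assert IOError is OSError
    try:
        open("/nonexistent/profile.txt")
    except OSError:  # catches what used to be spelled IOError
        pass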
def _write(self):
- print(("Writing profile file %s" % self.fname))
+ print("Writing profile file %s" % self.fname)
profile_f = open(self.fname, "w")
profile_f.write(self._header())
for test_key in sorted(self.data):
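The doubled parentheses unwrapped above were residue of converted print statements; print((x)) and print(x) are the same call, the inner pair being a no-op grouping:

    # identical output either way:
    print(("Writing profile file %s" % "stats.txt"))
    print("Writing profile file %s" % "stats.txt")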
else:
line_no, expected_count = expected
- print(("Pstats calls: %d Expected %s" % (callcount, expected_count)))
+ print("Pstats calls: %d Expected %s" % (callcount, expected_count))
stats.sort_stats(*re.split(r"[, ]", _profile_stats.sort))
stats.print_stats()
if _profile_stats.dump:
-#! coding: utf-8
# mypy: ignore-errors
# "unique constraints" are actually unique indexes (with possible
# exception of a unique that is a dupe of another one in the case
# of Oracle). make sure they aren't duplicated.
- idx_names = set([idx.name for idx in reflected.indexes])
- uq_names = set(
- [
- uq.name
- for uq in reflected.constraints
- if isinstance(uq, sa.UniqueConstraint)
- ]
- ).difference(["unique_c_a_b"])
+ idx_names = {idx.name for idx in reflected.indexes}
+ uq_names = {
+ uq.name
+ for uq in reflected.constraints
+ if isinstance(uq, sa.UniqueConstraint)
+ }.difference(["unique_c_a_b"])
assert not idx_names.intersection(uq_names)
if names_that_duplicate_index:
)
t.create(connection)
eq_(
- dict(
- (col["name"], col["nullable"])
+ {
+ col["name"]: col["nullable"]
for col in inspect(connection).get_columns("t")
- ),
+ },
{"a": True, "b": False},
)
# that can reflect these, since alembic looks for this
opts = insp.get_foreign_keys("table")[0]["options"]
- eq_(dict((k, opts[k]) for k in opts if opts[k]), {})
+ eq_({k: opts[k] for k in opts if opts[k]}, {})
opts = insp.get_foreign_keys("user")[0]["options"]
eq_(opts, expected)
.offset(2)
).fetchall()
eq_(fa[0], (3, 3, 4))
- eq_(set(fa), set([(3, 3, 4), (4, 4, 5), (5, 4, 6)]))
+ eq_(set(fa), {(3, 3, 4), (4, 4, 5), (5, 4, 6)})
@testing.requires.fetch_ties
@testing.requires.fetch_offset_with_options
.offset(2)
).fetchall()
eq_(fa[0], (3, 3, 4))
- eq_(set(fa), set([(3, 3, 4), (4, 4, 5), (5, 4, 6)]))
+ eq_(set(fa), {(3, 3, 4), (4, 4, 5), (5, 4, 6)})
class SameNamedSchemaTableTest(fixtures.TablesTest):
result = {row[0] for row in connection.execute(t.select())}
output = set(output)
if filter_:
- result = set(filter_(x) for x in result)
- output = set(filter_(x) for x in output)
+ result = {filter_(x) for x in result}
+ output = {filter_(x) for x in output}
eq_(result, output)
if check_scale:
eq_([str(x) for x in result], [str(x) for x in output])
@testing.requires.precision_numerics_general
def test_precision_decimal(self, do_numeric_test):
- numbers = set(
- [
- decimal.Decimal("54.234246451650"),
- decimal.Decimal("0.004354"),
- decimal.Decimal("900.0"),
- ]
- )
+ numbers = {
+ decimal.Decimal("54.234246451650"),
+ decimal.Decimal("0.004354"),
+ decimal.Decimal("900.0"),
+ }
do_numeric_test(Numeric(precision=18, scale=12), numbers, numbers)
"""
- numbers = set(
- [
- decimal.Decimal("1E-2"),
- decimal.Decimal("1E-3"),
- decimal.Decimal("1E-4"),
- decimal.Decimal("1E-5"),
- decimal.Decimal("1E-6"),
- decimal.Decimal("1E-7"),
- decimal.Decimal("1E-8"),
- decimal.Decimal("0.01000005940696"),
- decimal.Decimal("0.00000005940696"),
- decimal.Decimal("0.00000000000696"),
- decimal.Decimal("0.70000000000696"),
- decimal.Decimal("696E-12"),
- ]
- )
+ numbers = {
+ decimal.Decimal("1E-2"),
+ decimal.Decimal("1E-3"),
+ decimal.Decimal("1E-4"),
+ decimal.Decimal("1E-5"),
+ decimal.Decimal("1E-6"),
+ decimal.Decimal("1E-7"),
+ decimal.Decimal("1E-8"),
+ decimal.Decimal("0.01000005940696"),
+ decimal.Decimal("0.00000005940696"),
+ decimal.Decimal("0.00000000000696"),
+ decimal.Decimal("0.70000000000696"),
+ decimal.Decimal("696E-12"),
+ }
do_numeric_test(Numeric(precision=18, scale=14), numbers, numbers)
@testing.requires.precision_numerics_enotation_large
def test_enotation_decimal_large(self, do_numeric_test):
"""test exceedingly large decimals."""
- numbers = set(
- [
- decimal.Decimal("4E+8"),
- decimal.Decimal("5748E+15"),
- decimal.Decimal("1.521E+15"),
- decimal.Decimal("00000000000000.1E+12"),
- ]
- )
+ numbers = {
+ decimal.Decimal("4E+8"),
+ decimal.Decimal("5748E+15"),
+ decimal.Decimal("1.521E+15"),
+ decimal.Decimal("00000000000000.1E+12"),
+ }
do_numeric_test(Numeric(precision=25, scale=2), numbers, numbers)
@testing.requires.precision_numerics_many_significant_digits
def test_many_significant_digits(self, do_numeric_test):
- numbers = set(
- [
- decimal.Decimal("31943874831932418390.01"),
- decimal.Decimal("319438950232418390.273596"),
- decimal.Decimal("87673.594069654243"),
- ]
- )
+ numbers = {
+ decimal.Decimal("31943874831932418390.01"),
+ decimal.Decimal("319438950232418390.273596"),
+ decimal.Decimal("87673.594069654243"),
+ }
do_numeric_test(Numeric(precision=38, scale=12), numbers, numbers)
@testing.requires.precision_numerics_retains_significant_digits
def test_numeric_no_decimal(self, do_numeric_test):
- numbers = set([decimal.Decimal("1.000")])
+ numbers = {decimal.Decimal("1.000")}
do_numeric_test(
Numeric(precision=5, scale=3), numbers, numbers, check_scale=True
)
def default(self, o):
if isinstance(o, decimal.Decimal):
return str(o)
- return super(DecimalEncoder, self).default(o)
+ return super().default(o)
json_data = json.dumps(data_element, cls=DecimalEncoder)
return iter(list(self._data.values()))
def __dir__(self) -> List[str]:
- return dir(super(Properties, self)) + [
- str(k) for k in self._data.keys()
- ]
+ return dir(super()) + [str(k) for k in self._data.keys()]
def __add__(self, other: Properties[_F]) -> List[Union[_T, _F]]:
return list(self) + list(other) # type: ignore
elem: _T
for elem in x:
if not isinstance(elem, str) and hasattr(elem, "__iter__"):
- for y in flatten_iterator(elem):
- yield y
+ yield from flatten_iterator(elem)
else:
yield elem
capacity: int
threshold: float
- size_alert: Optional[Callable[["LRUCache[_KT, _VT]"], None]]
+ size_alert: Optional[Callable[[LRUCache[_KT, _VT]], None]]
def __init__(
self,
dead: bool
gr_context: Optional[Context]
- def __init__(self, fn: Callable[..., Any], driver: "greenlet"):
+ def __init__(self, fn: Callable[..., Any], driver: greenlet):
...
def throw(self, *arg: Any) -> Any:
if inspect.ismethod(func):
func = func.__func__
if not inspect.isfunction(func):
- raise TypeError("{!r} is not a Python function".format(func))
+ raise TypeError(f"{func!r} is not a Python function")
co = func.__code__
if not inspect.iscode(co):
- raise TypeError("{!r} is not a code object".format(co))
+ raise TypeError(f"{co!r} is not a code object")
nargs = co.co_argcount
names = co.co_varnames
fget: Callable[[Any], Any]
def __init__(self, fget: Callable[[Any], Any], *arg: Any, **kw: Any):
- super(classproperty, self).__init__(fget, *arg, **kw)
+ super().__init__(fget, *arg, **kw)
self.__doc__ = fget.__doc__
def __get__(self, obj: Any, cls: Optional[type] = None) -> Any:
interpolated = (value % args) + (
" (this warning may be suppressed after %d occurrences)" % num
)
- self = super(_hash_limit_string, cls).__new__(cls, interpolated)
+ self = super().__new__(cls, interpolated)
self._hash = hash("%s_%d" % (value, hash(interpolated) % num))
return self
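The super(Cls, self) -> super() rewrites rely on the implicit __class__ cell
the compiler creates for functions defined in a class body, so the
zero-argument form is exactly equivalent there, including in __new__, where
the first argument is the class rather than an instance. A minimal sketch:

class Base:
    def greet(self):
        return "base"

class Child(Base):
    def greet(self):
        # equivalent to super(Child, self).greet()
        return super().greet() + "+child"

assert Child().greet() == "base+child"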
"""
for set_ in sort_as_subsets(tuples, allitems):
- for s in set_:
- yield s
+ yield from set_
def find_cycles(
go()
def test_visit_binary_product(self):
- a, b, q, e, f, j, r = [column(chr_) for chr_ in "abqefjr"]
+ a, b, q, e, f, j, r = (column(chr_) for chr_ in "abqefjr")
from sqlalchemy import and_, func
from sqlalchemy.sql.util import visit_binary_product
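Swapping a list comprehension for a generator expression in an unpacking
target, as above, is safe: tuple unpacking consumes the whole iterable either
way and raises the same ValueError on a length mismatch.

a, b, c = (ch.upper() for ch in "abc")
assert (a, b, c) == ("A", "B", "C")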
[
A(
id=i,
- **dict(
- (letter, "%s%d" % (letter, i))
+ **{
+ letter: "%s%d" % (letter, i)
for letter in ["x", "y", "z", "p", "q", "r"]
- ),
+ },
)
for i in range(1, 1001)
]
conn.execute(
t.insert(),
[
- dict(
- ("field%d" % fnum, "value%d" % fnum)
+ {
+ "field%d" % fnum: "value%d" % fnum
for fnum in range(NUM_FIELDS)
- )
+ }
for r_num in range(NUM_RECORDS)
],
)
conn.execute(
t2.insert(),
[
- dict(
- ("field%d" % fnum, "value%d" % fnum)
+ {
+ "field%d" % fnum: "value%d" % fnum
for fnum in range(NUM_FIELDS)
- )
+ }
for r_num in range(NUM_RECORDS)
],
)
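The dict(generator-of-pairs) -> {key: value ...} rewrites are equivalent; the
comprehension avoids the generic dict() call and one tuple per entry. A
standalone sketch in the same shape (NUM_FIELDS is just a sample value here,
not the tests' constant):

NUM_FIELDS = 3  # sample value for illustration
row = {"field%d" % fnum: "value%d" % fnum for fnum in range(NUM_FIELDS)}
assert row == dict(("field%d" % f, "value%d" % f) for f in range(NUM_FIELDS))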
list(topological.sort(tuples, allitems))
assert False
except exc.CircularDependencyError as err:
- eq_(err.cycles, set(["node1", "node3", "node2", "node5", "node4"]))
+ eq_(err.cycles, {"node1", "node3", "node2", "node5", "node4"})
eq_(
err.edges,
- set(
- [
- ("node3", "node1"),
- ("node4", "node1"),
- ("node2", "node3"),
- ("node1", "node2"),
- ("node4", "node5"),
- ("node5", "node4"),
- ]
- ),
+ {
+ ("node3", "node1"),
+ ("node4", "node1"),
+ ("node2", "node3"),
+ ("node1", "node2"),
+ ("node4", "node5"),
+ ("node5", "node4"),
+ },
)
def test_raise_on_cycle_two(self):
list(topological.sort(tuples, allitems))
assert False
except exc.CircularDependencyError as err:
- eq_(err.cycles, set(["node1", "node3", "node2"]))
+ eq_(err.cycles, {"node1", "node3", "node2"})
eq_(
err.edges,
- set(
- [
- ("node3", "node1"),
- ("node2", "node3"),
- ("node3", "node2"),
- ("node1", "node2"),
- ("node2", "node4"),
- ]
- ),
+ {
+ ("node3", "node1"),
+ ("node2", "node3"),
+ ("node3", "node2"),
+ ("node1", "node2"),
+ ("node2", "node4"),
+ },
)
def test_raise_on_cycle_three(self):
]
eq_(
topological.find_cycles(tuples, self._nodes_from_tuples(tuples)),
- set([node1, node2, node3]),
+ {node1, node2, node3},
)
def test_find_multiple_cycles_one(self):
(node3, node1),
(node3, node2),
]
- allnodes = set(
- [node1, node2, node3, node4, node5, node6, node7, node8, node9]
- )
+ allnodes = {
+ node1,
+ node2,
+ node3,
+ node4,
+ node5,
+ node6,
+ node7,
+ node8,
+ node9,
+ }
eq_(
topological.find_cycles(tuples, allnodes),
- set(
- [
- "node8",
- "node1",
- "node2",
- "node5",
- "node4",
- "node7",
- "node6",
- "node9",
- ]
- ),
+ {
+ "node8",
+ "node1",
+ "node2",
+ "node5",
+ "node4",
+ "node7",
+ "node6",
+ "node9",
+ },
)
def test_find_multiple_cycles_two(self):
(node2, node4),
(node4, node1),
]
- allnodes = set([node1, node2, node3, node4, node5, node6])
+ allnodes = {node1, node2, node3, node4, node5, node6}
# node6 only became present here once [ticket:2282] was addressed.
eq_(
topological.find_cycles(tuples, allnodes),
- set(["node1", "node2", "node4", "node6"]),
+ {"node1", "node2", "node4", "node6"},
)
def test_find_multiple_cycles_three(self):
(node5, node6),
(node6, node2),
]
- allnodes = set([node1, node2, node3, node4, node5, node6])
+ allnodes = {node1, node2, node3, node4, node5, node6}
eq_(topological.find_cycles(tuples, allnodes), allnodes)
def test_find_multiple_cycles_four(self):
allnodes = ["node%d" % i for i in range(1, 21)]
eq_(
topological.find_cycles(tuples, allnodes),
- set(
- [
- "node11",
- "node10",
- "node13",
- "node15",
- "node14",
- "node17",
- "node19",
- "node20",
- "node8",
- "node1",
- "node3",
- "node2",
- "node4",
- "node6",
- ]
- ),
+ {
+ "node11",
+ "node10",
+ "node13",
+ "node15",
+ "node14",
+ "node17",
+ "node19",
+ "node20",
+ "node8",
+ "node1",
+ "node3",
+ "node2",
+ "node4",
+ "node6",
+ },
)
-#! coding:utf-8
-
"""Tests exceptions and DB-API exception wrapping."""
from itertools import product
-#! coding: utf-8
-
import copy
import inspect
from pathlib import Path
eq_(util.to_list("xyz"), ["xyz"])
def test_from_set(self):
- spec = util.to_list(set([1, 2, 3]))
+ spec = util.to_list({1, 2, 3})
assert isinstance(spec, list)
eq_(sorted(spec), [1, 2, 3])
class ColumnCollectionCommon(testing.AssertsCompiledSQL):
def _assert_collection_integrity(self, coll):
- eq_(coll._colset, set(c for k, c, _ in coll._collection))
+ eq_(coll._colset, {c for k, c, _ in coll._collection})
d = {}
for k, col, _ in coll._collection:
d.setdefault(k, (k, col))
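Likewise set(<genexp>) -> {<comprehension>} drops a name lookup and a
generator frame while producing an identical set:

pairs = [("a", 1), ("b", 2), ("a", 3)]
assert {k for k, _ in pairs} == set(k for k, _ in pairs) == {"a", "b"}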
assert True
try:
- s = set([o1, o2])
+ s = {o1, o2}
s |= ids
assert False
except TypeError:
class DictlikeIteritemsTest(fixtures.TestBase):
- baseline = set([("a", 1), ("b", 2), ("c", 3)])
+ baseline = {("a", 1), ("b", 2), ("c", 3)}
def _ok(self, instance):
iterator = util.dictlike_iteritems(instance)
self.e = e
self.f = f
self.g = g
- super(Bar, self).__init__(**kw)
+ super().__init__(**kw)
eq_(
util.generic_repr(
class Bar(Foo):
def __init__(self, b=3, c=4, **kw):
self.c = c
- super(Bar, self).__init__(b=b, **kw)
+ super().__init__(b=b, **kw)
eq_(
util.generic_repr(Bar(a="a", b="b", c="c"), to_inspect=[Bar, Foo]),
def assertAdapted(obj, *methods):
assert isinstance(obj, type)
- found = set([m for m in dir(obj) if not m.startswith("_")])
+ found = {m for m in dir(obj) if not m.startswith("_")}
for method in methods:
assert method in found
found.remove(method)
class TestClassHierarchy(fixtures.TestBase):
def test_object(self):
- eq_(set(util.class_hierarchy(object)), set((object,)))
+ eq_(set(util.class_hierarchy(object)), {object})
def test_single(self):
class A:
class B:
pass
- eq_(set(util.class_hierarchy(A)), set((A, object)))
- eq_(set(util.class_hierarchy(B)), set((B, object)))
+ eq_(set(util.class_hierarchy(A)), {A, object})
+ eq_(set(util.class_hierarchy(B)), {B, object})
class C(A, B):
pass
- eq_(set(util.class_hierarchy(A)), set((A, B, C, object)))
- eq_(set(util.class_hierarchy(B)), set((A, B, C, object)))
+ eq_(set(util.class_hierarchy(A)), {A, B, C, object})
+ eq_(set(util.class_hierarchy(B)), {A, B, C, object})
class TestClassProperty(fixtures.TestBase):
class B(A):
@classproperty
def something(cls):
- d = dict(super(B, cls).something)
+ d = dict(super().something)
d.update({"bazz": 2})
return d
def test_utf8_to_utf8(self):
eq_(
compat.decode_backslashreplace(
- "some message méil".encode("utf-8"), "utf-8"
+ "some message méil".encode(), "utf-8"
),
"some message méil",
)
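Calling .encode() with no argument is safe here because UTF-8 is the default
encoding for str.encode on all Python 3 versions:

s = "some message méil"
assert s.encode() == s.encode("utf-8") == b"some message m\xc3\xa9il"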
class ClsWarningTest(fixtures.TestBase):
@testing.fixture
def dep_cls_fixture(self):
- class Connectable(object):
+ class Connectable:
"""a docstring"""
some_member = "foo"
import inspect
- class PlainClass(object):
+ class PlainClass:
some_member = "bar"
pc_keys = dict(inspect.getmembers(PlainClass()))
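Dropping the explicit (object) base is a no-op on Python 3, where every class
is new-style and implicitly inherits from object:

class PlainClass:
    pass

assert PlainClass.__bases__ == (object,)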
-# -*- encoding: utf-8
from sqlalchemy import bindparam
from sqlalchemy import Column
from sqlalchemy import Computed
-# -*- encoding: utf-8
from unittest.mock import Mock
from sqlalchemy import Column
-# -*- encoding: utf-8
-
from decimal import Decimal
import re
from unittest.mock import Mock
-# -*- encoding: utf-8
import decimal
from sqlalchemy import and_
-# -*- encoding: utf-8
import datetime
import decimal
import random
m2 = MetaData()
t2 = Table("t", m2, autoload_with=connection)
- eq_(set(list(t2.indexes)[0].columns), set([t2.c["x"], t2.c.y]))
+ eq_(set(list(t2.indexes)[0].columns), {t2.c["x"], t2.c.y})
def test_indexes_cols_with_commas(self, metadata, connection):
m2 = MetaData()
t2 = Table("t", m2, autoload_with=connection)
- eq_(set(list(t2.indexes)[0].columns), set([t2.c["x, col"], t2.c.y]))
+ eq_(set(list(t2.indexes)[0].columns), {t2.c["x, col"], t2.c.y})
def test_indexes_cols_with_spaces(self, metadata, connection):
m2 = MetaData()
t2 = Table("t", m2, autoload_with=connection)
- eq_(set(list(t2.indexes)[0].columns), set([t2.c["x col"], t2.c.y]))
+ eq_(set(list(t2.indexes)[0].columns), {t2.c["x col"], t2.c.y})
def test_indexes_with_filtered(self, metadata, connection):
-# -*- encoding: utf-8
import codecs
import datetime
import decimal
-# coding: utf-8
-
from sqlalchemy import BLOB
from sqlalchemy import BOOLEAN
from sqlalchemy import Boolean
-# coding: utf-8
-
import datetime
from sqlalchemy import bindparam
-# coding: utf-8
-
from sqlalchemy import all_
from sqlalchemy import and_
from sqlalchemy import any_
-# coding: utf-8
-
import re
from sqlalchemy import BigInteger
# MySQL converts unique constraints into unique indexes.
# separately we get both
- indexes = dict((i["name"], i) for i in insp.get_indexes("mysql_uc"))
- constraints = set(
+ indexes = {i["name"]: i for i in insp.get_indexes("mysql_uc")}
+ constraints = {
i["name"] for i in insp.get_unique_constraints("mysql_uc")
- )
+ }
self.assert_("uc_a" in indexes)
self.assert_(indexes["uc_a"]["unique"])
# more "official" MySQL construct
reflected = Table("mysql_uc", MetaData(), autoload_with=testing.db)
- indexes = dict((i.name, i) for i in reflected.indexes)
- constraints = set(uc.name for uc in reflected.constraints)
+ indexes = {i.name: i for i in reflected.indexes}
+ constraints = {uc.name for uc in reflected.constraints}
self.assert_("uc_a" in indexes)
self.assert_(indexes["uc_a"].unique)
m.create_all(connection)
eq_(
- dict(
- (rec["name"], rec)
+ {
+ rec["name"]: rec
for rec in inspect(connection).get_foreign_keys("t2")
- ),
+ },
{
"cap_t1id_fk": {
"name": "cap_t1id_fk",
-# coding: utf-8
from collections import OrderedDict
import datetime
import decimal
t.insert(),
[
{"id": 1, "data": set()},
- {"id": 2, "data": set([""])},
- {"id": 3, "data": set(["a", ""])},
- {"id": 4, "data": set(["b"])},
+ {"id": 2, "data": {""}},
+ {"id": 3, "data": {"a", ""}},
+ {"id": 4, "data": {"b"}},
],
)
eq_(
connection.execute(t.select().order_by(t.c.id)).fetchall(),
- [(1, set()), (2, set()), (3, set(["a"])), (4, set(["b"]))],
+ [(1, set()), (2, set()), (3, {"a"}), (4, {"b"})],
)
def test_bitwise_required_for_empty(self):
t.insert(),
[
{"id": 1, "data": set()},
- {"id": 2, "data": set([""])},
- {"id": 3, "data": set(["a", ""])},
- {"id": 4, "data": set(["b"])},
+ {"id": 2, "data": {""}},
+ {"id": 3, "data": {"a", ""}},
+ {"id": 4, "data": {"b"}},
],
)
eq_(
connection.execute(t.select().order_by(t.c.id)).fetchall(),
[
(1, set()),
- (2, set([""])),
- (3, set(["a", ""])),
- (4, set(["b"])),
+ (2, {""}),
+ (3, {"a", ""}),
+ (4, {"b"}),
],
)
expected = [
(
- set(["a"]),
- set(["a"]),
- set(["a"]),
- set(["'a'"]),
- set(["a", "b"]),
+ {"a"},
+ {"a"},
+ {"a"},
+ {"'a'"},
+ {"a", "b"},
),
(
- set(["b"]),
- set(["b"]),
- set(["b"]),
- set(["b"]),
- set(["a", "b"]),
+ {"b"},
+ {"b"},
+ {"b"},
+ {"b"},
+ {"a", "b"},
),
]
res = connection.execute(set_table.select()).fetchall()
)
set_table.create(connection)
- connection.execute(
- set_table.insert(), {"data": set(["réveillé", "drôle"])}
- )
+ connection.execute(set_table.insert(), {"data": {"réveillé", "drôle"}})
row = connection.execute(set_table.select()).first()
- eq_(row, (1, set(["réveillé", "drôle"])))
+ eq_(row, (1, {"réveillé", "drôle"}))
def test_int_roundtrip(self, metadata, connection):
set_table = self._set_fixture_one(metadata)
eq_(
res,
(
- set(["a"]),
- set(["b"]),
- set(["a", "b"]),
- set(["'a'", "b"]),
- set([]),
+ {"a"},
+ {"b"},
+ {"a", "b"},
+ {"'a'", "b"},
+ set(),
),
)
connection.execute(table.delete())
roundtrip([None, None, None], [None] * 3)
- roundtrip(["", "", ""], [set([])] * 3)
- roundtrip([set(["dq"]), set(["a"]), set(["5"])])
- roundtrip(["dq", "a", "5"], [set(["dq"]), set(["a"]), set(["5"])])
- roundtrip([1, 1, 1], [set(["dq"]), set(["a"]), set(["5"])])
- roundtrip([set(["dq", "sq"]), None, set(["9", "5", "7"])])
+ roundtrip(["", "", ""], [set()] * 3)
+ roundtrip([{"dq"}, {"a"}, {"5"}])
+ roundtrip(["dq", "a", "5"], [{"dq"}, {"a"}, {"5"}])
+ roundtrip([1, 1, 1], [{"dq"}, {"a"}, {"5"}])
+ roundtrip([{"dq", "sq"}, None, {"9", "5", "7"}])
connection.execute(
set_table.insert(),
[
- {"s3": set(["5"])},
- {"s3": set(["5", "7"])},
- {"s3": set(["5", "7", "9"])},
- {"s3": set(["7", "9"])},
+ {"s3": {"5"}},
+ {"s3": {"5", "7"}},
+ {"s3": {"5", "7", "9"}},
+ {"s3": {"7", "9"}},
],
)
rows = connection.execute(
select(set_table.c.s3).where(
- set_table.c.s3.in_([set(["5"]), ["5", "7"]])
+ set_table.c.s3.in_([{"5"}, ["5", "7"]])
)
).fetchall()
-# coding: utf-8
from sqlalchemy import and_
from sqlalchemy import bindparam
from sqlalchemy import cast
-# coding: utf-8
-
from multiprocessing import get_context
import re
from unittest import mock
-# coding: utf-8
-
-
from sqlalchemy import CHAR
from sqlalchemy import Double
from sqlalchemy import exc
set(insp.get_table_names()).intersection(
["my_table", "foo_table"]
),
- set(["my_table", "foo_table"]),
+ {"my_table", "foo_table"},
)
def test_reflect_system_table(self):
def test_reflect_all(self, connection):
m = MetaData()
m.reflect(connection)
- eq_(set(t.name for t in m.tables.values()), set(["admin_docindex"]))
+ eq_({t.name for t in m.tables.values()}, {"admin_docindex"})
def all_tables_compression_missing():
# make a dictionary of the reflected objects:
- reflected = dict(
- [
- (obj_definition(i), i)
- for i in reflectedtable.indexes | reflectedtable.constraints
- ]
- )
+ reflected = {
+ obj_definition(i): i
+ for i in reflectedtable.indexes | reflectedtable.constraints
+ }
# assert we got primary key constraint and its name, Error
# if not in dict
-# coding: utf-8
-
-
import datetime
import decimal
import os
-# coding: utf-8
from sqlalchemy import and_
from sqlalchemy import BigInteger
from sqlalchemy import bindparam
i = i.on_conflict_do_update(
constraint=self.excl_constr_anon,
set_=dict(name=i.excluded.name),
- where=((self.table1.c.name != i.excluded.name)),
+ where=(self.table1.c.name != i.excluded.name),
)
self.assert_compile(
i,
i.on_conflict_do_update(
constraint=self.excl_constr_anon,
set_=dict(name=i.excluded.name),
- where=((self.table1.c.name != i.excluded.name)),
+ where=(self.table1.c.name != i.excluded.name),
)
.returning(literal_column("1"))
.cte("i_upsert")
-# coding: utf-8
import dataclasses
import datetime
import logging
-# coding: utf-8
-
from sqlalchemy import Column
from sqlalchemy import exc
from sqlalchemy import Integer
-# coding: utf-8
-
import datetime
from sqlalchemy import and_
-# coding: utf-8
-
import itertools
from operator import itemgetter
import re
table = Table("test_foreigntable", metadata, autoload_with=connection)
eq_(
set(table.columns.keys()),
- set(["id", "data"]),
+ {"id", "data"},
"Columns of reflected foreign table didn't equal expected columns",
)
table = Table("test_mview", metadata, autoload_with=connection)
eq_(
set(table.columns.keys()),
- set(["id", "data"]),
+ {"id", "data"},
"Columns of reflected mview didn't equal expected columns",
)
def test_get_view_names(self, inspect_fixture):
insp, conn = inspect_fixture
- eq_(set(insp.get_view_names()), set(["test_regview"]))
+ eq_(set(insp.get_view_names()), {"test_regview"})
def test_get_materialized_view_names(self, inspect_fixture):
insp, conn = inspect_fixture
- eq_(set(insp.get_materialized_view_names()), set(["test_mview"]))
+ eq_(set(insp.get_materialized_view_names()), {"test_mview"})
def test_get_view_names_reflection_cache_ok(self, connection):
insp = inspect(connection)
- eq_(set(insp.get_view_names()), set(["test_regview"]))
+ eq_(set(insp.get_view_names()), {"test_regview"})
eq_(
set(insp.get_materialized_view_names()),
- set(["test_mview"]),
+ {"test_mview"},
)
eq_(
set(insp.get_view_names()).union(
insp.get_materialized_view_names()
),
- set(["test_regview", "test_mview"]),
+ {"test_regview", "test_mview"},
)
def test_get_view_definition(self, connection):
table = Table("testtable", metadata, autoload_with=connection)
eq_(
set(table.columns.keys()),
- set(["question", "answer"]),
+ {"question", "answer"},
"Columns of reflected table didn't equal expected columns",
)
assert isinstance(table.c.answer.type, Integer)
)
eq_(
set(table.columns.keys()),
- set(["question", "answer", "anything"]),
+ {"question", "answer", "anything"},
"Columns of reflected table didn't equal expected columns",
)
assert isinstance(table.c.anything.type, Integer)
eq_(
set(meta2.tables),
- set(["test_schema_2.some_other_table", "some_table"]),
+ {"test_schema_2.some_other_table", "some_table"},
)
meta3 = MetaData()
eq_(
set(meta3.tables),
- set(
- [
- "test_schema_2.some_other_table",
- "test_schema.some_table",
- ]
- ),
+ {
+ "test_schema_2.some_other_table",
+ "test_schema.some_table",
+ },
)
def test_cross_schema_reflection_metadata_uses_schema(
eq_(
set(meta2.tables),
- set(["some_other_table", "test_schema.some_table"]),
+ {"some_other_table", "test_schema.some_table"},
)
def test_uppercase_lowercase_table(self, metadata, connection):
# PostgreSQL will create an implicit index for a unique
# constraint. Separately we get both
- indexes = set(i["name"] for i in insp.get_indexes("pgsql_uc"))
- constraints = set(
+ indexes = {i["name"] for i in insp.get_indexes("pgsql_uc")}
+ constraints = {
i["name"] for i in insp.get_unique_constraints("pgsql_uc")
- )
+ }
self.assert_("uc_a" in indexes)
self.assert_("uc_a" in constraints)
# reflection corrects for the dupe
reflected = Table("pgsql_uc", MetaData(), autoload_with=connection)
- indexes = set(i.name for i in reflected.indexes)
- constraints = set(uc.name for uc in reflected.constraints)
+ indexes = {i.name for i in reflected.indexes}
+ constraints = {uc.name for uc in reflected.constraints}
self.assert_("uc_a" not in indexes)
self.assert_("uc_a" in constraints)
uc_table.create(connection)
- indexes = dict((i["name"], i) for i in insp.get_indexes("pgsql_uc"))
- constraints = set(
+ indexes = {i["name"]: i for i in insp.get_indexes("pgsql_uc")}
+ constraints = {
i["name"] for i in insp.get_unique_constraints("pgsql_uc")
- )
+ }
self.assert_("ix_a" in indexes)
assert indexes["ix_a"]["unique"]
reflected = Table("pgsql_uc", MetaData(), autoload_with=connection)
- indexes = dict((i.name, i) for i in reflected.indexes)
- constraints = set(uc.name for uc in reflected.constraints)
+ indexes = {i.name: i for i in reflected.indexes}
+ constraints = {uc.name for uc in reflected.constraints}
self.assert_("ix_a" in indexes)
assert indexes["ix_a"].unique
reflected = Table("pgsql_cc", MetaData(), autoload_with=connection)
- check_constraints = dict(
- (uc.name, uc.sqltext.text)
+ check_constraints = {
+ uc.name: uc.sqltext.text
for uc in reflected.constraints
if isinstance(uc, CheckConstraint)
- )
+ }
eq_(
check_constraints,
-# coding: utf-8
import datetime
import decimal
from enum import Enum as _PY_Enum
)
# hashable
eq_(
- set(row[1] for row in r),
- set([("1", "2", "3"), ("4", "5", "6"), (("4", "5"), ("6", "7"))]),
+ {row[1] for row in r},
+ {("1", "2", "3"), ("4", "5", "6"), (("4", "5"), ("6", "7"))},
)
def test_array_plus_native_enum_create(self, metadata, connection):
t.create(connection)
eq_(
- set(e["name"] for e in inspect(connection).get_enums()),
- set(["my_enum_1", "my_enum_2", "my_enum_3"]),
+ {e["name"] for e in inspect(connection).get_enums()},
+ {"my_enum_1", "my_enum_2", "my_enum_3"},
)
t.drop(connection)
eq_(inspect(connection).get_enums(), [])
return sa.cast(bindvalue, self)
def result_processor(self, dialect, coltype):
- super_rp = super(_ArrayOfEnum, self).result_processor(dialect, coltype)
+ super_rp = super().result_processor(dialect, coltype)
def handle_raw_string(value):
inner = re.match(r"^{(.*)}$", value).group(1)
),
)
def test_where(self, whereclause_fn, expected):
- super(JSONBTest, self).test_where(whereclause_fn, expected)
+ super().test_where(whereclause_fn, expected)
class JSONBRoundTripTest(JSONRoundTripTest):
@testing.requires.postgresql_utf8_server_encoding
def test_unicode_round_trip(self, connection):
- super(JSONBRoundTripTest, self).test_unicode_round_trip(connection)
+ super().test_unicode_round_trip(connection)
@testing.only_on("postgresql >= 12")
def test_cast_jsonpath(self, connection):
-#!coding: utf-8
-
"""SQLite-specific tests."""
import datetime
import json
-# coding: utf-8
-
import collections.abc as collections_abc
from contextlib import contextmanager
from contextlib import nullcontext
def test_stmt_exception_bytestring_utf8(self):
# uncommon case for Py3K, bytestring object passed
# as the error message
- message = "some message méil".encode("utf-8")
+ message = "some message méil".encode()
err = tsa.exc.SQLAlchemyError(message)
eq_(str(err), "some message méil")
eq_(str(err), "('some message', 206)")
def test_stmt_exception_str_multi_args_bytestring(self):
- message = "some message méil".encode("utf-8")
+ message = "some message méil".encode()
err = tsa.exc.SQLAlchemyError(message, 206)
eq_(str(err), str((message, 206)))
eq_(
canary,
[
- ("begin", set(["conn"])),
+ ("begin", {"conn"}),
(
"execute",
- set(
- [
- "conn",
- "clauseelement",
- "multiparams",
- "params",
- "execution_options",
- ]
- ),
+ {
+ "conn",
+ "clauseelement",
+ "multiparams",
+ "params",
+ "execution_options",
+ },
),
(
"cursor_execute",
- set(
- [
- "conn",
- "cursor",
- "executemany",
- "statement",
- "parameters",
- "context",
- ]
- ),
+ {
+ "conn",
+ "cursor",
+ "executemany",
+ "statement",
+ "parameters",
+ "context",
+ },
),
- ("rollback", set(["conn"])),
- ("begin", set(["conn"])),
+ ("rollback", {"conn"}),
+ ("begin", {"conn"}),
(
"execute",
- set(
- [
- "conn",
- "clauseelement",
- "multiparams",
- "params",
- "execution_options",
- ]
- ),
+ {
+ "conn",
+ "clauseelement",
+ "multiparams",
+ "params",
+ "execution_options",
+ },
),
(
"cursor_execute",
- set(
- [
- "conn",
- "cursor",
- "executemany",
- "statement",
- "parameters",
- "context",
- ]
- ),
+ {
+ "conn",
+ "cursor",
+ "executemany",
+ "statement",
+ "parameters",
+ "context",
+ },
),
- ("commit", set(["conn"])),
+ ("commit", {"conn"}),
],
)
class SomeDialect(cls_):
def initialize(self, connection):
- super(SomeDialect, self).initialize(connection)
+ super().initialize(connection)
m1.initialize(connection)
def on_connect(self):
- oc = super(SomeDialect, self).on_connect()
+ oc = super().on_connect()
def my_on_connect(conn):
if oc:
supports_statement_cache = True
def initialize(self, connection):
- super(SomeDialect, self).initialize(connection)
+ super().initialize(connection)
m1.append("initialize")
def on_connect(self):
- oc = super(SomeDialect, self).on_connect()
+ oc = super().on_connect()
def my_on_connect(conn):
if oc:
# two pooled connections unclosed.
eq_(
- set([c.close.call_count for c in strong_refs]),
- set([1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0]),
+ {c.close.call_count for c in strong_refs},
+ {1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0},
)
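Note the rewritten literal above still contains duplicate elements; as with
the original set([...]) call they collapse, so the comparison is effectively
against {0, 1}. The rewrite is mechanical and preserves semantics even where
the original expression was already redundant.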
def test_recycle(self):
class TrackQueuePool(pool.QueuePool):
def __init__(self, *arg, **kw):
pools.append(self)
- super(TrackQueuePool, self).__init__(*arg, **kw)
+ super().__init__(*arg, **kw)
def creator():
return slow_closing_connection.connect()
)
meta.create_all(connection)
meta2 = MetaData()
- t1r, t2r, t3r = [
+ t1r, t2r, t3r = (
Table(x, meta2, autoload_with=connection)
for x in ("t1", "t2", "t3")
- ]
+ )
assert t1r.c.t2id.references(t2r.c.id)
assert t1r.c.t3id.references(t3r.c.id)
extend_existing=True,
autoload_with=connection,
)
- eq_(set(t2.columns.keys()), set(["x", "y", "z", "q", "id"]))
+ eq_(set(t2.columns.keys()), {"x", "y", "z", "q", "id"})
# this has been the actual behavior, the cols are added together,
# however the test wasn't checking this correctly
extend_existing=False,
autoload_with=connection,
)
- eq_(set(t3.columns.keys()), set(["z"]))
+ eq_(set(t3.columns.keys()), {"z"})
m4 = MetaData()
old_z = Column("z", String, primary_key=True)
autoload_replace=False,
autoload_with=connection,
)
- eq_(set(t4.columns.keys()), set(["x", "y", "z", "q", "id"]))
+ eq_(set(t4.columns.keys()), {"x", "y", "z", "q", "id"})
eq_(list(t4.primary_key.columns), [t4.c.z, t4.c.id])
assert t4.c.z is old_z
assert t4.c.y is old_y
m2 = MetaData()
m2.reflect(connection, only=["rt_a", "rt_b"])
- eq_(set(m2.tables.keys()), set(["rt_a", "rt_b"]))
+ eq_(set(m2.tables.keys()), {"rt_a", "rt_b"})
m3 = MetaData()
m3.reflect(connection, only=lambda name, meta: name == "rt_c")
- eq_(set(m3.tables.keys()), set(["rt_c"]))
+ eq_(set(m3.tables.keys()), {"rt_c"})
m4 = MetaData()
m8_e2 = MetaData()
rt_c = Table("rt_c", m8_e2)
m8_e2.reflect(connection, extend_existing=True, only=["rt_a", "rt_c"])
- eq_(set(m8_e2.tables.keys()), set(["rt_a", "rt_c"]))
+ eq_(set(m8_e2.tables.keys()), {"rt_a", "rt_c"})
eq_(rt_c.c.keys(), ["id"])
baseline.drop_all(connection)
# Make sure indexes are in the order we expect them in
tmp = [(idx.name, idx) for idx in t2.indexes]
tmp.sort()
- r1, r2, r3 = [idx[1] for idx in tmp]
+ r1, r2, r3 = (idx[1] for idx in tmp)
assert r1.name == "idx1"
assert r2.name == "idx2"
assert r1.unique == True # noqa
assert r2.unique == False # noqa
assert r3.unique == False # noqa
- assert set([t2.c.id]) == set(r1.columns)
- assert set([t2.c.name, t2.c.id]) == set(r2.columns)
- assert set([t2.c.name]) == set(r3.columns)
+ assert {t2.c.id} == set(r1.columns)
+ assert {t2.c.name, t2.c.id} == set(r2.columns)
+ assert {t2.c.name} == set(r3.columns)
@testing.requires.comment_reflection
def test_comment_reflection(self, connection, metadata):
m2 = MetaData()
m2.reflect(connection, views=False)
- eq_(
- set(m2.tables), set(["users", "email_addresses", "dingalings"])
- )
+ eq_(set(m2.tables), {"users", "email_addresses", "dingalings"})
m2 = MetaData()
m2.reflect(connection, views=True)
eq_(
set(m2.tables),
- set(
- [
- "email_addresses_v",
- "users_v",
- "users",
- "dingalings",
- "email_addresses",
- ]
- ),
+ {
+ "email_addresses_v",
+ "users_v",
+ "users",
+ "dingalings",
+ "email_addresses",
+ },
)
finally:
_drop_views(connection)
addresses.append_constraint(fk)
addresses.append_constraint(fk)
assert len(addresses.c.user_id.foreign_keys) == 1
- assert addresses.constraints == set([addresses.primary_key, fk])
+ assert addresses.constraints == {addresses.primary_key, fk}
class UnicodeReflectionTest(fixtures.TablesTest):
@classmethod
def define_tables(cls, metadata):
- no_multibyte_period = set([("plain", "col_plain", "ix_plain")])
+ no_multibyte_period = {("plain", "col_plain", "ix_plain")}
no_has_table = [
(
"no_has_table_1",
# (others?) expect non-unicode strings in result sets/bind
# params
- names = set([rec[0] for rec in self.names])
+ names = {rec[0] for rec in self.names}
reflected = set(inspect(connection).get_table_names())
# explicitly NFC). Maybe this database normalizes NFD
# on reflection.
- nfc = set([unicodedata.normalize("NFC", n) for n in names])
+ nfc = {unicodedata.normalize("NFC", n) for n in names}
self.assert_(nfc == names)
# Yep. But still ensure that bulk reflection and
@testing.requires.unicode_connections
def test_get_names(self, connection):
inspector = inspect(connection)
- names = dict(
- (tname, (cname, ixname)) for tname, cname, ixname in self.names
- )
+ names = {tname: (cname, ixname) for tname, cname, ixname in self.names}
for tname in inspector.get_table_names():
assert tname in names
eq_(
eq_(
set(meta2.tables),
- set(
- [
- "some_other_table",
- "%s.some_table" % testing.config.test_schema,
- ]
- ),
+ {
+ "some_other_table",
+ "%s.some_table" % testing.config.test_schema,
+ },
)
@testing.requires.schemas
m2.reflect(connection)
eq_(
set(m2.tables),
- set(
- [
- "%s.dingalings" % testing.config.test_schema,
- "%s.users" % testing.config.test_schema,
- "%s.email_addresses" % testing.config.test_schema,
- ]
- ),
+ {
+ "%s.dingalings" % testing.config.test_schema,
+ "%s.users" % testing.config.test_schema,
+ "%s.email_addresses" % testing.config.test_schema,
+ },
)
@testing.requires.schemas
m3.reflect(connection, schema=testing.config.test_schema)
eq_(
- set((t.name, t.schema) for t in m2.tables.values()),
- set((t.name, t.schema) for t in m3.tables.values()),
+ {(t.name, t.schema) for t in m2.tables.values()},
+ {(t.name, t.schema) for t in m3.tables.values()},
)
@testing.fails_if(testing.requires._has_mysql_on_windows)
def test_table_names(self, connection):
x = inspect(connection).get_table_names()
- assert set(["SomeTable", "SomeOtherTable"]).issubset(x)
+ assert {"SomeTable", "SomeOtherTable"}.issubset(x)
def test_reflect_exact_name(self, connection):
m = MetaData()
def test_override_key(self, connection):
def assertions(table):
eq_(table.c.YXZ.name, "x")
- eq_(set(table.primary_key), set([table.c.YXZ]))
+ eq_(set(table.primary_key), {table.c.YXZ})
self._do_test(connection, "x", {"key": "YXZ"}, assertions)
eq_([c.name for c in b2.c], ["x", "q", "p"])
# no FK, whether or not resolve_fks was called
- eq_(b2.constraints, set((b2.primary_key,)))
+ eq_(b2.constraints, {b2.primary_key})
b2a = b2.alias()
eq_([c.name for c in b2a.c], ["x", "q", "p"])
[
A(
data="a1",
- collection=set(
- [
- B(data="a1b1", b_data="a1b1"),
- C(data="a1b2", c_data="a1c1"),
- B(data="a1b2", b_data="a1b2"),
- C(data="a1c2", c_data="a1c2"),
- ]
- ),
+ collection={
+ B(data="a1b1", b_data="a1b1"),
+ C(data="a1b2", c_data="a1c1"),
+ B(data="a1b2", b_data="a1b2"),
+ C(data="a1c2", c_data="a1c2"),
+ },
),
A(
data="a2",
- collection=set(
- [
- B(data="a2b1", b_data="a2b1"),
- C(data="a2c1", c_data="a2c1"),
- B(data="a2b2", b_data="a2b2"),
- C(data="a2c2", c_data="a2c2"),
- ]
- ),
+ collection={
+ B(data="a2b1", b_data="a2b1"),
+ C(data="a2c1", c_data="a2c1"),
+ B(data="a2b2", b_data="a2b2"),
+ C(data="a2c2", c_data="a2c2"),
+ },
),
]
)
[
A(
data="a2",
- collection=set(
- [
- B(data="a2b1", b_data="a2b1"),
- B(data="a2b2", b_data="a2b2"),
- C(data="a2c1", c_data="a2c1"),
- C(data="a2c2", c_data="a2c2"),
- ]
- ),
+ collection={
+ B(data="a2b1", b_data="a2b1"),
+ B(data="a2b2", b_data="a2b2"),
+ C(data="a2c1", c_data="a2c1"),
+ C(data="a2c2", c_data="a2c2"),
+ },
)
],
)
class DeferredReflectBase(DeclarativeReflectionBase):
def teardown_test(self):
- super(DeferredReflectBase, self).teardown_test()
+ super().teardown_test()
_DeferredMapperConfig._configs.clear()
# EXPECTED_TYPE: AppenderQuery[Address]
reveal_type(u.addresses)
- u.addresses = set([Address(), Address()])
+ u.addresses = {Address(), Address()}
if typing.TYPE_CHECKING:
# still an AppenderQuery
@testing.fixture(scope="function")
def per_func_cachedir(self):
- for item in self._cachedir():
- yield item
+ yield from self._cachedir()
@testing.fixture(scope="class")
def cachedir(self):
- for item in self._cachedir():
- yield item
+ yield from self._cachedir()
def _cachedir(self):
# as of mypy 0.971 I think we need to keep mypy_path empty
self.assert_(len(p1._children) == 3)
self.assert_(len(p1.children) == 3)
- self.assert_(set(p1.children) == set(["d", "e", "f"]))
+ self.assert_(set(p1.children) == {"d", "e", "f"})
del ch
p1 = self.roundtrip(p1)
self.assert_(len(p1.children) == 2)
self.assert_(len(p1._children) == 2)
- self.assert_(
- set([o.name for o in p1._children]) == set(["regular", "proxied"])
- )
+ self.assert_({o.name for o in p1._children} == {"regular", "proxied"})
ch2 = None
for o in p1._children:
self.assert_(len(p1._children) == 1)
self.assert_(len(p1.children) == 1)
- self.assert_(p1._children == set([ch1]))
+ self.assert_(p1._children == {ch1})
p1.children.remove("regular")
self.assert_("b" in p1.children)
self.assert_("d" not in p1.children)
- self.assert_(p1.children == set(["a", "b", "c"]))
+ self.assert_(p1.children == {"a", "b", "c"})
assert_raises(KeyError, p1.children.remove, "d")
p1.children = ["a", "b", "c"]
p1 = self.roundtrip(p1)
- self.assert_(p1.children == set(["a", "b", "c"]))
+ self.assert_(p1.children == {"a", "b", "c"})
p1.children.discard("b")
p1 = self.roundtrip(p1)
- self.assert_(p1.children == set(["a", "c"]))
+ self.assert_(p1.children == {"a", "c"})
p1.children.remove("a")
p1 = self.roundtrip(p1)
- self.assert_(p1.children == set(["c"]))
+ self.assert_(p1.children == {"c"})
p1._children = set()
self.assert_(len(p1.children) == 0)
p1 = Parent("P1")
p1.children = ["a", "b", "c"]
- control = set(["a", "b", "c"])
+ control = {"a", "b", "c"}
for other in (
- set(["a", "b", "c"]),
- set(["a", "b", "c", "d"]),
- set(["a"]),
- set(["a", "b"]),
- set(["c", "d"]),
- set(["e", "f", "g"]),
+ {"a", "b", "c"},
+ {"a", "b", "c", "d"},
+ {"a"},
+ {"a", "b"},
+ {"c", "d"},
+ {"e", "f", "g"},
set(),
):
):
for base in (["a", "b", "c"], []):
for other in (
- set(["a", "b", "c"]),
- set(["a", "b", "c", "d"]),
- set(["a"]),
- set(["a", "b"]),
- set(["c", "d"]),
- set(["e", "f", "g"]),
+ {"a", "b", "c"},
+ {"a", "b", "c", "d"},
+ {"a"},
+ {"a", "b"},
+ {"c", "d"},
+ {"e", "f", "g"},
set(),
):
p = Parent("p")
for op in ("|=", "-=", "&=", "^="):
for base in (["a", "b", "c"], []):
for other in (
- set(["a", "b", "c"]),
- set(["a", "b", "c", "d"]),
- set(["a"]),
- set(["a", "b"]),
- set(["c", "d"]),
- set(["e", "f", "g"]),
+ {"a", "b", "c"},
+ {"a", "b", "c", "d"},
+ {"a"},
+ {"a", "b"},
+ {"c", "d"},
+ {"e", "f", "g"},
frozenset(["e", "f", "g"]),
set(),
):
add_child("p1", "c2")
session.flush()
p = session.query(Parent).filter_by(name="p1").one()
- assert set(p.kids) == set(["c1", "c2"]), p.kids
+ assert set(p.kids) == {"c1", "c2"}, p.kids
def test_copy(self):
self.mapper_registry.map_imperatively(
p_copy = copy.copy(p)
del p
gc_collect()
- assert set(p_copy.kids) == set(["c1", "c2"]), p_copy.kids
+ assert set(p_copy.kids) == {"c1", "c2"}, p_copy.kids
def test_pickle_list(self):
self.mapper_registry.map_imperatively(
p = Parent("p1")
p.kids.update(["c1", "c2"])
r1 = pickle.loads(pickle.dumps(p))
- assert r1.kids == set(["c1", "c2"])
+ assert r1.kids == {"c1", "c2"}
# can't do this without parent having a cycle
# r2 = pickle.loads(pickle.dumps(p.kids))
Address = Base.classes.addresses
a1 = Address(email_address="e1")
- u1 = User(name="u1", addresses_collection=set([a1]))
+ u1 = User(name="u1", addresses_collection={a1})
assert a1.user is u1
def test_prepare_w_only(self):
)
Base.prepare(generate_relationship=_gen_relationship)
- assert set(tuple(c[1]) for c in mock.mock_calls).issuperset(
+ assert {tuple(c[1]) for c in mock.mock_calls}.issuperset(
[
(Base, interfaces.MANYTOONE, "nodes"),
(Base, interfaces.MANYTOMANY, "keywords_collection"),
# original query still works
eq_(
- set([(u.id, u.name) for u in bq(sess).all()]),
- set([(8, "ed"), (9, "fred")]),
+ {(u.id, u.name) for u in bq(sess).all()},
+ {(8, "ed"), (9, "fred")},
)
def test_count_with_bindparams(self):
inherit_cache = False
def __init__(self, arg=None):
- super(MyThingy, self).__init__(arg or "MYTHINGY!")
+ super().__init__(arg or "MYTHINGY!")
@compiles(MyThingy)
def visit_thingy(thingy, compiler, **kw):
inherit_cache = False
def __init__(self):
- super(MyThingy, self).__init__("MYTHINGY!")
+ super().__init__("MYTHINGY!")
@compiles(MyThingy)
def visit_thingy(thingy, compiler, **kw):
sess = self._fixture_data()
eq_(
- set(row.temperature for row in sess.query(Report.temperature)),
+ {row.temperature for row in sess.query(Report.temperature)},
{80.0, 75.0, 85.0},
)
temps = sess.query(Report).all()
- eq_(set(t.temperature for t in temps), {80.0, 75.0, 85.0})
+ eq_({t.temperature for t in temps}, {80.0, 75.0, 85.0})
if legacy:
sess.query(Report).filter(Report.temperature >= 80).update(
# test synchronize session
def go():
- eq_(set(t.temperature for t in temps), {86.0, 75.0, 91.0})
+ eq_({t.temperature for t in temps}, {86.0, 75.0, 91.0})
self.assert_sql_count(
sess._ShardedSession__binds["north_america"], go, 0
)
eq_(
- set(row.temperature for row in sess.query(Report.temperature)),
+ {row.temperature for row in sess.query(Report.temperature)},
{86.0, 75.0, 91.0},
)
sess = self._fixture_data()
temps = sess.query(Report).all()
- eq_(set(t.temperature for t in temps), {80.0, 75.0, 85.0})
+ eq_({t.temperature for t in temps}, {80.0, 75.0, 85.0})
if legacy:
sess.query(Report).filter(Report.temperature >= 80).delete(
)
eq_(
- set(row.temperature for row in sess.query(Report.temperature)),
+ {row.temperature for row in sess.query(Report.temperature)},
{75.0},
)
from sqlalchemy import literal
symbols = ("usd", "gbp", "cad", "eur", "aud")
- currency_lookup = dict(
- ((currency_from, currency_to), Decimal(str(rate)))
+ currency_lookup = {
+ (currency_from, currency_to): Decimal(str(rate))
for currency_to, values in zip(
symbols,
[
],
)
for currency_from, rate in zip(symbols, values)
- )
+ }
class Amount:
def __init__(self, amount, currency):
class json_property(index_property):
def __init__(self, attr_name, index, cast_type):
- super(json_property, self).__init__(attr_name, index)
+ super().__init__(attr_name, index)
self.cast_type = cast_type
def expr(self, model):
- expr = super(json_property, self).expr(model)
+ expr = super().expr(model)
return expr.astext.cast(self.cast_type)
class Json(fixtures.ComparableEntity, Base):
ValueError,
"Attribute 'data' does not accept objects of type",
Foo,
- data=set([1, 2, 3]),
+ data={1, 2, 3},
)
def test_in_place_mutation(self):
ValueError,
"Attribute 'data' does not accept objects of type",
Foo,
- data=set([1, 2, 3]),
+ data={1, 2, 3},
)
def test_in_place_mutation(self):
def test_clear(self):
sess = fixture_session()
- f1 = Foo(data=set([1, 2]))
+ f1 = Foo(data={1, 2})
sess.add(f1)
sess.commit()
def test_pop(self):
sess = fixture_session()
- f1 = Foo(data=set([1]))
+ f1 = Foo(data={1})
sess.add(f1)
sess.commit()
def test_add(self):
sess = fixture_session()
- f1 = Foo(data=set([1, 2]))
+ f1 = Foo(data={1, 2})
sess.add(f1)
sess.commit()
f1.data.add(5)
sess.commit()
- eq_(f1.data, set([1, 2, 5]))
+ eq_(f1.data, {1, 2, 5})
def test_update(self):
sess = fixture_session()
- f1 = Foo(data=set([1, 2]))
+ f1 = Foo(data={1, 2})
sess.add(f1)
sess.commit()
- f1.data.update(set([2, 5]))
+ f1.data.update({2, 5})
sess.commit()
- eq_(f1.data, set([1, 2, 5]))
+ eq_(f1.data, {1, 2, 5})
def test_binary_update(self):
sess = fixture_session()
- f1 = Foo(data=set([1, 2]))
+ f1 = Foo(data={1, 2})
sess.add(f1)
sess.commit()
- f1.data |= set([2, 5])
+ f1.data |= {2, 5}
sess.commit()
- eq_(f1.data, set([1, 2, 5]))
+ eq_(f1.data, {1, 2, 5})
def test_intersection_update(self):
sess = fixture_session()
- f1 = Foo(data=set([1, 2]))
+ f1 = Foo(data={1, 2})
sess.add(f1)
sess.commit()
- f1.data.intersection_update(set([2, 5]))
+ f1.data.intersection_update({2, 5})
sess.commit()
- eq_(f1.data, set([2]))
+ eq_(f1.data, {2})
def test_binary_intersection_update(self):
sess = fixture_session()
- f1 = Foo(data=set([1, 2]))
+ f1 = Foo(data={1, 2})
sess.add(f1)
sess.commit()
- f1.data &= set([2, 5])
+ f1.data &= {2, 5}
sess.commit()
- eq_(f1.data, set([2]))
+ eq_(f1.data, {2})
def test_difference_update(self):
sess = fixture_session()
- f1 = Foo(data=set([1, 2]))
+ f1 = Foo(data={1, 2})
sess.add(f1)
sess.commit()
- f1.data.difference_update(set([2, 5]))
+ f1.data.difference_update({2, 5})
sess.commit()
- eq_(f1.data, set([1]))
+ eq_(f1.data, {1})
def test_operator_difference_update(self):
sess = fixture_session()
- f1 = Foo(data=set([1, 2]))
+ f1 = Foo(data={1, 2})
sess.add(f1)
sess.commit()
- f1.data -= set([2, 5])
+ f1.data -= {2, 5}
sess.commit()
- eq_(f1.data, set([1]))
+ eq_(f1.data, {1})
def test_symmetric_difference_update(self):
sess = fixture_session()
- f1 = Foo(data=set([1, 2]))
+ f1 = Foo(data={1, 2})
sess.add(f1)
sess.commit()
- f1.data.symmetric_difference_update(set([2, 5]))
+ f1.data.symmetric_difference_update({2, 5})
sess.commit()
- eq_(f1.data, set([1, 5]))
+ eq_(f1.data, {1, 5})
def test_binary_symmetric_difference_update(self):
sess = fixture_session()
- f1 = Foo(data=set([1, 2]))
+ f1 = Foo(data={1, 2})
sess.add(f1)
sess.commit()
- f1.data ^= set([2, 5])
+ f1.data ^= {2, 5}
sess.commit()
- eq_(f1.data, set([1, 5]))
+ eq_(f1.data, {1, 5})
def test_remove(self):
sess = fixture_session()
- f1 = Foo(data=set([1, 2, 3]))
+ f1 = Foo(data={1, 2, 3})
sess.add(f1)
sess.commit()
f1.data.remove(2)
sess.commit()
- eq_(f1.data, set([1, 3]))
+ eq_(f1.data, {1, 3})
def test_discard(self):
sess = fixture_session()
- f1 = Foo(data=set([1, 2, 3]))
+ f1 = Foo(data={1, 2, 3})
sess.add(f1)
sess.commit()
f1.data.discard(2)
sess.commit()
- eq_(f1.data, set([1, 3]))
+ eq_(f1.data, {1, 3})
f1.data.discard(2)
sess.commit()
- eq_(f1.data, set([1, 3]))
+ eq_(f1.data, {1, 3})
def test_pickle_parent(self):
sess = fixture_session()
- f1 = Foo(data=set([1, 2]))
+ f1 = Foo(data={1, 2})
sess.add(f1)
sess.commit()
f1.data
def test_unrelated_flush(self):
sess = fixture_session()
- f1 = Foo(data=set([1, 2]), unrelated_data="unrelated")
+ f1 = Foo(data={1, 2}, unrelated_data="unrelated")
sess.add(f1)
sess.flush()
f1.unrelated_data = "unrelated 2"
sess.flush()
f1.data.add(3)
sess.commit()
- eq_(f1.data, set([1, 2, 3]))
+ eq_(f1.data, {1, 2, 3})
def test_copy(self):
- f1 = Foo(data=set([1, 2]))
+ f1 = Foo(data={1, 2})
f1.data = copy.copy(f1.data)
- eq_(f1.data, set([1, 2]))
+ eq_(f1.data, {1, 2})
def test_deepcopy(self):
- f1 = Foo(data=set([1, 2]))
+ f1 = Foo(data={1, 2})
f1.data = copy.deepcopy(f1.data)
- eq_(f1.data, set([1, 2]))
+ eq_(f1.data, {1, 2})
class _MutableNoHashFixture:
@classmethod
def _type_fixture(cls):
if not (getattr(cls, "CustomMutableDict")):
- MutableDict = super(
- CustomMutableAssociationScalarJSONTest, cls
- )._type_fixture()
+ MutableDict = super()._type_fixture()
class CustomMutableDict(MutableDict):
pass
-# coding: utf-8
-
from sqlalchemy import desc
from sqlalchemy import ForeignKey
from sqlalchemy import func
configure_mappers()
eq_(
Parent.children.property._calculated_foreign_keys,
- set([Child.name_upper.property.columns[0]]),
+ {Child.name_upper.property.columns[0]},
)
def test_class_has_registry_attr(self, registry):
adr_count = Address.id
- eq_(set(User.__table__.c.keys()), set(["id", "name"]))
- eq_(set(Address.__table__.c.keys()), set(["id", "email", "user_id"]))
+ eq_(set(User.__table__.c.keys()), {"id", "name"})
+ eq_(set(Address.__table__.c.keys()), {"id", "email", "user_id"})
def test_deferred(self):
class User(Base, fixtures.ComparableEntity):
}
assert class_mapper(Person).version_id_col == "a"
- assert class_mapper(Person).include_properties == set(["id", "a", "b"])
+ assert class_mapper(Person).include_properties == {"id", "a", "b"}
def test_custom_join_condition(self):
class Foo(Base):
baz = _column(String(100), nullable=False, index=True)
@mapper_registry.mapped
- class MyModel(MyMixin, object):
+ class MyModel(MyMixin):
__tablename__ = "test"
name = Column(String(100), nullable=False, index=True)
eq_(
asserted,
{
- "a": set([A.my_attr.property.columns[0]]),
- "b": set([B.my_attr.property.columns[0]]),
+ "a": {A.my_attr.property.columns[0]},
+ "b": {B.my_attr.property.columns[0]},
},
)
__tablename__ = "q"
id = Column(Integer, primary_key=True)
- eq_(set(Base.metadata.tables), set(["y", "z", "q"]))
+ eq_(set(Base.metadata.tables), {"y", "z", "q"})
def test_middle_abstract_attributes(self):
# test for [ticket:3219]
class RoutingSession(Session):
def get_bind(self, **kw):
received.append(type(kw["clause"]))
- return super(RoutingSession, self).get_bind(**kw)
+ return super().get_bind(**kw)
stmt = stmt.execution_options(synchronize_session=sync_type)
eq_(
set(s.query(Document.id, Document.flag)),
- set(
- [
- (1, True),
- (2, None),
- (3, None),
- (4, True),
- (5, True),
- (6, None),
- ]
- ),
+ {
+ (1, True),
+ (2, None),
+ (3, None),
+ (4, True),
+ (5, True),
+ (6, None),
+ },
)
@testing.requires.delete_using
eq_(
set(s.query(Document.id, Document.flag)),
- set([(2, None), (3, None), (6, None)]),
+ {(2, None), (3, None), (6, None)},
)
def test_no_eval_against_multi_table_criteria(self):
eq_(
set(s.query(Document.id, Document.flag)),
- set(
- [
- (1, True),
- (2, None),
- (3, None),
- (4, True),
- (5, True),
- (6, None),
- ]
- ),
+ {
+ (1, True),
+ (2, None),
+ (3, None),
+ (4, True),
+ (5, True),
+ (6, None),
+ },
)
@testing.requires.update_where_target_in_subquery
eq_(
set(s.query(Document.id, Document.flag)),
- set(
- [
- (1, True),
- (2, False),
- (3, False),
- (4, True),
- (5, True),
- (6, False),
- ]
- ),
+ {
+ (1, True),
+ (2, False),
+ (3, False),
+ (4, True),
+ (5, True),
+ (6, False),
+ },
)
@testing.requires.multi_table_update
eq_(
set(s.query(Person.name, Engineer.engineer_name)),
- set([("e1", "e1"), ("e2", "e5"), ("pp1", "pp1")]),
+ {("e1", "e1"), ("e2", "e5"), ("pp1", "pp1")},
)
@testing.requires.delete_using
# delete actually worked
eq_(
set(s.query(Person.name, Engineer.engineer_name)),
- set([("pp1", "pp1"), ("e1", "e1")]),
+ {("pp1", "pp1"), ("e1", "e1")},
)
@testing.only_on(["mysql", "mariadb"], "Multi table update")
eq_(
set(s.query(Person.name, Engineer.engineer_name)),
- set([("e1", "e1"), ("e22", "e55"), ("pp1", "pp1")]),
+ {("e1", "e1"), ("e22", "e55"), ("pp1", "pp1")},
)
session.add(b)
session.add(c)
session.flush()
- assert set(session.query(Employee).all()) == set([a, b, c])
- assert set(session.query(Engineer).all()) == set([b, c])
+ assert set(session.query(Employee).all()) == {a, b, c}
+ assert set(session.query(Engineer).all()) == {b, c}
assert session.query(Manager).all() == [c]
session.add(Engineer("Karina", "knows how to hack"))
session.flush()
session.expunge_all()
- assert set([repr(x) for x in session.query(Employee)]) == set(
- [
- "Engineer Karina knows how to hack",
- "Manager Sally knows how to manage things",
- ]
- )
-
- assert set([repr(x) for x in session.query(Manager)]) == set(
- ["Manager Sally knows how to manage things"]
- )
- assert set([repr(x) for x in session.query(Engineer)]) == set(
- ["Engineer Karina knows how to hack"]
- )
+ assert {repr(x) for x in session.query(Employee)} == {
+ "Engineer Karina knows how to hack",
+ "Manager Sally knows how to manage things",
+ }
+
+ assert {repr(x) for x in session.query(Manager)} == {
+ "Manager Sally knows how to manage things"
+ }
+ assert {repr(x) for x in session.query(Engineer)} == {
+ "Engineer Karina knows how to hack"
+ }
manager = session.query(Manager).one()
session.expire(manager, ["manager_data"])
eq_(manager.manager_data, "knows how to manage things")
repr(session.query(Manager).filter(Manager.name == "Sally").one())
== "Manager Sally knows how to manage things"
)
- assert set([repr(x) for x in session.query(Employee).all()]) == set(
- [
- "Engineer Jenn knows how to program",
- "Manager Sally knows how to manage things",
- "Hacker Karina 'Badass' knows how to hack",
- ]
- )
- assert set([repr(x) for x in session.query(Manager).all()]) == set(
- ["Manager Sally knows how to manage things"]
- )
- assert set([repr(x) for x in session.query(Engineer).all()]) == set(
- [
- "Engineer Jenn knows how to program",
- "Hacker Karina 'Badass' knows how to hack",
- ]
- )
- assert set([repr(x) for x in session.query(Hacker).all()]) == set(
- ["Hacker Karina 'Badass' knows how to hack"]
- )
+ assert {repr(x) for x in session.query(Employee).all()} == {
+ "Engineer Jenn knows how to program",
+ "Manager Sally knows how to manage things",
+ "Hacker Karina 'Badass' knows how to hack",
+ }
+ assert {repr(x) for x in session.query(Manager).all()} == {
+ "Manager Sally knows how to manage things"
+ }
+ assert {repr(x) for x in session.query(Engineer).all()} == {
+ "Engineer Jenn knows how to program",
+ "Hacker Karina 'Badass' knows how to hack",
+ }
+ assert {repr(x) for x in session.query(Hacker).all()} == {
+ "Hacker Karina 'Badass' knows how to hack"
+ }
def test_multi_level_no_base_w_hybrid(self):
Employee, Engineer, Manager = self.classes(
)
== 3
)
- assert set([repr(x) for x in session.query(Employee)]) == set(
- [
- "Engineer Jenn knows how to program",
- "Manager Sally knows how to manage things",
- "Hacker Karina 'Badass' knows how to hack",
- ]
- )
- assert set([repr(x) for x in session.query(Manager)]) == set(
- ["Manager Sally knows how to manage things"]
- )
- assert set([repr(x) for x in session.query(Engineer)]) == set(
- [
- "Engineer Jenn knows how to program",
- "Hacker Karina 'Badass' knows how to hack",
- ]
- )
- assert set([repr(x) for x in session.query(Hacker)]) == set(
- ["Hacker Karina 'Badass' knows how to hack"]
- )
+ assert {repr(x) for x in session.query(Employee)} == {
+ "Engineer Jenn knows how to program",
+ "Manager Sally knows how to manage things",
+ "Hacker Karina 'Badass' knows how to hack",
+ }
+ assert {repr(x) for x in session.query(Manager)} == {
+ "Manager Sally knows how to manage things"
+ }
+ assert {repr(x) for x in session.query(Engineer)} == {
+ "Engineer Jenn knows how to program",
+ "Hacker Karina 'Badass' knows how to hack",
+ }
+ assert {repr(x) for x in session.query(Hacker)} == {
+ "Hacker Karina 'Badass' knows how to hack"
+ }
@testing.fixture
def two_pjoin_fixture(self):
def go():
c2 = session.get(Company, c.id)
- assert set([repr(x) for x in c2.employees]) == set(
- [
- "Engineer Karina knows how to hack",
- "Manager Sally knows how to manage things",
- ]
- )
+ assert {repr(x) for x in c2.employees} == {
+ "Engineer Karina knows how to hack",
+ "Manager Sally knows how to manage things",
+ }
self.assert_sql_count(testing.db, go, 2)
session.expunge_all()
c2 = session.get(
Company, c.id, options=[joinedload(Company.employees)]
)
- assert set([repr(x) for x in c2.employees]) == set(
- [
- "Engineer Karina knows how to hack",
- "Manager Sally knows how to manage things",
- ]
- )
+ assert {repr(x) for x in c2.employees} == {
+ "Engineer Karina knows how to hack",
+ "Manager Sally knows how to manage things",
+ }
self.assert_sql_count(testing.db, go, 1)
@classmethod
def setup_mappers(cls):
- super(_PolymorphicTestBase, cls).setup_mappers()
+ super().setup_mappers()
global people, engineers, managers, boss
global companies, paperwork, machines
people, engineers, managers, boss, companies, paperwork, machines = (
@classmethod
def insert_data(cls, connection):
- super(_PolymorphicTestBase, cls).insert_data(connection)
+ super().insert_data(connection)
global all_employees, c1_employees, c2_employees
global c1, c2, e1, e2, e3, b1, m1
def _five_obj_fixture(self):
sess = fixture_session()
- e1, e2, e3, e4, e5 = [Engineer(name="e%d" % (i + 1)) for i in range(5)]
+ e1, e2, e3, e4, e5 = (Engineer(name="e%d" % (i + 1)) for i in range(5))
e3.reports_to = e1
e4.reports_to = e2
sess.add_all([e1, e2, e3, e4, e5])
with _aliased_join_warning(r"Child2\(child2\)"):
eq_(
set(sess.execute(stmt).scalars().unique()),
- set([c11, c12, c13]),
+ {c11, c12, c13},
)
with _aliased_join_warning(r"Child2\(child2\)"):
eq_(
set(sess.query(Child1, Child2).join(Child1.left_child2)),
- set([(c11, c22), (c12, c22), (c13, c23)]),
+ {(c11, c22), (c12, c22), (c13, c23)},
)
# manual alias test:
eq_(
set(sess.execute(stmt).scalars().unique()),
- set([c11, c12, c13]),
+ {c11, c12, c13},
)
eq_(
set(sess.query(Child1, c2).join(Child1.left_child2.of_type(c2))),
- set([(c11, c22), (c12, c22), (c13, c23)]),
+ {(c11, c22), (c12, c22), (c13, c23)},
)
# test __eq__() on property is annotating correctly
with _aliased_join_warning(r"Child1\(child1\)"):
eq_(
set(sess.execute(stmt).scalars().unique()),
- set([c22]),
+ {c22},
)
# manual aliased version
)
eq_(
set(sess.execute(stmt).scalars().unique()),
- set([c22]),
+ {c22},
)
# test the same again
class EagerDefaultEvalTestSubDefaults(EagerDefaultEvalTest):
@classmethod
def setup_classes(cls):
- super(EagerDefaultEvalTestSubDefaults, cls).setup_classes(
- include_sub_defaults=True
- )
+ super().setup_classes(include_sub_defaults=True)
class EagerDefaultEvalTestPolymorphic(EagerDefaultEvalTest):
@classmethod
def setup_classes(cls):
- super(EagerDefaultEvalTestPolymorphic, cls).setup_classes(
- with_polymorphic="*"
- )
+ super().setup_classes(with_polymorphic="*")
class ColExprTest(AssertsCompiledSQL, fixtures.TestBase):
)
]
),
- (set([hi, there]), set(), set()),
+ ({hi, there}, set(), set()),
)
self._commit_someattr(f)
eq_(
)
]
),
- (set(), set([hi, there]), set()),
+ (set(), {hi, there}, set()),
)
def test_object_collections_mutate(self):
._pending_mutations["keywords"]
.added_items
),
- set([k2]),
+ {k2},
)
# because autoflush is off, k2 is still
# coming in from pending
"""test for issue discovered in #7394"""
@registry.mapped
- class User2(object):
+ class User2:
__table__ = self.tables.users
name_syn = synonym("name")
@registry.mapped
- class Address2(object):
+ class Address2:
__table__ = self.tables.addresses
name_syn = synonym("email_address")
users, addresses = self.tables.users, self.tables.addresses
rel = relationship(Address)
- eq_(rel.cascade, set(["save-update", "merge"]))
+ eq_(rel.cascade, {"save-update", "merge"})
rel.cascade = "save-update, merge, expunge"
- eq_(rel.cascade, set(["save-update", "merge", "expunge"]))
+ eq_(rel.cascade, {"save-update", "merge", "expunge"})
self.mapper_registry.map_imperatively(
User, users, properties={"addresses": rel}
am = self.mapper_registry.map_imperatively(Address, addresses)
configure_mappers()
- eq_(rel.cascade, set(["save-update", "merge", "expunge"]))
+ eq_(rel.cascade, {"save-update", "merge", "expunge"})
assert ("addresses", User) not in am._delete_orphans
rel.cascade = "all, delete, delete-orphan"
eq_(
rel.cascade,
- set(
- [
- "delete",
- "delete-orphan",
- "expunge",
- "merge",
- "refresh-expire",
- "save-update",
- ]
- ),
+ {
+ "delete",
+ "delete-orphan",
+ "expunge",
+ "merge",
+ "refresh-expire",
+ "save-update",
+ },
)
def test_cascade_unicode(self):
rel = relationship(Address)
rel.cascade = "save-update, merge, expunge"
- eq_(rel.cascade, set(["save-update", "merge", "expunge"]))
+ eq_(rel.cascade, {"save-update", "merge", "expunge"})
class O2MCascadeDeleteOrphanTest(fixtures.MappedTest):
state = inspect(obj)
it = inspect(Company).cascade_iterator("save-update", state)
- eq_(set([rec[0] for rec in it]), set([eng, maven_build, lang]))
+ eq_({rec[0] for rec in it}, {eng, maven_build, lang})
state = inspect(eng)
it = inspect(Employee).cascade_iterator("save-update", state)
- eq_(set([rec[0] for rec in it]), set([maven_build, lang]))
+ eq_({rec[0] for rec in it}, {maven_build, lang})
def test_delete_orphan_round_trip(self):
(
obj.attr = like_me
self.assert_(obj.attr is not direct)
self.assert_(obj.attr is not like_me)
- self.assert_(set(obj.attr) == set([e2]))
+ self.assert_(set(obj.attr) == {e2})
self.assert_(e1 in canary.removed)
self.assert_(e2 in canary.added)
real_list = [e3]
obj.attr = real_list
self.assert_(obj.attr is not real_list)
- self.assert_(set(obj.attr) == set([e3]))
+ self.assert_(set(obj.attr) == {e3})
self.assert_(e2 in canary.removed)
self.assert_(e3 in canary.added)
e4 = creator()
try:
- obj.attr = set([e4])
+ obj.attr = {e4}
self.assert_(False)
except TypeError:
self.assert_(e4 not in canary.data)
e = creator()
addall(e)
- values = set([e, creator(), creator()])
+ values = {e, creator(), creator()}
direct.update(values)
control.update(values)
e = creator()
addall(e)
- values = set([e, creator(), creator()])
+ values = {e, creator(), creator()}
direct |= values
control |= values
assert_eq()
# cover self-assignment short-circuit
- values = set([e, creator(), creator()])
+ values = {e, creator(), creator()}
obj.attr |= values
control |= values
assert_eq()
zap()
e = creator()
addall(creator(), creator())
- values = set([creator()])
+ values = {creator()}
direct.difference_update(values)
control.difference_update(values)
assert_eq()
- values.update(set([e, creator()]))
+ values.update({e, creator()})
direct.difference_update(values)
control.difference_update(values)
assert_eq()
zap()
e = creator()
addall(creator(), creator())
- values = set([creator()])
+ values = {creator()}
direct -= values
control -= values
assert_eq()
- values.update(set([e, creator()]))
+ values.update({e, creator()})
direct -= values
control -= values
assert_eq()
- values = set([creator()])
+ values = {creator()}
obj.attr -= values
control -= values
assert_eq()
control.intersection_update(values)
assert_eq()
- values.update(set([e, creator()]))
+ values.update({e, creator()})
direct.intersection_update(values)
control.intersection_update(values)
assert_eq()
control &= values
assert_eq()
- values.update(set([e, creator()]))
+ values.update({e, creator()})
direct &= values
control &= values
assert_eq()
- values.update(set([creator()]))
+ values.update({creator()})
obj.attr &= values
control &= values
assert_eq()
e = creator()
addall(e, creator(), creator())
- values = set([e, creator()])
+ values = {e, creator()}
direct.symmetric_difference_update(values)
control.symmetric_difference_update(values)
assert_eq()
e = creator()
addall(e)
- values = set([e])
+ values = {e}
direct.symmetric_difference_update(values)
control.symmetric_difference_update(values)
assert_eq()
e = creator()
addall(e, creator(), creator())
- values = set([e, creator()])
+ values = {e, creator()}
direct ^= values
control ^= values
assert_eq()
e = creator()
addall(e)
- values = set([e])
+ values = {e}
direct ^= values
control ^= values
assert_eq()
control ^= values
assert_eq()
- values = set([creator()])
+ values = {creator()}
obj.attr ^= values
control ^= values
assert_eq()
obj.attr = like_me
self.assert_(obj.attr is not direct)
self.assert_(obj.attr is not like_me)
- self.assert_(obj.attr == set([e2]))
+ self.assert_(obj.attr == {e2})
self.assert_(e1 in canary.removed)
self.assert_(e2 in canary.added)
e3 = creator()
- real_set = set([e3])
+ real_set = {e3}
obj.attr = real_set
self.assert_(obj.attr is not real_set)
- self.assert_(obj.attr == set([e3]))
+ self.assert_(obj.attr == {e3})
self.assert_(e2 in canary.removed)
self.assert_(e3 in canary.added)
if hasattr(direct, "update"):
e = creator()
- d = dict([(ee.a, ee) for ee in [e, creator(), creator()]])
+ d = {ee.a: ee for ee in [e, creator(), creator()]}
addall(e, creator())
direct.update(d)
control.update(d)
assert_eq()
- kw = dict([(ee.a, ee) for ee in [e, creator()]])
+ kw = {ee.a: ee for ee in [e, creator()]}
direct.update(**kw)
control.update(**kw)
assert_eq()
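(Same idea for the dict rewrites above: a dict comprehension produces exactly
what dict() over a sequence of (key, value) pairs produces, without building the
intermediate list of tuples. Illustrative sketch:
    pairs = [("a", 1), ("b", 2)]
    assert {k: v for k, v in pairs} == dict([(k, v) for k, v in pairs])
)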
obj.attr = like_me
self.assert_(obj.attr is not direct)
self.assert_(obj.attr is not like_me)
- self.assert_(
- set(collections.collection_adapter(obj.attr)) == set([e2])
- )
+ self.assert_(set(collections.collection_adapter(obj.attr)) == {e2})
self.assert_(e1 in canary.removed)
self.assert_(e2 in canary.added)
obj.attr = real_dict
self.assert_(obj.attr is not real_dict)
self.assert_("keyignored1" not in obj.attr)
- eq_(set(collections.collection_adapter(obj.attr)), set([e3]))
+ eq_(set(collections.collection_adapter(obj.attr)), {e3})
self.assert_(e2 in canary.removed)
self.assert_(e3 in canary.added)
def test_dict_subclass2(self):
class MyEasyDict(collections.KeyFuncDict):
def __init__(self):
- super(MyEasyDict, self).__init__(lambda e: e.a)
+ super().__init__(lambda e: e.a)
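(The zero-argument super() used throughout is the PEP 3135 form: inside a method
defined in a class body, super() resolves to super(ThatClass, self) via the
implicit __class__ cell, so these rewrites do not change method resolution.
Illustrative sketch:
    class Base:
        def __init__(self):
            self.tag = "base"
    class Child(Base):
        def __init__(self):
            super().__init__()  # same as super(Child, self).__init__()
    assert Child().tag == "base"
)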
self._test_adapter(
MyEasyDict, self.dictable_entity, to_set=lambda c: set(c.values())
p = session.get(Parent, pid)
- eq_(set(p.children.keys()), set(["foo", "bar"]))
+ eq_(set(p.children.keys()), {"foo", "bar"})
cid = p.children["foo"].id
collections.collection_adapter(p.children).append_with_event(
p = session.get(Parent, pid)
- self.assert_(set(p.children.keys()) == set(["foo", "bar"]))
+ self.assert_(set(p.children.keys()) == {"foo", "bar"})
self.assert_(p.children["foo"].id != cid)
self.assert_(
p = session.get(Parent, pid)
- self.assert_(
- set(p.children.keys()) == set([("foo", "1"), ("foo", "2")])
- )
+ self.assert_(set(p.children.keys()) == {("foo", "1"), ("foo", "2")})
cid = p.children[("foo", "1")].id
collections.collection_adapter(p.children).append_with_event(
p = session.get(Parent, pid)
- self.assert_(
- set(p.children.keys()) == set([("foo", "1"), ("foo", "2")])
- )
+ self.assert_(set(p.children.keys()) == {("foo", "1"), ("foo", "2")})
self.assert_(p.children[("foo", "1")].id != cid)
self.assert_(
assert len(list(f.bars)) == 2
strongref = list(f.bars.values())
- existing = set([id(b) for b in strongref])
+ existing = {id(b) for b in strongref}
col = collections.collection_adapter(f.bars)
col.append_with_event(Bar("b"))
f = sess.get(Foo, f.col1)
assert len(list(f.bars)) == 2
- replaced = set([id(b) for b in list(f.bars.values())])
+ replaced = {id(b) for b in list(f.bars.values())}
ne_(existing, replaced)
@testing.combinations("direct", "as_callable", argnames="factory_type")
users,
properties=util.OrderedDict(
[
- ("concat", column_property((users.c.id * 2))),
+ ("concat", column_property(users.c.id * 2)),
(
"count",
column_property(
@classmethod
def setup_mappers(cls):
- super(InheritanceTest, cls).setup_mappers()
+ super().setup_mappers()
from sqlalchemy import inspect
inspect(Company).add_property(
class MyDict(collections.KeyFuncDict):
def __init__(self):
- super(MyDict, self).__init__(lambda value: "k%d" % value)
+ super().__init__(lambda value: "k%d" % value)
@collection.converter
def _convert(self, dictlike):
# test cancellation of None, replacement with nothing
eq_(
set(u.addresses.order_by(None)),
- set(
- [
- Address(email_address="ed@bettyboop.com"),
- Address(email_address="ed@lala.com"),
- Address(email_address="ed@wood.com"),
- ]
- ),
+ {
+ Address(email_address="ed@bettyboop.com"),
+ Address(email_address="ed@lala.com"),
+ Address(email_address="ed@wood.com"),
+ },
)
def test_count(self, user_address_fixture):
# test cancellation of None, replacement with nothing
eq_(
set(sess.scalars(u.addresses.select().order_by(None))),
- set(
- [
- Address(email_address="ed@bettyboop.com"),
- Address(email_address="ed@lala.com"),
- Address(email_address="ed@wood.com"),
- ]
- ),
+ {
+ Address(email_address="ed@bettyboop.com"),
+ Address(email_address="ed@lala.com"),
+ Address(email_address="ed@wood.com"),
+ },
)
def test_secondary_as_join(self):
u.addresses.remove(a)
eq_(
- set(ad for ad, in sess.query(Address.email_address)),
- set(["a", "b", "d"]),
+ {ad for ad, in sess.query(Address.email_address)},
+ {"a", "b", "d"},
)
@testing.combinations(True, False, argnames="autoflush")
@classmethod
def define_tables(cls, metadata):
- super(MapperEventsTest, cls).define_tables(metadata)
+ super().define_tables(metadata)
metadata.tables["users"].append_column(
Column("extra", Integer, default=5, onupdate=10)
)
sess.commit()
sess.query(User).union_all(sess.query(User)).all()
- eq_(canary, [("refresh", set(["id", "name"]))])
+ eq_(canary, [("refresh", {"id", "name"})])
def test_via_refresh_state(self):
User = self.classes.User
sess.commit()
u1.name
- eq_(canary, [("refresh", set(["id", "name"]))])
+ eq_(canary, [("refresh", {"id", "name"})])
def test_was_expired(self):
User = self.classes.User
sess.expire(u1)
sess.query(User).first()
- eq_(canary, [("refresh", set(["id", "name"]))])
+ eq_(canary, [("refresh", {"id", "name"})])
def test_was_expired_via_commit(self):
User = self.classes.User
sess.commit()
sess.query(User).first()
- eq_(canary, [("refresh", set(["id", "name"]))])
+ eq_(canary, [("refresh", {"id", "name"})])
def test_was_expired_attrs(self):
User = self.classes.User
sess.expire(u1, ["name"])
sess.query(User).first()
- eq_(canary, [("refresh", set(["name"]))])
+ eq_(canary, [("refresh", {"name"})])
def test_populate_existing(self):
User = self.classes.User
User,
users,
properties={
- "concat": column_property((users.c.id * 2)),
+ "concat": column_property(users.c.id * 2),
"count": column_property(
select(func.count(addresses.c.id))
.where(
"addresses": relationship(
Address, backref="user", order_by=addresses.c.id
),
- "concat": column_property((users.c.id * 2)),
+ "concat": column_property(users.c.id * 2),
"count": column_property(
select(func.count(addresses.c.id))
.where(
.outerjoin(Order.addresses)
.filter(sa.or_(Order.id == None, Address.id == 1))
) # noqa
- eq_(set([User(id=7), User(id=8), User(id=10)]), set(q.all()))
+ eq_({User(id=7), User(id=8), User(id=10)}, set(q.all()))
def test_outer_join_count(self):
"""test the join and outerjoin functions on Query"""
.select_from(sel)
.filter(sa.or_(Order.id == None, Address.id == 1))
) # noqa
- eq_(set([User(id=7), User(id=8), User(id=10)]), set(q.all()))
+ eq_({User(id=7), User(id=8), User(id=10)}, set(q.all()))
class CaseSensitiveTest(fixtures.MappedTest):
rel = inspect(User).relationships
eq_(rel.addresses, User.addresses.property)
- eq_(set(rel.keys()), set(["orders", "addresses"]))
+ eq_(set(rel.keys()), {"orders", "addresses"})
def test_insp_relationship_prop(self):
User = self.classes.User
insp = inspect(SomeSubClass)
eq_(
- dict(
- (k, v.extension_type)
+ {
+ k: v.extension_type
for k, v in list(insp.all_orm_descriptors.items())
- ),
+ },
{
"id": NotExtension.NOT_EXTENSION,
"name": NotExtension.NOT_EXTENSION,
eq_(
set(insp.attrs.keys()),
- set(["id", "name", "name_syn", "addresses", "orders"]),
+ {"id", "name", "name_syn", "addresses", "orders"},
)
eq_(insp.attrs.name.value, "ed")
eq_(insp.attrs.name.loaded_value, "ed")
m = self.mapper_registry.map_imperatively(AnonClass, self.tables.users)
- eq_(set(inspect(AnonClass).attrs.keys()), set(["id", "name"]))
+ eq_(set(inspect(AnonClass).attrs.keys()), {"id", "name"})
eq_(
set(inspect(AnonClass).all_orm_descriptors.keys()),
- set(["id", "name"]),
+ {"id", "name"},
)
m.add_property("q", column_property(self.tables.users.c.name))
AnonClass.foob = hybrid_property(desc)
- eq_(set(inspect(AnonClass).attrs.keys()), set(["id", "name", "q"]))
+ eq_(set(inspect(AnonClass).attrs.keys()), {"id", "name", "q"})
eq_(
set(inspect(AnonClass).all_orm_descriptors.keys()),
- set(["id", "name", "q", "foob"]),
+ {"id", "name", "q", "foob"},
)
def _random_names(self):
names = self._random_names()
if base is supercls:
- pk_names = set(
+ pk_names = {
random.choice(names) for i in range(random.randint(1, 3))
- )
+ }
fk_name = random.choice(
[name for name in names if name not in pk_names]
)
class B(A):
def __init__(self):
inits.append((B, "__init__"))
- super(B, self).__init__()
+ super().__init__()
self.register(B, inits)
class B(A):
def __init__(self):
inits.append((B, "__init__"))
- super(B, self).__init__()
+ super().__init__()
A()
eq_(inits, [(A, "init", A), (A, "__init__")])
class B(A):
def __init__(self):
inits.append((B, "__init__"))
- super(B, self).__init__()
+ super().__init__()
self.register(B, inits)
class B(A):
def __init__(self):
inits.append((B, "__init__"))
- super(B, self).__init__()
+ super().__init__()
self.register(B, inits)
class C(B):
def __init__(self):
inits.append((C, "__init__"))
- super(C, self).__init__()
+ super().__init__()
self.register(C, inits)
class B(A):
def __init__(self):
inits.append((B, "__init__"))
- super(B, self).__init__()
+ super().__init__()
class C(B):
def __init__(self):
inits.append((C, "__init__"))
- super(C, self).__init__()
+ super().__init__()
self.register(C, inits)
class C(B):
def __init__(self):
inits.append((C, "__init__"))
- super(C, self).__init__()
+ super().__init__()
self.register(C, inits)
class C(B):
def __init__(self):
inits.append((C, "__init__"))
- super(C, self).__init__()
+ super().__init__()
self.register(C, inits)
assert_raises(TypeError, cls, "a", "b", c="c")
def _kw_only_fixture(self):
- class A(object):
+ class A:
def __init__(self, a, *, b, c):
self.a = a
self.b = b
return self._instrument(A)
def _kw_plus_posn_fixture(self):
- class A(object):
+ class A:
def __init__(self, a, *args, b, c):
self.a = a
self.b = b
return self._instrument(A)
def _kw_opt_fixture(self):
- class A(object):
+ class A:
def __init__(self, a, *, b, c="c"):
self.a = a
self.b = b
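(Dropping the explicit object base, as in the fixtures above, is a no-op on
Python 3: every class is new-style and inherits from object implicitly.
    class A:
        pass
    assert A.__bases__ == (object,)
)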
configure_mappers()
def assert_props(cls, want):
- have = set([n for n in dir(cls) if not n.startswith("_")])
+ have = {n for n in dir(cls) if not n.startswith("_")}
want = set(want)
eq_(have, want)
def assert_instrumented(cls, want):
- have = set([p.key for p in class_mapper(cls).iterate_properties])
+ have = {p.key for p in class_mapper(cls).iterate_properties}
want = set(want)
eq_(have, want)
class MyFakeProperty(sa.orm.properties.ColumnProperty):
def post_instrument_class(self, mapper):
- super(MyFakeProperty, self).post_instrument_class(mapper)
+ super().post_instrument_class(mapper)
configure_mappers()
self.mapper(
class MyFakeProperty(sa.orm.properties.ColumnProperty):
def post_instrument_class(self, mapper):
- super(MyFakeProperty, self).post_instrument_class(mapper)
+ super().post_instrument_class(mapper)
configure_mappers()
self.mapper(
self._test({"bar": "bat"})
def test_set(self):
- self._test(set([6]))
+ self._test({6})
def test_column(self):
self._test_not(self.tables.foo.c.someprop)
assert_paths = [k[1] for k in attr]
eq_(
- set([p for p in assert_paths]),
- set([self._make_path(p) for p in paths]),
+ {p for p in assert_paths},
+ {self._make_path(p) for p in paths},
)
self.selfref,
self.selfref,
prop=self.relationship,
- remote_side=set([self.selfref.c.id]),
+ remote_side={self.selfref.c.id},
**kw,
)
self.composite_selfref,
self.composite_selfref,
prop=self.relationship,
- remote_side=set(
- [
- self.composite_selfref.c.id,
- self.composite_selfref.c.group_id,
- ]
- ),
+ remote_side={
+ self.composite_selfref.c.id,
+ self.composite_selfref.c.group_id,
+ },
**kw,
)
self.composite_selfref.c.parent_id
== self.composite_selfref.c.id,
),
- remote_side=set([self.composite_selfref.c.parent_id]),
+ remote_side={self.composite_selfref.c.parent_id},
**kw,
)
def test_determine_remote_columns_compound_1(self):
joincond = self._join_fixture_compound_expression_1(support_sync=False)
- eq_(joincond.remote_columns, set([self.right.c.x, self.right.c.y]))
+ eq_(joincond.remote_columns, {self.right.c.x, self.right.c.y})
def test_determine_local_remote_compound_1(self):
joincond = self._join_fixture_compound_expression_1(support_sync=False)
def test_determine_remote_columns_compound_2(self):
joincond = self._join_fixture_compound_expression_2(support_sync=False)
- eq_(joincond.remote_columns, set([self.right.c.x, self.right.c.y]))
+ eq_(joincond.remote_columns, {self.right.c.x, self.right.c.y})
def test_determine_remote_columns_o2m(self):
joincond = self._join_fixture_o2m()
- eq_(joincond.remote_columns, set([self.right.c.lid]))
+ eq_(joincond.remote_columns, {self.right.c.lid})
def test_determine_remote_columns_o2m_selfref(self):
joincond = self._join_fixture_o2m_selfref()
- eq_(joincond.remote_columns, set([self.selfref.c.sid]))
+ eq_(joincond.remote_columns, {self.selfref.c.sid})
def test_determine_local_remote_pairs_o2m_composite_selfref(self):
joincond = self._join_fixture_o2m_composite_selfref()
joincond = self._join_fixture_m2o_composite_selfref()
eq_(
joincond.remote_columns,
- set(
- [
- self.composite_selfref.c.id,
- self.composite_selfref.c.group_id,
- ]
- ),
+ {
+ self.composite_selfref.c.id,
+ self.composite_selfref.c.group_id,
+ },
)
def test_determine_remote_columns_m2o(self):
joincond = self._join_fixture_m2o()
- eq_(joincond.remote_columns, set([self.left.c.id]))
+ eq_(joincond.remote_columns, {self.left.c.id})
def test_determine_local_remote_pairs_o2m(self):
joincond = self._join_fixture_o2m()
def test_determine_local_columns_m2m_backref(self):
j1, j2 = self._join_fixture_m2m_backref()
- eq_(j1.local_columns, set([self.m2mleft.c.id]))
- eq_(j2.local_columns, set([self.m2mright.c.id]))
+ eq_(j1.local_columns, {self.m2mleft.c.id})
+ eq_(j2.local_columns, {self.m2mright.c.id})
def test_determine_remote_columns_m2m_backref(self):
j1, j2 = self._join_fixture_m2m_backref()
eq_(
j1.remote_columns,
- set([self.m2msecondary.c.lid, self.m2msecondary.c.rid]),
+ {self.m2msecondary.c.lid, self.m2msecondary.c.rid},
)
eq_(
j2.remote_columns,
- set([self.m2msecondary.c.lid, self.m2msecondary.c.rid]),
+ {self.m2msecondary.c.lid, self.m2msecondary.c.rid},
)
def test_determine_remote_columns_m2o_selfref(self):
joincond = self._join_fixture_m2o_selfref()
- eq_(joincond.remote_columns, set([self.selfref.c.id]))
+ eq_(joincond.remote_columns, {self.selfref.c.id})
def test_determine_local_remote_cols_three_tab_viewonly(self):
joincond = self._join_fixture_overlapping_three_tables()
)
eq_(
joincond.remote_columns,
- set([self.three_tab_b.c.id, self.three_tab_b.c.aid]),
+ {self.three_tab_b.c.id, self.three_tab_b.c.aid},
)
def test_determine_local_remote_overlapping_composite_fks(self):
)
eq_(
joincond.remote_columns,
- set([self.base.c.flag, self.sub_w_sub_rel.c.sub_id]),
+ {self.base.c.flag, self.sub_w_sub_rel.c.sub_id},
)
employee_t = self.tables.employee_t
eq_(
set(Employee.employees.property.local_remote_pairs),
- set(
- [
- (employee_t.c.company_id, employee_t.c.company_id),
- (employee_t.c.emp_id, employee_t.c.reports_to_id),
- ]
- ),
+ {
+ (employee_t.c.company_id, employee_t.c.company_id),
+ (employee_t.c.emp_id, employee_t.c.reports_to_id),
+ },
)
eq_(
Employee.employees.property.remote_side,
- set([employee_t.c.company_id, employee_t.c.reports_to_id]),
+ {employee_t.c.company_id, employee_t.c.reports_to_id},
)
eq_(
set(Employee.reports_to.property.local_remote_pairs),
- set(
- [
- (employee_t.c.company_id, employee_t.c.company_id),
- (employee_t.c.reports_to_id, employee_t.c.emp_id),
- ]
- ),
+ {
+ (employee_t.c.company_id, employee_t.c.company_id),
+ (employee_t.c.reports_to_id, employee_t.c.emp_id),
+ },
)
def _setup_data(self, sess):
sess.expunge_all()
c1 = sess.get(C1, c1.id)
- assert set([x.id for x in c1.t2s]) == set([c2a.id, c2b.id])
- assert set([x.id for x in c1.t2_view]) == set([c2b.id])
+ assert {x.id for x in c1.t2s} == {c2a.id, c2b.id}
+ assert {x.id for x in c1.t2_view} == {c2b.id}
class ViewOnlySyncBackref(fixtures.MappedTest):
sess.expunge_all()
c1 = sess.get(C1, c1.t1id)
- assert set([x.t2id for x in c1.t2s]) == set([c2a.t2id, c2b.t2id])
- assert set([x.t2id for x in c1.t2_view]) == set([c2b.t2id])
+ assert {x.t2id for x in c1.t2s} == {c2a.t2id, c2b.t2id}
+ assert {x.t2id for x in c1.t2_view} == {c2b.t2id}
class ViewOnlyLocalRemoteM2M(fixtures.TestBase):
self.mapper_registry.map_imperatively(B, b)
sa.orm.configure_mappers()
assert A.bs.property.primaryjoin.compare(a.c.id == b.c.aid_1)
- eq_(A.bs.property._calculated_foreign_keys, set([b.c.aid_1]))
+ eq_(A.bs.property._calculated_foreign_keys, {b.c.aid_1})
def test_with_pj_o2m(self):
A, B = self.classes.A, self.classes.B
self.mapper_registry.map_imperatively(B, b)
sa.orm.configure_mappers()
assert A.bs.property.primaryjoin.compare(a.c.id == b.c.aid_1)
- eq_(A.bs.property._calculated_foreign_keys, set([b.c.aid_1]))
+ eq_(A.bs.property._calculated_foreign_keys, {b.c.aid_1})
def test_with_annotated_pj_o2m(self):
A, B = self.classes.A, self.classes.B
self.mapper_registry.map_imperatively(B, b)
sa.orm.configure_mappers()
assert A.bs.property.primaryjoin.compare(a.c.id == b.c.aid_1)
- eq_(A.bs.property._calculated_foreign_keys, set([b.c.aid_1]))
+ eq_(A.bs.property._calculated_foreign_keys, {b.c.aid_1})
def test_no_fks_m2m(self):
A, B = self.classes.A, self.classes.B
)
self.mapper_registry.map_imperatively(Bar, bars)
sa.orm.configure_mappers()
- eq_(Foo.bars.property._join_condition.local_columns, set([foos.c.id]))
- eq_(Bar.foos.property._join_condition.local_columns, set([bars.c.id]))
+ eq_(Foo.bars.property._join_condition.local_columns, {foos.c.id})
+ eq_(Bar.foos.property._join_condition.local_columns, {bars.c.id})
def test_bad_primaryjoin(self):
foobars_with_fks, bars, Bar, foobars, Foo, foos = (
s = fixture_session()
with assert_engine(testing.db) as asserter_:
- eq_(set(id_ for id_, in s.query(A.id).filter(A.bs.any())), {1, 2})
+ eq_({id_ for id_, in s.query(A.id).filter(A.bs.any())}, {1, 2})
asserter_.assert_(
CompiledSQL(
elif style == "style3":
# py2k style
def get_bind(self, mapper=None, *args, **kwargs):
- return super(MySession, self).get_bind(
- mapper, *args, **kwargs
- )
+ return super().get_bind(mapper, *args, **kwargs)
elif style == "style4":
# py2k style
def get_bind(self, mapper=None, **kwargs):
- return super(MySession, self).get_bind(
- mapper=mapper, **kwargs
- )
+ return super().get_bind(mapper=mapper, **kwargs)
s1 = MySession(testing.db)
is_(s1.get_bind(), testing.db)
class SessionInterface(fixtures.MappedTest):
"""Bogus args to Session methods produce actionable exceptions."""
- _class_methods = set(
- ("connection", "execute", "get_bind", "scalar", "scalars")
- )
+ _class_methods = {"connection", "execute", "get_bind", "scalar", "scalars"}
def _public_session_methods(self):
Session = sa.orm.session.Session
instance_methods = (
self._public_session_methods()
- self._class_methods
- - set(
- [
- "bulk_update_mappings",
- "bulk_insert_mappings",
- "bulk_save_objects",
- ]
- )
+ - {
+ "bulk_update_mappings",
+ "bulk_insert_mappings",
+ "bulk_save_objects",
+ }
)
eq_(
sess.commit()
- eq_(set(sess.query(User).all()), set([u2]))
+ eq_(set(sess.query(User).all()), {u2})
sess.rollback()
sess.begin()
n1.commit() # commit the nested transaction
sess.rollback()
- eq_(set(sess.query(User).all()), set([u2]))
+ eq_(set(sess.query(User).all()), {u2})
sess.close()
-# coding: utf-8
"""Tests unitofwork operations."""
import datetime
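(The removed coding comments here and below are redundant on Python 3: PEP 3120
makes UTF-8 the default source encoding, so a "# coding: utf-8" declaration
carries no information. A declaration is only needed for sources saved in some
other encoding.)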
session = fixture_session()
objects = []
- _keywords = dict([(k.name, k) for k in session.query(Keyword)])
+ _keywords = {k.name: k for k in session.query(Keyword)}
for elem in data[1:]:
item = Item(description=elem["description"])
session = fixture_session()
def fixture():
- _kw = dict([(k.name, k) for k in session.query(Keyword)])
+ _kw = {k.name: k for k in session.query(Keyword)}
for n in (
"big",
"green",
t5t7.select(),
)
),
- set([(1, 1), (1, 2)]),
+ {(1, 1), (1, 2)},
)
eq_(
list(
@testing.fixture
def null_server_default_fixture(self, registry, connection):
@registry.mapped
- class MyClass(object):
+ class MyClass:
__tablename__ = "my_table"
id = Column(Integer, primary_key=True)
class T3(cls.Basic):
def __str__(self):
- return "T3(id={})".format(self.id)
+ return f"T3(id={self.id})"
@classmethod
def setup_mappers(cls):
self.mapper_registry.map_imperatively(Address, addresses)
eq_(
- dict((k, v[0].__name__) for k, v in list(u_m.validators.items())),
+ {k: v[0].__name__ for k, v in list(u_m.validators.items())},
{"name": "validate_name", "addresses": "validate_address"},
)
)
stats = pstats.Stats(filename)
- counts_by_methname = dict(
- (key[2], stats.stats[key][0]) for key in stats.stats
- )
+ counts_by_methname = {key[2]: stats.stats[key][0] for key in stats.stats}
print("SQLA Version: %s" % __version__)
print("Total calls %d" % stats.total_calls)
also included in the fixtures above.
"""
- need = set(
+ need = {
cls
for cls in class_hierarchy(ClauseElement)
if issubclass(cls, (ColumnElement, Selectable, LambdaElement))
and "compiler" not in cls.__module__
and "crud" not in cls.__module__
and "dialects" not in cls.__module__ # TODO: dialects?
- ).difference({ColumnElement, UnaryExpression})
+ }.difference({ColumnElement, UnaryExpression})
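(A set comprehension is an ordinary expression, so .difference() chains on the
literal above exactly as it chained on the set() call it replaces:
    assert {x for x in range(5)}.difference({0, 1}) == {2, 3, 4}
)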
for fixture in self.fixtures + self.dont_compare_values_fixtures:
case_a = fixture()
-#! coding:utf-8
-
"""
compiler tests.
def test_custom_order_by_clause(self):
class CustomCompiler(PGCompiler):
def order_by_clause(self, select, **kw):
- return (
- super(CustomCompiler, self).order_by_clause(select, **kw)
- + " CUSTOMIZED"
- )
+ return super().order_by_clause(select, **kw) + " CUSTOMIZED"
class CustomDialect(PGDialect):
name = "custom"
def test_custom_group_by_clause(self):
class CustomCompiler(PGCompiler):
def group_by_clause(self, select, **kw):
- return (
- super(CustomCompiler, self).group_by_clause(select, **kw)
- + " CUSTOMIZED"
- )
+ return super().group_by_clause(select, **kw) + " CUSTOMIZED"
class CustomDialect(PGDialect):
name = "custom"
class MyCompiler(compiler.SQLCompiler):
def bindparam_string(self, name, **kw):
kw["escaped_from"] = name
- return super(MyCompiler, self).bindparam_string(
- '"%s"' % name, **kw
- )
+ return super().bindparam_string('"%s"' % name, **kw)
dialect = default.DefaultDialect()
dialect.statement_compiler = MyCompiler
total_params = 100000
in_clause = [":in%d" % i for i in range(total_params)]
- params = dict(("in%d" % i, i) for i in range(total_params))
+ params = {"in%d" % i: i for i in range(total_params)}
t = text("text clause %s" % ", ".join(in_clause))
eq_(len(t.bindparams), total_params)
c = t.compile()
comp = stmt.compile()
eq_(
set(comp._create_result_map()),
- set(["t1_1_b", "t1_1_a", "t1_a", "t1_b"]),
+ {"t1_1_b", "t1_1_a", "t1_a", "t1_b"},
)
is_(comp._create_result_map()["t1_a"][1][2], t1.c.a)
if stmt is stmt2.element:
with self._nested_result() as nested:
contexts[stmt2.element] = nested
- text = super(MyCompiler, self).visit_select(
+ text = super().visit_select(
stmt2.element,
)
self._add_to_result_map("k1", "k1", (1, 2, 3), int_)
else:
- text = super(MyCompiler, self).visit_select(
- stmt, *arg, **kw
- )
+ text = super().visit_select(stmt, *arg, **kw)
self._add_to_result_map("k2", "k2", (3, 4, 5), int_)
return text
-# coding: utf-8
from sqlalchemy import Column
from sqlalchemy import Computed
from sqlalchemy import Integer
Index("idx_winners", events.c.winner)
eq_(
- set(ix.name for ix in events.indexes),
- set(
- [
- "ix_events_name",
- "ix_events_location",
- "sport_announcer",
- "idx_winners",
- ]
- ),
+ {ix.name for ix in events.indexes},
+ {
+ "ix_events_name",
+ "ix_events_location",
+ "sport_announcer",
+ "idx_winners",
+ },
)
self.assert_sql_execution(
assert r.lastrow_has_defaults()
eq_(
set(r.context.postfetch_cols),
- set([t.c.col3, t.c.col5, t.c.col4, t.c.col6]),
+ {t.c.col3, t.c.col5, t.c.col4, t.c.col6},
)
r = connection.execute(t.insert().inline())
assert r.lastrow_has_defaults()
eq_(
set(r.context.postfetch_cols),
- set([t.c.col3, t.c.col5, t.c.col4, t.c.col6]),
+ {t.c.col3, t.c.col5, t.c.col4, t.c.col6},
)
connection.execute(t.insert())
eq_(
set(r.context.postfetch_cols),
- set([t.c.col3, t.c.col5, t.c.col4, t.c.col6]),
+ {t.c.col3, t.c.col5, t.c.col4, t.c.col6},
)
eq_(
-#! coding:utf-8
-
from sqlalchemy import and_
from sqlalchemy import delete
from sqlalchemy import exc
-#! coding: utf-8
-
from sqlalchemy import alias
from sqlalchemy import and_
from sqlalchemy import bindparam
foo, bar = CustomObj("foo", String), CustomObj("bar", String)
bin_ = foo == bar
set(ClauseVisitor().iterate(bin_))
- assert set(ClauseVisitor().iterate(bin_)) == set([foo, bar, bin_])
+ assert set(ClauseVisitor().iterate(bin_)) == {foo, bar, bin_}
class BinaryEndpointTraversalTest(fixtures.TestBase):
assert c1 == str(clause)
assert str(clause2) == c1 + " SOME MODIFIER=:lala"
assert list(clause._bindparams.keys()) == ["bar"]
- assert set(clause2._bindparams.keys()) == set(["bar", "lala"])
+ assert set(clause2._bindparams.keys()) == {"bar", "lala"}
def test_select(self):
s2 = select(t1)
e = sql_util.ClauseAdapter(
b,
- include_fn=lambda x: x in set([a.c.id]),
- equivalents={a.c.id: set([a.c.id])},
+ include_fn=lambda x: x in {a.c.id},
+ equivalents={a.c.id: {a.c.id}},
).traverse(e)
assert str(e) == "a_1.id = a.xxx_id"
# asking for a nonexistent col. corresponding_column should prevent
# endless depth.
adapt = sql_util.ClauseAdapter(
- b, equivalents={a.c.x: set([c.c.x]), c.c.x: set([a.c.x])}
+ b, equivalents={a.c.x: {c.c.x}, c.c.x: {a.c.x}}
)
assert adapt._corresponding_column(a.c.x, False) is None
# two levels of indirection from c.x->b.x->a.x, requires recursive
# corresponding_column call
adapt = sql_util.ClauseAdapter(
- alias, equivalents={b.c.x: set([a.c.x]), c.c.x: set([b.c.x])}
+ alias, equivalents={b.c.x: {a.c.x}, c.c.x: {b.c.x}}
)
assert adapt._corresponding_column(a.c.x, False) is alias.c.x
assert adapt._corresponding_column(c.c.x, False) is alias.c.x
def _table(name):
return table(name, column("col1"), column("col2"), column("col3"))
- table1, table2, table3, table4 = [
+ table1, table2, table3, table4 = (
_table(name) for name in ("table1", "table2", "table3", "table4")
- ]
+ )
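(The switch from a list comprehension to a generator expression above is safe in
this position: tuple unpacking consumes any iterable, so the four names bind the
same way without materializing an intermediate list. Illustrative sketch:
    a, b = (n.upper() for n in ("x", "y"))
    assert (a, b) == ("X", "Y")
)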
def test_splice(self):
t1, t2, t3, t4 = table1, table2, table1.alias(), table2.alias()
def __init__(self, *args):
args = args + (3,)
- super(MyFunction, self).__init__(*args)
+ super().__init__(*args)
self.assert_compile(
func.my_func(1, 2), "my_func(:my_func_1, :my_func_2, :my_func_3)"
-#! coding:utf-8
from __future__ import annotations
from typing import Tuple
stmt = table.insert().values(values)
eq_(
- dict(
- [
- (k, v.type._type_affinity)
- for (k, v) in stmt.compile(
- dialect=postgresql.dialect()
- ).binds.items()
- ]
- ),
+ {
+ k: v.type._type_affinity
+ for (k, v) in stmt.compile(
+ dialect=postgresql.dialect()
+ ).binds.items()
+ },
{
"foo": Integer,
"data_m2": String,
stmt = table.insert().values(values)
eq_(
- dict(
- [
- (k, v.type._type_affinity)
- for (k, v) in stmt.compile(
- dialect=postgresql.dialect()
- ).binds.items()
- ]
- ),
+ {
+ k: v.type._type_affinity
+ for (k, v) in stmt.compile(
+ dialect=postgresql.dialect()
+ ).binds.items()
+ },
{
"foo": Integer,
"data_m2": String,
compiled = stmt.compile(dialect=dialect)
eq_(
set(compiled._create_result_map()),
- set(["tablename_columnn_1", "tablename_columnn_2"]),
+ {"tablename_columnn_1", "tablename_columnn_2"},
)
class MyColumn(schema.Column):
def __init__(self, *args, **kw):
self.widget = kw.pop("widget", None)
- super(MyColumn, self).__init__(*args, **kw)
+ super().__init__(*args, **kw)
def _copy(self, *arg, **kw):
- c = super(MyColumn, self)._copy(*arg, **kw)
+ c = super()._copy(*arg, **kw)
c.widget = self.widget
return c
Table("t2", metadata, Column("x", Integer), schema="bar")
Table("t3", metadata, Column("x", Integer))
- eq_(metadata._schemas, set(["foo", "bar"]))
+ eq_(metadata._schemas, {"foo", "bar"})
eq_(len(metadata.tables), 3)
def test_schema_collection_remove(self):
t3 = Table("t3", metadata, Column("x", Integer), schema="bar")
metadata.remove(t3)
- eq_(metadata._schemas, set(["foo", "bar"]))
+ eq_(metadata._schemas, {"foo", "bar"})
eq_(len(metadata.tables), 2)
metadata.remove(t1)
- eq_(metadata._schemas, set(["bar"]))
+ eq_(metadata._schemas, {"bar"})
eq_(len(metadata.tables), 1)
def test_schema_collection_remove_all(self):
fk3 = ForeignKeyConstraint(["b", "c"], ["r.x", "r.y"])
t1.append_column(Column("b", Integer, fk1))
- eq_(t1.foreign_key_constraints, set([fk1.constraint]))
+ eq_(t1.foreign_key_constraints, {fk1.constraint})
t1.append_column(Column("c", Integer, fk2))
- eq_(t1.foreign_key_constraints, set([fk1.constraint, fk2.constraint]))
+ eq_(t1.foreign_key_constraints, {fk1.constraint, fk2.constraint})
t1.append_constraint(fk3)
eq_(
t1.foreign_key_constraints,
- set([fk1.constraint, fk2.constraint, fk3]),
+ {fk1.constraint, fk2.constraint, fk3},
)
def test_c_immutable(self):
evt_targets = ()
def _set_table(self, column, table):
- super(SchemaTypeTest.TrackEvents, self)._set_table(column, table)
+ super()._set_table(column, table)
self.column = column
self.table = table
def _on_table_create(self, target, bind, **kw):
- super(SchemaTypeTest.TrackEvents, self)._on_table_create(
- target, bind, **kw
- )
+ super()._on_table_create(target, bind, **kw)
self.evt_targets += (target,)
def _on_metadata_create(self, target, bind, **kw):
- super(SchemaTypeTest.TrackEvents, self)._on_metadata_create(
- target, bind, **kw
- )
+ super()._on_metadata_create(target, bind, **kw)
self.evt_targets += (target,)
# TODO: Enum and Boolean put TypeEngine first. Changing that here
return t1, t2, t3
def _assert_index_col_x(self, t, i, columns=True):
- eq_(t.indexes, set([i]))
+ eq_(t.indexes, {i})
if columns:
eq_(list(i.columns), [t.c.x])
else:
idx = Index("bar", MyThing(), t.c.y)
- eq_(set(t.indexes), set([idx]))
+ eq_(set(t.indexes), {idx})
def test_clauseelement_extraction_three(self):
t = Table("t", MetaData(), Column("x", Integer), Column("y", Integer))
class MyInteger(Integer):
class comparator_factory(TypeEngine.Comparator):
def __init__(self, expr):
- super(MyInteger.comparator_factory, self).__init__(expr)
+ super().__init__(expr)
def __add__(self, other):
return self.expr.op("goofy")(other)
class comparator_factory(TypeDecorator.Comparator):
def __init__(self, expr):
- super(MyInteger.comparator_factory, self).__init__(expr)
+ super().__init__(expr)
def __add__(self, other):
return self.expr.op("goofy")(other)
class comparator_factory(TypeDecorator.Comparator):
def __init__(self, expr):
- super(MyIntegerOne.comparator_factory, self).__init__(expr)
+ super().__init__(expr)
def __add__(self, other):
return self.expr.op("goofy")(other)
class SomeOtherInteger(Integer):
class comparator_factory(TypeEngine.Comparator):
def __init__(self, expr):
- super(SomeOtherInteger.comparator_factory, self).__init__(
- expr
- )
+ super().__init__(expr)
def __add__(self, other):
return self.expr.op("not goofy")(other)
class comparator_factory(TypeDecorator.Comparator):
def __init__(self, expr):
- super(MyInteger.comparator_factory, self).__init__(expr)
+ super().__init__(expr)
def __add__(self, other):
return self.expr.op("goofy")(other)
class MyInteger(Integer):
class comparator_factory(TypeEngine.Comparator):
def __init__(self, expr):
- super(MyInteger.comparator_factory, self).__init__(expr)
+ super().__init__(expr)
def __add__(self, other):
return self.expr.op("goofy")(other)
class MyInteger(Integer):
class comparator_factory(TypeEngine.Comparator):
def __init__(self, expr):
- super(MyInteger.comparator_factory, self).__init__(expr)
+ super().__init__(expr)
def foob(self, other):
return self.expr.op("foob")(other)
select(t1.c.t1_id, t2.c.t2_id, t3.c.t3_id)
.where(t1.c.name == "t1 #10")
.select_from(
- (
- t1.outerjoin(t2, t1.c.t1_id == t2.c.t1_id).outerjoin(
- t3, criteria
- )
+ t1.outerjoin(t2, t1.c.t1_id == t2.c.t1_id).outerjoin(
+ t3, criteria
)
)
)
select(t1.c.t1_id, t2.c.t2_id, t3.c.t3_id)
.where(t1.c.t1_id < 12)
.select_from(
- (
- t1.outerjoin(t2, t1.c.t1_id == t2.c.t1_id).outerjoin(
- t3, criteria
- )
+ t1.outerjoin(t2, t1.c.t1_id == t2.c.t1_id).outerjoin(
+ t3, criteria
)
)
)
select(t1.c.t1_id, t2.c.t2_id, t3.c.t3_id)
.where(t2.c.name == "t2 #20")
.select_from(
- (
- t1.outerjoin(t2, t1.c.t1_id == t2.c.t1_id).outerjoin(
- t3, criteria
- )
+ t1.outerjoin(t2, t1.c.t1_id == t2.c.t1_id).outerjoin(
+ t3, criteria
)
)
)
select(t1.c.t1_id, t2.c.t2_id, t3.c.t3_id)
.where(t2.c.t2_id < 29)
.select_from(
- (
- t1.outerjoin(t2, t1.c.t1_id == t2.c.t1_id).outerjoin(
- t3, criteria
- )
+ t1.outerjoin(t2, t1.c.t1_id == t2.c.t1_id).outerjoin(
+ t3, criteria
)
)
)
select(t1.c.t1_id, t2.c.t2_id, t3.c.t3_id)
.where(t3.c.name == "t3 #30")
.select_from(
- (
- t1.outerjoin(t2, t1.c.t1_id == t2.c.t1_id).outerjoin(
- t3, criteria
- )
+ t1.outerjoin(t2, t1.c.t1_id == t2.c.t1_id).outerjoin(
+ t3, criteria
)
)
)
select(t1.c.t1_id, t2.c.t2_id, t3.c.t3_id)
.where(t3.c.t3_id < 39)
.select_from(
- (
- t1.outerjoin(t2, t1.c.t1_id == t2.c.t1_id).outerjoin(
- t3, criteria
- )
+ t1.outerjoin(t2, t1.c.t1_id == t2.c.t1_id).outerjoin(
+ t3, criteria
)
)
)
select(t1.c.t1_id, t2.c.t2_id, t3.c.t3_id)
.where(and_(t1.c.t1_id < 19, t3.c.t3_id < 39))
.select_from(
- (
- t1.outerjoin(t2, t1.c.t1_id == t2.c.t1_id).outerjoin(
- t3, criteria
- )
+ t1.outerjoin(t2, t1.c.t1_id == t2.c.t1_id).outerjoin(
+ t3, criteria
)
)
)
select(t1.c.t1_id, t2.c.t2_id, t3.c.t3_id)
.where(and_(t1.c.name == "t1 #10", t2.c.name == "t2 #20"))
.select_from(
- (
- t1.outerjoin(t2, t1.c.t1_id == t2.c.t1_id).outerjoin(
- t3, criteria
- )
+ t1.outerjoin(t2, t1.c.t1_id == t2.c.t1_id).outerjoin(
+ t3, criteria
)
)
)
select(t1.c.t1_id, t2.c.t2_id, t3.c.t3_id)
.where(and_(t1.c.t1_id < 12, t2.c.t2_id < 39))
.select_from(
- (
- t1.outerjoin(t2, t1.c.t1_id == t2.c.t1_id).outerjoin(
- t3, criteria
- )
+ t1.outerjoin(t2, t1.c.t1_id == t2.c.t1_id).outerjoin(
+ t3, criteria
)
)
)
)
)
.select_from(
- (
- t1.outerjoin(t2, t1.c.t1_id == t2.c.t1_id).outerjoin(
- t3, criteria
- )
+ t1.outerjoin(t2, t1.c.t1_id == t2.c.t1_id).outerjoin(
+ t3, criteria
)
)
)
select(t1.c.t1_id, t2.c.t2_id, t3.c.t3_id)
.where(and_(t1.c.t1_id < 19, t2.c.t2_id < 29, t3.c.t3_id < 39))
.select_from(
- (
- t1.outerjoin(t2, t1.c.t1_id == t2.c.t1_id).outerjoin(
- t3, criteria
- )
+ t1.outerjoin(t2, t1.c.t1_id == t2.c.t1_id).outerjoin(
+ t3, criteria
)
)
)
.where(
t1.c.name == "t1 #10",
)
- .select_from((t1.join(t2).outerjoin(t3, criteria)))
+ .select_from(t1.join(t2).outerjoin(t3, criteria))
)
self.assertRows(expr, [(10, 20, 30)])
.where(
t2.c.name == "t2 #20",
)
- .select_from((t1.join(t2).outerjoin(t3, criteria)))
+ .select_from(t1.join(t2).outerjoin(t3, criteria))
)
self.assertRows(expr, [(10, 20, 30)])
.where(
t3.c.name == "t3 #30",
)
- .select_from((t1.join(t2).outerjoin(t3, criteria)))
+ .select_from(t1.join(t2).outerjoin(t3, criteria))
)
self.assertRows(expr, [(10, 20, 30)])
.where(
and_(t1.c.name == "t1 #10", t2.c.name == "t2 #20"),
)
- .select_from((t1.join(t2).outerjoin(t3, criteria)))
+ .select_from(t1.join(t2).outerjoin(t3, criteria))
)
self.assertRows(expr, [(10, 20, 30)])
.where(
and_(t2.c.name == "t2 #20", t3.c.name == "t3 #30"),
)
- .select_from((t1.join(t2).outerjoin(t3, criteria)))
+ .select_from(t1.join(t2).outerjoin(t3, criteria))
)
self.assertRows(expr, [(10, 20, 30)])
t3.c.name == "t3 #30",
),
)
- .select_from((t1.join(t2).outerjoin(t3, criteria)))
+ .select_from(t1.join(t2).outerjoin(t3, criteria))
)
self.assertRows(expr, [(10, 20, 30)])
-#!coding: utf-8
-
from sqlalchemy import CheckConstraint
from sqlalchemy import Column
from sqlalchemy import column
def test_unformat_custom(self):
class Custom(compiler.IdentifierPreparer):
def __init__(self, dialect):
- super(Custom, self).__init__(
- dialect, initial_quote="`", final_quote="`"
- )
+ super().__init__(dialect, initial_quote="`", final_quote="`")
def _escape_identifier(self, value):
return value.replace("`", "``")
def test_apply_map_quoted(self):
q1 = _anonymous_label(quoted_name("x%s", True))
- q2 = q1.apply_map(("bar"))
+ q2 = q1.apply_map("bar")
eq_(q2, "xbar")
eq_(q2.quote, True)
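(The apply_map fix above removes parentheses only, not a tuple: ("bar") is just
the string "bar", because tuples are made by commas, not parentheses.
    assert ("bar") == "bar"             # parenthesized string
    assert isinstance(("bar",), tuple)  # the trailing comma makes the tuple
)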
def test_apply_map_plain(self):
q1 = _anonymous_label(quoted_name("x%s", None))
- q2 = q1.apply_map(("bar"))
+ q2 = q1.apply_map("bar")
eq_(q2, "xbar")
self._assert_not_quoted(q2)
row = result.first()
eq_(
- set(
- [
- users.c.user_id in row._mapping,
- addresses.c.user_id in row._mapping,
- ]
- ),
- set([True]),
+ {
+ users.c.user_id in row._mapping,
+ addresses.c.user_id in row._mapping,
+ },
+ {True},
)
@testing.combinations(
def test_handle_error_in_fetch(self, strategy_cls, method_name):
class cursor:
def raise_(self):
- raise IOError("random non-DBAPI error during cursor operation")
+ raise OSError("random non-DBAPI error during cursor operation")
def fetchone(self):
self.raise_()
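(The IOError substitution here is an identity rewrite: since Python 3.3, per
PEP 3151, IOError is a builtin alias of OSError.
    assert IOError is OSError
    try:
        raise IOError("boom")
    except OSError as err:  # catches the alias too
        assert str(err) == "boom"
)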
def test_buffered_row_close_error_during_fetchone(self):
def raise_(**kw):
- raise IOError("random non-DBAPI error during cursor operation")
+ raise OSError("random non-DBAPI error during cursor operation")
with self._proxy_fixture(_cursor.BufferedRowCursorFetchStrategy):
with self.engine.connect() as conn:
eq_(
s1.selected_columns.foo.proxy_set,
- set(
- [s1.selected_columns.foo, scalar_select, scalar_select.element]
- ),
+ {s1.selected_columns.foo, scalar_select, scalar_select.element},
)
eq_(
s2.selected_columns.foo.proxy_set,
- set(
- [s2.selected_columns.foo, scalar_select, scalar_select.element]
- ),
+ {s2.selected_columns.foo, scalar_select, scalar_select.element},
)
assert (
eq_(
s1.c.foo.proxy_set,
- set([s1.c.foo, scalar_select, scalar_select.element]),
+ {s1.c.foo, scalar_select, scalar_select.element},
)
eq_(
s2.c.foo.proxy_set,
- set([s2.c.foo, scalar_select, scalar_select.element]),
+ {s2.c.foo, scalar_select, scalar_select.element},
)
assert s1.corresponding_column(scalar_select) is s1.c.foo
s2c1 = s2._clone()
s3c1 = s3._clone()
- eq_(base._cloned_intersection([s1c1, s3c1], [s2c1, s1c2]), set([s1c1]))
+ eq_(base._cloned_intersection([s1c1, s3c1], [s2c1, s1c2]), {s1c1})
def test_cloned_difference(self):
t1 = table("t1", column("x"))
eq_(
base._cloned_difference([s1c1, s2c1, s3c1], [s2c1, s1c2]),
- set([s3c1]),
+ {s3c1},
)
def test_distance_on_aliases(self):
q = Column("q", Integer)
a.append_column(q)
a._refresh_for_new_column(q)
- eq_(a.foreign_keys, set([fk]))
+ eq_(a.foreign_keys, {fk})
fk2 = ForeignKey("g.id")
p = Column("p", Integer, fk2)
a.append_column(p)
a._refresh_for_new_column(p)
- eq_(a.foreign_keys, set([fk, fk2]))
+ eq_(a.foreign_keys, {fk, fk2})
def test_fk_join(self):
m = MetaData()
q = Column("q", Integer)
b.append_column(q)
j._refresh_for_new_column(q)
- eq_(j.foreign_keys, set([fk]))
+ eq_(j.foreign_keys, {fk})
fk2 = ForeignKey("g.id")
p = Column("p", Integer, fk2)
b.append_column(p)
j._refresh_for_new_column(p)
- eq_(j.foreign_keys, set([fk, fk2]))
+ eq_(j.foreign_keys, {fk, fk2})
class AnonLabelTest(fixtures.TestBase):
)
s1 = select(t1, t2)
s2 = s1.reduce_columns(only_synonyms=False)
- eq_(set(s2.selected_columns), set([t1.c.x, t1.c.y, t2.c.q]))
+ eq_(set(s2.selected_columns), {t1.c.x, t1.c.y, t2.c.q})
s2 = s1.reduce_columns()
- eq_(set(s2.selected_columns), set([t1.c.x, t1.c.y, t2.c.z, t2.c.q]))
+ eq_(set(s2.selected_columns), {t1.c.x, t1.c.y, t2.c.z, t2.c.q})
def test_reduce_only_synonym_fk(self):
m = MetaData()
s1 = s1.reduce_columns(only_synonyms=True)
eq_(
set(s1.selected_columns),
- set(
- [
- s1.selected_columns.x,
- s1.selected_columns.y,
- s1.selected_columns.q,
- ]
- ),
+ {
+ s1.selected_columns.x,
+ s1.selected_columns.y,
+ s1.selected_columns.q,
+ },
)
def test_reduce_only_synonym_lineage(self):
s2 = select(t1, s1).where(t1.c.x == s1.c.x).where(s1.c.y == t1.c.z)
eq_(
set(s2.reduce_columns().selected_columns),
- set([t1.c.x, t1.c.y, t1.c.z, s1.c.y, s1.c.z]),
+ {t1.c.x, t1.c.y, t1.c.z, s1.c.y, s1.c.z},
)
# reverse order, s1.c.x wins
s2 = select(s1, t1).where(t1.c.x == s1.c.x).where(s1.c.y == t1.c.z)
eq_(
set(s2.reduce_columns().selected_columns),
- set([s1.c.x, t1.c.y, t1.c.z, s1.c.y, s1.c.z]),
+ {s1.c.x, t1.c.y, t1.c.z, s1.c.y, s1.c.z},
)
def test_reduce_aliased_join(self):
for obj in [t, t.c.x, a, t.c.x > 1, (t.c.x > 1).label(None)]:
annot = obj._annotate({})
- eq_(set([obj]), set([annot]))
+ eq_({obj}, {annot})
def test_clone_annotations_dont_hash(self):
t = table("t", column("x"))
for obj in [s, s2]:
annot = obj._annotate({})
- ne_(set([obj]), set([annot]))
+ ne_({obj}, {annot})
def test_replacement_traverse_preserve(self):
"""test that replacement traverse that hits an unannotated column
def _mapping(self, stmt):
compiled = stmt.compile()
- return dict(
- (elem, key)
+ return {
+ elem: key
for key, elements in compiled._create_result_map().items()
for elem in elements[1]
- )
+ }
def test_select_label_alt_name(self):
t = self._fixture()
)
def _assert_type_map(self, t, compare):
- map_ = dict((b.key, b.type) for b in t._bindparams.values())
+ map_ = {b.key: b.type for b in t._bindparams.values()}
for k in compare:
assert compare[k]._type_affinity is map_[k]._type_affinity
def _mapping(self, stmt):
compiled = stmt.compile()
- return dict(
- (elem, key)
+ return {
+ elem: key
for key, elements in compiled._create_result_map().items()
for elem in elements[1]
- )
+ }
def test_select_label_alt_name(self):
t = self._xy_table_fixture()
t = t.bindparams(bar=String)
t = t.bindparams(bindparam("bat", value="bat"))
- eq_(set(t.element._bindparams), set(["bat", "foo", "bar"]))
+ eq_(set(t.element._bindparams), {"bat", "foo", "bar"})
class TextErrorsTest(fixtures.TestBase, AssertsCompiledSQL):
-# coding: utf-8
import datetime
import decimal
import importlib
cache_ok = True
def bind_processor(self, dialect):
- impl_processor = super(MyDecoratedType, self).bind_processor(
- dialect
- ) or (lambda value: value)
+ impl_processor = super().bind_processor(dialect) or (
+ lambda value: value
+ )
def process(value):
if value is None:
return process
def result_processor(self, dialect, coltype):
- impl_processor = super(MyDecoratedType, self).result_processor(
+ impl_processor = super().result_processor(
dialect, coltype
) or (lambda value: value)
cache_ok = True
def bind_processor(self, dialect):
- impl_processor = super(MyUnicodeType, self).bind_processor(
- dialect
- ) or (lambda value: value)
+ impl_processor = super().bind_processor(dialect) or (
+ lambda value: value
+ )
def process(value):
if value is None:
return process
def result_processor(self, dialect, coltype):
- impl_processor = super(MyUnicodeType, self).result_processor(
+ impl_processor = super().result_processor(
dialect, coltype
) or (lambda value: value)
if dialect.name == "sqlite":
return String(50)
else:
- return super(MyType, self).load_dialect_impl(dialect)
+ return super().load_dialect_impl(dialect)
sl = dialects.sqlite.dialect()
pg = dialects.postgresql.dialect()
def test_user_defined_dialect_specific_args(self):
class MyType(types.UserDefinedType):
def __init__(self, foo="foo", **kwargs):
- super(MyType, self).__init__()
+ super().__init__()
self.foo = foo
self.dialect_specific_args = kwargs
.where(users.c.name == "ed")
)
- eq_(set(ret.prefetch_cols()), set([users.c.some_update]))
+ eq_(set(ret.prefetch_cols()), {users.c.some_update})
expected = [
(2, 8, "updated"),
eq_(
set(ret.prefetch_cols()),
- set([users.c.some_update, foobar.c.some_update]),
+ {users.c.some_update, foobar.c.some_update},
)
expected = [
config = parse_pyproject_toml(home / "pyproject.toml")
BLACK_MODE = Mode(
- target_versions=set(
+ target_versions={
TargetVersion[val.upper()]
for val in config.get("target_version", [])
if val != "py27"
- ),
+ },
line_length=config.get("line_length", DEFAULT_LINE_LENGTH)
if args.project_line_length
else DEFAULT_LINE_LENGTH,