- id: pyupgrade
args: ["--py36-plus"]
- repo: https://github.com/asottile/reorder_python_imports
- rev: v2.6.0
+ rev: v2.7.1
hooks:
- id: reorder-python-imports
args: ["--application-directories", "src"]
+ additional_dependencies: ["setuptools>60.9"]
- repo: https://github.com/psf/black
- rev: 21.12b0
+ rev: 22.1.0
hooks:
- id: black
- repo: https://github.com/PyCQA/flake8
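The hunks above pin newer tool revisions in .pre-commit-config.yaml: reorder_python_imports moves from v2.6.0 to v2.7.1 (picking up a setuptools constraint as an extra dependency) and black moves from the 21.12b0 beta to the first stable release, 22.1.0. Rev bumps of this shape are what `pre-commit autoupdate` emits. A hypothetical audit helper, assuming PyYAML is installed, that lists every pinned rev for review:

```python
# Hypothetical helper, not part of this change: print each hook repo
# and its pinned rev from .pre-commit-config.yaml, so bumps like
# v2.6.0 -> v2.7.1 and 21.12b0 -> 22.1.0 are easy to audit.
import yaml

with open(".pre-commit-config.yaml") as f:
    config = yaml.safe_load(f)

for repo in config["repos"]:
    print(f"{repo.get('rev', '<unpinned>'):>12}  {repo['repo']}")
```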
return round(value, precision)
func = getattr(math, method)
- return t.cast(float, func(value * (10 ** precision)) / (10 ** precision))
+ return t.cast(float, func(value * (10**precision)) / (10**precision))
class _GroupTuple(t.NamedTuple):
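In the round filter above, `10 ** precision` becomes `10**precision`. That is black 22.1.0's stable style: spaces around the power operator are dropped when both operands are simple (names, numeric literals, attribute access). A minimal sketch of the rule as I read it, with stand-in values rather than code from this diff:

```python
# Sketch of black 22.1.0's power-operator spacing (my reading of the
# 2022 stable style, not taken verbatim from this diff).
precision = 2

scaled = 10**precision           # both operands simple: spaces removed
shifted = 10 ** (precision + 1)  # compound operand: spaces kept

print(scaled, shifted)  # 100 1000
```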
self.keep_trailing_newline = environment.keep_trailing_newline
root_raw_re = (
- fr"(?P<raw_begin>{block_start_re}(\-|\+|)\s*raw\s*"
- fr"(?:\-{block_end_re}\s*|{block_end_re}))"
+ rf"(?P<raw_begin>{block_start_re}(\-|\+|)\s*raw\s*"
+ rf"(?:\-{block_end_re}\s*|{block_end_re}))"
)
root_parts_re = "|".join(
- [root_raw_re] + [fr"(?P<{n}>{r}(\-|\+|))" for n, r in root_tag_rules]
+ [root_raw_re] + [rf"(?P<{n}>{r}(\-|\+|))" for n, r in root_tag_rules]
)
# global lexing rules
"root": [
# directives
_Rule(
- c(fr"(.*?)(?:{root_parts_re})"),
+ c(rf"(.*?)(?:{root_parts_re})"),
OptionalLStrip(TOKEN_DATA, "#bygroup"), # type: ignore
"#bygroup",
),
TOKEN_COMMENT_BEGIN: [
_Rule(
c(
- fr"(.*?)((?:\+{comment_end_re}|\-{comment_end_re}\s*"
- fr"|{comment_end_re}{block_suffix_re}))"
+ rf"(.*?)((?:\+{comment_end_re}|\-{comment_end_re}\s*"
+ rf"|{comment_end_re}{block_suffix_re}))"
),
(TOKEN_COMMENT, TOKEN_COMMENT_END),
"#pop",
TOKEN_BLOCK_BEGIN: [
_Rule(
c(
- fr"(?:\+{block_end_re}|\-{block_end_re}\s*"
- fr"|{block_end_re}{block_suffix_re})"
+ rf"(?:\+{block_end_re}|\-{block_end_re}\s*"
+ rf"|{block_end_re}{block_suffix_re})"
),
TOKEN_BLOCK_END,
"#pop",
# variables
TOKEN_VARIABLE_BEGIN: [
_Rule(
- c(fr"\-{variable_end_re}\s*|{variable_end_re}"),
+ c(rf"\-{variable_end_re}\s*|{variable_end_re}"),
TOKEN_VARIABLE_END,
"#pop",
)
TOKEN_RAW_BEGIN: [
_Rule(
c(
- fr"(.*?)((?:{block_start_re}(\-|\+|))\s*endraw\s*"
- fr"(?:\+{block_end_re}|\-{block_end_re}\s*"
- fr"|{block_end_re}{block_suffix_re}))"
+ rf"(.*?)((?:{block_start_re}(\-|\+|))\s*endraw\s*"
+ rf"(?:\+{block_end_re}|\-{block_end_re}\s*"
+ rf"|{block_end_re}{block_suffix_re}))"
),
OptionalLStrip(TOKEN_DATA, TOKEN_RAW_END), # type: ignore
"#pop",
node.names = []
def parse_context() -> bool:
- if (
- self.stream.current.value
- in {
- "with",
- "without",
- }
- and self.stream.look().test("name:context")
- ):
+ if self.stream.current.value in {
+ "with",
+ "without",
+ } and self.stream.look().test("name:context"):
node.with_context = next(self.stream).value == "with"
self.stream.skip()
return True
kwargs = []
if self.stream.current.type == "lparen":
args, kwargs, dyn_args, dyn_kwargs = self.parse_call_args()
- elif (
- self.stream.current.type
- in {
- "name",
- "string",
- "integer",
- "float",
- "lparen",
- "lbracket",
- "lbrace",
- }
- and not self.stream.current.test_any("name:else", "name:or", "name:and")
- ):
+ elif self.stream.current.type in {
+ "name",
+ "string",
+ "integer",
+ "float",
+ "lparen",
+ "lbracket",
+ "lbrace",
+ } and not self.stream.current.test_any("name:else", "name:or", "name:and"):
if self.stream.current.test("name:is"):
self.fail("You cannot chain multiple tests with is")
arg_node = self.parse_primary()
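Both parser hunks above show the other visible rule in black 22.1.0's stable style, as this diff demonstrates: when a multi-line condition contains a bracketed collection literal, the literal itself carries the line breaks, so the explicit parentheses that 21.12b0 wrapped around the whole `if`/`elif` condition are dropped. The control flow is untouched. A sketch of the before-and-after shape, with hypothetical names:

```python
# Reformat only; behavior is identical. The set literal is already
# bracketed, so it can span lines on its own and the wrapping
# parentheses around the condition become redundant.
token_value = "with"
lookahead_ok = True

# old shape (black 21.12b0):
# if (
#     token_value
#     in {
#         "with",
#         "without",
#     }
#     and lookahead_ok
# ):
if token_value in {
    "with",
    "without",
} and lookahead_ok:
    print("explicit context modifier")
```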