git.ipfire.org Git - thirdparty/jinja.git/commitdiff
update pre-commit hooks
author: David Lord <davidism@gmail.com>
Fri, 18 Feb 2022 00:53:43 +0000 (16:53 -0800)
committer: David Lord <davidism@gmail.com>
Fri, 18 Feb 2022 00:53:43 +0000 (16:53 -0800)
.pre-commit-config.yaml
src/jinja2/filters.py
src/jinja2/lexer.py
src/jinja2/parser.py

index b4a7c7d559052b02f5aa66016b1f20e35f685732..559156dc72b4e6e18b061dcab07818dca666fa9f 100644 (file)
@@ -8,12 +8,13 @@ repos:
       - id: pyupgrade
         args: ["--py36-plus"]
   - repo: https://github.com/asottile/reorder_python_imports
-    rev: v2.6.0
+    rev: v2.7.1
     hooks:
       - id: reorder-python-imports
         args: ["--application-directories", "src"]
+        additional_dependencies: ["setuptools>60.9"]
   - repo: https://github.com/psf/black
-    rev: 21.12b0
+    rev: 22.1.0
     hooks:
       - id: black
   - repo: https://github.com/PyCQA/flake8
index 7ab929248e6434fe39154386b36a89a34836ffd6..eeed47604f9bcab4e1608cf76ac3a4c607006f64 100644 (file)
@@ -1164,7 +1164,7 @@ def do_round(
         return round(value, precision)
 
     func = getattr(math, method)
-    return t.cast(float, func(value * (10 ** precision)) / (10 ** precision))
+    return t.cast(float, func(value * (10**precision)) / (10**precision))
 
 
 class _GroupTuple(t.NamedTuple):
index b46a7e1d260a4b252453f397a950808ae595248b..a2eaa8debf1784e5730486b0b348c54b27365e0f 100644 (file)
@@ -515,11 +515,11 @@ class Lexer:
         self.keep_trailing_newline = environment.keep_trailing_newline
 
         root_raw_re = (
-            fr"(?P<raw_begin>{block_start_re}(\-|\+|)\s*raw\s*"
-            fr"(?:\-{block_end_re}\s*|{block_end_re}))"
+            rf"(?P<raw_begin>{block_start_re}(\-|\+|)\s*raw\s*"
+            rf"(?:\-{block_end_re}\s*|{block_end_re}))"
         )
         root_parts_re = "|".join(
-            [root_raw_re] + [fr"(?P<{n}>{r}(\-|\+|))" for n, r in root_tag_rules]
+            [root_raw_re] + [rf"(?P<{n}>{r}(\-|\+|))" for n, r in root_tag_rules]
         )
 
         # global lexing rules
@@ -527,7 +527,7 @@ class Lexer:
             "root": [
                 # directives
                 _Rule(
-                    c(fr"(.*?)(?:{root_parts_re})"),
+                    c(rf"(.*?)(?:{root_parts_re})"),
                     OptionalLStrip(TOKEN_DATA, "#bygroup"),  # type: ignore
                     "#bygroup",
                 ),
@@ -538,8 +538,8 @@ class Lexer:
             TOKEN_COMMENT_BEGIN: [
                 _Rule(
                     c(
-                        fr"(.*?)((?:\+{comment_end_re}|\-{comment_end_re}\s*"
-                        fr"|{comment_end_re}{block_suffix_re}))"
+                        rf"(.*?)((?:\+{comment_end_re}|\-{comment_end_re}\s*"
+                        rf"|{comment_end_re}{block_suffix_re}))"
                     ),
                     (TOKEN_COMMENT, TOKEN_COMMENT_END),
                     "#pop",
@@ -550,8 +550,8 @@ class Lexer:
             TOKEN_BLOCK_BEGIN: [
                 _Rule(
                     c(
-                        fr"(?:\+{block_end_re}|\-{block_end_re}\s*"
-                        fr"|{block_end_re}{block_suffix_re})"
+                        rf"(?:\+{block_end_re}|\-{block_end_re}\s*"
+                        rf"|{block_end_re}{block_suffix_re})"
                     ),
                     TOKEN_BLOCK_END,
                     "#pop",
@@ -561,7 +561,7 @@ class Lexer:
             # variables
             TOKEN_VARIABLE_BEGIN: [
                 _Rule(
-                    c(fr"\-{variable_end_re}\s*|{variable_end_re}"),
+                    c(rf"\-{variable_end_re}\s*|{variable_end_re}"),
                     TOKEN_VARIABLE_END,
                     "#pop",
                 )
@@ -571,9 +571,9 @@ class Lexer:
             TOKEN_RAW_BEGIN: [
                 _Rule(
                     c(
-                        fr"(.*?)((?:{block_start_re}(\-|\+|))\s*endraw\s*"
-                        fr"(?:\+{block_end_re}|\-{block_end_re}\s*"
-                        fr"|{block_end_re}{block_suffix_re}))"
+                        rf"(.*?)((?:{block_start_re}(\-|\+|))\s*endraw\s*"
+                        rf"(?:\+{block_end_re}|\-{block_end_re}\s*"
+                        rf"|{block_end_re}{block_suffix_re}))"
                     ),
                     OptionalLStrip(TOKEN_DATA, TOKEN_RAW_END),  # type: ignore
                     "#pop",
index 44473963142dc8735baeededc33054253c4ef819..cefce2dfa1d2a4171838b0d0135af8ea3ff7d62c 100644 (file)
@@ -364,14 +364,10 @@ class Parser:
         node.names = []
 
         def parse_context() -> bool:
-            if (
-                self.stream.current.value
-                in {
-                    "with",
-                    "without",
-                }
-                and self.stream.look().test("name:context")
-            ):
+            if self.stream.current.value in {
+                "with",
+                "without",
+            } and self.stream.look().test("name:context"):
                 node.with_context = next(self.stream).value == "with"
                 self.stream.skip()
                 return True
@@ -957,19 +953,15 @@ class Parser:
         kwargs = []
         if self.stream.current.type == "lparen":
             args, kwargs, dyn_args, dyn_kwargs = self.parse_call_args()
-        elif (
-            self.stream.current.type
-            in {
-                "name",
-                "string",
-                "integer",
-                "float",
-                "lparen",
-                "lbracket",
-                "lbrace",
-            }
-            and not self.stream.current.test_any("name:else", "name:or", "name:and")
-        ):
+        elif self.stream.current.type in {
+            "name",
+            "string",
+            "integer",
+            "float",
+            "lparen",
+            "lbracket",
+            "lbrace",
+        } and not self.stream.current.test_any("name:else", "name:or", "name:and"):
             if self.stream.current.test("name:is"):
                 self.fail("You cannot chain multiple tests with is")
             arg_node = self.parse_primary()