arm: suppress aes erratum when forwarding from aes
author     Richard Earnshaw <rearnsha@arm.com>
           Mon, 1 Nov 2021 13:23:26 +0000 (13:23 +0000)
committer  Richard Earnshaw <rearnsha@arm.com>
           Thu, 20 Jan 2022 11:15:22 +0000 (11:15 +0000)
AES operations are commonly chained, and since the result of one AES
operation is never a 32-bit value, the forwarded result does not need
an additional mitigation instruction.  We handle this common case by
adding patterns that recognize the chained operations and omit the
mitigation.
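
For illustration, the chained sequence these patterns target can be
written with the ACLE intrinsics from <arm_neon.h>.  This is a minimal
sketch, assuming the mitigation is enabled via the option that sets
fix_aes_erratum_1742098 (the function name below is illustrative, not
part of this patch):

    #include <arm_neon.h>

    uint8x16_t
    aes_round_chain (uint8x16_t state, uint8x16_t k0, uint8x16_t k1)
    {
      /* The AESE result feeds AESMC directly; an AES instruction
         always writes the full 128-bit register, so no mitigation
         instruction is needed between them.  */
      state = vaesmcq_u8 (vaeseq_u8 (state, k0));
      /* Likewise for the AESMC result forwarded into the next AESE.  */
      state = vaeseq_u8 (state, k1);
      return state;
    }

With the new patterns, the protected AESE/AESMC results above are
consumed directly, omitting the extra mitigation instruction that the
generic protect pattern would otherwise emit.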

gcc/ChangeLog:

* config/arm/crypto.md (crypto_<CRYPTO_AESMC:crypto_pattern>_protected):
New pattern.
(aarch32_crypto_aese_fused_protected): Likewise.
(aarch32_crypto_aesd_fused_protected): Likewise.

diff --git a/gcc/config/arm/crypto.md b/gcc/config/arm/crypto.md
index fbee1829ce8e2cc836f845e0cd18fb1055eb7da0..df857352382bec328f93fbb9cbca198851f2de97 100644
--- a/gcc/config/arm/crypto.md
+++ b/gcc/config/arm/crypto.md
   [(set_attr "type" "neon_move_q")]
 )
 
+;; An AESMC operation can feed directly into a subsequent AES
+;; operation without needing mitigation.
+(define_insn "*crypto_<CRYPTO_AESMC:crypto_pattern>_protected"
+  [(set (match_operand:<crypto_mode> 0 "register_operand" "=w")
+       (unspec:<crypto_mode>
+        [(unspec:<crypto_mode>
+          [(match_operand:<crypto_mode> 1 "register_operand" "w")]
+          CRYPTO_AESMC)]
+        UNSPEC_AES_PROTECT))]
+  "TARGET_CRYPTO && fix_aes_erratum_1742098"
+  "<crypto_pattern>.<crypto_size_sfx>\\t%q0, %q1"
+  [(set_attr "type" "<crypto_type>")]
+)
+
 ;; When AESE/AESMC fusion is enabled we really want to keep the two together
 ;; and enforce the register dependency without scheduling or register
 ;; allocation messing up the order or introducing moves in between.
    (set_attr "length" "8")]
 )
 
+;; Similarly when mitigation is enabled: the fused sequence does not
+;; need it in this case.
+(define_insn "*aarch32_crypto_aese_fused_protected"
+  [(set (match_operand:V16QI 0 "register_operand" "=w")
+       (unspec:V16QI
+        [(unspec:V16QI
+          [(unspec:V16QI [(xor:V16QI
+                           (match_operand:V16QI 1 "register_operand" "%0")
+                           (match_operand:V16QI 2 "register_operand" "w"))]
+            UNSPEC_AESE)]
+          UNSPEC_AESMC)]
+        UNSPEC_AES_PROTECT))]
+  "TARGET_CRYPTO && fix_aes_erratum_1742098
+   && arm_fusion_enabled_p (tune_params::FUSE_AES_AESMC)"
+  "aese.8\\t%q0, %q2\;aesmc.8\\t%q0, %q0"
+  [(set_attr "type" "crypto_aese")
+   (set_attr "length" "8")]
+)
+
 ;; When AESD/AESIMC fusion is enabled we really want to keep the two together
 ;; and enforce the register dependency without scheduling or register
 ;; allocation messing up the order or introducing moves in between.
    (set_attr "length" "8")]
 )
 
+(define_insn "*aarch32_crypto_aesd_fused_protected"
+  [(set (match_operand:V16QI 0 "register_operand" "=w")
+       (unspec:V16QI
+        [(unspec:V16QI
+          [(unspec:V16QI [(xor:V16QI
+                           (match_operand:V16QI 1 "register_operand" "%0")
+                           (match_operand:V16QI 2 "register_operand" "w"))]
+            UNSPEC_AESD)]
+          UNSPEC_AESIMC)]
+        UNSPEC_AES_PROTECT))]
+  "TARGET_CRYPTO && fix_aes_erratum_1742098
+   && arm_fusion_enabled_p (tune_params::FUSE_AES_AESMC)"
+  "aesd.8\\t%q0, %q2\;aesimc.8\\t%q0, %q0"
+  [(set_attr "type" "crypto_aese")
+   (set_attr "length" "8")]
+)
+
 (define_insn "crypto_<CRYPTO_BINARY:crypto_pattern>"
   [(set (match_operand:<crypto_mode> 0 "register_operand" "=w")
        (unspec:<crypto_mode>