arm: gimple fold aes[ed] [PR114522]
author    Christophe Lyon <christophe.lyon@linaro.org>  Tue, 11 Feb 2025 20:51:23 +0000
committer Christophe Lyon <christophe.lyon@linaro.org>  Thu, 13 Feb 2025 12:23:34 +0000
This is almost a copy/paste of the recent aarch64 version of this patch,
but it is a bit more intrusive because it also introduces
arm_general_gimple_fold_builtin.

With this patch,
gcc.target/arm/aes_xor_combine.c scan-assembler-not veor
passes again.
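
For illustration, here is a minimal sketch of the kind of source the fold
targets (hypothetical function name, compiled for a target with the crypto
extension; the actual aes_xor_combine.c test may differ).  Because the AESE
and AESD instructions XOR their two operands as their first step,
vaeseq_u8 (x ^ y, 0) computes the same result as vaeseq_u8 (x, y), so the
separate veor can be dropped:

  #include <arm_neon.h>

  /* Hypothetical example, not the test case itself.  Without the fold the
     veorq_u8 survives as a veor instruction; with it, the call is rewritten
     to vaeseq_u8 (data, key) and no veor is emitted.  */
  uint8x16_t
  encrypt_round (uint8x16_t data, uint8x16_t key)
  {
    return vaeseq_u8 (veorq_u8 (data, key), vdupq_n_u8 (0));
  }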

gcc/ChangeLog:

PR target/114522
* config/arm/arm-builtins.cc (arm_fold_aes_op): New function.
(arm_general_gimple_fold_builtin): New function.
* config/arm/arm-builtins.h (arm_general_gimple_fold_builtin): New
prototype.
* config/arm/arm.cc (arm_gimple_fold_builtin): Call
arm_general_gimple_fold_builtin as needed.

gcc/config/arm/arm-builtins.cc
gcc/config/arm/arm-builtins.h
gcc/config/arm/arm.cc

diff --git a/gcc/config/arm/arm-builtins.cc b/gcc/config/arm/arm-builtins.cc
index e860607686c6ae2b456c07748e108a38e74fd778..c56ab5db985b2d04c0815a722ba6417d0266e6b6 100644
@@ -45,6 +45,9 @@
 #include "arm-builtins.h"
 #include "stringpool.h"
 #include "attribs.h"
+#include "basic-block.h"
+#include "gimple.h"
+#include "ssa.h"
 
 #define SIMD_MAX_BUILTIN_ARGS 7
 
@@ -4053,4 +4056,56 @@ arm_cde_end_args (tree fndecl)
     }
 }
 
+/* Fold calls to vaeseq_u8 and vaesdq_u8: `vaeseq_u8 (x ^ y, 0)` gets
+   folded into `vaeseq_u8 (x, y)`, which is equivalent because the AES
+   instructions XOR their two operands as their first step.  */
+static gimple *
+arm_fold_aes_op (gcall *stmt)
+{
+  tree arg0 = gimple_call_arg (stmt, 0);
+  tree arg1 = gimple_call_arg (stmt, 1);
+  if (integer_zerop (arg0))
+    arg0 = arg1;
+  else if (!integer_zerop (arg1))
+    return nullptr;
+  if (TREE_CODE (arg0) != SSA_NAME)
+    return nullptr;
+  if (!has_single_use (arg0))
+    return nullptr;
+  auto *s = dyn_cast<gassign *> (SSA_NAME_DEF_STMT (arg0));
+  if (!s || gimple_assign_rhs_code (s) != BIT_XOR_EXPR)
+    return nullptr;
+  gimple_call_set_arg (stmt, 0, gimple_assign_rhs1 (s));
+  gimple_call_set_arg (stmt, 1, gimple_assign_rhs2 (s));
+  return stmt;
+}
+
+/* Try to fold STMT, given that it's a call to the built-in function with
+   subcode FCODE.  Return the new statement on success and null on
+   failure.  */
+gimple *
+arm_general_gimple_fold_builtin (unsigned int fcode, gcall *stmt)
+{
+  gimple *new_stmt = NULL;
+
+  switch (fcode)
+    {
+    case ARM_BUILTIN_CRYPTO_AESE:
+    case ARM_BUILTIN_CRYPTO_AESD:
+      new_stmt = arm_fold_aes_op (stmt);
+      break;
+    }
+
+  /* GIMPLE assign statements (unlike calls) require a non-null lhs.  If we
+     created an assign statement with a null lhs, then fix this by assigning
+     to a new (and subsequently unused) variable.  */
+  if (new_stmt && is_gimple_assign (new_stmt) && !gimple_assign_lhs (new_stmt))
+    {
+      tree new_lhs = make_ssa_name (gimple_call_return_type (stmt));
+      gimple_assign_set_lhs (new_stmt, new_lhs);
+    }
+
+  return new_stmt;
+}
+
 #include "gt-arm-builtins.h"
diff --git a/gcc/config/arm/arm-builtins.h b/gcc/config/arm/arm-builtins.h
index 1fa85b602d92385648ac2004c0f3f88cf400b327..3a646619f4463948b5d229978a5a3ca86c238a4d 100644
@@ -32,6 +32,7 @@ enum resolver_ident {
 };
 enum resolver_ident arm_describe_resolver (tree);
 unsigned arm_cde_end_args (tree);
+gimple *arm_general_gimple_fold_builtin (unsigned int, gcall *);
 
 #define ENTRY(E, M, Q, S, T, G) E,
 enum arm_simd_type
diff --git a/gcc/config/arm/arm.cc b/gcc/config/arm/arm.cc
index a95ddf8201fa7ba08469ed8f43a964a2c9a33486..00499a26bae19955c7b200934c2cc5a379963959 100644
@@ -76,6 +76,7 @@
 #include "aarch-common.h"
 #include "aarch-common-protos.h"
 #include "machmode.h"
+#include "arm-builtins.h"
 
 /* This file should be included last.  */
 #include "target-def.h"
@@ -2859,7 +2860,9 @@ arm_gimple_fold_builtin (gimple_stmt_iterator *gsi)
   switch (code & ARM_BUILTIN_CLASS)
     {
     case ARM_BUILTIN_GENERAL:
+      new_stmt = arm_general_gimple_fold_builtin (subcode, stmt);
       break;
+
     case ARM_BUILTIN_MVE:
       new_stmt = arm_mve::gimple_fold_builtin (subcode, stmt);
     }