aarch64: Remove redundant TARGET_* checks
author    Richard Sandiford <richard.sandiford@arm.com>
          Thu, 29 Sep 2022 10:32:57 +0000 (11:32 +0100)
committer Richard Sandiford <richard.sandiford@arm.com>
          Thu, 29 Sep 2022 10:32:57 +0000 (11:32 +0100)
After previous patches, it's possible to remove TARGET_* checks
that are redundant due to (IMO) obvious dependencies between the
feature flags.
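
The redundancy argument is purely about dependency closure: once the option
machinery guarantees that (for example) enabling AES also enables SIMD in the
computed ISA flags, a combined "TARGET_SIMD && AARCH64_ISA_AES" test adds
nothing over "AARCH64_ISA_AES" alone.  A minimal standalone C sketch of that
reasoning, with illustrative flag names rather than the real AARCH64_FL_*
bits:

  #include <assert.h>
  #include <stdint.h>

  #define FL_SIMD (1u << 0)
  #define FL_AES  (1u << 1)   /* AES is an extension of SIMD.  */

  /* Hypothetical closure step, run once during option processing.  */
  static uint32_t
  close_dependencies (uint32_t flags)
  {
    if (flags & FL_AES)
      flags |= FL_SIMD;       /* +aes pulls in +simd.  */
    return flags;
  }

  int
  main (void)
  {
    uint32_t isa = close_dependencies (FL_AES);
    /* After closure, "SIMD && AES" and plain "AES" always agree.  */
    assert (!!((isa & FL_SIMD) && (isa & FL_AES)) == !!(isa & FL_AES));
    return 0;
  }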

gcc/
* config/aarch64/aarch64.h (TARGET_CRYPTO, TARGET_SHA3, TARGET_SM4)
(TARGET_DOTPROD): Don't depend on TARGET_SIMD.
(TARGET_AES, TARGET_SHA2): Likewise.  Remove TARGET_CRYPTO test.
(TARGET_FP_F16INST): Don't depend on TARGET_FLOAT.
(TARGET_SVE2, TARGET_SVE_F32MM, TARGET_SVE_F64MM): Don't depend
on TARGET_SVE.
(TARGET_SVE2_AES, TARGET_SVE2_BITPERM, TARGET_SVE2_SHA3)
(TARGET_SVE2_SM4): Don't depend on TARGET_SVE2.
(TARGET_F32MM, TARGET_F64MM): Delete.
* config/aarch64/aarch64-c.cc (aarch64_update_cpp_builtins): Guard
float macros with just TARGET_FLOAT rather than TARGET_FLOAT
|| TARGET_SIMD.
* config/aarch64/aarch64-simd.md (copysign<mode>3): Depend
only on TARGET_SIMD, rather than TARGET_FLOAT && TARGET_SIMD.
(aarch64_crypto_aes<aes_op>v16qi): Depend only on TARGET_AES,
rather than TARGET_SIMD && TARGET_AES.
(aarch64_crypto_aes<aesmc_op>v16qi): Likewise.
(*aarch64_crypto_aese_fused): Likewise.
(*aarch64_crypto_aesd_fused): Likewise.
(aarch64_crypto_pmulldi): Likewise.
(aarch64_crypto_pmullv2di): Likewise.
(aarch64_crypto_sha1hsi): Likewise TARGET_SHA2.
(aarch64_crypto_sha1hv4si): Likewise.
(aarch64_be_crypto_sha1hv4si): Likewise.
(aarch64_crypto_sha1su1v4si): Likewise.
(aarch64_crypto_sha1<sha1_op>v4si): Likewise.
(aarch64_crypto_sha1su0v4si): Likewise.
(aarch64_crypto_sha256h<sha256_op>v4si): Likewise.
(aarch64_crypto_sha256su0v4si): Likewise.
(aarch64_crypto_sha256su1v4si): Likewise.
(aarch64_crypto_sha512h<sha512_op>qv2di): Likewise TARGET_SHA3.
(aarch64_crypto_sha512su0qv2di): Likewise.
(aarch64_crypto_sha512su1qv2di, eor3q<mode>4): Likewise.
(aarch64_rax1qv2di, aarch64_xarqv2di, bcaxq<mode>4): Likewise.
(aarch64_sm3ss1qv4si): Likewise TARGET_SM4.
(aarch64_sm3tt<sm3tt_op>qv4si): Likewise.
(aarch64_sm3partw<sm3part_op>qv4si): Likewise.
(aarch64_sm4eqv4si, aarch64_sm4ekeyqv4si): Likewise.
* config/aarch64/aarch64.md (<FLOATUORS:optab>dihf2)
(copysign<GPF:mode>3, copysign<GPF:mode>3_insn)
(xorsign<mode>3): Remove redundant TARGET_FLOAT condition.

gcc/config/aarch64/aarch64-c.cc
gcc/config/aarch64/aarch64-simd.md
gcc/config/aarch64/aarch64.h
gcc/config/aarch64/aarch64.md

diff --git a/gcc/config/aarch64/aarch64-c.cc b/gcc/config/aarch64/aarch64-c.cc
index e066ca5f43cc1a801c3b9dc3f78eeddad3ebed83..592af8cd7299be157f74950d1591a4fdb44510de 100644
@@ -92,7 +92,7 @@ aarch64_update_cpp_builtins (cpp_reader *pfile)
 
   aarch64_def_or_undef (TARGET_FLOAT, "__ARM_FEATURE_FMA", pfile);
 
-  if (TARGET_FLOAT || TARGET_SIMD)
+  if (TARGET_FLOAT)
     {
       builtin_define_with_int_value ("__ARM_FP", 0x0E);
       builtin_define ("__ARM_FP16_FORMAT_IEEE");
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index dc80f826100ea4feb4150f117eaf53a4f2afbbff..5386043739a9b2e328bfb2fc9067da8feeac1a92 100644
   [(match_operand:VHSDF 0 "register_operand")
    (match_operand:VHSDF 1 "register_operand")
    (match_operand:VHSDF 2 "register_operand")]
-  "TARGET_FLOAT && TARGET_SIMD"
+  "TARGET_SIMD"
 {
   rtx v_bitmask = gen_reg_rtx (<V_INT_EQUIV>mode);
   int bits = GET_MODE_UNIT_BITSIZE (<MODE>mode) - 1;
                 (match_operand:V16QI 1 "register_operand" "%0")
                 (match_operand:V16QI 2 "register_operand" "w"))]
          CRYPTO_AES))]
-  "TARGET_SIMD && TARGET_AES"
+  "TARGET_AES"
   "aes<aes_op>\\t%0.16b, %2.16b"
   [(set_attr "type" "crypto_aese")]
 )
   [(set (match_operand:V16QI 0 "register_operand" "=w")
        (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "w")]
         CRYPTO_AESMC))]
-  "TARGET_SIMD && TARGET_AES"
+  "TARGET_AES"
   "aes<aesmc_op>\\t%0.16b, %1.16b"
   [(set_attr "type" "crypto_aesmc")]
 )
                (match_operand:V16QI 2 "register_operand" "w"))]
             UNSPEC_AESE)]
        UNSPEC_AESMC))]
-  "TARGET_SIMD && TARGET_AES
+  "TARGET_AES
    && aarch64_fusion_enabled_p (AARCH64_FUSE_AES_AESMC)"
   "aese\\t%0.16b, %2.16b\;aesmc\\t%0.16b, %0.16b"
   [(set_attr "type" "crypto_aese")
                        (match_operand:V16QI 2 "register_operand" "w"))]
                UNSPEC_AESD)]
          UNSPEC_AESIMC))]
-  "TARGET_SIMD && TARGET_AES
+  "TARGET_AES
    && aarch64_fusion_enabled_p (AARCH64_FUSE_AES_AESMC)"
   "aesd\\t%0.16b, %2.16b\;aesimc\\t%0.16b, %0.16b"
   [(set_attr "type" "crypto_aese")
         (unspec:SI [(match_operand:SI 1
                        "register_operand" "w")]
          UNSPEC_SHA1H))]
-  "TARGET_SIMD && TARGET_SHA2"
+  "TARGET_SHA2"
   "sha1h\\t%s0, %s1"
   [(set_attr "type" "crypto_sha1_fast")]
 )
        (unspec:SI [(vec_select:SI (match_operand:V4SI 1 "register_operand" "w")
                     (parallel [(const_int 0)]))]
         UNSPEC_SHA1H))]
-  "TARGET_SIMD && TARGET_SHA2 && !BYTES_BIG_ENDIAN"
+  "TARGET_SHA2 && !BYTES_BIG_ENDIAN"
   "sha1h\\t%s0, %s1"
   [(set_attr "type" "crypto_sha1_fast")]
 )
        (unspec:SI [(vec_select:SI (match_operand:V4SI 1 "register_operand" "w")
                     (parallel [(const_int 3)]))]
         UNSPEC_SHA1H))]
-  "TARGET_SIMD && TARGET_SHA2 && BYTES_BIG_ENDIAN"
+  "TARGET_SHA2 && BYTES_BIG_ENDIAN"
   "sha1h\\t%s0, %s1"
   [(set_attr "type" "crypto_sha1_fast")]
 )
         (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "0")
                       (match_operand:V4SI 2 "register_operand" "w")]
          UNSPEC_SHA1SU1))]
-  "TARGET_SIMD && TARGET_SHA2"
+  "TARGET_SHA2"
   "sha1su1\\t%0.4s, %2.4s"
   [(set_attr "type" "crypto_sha1_fast")]
 )
                       (match_operand:SI 2 "register_operand" "w")
                       (match_operand:V4SI 3 "register_operand" "w")]
          CRYPTO_SHA1))]
-  "TARGET_SIMD && TARGET_SHA2"
+  "TARGET_SHA2"
   "sha1<sha1_op>\\t%q0, %s2, %3.4s"
   [(set_attr "type" "crypto_sha1_slow")]
 )
                       (match_operand:V4SI 2 "register_operand" "w")
                       (match_operand:V4SI 3 "register_operand" "w")]
          UNSPEC_SHA1SU0))]
-  "TARGET_SIMD && TARGET_SHA2"
+  "TARGET_SHA2"
   "sha1su0\\t%0.4s, %2.4s, %3.4s"
   [(set_attr "type" "crypto_sha1_xor")]
 )
                       (match_operand:V4SI 2 "register_operand" "w")
                       (match_operand:V4SI 3 "register_operand" "w")]
          CRYPTO_SHA256))]
-  "TARGET_SIMD && TARGET_SHA2"
+  "TARGET_SHA2"
   "sha256h<sha256_op>\\t%q0, %q2, %3.4s"
   [(set_attr "type" "crypto_sha256_slow")]
 )
         (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "0")
                       (match_operand:V4SI 2 "register_operand" "w")]
          UNSPEC_SHA256SU0))]
-  "TARGET_SIMD && TARGET_SHA2"
+  "TARGET_SHA2"
   "sha256su0\\t%0.4s, %2.4s"
   [(set_attr "type" "crypto_sha256_fast")]
 )
                       (match_operand:V4SI 2 "register_operand" "w")
                       (match_operand:V4SI 3 "register_operand" "w")]
          UNSPEC_SHA256SU1))]
-  "TARGET_SIMD && TARGET_SHA2"
+  "TARGET_SHA2"
   "sha256su1\\t%0.4s, %2.4s, %3.4s"
   [(set_attr "type" "crypto_sha256_slow")]
 )
                       (match_operand:V2DI 2 "register_operand" "w")
                       (match_operand:V2DI 3 "register_operand" "w")]
          CRYPTO_SHA512))]
-  "TARGET_SIMD && TARGET_SHA3"
+  "TARGET_SHA3"
   "sha512h<sha512_op>\\t%q0, %q2, %3.2d"
   [(set_attr "type" "crypto_sha512")]
 )
         (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "0")
                       (match_operand:V2DI 2 "register_operand" "w")]
          UNSPEC_SHA512SU0))]
-  "TARGET_SIMD && TARGET_SHA3"
+  "TARGET_SHA3"
   "sha512su0\\t%0.2d, %2.2d"
   [(set_attr "type" "crypto_sha512")]
 )
                       (match_operand:V2DI 2 "register_operand" "w")
                       (match_operand:V2DI 3 "register_operand" "w")]
          UNSPEC_SHA512SU1))]
-  "TARGET_SIMD && TARGET_SHA3"
+  "TARGET_SHA3"
   "sha512su1\\t%0.2d, %2.2d, %3.2d"
   [(set_attr "type" "crypto_sha512")]
 )
          (match_operand:VQ_I 2 "register_operand" "w")
          (match_operand:VQ_I 3 "register_operand" "w"))
         (match_operand:VQ_I 1 "register_operand" "w")))]
-  "TARGET_SIMD && TARGET_SHA3"
+  "TARGET_SHA3"
   "eor3\\t%0.16b, %1.16b, %2.16b, %3.16b"
   [(set_attr "type" "crypto_sha3")]
 )
          (match_operand:V2DI 2 "register_operand" "w")
          (const_int 1))
         (match_operand:V2DI 1 "register_operand" "w")))]
-  "TARGET_SIMD && TARGET_SHA3"
+  "TARGET_SHA3"
   "rax1\\t%0.2d, %1.2d, %2.2d"
   [(set_attr "type" "crypto_sha3")]
 )
          (match_operand:V2DI 1 "register_operand" "%w")
          (match_operand:V2DI 2 "register_operand" "w"))
         (match_operand:SI 3 "aarch64_simd_shift_imm_di" "Usd")))]
-  "TARGET_SIMD && TARGET_SHA3"
+  "TARGET_SHA3"
   "xar\\t%0.2d, %1.2d, %2.2d, %3"
   [(set_attr "type" "crypto_sha3")]
 )
          (not:VQ_I (match_operand:VQ_I 3 "register_operand" "w"))
          (match_operand:VQ_I 2 "register_operand" "w"))
         (match_operand:VQ_I 1 "register_operand" "w")))]
-  "TARGET_SIMD && TARGET_SHA3"
+  "TARGET_SHA3"
   "bcax\\t%0.16b, %1.16b, %2.16b, %3.16b"
   [(set_attr "type" "crypto_sha3")]
 )
                      (match_operand:V4SI 2 "register_operand" "w")
                      (match_operand:V4SI 3 "register_operand" "w")]
         UNSPEC_SM3SS1))]
-  "TARGET_SIMD && TARGET_SM4"
+  "TARGET_SM4"
   "sm3ss1\\t%0.4s, %1.4s, %2.4s, %3.4s"
   [(set_attr "type" "crypto_sm3")]
 )
                      (match_operand:V4SI 3 "register_operand" "w")
                      (match_operand:SI 4 "aarch64_imm2" "Ui2")]
         CRYPTO_SM3TT))]
-  "TARGET_SIMD && TARGET_SM4"
+  "TARGET_SM4"
   "sm3tt<sm3tt_op>\\t%0.4s, %2.4s, %3.4s[%4]"
   [(set_attr "type" "crypto_sm3")]
 )
                      (match_operand:V4SI 2 "register_operand" "w")
                      (match_operand:V4SI 3 "register_operand" "w")]
         CRYPTO_SM3PART))]
-  "TARGET_SIMD && TARGET_SM4"
+  "TARGET_SM4"
   "sm3partw<sm3part_op>\\t%0.4s, %2.4s, %3.4s"
   [(set_attr "type" "crypto_sm3")]
 )
        (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "0")
                      (match_operand:V4SI 2 "register_operand" "w")]
         UNSPEC_SM4E))]
-  "TARGET_SIMD && TARGET_SM4"
+  "TARGET_SM4"
   "sm4e\\t%0.4s, %2.4s"
   [(set_attr "type" "crypto_sm4")]
 )
        (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "w")
                      (match_operand:V4SI 2 "register_operand" "w")]
         UNSPEC_SM4EKEY))]
-  "TARGET_SIMD && TARGET_SM4"
+  "TARGET_SM4"
   "sm4ekey\\t%0.4s, %1.4s, %2.4s"
   [(set_attr "type" "crypto_sm4")]
 )
         (unspec:TI  [(match_operand:DI 1 "register_operand" "w")
                     (match_operand:DI 2 "register_operand" "w")]
                    UNSPEC_PMULL))]
- "TARGET_SIMD && TARGET_AES"
+ "TARGET_AES"
  "pmull\\t%0.1q, %1.1d, %2.1d"
   [(set_attr "type" "crypto_pmull")]
 )
        (unspec:TI [(match_operand:V2DI 1 "register_operand" "w")
                   (match_operand:V2DI 2 "register_operand" "w")]
                  UNSPEC_PMULL2))]
-  "TARGET_SIMD && TARGET_AES"
+  "TARGET_AES"
   "pmull2\\t%0.1q, %1.2d, %2.2d"
   [(set_attr "type" "crypto_pmull")]
 )
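
The patterns above back the arm_neon.h crypto intrinsics, so with the relaxed
conditions a plain +aes (or +sha3, +sm4) extension string is enough to enable
them.  A small usage sketch, assuming something like -march=armv8-a+aes:

  #include <arm_neon.h>

  /* One AES round: aese followed by aesmc, the pair that the
     *aarch64_crypto_aese_fused pattern can fuse on suitable cores.  */
  uint8x16_t
  aes_round (uint8x16_t state, uint8x16_t round_key)
  {
    return vaesmcq_u8 (vaeseq_u8 (state, round_key));
  }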
diff --git a/gcc/config/aarch64/aarch64.h b/gcc/config/aarch64/aarch64.h
index 6ee63570551d73e188c7abe48e1d17632858d2be..2d6221826bb30e4e57cbae193849823d510fd838 100644
@@ -222,19 +222,19 @@ enum class aarch64_feature : unsigned char {
 #define AARCH64_ISA_LS64          (aarch64_isa_flags & AARCH64_FL_LS64)
 
 /* Crypto is an optional extension to AdvSIMD.  */
-#define TARGET_CRYPTO (TARGET_SIMD && AARCH64_ISA_CRYPTO)
+#define TARGET_CRYPTO (AARCH64_ISA_CRYPTO)
 
 /* SHA2 is an optional extension to AdvSIMD.  */
-#define TARGET_SHA2 ((TARGET_SIMD && AARCH64_ISA_SHA2) || TARGET_CRYPTO)
+#define TARGET_SHA2 (AARCH64_ISA_SHA2)
 
 /* SHA3 is an optional extension to AdvSIMD.  */
-#define TARGET_SHA3 (TARGET_SIMD && AARCH64_ISA_SHA3)
+#define TARGET_SHA3 (AARCH64_ISA_SHA3)
 
 /* AES is an optional extension to AdvSIMD.  */
-#define TARGET_AES ((TARGET_SIMD && AARCH64_ISA_AES) || TARGET_CRYPTO)
+#define TARGET_AES (AARCH64_ISA_AES)
 
 /* SM is an optional extension to AdvSIMD.  */
-#define TARGET_SM4 (TARGET_SIMD && AARCH64_ISA_SM4)
+#define TARGET_SM4 (AARCH64_ISA_SM4)
 
 /* FP16FML is an optional extension to AdvSIMD.  */
 #define TARGET_F16FML (TARGET_SIMD && AARCH64_ISA_F16FML && TARGET_FP_F16INST)
@@ -246,29 +246,29 @@ enum class aarch64_feature : unsigned char {
 #define TARGET_LSE (AARCH64_ISA_LSE)
 
 /* ARMv8.2-A FP16 support that can be enabled through the +fp16 extension.  */
-#define TARGET_FP_F16INST (TARGET_FLOAT && AARCH64_ISA_F16)
+#define TARGET_FP_F16INST (AARCH64_ISA_F16)
 #define TARGET_SIMD_F16INST (TARGET_SIMD && AARCH64_ISA_F16)
 
 /* Dot Product is an optional extension to AdvSIMD enabled through +dotprod.  */
-#define TARGET_DOTPROD (TARGET_SIMD && AARCH64_ISA_DOTPROD)
+#define TARGET_DOTPROD (AARCH64_ISA_DOTPROD)
 
 /* SVE instructions, enabled through +sve.  */
 #define TARGET_SVE (AARCH64_ISA_SVE)
 
 /* SVE2 instructions, enabled through +sve2.  */
-#define TARGET_SVE2 (TARGET_SVE && AARCH64_ISA_SVE2)
+#define TARGET_SVE2 (AARCH64_ISA_SVE2)
 
 /* SVE2 AES instructions, enabled through +sve2-aes.  */
-#define TARGET_SVE2_AES (TARGET_SVE2 && AARCH64_ISA_SVE2_AES)
+#define TARGET_SVE2_AES (AARCH64_ISA_SVE2_AES)
 
 /* SVE2 BITPERM instructions, enabled through +sve2-bitperm.  */
-#define TARGET_SVE2_BITPERM (TARGET_SVE2 && AARCH64_ISA_SVE2_BITPERM)
+#define TARGET_SVE2_BITPERM (AARCH64_ISA_SVE2_BITPERM)
 
 /* SVE2 SHA3 instructions, enabled through +sve2-sha3.  */
-#define TARGET_SVE2_SHA3 (TARGET_SVE2 && AARCH64_ISA_SVE2_SHA3)
+#define TARGET_SVE2_SHA3 (AARCH64_ISA_SVE2_SHA3)
 
 /* SVE2 SM4 instructions, enabled through +sve2-sm4.  */
-#define TARGET_SVE2_SM4 (TARGET_SVE2 && AARCH64_ISA_SVE2_SM4)
+#define TARGET_SVE2_SM4 (AARCH64_ISA_SVE2_SM4)
 
 /* ARMv8.3-A features.  */
 #define TARGET_ARMV8_3 (AARCH64_ISA_V8_3A)
@@ -296,12 +296,10 @@ enum class aarch64_feature : unsigned char {
 #define TARGET_SVE_I8MM (TARGET_SVE && AARCH64_ISA_I8MM)
 
 /* F32MM instructions are enabled through +f32mm.  */
-#define TARGET_F32MM (AARCH64_ISA_F32MM)
-#define TARGET_SVE_F32MM (TARGET_SVE && AARCH64_ISA_F32MM)
+#define TARGET_SVE_F32MM (AARCH64_ISA_F32MM)
 
 /* F64MM instructions are enabled through +f64mm.  */
-#define TARGET_F64MM (AARCH64_ISA_F64MM)
-#define TARGET_SVE_F64MM (TARGET_SVE && AARCH64_ISA_F64MM)
+#define TARGET_SVE_F64MM (AARCH64_ISA_F64MM)
 
 /* BF16 instructions are enabled through +bf16.  */
 #define TARGET_BF16_FP (AARCH64_ISA_BF16)
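
The TARGET_SHA2 and TARGET_AES simplifications additionally drop the
"|| TARGET_CRYPTO" arm, which is safe provided +crypto now sets the AES and
SHA2 flags directly.  A standalone sketch (illustrative flag names) that
checks the old and new TARGET_SHA2 definitions agree on closed flag sets:

  #include <assert.h>
  #include <stdint.h>

  #define FL_SIMD   (1u << 0)
  #define FL_SHA2   (1u << 1)
  #define FL_CRYPTO (1u << 2)

  /* Hypothetical closure: +crypto enables SHA2, and SHA2 enables SIMD.  */
  static uint32_t
  close_deps (uint32_t f)
  {
    if (f & FL_CRYPTO) f |= FL_SHA2;
    if (f & FL_SHA2)   f |= FL_SIMD;
    return f;
  }

  int
  main (void)
  {
    for (uint32_t raw = 0; raw < 8; raw++)
      {
        uint32_t f = close_deps (raw);
        int old_sha2 = ((f & FL_SIMD) && (f & FL_SHA2)) || (f & FL_CRYPTO);
        int new_sha2 = (f & FL_SHA2) != 0;
        assert (old_sha2 == new_sha2);
      }
    return 0;
  }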
diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
index 3f8e40a48b5e512600caefbf73508e4da213df7a..0a7633e5dd6d45282edd7a1088c14b555bc09b40 100644
 (define_expand "<optab>dihf2"
   [(set (match_operand:HF 0 "register_operand")
        (FLOATUORS:HF (match_operand:DI 1 "register_operand")))]
-  "TARGET_FLOAT && (TARGET_FP_F16INST || TARGET_SIMD)"
+  "TARGET_FP_F16INST || TARGET_SIMD"
 {
   if (TARGET_FP_F16INST)
     emit_insn (gen_aarch64_fp16_<optab>dihf2 (operands[0], operands[1]));
   [(match_operand:GPF 0 "register_operand")
    (match_operand:GPF 1 "register_operand")
    (match_operand:GPF 2 "register_operand")]
-  "TARGET_FLOAT && TARGET_SIMD"
+  "TARGET_SIMD"
 {
   rtx bitmask = gen_reg_rtx (<V_INT_EQUIV>mode);
   emit_move_insn (bitmask, GEN_INT (HOST_WIDE_INT_M1U
                     (match_operand:GPF 2 "register_operand" "w,w,0,0")
                     (match_operand:<V_INT_EQUIV> 3 "register_operand" "0,w,w,X")]
         UNSPEC_COPYSIGN))]
-  "TARGET_FLOAT && TARGET_SIMD"
+  "TARGET_SIMD"
   "@
    bsl\\t%0.<Vbtype>, %2.<Vbtype>, %1.<Vbtype>
    bit\\t%0.<Vbtype>, %2.<Vbtype>, %3.<Vbtype>
   [(match_operand:GPF 0 "register_operand")
    (match_operand:GPF 1 "register_operand")
    (match_operand:GPF 2 "register_operand")]
-  "TARGET_FLOAT && TARGET_SIMD"
+  "TARGET_SIMD"
 {
 
   machine_mode imode = <V_INT_EQUIV>mode;