git.ipfire.org Git - thirdparty/gcc.git/commitdiff
AArch64: Use SVE unpredicated LOGICAL expressions when Advanced SIMD inefficient...
author: Tamar Christina <tamar.christina@arm.com>
Thu, 9 Nov 2023 14:18:48 +0000 (14:18 +0000)
committer: Tamar Christina <tamar.christina@arm.com>
Thu, 9 Nov 2023 14:18:48 +0000 (14:18 +0000)
SVE has much bigger immediate encoding range for bitmasks than Advanced SIMD has
and so on a system that is SVE capable if we need an Advanced SIMD Inclusive-OR
by immediate and would require a reload then use an unpredicated SVE ORR instead.

This has both speed and size improvements.

gcc/ChangeLog:

PR tree-optimization/109154
* config/aarch64/aarch64.md (<optab><mode>3): Add SVE split case.
* config/aarch64/aarch64-simd.md (ior<mode>3<vczle><vczbe>): Likewise.
* config/aarch64/predicates.md (aarch64_orr_imm_sve_advsimd): New.

gcc/testsuite/ChangeLog:

PR tree-optimization/109154
* gcc.target/aarch64/sve/fneg-abs_1.c: Updated.
* gcc.target/aarch64/sve/fneg-abs_2.c: Updated.
* gcc.target/aarch64/sve/fneg-abs_4.c: Updated.

gcc/config/aarch64/aarch64-simd.md
gcc/config/aarch64/aarch64.md
gcc/config/aarch64/predicates.md
gcc/testsuite/gcc.target/aarch64/sve/fneg-abs_1.c
gcc/testsuite/gcc.target/aarch64/sve/fneg-abs_2.c
gcc/testsuite/gcc.target/aarch64/sve/fneg-abs_4.c

index 33eceb436584ff73c7271f93639f2246d1af19e0..98c418c54a82a348c597310caa23916f9c16f9b6 100644 (file)
 (define_insn "ior<mode>3<vczle><vczbe>"
   [(set (match_operand:VDQ_I 0 "register_operand")
        (ior:VDQ_I (match_operand:VDQ_I 1 "register_operand")
-                  (match_operand:VDQ_I 2 "aarch64_reg_or_orr_imm")))]
-  "TARGET_SIMD"
-  {@ [ cons: =0 , 1 , 2   ]
-     [ w        , w , w   ] orr\t%0.<Vbtype>, %1.<Vbtype>, %2.<Vbtype>
-     [ w        , 0 , Do  ] << aarch64_output_simd_mov_immediate (operands[2], <bitsize>, AARCH64_CHECK_ORR);
+                  (match_operand:VDQ_I 2 "aarch64_orr_imm_sve_advsimd")))]
+  "TARGET_SIMD"
+  {@ [ cons: =0 , 1 , 2; attrs: arch ]
+     [ w        , w , w  ; simd      ] orr\t%0.<Vbtype>, %1.<Vbtype>, %2.<Vbtype>
+     [ w        , 0 , vsl; sve       ] orr\t%Z0.<Vetype>, %Z0.<Vetype>, #%2
+     [ w        , 0 , Do ; simd      ] \
+       << aarch64_output_simd_mov_immediate (operands[2], <bitsize>, \
+                                            AARCH64_CHECK_ORR);
   }
   [(set_attr "type" "neon_logic<q>")]
 )
index 4fcd71a2e9d1e8c35f35593255c4f66a68856a79..c6b1506fe7b47dd40741f26ef0cc92692008a631 100644 (file)
   ""
   {@ [ cons: =0 , 1  , 2        ; attrs: type , arch  ]
      [ r        , %r , r        ; logic_reg   , *     ] <logical>\t%<w>0, %<w>1, %<w>2
-     [ rk       , r  , <lconst> ; logic_imm   , *     ] <logical>\t%<w>0, %<w>1, %2
+     [ rk       , ^r , <lconst> ; logic_imm   , *     ] <logical>\t%<w>0, %<w>1, %2
+     [ w        , 0  , <lconst> ; *           , sve   ] <logical>\t%Z0.<s>, %Z0.<s>, #%2
      [ w        , w  , w        ; neon_logic  , simd  ] <logical>\t%0.<Vbtype>, %1.<Vbtype>, %2.<Vbtype>
   }
 )
index 01de47439744acb3708c645b98eaa607294a1f1f..a73724a7fc05636d4c0643a291f40f2609564778 100644 (file)
   (ior (match_operand 0 "register_operand")
        (match_operand 0 "aarch64_sve_logical_immediate")))
 
+(define_predicate "aarch64_orr_imm_sve_advsimd"
+  (ior (match_operand 0 "aarch64_reg_or_orr_imm")
+       (and (match_test "TARGET_SVE")
+           (match_operand 0 "aarch64_sve_logical_operand"))))
+
 (define_predicate "aarch64_sve_gather_offset_b"
   (ior (match_operand 0 "register_operand")
        (match_operand 0 "aarch64_sve_gather_immediate_b")))
index 0c7664e6de77a497682952653ffd417453854d52..a8b27199ff83d0eebadfc7dcf03f94e1229d76b8 100644 (file)
@@ -6,7 +6,7 @@
 
 /*
 ** t1:
-**     orr     v[0-9]+.2s, #128, lsl #24
+**     orr     z[0-9]+.s, z[0-9]+.s, #-2147483648
 **     ret
 */
 float32x2_t t1 (float32x2_t a)
@@ -16,7 +16,7 @@ float32x2_t t1 (float32x2_t a)
 
 /*
 ** t2:
-**     orr     v[0-9]+.4s, #128, lsl #24
+**     orr     z[0-9]+.s, z[0-9]+.s, #-2147483648
 **     ret
 */
 float32x4_t t2 (float32x4_t a)
@@ -26,9 +26,7 @@ float32x4_t t2 (float32x4_t a)
 
 /*
 ** t3:
-**     adrp    x0, .LC[0-9]+
-**     ldr     q[0-9]+, \[x0, #:lo12:.LC0\]
-**     orr     v[0-9]+.16b, v[0-9]+.16b, v[0-9]+.16b
+**     orr     z[0-9]+.d, z[0-9]+.d, #-9223372036854775808
 **     ret
 */
 float64x2_t t3 (float64x2_t a)
index a60cd31b9294af2dac69eed1c93f899bd5c78fca..19a7695e605bc8aced486a9c450d1cdc6be4691a 100644 (file)
@@ -7,8 +7,7 @@
 
 /*
 ** f1:
-**     movi    v[0-9]+.2s, 0x80, lsl 24
-**     orr     v[0-9]+.8b, v[0-9]+.8b, v[0-9]+.8b
+**     orr     z0.s, z0.s, #-2147483648
 **     ret
 */
 float32_t f1 (float32_t a)
@@ -18,9 +17,7 @@ float32_t f1 (float32_t a)
 
 /*
 ** f2:
-**     mov     x0, -9223372036854775808
-**     fmov    d[0-9]+, x0
-**     orr     v[0-9]+.8b, v[0-9]+.8b, v[0-9]+.8b
+**     orr     z0.d, z0.d, #-9223372036854775808
 **     ret
 */
 float64_t f2 (float64_t a)
index 21f2a8da2a5d44e3d01f6604ca7be87e3744d494..663d5fe17e091d128313b6b8b8dc918a01a96c4f 100644 (file)
@@ -6,9 +6,7 @@
 
 /*
 ** negabs:
-**     mov     x0, -9223372036854775808
-**     fmov    d[0-9]+, x0
-**     orr     v[0-9]+.8b, v[0-9]+.8b, v[0-9]+.8b
+**     orr     z0.d, z0.d, #-9223372036854775808
 **     ret
 */
 double negabs (double x)
@@ -22,8 +20,7 @@ double negabs (double x)
 
 /*
 ** negabsf:
-**     movi    v[0-9]+.2s, 0x80, lsl 24
-**     orr     v[0-9]+.8b, v[0-9]+.8b, v[0-9]+.8b
+**     orr     z0.s, z0.s, #-2147483648
 **     ret
 */
 float negabsf (float x)