SVE has a much bigger immediate encoding range for bitmasks than Advanced SIMD,
so on an SVE-capable system, if an Advanced SIMD inclusive-OR by immediate would
require a reload, use an unpredicated SVE ORR instead.  This improves both speed
and code size.
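As a sketch (not part of the patch), the kind of code this affects mirrors the
fneg-abs tests below: ORing each 64-bit lane with the sign bit.  The constant
0x8000000000000000 is not encodable as an Advanced SIMD ORR immediate, so it
previously had to be materialized in a register first, whereas it is a valid
SVE logical immediate.  The helper name below is illustrative only:

#include <arm_neon.h>

/* Illustrative helper: OR the double sign bit into each 64-bit lane.
   Without SVE the mask must be loaded into a register; with SVE the
   compiler can emit a single unpredicated
   orr z0.d, z0.d, #0x8000000000000000 on the Z register that
   overlaps the Advanced SIMD V register.  */
uint64x2_t
set_sign_bits (uint64x2_t x)
{
  return vorrq_u64 (x, vdupq_n_u64 (0x8000000000000000ULL));
}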
gcc/ChangeLog:
PR tree-optimization/109154
* config/aarch64/aarch64.md (<optab><mode>3): Add SVE split case.
* config/aarch64/aarch64-simd.md (ior<mode>3<vczle><vczbe>): Likewise.
* config/aarch64/predicates.md (aarch64_orr_imm_sve_advsimd): New.
gcc/testsuite/ChangeLog:
PR tree-optimization/109154
* gcc.target/aarch64/sve/fneg-abs_1.c: Updated.
* gcc.target/aarch64/sve/fneg-abs_2.c: Updated.
* gcc.target/aarch64/sve/fneg-abs_4.c: Updated.
(define_insn "ior<mode>3<vczle><vczbe>"
[(set (match_operand:VDQ_I 0 "register_operand")
(ior:VDQ_I (match_operand:VDQ_I 1 "register_operand")
- (match_operand:VDQ_I 2 "aarch64_reg_or_orr_imm")))]
- "TARGET_SIMD"
- {@ [ cons: =0 , 1 , 2 ]
- [ w , w , w ] orr\t%0.<Vbtype>, %1.<Vbtype>, %2.<Vbtype>
- [ w , 0 , Do ] << aarch64_output_simd_mov_immediate (operands[2], <bitsize>, AARCH64_CHECK_ORR);
+ (match_operand:VDQ_I 2 "aarch64_orr_imm_sve_advsimd")))]
+ "TARGET_SIMD"
+ {@ [ cons: =0 , 1 , 2; attrs: arch ]
+ [ w , w , w ; simd ] orr\t%0.<Vbtype>, %1.<Vbtype>, %2.<Vbtype>
+ [ w , 0 , vsl; sve ] orr\t%Z0.<Vetype>, %Z0.<Vetype>, #%2
+ [ w , 0 , Do ; simd ] \
+ << aarch64_output_simd_mov_immediate (operands[2], <bitsize>, \
+ AARCH64_CHECK_ORR);
}
[(set_attr "type" "neon_logic<q>")]
)
""
{@ [ cons: =0 , 1 , 2 ; attrs: type , arch ]
[ r , %r , r ; logic_reg , * ] <logical>\t%<w>0, %<w>1, %<w>2
- [ rk , r , <lconst> ; logic_imm , * ] <logical>\t%<w>0, %<w>1, %2
+ [ rk , ^r , <lconst> ; logic_imm , * ] <logical>\t%<w>0, %<w>1, %2
+ [ w , 0 , <lconst> ; * , sve ] <logical>\t%Z0.<s>, %Z0.<s>, #%2
[ w , w , w ; neon_logic , simd ] <logical>\t%0.<Vbtype>, %1.<Vbtype>, %2.<Vbtype>
}
)
(ior (match_operand 0 "register_operand")
(match_operand 0 "aarch64_sve_logical_immediate")))
+(define_predicate "aarch64_orr_imm_sve_advsimd"
+ (ior (match_operand 0 "aarch64_reg_or_orr_imm")
+ (and (match_test "TARGET_SVE")
+ (match_operand 0 "aarch64_sve_logical_operand"))))
+
(define_predicate "aarch64_sve_gather_offset_b"
(ior (match_operand 0 "register_operand")
(match_operand 0 "aarch64_sve_gather_immediate_b")))
/*
** t1:
-** orr v[0-9]+.2s, #128, lsl #24
+** orr z[0-9]+.s, z[0-9]+.s, #-2147483648
** ret
*/
float32x2_t t1 (float32x2_t a)
/*
** t2:
-** orr v[0-9]+.4s, #128, lsl #24
+** orr z[0-9]+.s, z[0-9]+.s, #-2147483648
** ret
*/
float32x4_t t2 (float32x4_t a)
/*
** t3:
-** adrp x0, .LC[0-9]+
-** ldr q[0-9]+, \[x0, #:lo12:.LC0\]
-** orr v[0-9]+.16b, v[0-9]+.16b, v[0-9]+.16b
+** orr z[0-9]+.d, z[0-9]+.d, #-9223372036854775808
** ret
*/
float64x2_t t3 (float64x2_t a)
/*
** f1:
-** movi v[0-9]+.2s, 0x80, lsl 24
-** orr v[0-9]+.8b, v[0-9]+.8b, v[0-9]+.8b
+** orr z0.s, z0.s, #-2147483648
** ret
*/
float32_t f1 (float32_t a)
/*
** f2:
-** mov x0, -9223372036854775808
-** fmov d[0-9]+, x0
-** orr v[0-9]+.8b, v[0-9]+.8b, v[0-9]+.8b
+** orr z0.d, z0.d, #-9223372036854775808
** ret
*/
float64_t f2 (float64_t a)
/*
** negabs:
-** mov x0, -9223372036854775808
-** fmov d[0-9]+, x0
-** orr v[0-9]+.8b, v[0-9]+.8b, v[0-9]+.8b
+** orr z0.d, z0.d, #-9223372036854775808
** ret
*/
double negabs (double x)
/*
** negabsf:
-** movi v[0-9]+.2s, 0x80, lsl 24
-** orr v[0-9]+.8b, v[0-9]+.8b, v[0-9]+.8b
+** orr z0.s, z0.s, #-2147483648
** ret
*/
float negabsf (float x)