From 659f80c36357ffc440b1adaa0082326f55fd2197 Mon Sep 17 00:00:00 2001
From: Spencer Abson
Date: Mon, 7 Jul 2025 15:00:31 +0000
Subject: [PATCH] aarch64: Relaxed SEL combiner patterns for unpacked SVE FP conversions

Add UNSPEC_SEL combiner patterns for unpacked FP conversions where the
strictness value is SVE_RELAXED_GP.

gcc/ChangeLog:

	* config/aarch64/aarch64-sve.md
	(*cond_<optab>_nontrunc<SVE_PARTIAL_HSF:mode><SVE_SDF:mode>_relaxed):
	New FCVT/SEL combiner pattern.
	(*cond_<optab>_trunc<VNx2DF_ONLY:mode><VNx2SI_ONLY:mode>_relaxed):
	New FCVTZ{S,U}/SEL combiner pattern.
	(*cond_<optab>_nonextend<SVE_HSDI:mode><SVE_PARTIAL_F:mode>_relaxed):
	New {S,U}CVTF/SEL combiner pattern.
	(*cond_<optab>_trunc<SVE_SDF:mode><SVE_PARTIAL_HSF:mode>): New
	FCVT/SEL combiner pattern.
	(*cond_<optab>_nontrunc<SVE_PARTIAL_F:mode><SVE_HSDI:mode>_relaxed):
	New FCVTZ{S,U}/SEL combiner pattern.
	* config/aarch64/iterators.md (VNx2SI_ONLY): New mode iterator.

gcc/testsuite/ChangeLog:

	* gcc.target/aarch64/sve/unpacked_cond_cvtf_1.c: New test.
	* gcc.target/aarch64/sve/unpacked_cond_fcvt_1.c: Likewise.
	* gcc.target/aarch64/sve/unpacked_cond_fcvtz_1.c: Likewise.
---
 gcc/config/aarch64/aarch64-sve.md             | 121 ++++++++++++++++++
 gcc/config/aarch64/iterators.md               |   1 +
 .../aarch64/sve/unpacked_cond_cvtf_1.c        |  51 ++++++++
 .../aarch64/sve/unpacked_cond_fcvt_1.c        |  41 ++++++
 .../aarch64/sve/unpacked_cond_fcvtz_1.c       |  55 ++++++++
 5 files changed, 269 insertions(+)
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve/unpacked_cond_cvtf_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve/unpacked_cond_fcvt_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve/unpacked_cond_fcvtz_1.c

diff --git a/gcc/config/aarch64/aarch64-sve.md b/gcc/config/aarch64/aarch64-sve.md
index 10aecf1f190..d53d297e7e4 100644
--- a/gcc/config/aarch64/aarch64-sve.md
+++ b/gcc/config/aarch64/aarch64-sve.md
@@ -9653,6 +9653,31 @@
   }
 )
 
+;; As above, for pairs that are used by the auto-vectorizer only.
+(define_insn_and_rewrite "*cond_<optab>_nontrunc<SVE_PARTIAL_F:mode><SVE_HSDI:mode>_relaxed"
+  [(set (match_operand:SVE_HSDI 0 "register_operand")
+       (unspec:SVE_HSDI
+         [(match_operand:<SVE_HSDI:VPRED> 1 "register_operand")
+          (unspec:SVE_HSDI
+            [(match_operand 4)
+             (const_int SVE_RELAXED_GP)
+             (match_operand:SVE_PARTIAL_F 2 "register_operand")]
+            SVE_COND_FCVTI)
+          (match_operand:SVE_HSDI 3 "aarch64_simd_reg_or_zero")]
+         UNSPEC_SEL))]
+  "TARGET_SVE
+   && (~(<SVE_HSDI:self_mask> | <SVE_HSDI:narrower_mask>) & <SVE_PARTIAL_F:self_mask>) == 0"
+  {@ [ cons: =0 , 1   , 2 , 3  ; attrs: movprfx ]
+     [ &w       , Upl , w , 0  ; *              ] fcvtz<su>\t%0.<SVE_HSDI:Vetype>, %1/m, %2.<SVE_PARTIAL_F:Vetype>
+     [ &w       , Upl , w , Dz ; yes            ] movprfx\t%0.<SVE_HSDI:Vetype>, %1/z, %2.<SVE_HSDI:Vetype>\;fcvtz<su>\t%0.<SVE_HSDI:Vetype>, %1/m, %2.<SVE_PARTIAL_F:Vetype>
+     [ ?&w      , Upl , w , w  ; yes            ] movprfx\t%0, %3\;fcvtz<su>\t%0.<SVE_HSDI:Vetype>, %1/m, %2.<SVE_PARTIAL_F:Vetype>
+  }
+  "&& !rtx_equal_p (operands[1], operands[4])"
+  {
+    operands[4] = copy_rtx (operands[1]);
+  }
+)
+
 (define_insn "*cond_<optab>_nontrunc<SVE_FULL_F:mode><SVE_FULL_HSDI:mode>_strict"
   [(set (match_operand:SVE_FULL_HSDI 0 "register_operand")
        (unspec:SVE_FULL_HSDI
@@ -9706,6 +9731,29 @@
   }
 )
 
+(define_insn_and_rewrite "*cond_<optab>_trunc<VNx2DF_ONLY:mode><VNx2SI_ONLY:mode>_relaxed"
+  [(set (match_operand:VNx2SI_ONLY 0 "register_operand")
+       (unspec:VNx2SI_ONLY
+         [(match_operand:VNx2BI 1 "register_operand")
+          (unspec:VNx2SI_ONLY
+            [(match_operand 4)
+             (const_int SVE_RELAXED_GP)
+             (match_operand:VNx2DF_ONLY 2 "register_operand")]
+            SVE_COND_FCVTI)
+          (match_operand:VNx2SI_ONLY 3 "aarch64_simd_reg_or_zero")]
+         UNSPEC_SEL))]
+  "TARGET_SVE"
+  {@ [ cons: =0 , 1   , 2 , 3  ; attrs: movprfx ]
+     [ &w       , Upl , w , 0  ; *              ] fcvtz<su>\t%0.<VNx2SI_ONLY:Vetype>, %1/m, %2.<VNx2DF_ONLY:Vetype>
+     [ &w       , Upl , w , Dz ; yes            ] movprfx\t%0.<VNx2DF_ONLY:Vetype>, %1/z, %2.<VNx2DF_ONLY:Vetype>\;fcvtz<su>\t%0.<VNx2SI_ONLY:Vetype>, %1/m, %2.<VNx2DF_ONLY:Vetype>
+     [ ?&w      , Upl , w , w  ; yes            ] movprfx\t%0, %3\;fcvtz<su>\t%0.<VNx2SI_ONLY:Vetype>, %1/m, %2.<VNx2DF_ONLY:Vetype>
+  }
+  "&& !rtx_equal_p (operands[1], operands[4])"
+  {
+    operands[4] = copy_rtx (operands[1]);
+  }
+)
+
 ;; -------------------------------------------------------------------------
 ;; ---- [INT<-FP] Packs
 ;; -------------------------------------------------------------------------
@@ -9857,6 +9905,31 @@
   }
 )
 
+;; As above, for pairs that are used by the auto-vectorizer only.
+(define_insn_and_rewrite "*cond_<optab>_nonextend<SVE_HSDI:mode><SVE_PARTIAL_F:mode>_relaxed"
+  [(set (match_operand:SVE_PARTIAL_F 0 "register_operand")
+       (unspec:SVE_PARTIAL_F
+         [(match_operand:<SVE_HSDI:VPRED> 1 "register_operand")
+          (unspec:SVE_PARTIAL_F
+            [(match_operand 4)
+             (const_int SVE_RELAXED_GP)
+             (match_operand:SVE_HSDI 2 "register_operand")]
+            SVE_COND_ICVTF)
+          (match_operand:SVE_PARTIAL_F 3 "aarch64_simd_reg_or_zero")]
+         UNSPEC_SEL))]
+  "TARGET_SVE
+   && (~(<SVE_HSDI:self_mask> | <SVE_HSDI:narrower_mask>) & <SVE_PARTIAL_F:self_mask>) == 0"
+  {@ [ cons: =0 , 1   , 2 , 3  ; attrs: movprfx ]
+     [ &w       , Upl , w , 0  ; *              ] <su>cvtf\t%0.<SVE_PARTIAL_F:Vetype>, %1/m, %2.<SVE_HSDI:Vetype>
+     [ &w       , Upl , w , Dz ; yes            ] movprfx\t%0.<SVE_HSDI:Vetype>, %1/z, %2.<SVE_HSDI:Vetype>\;<su>cvtf\t%0.<SVE_PARTIAL_F:Vetype>, %1/m, %2.<SVE_HSDI:Vetype>
+     [ ?&w      , Upl , w , w  ; yes            ] movprfx\t%0, %3\;<su>cvtf\t%0.<SVE_PARTIAL_F:Vetype>, %1/m, %2.<SVE_HSDI:Vetype>
+  }
+  "&& !rtx_equal_p (operands[1], operands[4])"
+  {
+    operands[4] = copy_rtx (operands[1]);
+  }
+)
+
 (define_insn "*cond_<optab>_nonextend<SVE_FULL_HSDI:mode><SVE_FULL_F:mode>_strict"
   [(set (match_operand:SVE_FULL_F 0 "register_operand")
        (unspec:SVE_FULL_F
@@ -10066,6 +10139,30 @@
   }
 )
 
+;; As above, for pairs that are used by the auto-vectorizer only.
+(define_insn_and_rewrite "*cond_<optab>_trunc<SVE_SDF:mode><SVE_PARTIAL_HSF:mode>"
+  [(set (match_operand:SVE_PARTIAL_HSF 0 "register_operand")
+       (unspec:SVE_PARTIAL_HSF
+         [(match_operand:<SVE_SDF:VPRED> 1 "register_operand")
+          (unspec:SVE_PARTIAL_HSF
+            [(match_operand 4)
+             (const_int SVE_RELAXED_GP)
+             (match_operand:SVE_SDF 2 "register_operand")]
+            SVE_COND_FCVT)
+          (match_operand:SVE_PARTIAL_HSF 3 "aarch64_simd_reg_or_zero")]
+         UNSPEC_SEL))]
+  "TARGET_SVE && (~<SVE_SDF:narrower_mask> & <SVE_PARTIAL_HSF:self_mask>) == 0"
+  {@ [ cons: =0 , 1   , 2 , 3  ; attrs: movprfx ]
+     [ w        , Upl , w , 0  ; *              ] fcvt\t%0.<SVE_PARTIAL_HSF:Vetype>, %1/m, %2.<SVE_SDF:Vetype>
+     [ ?&w      , Upl , w , Dz ; yes            ] movprfx\t%0.<SVE_SDF:Vetype>, %1/z, %2.<SVE_SDF:Vetype>\;fcvt\t%0.<SVE_PARTIAL_HSF:Vetype>, %1/m, %2.<SVE_SDF:Vetype>
+     [ ?&w      , Upl , w , w  ; yes            ] movprfx\t%0, %3\;fcvt\t%0.<SVE_PARTIAL_HSF:Vetype>, %1/m, %2.<SVE_SDF:Vetype>
+  }
+  "&& !rtx_equal_p (operands[1], operands[4])"
+  {
+    operands[4] = copy_rtx (operands[1]);
+  }
+)
+
 ;; -------------------------------------------------------------------------
 ;; ---- [FP<-FP] Packs (bfloat16)
 ;; -------------------------------------------------------------------------
@@ -10259,6 +10356,30 @@
   }
 )
 
+;; As above, for pairs that are used by the auto-vectorizer only.
+(define_insn_and_rewrite "*cond_<optab>_nontrunc<SVE_PARTIAL_HSF:mode><SVE_SDF:mode>_relaxed"
+  [(set (match_operand:SVE_SDF 0 "register_operand")
+       (unspec:SVE_SDF
+         [(match_operand:<SVE_SDF:VPRED> 1 "register_operand")
+          (unspec:SVE_SDF
+            [(match_operand 4)
+             (const_int SVE_RELAXED_GP)
+             (match_operand:SVE_PARTIAL_HSF 2 "register_operand")]
+            SVE_COND_FCVT)
+          (match_operand:SVE_SDF 3 "aarch64_simd_reg_or_zero")]
+         UNSPEC_SEL))]
+  "TARGET_SVE && (~<SVE_SDF:narrower_mask> & <SVE_PARTIAL_HSF:self_mask>) == 0"
+  {@ [ cons: =0 , 1   , 2 , 3  ; attrs: movprfx ]
+     [ w        , Upl , w , 0  ; *              ] fcvt\t%0.<SVE_SDF:Vetype>, %1/m, %2.<SVE_PARTIAL_HSF:Vetype>
+     [ ?&w      , Upl , w , Dz ; yes            ] movprfx\t%0.<SVE_SDF:Vetype>, %1/z, %2.<SVE_SDF:Vetype>\;fcvt\t%0.<SVE_SDF:Vetype>, %1/m, %2.<SVE_PARTIAL_HSF:Vetype>
+     [ ?&w      , Upl , w , w  ; yes            ] movprfx\t%0, %3\;fcvt\t%0.<SVE_SDF:Vetype>, %1/m, %2.<SVE_PARTIAL_HSF:Vetype>
+  }
+  "&& !rtx_equal_p (operands[1], operands[4])"
+  {
+    operands[4] = copy_rtx (operands[1]);
+  }
+)
+
 ;; -------------------------------------------------------------------------
 ;; ---- [PRED<-PRED] Packs
 ;; -------------------------------------------------------------------------
diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md
index c59fcd679d7..22d7aa9cf9d 100644
--- a/gcc/config/aarch64/iterators.md
+++ b/gcc/config/aarch64/iterators.md
@@ -463,6 +463,7 @@
 (define_mode_iterator VNx8SI_ONLY [VNx8SI])
 (define_mode_iterator VNx8SF_ONLY [VNx8SF])
 (define_mode_iterator VNx8DI_ONLY [VNx8DI])
+(define_mode_iterator VNx2SI_ONLY [VNx2SI])
 (define_mode_iterator VNx4SI_ONLY [VNx4SI])
 (define_mode_iterator VNx4SF_ONLY [VNx4SF])
 (define_mode_iterator VNx2DI_ONLY [VNx2DI])
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/unpacked_cond_cvtf_1.c b/gcc/testsuite/gcc.target/aarch64/sve/unpacked_cond_cvtf_1.c
new file mode 100644
index 00000000000..fa4dd155a46
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/unpacked_cond_cvtf_1.c
@@ -0,0 +1,51 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -ftree-vectorize -moverride=sve_width=2048 -fno-trapping-math" } */
+
+#include <stdint.h>
+
+#define COND_CVT(TYPE0, TYPE1, TYPE2, COUNT)                    \
+  void                                                          \
+  test_##TYPE0##_##TYPE1##_##TYPE2 (TYPE0 *__restrict out,      \
+                                    TYPE1 *__restrict a,        \
+                                    TYPE0 *__restrict b,        \
+                                    TYPE2 *__restrict p)        \
+  {                                                             \
+    for (unsigned int i = 0; i < COUNT; i++)                    \
+      out[i] = p[i] ? (TYPE0)a[i] : b[i];                       \
+  }
+
+#define TEST_CVTF(PFX, T)                       \
+  T (_Float16, PFX##int16_t, uint64_t, 32)      \
+  T (_Float16, PFX##int16_t, uint32_t, 64)      \
+  T (_Float16, PFX##int32_t, uint64_t, 32)      \
+  T (_Float16, PFX##int32_t, uint32_t, 64)      \
+  T (_Float16, PFX##int64_t, uint64_t, 32)      \
+  T (float, PFX##int32_t, uint64_t, 32)         \
+  T (float, PFX##int64_t, uint64_t, 32)
+
+#define TEST_ALL(T)     \
+  TEST_CVTF (, T)       \
+  TEST_CVTF (u, T)
+
+TEST_ALL (COND_CVT)
+
+/* { dg-final { scan-assembler-times {\tld1w\tz[0-9]+\.d} 8 } } */
+/* { dg-final { scan-assembler-times {\tld1h\tz[0-9]+\.s} 6 } } */
+/* { dg-final { scan-assembler-times {\tld1h\tz[0-9]+\.d} 8 } } */
+
+/* { dg-final { scan-assembler-times {\tscvtf\tz[0-9]+\.h, p[0-7]/m, z[0-9]+\.h\n} 2 } } */
+/* { dg-final { scan-assembler-times {\tucvtf\tz[0-9]+\.h, p[0-7]/m, z[0-9]+\.h\n} 2 } } */
+
+/* { dg-final { scan-assembler-times {\tscvtf\tz[0-9]+\.h, p[0-7]/m, z[0-9]+\.s\n} 2 } } */
+/* { dg-final { scan-assembler-times {\tucvtf\tz[0-9]+\.h, p[0-7]/m, z[0-9]+\.s\n} 2 } } */
+
+/* { dg-final { scan-assembler-times {\tscvtf\tz[0-9]+\.h, p[0-7]/m, z[0-9]+\.d\n} 1 } } */
+/* { dg-final { scan-assembler-times {\tucvtf\tz[0-9]+\.h, p[0-7]/m, z[0-9]+\.d\n} 1 } } */
+
+/* { dg-final { scan-assembler-times {\tscvtf\tz[0-9]+\.s, p[0-7]/m, z[0-9]+\.s\n} 1 } } */
+/* { dg-final { scan-assembler-times {\tucvtf\tz[0-9]+\.s, p[0-7]/m, z[0-9]+\.s\n} 1 } } */
+
+/* { dg-final { scan-assembler-times {\tscvtf\tz[0-9]+\.s, p[0-7]/m, z[0-9]+\.d\n} 1 } } */
+/* { dg-final { scan-assembler-times {\tucvtf\tz[0-9]+\.s, p[0-7]/m, z[0-9]+\.d\n} 1 } } */
+
+/* { dg-final { scan-assembler-not {\tsel\t} } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/unpacked_cond_fcvt_1.c b/gcc/testsuite/gcc.target/aarch64/sve/unpacked_cond_fcvt_1.c
new file mode 100644
index 00000000000..3caae199e57
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/unpacked_cond_fcvt_1.c
@@ -0,0 +1,41 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -ftree-vectorize -moverride=sve_width=2048 -fno-trapping-math" } */
+
+#include <stdint.h>
+
+#define COND_CVT(TYPE0, TYPE1, TYPE2, COUNT)                    \
+  void                                                          \
+  test_##TYPE0##_##TYPE1##_##TYPE2 (TYPE0 *__restrict out,      \
+                                    TYPE1 *__restrict a,        \
+                                    TYPE0 *__restrict b,        \
+                                    TYPE2 *__restrict p)        \
+  {                                                             \
+    for (unsigned int i = 0; i < COUNT; i++)                    \
+      out[i] = p[i] ? (TYPE0)a[i] : b[i];                       \
+  }
+
+#define TEST_FCVT(T)                    \
+  T (_Float16, float, uint64_t, 32)     \
+  T (_Float16, float, uint32_t, 64)     \
+  T (_Float16, double, uint64_t, 32)    \
+  T (float, double, uint64_t, 32)       \
+  T (float, _Float16, uint64_t, 32)     \
+  T (float, _Float16, uint32_t, 64)     \
+  T (double, _Float16, uint64_t, 32)    \
+  T (double, float, uint64_t, 32)
+
+TEST_FCVT (COND_CVT)
+
+/* { dg-final { scan-assembler-times {\tld1w\tz[0-9]+\.d} 4 } } */
+/* { dg-final { scan-assembler-times {\tld1h\tz[0-9]+\.s} 2 } } */
+/* { dg-final { scan-assembler-times {\tld1h\tz[0-9]+\.d} 4 } } */
+
+/* { dg-final { scan-assembler-times {\tfcvt\tz[0-9]+\.s, p[0-7]/m, z[0-9]+\.d\n} 1 } } */
+/* { dg-final { scan-assembler-times {\tfcvt\tz[0-9]+\.h, p[0-7]/m, z[0-9]+\.d\n} 1 } } */
+/* { dg-final { scan-assembler-times {\tfcvt\tz[0-9]+\.h, p[0-7]/m, z[0-9]+\.s\n} 2 } } */
+
+/* { dg-final { scan-assembler-times {\tfcvt\tz[0-9]+\.d, p[0-7]/m, z[0-9]+\.h\n} 1 } } */
+/* { dg-final { scan-assembler-times {\tfcvt\tz[0-9]+\.d, p[0-7]/m, z[0-9]+\.s\n} 1 } } */
+/* { dg-final { scan-assembler-times {\tfcvt\tz[0-9]+\.s, p[0-7]/m, z[0-9]+\.h\n} 2 } } */
+
+/* { dg-final { scan-assembler-not {\tsel\t} } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/unpacked_cond_fcvtz_1.c b/gcc/testsuite/gcc.target/aarch64/sve/unpacked_cond_fcvtz_1.c
new file mode 100644
index 00000000000..426d3af8e49
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/unpacked_cond_fcvtz_1.c
@@ -0,0 +1,55 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -ftree-vectorize -moverride=sve_width=2048 -fno-trapping-math" } */
+
+#include <stdint.h>
+
+#define COND_CVT(TYPE0, TYPE1, TYPE2, COUNT)                    \
+  void                                                          \
+  test_##TYPE0##_##TYPE1##_##TYPE2 (TYPE0 *__restrict out,      \
+                                    TYPE1 *__restrict a,        \
+                                    TYPE0 *__restrict b,        \
+                                    TYPE2 *__restrict p)        \
+  {                                                             \
+    for (unsigned int i = 0; i < COUNT; i++)                    \
+      out[i] = p[i] ? (TYPE0)a[i] : b[i];                       \
+  }
+
+#define TEST_FCVTZ(PFX, T)                      \
+  T (PFX##int16_t, _Float16, uint64_t, 32)      \
+  T (PFX##int16_t, _Float16, uint32_t, 64)      \
+  T (PFX##int32_t, _Float16, uint64_t, 32)      \
+  T (PFX##int32_t, _Float16, uint32_t, 64)      \
+  T (PFX##int64_t, _Float16, uint64_t, 32)      \
+  T (PFX##int32_t, float, uint64_t, 32)         \
+  T (PFX##int64_t, float, uint64_t, 32)         \
+  T (PFX##int32_t, double, uint64_t, 32)
+
+#define TEST_ALL(T)     \
+  TEST_FCVTZ (, T)      \
+  TEST_FCVTZ (u, T)
+
+TEST_ALL (COND_CVT)
+
+/* { dg-final { scan-assembler-times {\tld1w\tz[0-9]+\.d} 10 } } */
+/* { dg-final { scan-assembler-times {\tld1h\tz[0-9]+\.s} 6 } } */
+/* { dg-final { scan-assembler-times {\tld1h\tz[0-9]+\.d} 8 } } */
+
+/* { dg-final { scan-assembler-times {\tfcvtzs\tz[0-9]+\.h, p[0-7]/m, z[0-9]+\.h\n} 2 } } */
+/* { dg-final { scan-assembler-times {\tfcvtzu\tz[0-9]+\.h, p[0-7]/m, z[0-9]+\.h\n} 2 } } */
+
+/* { dg-final { scan-assembler-times {\tfcvtzs\tz[0-9]+\.s, p[0-7]/m, z[0-9]+\.h\n} 2 } } */
+/* { dg-final { scan-assembler-times {\tfcvtzu\tz[0-9]+\.s, p[0-7]/m, z[0-9]+\.h\n} 2 } } */
+
+/* { dg-final { scan-assembler-times {\tfcvtzs\tz[0-9]+\.d, p[0-7]/m, z[0-9]+\.h\n} 1 } } */
+/* { dg-final { scan-assembler-times {\tfcvtzu\tz[0-9]+\.d, p[0-7]/m, z[0-9]+\.h\n} 1 } } */
+
+/* { dg-final { scan-assembler-times {\tfcvtzs\tz[0-9]+\.s, p[0-7]/m, z[0-9]+\.s\n} 1 } } */
+/* { dg-final { scan-assembler-times {\tfcvtzu\tz[0-9]+\.s, p[0-7]/m, z[0-9]+\.s\n} 1 } } */
+
+/* { dg-final { scan-assembler-times {\tfcvtzs\tz[0-9]+\.d, p[0-7]/m, z[0-9]+\.s\n} 1 } } */
+/* { dg-final { scan-assembler-times {\tfcvtzu\tz[0-9]+\.d, p[0-7]/m, z[0-9]+\.s\n} 1 } } */
+
+/* { dg-final { scan-assembler-times {\tfcvtzs\tz[0-9]+\.s, p[0-7]/m, z[0-9]+\.d\n} 1 } } */
+/* { dg-final { scan-assembler-times {\tfcvtzu\tz[0-9]+\.s, p[0-7]/m, z[0-9]+\.d\n} 1 } } */
+
+/* { dg-final { scan-assembler-not {\tsel\t} } } */
-- 
2.47.2