From: Kyrylo Tkachov
Date: Tue, 6 Jun 2023 08:56:52 +0000 (+0100)
Subject: aarch64: Simplify SHRN, RSHRN expanders and patterns
X-Git-Tag: basepoints/gcc-15~8575
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=d2cdfafde2060ba2a96b01da223fe33c67439b94;p=thirdparty%2Fgcc.git

aarch64: Simplify SHRN, RSHRN expanders and patterns

Now that we've got the annotations we can get rid of the explicit
!BYTES_BIG_ENDIAN and BYTES_BIG_ENDIAN patterns for the narrowing shift
instructions.  This allows us to clean up the expanders as well.

Bootstrapped and tested on aarch64-none-linux-gnu and aarch64_be-none-elf.

gcc/ChangeLog:

	* config/aarch64/aarch64-simd.md (aarch64_shrn<mode>_insn_le): Delete.
	(aarch64_shrn<mode>_insn_be): Delete.
	(*aarch64_<srn_op>shrn<mode>_vect): Rename to...
	(*aarch64_<srn_op>shrn<mode>): ... This.
	(aarch64_shrn<mode>): Remove reference to the above deleted patterns.
	(aarch64_rshrn<mode>_insn_le): Delete.
	(aarch64_rshrn<mode>_insn_be): Delete.
	(aarch64_rshrn<mode>_insn): New define_insn.
	(aarch64_rshrn<mode>): Remove references to the above deleted patterns.

gcc/testsuite/ChangeLog:

	* gcc.target/aarch64/simd/pr99195_5.c: Add testing for shrn_n,
	rshrn_n intrinsics.
---

diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index 3b79e246769a..f7cf39f930ca 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -1884,31 +1884,7 @@
 }
 )
 
-(define_insn "aarch64_shrn<mode>_insn_le"
-  [(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
-	(vec_concat:<VNARROWQ2>
-	  (truncate:<VNARROWQ>
-	    (lshiftrt:VQN (match_operand:VQN 1 "register_operand" "w")
-	      (match_operand:VQN 2 "aarch64_simd_shift_imm_vec_<vn_mode>")))
-	  (match_operand:<VNARROWQ> 3 "aarch64_simd_or_scalar_imm_zero")))]
-  "TARGET_SIMD && !BYTES_BIG_ENDIAN"
-  "shrn\\t%0.<Vntype>, %1.<Vtype>, %2"
-  [(set_attr "type" "neon_shift_imm_narrow_q")]
-)
-
-(define_insn "aarch64_shrn<mode>_insn_be"
-  [(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
-	(vec_concat:<VNARROWQ2>
-	  (match_operand:<VNARROWQ> 3 "aarch64_simd_or_scalar_imm_zero")
-	  (truncate:<VNARROWQ>
-	    (lshiftrt:VQN (match_operand:VQN 1 "register_operand" "w")
-	      (match_operand:VQN 2 "aarch64_simd_shift_imm_vec_<vn_mode>")))))]
-  "TARGET_SIMD && BYTES_BIG_ENDIAN"
-  "shrn\\t%0.<Vntype>, %1.<Vtype>, %2"
-  [(set_attr "type" "neon_shift_imm_narrow_q")]
-)
-
-(define_insn "*aarch64_<srn_op>shrn<mode>_vect"
+(define_insn "*aarch64_<srn_op>shrn<mode>"
   [(set (match_operand:<VNARROWQ> 0 "register_operand" "=w")
 	(truncate:<VNARROWQ>
 	  (SHIFTRT:VQN (match_operand:VQN 1 "register_operand" "w")
@@ -1979,48 +1955,17 @@
   {
     operands[2] = aarch64_simd_gen_const_vector_dup (<MODE>mode,
						     INTVAL (operands[2]));
-    rtx tmp = gen_reg_rtx (<VNARROWQ2>mode);
-    if (BYTES_BIG_ENDIAN)
-      emit_insn (gen_aarch64_shrn<mode>_insn_be (tmp, operands[1],
-				operands[2], CONST0_RTX (<VNARROWQ>mode)));
-    else
-      emit_insn (gen_aarch64_shrn<mode>_insn_le (tmp, operands[1],
-				operands[2], CONST0_RTX (<VNARROWQ>mode)));
-
-    /* The intrinsic expects a narrow result, so emit a subreg that will get
-       optimized away as appropriate.  */
-    emit_move_insn (operands[0], lowpart_subreg (<VNARROWQ>mode, tmp,
-						 <VNARROWQ2>mode));
-    DONE;
   }
 )
 
-(define_insn "aarch64_rshrn<mode>_insn_le"
-  [(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
-	(vec_concat:<VNARROWQ2>
-	  (truncate:<VNARROWQ>
-	    (lshiftrt:VQN
-	      (plus:VQN (match_operand:VQN 1 "register_operand" "w")
-			(match_operand:VQN 3 "aarch64_simd_rshrn_imm_vec"))
-	      (match_operand:VQN 2 "aarch64_simd_shift_imm_vec_<vn_mode>")))
-	  (match_operand:<VNARROWQ> 4 "aarch64_simd_or_scalar_imm_zero")))]
-  "TARGET_SIMD && !BYTES_BIG_ENDIAN
-   && INTVAL (CONST_VECTOR_ELT (operands[3], 0))
-      == (HOST_WIDE_INT_1 << (INTVAL (CONST_VECTOR_ELT (operands[2], 0)) - 1))"
-  "rshrn\\t%0.<Vntype>, %1.<Vtype>, %2"
-  [(set_attr "type" "neon_shift_imm_narrow_q")]
-)
-
-(define_insn "aarch64_rshrn<mode>_insn_be"
-  [(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
-	(vec_concat:<VNARROWQ2>
-	  (match_operand:<VNARROWQ> 4 "aarch64_simd_or_scalar_imm_zero")
-	  (truncate:<VNARROWQ>
-	    (lshiftrt:VQN
-	      (plus:VQN (match_operand:VQN 1 "register_operand" "w")
-			(match_operand:VQN 3 "aarch64_simd_rshrn_imm_vec"))
-	      (match_operand:VQN 2 "aarch64_simd_shift_imm_vec_<vn_mode>")))))]
-  "TARGET_SIMD && BYTES_BIG_ENDIAN
+(define_insn "aarch64_rshrn<mode>_insn"
+  [(set (match_operand:<VNARROWQ> 0 "register_operand" "=w")
+	(truncate:<VNARROWQ>
+	  (lshiftrt:VQN
+	    (plus:VQN (match_operand:VQN 1 "register_operand" "w")
+		      (match_operand:VQN 3 "aarch64_simd_rshrn_imm_vec"))
+	    (match_operand:VQN 2 "aarch64_simd_shift_imm_vec_<vn_mode>"))))]
+  "TARGET_SIMD
    && INTVAL (CONST_VECTOR_ELT (operands[3], 0))
       == (HOST_WIDE_INT_1 << (INTVAL (CONST_VECTOR_ELT (operands[2], 0)) - 1))"
   "rshrn\\t%0.<Vntype>, %1.<Vtype>, %2"
@@ -2044,24 +1989,10 @@
 	  = aarch64_simd_gen_const_vector_dup (<MODE>mode,
 					       HOST_WIDE_INT_1U
 						<< (INTVAL (operands[2]) - 1));
-	rtx tmp = gen_reg_rtx (<VNARROWQ2>mode);
 	operands[2] = aarch64_simd_gen_const_vector_dup (<MODE>mode,
 							 INTVAL (operands[2]));
-	if (BYTES_BIG_ENDIAN)
-	  emit_insn (
-		gen_aarch64_rshrn<mode>_insn_be (tmp, operands[1],
-						 operands[2], shft,
-						 CONST0_RTX (<VNARROWQ>mode)));
-	else
-	  emit_insn (
-		gen_aarch64_rshrn<mode>_insn_le (tmp, operands[1],
-						 operands[2], shft,
-						 CONST0_RTX (<VNARROWQ>mode)));
-
-	/* The intrinsic expects a narrow result, so emit a subreg that will
-	   get optimized away as appropriate.  */
-	emit_move_insn (operands[0], lowpart_subreg (<VNARROWQ>mode, tmp,
-						     <VNARROWQ2>mode));
+	emit_insn (gen_aarch64_rshrn<mode>_insn (operands[0], operands[1],
+						 operands[2], shft));
       }
     DONE;
   }
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/pr99195_5.c b/gcc/testsuite/gcc.target/aarch64/simd/pr99195_5.c
index a07f82179cc5..d1143243999b 100644
--- a/gcc/testsuite/gcc.target/aarch64/simd/pr99195_5.c
+++ b/gcc/testsuite/gcc.target/aarch64/simd/pr99195_5.c
@@ -17,7 +17,9 @@ foo_##OP##_##OS (IT a) \
 
 #define FUNC(OT,IT,IMT,IS,OS) \
 MYOP (OT, IT, IMT, qshrn_n, IS, OS) \
-MYOP (OT, IT, IMT, qrshrn_n, IS, OS)
+MYOP (OT, IT, IMT, qrshrn_n, IS, OS) \
+MYOP (OT, IT, IMT, shrn_n, IS, OS) \
+MYOP (OT, IT, IMT, rshrn_n, IS, OS)
 
 #define FUNCUN(OT,IT,IMT,IS,OS) \
 MYOP (OT, IT, IMT, qshrun_n, IS, OS) \
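The sketch below is not part of the patch; it is a minimal illustration of the
kind of code the new shrn_n/rshrn_n test coverage exercises.  The function
names are made up, and the expected-instruction comments assume typical -O2
code generation for the ACLE narrowing-shift intrinsics, which the simplified
patterns above now handle identically on little- and big-endian targets.

#include <arm_neon.h>

/* Narrowing shift right by an immediate: expected to compile to a single
   shrn v0.8b, v0.8h, #3.  */
int8x8_t
narrow_shr (int16x8_t a)
{
  return vshrn_n_s16 (a, 3);
}

/* Rounding narrowing shift right: expected to compile to a single
   rshrn v0.8b, v0.8h, #3.  */
int8x8_t
narrow_rshr (int16x8_t a)
{
  return vrshrn_n_s16 (a, 3);
}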