From: Kyrylo Tkachov
Date: Wed, 26 Apr 2023 14:11:11 +0000 (+0100)
Subject: aarch64: Reimplement RSHRN2 intrinsic patterns with standard RTL codes
X-Git-Tag: basepoints/gcc-15~9872
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=b4c69e6b663753f41259e2e19ad03cfc11457534;p=thirdparty%2Fgcc.git

aarch64: Reimplement RSHRN2 intrinsic patterns with standard RTL codes

Similar to the previous patch, we can reimplement the rshrn2 patterns
using standard RTL codes for shift, truncate and plus with the
appropriate constants.  This allows us to get rid of UNSPEC_RSHRN
entirely.

Bootstrapped and tested on aarch64-none-linux-gnu.

gcc/ChangeLog:

	* config/aarch64/aarch64-simd.md (aarch64_rshrn2<mode>_insn_le):
	Reimplement using standard RTL codes instead of unspec.
	(aarch64_rshrn2<mode>_insn_be): Likewise.
	(aarch64_rshrn2<mode>): Adjust for the above.
	* config/aarch64/aarch64.md (UNSPEC_RSHRN): Delete.
---

diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index f8913107aad6..1e7295428054 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -2107,23 +2107,31 @@
   [(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
 	(vec_concat:<VNARROWQ2>
 	  (match_operand:<VNARROWQ> 1 "register_operand" "0")
-	  (unspec:<VNARROWQ> [(match_operand:VQN 2 "register_operand" "w")
-	    (match_operand:VQN 3 "aarch64_simd_shift_imm_vec_<vn_mode>")]
-	    UNSPEC_RSHRN)))]
-  "TARGET_SIMD && !BYTES_BIG_ENDIAN"
-  "rshrn2\\t%0.<V2ntype>, %2.<Vtype>, %3"
+	  (truncate:<VNARROWQ>
+	    (lshiftrt:VQN
+	      (plus:VQN (match_operand:VQN 2 "register_operand" "w")
+			(match_operand:VQN 3 "aarch64_simd_rshrn_imm_vec"))
+	      (match_operand:VQN 4 "aarch64_simd_shift_imm_vec_<vn_mode>")))))]
+  "TARGET_SIMD && !BYTES_BIG_ENDIAN
+   && INTVAL (CONST_VECTOR_ELT (operands[3], 0))
+      == (HOST_WIDE_INT_1 << (INTVAL (CONST_VECTOR_ELT (operands[4], 0)) - 1))"
+  "rshrn2\\t%0.<V2ntype>, %2.<Vtype>, %4"
   [(set_attr "type" "neon_shift_imm_narrow_q")]
 )
 
 (define_insn "aarch64_rshrn2<mode>_insn_be"
   [(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
 	(vec_concat:<VNARROWQ2>
-	  (unspec:<VNARROWQ> [(match_operand:VQN 2 "register_operand" "w")
-	    (match_operand:VQN 3 "aarch64_simd_shift_imm_vec_<vn_mode>")]
-	    UNSPEC_RSHRN)
+	  (truncate:<VNARROWQ>
+	    (lshiftrt:VQN
+	      (plus:VQN (match_operand:VQN 2 "register_operand" "w")
+			(match_operand:VQN 3 "aarch64_simd_rshrn_imm_vec"))
+	      (match_operand:VQN 4 "aarch64_simd_shift_imm_vec_<vn_mode>")))
 	  (match_operand:<VNARROWQ> 1 "register_operand" "0")))]
-  "TARGET_SIMD && BYTES_BIG_ENDIAN"
-  "rshrn2\\t%0.<V2ntype>, %2.<Vtype>, %3"
+  "TARGET_SIMD && BYTES_BIG_ENDIAN
+   && INTVAL (CONST_VECTOR_ELT (operands[3], 0))
+      == (HOST_WIDE_INT_1 << (INTVAL (CONST_VECTOR_ELT (operands[4], 0)) - 1))"
+  "rshrn2\\t%0.<V2ntype>, %2.<Vtype>, %4"
   [(set_attr "type" "neon_shift_imm_narrow_q")]
 )
 
@@ -2142,17 +2150,24 @@
 	}
       else
 	{
+	  rtx shft
+	    = aarch64_simd_gen_const_vector_dup (<MODE>mode,
+						 HOST_WIDE_INT_1U
+						 << (INTVAL (operands[3]) - 1));
+
 	  operands[3] = aarch64_simd_gen_const_vector_dup (<MODE>mode,
 							   INTVAL (operands[3]));
 	  if (BYTES_BIG_ENDIAN)
 	    emit_insn (gen_aarch64_rshrn2<mode>_insn_be (operands[0],
 							 operands[1],
 							 operands[2],
+							 shft,
 							 operands[3]));
 	  else
 	    emit_insn (gen_aarch64_rshrn2<mode>_insn_le (operands[0],
 							 operands[1],
 							 operands[2],
+							 shft,
 							 operands[3]));
 	}
       DONE;
diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
index 3e18f0405fa3..e1a2b265b205 100644
--- a/gcc/config/aarch64/aarch64.md
+++ b/gcc/config/aarch64/aarch64.md
@@ -231,7 +231,6 @@
     UNSPEC_SSP_SYSREG
     UNSPEC_SP_SET
     UNSPEC_SP_TEST
-    UNSPEC_RSHRN
     UNSPEC_RSQRT
     UNSPEC_RSQRTE
     UNSPEC_RSQRTS
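
For reference, the semantics the new RTL spells out (and which the
vrshrn_high_n_* intrinsics expose) can be modelled in plain C.  This is
a minimal sketch, assuming illustrative helper names that exist neither
in GCC nor in arm_neon.h; it covers the V8HI -> V16QI case:

#include <stdint.h>

/* Reference model of one narrowed element: plus:VQN adds the rounding
   bias 1 << (shift - 1) modulo 2^16, lshiftrt:VQN shifts right
   logically, and truncate keeps the low 8 bits.  Valid shift amounts
   are 1..8 for this element size.  */
static inline int8_t
rshrn_elt (int16_t x, unsigned int shift)
{
  uint16_t biased = (uint16_t) ((uint16_t) x + (1u << (shift - 1)));
  return (int8_t) (biased >> shift);
}

/* rshrn2 vd.16b, vn.8h, #shift: the vec_concat keeps operand 1 as the
   low half of the result and fills the high half with the narrowed
   elements of operand 2.  */
static void
rshrn2_v16qi (int8_t dst[16], const int8_t low[8],
	      const int16_t src[8], unsigned int shift)
{
  for (int i = 0; i < 8; i++)
    {
      dst[i] = low[i];
      dst[i + 8] = rshrn_elt (src[i], shift);
    }
}

The insn conditions above encode exactly this rounding bias: the dup'd
addend in operands[3] must equal 1 << (shift - 1), so for a shift of 5
the expander generates a vector of 16s.  The _be variant differs only
in swapping the vec_concat operands to account for GCC's reversed lane
numbering on big-endian; the architectural operation is the same.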