From: Jonathan Wright <jonathan.wright@arm.com>
Date: Fri, 14 May 2021 16:18:34 +0000 (+0100)
Subject: aarch64: Refactor aarch64_<sur>q<r>shr<u>n_n<mode> RTL pattern
X-Git-Tag: basepoints/gcc-13~7396
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=ddbdb9a384f53419d0e6fbcca2a4534a2668e5f8;p=thirdparty%2Fgcc.git

aarch64: Refactor aarch64_<sur>q<r>shr<u>n_n<mode> RTL pattern

Split the aarch64_<sur>q<r>shr<u>n_n<mode> pattern into separate scalar
and vector variants. Further split the vector pattern into big/little
endian variants that model the zero-high-half semantics of the
underlying instruction - allowing for more combinations with the
write-to-high-half variant (aarch64_<sur>q<r>shr<u>n2_n<mode>).

gcc/ChangeLog:

2021-05-14  Jonathan Wright  <jonathan.wright@arm.com>

	* config/aarch64/aarch64-simd-builtins.def: Split builtin
	generation for aarch64_<sur>q<r>shr<u>n_n<mode> pattern into
	separate scalar and vector generators.
	* config/aarch64/aarch64-simd.md
	(aarch64_<sur>q<r>shr<u>n_n<mode>): Define as an expander and
	split into...
	(aarch64_<sur>q<r>shr<u>n_n<mode>_insn_le): This and...
	(aarch64_<sur>q<r>shr<u>n_n<mode>_insn_be): This.
	* config/aarch64/iterators.md: Define SD_HSDI iterator.
---

diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
index 1e81bb53287e..18baa6720b09 100644
--- a/gcc/config/aarch64/aarch64-simd-builtins.def
+++ b/gcc/config/aarch64/aarch64-simd-builtins.def
@@ -421,12 +421,18 @@
   BUILTIN_VQW (SHIFTIMM, sshll2_n, 0, NONE)
   BUILTIN_VQW (SHIFTIMM, ushll2_n, 0, NONE)
   /* Implemented by aarch64_<sur>q<r>shr<u>n_n<mode>.  */
-  BUILTIN_VSQN_HSDI (SHIFTIMM, sqshrun_n, 0, NONE)
-  BUILTIN_VSQN_HSDI (SHIFTIMM, sqrshrun_n, 0, NONE)
-  BUILTIN_VSQN_HSDI (SHIFTIMM, sqshrn_n, 0, NONE)
-  BUILTIN_VSQN_HSDI (USHIFTIMM, uqshrn_n, 0, NONE)
-  BUILTIN_VSQN_HSDI (SHIFTIMM, sqrshrn_n, 0, NONE)
-  BUILTIN_VSQN_HSDI (USHIFTIMM, uqrshrn_n, 0, NONE)
+  BUILTIN_VQN (SHIFTIMM, sqshrun_n, 0, NONE)
+  BUILTIN_VQN (SHIFTIMM, sqrshrun_n, 0, NONE)
+  BUILTIN_VQN (SHIFTIMM, sqshrn_n, 0, NONE)
+  BUILTIN_VQN (USHIFTIMM, uqshrn_n, 0, NONE)
+  BUILTIN_VQN (SHIFTIMM, sqrshrn_n, 0, NONE)
+  BUILTIN_VQN (USHIFTIMM, uqrshrn_n, 0, NONE)
+  BUILTIN_SD_HSDI (SHIFTIMM, sqshrun_n, 0, NONE)
+  BUILTIN_SD_HSDI (SHIFTIMM, sqrshrun_n, 0, NONE)
+  BUILTIN_SD_HSDI (SHIFTIMM, sqshrn_n, 0, NONE)
+  BUILTIN_SD_HSDI (USHIFTIMM, uqshrn_n, 0, NONE)
+  BUILTIN_SD_HSDI (SHIFTIMM, sqrshrn_n, 0, NONE)
+  BUILTIN_SD_HSDI (USHIFTIMM, uqrshrn_n, 0, NONE)
   /* Implemented by aarch64_<sur>q<r>shr<u>n2_n<mode>.  */
   BUILTIN_VQN (SHIFT2IMM_UUSS, sqshrun2_n, 0, NONE)
   BUILTIN_VQN (SHIFT2IMM_UUSS, sqrshrun2_n, 0, NONE)
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index 79523093ec32..c67fa3fb6f0c 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -6045,7 +6045,7 @@

 (define_insn "aarch64_<sur>q<r>shr<u>n_n<mode>"
   [(set (match_operand:<VNARROWQ> 0 "register_operand" "=w")
-	(unspec:<VNARROWQ> [(match_operand:VSQN_HSDI 1 "register_operand" "w")
+	(unspec:<VNARROWQ> [(match_operand:SD_HSDI 1 "register_operand" "w")
 			    (match_operand:SI 2
 			      "aarch64_simd_shift_imm_offset_<ve_mode>" "i")]
 			   VQSHRN_N))]
@@ -6054,6 +6054,58 @@
   [(set_attr "type" "neon_sat_shift_imm_narrow_q")]
 )

+(define_insn "aarch64_<sur>q<r>shr<u>n_n<mode>_insn_le"
+  [(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
+	(vec_concat:<VNARROWQ2>
+	  (unspec:<VNARROWQ>
+	    [(match_operand:VQN 1 "register_operand" "w")
+	     (match_operand:VQN 2 "aarch64_simd_shift_imm_vec_<vn_mode>")]
+	    VQSHRN_N)
+	  (match_operand:<VNARROWQ> 3 "aarch64_simd_or_scalar_imm_zero")))]
+  "TARGET_SIMD && !BYTES_BIG_ENDIAN"
+  "<sur>q<r>shr<u>n\\t%<vn2>0<Vmntype>, %<v>1<Vmtype>, %2"
+  [(set_attr "type" "neon_shift_imm_narrow_q")]
+)
+
+(define_insn "aarch64_<sur>q<r>shr<u>n_n<mode>_insn_be"
+  [(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
+	(vec_concat:<VNARROWQ2>
+	  (match_operand:<VNARROWQ> 3 "aarch64_simd_or_scalar_imm_zero")
+	  (unspec:<VNARROWQ>
+	    [(match_operand:VQN 1 "register_operand" "w")
+	     (match_operand:VQN 2 "aarch64_simd_shift_imm_vec_<vn_mode>")]
+	    VQSHRN_N)))]
+  "TARGET_SIMD && BYTES_BIG_ENDIAN"
+  "<sur>q<r>shr<u>n\\t%<vn2>0<Vmntype>, %<v>1<Vmtype>, %2"
+  [(set_attr "type" "neon_shift_imm_narrow_q")]
+)
+
+(define_expand "aarch64_<sur>q<r>shr<u>n_n<mode>"
+  [(set (match_operand:<VNARROWQ> 0 "register_operand" "=w")
+	(unspec:<VNARROWQ> [(match_operand:VQN 1 "register_operand")
+			    (match_operand:SI 2
+			      "aarch64_simd_shift_imm_offset_<ve_mode>")]
+			   VQSHRN_N))]
+  "TARGET_SIMD"
+  {
+    operands[2] = aarch64_simd_gen_const_vector_dup (<MODE>mode,
+						     INTVAL (operands[2]));
+    rtx tmp = gen_reg_rtx (<VNARROWQ2>mode);
+    if (BYTES_BIG_ENDIAN)
+      emit_insn (gen_aarch64_<sur>q<r>shr<u>n_n<mode>_insn_be (tmp,
+		  operands[1], operands[2], CONST0_RTX (<VNARROWQ>mode)));
+    else
+      emit_insn (gen_aarch64_<sur>q<r>shr<u>n_n<mode>_insn_le (tmp,
+		  operands[1], operands[2], CONST0_RTX (<VNARROWQ>mode)));
+
+    /* The intrinsic expects a narrow result, so emit a subreg that will get
+       optimized away as appropriate.  */
+    emit_move_insn (operands[0], lowpart_subreg (<VNARROWQ>mode, tmp,
+						 <VNARROWQ2>mode));
+    DONE;
+  }
+)
+
 (define_insn "aarch64_<sur>q<r>shr<u>n2_n<mode>_insn_le"
   [(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
 	(vec_concat:<VNARROWQ2>
diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md
index 0ec93b0ff6a0..e9047d00d979 100644
--- a/gcc/config/aarch64/iterators.md
+++ b/gcc/config/aarch64/iterators.md
@@ -282,6 +282,9 @@
 ;; Scalar 64-bit container: 16, 32-bit integer modes
 (define_mode_iterator SD_HSI [HI SI])

+;; Scalar 64-bit container: 16-bit, 32-bit and 64-bit integer modes.
+(define_mode_iterator SD_HSDI [HI SI DI])
+
 ;; Advanced SIMD 64-bit container: 16, 32-bit integer modes.
 (define_mode_iterator VQ_HSI [V8HI V4SI])
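
Both kinds of intrinsic previously expanded through the single
aarch64_<sur>q<r>shr<u>n_n<mode> insn; after this change the vector form
goes through the new define_expand (and its LE/BE insns) while the scalar
form is matched by the SD_HSDI insn. A minimal sketch using standard ACLE
intrinsics from arm_neon.h (illustration only, not part of the patch; the
instruction selection noted in the comments is the expected one):

  #include <arm_neon.h>

  /* Vector form: V4SI -> V4HI.  Expands via the new LE/BE insns and is
     expected to assemble to a single "sqshrn v0.4h, v0.4s, #3".  */
  int16x4_t
  narrow_vec (int32x4_t a)
  {
    return vqshrn_n_s32 (a, 3);
  }

  /* Scalar form: DI -> SI.  Matched by the SD_HSDI variant of the
     original insn and expected to assemble to "sqshrn s0, d0, #3".  */
  int32_t
  narrow_scalar (int64_t a)
  {
    return vqshrnd_n_s64 (a, 3);
  }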
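
The vec_concat with an explicit zero operand in the new LE/BE insns is
what "model the zero-high-half semantics" means: each insn now describes
the full 128-bit write, including the implicitly zeroed high half, rather
than only the 64-bit narrow result. That is the property that lets a
following write-to-high-half operation combine with it. A sketch of the
kind of pairing this enables, again with standard ACLE intrinsics (the
fused sqshrn/sqshrn2 selection is the expected outcome, not something
this patch asserts on its own):

  #include <arm_neon.h>

  /* vqshrn_n_s32 writes the low 64 bits of the destination and zeroes
     the high 64 bits; vqshrn_high_n_s32 then fills the high 64 bits.
     With the zeroing visible in RTL, the pair is expected to map onto
     sqshrn + sqshrn2 with no intervening register moves.  */
  int16x8_t
  narrow_pair (int32x4_t a, int32x4_t b)
  {
    return vqshrn_high_n_s32 (vqshrn_n_s32 (a, 3), b, 3);
  }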