aarch64: Simplify SHRN, RSHRN expanders and patterns
author    Kyrylo Tkachov <kyrylo.tkachov@arm.com>
          Tue, 6 Jun 2023 08:56:52 +0000 (09:56 +0100)
committer Kyrylo Tkachov <kyrylo.tkachov@arm.com>
          Tue, 6 Jun 2023 08:56:52 +0000 (09:56 +0100)
Now that we've got the <vczle><vczbe> annotations, we can get rid of the explicit
!BYTES_BIG_ENDIAN and BYTES_BIG_ENDIAN patterns for the narrowing shift instructions.
This allows us to clean up the expanders as well.

Bootstrapped and tested on aarch64-none-linux-gnu and aarch64_be-none-elf.
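
For illustration, a minimal sketch of the intrinsics these patterns back, using the
standard arm_neon.h API (the function names below are made up for the example):

#include <arm_neon.h>

/* Goes through the aarch64_shrn<mode> expander; at -O2 this should emit a
   single "shrn v0.8b, v0.8h, #3" on little- and big-endian alike.  */
int8x8_t
narrow_shr (int16x8_t a)
{
  return vshrn_n_s16 (a, 3);
}

/* Matches the new aarch64_rshrn<mode><vczle><vczbe>_insn pattern and should
   emit "rshrn v0.8b, v0.8h, #3".  */
int8x8_t
narrow_rshr (int16x8_t a)
{
  return vrshrn_n_s16 (a, 3);
}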

gcc/ChangeLog:

* config/aarch64/aarch64-simd.md (aarch64_shrn<mode>_insn_le): Delete.
(aarch64_shrn<mode>_insn_be): Delete.
(*aarch64_<srn_op>shrn<mode>_vect): Rename to...
(*aarch64_<srn_op>shrn<mode><vczle><vczbe>): ... This.
(aarch64_shrn<mode>): Remove reference to the above deleted patterns.
(aarch64_rshrn<mode>_insn_le): Delete.
(aarch64_rshrn<mode>_insn_be): Delete.
(aarch64_rshrn<mode><vczle><vczbe>_insn): New define_insn.
(aarch64_rshrn<mode>): Remove references to the above deleted patterns.

gcc/testsuite/ChangeLog:

* gcc.target/aarch64/simd/pr99195_5.c: Add testing for shrn_n, rshrn_n
intrinsics.

gcc/config/aarch64/aarch64-simd.md
gcc/testsuite/gcc.target/aarch64/simd/pr99195_5.c

index 3b79e246769afd117f46bcc3a578d7373388e348..f7cf39f930ca2ca68a8b9883cf2c7f149527b3af 100644
  }
 )
 
-(define_insn "aarch64_shrn<mode>_insn_le"
-  [(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
-       (vec_concat:<VNARROWQ2>
-         (truncate:<VNARROWQ>
-           (lshiftrt:VQN (match_operand:VQN 1 "register_operand" "w")
-             (match_operand:VQN 2 "aarch64_simd_shift_imm_vec_<vn_mode>")))
-         (match_operand:<VNARROWQ> 3 "aarch64_simd_or_scalar_imm_zero")))]
-  "TARGET_SIMD && !BYTES_BIG_ENDIAN"
-  "shrn\\t%0.<Vntype>, %1.<Vtype>, %2"
-  [(set_attr "type" "neon_shift_imm_narrow_q")]
-)
-
-(define_insn "aarch64_shrn<mode>_insn_be"
-  [(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
-       (vec_concat:<VNARROWQ2>
-         (match_operand:<VNARROWQ> 3 "aarch64_simd_or_scalar_imm_zero")
-         (truncate:<VNARROWQ>
-           (lshiftrt:VQN (match_operand:VQN 1 "register_operand" "w")
-             (match_operand:VQN 2 "aarch64_simd_shift_imm_vec_<vn_mode>")))))]
-  "TARGET_SIMD && BYTES_BIG_ENDIAN"
-  "shrn\\t%0.<Vntype>, %1.<Vtype>, %2"
-  [(set_attr "type" "neon_shift_imm_narrow_q")]
-)
-
-(define_insn "*aarch64_<srn_op>shrn<mode>_vect"
+(define_insn "*aarch64_<srn_op>shrn<mode><vczle><vczbe>"
   [(set (match_operand:<VNARROWQ> 0 "register_operand" "=w")
         (truncate:<VNARROWQ>
           (SHIFTRT:VQN (match_operand:VQN 1 "register_operand" "w")
   {
     operands[2] = aarch64_simd_gen_const_vector_dup (<MODE>mode,
                                                 INTVAL (operands[2]));
-    rtx tmp = gen_reg_rtx (<VNARROWQ2>mode);
-    if (BYTES_BIG_ENDIAN)
-      emit_insn (gen_aarch64_shrn<mode>_insn_be (tmp, operands[1],
-                               operands[2], CONST0_RTX (<VNARROWQ>mode)));
-    else
-      emit_insn (gen_aarch64_shrn<mode>_insn_le (tmp, operands[1],
-                               operands[2], CONST0_RTX (<VNARROWQ>mode)));
-
-    /* The intrinsic expects a narrow result, so emit a subreg that will get
-       optimized away as appropriate.  */
-    emit_move_insn (operands[0], lowpart_subreg (<VNARROWQ>mode, tmp,
-                                                <VNARROWQ2>mode));
-    DONE;
   }
 )
 
-(define_insn "aarch64_rshrn<mode>_insn_le"
-  [(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
-       (vec_concat:<VNARROWQ2>
-         (truncate:<VNARROWQ>
-           (lshiftrt:VQN
-             (plus:VQN (match_operand:VQN 1 "register_operand" "w")
-                       (match_operand:VQN 3 "aarch64_simd_rshrn_imm_vec"))
-             (match_operand:VQN 2 "aarch64_simd_shift_imm_vec_<vn_mode>")))
-         (match_operand:<VNARROWQ> 4 "aarch64_simd_or_scalar_imm_zero")))]
-  "TARGET_SIMD && !BYTES_BIG_ENDIAN
-   && INTVAL (CONST_VECTOR_ELT (operands[3], 0))
-      == (HOST_WIDE_INT_1 << (INTVAL (CONST_VECTOR_ELT (operands[2], 0)) - 1))"
-  "rshrn\\t%0.<Vntype>, %1.<Vtype>, %2"
-  [(set_attr "type" "neon_shift_imm_narrow_q")]
-)
-
-(define_insn "aarch64_rshrn<mode>_insn_be"
-  [(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
-       (vec_concat:<VNARROWQ2>
-         (match_operand:<VNARROWQ> 4 "aarch64_simd_or_scalar_imm_zero")
-         (truncate:<VNARROWQ>
-           (lshiftrt:VQN
-             (plus:VQN (match_operand:VQN 1 "register_operand" "w")
-                       (match_operand:VQN 3 "aarch64_simd_rshrn_imm_vec"))
-             (match_operand:VQN 2 "aarch64_simd_shift_imm_vec_<vn_mode>")))))]
-  "TARGET_SIMD && BYTES_BIG_ENDIAN
+(define_insn "aarch64_rshrn<mode><vczle><vczbe>_insn"
+  [(set (match_operand:<VNARROWQ> 0 "register_operand" "=w")
+       (truncate:<VNARROWQ>
+         (lshiftrt:VQN
+           (plus:VQN (match_operand:VQN 1 "register_operand" "w")
+                     (match_operand:VQN 3 "aarch64_simd_rshrn_imm_vec"))
+           (match_operand:VQN 2 "aarch64_simd_shift_imm_vec_<vn_mode>"))))]
+  "TARGET_SIMD
    && INTVAL (CONST_VECTOR_ELT (operands[3], 0))
       == (HOST_WIDE_INT_1 << (INTVAL (CONST_VECTOR_ELT (operands[2], 0)) - 1))"
   "rshrn\\t%0.<Vntype>, %1.<Vtype>, %2"
          = aarch64_simd_gen_const_vector_dup (<MODE>mode,
                                               HOST_WIDE_INT_1U
                                                << (INTVAL (operands[2]) - 1));
-       rtx tmp = gen_reg_rtx (<VNARROWQ2>mode);
        operands[2] = aarch64_simd_gen_const_vector_dup (<MODE>mode,
                                                         INTVAL (operands[2]));
-       if (BYTES_BIG_ENDIAN)
-         emit_insn (
-               gen_aarch64_rshrn<mode>_insn_be (tmp, operands[1],
-                                                operands[2], shft,
-                                                CONST0_RTX (<VNARROWQ>mode)));
-       else
-         emit_insn (
-               gen_aarch64_rshrn<mode>_insn_le (tmp, operands[1],
-                                                operands[2], shft,
-                                                CONST0_RTX (<VNARROWQ>mode)));
-
-       /* The intrinsic expects a narrow result, so emit a subreg that will
-          get optimized away as appropriate.  */
-       emit_move_insn (operands[0], lowpart_subreg (<VNARROWQ>mode, tmp,
-                                                    <VNARROWQ2>mode));
+       emit_insn (gen_aarch64_rshrn<mode>_insn (operands[0], operands[1],
+                                                operands[2], shft));
       }
     DONE;
   }
index a07f82179cc58d6342780919e11c899459e6ad4a..d1143243999b6538d39b8a434ee7fbe406b95af5 100644
@@ -17,7 +17,9 @@ foo_##OP##_##OS (IT a)                     \
 
 #define FUNC(OT,IT,IMT,IS,OS)                  \
 MYOP (OT, IT, IMT, qshrn_n, IS, OS)            \
-MYOP (OT, IT, IMT, qrshrn_n, IS, OS)
+MYOP (OT, IT, IMT, qrshrn_n, IS, OS)           \
+MYOP (OT, IT, IMT, shrn_n, IS, OS)             \
+MYOP (OT, IT, IMT, rshrn_n, IS, OS)
 
 #define FUNCUN(OT,IT,IMT,IS,OS)                        \
 MYOP (OT, IT, IMT, qshrun_n, IS, OS)           \
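
As a rough sketch of what the added shrn_n/rshrn_n entries exercise (the real test
functions are generated by the MYOP/FUNC macros above; the helper name here is made
up), the interesting shape is a narrowing shift whose result is combined with a zero
half, which the <vczle><vczbe>-annotated patterns now cover on both endiannesses:

#include <arm_neon.h>

/* Narrowing shift concatenated with a zero upper half.  With the annotated
   pattern this should still collapse to a single "shrn", with no extra
   "mov"/"fmov" to clear the top half.  */
int8x16_t
shrn_concat_zero (int16x8_t a)
{
  return vcombine_s8 (vshrn_n_s16 (a, 3), vdup_n_s8 (0));
}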