From: Kyrylo Tkachov Date: Tue, 30 May 2023 09:36:46 +0000 (+0100) Subject: aarch64: Reimplement v(r)hadd and vhsub intrinsics with RTL codes X-Git-Tag: basepoints/gcc-15~8739 X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=a4dae58abe1a3961aece740b0fada995750c277c;p=thirdparty%2Fgcc.git aarch64: Reimplement v(r)hadd and vhsub intrinsics with RTL codes This patch reimplements the MD patterns for the UHADD, SHADD, UHSUB, SHSUB, URHADD, SRHADD instructions using standard RTL operations rather than unspecs. The correct RTL representation involves widening the inputs before adding them and halving, followed by a truncation back to the original mode. An unfortunate wart in the patch is that we end up having very similar expanders for the intrinsics through the aarch64_<su>h<addsub><mode> and aarch64_<su>rhadd<mode> names and the standard names for the vector averaging optabs avg<mode>3_floor and avg<mode>3_ceil. I'd like to reuse avg<mode>3_ceil for the intrinsics builtin as well but our scheme in aarch64-simd-builtins.def and aarch64-builtins.cc makes it awkward by only allowing mappings of entries in aarch64-simd-builtins.def to: 0 - CODE_FOR_aarch64_<name><mode> 1-9 - CODE_FOR_<name><mode><1-9> 10 - CODE_FOR_<name><mode> whereas here we want a string after the <mode>, i.e. CODE_FOR_uavg<mode>3_ceil. This patch adds a bit of remapping logic in aarch64-builtins.cc before the construction of the builtin info that remaps the CODE_FOR_* definitions in aarch64-simd-builtins.def to the optab-derived ones. CODE_FOR_aarch64_srhaddv4si gets remapped to CODE_FOR_avgv4si3_ceil, for example. It's a bit specific to this case, but this solution requires the least invasive changes while avoiding having duplicate expanders just for the sake of a different pattern name. Bootstrapped and tested on aarch64-none-linux-gnu and aarch64_be-none-elf. gcc/ChangeLog: * config/aarch64/aarch64-builtins.cc (VAR1): Move to after inclusion of aarch64-builtin-iterators.h. Add definition to remap shadd, uhadd, srhadd, urhadd builtin codes for standard optab ones. 
* config/aarch64/aarch64-simd.md (avg3_floor): Rename to... (avg3_floor): ... This. Expand to RTL codes rather than unspec. (avg3_ceil): Rename to... (avg3_ceil): ... This. Expand to RTL codes rather than unspec. (aarch64_hsub): New define_expand. (aarch64_h): Split into... (*aarch64_h_insn): ... This... (*aarch64_rhadd_insn): ... And this. --- diff --git a/gcc/config/aarch64/aarch64-builtins.cc b/gcc/config/aarch64/aarch64-builtins.cc index cb6aae3f1faf..e0bb2128e029 100644 --- a/gcc/config/aarch64/aarch64-builtins.cc +++ b/gcc/config/aarch64/aarch64-builtins.cc @@ -502,8 +502,11 @@ aarch64_types_storestruct_lane_p_qualifiers[SIMD_MAX_BUILTIN_ARGS] #define CF4(N, X) CODE_FOR_##N##X##4 #define CF10(N, X) CODE_FOR_##N##X -#define VAR1(T, N, MAP, FLAG, A) \ - {#N #A, UP (A), CF##MAP (N, A), 0, TYPES_##T, FLAG_##FLAG}, +/* Define cascading VAR macros that are used from + aarch64-builtin-iterators.h to iterate over modes. These definitions + will end up generating a number of VAR1 expansions and code later on in the + file should redefine VAR1 to whatever it needs to process on a per-mode + basis. */ #define VAR2(T, N, MAP, FLAG, A, B) \ VAR1 (T, N, MAP, FLAG, A) \ VAR1 (T, N, MAP, FLAG, B) @@ -552,6 +555,26 @@ aarch64_types_storestruct_lane_p_qualifiers[SIMD_MAX_BUILTIN_ARGS] #include "aarch64-builtin-iterators.h" +/* The builtins below should be expanded through the standard optabs + CODE_FOR_[u]avg3_[floor,ceil]. However the mapping scheme in + aarch64-simd-builtins.def does not easily allow us to have a pre-mode + ("uavg") and post-mode string ("_ceil") in the CODE_FOR_* construction. + So the builtins use a name that is natural for AArch64 instructions + e.g. "aarch64_srhadd" and we re-map these to the optab-related + CODE_FOR_ here. 
*/ +#undef VAR1 +#define VAR1(F,T1,T2,I,M) \ +constexpr insn_code CODE_FOR_aarch64_##F##M = CODE_FOR_##T1##M##3##T2; + +BUILTIN_VDQ_BHSI (srhadd, avg, _ceil, 0) +BUILTIN_VDQ_BHSI (urhadd, uavg, _ceil, 0) +BUILTIN_VDQ_BHSI (shadd, avg, _floor, 0) +BUILTIN_VDQ_BHSI (uhadd, uavg, _floor, 0) + +#undef VAR1 +#define VAR1(T, N, MAP, FLAG, A) \ + {#N #A, UP (A), CF##MAP (N, A), 0, TYPES_##T, FLAG_##FLAG}, + static aarch64_simd_builtin_datum aarch64_simd_builtin_data[] = { #include "aarch64-simd-builtins.def" }; diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md index 52901642f935..c4171ed214d0 100644 --- a/gcc/config/aarch64/aarch64-simd.md +++ b/gcc/config/aarch64/aarch64-simd.md @@ -4944,30 +4944,86 @@ ;; h. -(define_expand "avg3_floor" +(define_expand "avg3_floor" [(set (match_operand:VDQ_BHSI 0 "register_operand") - (unspec:VDQ_BHSI [(match_operand:VDQ_BHSI 1 "register_operand") - (match_operand:VDQ_BHSI 2 "register_operand")] - HADD))] + (truncate:VDQ_BHSI + (ashiftrt: + (plus: + (ANY_EXTEND: + (match_operand:VDQ_BHSI 1 "register_operand")) + (ANY_EXTEND: + (match_operand:VDQ_BHSI 2 "register_operand"))) + (match_dup 3))))] "TARGET_SIMD" + { + operands[3] = CONST1_RTX (mode); + } ) -(define_expand "avg3_ceil" +(define_expand "avg3_ceil" [(set (match_operand:VDQ_BHSI 0 "register_operand") - (unspec:VDQ_BHSI [(match_operand:VDQ_BHSI 1 "register_operand") - (match_operand:VDQ_BHSI 2 "register_operand")] - RHADD))] + (truncate:VDQ_BHSI + (ashiftrt: + (plus: + (plus: + (ANY_EXTEND: + (match_operand:VDQ_BHSI 1 "register_operand")) + (ANY_EXTEND: + (match_operand:VDQ_BHSI 2 "register_operand"))) + (match_dup 3)) + (match_dup 3))))] "TARGET_SIMD" + { + operands[3] = CONST1_RTX (mode); + } ) -(define_insn "aarch64_h" +(define_expand "aarch64_hsub" + [(set (match_operand:VDQ_BHSI 0 "register_operand") + (truncate:VDQ_BHSI + (ashiftrt: + (minus: + (ANY_EXTEND: + (match_operand:VDQ_BHSI 1 "register_operand")) + (ANY_EXTEND: + 
(match_operand:VDQ_BHSI 2 "register_operand"))) + (match_dup 3))))] + "TARGET_SIMD" + { + operands[3] = CONST1_RTX (mode); + } +) + +(define_insn "*aarch64_h_insn" [(set (match_operand:VDQ_BHSI 0 "register_operand" "=w") - (unspec:VDQ_BHSI [(match_operand:VDQ_BHSI 1 "register_operand" "w") - (match_operand:VDQ_BHSI 2 "register_operand" "w")] - HADDSUB))] + (truncate:VDQ_BHSI + (ashiftrt: + (ADDSUB: + (ANY_EXTEND: + (match_operand:VDQ_BHSI 1 "register_operand" "w")) + (ANY_EXTEND: + (match_operand:VDQ_BHSI 2 "register_operand" "w"))) + (match_operand: 3 "aarch64_simd_imm_one"))))] "TARGET_SIMD" - "h\\t%0., %1., %2." - [(set_attr "type" "neon__halve")] + "h\\t%0., %1., %2." + [(set_attr "type" "neon__halve")] +) + +(define_insn "*aarch64_rhadd_insn" + [(set (match_operand:VDQ_BHSI 0 "register_operand" "=w") + (truncate:VDQ_BHSI + (ashiftrt: + (plus: + (plus: + (ANY_EXTEND: + (match_operand:VDQ_BHSI 1 "register_operand" "w")) + (ANY_EXTEND: + (match_operand:VDQ_BHSI 2 "register_operand" "w"))) + (match_operand: 3 "aarch64_simd_imm_one")) + (match_dup 3))))] + "TARGET_SIMD" + "rhadd\\t%0., %1., %2." + [(set_attr "type" "neon_add_halve")] ) ;; hn.