From: Jonathan Wright Date: Fri, 12 Feb 2021 15:37:05 +0000 (+0000) Subject: aarch64: Use RTL builtins for v[q]tbx intrinsics X-Git-Tag: basepoints/gcc-13~8076 X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=4362c9c88d9092a6585cd061e5535cb2f0453d13;p=thirdparty%2Fgcc.git aarch64: Use RTL builtins for v[q]tbx intrinsics Rewrite v[q]tbx Neon intrinsics to use RTL builtins rather than inline assembly code, allowing for better scheduling and optimization. gcc/ChangeLog: 2021-02-12 Jonathan Wright * config/aarch64/aarch64-simd-builtins.def: Add tbx1 builtin generator macros. * config/aarch64/aarch64-simd.md (aarch64_tbx1): Define. * config/aarch64/arm_neon.h (vqtbx1_s8): Use RTL builtin instead of inline asm. (vqtbx1_u8): Likewise. (vqtbx1_p8): Likewise. (vqtbx1q_s8): Likewise. (vqtbx1q_u8): Likewise. (vqtbx1q_p8): Likewise. (vtbx2_s8): Likewise. (vtbx2_u8): Likewise. (vtbx2_p8): Likewise. --- diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def index 04b392bf3d00..a7d4f2b94b58 100644 --- a/gcc/config/aarch64/aarch64-simd-builtins.def +++ b/gcc/config/aarch64/aarch64-simd-builtins.def @@ -708,6 +708,10 @@ VAR1 (BINOP, qtbl4, 0, NONE, v8qi) VAR1 (BINOP, qtbl4, 0, NONE, v16qi) + /* Implemented by aarch64_tbx1. */ + VAR2 (TERNOP, tbx1, 0, NONE, v8qi, v16qi) + VAR2 (TERNOPU, tbx1, 0, NONE, v8qi, v16qi) + /* Implemented by aarch64_tbx4. 
*/ VAR1 (TERNOP, tbx4, 0, NONE, v8qi) VAR1 (TERNOP, tbx4, 0, NONE, v16qi) diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md index 565ce5a4071c..299d911aff69 100644 --- a/gcc/config/aarch64/aarch64-simd.md +++ b/gcc/config/aarch64/aarch64-simd.md @@ -6852,6 +6852,17 @@ [(set_attr "type" "neon_tbl1")] ) +(define_insn "aarch64_tbx1" + [(set (match_operand:VB 0 "register_operand" "=w") + (unspec:VB [(match_operand:VB 1 "register_operand" "0") + (match_operand:V16QI 2 "register_operand" "w") + (match_operand:VB 3 "register_operand" "w")] + UNSPEC_TBX))] + "TARGET_SIMD" + "tbx\\t%0., {%2.16b}, %3." + [(set_attr "type" "neon_tbl1")] +) + ;; Two source registers. (define_insn "aarch64_tbl2v16qi" diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h index 08171290c3bb..ead2bd0990d4 100644 --- a/gcc/config/aarch64/arm_neon.h +++ b/gcc/config/aarch64/arm_neon.h @@ -9625,72 +9625,46 @@ __extension__ extern __inline int8x8_t __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vqtbx1_s8 (int8x8_t __r, int8x16_t __tab, uint8x8_t __idx) { - int8x8_t __result = __r; - __asm__ ("tbx %0.8b,{%1.16b},%2.8b" - : "+w"(__result) - : "w"(__tab), "w"(__idx) - : /* No clobbers */); - return __result; + return __builtin_aarch64_tbx1v8qi (__r, __tab, (int8x8_t) __idx); } __extension__ extern __inline uint8x8_t __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vqtbx1_u8 (uint8x8_t __r, uint8x16_t __tab, uint8x8_t __idx) { - uint8x8_t __result = __r; - __asm__ ("tbx %0.8b,{%1.16b},%2.8b" - : "+w"(__result) - : "w"(__tab), "w"(__idx) - : /* No clobbers */); - return __result; + return __builtin_aarch64_tbx1v8qi_uuuu (__r, __tab, __idx); } __extension__ extern __inline poly8x8_t __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vqtbx1_p8 (poly8x8_t __r, poly8x16_t __tab, uint8x8_t __idx) { - poly8x8_t __result = __r; - __asm__ ("tbx %0.8b,{%1.16b},%2.8b" - : "+w"(__result) - : "w"(__tab), 
"w"(__idx) - : /* No clobbers */); - return __result; + return (poly8x8_t) __builtin_aarch64_tbx1v8qi ((int8x8_t) __r, + (int8x16_t) __tab, + (int8x8_t) __idx); } __extension__ extern __inline int8x16_t __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vqtbx1q_s8 (int8x16_t __r, int8x16_t __tab, uint8x16_t __idx) { - int8x16_t __result = __r; - __asm__ ("tbx %0.16b,{%1.16b},%2.16b" - : "+w"(__result) - : "w"(__tab), "w"(__idx) - : /* No clobbers */); - return __result; + return __builtin_aarch64_tbx1v16qi (__r, __tab, (int8x16_t) __idx); } __extension__ extern __inline uint8x16_t __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vqtbx1q_u8 (uint8x16_t __r, uint8x16_t __tab, uint8x16_t __idx) { - uint8x16_t __result = __r; - __asm__ ("tbx %0.16b,{%1.16b},%2.16b" - : "+w"(__result) - : "w"(__tab), "w"(__idx) - : /* No clobbers */); - return __result; + return __builtin_aarch64_tbx1v16qi_uuuu (__r, __tab, __idx); } __extension__ extern __inline poly8x16_t __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vqtbx1q_p8 (poly8x16_t __r, poly8x16_t __tab, uint8x16_t __idx) { - poly8x16_t __result = __r; - __asm__ ("tbx %0.16b,{%1.16b},%2.16b" - : "+w"(__result) - : "w"(__tab), "w"(__idx) - : /* No clobbers */); - return __result; + return (poly8x16_t) __builtin_aarch64_tbx1v16qi ((int8x16_t) __r, + (int8x16_t) __tab, + (int8x16_t) __idx); } /* V7 legacy table intrinsics. 
*/ @@ -9854,39 +9828,26 @@ __extension__ extern __inline int8x8_t __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vtbx2_s8 (int8x8_t __r, int8x8x2_t __tab, int8x8_t __idx) { - int8x8_t __result = __r; int8x16_t __temp = vcombine_s8 (__tab.val[0], __tab.val[1]); - __asm__ ("tbx %0.8b, {%1.16b}, %2.8b" - : "+w"(__result) - : "w"(__temp), "w"(__idx) - : /* No clobbers */); - return __result; + return __builtin_aarch64_tbx1v8qi (__r, __temp, __idx); } __extension__ extern __inline uint8x8_t __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vtbx2_u8 (uint8x8_t __r, uint8x8x2_t __tab, uint8x8_t __idx) { - uint8x8_t __result = __r; uint8x16_t __temp = vcombine_u8 (__tab.val[0], __tab.val[1]); - __asm__ ("tbx %0.8b, {%1.16b}, %2.8b" - : "+w"(__result) - : "w"(__temp), "w"(__idx) - : /* No clobbers */); - return __result; + return __builtin_aarch64_tbx1v8qi_uuuu (__r, __temp, __idx); } __extension__ extern __inline poly8x8_t __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vtbx2_p8 (poly8x8_t __r, poly8x8x2_t __tab, uint8x8_t __idx) { - poly8x8_t __result = __r; poly8x16_t __temp = vcombine_p8 (__tab.val[0], __tab.val[1]); - __asm__ ("tbx %0.8b, {%1.16b}, %2.8b" - : "+w"(__result) - : "w"(__temp), "w"(__idx) - : /* No clobbers */); - return __result; + return (poly8x8_t) __builtin_aarch64_tbx1v8qi ((int8x8_t) __r, + (int8x16_t) __temp, + (int8x8_t) __idx); } /* End of temporary inline asm. */