From: Ezra Sitorus
Date: Tue, 2 Jan 2024 09:23:36 +0000 (+0000)
Subject: arm: vld1q_types_x4 ACLE intrinsics
X-Git-Tag: basepoints/gcc-15~2936
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=84d713f6f45bef44b8bea84b02caa8a165bff1b5;p=thirdparty%2Fgcc.git

arm: vld1q_types_x4 ACLE intrinsics

This patch is part of a series of patches implementing the _xN variants
of the vld1q intrinsic for the arm port.  This patch adds the _x4
variants of the vld1q intrinsic.

ACLE documents: https://developer.arm.com/documentation/ihi0053/latest/
ISA documents: https://developer.arm.com/documentation/ddi0487/latest/

gcc/ChangeLog:

	* config/arm/arm_neon.h
	(vld1q_u8_x4, vld1q_u16_x4, vld1q_u32_x4, vld1q_u64_x4): New.
	(vld1q_s8_x4, vld1q_s16_x4, vld1q_s32_x4, vld1q_s64_x4): New.
	(vld1q_f16_x4, vld1q_f32_x4): New.
	(vld1q_p8_x4, vld1q_p16_x4, vld1q_p64_x4): New.
	(vld1q_bf16_x4): New.
	* config/arm/arm_neon_builtins.def (vld1_x4): New entries.
	* config/arm/neon.md (neon_vld1_x4<mode>): New.
	(neon_vld1x4qa<mode>, neon_vld1x4qb<mode>): New.
	* config/arm/unspecs.md (UNSPEC_VLD1X4A, UNSPEC_VLD1X4B): New.

gcc/testsuite/ChangeLog:

	* gcc.target/arm/simd/vld1q_base_xN_1.c: Updated.
	* gcc.target/arm/simd/vld1q_bf16_xN_1.c: Updated.
	* gcc.target/arm/simd/vld1q_fp16_xN_1.c: Updated.
	* gcc.target/arm/simd/vld1q_p64_xN_1.c: Updated.
---

diff --git a/gcc/config/arm/arm_neon.h b/gcc/config/arm/arm_neon.h
index 8f532480549f..bf9d2c1e1923 100644
--- a/gcc/config/arm/arm_neon.h
+++ b/gcc/config/arm/arm_neon.h
@@ -10421,6 +10421,15 @@ vld1q_p64_x3 (const poly64_t * __a)
   return __rv.__i;
 }
 
+__extension__ extern __inline poly64x2x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_p64_x4 (const poly64_t * __a)
+{
+  union { poly64x2x4_t __i; __builtin_neon_xi __o; } __rv;
+  __rv.__o = __builtin_neon_vld1_x4v2di ((const __builtin_neon_di *) __a);
+  return __rv.__i;
+}
+
 #pragma GCC pop_options
 __extension__ extern __inline int8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
@@ -10522,6 +10531,42 @@ vld1q_s64_x3 (const int64_t * __a)
   return __rv.__i;
 }
 
+__extension__ extern __inline int8x16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_s8_x4 (const int8_t * __a)
+{
+  union { int8x16x4_t __i; __builtin_neon_xi __o; } __rv;
+  __rv.__o = __builtin_neon_vld1_x4v16qi ((const __builtin_neon_qi *) __a);
+  return __rv.__i;
+}
+
+__extension__ extern __inline int16x8x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_s16_x4 (const int16_t * __a)
+{
+  union { int16x8x4_t __i; __builtin_neon_xi __o; } __rv;
+  __rv.__o = __builtin_neon_vld1_x4v8hi ((const __builtin_neon_hi *) __a);
+  return __rv.__i;
+}
+
+__extension__ extern __inline int32x4x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_s32_x4 (const int32_t * __a)
+{
+  union { int32x4x4_t __i; __builtin_neon_xi __o; } __rv;
+  __rv.__o = __builtin_neon_vld1_x4v4si ((const __builtin_neon_si *) __a);
+  return __rv.__i;
+}
+
+__extension__ extern __inline int64x2x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_s64_x4 (const int64_t * __a)
+{
+  union { int64x2x4_t __i; __builtin_neon_xi __o; } __rv;
+  __rv.__o = __builtin_neon_vld1_x4v2di ((const __builtin_neon_di *) __a);
+  return __rv.__i;
+}
+
 #if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
 __extension__ extern __inline float16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
@@ -10578,6 +10623,26 @@ vld1q_f32_x3 (const float32_t * __a)
   return __rv.__i;
 }
 
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline float16x8x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_f16_x4 (const float16_t * __a)
+{
+  union { float16x8x4_t __i; __builtin_neon_xi __o; } __rv;
+  __rv.__o = __builtin_neon_vld1_x4v8hf (__a);
+  return __rv.__i;
+}
+#endif
+
+__extension__ extern __inline float32x4x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_f32_x4 (const float32_t * __a)
+{
+  union { float32x4x4_t __i; __builtin_neon_xi __o; } __rv;
+  __rv.__o = __builtin_neon_vld1_x4v4sf ((const __builtin_neon_sf *) __a);
+  return __rv.__i;
+}
+
 __extension__ extern __inline uint8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vld1q_u8 (const uint8_t * __a)
@@ -10678,6 +10743,42 @@ vld1q_u64_x3 (const uint64_t * __a)
   return __rv.__i;
 }
 
+__extension__ extern __inline uint8x16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_u8_x4 (const uint8_t * __a)
+{
+  union { uint8x16x4_t __i; __builtin_neon_xi __o; } __rv;
+  __rv.__o = __builtin_neon_vld1_x4v16qi ((const __builtin_neon_qi *) __a);
+  return __rv.__i;
+}
+
+__extension__ extern __inline uint16x8x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_u16_x4 (const uint16_t * __a)
+{
+  union { uint16x8x4_t __i; __builtin_neon_xi __o; } __rv;
+  __rv.__o = __builtin_neon_vld1_x4v8hi ((const __builtin_neon_hi *) __a);
+  return __rv.__i;
+}
+
+__extension__ extern __inline uint32x4x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_u32_x4 (const uint32_t * __a)
+{
+  union { uint32x4x4_t __i; __builtin_neon_xi __o; } __rv;
+  __rv.__o = __builtin_neon_vld1_x4v4si ((const __builtin_neon_si *) __a);
+  return __rv.__i;
+}
+
+__extension__ extern __inline uint64x2x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_u64_x4 (const uint64_t * __a)
+{
+  union { uint64x2x4_t __i; __builtin_neon_xi __o; } __rv;
+  __rv.__o = __builtin_neon_vld1_x4v2di ((const __builtin_neon_di *) __a);
+  return __rv.__i;
+}
+
 __extension__ extern __inline poly8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vld1q_p8 (const poly8_t * __a)
@@ -10728,6 +10829,24 @@ vld1q_p16_x3 (const poly16_t * __a)
   return __rv.__i;
 }
 
+__extension__ extern __inline poly8x16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_p8_x4 (const poly8_t * __a)
+{
+  union { poly8x16x4_t __i; __builtin_neon_xi __o; } __rv;
+  __rv.__o = __builtin_neon_vld1_x4v16qi ((const __builtin_neon_qi *) __a);
+  return __rv.__i;
+}
+
+__extension__ extern __inline poly16x8x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_p16_x4 (const poly16_t * __a)
+{
+  union { poly16x8x4_t __i; __builtin_neon_xi __o; } __rv;
+  __rv.__o = __builtin_neon_vld1_x4v8hi ((const __builtin_neon_hi *) __a);
+  return __rv.__i;
+}
+
 __extension__ extern __inline int8x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vld1_lane_s8 (const int8_t * __a, int8x8_t __b, const int __c)
@@ -20038,6 +20157,15 @@ vld1q_bf16_x3 (const bfloat16_t * __ptr)
   return __rv.__i;
 }
 
+__extension__ extern __inline bfloat16x8x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_bf16_x4 (const bfloat16_t * __ptr)
+{
+  union { bfloat16x8x4_t __i; __builtin_neon_xi __o; } __rv;
+  __rv.__o = __builtin_neon_vld1_x4v8bf ((const __builtin_neon_bf *) __ptr);
+  return __rv.__i;
+}
+
 __extension__ extern __inline bfloat16x4x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vld2_bf16 (bfloat16_t const * __ptr)
diff --git a/gcc/config/arm/arm_neon_builtins.def b/gcc/config/arm/arm_neon_builtins.def
index 1782606b7047..83dff8f09ab0 100644
--- a/gcc/config/arm/arm_neon_builtins.def
+++ b/gcc/config/arm/arm_neon_builtins.def
@@ -300,6 +300,7 @@ VAR1 (TERNOP, vtbx3, v8qi)
 VAR1 (TERNOP, vtbx4, v8qi)
 VAR7 (LOAD1, vld1_x2, v16qi, v8hi, v4si, v2di, v8hf, v4sf, v8bf)
 VAR7 (LOAD1, vld1_x3, v16qi, v8hi, v4si, v2di, v8hf, v4sf, v8bf)
+VAR7 (LOAD1, vld1_x4, v16qi, v8hi, v4si, v2di, v8hf, v4sf, v8bf)
 VAR13 (LOAD1, vld1,
        v8qi, v4hi, v4hf, v2si, v2sf, v16qi, v8hi, v8hf, v4si, v4sf, v2di,
        v4bf, v8bf)
diff --git a/gcc/config/arm/neon.md b/gcc/config/arm/neon.md
index 6e73737f34fb..d2923f7b1b1f 100644
--- a/gcc/config/arm/neon.md
+++ b/gcc/config/arm/neon.md
@@ -5064,6 +5064,54 @@ if (BYTES_BIG_ENDIAN)
   [(set_attr "type" "neon_load1_3reg")]
 )
 
+(define_expand "neon_vld1_x4<mode>"
+  [(match_operand:XI 0 "s_register_operand")
+   (match_operand:XI 1 "neon_struct_operand")
+   (unspec:VQXBF [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+  "TARGET_NEON"
+{
+  rtx mem = adjust_address (operands[1], OImode, 0);
+  emit_insn (gen_neon_vld1x4qa<mode> (operands[0], mem));
+  mem = adjust_address (mem, OImode, GET_MODE_SIZE (OImode));
+  emit_insn (gen_neon_vld1x4qb<mode> (operands[0], mem, operands[0]));
+  DONE;
+})
+
+(define_insn "neon_vld1x4qa<mode>"
+  [(set (match_operand:XI 0 "s_register_operand" "=w")
+        (unspec:XI [(match_operand:OI 1 "neon_struct_operand" "Um")
+                    (unspec:VQXBF [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+                   UNSPEC_VLD1X4A))]
+  "TARGET_NEON"
+{
+  rtx ops[2];
+  ops[0] = gen_rtx_REG (OImode, REGNO (operands[0]));
+  ops[1] = operands[1];
+
+  output_asm_insn ("vld1.<V_sz_elem>\t%h0, %A1", ops);
+  return "";
+}
+  [(set_attr "type" "neon_load1_4reg")]
+)
+
+(define_insn "neon_vld1x4qb<mode>"
+  [(set (match_operand:XI 0 "s_register_operand" "=w")
+        (unspec:XI [(match_operand:OI 1 "neon_struct_operand" "Um")
+                    (match_operand:XI 2 "s_register_operand" "0")
+                    (unspec:VQXBF [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+                   UNSPEC_VLD1X4B))]
+  "TARGET_NEON"
+{
+  rtx ops[2];
+  ops[0] = gen_rtx_REG (OImode, REGNO (operands[0]) + 8);
+  ops[1] = operands[1];
+
+  output_asm_insn ("vld1.<V_sz_elem>\t%h0, %A1", ops);
+  return "";
+}
+  [(set_attr "type" "neon_load1_4reg")]
+)
+
 ;; The lane numbers in the RTL are in GCC lane order, having been flipped
 ;; in arm_expand_neon_args.  The lane numbers are restored to architectural
 ;; lane order here.
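
For reference, a minimal usage sketch of the new intrinsics (illustrative
only, not part of the patch; the function name and the reduction below are
assumptions).  With the definitions above, a single vld1q_u8_x4 call loads
64 contiguous bytes into the four .val[] vectors of a uint8x16x4_t, and the
neon_vld1_x4<mode> expander splits it into two 32-byte vld1 loads, which is
what the updated scan-assembler counts in the tests below expect.

    /* Illustrative only; sum64 and the reduction steps are assumptions.  */
    #include <arm_neon.h>

    unsigned int
    sum64 (const uint8_t *p)
    {
      /* Single 64-byte load: four uint8x16_t vectors in v.val[0..3].  */
      uint8x16x4_t v = vld1q_u8_x4 (p);

      /* Pairwise widen-and-add reduction of all 64 bytes.  */
      uint16x8_t acc = vpaddlq_u8 (v.val[0]);
      acc = vaddq_u16 (acc, vpaddlq_u8 (v.val[1]));
      acc = vaddq_u16 (acc, vpaddlq_u8 (v.val[2]));
      acc = vaddq_u16 (acc, vpaddlq_u8 (v.val[3]));
      uint64x2_t acc64 = vpaddlq_u32 (vpaddlq_u16 (acc));
      return (unsigned int) (vgetq_lane_u64 (acc64, 0)
                             + vgetq_lane_u64 (acc64, 1));
    }
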
diff --git a/gcc/config/arm/unspecs.md b/gcc/config/arm/unspecs.md
index 2f88c07aa096..25609dcccb67 100644
--- a/gcc/config/arm/unspecs.md
+++ b/gcc/config/arm/unspecs.md
@@ -344,6 +344,8 @@
   UNSPEC_VLD1
   UNSPEC_VLD1X3A
   UNSPEC_VLD1X3B
+  UNSPEC_VLD1X4A
+  UNSPEC_VLD1X4B
   UNSPEC_VLD1_LANE
   UNSPEC_VLD2
   UNSPEC_VLD2_DUP
diff --git a/gcc/testsuite/gcc.target/arm/simd/vld1q_base_xN_1.c b/gcc/testsuite/gcc.target/arm/simd/vld1q_base_xN_1.c
index 117bc58c1617..01b29b600847 100644
--- a/gcc/testsuite/gcc.target/arm/simd/vld1q_base_xN_1.c
+++ b/gcc/testsuite/gcc.target/arm/simd/vld1q_base_xN_1.c
@@ -115,14 +115,69 @@ poly16x8x3_t test_vld1q_p16_x3 (poly16_t * a)
   return vld1q_p16_x3 (a);
 }
 
-/* { dg-final { scan-assembler-times {vld1.8\t\{d[0-9]+-d[0-9]+\}, \[r[0-9]+\]\n} 6 } } */
-/* { dg-final { scan-assembler-times {vld1.8\t\{d[0-9]+-d[0-9]+\}, \[r[0-9]+\]!\n} 3 } } */
+uint8x16x4_t test_vld1q_u8_x4 (uint8_t * a)
+{
+  return vld1q_u8_x4 (a);
+}
+
+uint16x8x4_t test_vld1q_u16_x4 (uint16_t * a)
+{
+  return vld1q_u16_x4 (a);
+}
+
+uint32x4x4_t test_vld1q_u32_x4 (uint32_t * a)
+{
+  return vld1q_u32_x4 (a);
+}
+
+uint64x2x4_t test_vld1q_u64_x4 (uint64_t * a)
+{
+  return vld1q_u64_x4 (a);
+}
+
+int8x16x4_t test_vld1q_s8_x4 (int8_t * a)
+{
+  return vld1q_s8_x4 (a);
+}
+
+int16x8x4_t test_vld1q_s16_x4 (int16_t * a)
+{
+  return vld1q_s16_x4 (a);
+}
+
+int32x4x4_t test_vld1q_s32_x4 (int32_t * a)
+{
+  return vld1q_s32_x4 (a);
+}
+
+int64x2x4_t test_vld1q_s64_x4 (int64_t * a)
+{
+  return vld1q_s64_x4 (a);
+}
+
+float32x4x4_t test_vld1q_f32_x4 (float32_t * a)
+{
+  return vld1q_f32_x4 (a);
+}
+
+poly8x16x4_t test_vld1q_p8_x4 (poly8_t * a)
+{
+  return vld1q_p8_x4 (a);
+}
+
+poly16x8x4_t test_vld1q_p16_x4 (poly16_t * a)
+{
+  return vld1q_p16_x4 (a);
+}
+
+/* { dg-final { scan-assembler-times {vld1.8\t\{d[0-9]+-d[0-9]+\}, \[r[0-9]+\]\n} 9 } } */
+/* { dg-final { scan-assembler-times {vld1.8\t\{d[0-9]+-d[0-9]+\}, \[r[0-9]+\]!\n} 6 } } */
 
-/* { dg-final { scan-assembler-times {vld1.16\t\{d[0-9]+-d[0-9]+\}, \[r[0-9]+\]\n} 6 } } */
-/* { dg-final { scan-assembler-times {vld1.16\t\{d[0-9]+-d[0-9]+\}, \[r[0-9]+\]!\n} 3 } } */
+/* { dg-final { scan-assembler-times {vld1.16\t\{d[0-9]+-d[0-9]+\}, \[r[0-9]+\]\n} 9 } } */
+/* { dg-final { scan-assembler-times {vld1.16\t\{d[0-9]+-d[0-9]+\}, \[r[0-9]+\]!\n} 6 } } */
 
-/* { dg-final { scan-assembler-times {vld1.32\t\{d[0-9]+-d[0-9]+\}, \[r[0-9]+\]\n} 6 } } */
-/* { dg-final { scan-assembler-times {vld1.32\t\{d[0-9]+-d[0-9]+\}, \[r[0-9]+\]!\n} 3 } } */
+/* { dg-final { scan-assembler-times {vld1.32\t\{d[0-9]+-d[0-9]+\}, \[r[0-9]+\]\n} 9 } } */
+/* { dg-final { scan-assembler-times {vld1.32\t\{d[0-9]+-d[0-9]+\}, \[r[0-9]+\]!\n} 6 } } */
 
-/* { dg-final { scan-assembler-times {vld1.64\t\{d[0-9]+-d[0-9]+\}, \[r[0-9]+:64\]\n} 4 } } */
-/* { dg-final { scan-assembler-times {vld1.64\t\{d[0-9]+-d[0-9]+\}, \[r[0-9]+:64\]!\n} 2 } } */
+/* { dg-final { scan-assembler-times {vld1.64\t\{d[0-9]+-d[0-9]+\}, \[r[0-9]+:64\]\n} 6 } } */
+/* { dg-final { scan-assembler-times {vld1.64\t\{d[0-9]+-d[0-9]+\}, \[r[0-9]+:64\]!\n} 4 } } */
diff --git a/gcc/testsuite/gcc.target/arm/simd/vld1q_bf16_xN_1.c b/gcc/testsuite/gcc.target/arm/simd/vld1q_bf16_xN_1.c
index 75b61f1ecf03..21db3520c1a0 100644
--- a/gcc/testsuite/gcc.target/arm/simd/vld1q_bf16_xN_1.c
+++ b/gcc/testsuite/gcc.target/arm/simd/vld1q_bf16_xN_1.c
@@ -15,5 +15,10 @@ bfloat16x8x3_t test_vld1q_bf16_x3 (bfloat16_t * a)
   return vld1q_bf16_x3 (a);
 }
 
-/* { dg-final { scan-assembler-times {vld1.16\t\{d[0-9]+-d[0-9]+\}, \[r[0-9]+\]\n} 2 } } */
-/* { dg-final { scan-assembler-times {vld1.16\t\{d[0-9]+-d[0-9]+\}, \[r[0-9]+\]!\n} 1 } } */
+bfloat16x8x4_t test_vld1q_bf16_x4 (bfloat16_t * a)
+{
+  return vld1q_bf16_x4 (a);
+}
+
+/* { dg-final { scan-assembler-times {vld1.16\t\{d[0-9]+-d[0-9]+\}, \[r[0-9]+\]\n} 3 } } */
+/* { dg-final { scan-assembler-times {vld1.16\t\{d[0-9]+-d[0-9]+\}, \[r[0-9]+\]!\n} 2 } } */
diff --git a/gcc/testsuite/gcc.target/arm/simd/vld1q_fp16_xN_1.c b/gcc/testsuite/gcc.target/arm/simd/vld1q_fp16_xN_1.c
index 9032048bed5c..3838cd072eab 100644
--- a/gcc/testsuite/gcc.target/arm/simd/vld1q_fp16_xN_1.c
+++ b/gcc/testsuite/gcc.target/arm/simd/vld1q_fp16_xN_1.c
@@ -15,5 +15,10 @@ float16x8x3_t test_vld1q_f16_x3 (float16_t * a)
   return vld1q_f16_x3 (a);
 }
 
-/* { dg-final { scan-assembler-times {vld1.16\t\{d[0-9]+-d[0-9]+\}, \[r[0-9]+\]\n} 2 } } */
-/* { dg-final { scan-assembler-times {vld1.16\t\{d[0-9]+-d[0-9]+\}, \[r[0-9]+\]!\n} 1 } } */
+float16x8x4_t test_vld1q_f16_x4 (float16_t * a)
+{
+  return vld1q_f16_x4 (a);
+}
+
+/* { dg-final { scan-assembler-times {vld1.16\t\{d[0-9]+-d[0-9]+\}, \[r[0-9]+\]\n} 3 } } */
+/* { dg-final { scan-assembler-times {vld1.16\t\{d[0-9]+-d[0-9]+\}, \[r[0-9]+\]!\n} 2 } } */
diff --git a/gcc/testsuite/gcc.target/arm/simd/vld1q_p64_xN_1.c b/gcc/testsuite/gcc.target/arm/simd/vld1q_p64_xN_1.c
index aabc31bbd088..d359a5a85a22 100644
--- a/gcc/testsuite/gcc.target/arm/simd/vld1q_p64_xN_1.c
+++ b/gcc/testsuite/gcc.target/arm/simd/vld1q_p64_xN_1.c
@@ -15,5 +15,10 @@ poly64x2x3_t test_vld1q_p64_x3 (poly64_t * a)
   return vld1q_p64_x3 (a);
 }
 
-/* { dg-final { scan-assembler-times {vld1.64\t\{d[0-9]+-d[0-9]+\}, \[r[0-9]+:64\]\n} 2 } } */
-/* { dg-final { scan-assembler-times {vld1.64\t\{d[0-9]+-d[0-9]+\}, \[r[0-9]+:64\]!\n} 1 } } */
+poly64x2x4_t test_vld1q_p64_x4 (poly64_t * a)
+{
+  return vld1q_p64_x4 (a);
+}
+
+/* { dg-final { scan-assembler-times {vld1.64\t\{d[0-9]+-d[0-9]+\}, \[r[0-9]+:64\]\n} 3 } } */
+/* { dg-final { scan-assembler-times {vld1.64\t\{d[0-9]+-d[0-9]+\}, \[r[0-9]+:64\]!\n} 2 } } */
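
A similar sketch for one of the floating-point variants (again illustrative
only, not part of the patch; sum16f and its reduction are assumptions).
Each _x4 load is expected to appear as one post-incrementing and one plain
4-register vld1 of the matching element size, which is why each
scan-assembler-times count above grows by one per new _x4 test function.

    /* Illustrative only; sum16f and the reduction are assumptions.  */
    #include <arm_neon.h>

    float
    sum16f (const float32_t *p)
    {
      /* One 64-byte load: four float32x4_t vectors in v.val[0..3].  */
      float32x4x4_t v = vld1q_f32_x4 (p);
      float32x4_t s = vaddq_f32 (vaddq_f32 (v.val[0], v.val[1]),
                                 vaddq_f32 (v.val[2], v.val[3]));
      /* Horizontal add of the remaining four lanes.  */
      float32x2_t t = vadd_f32 (vget_low_f32 (s), vget_high_f32 (s));
      t = vpadd_f32 (t, t);
      return vget_lane_f32 (t, 0);
    }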