]>
Commit | Line | Data |
---|---|---|
63c8f7d6 SP |
1 | /* Arm MVE intrinsics include file. |
2 | ||
3 | Copyright (C) 2019-2020 Free Software Foundation, Inc. | |
4 | Contributed by Arm. | |
5 | ||
6 | This file is part of GCC. | |
7 | ||
8 | GCC is free software; you can redistribute it and/or modify it | |
9 | under the terms of the GNU General Public License as published | |
10 | by the Free Software Foundation; either version 3, or (at your | |
11 | option) any later version. | |
12 | ||
13 | GCC is distributed in the hope that it will be useful, but WITHOUT | |
14 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY | |
15 | or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public | |
16 | License for more details. | |
17 | ||
18 | You should have received a copy of the GNU General Public License | |
19 | along with GCC; see the file COPYING3. If not see | |
20 | <http://www.gnu.org/licenses/>. */ | |
21 | ||
22 | #ifndef _GCC_ARM_MVE_H | |
23 | #define _GCC_ARM_MVE_H | |
24 | ||
/* MVE intrinsics assume little-endian lane numbering; refuse to compile
   for big-endian targets rather than silently produce wrong lanes.  */
#if __ARM_BIG_ENDIAN
#error "MVE intrinsics are not supported in Big-Endian mode."
#endif

/* __ARM_FEATURE_MVE is set by the compiler when the M-profile Vector
   Extension is available (bit 1 set when floating-point MVE is present,
   as tested further below).  */
#if !__ARM_FEATURE_MVE
#error "MVE feature not supported"
#endif
32 | ||
33 | #include <stdint.h> | |
34 | #ifndef __cplusplus | |
35 | #include <stdbool.h> | |
36 | #endif | |
37 | ||
38 | #ifdef __cplusplus | |
39 | extern "C" { | |
40 | #endif | |
41 | ||
/* Floating-point scalar, vector and vector-tuple types.  The original
   header tested (__ARM_FEATURE_MVE & 2) twice in a row with identical
   conditions; the two conditional blocks are merged into one.  */
#if (__ARM_FEATURE_MVE & 2) /* MVE Floating point.  */
/* Scalar element types used by the FP intrinsics.  */
typedef __fp16 float16_t;
typedef float float32_t;

/* 128-bit floating-point vector types (GCC built-in vector types).  */
typedef __simd128_float16_t float16x8_t;
typedef __simd128_float32_t float32x4_t;

/* Tuples of 2 or 4 vectors, e.g. the operand type of vst4q_f16/vst4q_f32
   defined later in this header.  */
typedef struct { float16x8_t val[2]; } float16x8x2_t;
typedef struct { float16x8_t val[4]; } float16x8x4_t;
typedef struct { float32x4_t val[2]; } float32x4x2_t;
typedef struct { float32x4_t val[4]; } float32x4x4_t;
#endif
55 | ||
63c8f7d6 SP |
56 | typedef uint16_t mve_pred16_t; |
57 | typedef __simd128_uint8_t uint8x16_t; | |
58 | typedef __simd128_uint16_t uint16x8_t; | |
59 | typedef __simd128_uint32_t uint32x4_t; | |
60 | typedef __simd128_uint64_t uint64x2_t; | |
61 | typedef __simd128_int8_t int8x16_t; | |
62 | typedef __simd128_int16_t int16x8_t; | |
63 | typedef __simd128_int32_t int32x4_t; | |
64 | typedef __simd128_int64_t int64x2_t; | |
65 | ||
14782c81 SP |
66 | typedef struct { int16x8_t val[2]; } int16x8x2_t; |
67 | typedef struct { int16x8_t val[4]; } int16x8x4_t; | |
68 | typedef struct { int32x4_t val[2]; } int32x4x2_t; | |
69 | typedef struct { int32x4_t val[4]; } int32x4x4_t; | |
70 | typedef struct { int8x16_t val[2]; } int8x16x2_t; | |
71 | typedef struct { int8x16_t val[4]; } int8x16x4_t; | |
72 | typedef struct { uint16x8_t val[2]; } uint16x8x2_t; | |
73 | typedef struct { uint16x8_t val[4]; } uint16x8x4_t; | |
74 | typedef struct { uint32x4_t val[2]; } uint32x4x2_t; | |
75 | typedef struct { uint32x4_t val[4]; } uint32x4x4_t; | |
76 | typedef struct { uint8x16_t val[2]; } uint8x16x2_t; | |
77 | typedef struct { uint8x16_t val[4]; } uint8x16x4_t; | |
78 | ||
79 | #ifndef __ARM_MVE_PRESERVE_USER_NAMESPACE | |
80 | #define vst4q_s8( __addr, __value) __arm_vst4q_s8( __addr, __value) | |
81 | #define vst4q_s16( __addr, __value) __arm_vst4q_s16( __addr, __value) | |
82 | #define vst4q_s32( __addr, __value) __arm_vst4q_s32( __addr, __value) | |
83 | #define vst4q_u8( __addr, __value) __arm_vst4q_u8( __addr, __value) | |
84 | #define vst4q_u16( __addr, __value) __arm_vst4q_u16( __addr, __value) | |
85 | #define vst4q_u32( __addr, __value) __arm_vst4q_u32( __addr, __value) | |
86 | #define vst4q_f16( __addr, __value) __arm_vst4q_f16( __addr, __value) | |
87 | #define vst4q_f32( __addr, __value) __arm_vst4q_f32( __addr, __value) | |
a50f6abf SP |
88 | #define vrndxq_f16(__a) __arm_vrndxq_f16(__a) |
89 | #define vrndxq_f32(__a) __arm_vrndxq_f32(__a) | |
90 | #define vrndq_f16(__a) __arm_vrndq_f16(__a) | |
91 | #define vrndq_f32(__a) __arm_vrndq_f32(__a) | |
92 | #define vrndpq_f16(__a) __arm_vrndpq_f16(__a) | |
93 | #define vrndpq_f32(__a) __arm_vrndpq_f32(__a) | |
94 | #define vrndnq_f16(__a) __arm_vrndnq_f16(__a) | |
95 | #define vrndnq_f32(__a) __arm_vrndnq_f32(__a) | |
96 | #define vrndmq_f16(__a) __arm_vrndmq_f16(__a) | |
97 | #define vrndmq_f32(__a) __arm_vrndmq_f32(__a) | |
98 | #define vrndaq_f16(__a) __arm_vrndaq_f16(__a) | |
99 | #define vrndaq_f32(__a) __arm_vrndaq_f32(__a) | |
100 | #define vrev64q_f16(__a) __arm_vrev64q_f16(__a) | |
101 | #define vrev64q_f32(__a) __arm_vrev64q_f32(__a) | |
102 | #define vnegq_f16(__a) __arm_vnegq_f16(__a) | |
103 | #define vnegq_f32(__a) __arm_vnegq_f32(__a) | |
104 | #define vdupq_n_f16(__a) __arm_vdupq_n_f16(__a) | |
105 | #define vdupq_n_f32(__a) __arm_vdupq_n_f32(__a) | |
106 | #define vabsq_f16(__a) __arm_vabsq_f16(__a) | |
107 | #define vabsq_f32(__a) __arm_vabsq_f32(__a) | |
108 | #define vrev32q_f16(__a) __arm_vrev32q_f16(__a) | |
109 | #define vcvttq_f32_f16(__a) __arm_vcvttq_f32_f16(__a) | |
110 | #define vcvtbq_f32_f16(__a) __arm_vcvtbq_f32_f16(__a) | |
111 | #define vcvtq_f16_s16(__a) __arm_vcvtq_f16_s16(__a) | |
112 | #define vcvtq_f32_s32(__a) __arm_vcvtq_f32_s32(__a) | |
113 | #define vcvtq_f16_u16(__a) __arm_vcvtq_f16_u16(__a) | |
114 | #define vcvtq_f32_u32(__a) __arm_vcvtq_f32_u32(__a) | |
6df4618c SP |
115 | #define vdupq_n_s8(__a) __arm_vdupq_n_s8(__a) |
116 | #define vdupq_n_s16(__a) __arm_vdupq_n_s16(__a) | |
117 | #define vdupq_n_s32(__a) __arm_vdupq_n_s32(__a) | |
118 | #define vabsq_s8(__a) __arm_vabsq_s8(__a) | |
119 | #define vabsq_s16(__a) __arm_vabsq_s16(__a) | |
120 | #define vabsq_s32(__a) __arm_vabsq_s32(__a) | |
121 | #define vclsq_s8(__a) __arm_vclsq_s8(__a) | |
122 | #define vclsq_s16(__a) __arm_vclsq_s16(__a) | |
123 | #define vclsq_s32(__a) __arm_vclsq_s32(__a) | |
124 | #define vclzq_s8(__a) __arm_vclzq_s8(__a) | |
125 | #define vclzq_s16(__a) __arm_vclzq_s16(__a) | |
126 | #define vclzq_s32(__a) __arm_vclzq_s32(__a) | |
127 | #define vnegq_s8(__a) __arm_vnegq_s8(__a) | |
128 | #define vnegq_s16(__a) __arm_vnegq_s16(__a) | |
129 | #define vnegq_s32(__a) __arm_vnegq_s32(__a) | |
130 | #define vaddlvq_s32(__a) __arm_vaddlvq_s32(__a) | |
131 | #define vaddvq_s8(__a) __arm_vaddvq_s8(__a) | |
132 | #define vaddvq_s16(__a) __arm_vaddvq_s16(__a) | |
133 | #define vaddvq_s32(__a) __arm_vaddvq_s32(__a) | |
134 | #define vmovlbq_s8(__a) __arm_vmovlbq_s8(__a) | |
135 | #define vmovlbq_s16(__a) __arm_vmovlbq_s16(__a) | |
136 | #define vmovltq_s8(__a) __arm_vmovltq_s8(__a) | |
137 | #define vmovltq_s16(__a) __arm_vmovltq_s16(__a) | |
138 | #define vmvnq_s8(__a) __arm_vmvnq_s8(__a) | |
139 | #define vmvnq_s16(__a) __arm_vmvnq_s16(__a) | |
140 | #define vmvnq_s32(__a) __arm_vmvnq_s32(__a) | |
5db0eb95 SP |
141 | #define vmvnq_n_s16( __imm) __arm_vmvnq_n_s16( __imm) |
142 | #define vmvnq_n_s32( __imm) __arm_vmvnq_n_s32( __imm) | |
6df4618c SP |
143 | #define vrev16q_s8(__a) __arm_vrev16q_s8(__a) |
144 | #define vrev32q_s8(__a) __arm_vrev32q_s8(__a) | |
145 | #define vrev32q_s16(__a) __arm_vrev32q_s16(__a) | |
5db0eb95 SP |
146 | #define vrev64q_s8(__a) __arm_vrev64q_s8(__a) |
147 | #define vrev64q_s16(__a) __arm_vrev64q_s16(__a) | |
148 | #define vrev64q_s32(__a) __arm_vrev64q_s32(__a) | |
6df4618c SP |
149 | #define vqabsq_s8(__a) __arm_vqabsq_s8(__a) |
150 | #define vqabsq_s16(__a) __arm_vqabsq_s16(__a) | |
151 | #define vqabsq_s32(__a) __arm_vqabsq_s32(__a) | |
152 | #define vqnegq_s8(__a) __arm_vqnegq_s8(__a) | |
153 | #define vqnegq_s16(__a) __arm_vqnegq_s16(__a) | |
154 | #define vqnegq_s32(__a) __arm_vqnegq_s32(__a) | |
155 | #define vcvtaq_s16_f16(__a) __arm_vcvtaq_s16_f16(__a) | |
156 | #define vcvtaq_s32_f32(__a) __arm_vcvtaq_s32_f32(__a) | |
157 | #define vcvtnq_s16_f16(__a) __arm_vcvtnq_s16_f16(__a) | |
158 | #define vcvtnq_s32_f32(__a) __arm_vcvtnq_s32_f32(__a) | |
159 | #define vcvtpq_s16_f16(__a) __arm_vcvtpq_s16_f16(__a) | |
160 | #define vcvtpq_s32_f32(__a) __arm_vcvtpq_s32_f32(__a) | |
161 | #define vcvtmq_s16_f16(__a) __arm_vcvtmq_s16_f16(__a) | |
162 | #define vcvtmq_s32_f32(__a) __arm_vcvtmq_s32_f32(__a) | |
5db0eb95 SP |
163 | #define vcvtq_s16_f16(__a) __arm_vcvtq_s16_f16(__a) |
164 | #define vcvtq_s32_f32(__a) __arm_vcvtq_s32_f32(__a) | |
165 | #define vrev64q_u8(__a) __arm_vrev64q_u8(__a) | |
166 | #define vrev64q_u16(__a) __arm_vrev64q_u16(__a) | |
167 | #define vrev64q_u32(__a) __arm_vrev64q_u32(__a) | |
6df4618c SP |
168 | #define vmvnq_u8(__a) __arm_vmvnq_u8(__a) |
169 | #define vmvnq_u16(__a) __arm_vmvnq_u16(__a) | |
170 | #define vmvnq_u32(__a) __arm_vmvnq_u32(__a) | |
171 | #define vdupq_n_u8(__a) __arm_vdupq_n_u8(__a) | |
172 | #define vdupq_n_u16(__a) __arm_vdupq_n_u16(__a) | |
173 | #define vdupq_n_u32(__a) __arm_vdupq_n_u32(__a) | |
174 | #define vclzq_u8(__a) __arm_vclzq_u8(__a) | |
175 | #define vclzq_u16(__a) __arm_vclzq_u16(__a) | |
176 | #define vclzq_u32(__a) __arm_vclzq_u32(__a) | |
177 | #define vaddvq_u8(__a) __arm_vaddvq_u8(__a) | |
178 | #define vaddvq_u16(__a) __arm_vaddvq_u16(__a) | |
179 | #define vaddvq_u32(__a) __arm_vaddvq_u32(__a) | |
180 | #define vrev32q_u8(__a) __arm_vrev32q_u8(__a) | |
181 | #define vrev32q_u16(__a) __arm_vrev32q_u16(__a) | |
182 | #define vmovltq_u8(__a) __arm_vmovltq_u8(__a) | |
183 | #define vmovltq_u16(__a) __arm_vmovltq_u16(__a) | |
184 | #define vmovlbq_u8(__a) __arm_vmovlbq_u8(__a) | |
185 | #define vmovlbq_u16(__a) __arm_vmovlbq_u16(__a) | |
5db0eb95 SP |
186 | #define vmvnq_n_u16( __imm) __arm_vmvnq_n_u16( __imm) |
187 | #define vmvnq_n_u32( __imm) __arm_vmvnq_n_u32( __imm) | |
6df4618c SP |
188 | #define vrev16q_u8(__a) __arm_vrev16q_u8(__a) |
189 | #define vaddlvq_u32(__a) __arm_vaddlvq_u32(__a) | |
5db0eb95 SP |
190 | #define vcvtq_u16_f16(__a) __arm_vcvtq_u16_f16(__a) |
191 | #define vcvtq_u32_f32(__a) __arm_vcvtq_u32_f32(__a) | |
6df4618c SP |
192 | #define vcvtpq_u16_f16(__a) __arm_vcvtpq_u16_f16(__a) |
193 | #define vcvtpq_u32_f32(__a) __arm_vcvtpq_u32_f32(__a) | |
194 | #define vcvtnq_u16_f16(__a) __arm_vcvtnq_u16_f16(__a) | |
195 | #define vcvtmq_u16_f16(__a) __arm_vcvtmq_u16_f16(__a) | |
196 | #define vcvtmq_u32_f32(__a) __arm_vcvtmq_u32_f32(__a) | |
197 | #define vcvtaq_u16_f16(__a) __arm_vcvtaq_u16_f16(__a) | |
198 | #define vcvtaq_u32_f32(__a) __arm_vcvtaq_u32_f32(__a) | |
a475f153 SP |
199 | #define vctp16q(__a) __arm_vctp16q(__a) |
200 | #define vctp32q(__a) __arm_vctp32q(__a) | |
201 | #define vctp64q(__a) __arm_vctp64q(__a) | |
202 | #define vctp8q(__a) __arm_vctp8q(__a) | |
203 | #define vpnot(__a) __arm_vpnot(__a) | |
4be8cf77 SP |
204 | #define vsubq_n_f16(__a, __b) __arm_vsubq_n_f16(__a, __b) |
205 | #define vsubq_n_f32(__a, __b) __arm_vsubq_n_f32(__a, __b) | |
206 | #define vbrsrq_n_f16(__a, __b) __arm_vbrsrq_n_f16(__a, __b) | |
207 | #define vbrsrq_n_f32(__a, __b) __arm_vbrsrq_n_f32(__a, __b) | |
208 | #define vcvtq_n_f16_s16(__a, __imm6) __arm_vcvtq_n_f16_s16(__a, __imm6) | |
209 | #define vcvtq_n_f32_s32(__a, __imm6) __arm_vcvtq_n_f32_s32(__a, __imm6) | |
210 | #define vcvtq_n_f16_u16(__a, __imm6) __arm_vcvtq_n_f16_u16(__a, __imm6) | |
211 | #define vcvtq_n_f32_u32(__a, __imm6) __arm_vcvtq_n_f32_u32(__a, __imm6) | |
212 | #define vcreateq_f16(__a, __b) __arm_vcreateq_f16(__a, __b) | |
213 | #define vcreateq_f32(__a, __b) __arm_vcreateq_f32(__a, __b) | |
f166a8cd SP |
214 | #define vcvtq_n_s16_f16(__a, __imm6) __arm_vcvtq_n_s16_f16(__a, __imm6) |
215 | #define vcvtq_n_s32_f32(__a, __imm6) __arm_vcvtq_n_s32_f32(__a, __imm6) | |
216 | #define vcvtq_n_u16_f16(__a, __imm6) __arm_vcvtq_n_u16_f16(__a, __imm6) | |
217 | #define vcvtq_n_u32_f32(__a, __imm6) __arm_vcvtq_n_u32_f32(__a, __imm6) | |
218 | #define vcreateq_u8(__a, __b) __arm_vcreateq_u8(__a, __b) | |
219 | #define vcreateq_u16(__a, __b) __arm_vcreateq_u16(__a, __b) | |
220 | #define vcreateq_u32(__a, __b) __arm_vcreateq_u32(__a, __b) | |
221 | #define vcreateq_u64(__a, __b) __arm_vcreateq_u64(__a, __b) | |
222 | #define vcreateq_s8(__a, __b) __arm_vcreateq_s8(__a, __b) | |
223 | #define vcreateq_s16(__a, __b) __arm_vcreateq_s16(__a, __b) | |
224 | #define vcreateq_s32(__a, __b) __arm_vcreateq_s32(__a, __b) | |
225 | #define vcreateq_s64(__a, __b) __arm_vcreateq_s64(__a, __b) | |
226 | #define vshrq_n_s8(__a, __imm) __arm_vshrq_n_s8(__a, __imm) | |
227 | #define vshrq_n_s16(__a, __imm) __arm_vshrq_n_s16(__a, __imm) | |
228 | #define vshrq_n_s32(__a, __imm) __arm_vshrq_n_s32(__a, __imm) | |
229 | #define vshrq_n_u8(__a, __imm) __arm_vshrq_n_u8(__a, __imm) | |
230 | #define vshrq_n_u16(__a, __imm) __arm_vshrq_n_u16(__a, __imm) | |
231 | #define vshrq_n_u32(__a, __imm) __arm_vshrq_n_u32(__a, __imm) | |
d71dba7b SP |
232 | #define vaddlvq_p_s32(__a, __p) __arm_vaddlvq_p_s32(__a, __p) |
233 | #define vaddlvq_p_u32(__a, __p) __arm_vaddlvq_p_u32(__a, __p) | |
234 | #define vcmpneq_s8(__a, __b) __arm_vcmpneq_s8(__a, __b) | |
235 | #define vcmpneq_s16(__a, __b) __arm_vcmpneq_s16(__a, __b) | |
236 | #define vcmpneq_s32(__a, __b) __arm_vcmpneq_s32(__a, __b) | |
237 | #define vcmpneq_u8(__a, __b) __arm_vcmpneq_u8(__a, __b) | |
238 | #define vcmpneq_u16(__a, __b) __arm_vcmpneq_u16(__a, __b) | |
239 | #define vcmpneq_u32(__a, __b) __arm_vcmpneq_u32(__a, __b) | |
240 | #define vshlq_s8(__a, __b) __arm_vshlq_s8(__a, __b) | |
241 | #define vshlq_s16(__a, __b) __arm_vshlq_s16(__a, __b) | |
242 | #define vshlq_s32(__a, __b) __arm_vshlq_s32(__a, __b) | |
243 | #define vshlq_u8(__a, __b) __arm_vshlq_u8(__a, __b) | |
244 | #define vshlq_u16(__a, __b) __arm_vshlq_u16(__a, __b) | |
245 | #define vshlq_u32(__a, __b) __arm_vshlq_u32(__a, __b) | |
33203b4c SP |
246 | #define vsubq_u8(__a, __b) __arm_vsubq_u8(__a, __b) |
247 | #define vsubq_n_u8(__a, __b) __arm_vsubq_n_u8(__a, __b) | |
248 | #define vrmulhq_u8(__a, __b) __arm_vrmulhq_u8(__a, __b) | |
249 | #define vrhaddq_u8(__a, __b) __arm_vrhaddq_u8(__a, __b) | |
250 | #define vqsubq_u8(__a, __b) __arm_vqsubq_u8(__a, __b) | |
251 | #define vqsubq_n_u8(__a, __b) __arm_vqsubq_n_u8(__a, __b) | |
252 | #define vqaddq_u8(__a, __b) __arm_vqaddq_u8(__a, __b) | |
253 | #define vqaddq_n_u8(__a, __b) __arm_vqaddq_n_u8(__a, __b) | |
254 | #define vorrq_u8(__a, __b) __arm_vorrq_u8(__a, __b) | |
255 | #define vornq_u8(__a, __b) __arm_vornq_u8(__a, __b) | |
256 | #define vmulq_u8(__a, __b) __arm_vmulq_u8(__a, __b) | |
257 | #define vmulq_n_u8(__a, __b) __arm_vmulq_n_u8(__a, __b) | |
258 | #define vmulltq_int_u8(__a, __b) __arm_vmulltq_int_u8(__a, __b) | |
259 | #define vmullbq_int_u8(__a, __b) __arm_vmullbq_int_u8(__a, __b) | |
260 | #define vmulhq_u8(__a, __b) __arm_vmulhq_u8(__a, __b) | |
261 | #define vmladavq_u8(__a, __b) __arm_vmladavq_u8(__a, __b) | |
262 | #define vminvq_u8(__a, __b) __arm_vminvq_u8(__a, __b) | |
263 | #define vminq_u8(__a, __b) __arm_vminq_u8(__a, __b) | |
264 | #define vmaxvq_u8(__a, __b) __arm_vmaxvq_u8(__a, __b) | |
265 | #define vmaxq_u8(__a, __b) __arm_vmaxq_u8(__a, __b) | |
266 | #define vhsubq_u8(__a, __b) __arm_vhsubq_u8(__a, __b) | |
267 | #define vhsubq_n_u8(__a, __b) __arm_vhsubq_n_u8(__a, __b) | |
268 | #define vhaddq_u8(__a, __b) __arm_vhaddq_u8(__a, __b) | |
269 | #define vhaddq_n_u8(__a, __b) __arm_vhaddq_n_u8(__a, __b) | |
270 | #define veorq_u8(__a, __b) __arm_veorq_u8(__a, __b) | |
271 | #define vcmpneq_n_u8(__a, __b) __arm_vcmpneq_n_u8(__a, __b) | |
272 | #define vcmphiq_u8(__a, __b) __arm_vcmphiq_u8(__a, __b) | |
273 | #define vcmphiq_n_u8(__a, __b) __arm_vcmphiq_n_u8(__a, __b) | |
274 | #define vcmpeqq_u8(__a, __b) __arm_vcmpeqq_u8(__a, __b) | |
275 | #define vcmpeqq_n_u8(__a, __b) __arm_vcmpeqq_n_u8(__a, __b) | |
276 | #define vcmpcsq_u8(__a, __b) __arm_vcmpcsq_u8(__a, __b) | |
277 | #define vcmpcsq_n_u8(__a, __b) __arm_vcmpcsq_n_u8(__a, __b) | |
278 | #define vcaddq_rot90_u8(__a, __b) __arm_vcaddq_rot90_u8(__a, __b) | |
279 | #define vcaddq_rot270_u8(__a, __b) __arm_vcaddq_rot270_u8(__a, __b) | |
280 | #define vbicq_u8(__a, __b) __arm_vbicq_u8(__a, __b) | |
281 | #define vandq_u8(__a, __b) __arm_vandq_u8(__a, __b) | |
282 | #define vaddvq_p_u8(__a, __p) __arm_vaddvq_p_u8(__a, __p) | |
283 | #define vaddvaq_u8(__a, __b) __arm_vaddvaq_u8(__a, __b) | |
284 | #define vaddq_n_u8(__a, __b) __arm_vaddq_n_u8(__a, __b) | |
285 | #define vabdq_u8(__a, __b) __arm_vabdq_u8(__a, __b) | |
286 | #define vshlq_r_u8(__a, __b) __arm_vshlq_r_u8(__a, __b) | |
287 | #define vrshlq_u8(__a, __b) __arm_vrshlq_u8(__a, __b) | |
288 | #define vrshlq_n_u8(__a, __b) __arm_vrshlq_n_u8(__a, __b) | |
289 | #define vqshlq_u8(__a, __b) __arm_vqshlq_u8(__a, __b) | |
290 | #define vqshlq_r_u8(__a, __b) __arm_vqshlq_r_u8(__a, __b) | |
291 | #define vqrshlq_u8(__a, __b) __arm_vqrshlq_u8(__a, __b) | |
292 | #define vqrshlq_n_u8(__a, __b) __arm_vqrshlq_n_u8(__a, __b) | |
293 | #define vminavq_s8(__a, __b) __arm_vminavq_s8(__a, __b) | |
294 | #define vminaq_s8(__a, __b) __arm_vminaq_s8(__a, __b) | |
295 | #define vmaxavq_s8(__a, __b) __arm_vmaxavq_s8(__a, __b) | |
296 | #define vmaxaq_s8(__a, __b) __arm_vmaxaq_s8(__a, __b) | |
297 | #define vbrsrq_n_u8(__a, __b) __arm_vbrsrq_n_u8(__a, __b) | |
298 | #define vshlq_n_u8(__a, __imm) __arm_vshlq_n_u8(__a, __imm) | |
299 | #define vrshrq_n_u8(__a, __imm) __arm_vrshrq_n_u8(__a, __imm) | |
300 | #define vqshlq_n_u8(__a, __imm) __arm_vqshlq_n_u8(__a, __imm) | |
301 | #define vcmpneq_n_s8(__a, __b) __arm_vcmpneq_n_s8(__a, __b) | |
302 | #define vcmpltq_s8(__a, __b) __arm_vcmpltq_s8(__a, __b) | |
303 | #define vcmpltq_n_s8(__a, __b) __arm_vcmpltq_n_s8(__a, __b) | |
304 | #define vcmpleq_s8(__a, __b) __arm_vcmpleq_s8(__a, __b) | |
305 | #define vcmpleq_n_s8(__a, __b) __arm_vcmpleq_n_s8(__a, __b) | |
306 | #define vcmpgtq_s8(__a, __b) __arm_vcmpgtq_s8(__a, __b) | |
307 | #define vcmpgtq_n_s8(__a, __b) __arm_vcmpgtq_n_s8(__a, __b) | |
308 | #define vcmpgeq_s8(__a, __b) __arm_vcmpgeq_s8(__a, __b) | |
309 | #define vcmpgeq_n_s8(__a, __b) __arm_vcmpgeq_n_s8(__a, __b) | |
310 | #define vcmpeqq_s8(__a, __b) __arm_vcmpeqq_s8(__a, __b) | |
311 | #define vcmpeqq_n_s8(__a, __b) __arm_vcmpeqq_n_s8(__a, __b) | |
312 | #define vqshluq_n_s8(__a, __imm) __arm_vqshluq_n_s8(__a, __imm) | |
313 | #define vaddvq_p_s8(__a, __p) __arm_vaddvq_p_s8(__a, __p) | |
314 | #define vsubq_s8(__a, __b) __arm_vsubq_s8(__a, __b) | |
315 | #define vsubq_n_s8(__a, __b) __arm_vsubq_n_s8(__a, __b) | |
316 | #define vshlq_r_s8(__a, __b) __arm_vshlq_r_s8(__a, __b) | |
317 | #define vrshlq_s8(__a, __b) __arm_vrshlq_s8(__a, __b) | |
318 | #define vrshlq_n_s8(__a, __b) __arm_vrshlq_n_s8(__a, __b) | |
319 | #define vrmulhq_s8(__a, __b) __arm_vrmulhq_s8(__a, __b) | |
320 | #define vrhaddq_s8(__a, __b) __arm_vrhaddq_s8(__a, __b) | |
321 | #define vqsubq_s8(__a, __b) __arm_vqsubq_s8(__a, __b) | |
322 | #define vqsubq_n_s8(__a, __b) __arm_vqsubq_n_s8(__a, __b) | |
323 | #define vqshlq_s8(__a, __b) __arm_vqshlq_s8(__a, __b) | |
324 | #define vqshlq_r_s8(__a, __b) __arm_vqshlq_r_s8(__a, __b) | |
325 | #define vqrshlq_s8(__a, __b) __arm_vqrshlq_s8(__a, __b) | |
326 | #define vqrshlq_n_s8(__a, __b) __arm_vqrshlq_n_s8(__a, __b) | |
327 | #define vqrdmulhq_s8(__a, __b) __arm_vqrdmulhq_s8(__a, __b) | |
328 | #define vqrdmulhq_n_s8(__a, __b) __arm_vqrdmulhq_n_s8(__a, __b) | |
329 | #define vqdmulhq_s8(__a, __b) __arm_vqdmulhq_s8(__a, __b) | |
330 | #define vqdmulhq_n_s8(__a, __b) __arm_vqdmulhq_n_s8(__a, __b) | |
331 | #define vqaddq_s8(__a, __b) __arm_vqaddq_s8(__a, __b) | |
332 | #define vqaddq_n_s8(__a, __b) __arm_vqaddq_n_s8(__a, __b) | |
333 | #define vorrq_s8(__a, __b) __arm_vorrq_s8(__a, __b) | |
334 | #define vornq_s8(__a, __b) __arm_vornq_s8(__a, __b) | |
335 | #define vmulq_s8(__a, __b) __arm_vmulq_s8(__a, __b) | |
336 | #define vmulq_n_s8(__a, __b) __arm_vmulq_n_s8(__a, __b) | |
337 | #define vmulltq_int_s8(__a, __b) __arm_vmulltq_int_s8(__a, __b) | |
338 | #define vmullbq_int_s8(__a, __b) __arm_vmullbq_int_s8(__a, __b) | |
339 | #define vmulhq_s8(__a, __b) __arm_vmulhq_s8(__a, __b) | |
340 | #define vmlsdavxq_s8(__a, __b) __arm_vmlsdavxq_s8(__a, __b) | |
341 | #define vmlsdavq_s8(__a, __b) __arm_vmlsdavq_s8(__a, __b) | |
342 | #define vmladavxq_s8(__a, __b) __arm_vmladavxq_s8(__a, __b) | |
343 | #define vmladavq_s8(__a, __b) __arm_vmladavq_s8(__a, __b) | |
344 | #define vminvq_s8(__a, __b) __arm_vminvq_s8(__a, __b) | |
345 | #define vminq_s8(__a, __b) __arm_vminq_s8(__a, __b) | |
346 | #define vmaxvq_s8(__a, __b) __arm_vmaxvq_s8(__a, __b) | |
347 | #define vmaxq_s8(__a, __b) __arm_vmaxq_s8(__a, __b) | |
348 | #define vhsubq_s8(__a, __b) __arm_vhsubq_s8(__a, __b) | |
349 | #define vhsubq_n_s8(__a, __b) __arm_vhsubq_n_s8(__a, __b) | |
350 | #define vhcaddq_rot90_s8(__a, __b) __arm_vhcaddq_rot90_s8(__a, __b) | |
351 | #define vhcaddq_rot270_s8(__a, __b) __arm_vhcaddq_rot270_s8(__a, __b) | |
352 | #define vhaddq_s8(__a, __b) __arm_vhaddq_s8(__a, __b) | |
353 | #define vhaddq_n_s8(__a, __b) __arm_vhaddq_n_s8(__a, __b) | |
354 | #define veorq_s8(__a, __b) __arm_veorq_s8(__a, __b) | |
355 | #define vcaddq_rot90_s8(__a, __b) __arm_vcaddq_rot90_s8(__a, __b) | |
356 | #define vcaddq_rot270_s8(__a, __b) __arm_vcaddq_rot270_s8(__a, __b) | |
357 | #define vbrsrq_n_s8(__a, __b) __arm_vbrsrq_n_s8(__a, __b) | |
358 | #define vbicq_s8(__a, __b) __arm_vbicq_s8(__a, __b) | |
359 | #define vandq_s8(__a, __b) __arm_vandq_s8(__a, __b) | |
360 | #define vaddvaq_s8(__a, __b) __arm_vaddvaq_s8(__a, __b) | |
361 | #define vaddq_n_s8(__a, __b) __arm_vaddq_n_s8(__a, __b) | |
362 | #define vabdq_s8(__a, __b) __arm_vabdq_s8(__a, __b) | |
363 | #define vshlq_n_s8(__a, __imm) __arm_vshlq_n_s8(__a, __imm) | |
364 | #define vrshrq_n_s8(__a, __imm) __arm_vrshrq_n_s8(__a, __imm) | |
365 | #define vqshlq_n_s8(__a, __imm) __arm_vqshlq_n_s8(__a, __imm) | |
366 | #define vsubq_u16(__a, __b) __arm_vsubq_u16(__a, __b) | |
367 | #define vsubq_n_u16(__a, __b) __arm_vsubq_n_u16(__a, __b) | |
368 | #define vrmulhq_u16(__a, __b) __arm_vrmulhq_u16(__a, __b) | |
369 | #define vrhaddq_u16(__a, __b) __arm_vrhaddq_u16(__a, __b) | |
370 | #define vqsubq_u16(__a, __b) __arm_vqsubq_u16(__a, __b) | |
371 | #define vqsubq_n_u16(__a, __b) __arm_vqsubq_n_u16(__a, __b) | |
372 | #define vqaddq_u16(__a, __b) __arm_vqaddq_u16(__a, __b) | |
373 | #define vqaddq_n_u16(__a, __b) __arm_vqaddq_n_u16(__a, __b) | |
374 | #define vorrq_u16(__a, __b) __arm_vorrq_u16(__a, __b) | |
375 | #define vornq_u16(__a, __b) __arm_vornq_u16(__a, __b) | |
376 | #define vmulq_u16(__a, __b) __arm_vmulq_u16(__a, __b) | |
377 | #define vmulq_n_u16(__a, __b) __arm_vmulq_n_u16(__a, __b) | |
378 | #define vmulltq_int_u16(__a, __b) __arm_vmulltq_int_u16(__a, __b) | |
379 | #define vmullbq_int_u16(__a, __b) __arm_vmullbq_int_u16(__a, __b) | |
380 | #define vmulhq_u16(__a, __b) __arm_vmulhq_u16(__a, __b) | |
381 | #define vmladavq_u16(__a, __b) __arm_vmladavq_u16(__a, __b) | |
382 | #define vminvq_u16(__a, __b) __arm_vminvq_u16(__a, __b) | |
383 | #define vminq_u16(__a, __b) __arm_vminq_u16(__a, __b) | |
384 | #define vmaxvq_u16(__a, __b) __arm_vmaxvq_u16(__a, __b) | |
385 | #define vmaxq_u16(__a, __b) __arm_vmaxq_u16(__a, __b) | |
386 | #define vhsubq_u16(__a, __b) __arm_vhsubq_u16(__a, __b) | |
387 | #define vhsubq_n_u16(__a, __b) __arm_vhsubq_n_u16(__a, __b) | |
388 | #define vhaddq_u16(__a, __b) __arm_vhaddq_u16(__a, __b) | |
389 | #define vhaddq_n_u16(__a, __b) __arm_vhaddq_n_u16(__a, __b) | |
390 | #define veorq_u16(__a, __b) __arm_veorq_u16(__a, __b) | |
391 | #define vcmpneq_n_u16(__a, __b) __arm_vcmpneq_n_u16(__a, __b) | |
392 | #define vcmphiq_u16(__a, __b) __arm_vcmphiq_u16(__a, __b) | |
393 | #define vcmphiq_n_u16(__a, __b) __arm_vcmphiq_n_u16(__a, __b) | |
394 | #define vcmpeqq_u16(__a, __b) __arm_vcmpeqq_u16(__a, __b) | |
395 | #define vcmpeqq_n_u16(__a, __b) __arm_vcmpeqq_n_u16(__a, __b) | |
396 | #define vcmpcsq_u16(__a, __b) __arm_vcmpcsq_u16(__a, __b) | |
397 | #define vcmpcsq_n_u16(__a, __b) __arm_vcmpcsq_n_u16(__a, __b) | |
398 | #define vcaddq_rot90_u16(__a, __b) __arm_vcaddq_rot90_u16(__a, __b) | |
399 | #define vcaddq_rot270_u16(__a, __b) __arm_vcaddq_rot270_u16(__a, __b) | |
400 | #define vbicq_u16(__a, __b) __arm_vbicq_u16(__a, __b) | |
401 | #define vandq_u16(__a, __b) __arm_vandq_u16(__a, __b) | |
402 | #define vaddvq_p_u16(__a, __p) __arm_vaddvq_p_u16(__a, __p) | |
403 | #define vaddvaq_u16(__a, __b) __arm_vaddvaq_u16(__a, __b) | |
404 | #define vaddq_n_u16(__a, __b) __arm_vaddq_n_u16(__a, __b) | |
405 | #define vabdq_u16(__a, __b) __arm_vabdq_u16(__a, __b) | |
406 | #define vshlq_r_u16(__a, __b) __arm_vshlq_r_u16(__a, __b) | |
407 | #define vrshlq_u16(__a, __b) __arm_vrshlq_u16(__a, __b) | |
408 | #define vrshlq_n_u16(__a, __b) __arm_vrshlq_n_u16(__a, __b) | |
409 | #define vqshlq_u16(__a, __b) __arm_vqshlq_u16(__a, __b) | |
410 | #define vqshlq_r_u16(__a, __b) __arm_vqshlq_r_u16(__a, __b) | |
411 | #define vqrshlq_u16(__a, __b) __arm_vqrshlq_u16(__a, __b) | |
412 | #define vqrshlq_n_u16(__a, __b) __arm_vqrshlq_n_u16(__a, __b) | |
413 | #define vminavq_s16(__a, __b) __arm_vminavq_s16(__a, __b) | |
414 | #define vminaq_s16(__a, __b) __arm_vminaq_s16(__a, __b) | |
415 | #define vmaxavq_s16(__a, __b) __arm_vmaxavq_s16(__a, __b) | |
416 | #define vmaxaq_s16(__a, __b) __arm_vmaxaq_s16(__a, __b) | |
417 | #define vbrsrq_n_u16(__a, __b) __arm_vbrsrq_n_u16(__a, __b) | |
418 | #define vshlq_n_u16(__a, __imm) __arm_vshlq_n_u16(__a, __imm) | |
419 | #define vrshrq_n_u16(__a, __imm) __arm_vrshrq_n_u16(__a, __imm) | |
420 | #define vqshlq_n_u16(__a, __imm) __arm_vqshlq_n_u16(__a, __imm) | |
421 | #define vcmpneq_n_s16(__a, __b) __arm_vcmpneq_n_s16(__a, __b) | |
422 | #define vcmpltq_s16(__a, __b) __arm_vcmpltq_s16(__a, __b) | |
423 | #define vcmpltq_n_s16(__a, __b) __arm_vcmpltq_n_s16(__a, __b) | |
424 | #define vcmpleq_s16(__a, __b) __arm_vcmpleq_s16(__a, __b) | |
425 | #define vcmpleq_n_s16(__a, __b) __arm_vcmpleq_n_s16(__a, __b) | |
426 | #define vcmpgtq_s16(__a, __b) __arm_vcmpgtq_s16(__a, __b) | |
427 | #define vcmpgtq_n_s16(__a, __b) __arm_vcmpgtq_n_s16(__a, __b) | |
428 | #define vcmpgeq_s16(__a, __b) __arm_vcmpgeq_s16(__a, __b) | |
429 | #define vcmpgeq_n_s16(__a, __b) __arm_vcmpgeq_n_s16(__a, __b) | |
430 | #define vcmpeqq_s16(__a, __b) __arm_vcmpeqq_s16(__a, __b) | |
431 | #define vcmpeqq_n_s16(__a, __b) __arm_vcmpeqq_n_s16(__a, __b) | |
432 | #define vqshluq_n_s16(__a, __imm) __arm_vqshluq_n_s16(__a, __imm) | |
433 | #define vaddvq_p_s16(__a, __p) __arm_vaddvq_p_s16(__a, __p) | |
434 | #define vsubq_s16(__a, __b) __arm_vsubq_s16(__a, __b) | |
435 | #define vsubq_n_s16(__a, __b) __arm_vsubq_n_s16(__a, __b) | |
436 | #define vshlq_r_s16(__a, __b) __arm_vshlq_r_s16(__a, __b) | |
437 | #define vrshlq_s16(__a, __b) __arm_vrshlq_s16(__a, __b) | |
438 | #define vrshlq_n_s16(__a, __b) __arm_vrshlq_n_s16(__a, __b) | |
439 | #define vrmulhq_s16(__a, __b) __arm_vrmulhq_s16(__a, __b) | |
440 | #define vrhaddq_s16(__a, __b) __arm_vrhaddq_s16(__a, __b) | |
441 | #define vqsubq_s16(__a, __b) __arm_vqsubq_s16(__a, __b) | |
442 | #define vqsubq_n_s16(__a, __b) __arm_vqsubq_n_s16(__a, __b) | |
443 | #define vqshlq_s16(__a, __b) __arm_vqshlq_s16(__a, __b) | |
444 | #define vqshlq_r_s16(__a, __b) __arm_vqshlq_r_s16(__a, __b) | |
445 | #define vqrshlq_s16(__a, __b) __arm_vqrshlq_s16(__a, __b) | |
446 | #define vqrshlq_n_s16(__a, __b) __arm_vqrshlq_n_s16(__a, __b) | |
447 | #define vqrdmulhq_s16(__a, __b) __arm_vqrdmulhq_s16(__a, __b) | |
448 | #define vqrdmulhq_n_s16(__a, __b) __arm_vqrdmulhq_n_s16(__a, __b) | |
449 | #define vqdmulhq_s16(__a, __b) __arm_vqdmulhq_s16(__a, __b) | |
450 | #define vqdmulhq_n_s16(__a, __b) __arm_vqdmulhq_n_s16(__a, __b) | |
451 | #define vqaddq_s16(__a, __b) __arm_vqaddq_s16(__a, __b) | |
452 | #define vqaddq_n_s16(__a, __b) __arm_vqaddq_n_s16(__a, __b) | |
453 | #define vorrq_s16(__a, __b) __arm_vorrq_s16(__a, __b) | |
454 | #define vornq_s16(__a, __b) __arm_vornq_s16(__a, __b) | |
455 | #define vmulq_s16(__a, __b) __arm_vmulq_s16(__a, __b) | |
456 | #define vmulq_n_s16(__a, __b) __arm_vmulq_n_s16(__a, __b) | |
457 | #define vmulltq_int_s16(__a, __b) __arm_vmulltq_int_s16(__a, __b) | |
458 | #define vmullbq_int_s16(__a, __b) __arm_vmullbq_int_s16(__a, __b) | |
459 | #define vmulhq_s16(__a, __b) __arm_vmulhq_s16(__a, __b) | |
460 | #define vmlsdavxq_s16(__a, __b) __arm_vmlsdavxq_s16(__a, __b) | |
461 | #define vmlsdavq_s16(__a, __b) __arm_vmlsdavq_s16(__a, __b) | |
462 | #define vmladavxq_s16(__a, __b) __arm_vmladavxq_s16(__a, __b) | |
463 | #define vmladavq_s16(__a, __b) __arm_vmladavq_s16(__a, __b) | |
464 | #define vminvq_s16(__a, __b) __arm_vminvq_s16(__a, __b) | |
465 | #define vminq_s16(__a, __b) __arm_vminq_s16(__a, __b) | |
466 | #define vmaxvq_s16(__a, __b) __arm_vmaxvq_s16(__a, __b) | |
467 | #define vmaxq_s16(__a, __b) __arm_vmaxq_s16(__a, __b) | |
468 | #define vhsubq_s16(__a, __b) __arm_vhsubq_s16(__a, __b) | |
469 | #define vhsubq_n_s16(__a, __b) __arm_vhsubq_n_s16(__a, __b) | |
470 | #define vhcaddq_rot90_s16(__a, __b) __arm_vhcaddq_rot90_s16(__a, __b) | |
471 | #define vhcaddq_rot270_s16(__a, __b) __arm_vhcaddq_rot270_s16(__a, __b) | |
472 | #define vhaddq_s16(__a, __b) __arm_vhaddq_s16(__a, __b) | |
473 | #define vhaddq_n_s16(__a, __b) __arm_vhaddq_n_s16(__a, __b) | |
474 | #define veorq_s16(__a, __b) __arm_veorq_s16(__a, __b) | |
475 | #define vcaddq_rot90_s16(__a, __b) __arm_vcaddq_rot90_s16(__a, __b) | |
476 | #define vcaddq_rot270_s16(__a, __b) __arm_vcaddq_rot270_s16(__a, __b) | |
477 | #define vbrsrq_n_s16(__a, __b) __arm_vbrsrq_n_s16(__a, __b) | |
478 | #define vbicq_s16(__a, __b) __arm_vbicq_s16(__a, __b) | |
479 | #define vandq_s16(__a, __b) __arm_vandq_s16(__a, __b) | |
480 | #define vaddvaq_s16(__a, __b) __arm_vaddvaq_s16(__a, __b) | |
481 | #define vaddq_n_s16(__a, __b) __arm_vaddq_n_s16(__a, __b) | |
482 | #define vabdq_s16(__a, __b) __arm_vabdq_s16(__a, __b) | |
483 | #define vshlq_n_s16(__a, __imm) __arm_vshlq_n_s16(__a, __imm) | |
484 | #define vrshrq_n_s16(__a, __imm) __arm_vrshrq_n_s16(__a, __imm) | |
485 | #define vqshlq_n_s16(__a, __imm) __arm_vqshlq_n_s16(__a, __imm) | |
486 | #define vsubq_u32(__a, __b) __arm_vsubq_u32(__a, __b) | |
487 | #define vsubq_n_u32(__a, __b) __arm_vsubq_n_u32(__a, __b) | |
488 | #define vrmulhq_u32(__a, __b) __arm_vrmulhq_u32(__a, __b) | |
489 | #define vrhaddq_u32(__a, __b) __arm_vrhaddq_u32(__a, __b) | |
490 | #define vqsubq_u32(__a, __b) __arm_vqsubq_u32(__a, __b) | |
491 | #define vqsubq_n_u32(__a, __b) __arm_vqsubq_n_u32(__a, __b) | |
492 | #define vqaddq_u32(__a, __b) __arm_vqaddq_u32(__a, __b) | |
493 | #define vqaddq_n_u32(__a, __b) __arm_vqaddq_n_u32(__a, __b) | |
494 | #define vorrq_u32(__a, __b) __arm_vorrq_u32(__a, __b) | |
495 | #define vornq_u32(__a, __b) __arm_vornq_u32(__a, __b) | |
496 | #define vmulq_u32(__a, __b) __arm_vmulq_u32(__a, __b) | |
497 | #define vmulq_n_u32(__a, __b) __arm_vmulq_n_u32(__a, __b) | |
498 | #define vmulltq_int_u32(__a, __b) __arm_vmulltq_int_u32(__a, __b) | |
499 | #define vmullbq_int_u32(__a, __b) __arm_vmullbq_int_u32(__a, __b) | |
500 | #define vmulhq_u32(__a, __b) __arm_vmulhq_u32(__a, __b) | |
501 | #define vmladavq_u32(__a, __b) __arm_vmladavq_u32(__a, __b) | |
502 | #define vminvq_u32(__a, __b) __arm_vminvq_u32(__a, __b) | |
503 | #define vminq_u32(__a, __b) __arm_vminq_u32(__a, __b) | |
504 | #define vmaxvq_u32(__a, __b) __arm_vmaxvq_u32(__a, __b) | |
505 | #define vmaxq_u32(__a, __b) __arm_vmaxq_u32(__a, __b) | |
506 | #define vhsubq_u32(__a, __b) __arm_vhsubq_u32(__a, __b) | |
507 | #define vhsubq_n_u32(__a, __b) __arm_vhsubq_n_u32(__a, __b) | |
508 | #define vhaddq_u32(__a, __b) __arm_vhaddq_u32(__a, __b) | |
509 | #define vhaddq_n_u32(__a, __b) __arm_vhaddq_n_u32(__a, __b) | |
510 | #define veorq_u32(__a, __b) __arm_veorq_u32(__a, __b) | |
511 | #define vcmpneq_n_u32(__a, __b) __arm_vcmpneq_n_u32(__a, __b) | |
512 | #define vcmphiq_u32(__a, __b) __arm_vcmphiq_u32(__a, __b) | |
513 | #define vcmphiq_n_u32(__a, __b) __arm_vcmphiq_n_u32(__a, __b) | |
514 | #define vcmpeqq_u32(__a, __b) __arm_vcmpeqq_u32(__a, __b) | |
515 | #define vcmpeqq_n_u32(__a, __b) __arm_vcmpeqq_n_u32(__a, __b) | |
516 | #define vcmpcsq_u32(__a, __b) __arm_vcmpcsq_u32(__a, __b) | |
517 | #define vcmpcsq_n_u32(__a, __b) __arm_vcmpcsq_n_u32(__a, __b) | |
518 | #define vcaddq_rot90_u32(__a, __b) __arm_vcaddq_rot90_u32(__a, __b) | |
519 | #define vcaddq_rot270_u32(__a, __b) __arm_vcaddq_rot270_u32(__a, __b) | |
520 | #define vbicq_u32(__a, __b) __arm_vbicq_u32(__a, __b) | |
521 | #define vandq_u32(__a, __b) __arm_vandq_u32(__a, __b) | |
522 | #define vaddvq_p_u32(__a, __p) __arm_vaddvq_p_u32(__a, __p) | |
523 | #define vaddvaq_u32(__a, __b) __arm_vaddvaq_u32(__a, __b) | |
524 | #define vaddq_n_u32(__a, __b) __arm_vaddq_n_u32(__a, __b) | |
525 | #define vabdq_u32(__a, __b) __arm_vabdq_u32(__a, __b) | |
526 | #define vshlq_r_u32(__a, __b) __arm_vshlq_r_u32(__a, __b) | |
527 | #define vrshlq_u32(__a, __b) __arm_vrshlq_u32(__a, __b) | |
528 | #define vrshlq_n_u32(__a, __b) __arm_vrshlq_n_u32(__a, __b) | |
529 | #define vqshlq_u32(__a, __b) __arm_vqshlq_u32(__a, __b) | |
530 | #define vqshlq_r_u32(__a, __b) __arm_vqshlq_r_u32(__a, __b) | |
531 | #define vqrshlq_u32(__a, __b) __arm_vqrshlq_u32(__a, __b) | |
532 | #define vqrshlq_n_u32(__a, __b) __arm_vqrshlq_n_u32(__a, __b) | |
533 | #define vminavq_s32(__a, __b) __arm_vminavq_s32(__a, __b) | |
534 | #define vminaq_s32(__a, __b) __arm_vminaq_s32(__a, __b) | |
535 | #define vmaxavq_s32(__a, __b) __arm_vmaxavq_s32(__a, __b) | |
536 | #define vmaxaq_s32(__a, __b) __arm_vmaxaq_s32(__a, __b) | |
537 | #define vbrsrq_n_u32(__a, __b) __arm_vbrsrq_n_u32(__a, __b) | |
538 | #define vshlq_n_u32(__a, __imm) __arm_vshlq_n_u32(__a, __imm) | |
539 | #define vrshrq_n_u32(__a, __imm) __arm_vrshrq_n_u32(__a, __imm) | |
540 | #define vqshlq_n_u32(__a, __imm) __arm_vqshlq_n_u32(__a, __imm) | |
541 | #define vcmpneq_n_s32(__a, __b) __arm_vcmpneq_n_s32(__a, __b) | |
542 | #define vcmpltq_s32(__a, __b) __arm_vcmpltq_s32(__a, __b) | |
543 | #define vcmpltq_n_s32(__a, __b) __arm_vcmpltq_n_s32(__a, __b) | |
544 | #define vcmpleq_s32(__a, __b) __arm_vcmpleq_s32(__a, __b) | |
545 | #define vcmpleq_n_s32(__a, __b) __arm_vcmpleq_n_s32(__a, __b) | |
546 | #define vcmpgtq_s32(__a, __b) __arm_vcmpgtq_s32(__a, __b) | |
547 | #define vcmpgtq_n_s32(__a, __b) __arm_vcmpgtq_n_s32(__a, __b) | |
548 | #define vcmpgeq_s32(__a, __b) __arm_vcmpgeq_s32(__a, __b) | |
549 | #define vcmpgeq_n_s32(__a, __b) __arm_vcmpgeq_n_s32(__a, __b) | |
550 | #define vcmpeqq_s32(__a, __b) __arm_vcmpeqq_s32(__a, __b) | |
551 | #define vcmpeqq_n_s32(__a, __b) __arm_vcmpeqq_n_s32(__a, __b) | |
552 | #define vqshluq_n_s32(__a, __imm) __arm_vqshluq_n_s32(__a, __imm) | |
553 | #define vaddvq_p_s32(__a, __p) __arm_vaddvq_p_s32(__a, __p) | |
554 | #define vsubq_s32(__a, __b) __arm_vsubq_s32(__a, __b) | |
555 | #define vsubq_n_s32(__a, __b) __arm_vsubq_n_s32(__a, __b) | |
556 | #define vshlq_r_s32(__a, __b) __arm_vshlq_r_s32(__a, __b) | |
557 | #define vrshlq_s32(__a, __b) __arm_vrshlq_s32(__a, __b) | |
558 | #define vrshlq_n_s32(__a, __b) __arm_vrshlq_n_s32(__a, __b) | |
559 | #define vrmulhq_s32(__a, __b) __arm_vrmulhq_s32(__a, __b) | |
560 | #define vrhaddq_s32(__a, __b) __arm_vrhaddq_s32(__a, __b) | |
561 | #define vqsubq_s32(__a, __b) __arm_vqsubq_s32(__a, __b) | |
562 | #define vqsubq_n_s32(__a, __b) __arm_vqsubq_n_s32(__a, __b) | |
563 | #define vqshlq_s32(__a, __b) __arm_vqshlq_s32(__a, __b) | |
564 | #define vqshlq_r_s32(__a, __b) __arm_vqshlq_r_s32(__a, __b) | |
565 | #define vqrshlq_s32(__a, __b) __arm_vqrshlq_s32(__a, __b) | |
566 | #define vqrshlq_n_s32(__a, __b) __arm_vqrshlq_n_s32(__a, __b) | |
567 | #define vqrdmulhq_s32(__a, __b) __arm_vqrdmulhq_s32(__a, __b) | |
568 | #define vqrdmulhq_n_s32(__a, __b) __arm_vqrdmulhq_n_s32(__a, __b) | |
569 | #define vqdmulhq_s32(__a, __b) __arm_vqdmulhq_s32(__a, __b) | |
570 | #define vqdmulhq_n_s32(__a, __b) __arm_vqdmulhq_n_s32(__a, __b) | |
571 | #define vqaddq_s32(__a, __b) __arm_vqaddq_s32(__a, __b) | |
572 | #define vqaddq_n_s32(__a, __b) __arm_vqaddq_n_s32(__a, __b) | |
573 | #define vorrq_s32(__a, __b) __arm_vorrq_s32(__a, __b) | |
574 | #define vornq_s32(__a, __b) __arm_vornq_s32(__a, __b) | |
575 | #define vmulq_s32(__a, __b) __arm_vmulq_s32(__a, __b) | |
576 | #define vmulq_n_s32(__a, __b) __arm_vmulq_n_s32(__a, __b) | |
577 | #define vmulltq_int_s32(__a, __b) __arm_vmulltq_int_s32(__a, __b) | |
578 | #define vmullbq_int_s32(__a, __b) __arm_vmullbq_int_s32(__a, __b) | |
579 | #define vmulhq_s32(__a, __b) __arm_vmulhq_s32(__a, __b) | |
580 | #define vmlsdavxq_s32(__a, __b) __arm_vmlsdavxq_s32(__a, __b) | |
581 | #define vmlsdavq_s32(__a, __b) __arm_vmlsdavq_s32(__a, __b) | |
582 | #define vmladavxq_s32(__a, __b) __arm_vmladavxq_s32(__a, __b) | |
583 | #define vmladavq_s32(__a, __b) __arm_vmladavq_s32(__a, __b) | |
584 | #define vminvq_s32(__a, __b) __arm_vminvq_s32(__a, __b) | |
585 | #define vminq_s32(__a, __b) __arm_vminq_s32(__a, __b) | |
586 | #define vmaxvq_s32(__a, __b) __arm_vmaxvq_s32(__a, __b) | |
587 | #define vmaxq_s32(__a, __b) __arm_vmaxq_s32(__a, __b) | |
588 | #define vhsubq_s32(__a, __b) __arm_vhsubq_s32(__a, __b) | |
589 | #define vhsubq_n_s32(__a, __b) __arm_vhsubq_n_s32(__a, __b) | |
590 | #define vhcaddq_rot90_s32(__a, __b) __arm_vhcaddq_rot90_s32(__a, __b) | |
591 | #define vhcaddq_rot270_s32(__a, __b) __arm_vhcaddq_rot270_s32(__a, __b) | |
592 | #define vhaddq_s32(__a, __b) __arm_vhaddq_s32(__a, __b) | |
593 | #define vhaddq_n_s32(__a, __b) __arm_vhaddq_n_s32(__a, __b) | |
594 | #define veorq_s32(__a, __b) __arm_veorq_s32(__a, __b) | |
595 | #define vcaddq_rot90_s32(__a, __b) __arm_vcaddq_rot90_s32(__a, __b) | |
596 | #define vcaddq_rot270_s32(__a, __b) __arm_vcaddq_rot270_s32(__a, __b) | |
597 | #define vbrsrq_n_s32(__a, __b) __arm_vbrsrq_n_s32(__a, __b) | |
598 | #define vbicq_s32(__a, __b) __arm_vbicq_s32(__a, __b) | |
599 | #define vandq_s32(__a, __b) __arm_vandq_s32(__a, __b) | |
600 | #define vaddvaq_s32(__a, __b) __arm_vaddvaq_s32(__a, __b) | |
601 | #define vaddq_n_s32(__a, __b) __arm_vaddq_n_s32(__a, __b) | |
602 | #define vabdq_s32(__a, __b) __arm_vabdq_s32(__a, __b) | |
603 | #define vshlq_n_s32(__a, __imm) __arm_vshlq_n_s32(__a, __imm) | |
604 | #define vrshrq_n_s32(__a, __imm) __arm_vrshrq_n_s32(__a, __imm) | |
605 | #define vqshlq_n_s32(__a, __imm) __arm_vqshlq_n_s32(__a, __imm) | |
f9355dee SP |
606 | #define vqmovntq_u16(__a, __b) __arm_vqmovntq_u16(__a, __b) |
607 | #define vqmovnbq_u16(__a, __b) __arm_vqmovnbq_u16(__a, __b) | |
608 | #define vmulltq_poly_p8(__a, __b) __arm_vmulltq_poly_p8(__a, __b) | |
609 | #define vmullbq_poly_p8(__a, __b) __arm_vmullbq_poly_p8(__a, __b) | |
610 | #define vmovntq_u16(__a, __b) __arm_vmovntq_u16(__a, __b) | |
611 | #define vmovnbq_u16(__a, __b) __arm_vmovnbq_u16(__a, __b) | |
612 | #define vmlaldavq_u16(__a, __b) __arm_vmlaldavq_u16(__a, __b) | |
613 | #define vqmovuntq_s16(__a, __b) __arm_vqmovuntq_s16(__a, __b) | |
614 | #define vqmovunbq_s16(__a, __b) __arm_vqmovunbq_s16(__a, __b) | |
615 | #define vshlltq_n_u8(__a, __imm) __arm_vshlltq_n_u8(__a, __imm) | |
616 | #define vshllbq_n_u8(__a, __imm) __arm_vshllbq_n_u8(__a, __imm) | |
617 | #define vorrq_n_u16(__a, __imm) __arm_vorrq_n_u16(__a, __imm) | |
618 | #define vbicq_n_u16(__a, __imm) __arm_vbicq_n_u16(__a, __imm) | |
619 | #define vcmpneq_n_f16(__a, __b) __arm_vcmpneq_n_f16(__a, __b) | |
620 | #define vcmpneq_f16(__a, __b) __arm_vcmpneq_f16(__a, __b) | |
621 | #define vcmpltq_n_f16(__a, __b) __arm_vcmpltq_n_f16(__a, __b) | |
622 | #define vcmpltq_f16(__a, __b) __arm_vcmpltq_f16(__a, __b) | |
623 | #define vcmpleq_n_f16(__a, __b) __arm_vcmpleq_n_f16(__a, __b) | |
624 | #define vcmpleq_f16(__a, __b) __arm_vcmpleq_f16(__a, __b) | |
625 | #define vcmpgtq_n_f16(__a, __b) __arm_vcmpgtq_n_f16(__a, __b) | |
626 | #define vcmpgtq_f16(__a, __b) __arm_vcmpgtq_f16(__a, __b) | |
627 | #define vcmpgeq_n_f16(__a, __b) __arm_vcmpgeq_n_f16(__a, __b) | |
628 | #define vcmpgeq_f16(__a, __b) __arm_vcmpgeq_f16(__a, __b) | |
629 | #define vcmpeqq_n_f16(__a, __b) __arm_vcmpeqq_n_f16(__a, __b) | |
630 | #define vcmpeqq_f16(__a, __b) __arm_vcmpeqq_f16(__a, __b) | |
631 | #define vsubq_f16(__a, __b) __arm_vsubq_f16(__a, __b) | |
632 | #define vqmovntq_s16(__a, __b) __arm_vqmovntq_s16(__a, __b) | |
633 | #define vqmovnbq_s16(__a, __b) __arm_vqmovnbq_s16(__a, __b) | |
634 | #define vqdmulltq_s16(__a, __b) __arm_vqdmulltq_s16(__a, __b) | |
635 | #define vqdmulltq_n_s16(__a, __b) __arm_vqdmulltq_n_s16(__a, __b) | |
636 | #define vqdmullbq_s16(__a, __b) __arm_vqdmullbq_s16(__a, __b) | |
637 | #define vqdmullbq_n_s16(__a, __b) __arm_vqdmullbq_n_s16(__a, __b) | |
638 | #define vorrq_f16(__a, __b) __arm_vorrq_f16(__a, __b) | |
639 | #define vornq_f16(__a, __b) __arm_vornq_f16(__a, __b) | |
640 | #define vmulq_n_f16(__a, __b) __arm_vmulq_n_f16(__a, __b) | |
641 | #define vmulq_f16(__a, __b) __arm_vmulq_f16(__a, __b) | |
642 | #define vmovntq_s16(__a, __b) __arm_vmovntq_s16(__a, __b) | |
643 | #define vmovnbq_s16(__a, __b) __arm_vmovnbq_s16(__a, __b) | |
644 | #define vmlsldavxq_s16(__a, __b) __arm_vmlsldavxq_s16(__a, __b) | |
645 | #define vmlsldavq_s16(__a, __b) __arm_vmlsldavq_s16(__a, __b) | |
646 | #define vmlaldavxq_s16(__a, __b) __arm_vmlaldavxq_s16(__a, __b) | |
647 | #define vmlaldavq_s16(__a, __b) __arm_vmlaldavq_s16(__a, __b) | |
648 | #define vminnmvq_f16(__a, __b) __arm_vminnmvq_f16(__a, __b) | |
649 | #define vminnmq_f16(__a, __b) __arm_vminnmq_f16(__a, __b) | |
650 | #define vminnmavq_f16(__a, __b) __arm_vminnmavq_f16(__a, __b) | |
651 | #define vminnmaq_f16(__a, __b) __arm_vminnmaq_f16(__a, __b) | |
652 | #define vmaxnmvq_f16(__a, __b) __arm_vmaxnmvq_f16(__a, __b) | |
653 | #define vmaxnmq_f16(__a, __b) __arm_vmaxnmq_f16(__a, __b) | |
654 | #define vmaxnmavq_f16(__a, __b) __arm_vmaxnmavq_f16(__a, __b) | |
655 | #define vmaxnmaq_f16(__a, __b) __arm_vmaxnmaq_f16(__a, __b) | |
656 | #define veorq_f16(__a, __b) __arm_veorq_f16(__a, __b) | |
657 | #define vcmulq_rot90_f16(__a, __b) __arm_vcmulq_rot90_f16(__a, __b) | |
658 | #define vcmulq_rot270_f16(__a, __b) __arm_vcmulq_rot270_f16(__a, __b) | |
659 | #define vcmulq_rot180_f16(__a, __b) __arm_vcmulq_rot180_f16(__a, __b) | |
660 | #define vcmulq_f16(__a, __b) __arm_vcmulq_f16(__a, __b) | |
661 | #define vcaddq_rot90_f16(__a, __b) __arm_vcaddq_rot90_f16(__a, __b) | |
662 | #define vcaddq_rot270_f16(__a, __b) __arm_vcaddq_rot270_f16(__a, __b) | |
663 | #define vbicq_f16(__a, __b) __arm_vbicq_f16(__a, __b) | |
664 | #define vandq_f16(__a, __b) __arm_vandq_f16(__a, __b) | |
665 | #define vaddq_n_f16(__a, __b) __arm_vaddq_n_f16(__a, __b) | |
666 | #define vabdq_f16(__a, __b) __arm_vabdq_f16(__a, __b) | |
667 | #define vshlltq_n_s8(__a, __imm) __arm_vshlltq_n_s8(__a, __imm) | |
668 | #define vshllbq_n_s8(__a, __imm) __arm_vshllbq_n_s8(__a, __imm) | |
669 | #define vorrq_n_s16(__a, __imm) __arm_vorrq_n_s16(__a, __imm) | |
670 | #define vbicq_n_s16(__a, __imm) __arm_vbicq_n_s16(__a, __imm) | |
671 | #define vqmovntq_u32(__a, __b) __arm_vqmovntq_u32(__a, __b) | |
672 | #define vqmovnbq_u32(__a, __b) __arm_vqmovnbq_u32(__a, __b) | |
673 | #define vmulltq_poly_p16(__a, __b) __arm_vmulltq_poly_p16(__a, __b) | |
674 | #define vmullbq_poly_p16(__a, __b) __arm_vmullbq_poly_p16(__a, __b) | |
675 | #define vmovntq_u32(__a, __b) __arm_vmovntq_u32(__a, __b) | |
676 | #define vmovnbq_u32(__a, __b) __arm_vmovnbq_u32(__a, __b) | |
677 | #define vmlaldavq_u32(__a, __b) __arm_vmlaldavq_u32(__a, __b) | |
678 | #define vqmovuntq_s32(__a, __b) __arm_vqmovuntq_s32(__a, __b) | |
679 | #define vqmovunbq_s32(__a, __b) __arm_vqmovunbq_s32(__a, __b) | |
680 | #define vshlltq_n_u16(__a, __imm) __arm_vshlltq_n_u16(__a, __imm) | |
681 | #define vshllbq_n_u16(__a, __imm) __arm_vshllbq_n_u16(__a, __imm) | |
682 | #define vorrq_n_u32(__a, __imm) __arm_vorrq_n_u32(__a, __imm) | |
683 | #define vbicq_n_u32(__a, __imm) __arm_vbicq_n_u32(__a, __imm) | |
684 | #define vcmpneq_n_f32(__a, __b) __arm_vcmpneq_n_f32(__a, __b) | |
685 | #define vcmpneq_f32(__a, __b) __arm_vcmpneq_f32(__a, __b) | |
686 | #define vcmpltq_n_f32(__a, __b) __arm_vcmpltq_n_f32(__a, __b) | |
687 | #define vcmpltq_f32(__a, __b) __arm_vcmpltq_f32(__a, __b) | |
688 | #define vcmpleq_n_f32(__a, __b) __arm_vcmpleq_n_f32(__a, __b) | |
689 | #define vcmpleq_f32(__a, __b) __arm_vcmpleq_f32(__a, __b) | |
690 | #define vcmpgtq_n_f32(__a, __b) __arm_vcmpgtq_n_f32(__a, __b) | |
691 | #define vcmpgtq_f32(__a, __b) __arm_vcmpgtq_f32(__a, __b) | |
692 | #define vcmpgeq_n_f32(__a, __b) __arm_vcmpgeq_n_f32(__a, __b) | |
693 | #define vcmpgeq_f32(__a, __b) __arm_vcmpgeq_f32(__a, __b) | |
694 | #define vcmpeqq_n_f32(__a, __b) __arm_vcmpeqq_n_f32(__a, __b) | |
695 | #define vcmpeqq_f32(__a, __b) __arm_vcmpeqq_f32(__a, __b) | |
696 | #define vsubq_f32(__a, __b) __arm_vsubq_f32(__a, __b) | |
697 | #define vqmovntq_s32(__a, __b) __arm_vqmovntq_s32(__a, __b) | |
698 | #define vqmovnbq_s32(__a, __b) __arm_vqmovnbq_s32(__a, __b) | |
699 | #define vqdmulltq_s32(__a, __b) __arm_vqdmulltq_s32(__a, __b) | |
700 | #define vqdmulltq_n_s32(__a, __b) __arm_vqdmulltq_n_s32(__a, __b) | |
701 | #define vqdmullbq_s32(__a, __b) __arm_vqdmullbq_s32(__a, __b) | |
702 | #define vqdmullbq_n_s32(__a, __b) __arm_vqdmullbq_n_s32(__a, __b) | |
703 | #define vorrq_f32(__a, __b) __arm_vorrq_f32(__a, __b) | |
704 | #define vornq_f32(__a, __b) __arm_vornq_f32(__a, __b) | |
705 | #define vmulq_n_f32(__a, __b) __arm_vmulq_n_f32(__a, __b) | |
706 | #define vmulq_f32(__a, __b) __arm_vmulq_f32(__a, __b) | |
707 | #define vmovntq_s32(__a, __b) __arm_vmovntq_s32(__a, __b) | |
708 | #define vmovnbq_s32(__a, __b) __arm_vmovnbq_s32(__a, __b) | |
709 | #define vmlsldavxq_s32(__a, __b) __arm_vmlsldavxq_s32(__a, __b) | |
710 | #define vmlsldavq_s32(__a, __b) __arm_vmlsldavq_s32(__a, __b) | |
711 | #define vmlaldavxq_s32(__a, __b) __arm_vmlaldavxq_s32(__a, __b) | |
712 | #define vmlaldavq_s32(__a, __b) __arm_vmlaldavq_s32(__a, __b) | |
713 | #define vminnmvq_f32(__a, __b) __arm_vminnmvq_f32(__a, __b) | |
714 | #define vminnmq_f32(__a, __b) __arm_vminnmq_f32(__a, __b) | |
715 | #define vminnmavq_f32(__a, __b) __arm_vminnmavq_f32(__a, __b) | |
716 | #define vminnmaq_f32(__a, __b) __arm_vminnmaq_f32(__a, __b) | |
717 | #define vmaxnmvq_f32(__a, __b) __arm_vmaxnmvq_f32(__a, __b) | |
718 | #define vmaxnmq_f32(__a, __b) __arm_vmaxnmq_f32(__a, __b) | |
719 | #define vmaxnmavq_f32(__a, __b) __arm_vmaxnmavq_f32(__a, __b) | |
720 | #define vmaxnmaq_f32(__a, __b) __arm_vmaxnmaq_f32(__a, __b) | |
721 | #define veorq_f32(__a, __b) __arm_veorq_f32(__a, __b) | |
722 | #define vcmulq_rot90_f32(__a, __b) __arm_vcmulq_rot90_f32(__a, __b) | |
723 | #define vcmulq_rot270_f32(__a, __b) __arm_vcmulq_rot270_f32(__a, __b) | |
724 | #define vcmulq_rot180_f32(__a, __b) __arm_vcmulq_rot180_f32(__a, __b) | |
725 | #define vcmulq_f32(__a, __b) __arm_vcmulq_f32(__a, __b) | |
726 | #define vcaddq_rot90_f32(__a, __b) __arm_vcaddq_rot90_f32(__a, __b) | |
727 | #define vcaddq_rot270_f32(__a, __b) __arm_vcaddq_rot270_f32(__a, __b) | |
728 | #define vbicq_f32(__a, __b) __arm_vbicq_f32(__a, __b) | |
729 | #define vandq_f32(__a, __b) __arm_vandq_f32(__a, __b) | |
730 | #define vaddq_n_f32(__a, __b) __arm_vaddq_n_f32(__a, __b) | |
731 | #define vabdq_f32(__a, __b) __arm_vabdq_f32(__a, __b) | |
732 | #define vshlltq_n_s16(__a, __imm) __arm_vshlltq_n_s16(__a, __imm) | |
733 | #define vshllbq_n_s16(__a, __imm) __arm_vshllbq_n_s16(__a, __imm) | |
734 | #define vorrq_n_s32(__a, __imm) __arm_vorrq_n_s32(__a, __imm) | |
735 | #define vbicq_n_s32(__a, __imm) __arm_vbicq_n_s32(__a, __imm) | |
736 | #define vrmlaldavhq_u32(__a, __b) __arm_vrmlaldavhq_u32(__a, __b) | |
737 | #define vctp8q_m(__a, __p) __arm_vctp8q_m(__a, __p) | |
738 | #define vctp64q_m(__a, __p) __arm_vctp64q_m(__a, __p) | |
739 | #define vctp32q_m(__a, __p) __arm_vctp32q_m(__a, __p) | |
740 | #define vctp16q_m(__a, __p) __arm_vctp16q_m(__a, __p) | |
741 | #define vaddlvaq_u32(__a, __b) __arm_vaddlvaq_u32(__a, __b) | |
742 | #define vrmlsldavhxq_s32(__a, __b) __arm_vrmlsldavhxq_s32(__a, __b) | |
743 | #define vrmlsldavhq_s32(__a, __b) __arm_vrmlsldavhq_s32(__a, __b) | |
744 | #define vrmlaldavhxq_s32(__a, __b) __arm_vrmlaldavhxq_s32(__a, __b) | |
745 | #define vrmlaldavhq_s32(__a, __b) __arm_vrmlaldavhq_s32(__a, __b) | |
746 | #define vcvttq_f16_f32(__a, __b) __arm_vcvttq_f16_f32(__a, __b) | |
747 | #define vcvtbq_f16_f32(__a, __b) __arm_vcvtbq_f16_f32(__a, __b) | |
748 | #define vaddlvaq_s32(__a, __b) __arm_vaddlvaq_s32(__a, __b) | |
0dad5b33 SP |
749 | #define vabavq_s8(__a, __b, __c) __arm_vabavq_s8(__a, __b, __c) |
750 | #define vabavq_s16(__a, __b, __c) __arm_vabavq_s16(__a, __b, __c) | |
751 | #define vabavq_s32(__a, __b, __c) __arm_vabavq_s32(__a, __b, __c) | |
752 | #define vbicq_m_n_s16(__a, __imm, __p) __arm_vbicq_m_n_s16(__a, __imm, __p) | |
753 | #define vbicq_m_n_s32(__a, __imm, __p) __arm_vbicq_m_n_s32(__a, __imm, __p) | |
754 | #define vbicq_m_n_u16(__a, __imm, __p) __arm_vbicq_m_n_u16(__a, __imm, __p) | |
755 | #define vbicq_m_n_u32(__a, __imm, __p) __arm_vbicq_m_n_u32(__a, __imm, __p) | |
756 | #define vcmpeqq_m_f16(__a, __b, __p) __arm_vcmpeqq_m_f16(__a, __b, __p) | |
757 | #define vcmpeqq_m_f32(__a, __b, __p) __arm_vcmpeqq_m_f32(__a, __b, __p) | |
758 | #define vcvtaq_m_s16_f16(__inactive, __a, __p) __arm_vcvtaq_m_s16_f16(__inactive, __a, __p) | |
759 | #define vcvtaq_m_u16_f16(__inactive, __a, __p) __arm_vcvtaq_m_u16_f16(__inactive, __a, __p) | |
760 | #define vcvtaq_m_s32_f32(__inactive, __a, __p) __arm_vcvtaq_m_s32_f32(__inactive, __a, __p) | |
761 | #define vcvtaq_m_u32_f32(__inactive, __a, __p) __arm_vcvtaq_m_u32_f32(__inactive, __a, __p) | |
762 | #define vcvtq_m_f16_s16(__inactive, __a, __p) __arm_vcvtq_m_f16_s16(__inactive, __a, __p) | |
763 | #define vcvtq_m_f16_u16(__inactive, __a, __p) __arm_vcvtq_m_f16_u16(__inactive, __a, __p) | |
764 | #define vcvtq_m_f32_s32(__inactive, __a, __p) __arm_vcvtq_m_f32_s32(__inactive, __a, __p) | |
765 | #define vcvtq_m_f32_u32(__inactive, __a, __p) __arm_vcvtq_m_f32_u32(__inactive, __a, __p) | |
766 | #define vqrshrnbq_n_s16(__a, __b, __imm) __arm_vqrshrnbq_n_s16(__a, __b, __imm) | |
767 | #define vqrshrnbq_n_u16(__a, __b, __imm) __arm_vqrshrnbq_n_u16(__a, __b, __imm) | |
768 | #define vqrshrnbq_n_s32(__a, __b, __imm) __arm_vqrshrnbq_n_s32(__a, __b, __imm) | |
769 | #define vqrshrnbq_n_u32(__a, __b, __imm) __arm_vqrshrnbq_n_u32(__a, __b, __imm) | |
770 | #define vqrshrunbq_n_s16(__a, __b, __imm) __arm_vqrshrunbq_n_s16(__a, __b, __imm) | |
771 | #define vqrshrunbq_n_s32(__a, __b, __imm) __arm_vqrshrunbq_n_s32(__a, __b, __imm) | |
772 | #define vrmlaldavhaq_s32(__a, __b, __c) __arm_vrmlaldavhaq_s32(__a, __b, __c) | |
773 | #define vrmlaldavhaq_u32(__a, __b, __c) __arm_vrmlaldavhaq_u32(__a, __b, __c) | |
774 | #define vshlcq_s8(__a, __b, __imm) __arm_vshlcq_s8(__a, __b, __imm) | |
775 | #define vshlcq_u8(__a, __b, __imm) __arm_vshlcq_u8(__a, __b, __imm) | |
776 | #define vshlcq_s16(__a, __b, __imm) __arm_vshlcq_s16(__a, __b, __imm) | |
777 | #define vshlcq_u16(__a, __b, __imm) __arm_vshlcq_u16(__a, __b, __imm) | |
778 | #define vshlcq_s32(__a, __b, __imm) __arm_vshlcq_s32(__a, __b, __imm) | |
779 | #define vshlcq_u32(__a, __b, __imm) __arm_vshlcq_u32(__a, __b, __imm) | |
780 | #define vabavq_u8(__a, __b, __c) __arm_vabavq_u8(__a, __b, __c) | |
781 | #define vabavq_u16(__a, __b, __c) __arm_vabavq_u16(__a, __b, __c) | |
782 | #define vabavq_u32(__a, __b, __c) __arm_vabavq_u32(__a, __b, __c) | |
8165795c SP |
783 | #define vpselq_u8(__a, __b, __p) __arm_vpselq_u8(__a, __b, __p) |
784 | #define vpselq_s8(__a, __b, __p) __arm_vpselq_s8(__a, __b, __p) | |
785 | #define vrev64q_m_u8(__inactive, __a, __p) __arm_vrev64q_m_u8(__inactive, __a, __p) | |
786 | #define vqrdmlashq_n_u8(__a, __b, __c) __arm_vqrdmlashq_n_u8(__a, __b, __c) | |
787 | #define vqrdmlahq_n_u8(__a, __b, __c) __arm_vqrdmlahq_n_u8(__a, __b, __c) | |
788 | #define vqdmlahq_n_u8(__a, __b, __c) __arm_vqdmlahq_n_u8(__a, __b, __c) | |
789 | #define vmvnq_m_u8(__inactive, __a, __p) __arm_vmvnq_m_u8(__inactive, __a, __p) | |
790 | #define vmlasq_n_u8(__a, __b, __c) __arm_vmlasq_n_u8(__a, __b, __c) | |
791 | #define vmlaq_n_u8(__a, __b, __c) __arm_vmlaq_n_u8(__a, __b, __c) | |
792 | #define vmladavq_p_u8(__a, __b, __p) __arm_vmladavq_p_u8(__a, __b, __p) | |
793 | #define vmladavaq_u8(__a, __b, __c) __arm_vmladavaq_u8(__a, __b, __c) | |
794 | #define vminvq_p_u8(__a, __b, __p) __arm_vminvq_p_u8(__a, __b, __p) | |
795 | #define vmaxvq_p_u8(__a, __b, __p) __arm_vmaxvq_p_u8(__a, __b, __p) | |
796 | #define vdupq_m_n_u8(__inactive, __a, __p) __arm_vdupq_m_n_u8(__inactive, __a, __p) | |
797 | #define vcmpneq_m_u8(__a, __b, __p) __arm_vcmpneq_m_u8(__a, __b, __p) | |
798 | #define vcmpneq_m_n_u8(__a, __b, __p) __arm_vcmpneq_m_n_u8(__a, __b, __p) | |
799 | #define vcmphiq_m_u8(__a, __b, __p) __arm_vcmphiq_m_u8(__a, __b, __p) | |
800 | #define vcmphiq_m_n_u8(__a, __b, __p) __arm_vcmphiq_m_n_u8(__a, __b, __p) | |
801 | #define vcmpeqq_m_u8(__a, __b, __p) __arm_vcmpeqq_m_u8(__a, __b, __p) | |
802 | #define vcmpeqq_m_n_u8(__a, __b, __p) __arm_vcmpeqq_m_n_u8(__a, __b, __p) | |
803 | #define vcmpcsq_m_u8(__a, __b, __p) __arm_vcmpcsq_m_u8(__a, __b, __p) | |
804 | #define vcmpcsq_m_n_u8(__a, __b, __p) __arm_vcmpcsq_m_n_u8(__a, __b, __p) | |
805 | #define vclzq_m_u8(__inactive, __a, __p) __arm_vclzq_m_u8(__inactive, __a, __p) | |
806 | #define vaddvaq_p_u8(__a, __b, __p) __arm_vaddvaq_p_u8(__a, __b, __p) | |
807 | #define vsriq_n_u8(__a, __b, __imm) __arm_vsriq_n_u8(__a, __b, __imm) | |
808 | #define vsliq_n_u8(__a, __b, __imm) __arm_vsliq_n_u8(__a, __b, __imm) | |
809 | #define vshlq_m_r_u8(__a, __b, __p) __arm_vshlq_m_r_u8(__a, __b, __p) | |
810 | #define vrshlq_m_n_u8(__a, __b, __p) __arm_vrshlq_m_n_u8(__a, __b, __p) | |
811 | #define vqshlq_m_r_u8(__a, __b, __p) __arm_vqshlq_m_r_u8(__a, __b, __p) | |
812 | #define vqrshlq_m_n_u8(__a, __b, __p) __arm_vqrshlq_m_n_u8(__a, __b, __p) | |
813 | #define vminavq_p_s8(__a, __b, __p) __arm_vminavq_p_s8(__a, __b, __p) | |
814 | #define vminaq_m_s8(__a, __b, __p) __arm_vminaq_m_s8(__a, __b, __p) | |
815 | #define vmaxavq_p_s8(__a, __b, __p) __arm_vmaxavq_p_s8(__a, __b, __p) | |
816 | #define vmaxaq_m_s8(__a, __b, __p) __arm_vmaxaq_m_s8(__a, __b, __p) | |
817 | #define vcmpneq_m_s8(__a, __b, __p) __arm_vcmpneq_m_s8(__a, __b, __p) | |
818 | #define vcmpneq_m_n_s8(__a, __b, __p) __arm_vcmpneq_m_n_s8(__a, __b, __p) | |
819 | #define vcmpltq_m_s8(__a, __b, __p) __arm_vcmpltq_m_s8(__a, __b, __p) | |
820 | #define vcmpltq_m_n_s8(__a, __b, __p) __arm_vcmpltq_m_n_s8(__a, __b, __p) | |
821 | #define vcmpleq_m_s8(__a, __b, __p) __arm_vcmpleq_m_s8(__a, __b, __p) | |
822 | #define vcmpleq_m_n_s8(__a, __b, __p) __arm_vcmpleq_m_n_s8(__a, __b, __p) | |
823 | #define vcmpgtq_m_s8(__a, __b, __p) __arm_vcmpgtq_m_s8(__a, __b, __p) | |
824 | #define vcmpgtq_m_n_s8(__a, __b, __p) __arm_vcmpgtq_m_n_s8(__a, __b, __p) | |
825 | #define vcmpgeq_m_s8(__a, __b, __p) __arm_vcmpgeq_m_s8(__a, __b, __p) | |
826 | #define vcmpgeq_m_n_s8(__a, __b, __p) __arm_vcmpgeq_m_n_s8(__a, __b, __p) | |
827 | #define vcmpeqq_m_s8(__a, __b, __p) __arm_vcmpeqq_m_s8(__a, __b, __p) | |
828 | #define vcmpeqq_m_n_s8(__a, __b, __p) __arm_vcmpeqq_m_n_s8(__a, __b, __p) | |
829 | #define vshlq_m_r_s8(__a, __b, __p) __arm_vshlq_m_r_s8(__a, __b, __p) | |
830 | #define vrshlq_m_n_s8(__a, __b, __p) __arm_vrshlq_m_n_s8(__a, __b, __p) | |
831 | #define vrev64q_m_s8(__inactive, __a, __p) __arm_vrev64q_m_s8(__inactive, __a, __p) | |
832 | #define vqshlq_m_r_s8(__a, __b, __p) __arm_vqshlq_m_r_s8(__a, __b, __p) | |
833 | #define vqrshlq_m_n_s8(__a, __b, __p) __arm_vqrshlq_m_n_s8(__a, __b, __p) | |
834 | #define vqnegq_m_s8(__inactive, __a, __p) __arm_vqnegq_m_s8(__inactive, __a, __p) | |
835 | #define vqabsq_m_s8(__inactive, __a, __p) __arm_vqabsq_m_s8(__inactive, __a, __p) | |
836 | #define vnegq_m_s8(__inactive, __a, __p) __arm_vnegq_m_s8(__inactive, __a, __p) | |
837 | #define vmvnq_m_s8(__inactive, __a, __p) __arm_vmvnq_m_s8(__inactive, __a, __p) | |
838 | #define vmlsdavxq_p_s8(__a, __b, __p) __arm_vmlsdavxq_p_s8(__a, __b, __p) | |
839 | #define vmlsdavq_p_s8(__a, __b, __p) __arm_vmlsdavq_p_s8(__a, __b, __p) | |
840 | #define vmladavxq_p_s8(__a, __b, __p) __arm_vmladavxq_p_s8(__a, __b, __p) | |
841 | #define vmladavq_p_s8(__a, __b, __p) __arm_vmladavq_p_s8(__a, __b, __p) | |
842 | #define vminvq_p_s8(__a, __b, __p) __arm_vminvq_p_s8(__a, __b, __p) | |
843 | #define vmaxvq_p_s8(__a, __b, __p) __arm_vmaxvq_p_s8(__a, __b, __p) | |
844 | #define vdupq_m_n_s8(__inactive, __a, __p) __arm_vdupq_m_n_s8(__inactive, __a, __p) | |
845 | #define vclzq_m_s8(__inactive, __a, __p) __arm_vclzq_m_s8(__inactive, __a, __p) | |
846 | #define vclsq_m_s8(__inactive, __a, __p) __arm_vclsq_m_s8(__inactive, __a, __p) | |
847 | #define vaddvaq_p_s8(__a, __b, __p) __arm_vaddvaq_p_s8(__a, __b, __p) | |
848 | #define vabsq_m_s8(__inactive, __a, __p) __arm_vabsq_m_s8(__inactive, __a, __p) | |
849 | #define vqrdmlsdhxq_s8(__inactive, __a, __b) __arm_vqrdmlsdhxq_s8(__inactive, __a, __b) | |
850 | #define vqrdmlsdhq_s8(__inactive, __a, __b) __arm_vqrdmlsdhq_s8(__inactive, __a, __b) | |
851 | #define vqrdmlashq_n_s8(__a, __b, __c) __arm_vqrdmlashq_n_s8(__a, __b, __c) | |
852 | #define vqrdmlahq_n_s8(__a, __b, __c) __arm_vqrdmlahq_n_s8(__a, __b, __c) | |
853 | #define vqrdmladhxq_s8(__inactive, __a, __b) __arm_vqrdmladhxq_s8(__inactive, __a, __b) | |
854 | #define vqrdmladhq_s8(__inactive, __a, __b) __arm_vqrdmladhq_s8(__inactive, __a, __b) | |
855 | #define vqdmlsdhxq_s8(__inactive, __a, __b) __arm_vqdmlsdhxq_s8(__inactive, __a, __b) | |
856 | #define vqdmlsdhq_s8(__inactive, __a, __b) __arm_vqdmlsdhq_s8(__inactive, __a, __b) | |
857 | #define vqdmlahq_n_s8(__a, __b, __c) __arm_vqdmlahq_n_s8(__a, __b, __c) | |
858 | #define vqdmladhxq_s8(__inactive, __a, __b) __arm_vqdmladhxq_s8(__inactive, __a, __b) | |
859 | #define vqdmladhq_s8(__inactive, __a, __b) __arm_vqdmladhq_s8(__inactive, __a, __b) | |
860 | #define vmlsdavaxq_s8(__a, __b, __c) __arm_vmlsdavaxq_s8(__a, __b, __c) | |
861 | #define vmlsdavaq_s8(__a, __b, __c) __arm_vmlsdavaq_s8(__a, __b, __c) | |
862 | #define vmlasq_n_s8(__a, __b, __c) __arm_vmlasq_n_s8(__a, __b, __c) | |
863 | #define vmlaq_n_s8(__a, __b, __c) __arm_vmlaq_n_s8(__a, __b, __c) | |
864 | #define vmladavaxq_s8(__a, __b, __c) __arm_vmladavaxq_s8(__a, __b, __c) | |
865 | #define vmladavaq_s8(__a, __b, __c) __arm_vmladavaq_s8(__a, __b, __c) | |
866 | #define vsriq_n_s8(__a, __b, __imm) __arm_vsriq_n_s8(__a, __b, __imm) | |
867 | #define vsliq_n_s8(__a, __b, __imm) __arm_vsliq_n_s8(__a, __b, __imm) | |
868 | #define vpselq_u16(__a, __b, __p) __arm_vpselq_u16(__a, __b, __p) | |
869 | #define vpselq_s16(__a, __b, __p) __arm_vpselq_s16(__a, __b, __p) | |
870 | #define vrev64q_m_u16(__inactive, __a, __p) __arm_vrev64q_m_u16(__inactive, __a, __p) | |
871 | #define vqrdmlashq_n_u16(__a, __b, __c) __arm_vqrdmlashq_n_u16(__a, __b, __c) | |
872 | #define vqrdmlahq_n_u16(__a, __b, __c) __arm_vqrdmlahq_n_u16(__a, __b, __c) | |
873 | #define vqdmlahq_n_u16(__a, __b, __c) __arm_vqdmlahq_n_u16(__a, __b, __c) | |
874 | #define vmvnq_m_u16(__inactive, __a, __p) __arm_vmvnq_m_u16(__inactive, __a, __p) | |
875 | #define vmlasq_n_u16(__a, __b, __c) __arm_vmlasq_n_u16(__a, __b, __c) | |
876 | #define vmlaq_n_u16(__a, __b, __c) __arm_vmlaq_n_u16(__a, __b, __c) | |
877 | #define vmladavq_p_u16(__a, __b, __p) __arm_vmladavq_p_u16(__a, __b, __p) | |
878 | #define vmladavaq_u16(__a, __b, __c) __arm_vmladavaq_u16(__a, __b, __c) | |
879 | #define vminvq_p_u16(__a, __b, __p) __arm_vminvq_p_u16(__a, __b, __p) | |
880 | #define vmaxvq_p_u16(__a, __b, __p) __arm_vmaxvq_p_u16(__a, __b, __p) | |
881 | #define vdupq_m_n_u16(__inactive, __a, __p) __arm_vdupq_m_n_u16(__inactive, __a, __p) | |
882 | #define vcmpneq_m_u16(__a, __b, __p) __arm_vcmpneq_m_u16(__a, __b, __p) | |
883 | #define vcmpneq_m_n_u16(__a, __b, __p) __arm_vcmpneq_m_n_u16(__a, __b, __p) | |
884 | #define vcmphiq_m_u16(__a, __b, __p) __arm_vcmphiq_m_u16(__a, __b, __p) | |
885 | #define vcmphiq_m_n_u16(__a, __b, __p) __arm_vcmphiq_m_n_u16(__a, __b, __p) | |
886 | #define vcmpeqq_m_u16(__a, __b, __p) __arm_vcmpeqq_m_u16(__a, __b, __p) | |
887 | #define vcmpeqq_m_n_u16(__a, __b, __p) __arm_vcmpeqq_m_n_u16(__a, __b, __p) | |
888 | #define vcmpcsq_m_u16(__a, __b, __p) __arm_vcmpcsq_m_u16(__a, __b, __p) | |
889 | #define vcmpcsq_m_n_u16(__a, __b, __p) __arm_vcmpcsq_m_n_u16(__a, __b, __p) | |
890 | #define vclzq_m_u16(__inactive, __a, __p) __arm_vclzq_m_u16(__inactive, __a, __p) | |
891 | #define vaddvaq_p_u16(__a, __b, __p) __arm_vaddvaq_p_u16(__a, __b, __p) | |
892 | #define vsriq_n_u16(__a, __b, __imm) __arm_vsriq_n_u16(__a, __b, __imm) | |
893 | #define vsliq_n_u16(__a, __b, __imm) __arm_vsliq_n_u16(__a, __b, __imm) | |
894 | #define vshlq_m_r_u16(__a, __b, __p) __arm_vshlq_m_r_u16(__a, __b, __p) | |
895 | #define vrshlq_m_n_u16(__a, __b, __p) __arm_vrshlq_m_n_u16(__a, __b, __p) | |
896 | #define vqshlq_m_r_u16(__a, __b, __p) __arm_vqshlq_m_r_u16(__a, __b, __p) | |
897 | #define vqrshlq_m_n_u16(__a, __b, __p) __arm_vqrshlq_m_n_u16(__a, __b, __p) | |
898 | #define vminavq_p_s16(__a, __b, __p) __arm_vminavq_p_s16(__a, __b, __p) | |
899 | #define vminaq_m_s16(__a, __b, __p) __arm_vminaq_m_s16(__a, __b, __p) | |
900 | #define vmaxavq_p_s16(__a, __b, __p) __arm_vmaxavq_p_s16(__a, __b, __p) | |
901 | #define vmaxaq_m_s16(__a, __b, __p) __arm_vmaxaq_m_s16(__a, __b, __p) | |
902 | #define vcmpneq_m_s16(__a, __b, __p) __arm_vcmpneq_m_s16(__a, __b, __p) | |
903 | #define vcmpneq_m_n_s16(__a, __b, __p) __arm_vcmpneq_m_n_s16(__a, __b, __p) | |
904 | #define vcmpltq_m_s16(__a, __b, __p) __arm_vcmpltq_m_s16(__a, __b, __p) | |
905 | #define vcmpltq_m_n_s16(__a, __b, __p) __arm_vcmpltq_m_n_s16(__a, __b, __p) | |
906 | #define vcmpleq_m_s16(__a, __b, __p) __arm_vcmpleq_m_s16(__a, __b, __p) | |
907 | #define vcmpleq_m_n_s16(__a, __b, __p) __arm_vcmpleq_m_n_s16(__a, __b, __p) | |
908 | #define vcmpgtq_m_s16(__a, __b, __p) __arm_vcmpgtq_m_s16(__a, __b, __p) | |
909 | #define vcmpgtq_m_n_s16(__a, __b, __p) __arm_vcmpgtq_m_n_s16(__a, __b, __p) | |
910 | #define vcmpgeq_m_s16(__a, __b, __p) __arm_vcmpgeq_m_s16(__a, __b, __p) | |
911 | #define vcmpgeq_m_n_s16(__a, __b, __p) __arm_vcmpgeq_m_n_s16(__a, __b, __p) | |
912 | #define vcmpeqq_m_s16(__a, __b, __p) __arm_vcmpeqq_m_s16(__a, __b, __p) | |
913 | #define vcmpeqq_m_n_s16(__a, __b, __p) __arm_vcmpeqq_m_n_s16(__a, __b, __p) | |
914 | #define vshlq_m_r_s16(__a, __b, __p) __arm_vshlq_m_r_s16(__a, __b, __p) | |
915 | #define vrshlq_m_n_s16(__a, __b, __p) __arm_vrshlq_m_n_s16(__a, __b, __p) | |
916 | #define vrev64q_m_s16(__inactive, __a, __p) __arm_vrev64q_m_s16(__inactive, __a, __p) | |
917 | #define vqshlq_m_r_s16(__a, __b, __p) __arm_vqshlq_m_r_s16(__a, __b, __p) | |
918 | #define vqrshlq_m_n_s16(__a, __b, __p) __arm_vqrshlq_m_n_s16(__a, __b, __p) | |
919 | #define vqnegq_m_s16(__inactive, __a, __p) __arm_vqnegq_m_s16(__inactive, __a, __p) | |
920 | #define vqabsq_m_s16(__inactive, __a, __p) __arm_vqabsq_m_s16(__inactive, __a, __p) | |
921 | #define vnegq_m_s16(__inactive, __a, __p) __arm_vnegq_m_s16(__inactive, __a, __p) | |
922 | #define vmvnq_m_s16(__inactive, __a, __p) __arm_vmvnq_m_s16(__inactive, __a, __p) | |
923 | #define vmlsdavxq_p_s16(__a, __b, __p) __arm_vmlsdavxq_p_s16(__a, __b, __p) | |
924 | #define vmlsdavq_p_s16(__a, __b, __p) __arm_vmlsdavq_p_s16(__a, __b, __p) | |
925 | #define vmladavxq_p_s16(__a, __b, __p) __arm_vmladavxq_p_s16(__a, __b, __p) | |
926 | #define vmladavq_p_s16(__a, __b, __p) __arm_vmladavq_p_s16(__a, __b, __p) | |
927 | #define vminvq_p_s16(__a, __b, __p) __arm_vminvq_p_s16(__a, __b, __p) | |
928 | #define vmaxvq_p_s16(__a, __b, __p) __arm_vmaxvq_p_s16(__a, __b, __p) | |
929 | #define vdupq_m_n_s16(__inactive, __a, __p) __arm_vdupq_m_n_s16(__inactive, __a, __p) | |
930 | #define vclzq_m_s16(__inactive, __a, __p) __arm_vclzq_m_s16(__inactive, __a, __p) | |
931 | #define vclsq_m_s16(__inactive, __a, __p) __arm_vclsq_m_s16(__inactive, __a, __p) | |
932 | #define vaddvaq_p_s16(__a, __b, __p) __arm_vaddvaq_p_s16(__a, __b, __p) | |
933 | #define vabsq_m_s16(__inactive, __a, __p) __arm_vabsq_m_s16(__inactive, __a, __p) | |
934 | #define vqrdmlsdhxq_s16(__inactive, __a, __b) __arm_vqrdmlsdhxq_s16(__inactive, __a, __b) | |
935 | #define vqrdmlsdhq_s16(__inactive, __a, __b) __arm_vqrdmlsdhq_s16(__inactive, __a, __b) | |
936 | #define vqrdmlashq_n_s16(__a, __b, __c) __arm_vqrdmlashq_n_s16(__a, __b, __c) | |
937 | #define vqrdmlahq_n_s16(__a, __b, __c) __arm_vqrdmlahq_n_s16(__a, __b, __c) | |
938 | #define vqrdmladhxq_s16(__inactive, __a, __b) __arm_vqrdmladhxq_s16(__inactive, __a, __b) | |
939 | #define vqrdmladhq_s16(__inactive, __a, __b) __arm_vqrdmladhq_s16(__inactive, __a, __b) | |
940 | #define vqdmlsdhxq_s16(__inactive, __a, __b) __arm_vqdmlsdhxq_s16(__inactive, __a, __b) | |
941 | #define vqdmlsdhq_s16(__inactive, __a, __b) __arm_vqdmlsdhq_s16(__inactive, __a, __b) | |
942 | #define vqdmlahq_n_s16(__a, __b, __c) __arm_vqdmlahq_n_s16(__a, __b, __c) | |
943 | #define vqdmladhxq_s16(__inactive, __a, __b) __arm_vqdmladhxq_s16(__inactive, __a, __b) | |
944 | #define vqdmladhq_s16(__inactive, __a, __b) __arm_vqdmladhq_s16(__inactive, __a, __b) | |
945 | #define vmlsdavaxq_s16(__a, __b, __c) __arm_vmlsdavaxq_s16(__a, __b, __c) | |
946 | #define vmlsdavaq_s16(__a, __b, __c) __arm_vmlsdavaq_s16(__a, __b, __c) | |
947 | #define vmlasq_n_s16(__a, __b, __c) __arm_vmlasq_n_s16(__a, __b, __c) | |
948 | #define vmlaq_n_s16(__a, __b, __c) __arm_vmlaq_n_s16(__a, __b, __c) | |
949 | #define vmladavaxq_s16(__a, __b, __c) __arm_vmladavaxq_s16(__a, __b, __c) | |
950 | #define vmladavaq_s16(__a, __b, __c) __arm_vmladavaq_s16(__a, __b, __c) | |
951 | #define vsriq_n_s16(__a, __b, __imm) __arm_vsriq_n_s16(__a, __b, __imm) | |
952 | #define vsliq_n_s16(__a, __b, __imm) __arm_vsliq_n_s16(__a, __b, __imm) | |
953 | #define vpselq_u32(__a, __b, __p) __arm_vpselq_u32(__a, __b, __p) | |
954 | #define vpselq_s32(__a, __b, __p) __arm_vpselq_s32(__a, __b, __p) | |
955 | #define vrev64q_m_u32(__inactive, __a, __p) __arm_vrev64q_m_u32(__inactive, __a, __p) | |
956 | #define vqrdmlashq_n_u32(__a, __b, __c) __arm_vqrdmlashq_n_u32(__a, __b, __c) | |
957 | #define vqrdmlahq_n_u32(__a, __b, __c) __arm_vqrdmlahq_n_u32(__a, __b, __c) | |
958 | #define vqdmlahq_n_u32(__a, __b, __c) __arm_vqdmlahq_n_u32(__a, __b, __c) | |
959 | #define vmvnq_m_u32(__inactive, __a, __p) __arm_vmvnq_m_u32(__inactive, __a, __p) | |
960 | #define vmlasq_n_u32(__a, __b, __c) __arm_vmlasq_n_u32(__a, __b, __c) | |
961 | #define vmlaq_n_u32(__a, __b, __c) __arm_vmlaq_n_u32(__a, __b, __c) | |
962 | #define vmladavq_p_u32(__a, __b, __p) __arm_vmladavq_p_u32(__a, __b, __p) | |
963 | #define vmladavaq_u32(__a, __b, __c) __arm_vmladavaq_u32(__a, __b, __c) | |
964 | #define vminvq_p_u32(__a, __b, __p) __arm_vminvq_p_u32(__a, __b, __p) | |
965 | #define vmaxvq_p_u32(__a, __b, __p) __arm_vmaxvq_p_u32(__a, __b, __p) | |
966 | #define vdupq_m_n_u32(__inactive, __a, __p) __arm_vdupq_m_n_u32(__inactive, __a, __p) | |
967 | #define vcmpneq_m_u32(__a, __b, __p) __arm_vcmpneq_m_u32(__a, __b, __p) | |
968 | #define vcmpneq_m_n_u32(__a, __b, __p) __arm_vcmpneq_m_n_u32(__a, __b, __p) | |
969 | #define vcmphiq_m_u32(__a, __b, __p) __arm_vcmphiq_m_u32(__a, __b, __p) | |
970 | #define vcmphiq_m_n_u32(__a, __b, __p) __arm_vcmphiq_m_n_u32(__a, __b, __p) | |
971 | #define vcmpeqq_m_u32(__a, __b, __p) __arm_vcmpeqq_m_u32(__a, __b, __p) | |
972 | #define vcmpeqq_m_n_u32(__a, __b, __p) __arm_vcmpeqq_m_n_u32(__a, __b, __p) | |
973 | #define vcmpcsq_m_u32(__a, __b, __p) __arm_vcmpcsq_m_u32(__a, __b, __p) | |
974 | #define vcmpcsq_m_n_u32(__a, __b, __p) __arm_vcmpcsq_m_n_u32(__a, __b, __p) | |
975 | #define vclzq_m_u32(__inactive, __a, __p) __arm_vclzq_m_u32(__inactive, __a, __p) | |
976 | #define vaddvaq_p_u32(__a, __b, __p) __arm_vaddvaq_p_u32(__a, __b, __p) | |
977 | #define vsriq_n_u32(__a, __b, __imm) __arm_vsriq_n_u32(__a, __b, __imm) | |
978 | #define vsliq_n_u32(__a, __b, __imm) __arm_vsliq_n_u32(__a, __b, __imm) | |
979 | #define vshlq_m_r_u32(__a, __b, __p) __arm_vshlq_m_r_u32(__a, __b, __p) | |
980 | #define vrshlq_m_n_u32(__a, __b, __p) __arm_vrshlq_m_n_u32(__a, __b, __p) | |
981 | #define vqshlq_m_r_u32(__a, __b, __p) __arm_vqshlq_m_r_u32(__a, __b, __p) | |
982 | #define vqrshlq_m_n_u32(__a, __b, __p) __arm_vqrshlq_m_n_u32(__a, __b, __p) | |
983 | #define vminavq_p_s32(__a, __b, __p) __arm_vminavq_p_s32(__a, __b, __p) | |
984 | #define vminaq_m_s32(__a, __b, __p) __arm_vminaq_m_s32(__a, __b, __p) | |
985 | #define vmaxavq_p_s32(__a, __b, __p) __arm_vmaxavq_p_s32(__a, __b, __p) | |
986 | #define vmaxaq_m_s32(__a, __b, __p) __arm_vmaxaq_m_s32(__a, __b, __p) | |
987 | #define vcmpneq_m_s32(__a, __b, __p) __arm_vcmpneq_m_s32(__a, __b, __p) | |
988 | #define vcmpneq_m_n_s32(__a, __b, __p) __arm_vcmpneq_m_n_s32(__a, __b, __p) | |
989 | #define vcmpltq_m_s32(__a, __b, __p) __arm_vcmpltq_m_s32(__a, __b, __p) | |
990 | #define vcmpltq_m_n_s32(__a, __b, __p) __arm_vcmpltq_m_n_s32(__a, __b, __p) | |
991 | #define vcmpleq_m_s32(__a, __b, __p) __arm_vcmpleq_m_s32(__a, __b, __p) | |
992 | #define vcmpleq_m_n_s32(__a, __b, __p) __arm_vcmpleq_m_n_s32(__a, __b, __p) | |
993 | #define vcmpgtq_m_s32(__a, __b, __p) __arm_vcmpgtq_m_s32(__a, __b, __p) | |
994 | #define vcmpgtq_m_n_s32(__a, __b, __p) __arm_vcmpgtq_m_n_s32(__a, __b, __p) | |
995 | #define vcmpgeq_m_s32(__a, __b, __p) __arm_vcmpgeq_m_s32(__a, __b, __p) | |
996 | #define vcmpgeq_m_n_s32(__a, __b, __p) __arm_vcmpgeq_m_n_s32(__a, __b, __p) | |
997 | #define vcmpeqq_m_s32(__a, __b, __p) __arm_vcmpeqq_m_s32(__a, __b, __p) | |
998 | #define vcmpeqq_m_n_s32(__a, __b, __p) __arm_vcmpeqq_m_n_s32(__a, __b, __p) | |
999 | #define vshlq_m_r_s32(__a, __b, __p) __arm_vshlq_m_r_s32(__a, __b, __p) | |
1000 | #define vrshlq_m_n_s32(__a, __b, __p) __arm_vrshlq_m_n_s32(__a, __b, __p) | |
1001 | #define vrev64q_m_s32(__inactive, __a, __p) __arm_vrev64q_m_s32(__inactive, __a, __p) | |
1002 | #define vqshlq_m_r_s32(__a, __b, __p) __arm_vqshlq_m_r_s32(__a, __b, __p) | |
1003 | #define vqrshlq_m_n_s32(__a, __b, __p) __arm_vqrshlq_m_n_s32(__a, __b, __p) | |
1004 | #define vqnegq_m_s32(__inactive, __a, __p) __arm_vqnegq_m_s32(__inactive, __a, __p) | |
1005 | #define vqabsq_m_s32(__inactive, __a, __p) __arm_vqabsq_m_s32(__inactive, __a, __p) | |
1006 | #define vnegq_m_s32(__inactive, __a, __p) __arm_vnegq_m_s32(__inactive, __a, __p) | |
1007 | #define vmvnq_m_s32(__inactive, __a, __p) __arm_vmvnq_m_s32(__inactive, __a, __p) | |
1008 | #define vmlsdavxq_p_s32(__a, __b, __p) __arm_vmlsdavxq_p_s32(__a, __b, __p) | |
1009 | #define vmlsdavq_p_s32(__a, __b, __p) __arm_vmlsdavq_p_s32(__a, __b, __p) | |
1010 | #define vmladavxq_p_s32(__a, __b, __p) __arm_vmladavxq_p_s32(__a, __b, __p) | |
1011 | #define vmladavq_p_s32(__a, __b, __p) __arm_vmladavq_p_s32(__a, __b, __p) | |
1012 | #define vminvq_p_s32(__a, __b, __p) __arm_vminvq_p_s32(__a, __b, __p) | |
1013 | #define vmaxvq_p_s32(__a, __b, __p) __arm_vmaxvq_p_s32(__a, __b, __p) | |
1014 | #define vdupq_m_n_s32(__inactive, __a, __p) __arm_vdupq_m_n_s32(__inactive, __a, __p) | |
1015 | #define vclzq_m_s32(__inactive, __a, __p) __arm_vclzq_m_s32(__inactive, __a, __p) | |
1016 | #define vclsq_m_s32(__inactive, __a, __p) __arm_vclsq_m_s32(__inactive, __a, __p) | |
1017 | #define vaddvaq_p_s32(__a, __b, __p) __arm_vaddvaq_p_s32(__a, __b, __p) | |
1018 | #define vabsq_m_s32(__inactive, __a, __p) __arm_vabsq_m_s32(__inactive, __a, __p) | |
1019 | #define vqrdmlsdhxq_s32(__inactive, __a, __b) __arm_vqrdmlsdhxq_s32(__inactive, __a, __b) | |
1020 | #define vqrdmlsdhq_s32(__inactive, __a, __b) __arm_vqrdmlsdhq_s32(__inactive, __a, __b) | |
1021 | #define vqrdmlashq_n_s32(__a, __b, __c) __arm_vqrdmlashq_n_s32(__a, __b, __c) | |
1022 | #define vqrdmlahq_n_s32(__a, __b, __c) __arm_vqrdmlahq_n_s32(__a, __b, __c) | |
1023 | #define vqrdmladhxq_s32(__inactive, __a, __b) __arm_vqrdmladhxq_s32(__inactive, __a, __b) | |
1024 | #define vqrdmladhq_s32(__inactive, __a, __b) __arm_vqrdmladhq_s32(__inactive, __a, __b) | |
1025 | #define vqdmlsdhxq_s32(__inactive, __a, __b) __arm_vqdmlsdhxq_s32(__inactive, __a, __b) | |
1026 | #define vqdmlsdhq_s32(__inactive, __a, __b) __arm_vqdmlsdhq_s32(__inactive, __a, __b) | |
1027 | #define vqdmlahq_n_s32(__a, __b, __c) __arm_vqdmlahq_n_s32(__a, __b, __c) | |
1028 | #define vqdmladhxq_s32(__inactive, __a, __b) __arm_vqdmladhxq_s32(__inactive, __a, __b) | |
1029 | #define vqdmladhq_s32(__inactive, __a, __b) __arm_vqdmladhq_s32(__inactive, __a, __b) | |
1030 | #define vmlsdavaxq_s32(__a, __b, __c) __arm_vmlsdavaxq_s32(__a, __b, __c) | |
1031 | #define vmlsdavaq_s32(__a, __b, __c) __arm_vmlsdavaq_s32(__a, __b, __c) | |
1032 | #define vmlasq_n_s32(__a, __b, __c) __arm_vmlasq_n_s32(__a, __b, __c) | |
1033 | #define vmlaq_n_s32(__a, __b, __c) __arm_vmlaq_n_s32(__a, __b, __c) | |
1034 | #define vmladavaxq_s32(__a, __b, __c) __arm_vmladavaxq_s32(__a, __b, __c) | |
1035 | #define vmladavaq_s32(__a, __b, __c) __arm_vmladavaq_s32(__a, __b, __c) | |
1036 | #define vsriq_n_s32(__a, __b, __imm) __arm_vsriq_n_s32(__a, __b, __imm) | |
1037 | #define vsliq_n_s32(__a, __b, __imm) __arm_vsliq_n_s32(__a, __b, __imm) | |
1038 | #define vpselq_u64(__a, __b, __p) __arm_vpselq_u64(__a, __b, __p) | |
1039 | #define vpselq_s64(__a, __b, __p) __arm_vpselq_s64(__a, __b, __p) | |
e3678b44 SP |
1040 | #define vrmlaldavhaxq_s32(__a, __b, __c) __arm_vrmlaldavhaxq_s32(__a, __b, __c) |
1041 | #define vrmlsldavhaq_s32(__a, __b, __c) __arm_vrmlsldavhaq_s32(__a, __b, __c) | |
1042 | #define vrmlsldavhaxq_s32(__a, __b, __c) __arm_vrmlsldavhaxq_s32(__a, __b, __c) | |
1043 | #define vaddlvaq_p_s32(__a, __b, __p) __arm_vaddlvaq_p_s32(__a, __b, __p) | |
1044 | #define vcvtbq_m_f16_f32(__a, __b, __p) __arm_vcvtbq_m_f16_f32(__a, __b, __p) | |
1045 | #define vcvtbq_m_f32_f16(__inactive, __a, __p) __arm_vcvtbq_m_f32_f16(__inactive, __a, __p) | |
1046 | #define vcvttq_m_f16_f32(__a, __b, __p) __arm_vcvttq_m_f16_f32(__a, __b, __p) | |
1047 | #define vcvttq_m_f32_f16(__inactive, __a, __p) __arm_vcvttq_m_f32_f16(__inactive, __a, __p) | |
1048 | #define vrev16q_m_s8(__inactive, __a, __p) __arm_vrev16q_m_s8(__inactive, __a, __p) | |
1049 | #define vrev32q_m_f16(__inactive, __a, __p) __arm_vrev32q_m_f16(__inactive, __a, __p) | |
1050 | #define vrmlaldavhq_p_s32(__a, __b, __p) __arm_vrmlaldavhq_p_s32(__a, __b, __p) | |
1051 | #define vrmlaldavhxq_p_s32(__a, __b, __p) __arm_vrmlaldavhxq_p_s32(__a, __b, __p) | |
1052 | #define vrmlsldavhq_p_s32(__a, __b, __p) __arm_vrmlsldavhq_p_s32(__a, __b, __p) | |
1053 | #define vrmlsldavhxq_p_s32(__a, __b, __p) __arm_vrmlsldavhxq_p_s32(__a, __b, __p) | |
1054 | #define vaddlvaq_p_u32(__a, __b, __p) __arm_vaddlvaq_p_u32(__a, __b, __p) | |
1055 | #define vrev16q_m_u8(__inactive, __a, __p) __arm_vrev16q_m_u8(__inactive, __a, __p) | |
1056 | #define vrmlaldavhq_p_u32(__a, __b, __p) __arm_vrmlaldavhq_p_u32(__a, __b, __p) | |
1057 | #define vmvnq_m_n_s16(__inactive, __imm, __p) __arm_vmvnq_m_n_s16(__inactive, __imm, __p) | |
1058 | #define vorrq_m_n_s16(__a, __imm, __p) __arm_vorrq_m_n_s16(__a, __imm, __p) | |
1059 | #define vqrshrntq_n_s16(__a, __b, __imm) __arm_vqrshrntq_n_s16(__a, __b, __imm) | |
1060 | #define vqshrnbq_n_s16(__a, __b, __imm) __arm_vqshrnbq_n_s16(__a, __b, __imm) | |
1061 | #define vqshrntq_n_s16(__a, __b, __imm) __arm_vqshrntq_n_s16(__a, __b, __imm) | |
1062 | #define vrshrnbq_n_s16(__a, __b, __imm) __arm_vrshrnbq_n_s16(__a, __b, __imm) | |
1063 | #define vrshrntq_n_s16(__a, __b, __imm) __arm_vrshrntq_n_s16(__a, __b, __imm) | |
1064 | #define vshrnbq_n_s16(__a, __b, __imm) __arm_vshrnbq_n_s16(__a, __b, __imm) | |
1065 | #define vshrntq_n_s16(__a, __b, __imm) __arm_vshrntq_n_s16(__a, __b, __imm) | |
1066 | #define vcmlaq_f16(__a, __b, __c) __arm_vcmlaq_f16(__a, __b, __c) | |
1067 | #define vcmlaq_rot180_f16(__a, __b, __c) __arm_vcmlaq_rot180_f16(__a, __b, __c) | |
1068 | #define vcmlaq_rot270_f16(__a, __b, __c) __arm_vcmlaq_rot270_f16(__a, __b, __c) | |
1069 | #define vcmlaq_rot90_f16(__a, __b, __c) __arm_vcmlaq_rot90_f16(__a, __b, __c) | |
1070 | #define vfmaq_f16(__a, __b, __c) __arm_vfmaq_f16(__a, __b, __c) | |
1071 | #define vfmaq_n_f16(__a, __b, __c) __arm_vfmaq_n_f16(__a, __b, __c) | |
1072 | #define vfmasq_n_f16(__a, __b, __c) __arm_vfmasq_n_f16(__a, __b, __c) | |
1073 | #define vfmsq_f16(__a, __b, __c) __arm_vfmsq_f16(__a, __b, __c) | |
1074 | #define vmlaldavaq_s16(__a, __b, __c) __arm_vmlaldavaq_s16(__a, __b, __c) | |
1075 | #define vmlaldavaxq_s16(__a, __b, __c) __arm_vmlaldavaxq_s16(__a, __b, __c) | |
1076 | #define vmlsldavaq_s16(__a, __b, __c) __arm_vmlsldavaq_s16(__a, __b, __c) | |
1077 | #define vmlsldavaxq_s16(__a, __b, __c) __arm_vmlsldavaxq_s16(__a, __b, __c) | |
1078 | #define vabsq_m_f16(__inactive, __a, __p) __arm_vabsq_m_f16(__inactive, __a, __p) | |
1079 | #define vcvtmq_m_s16_f16(__inactive, __a, __p) __arm_vcvtmq_m_s16_f16(__inactive, __a, __p) | |
1080 | #define vcvtnq_m_s16_f16(__inactive, __a, __p) __arm_vcvtnq_m_s16_f16(__inactive, __a, __p) | |
1081 | #define vcvtpq_m_s16_f16(__inactive, __a, __p) __arm_vcvtpq_m_s16_f16(__inactive, __a, __p) | |
1082 | #define vcvtq_m_s16_f16(__inactive, __a, __p) __arm_vcvtq_m_s16_f16(__inactive, __a, __p) | |
1083 | #define vdupq_m_n_f16(__inactive, __a, __p) __arm_vdupq_m_n_f16(__inactive, __a, __p) | |
1084 | #define vmaxnmaq_m_f16(__a, __b, __p) __arm_vmaxnmaq_m_f16(__a, __b, __p) | |
1085 | #define vmaxnmavq_p_f16(__a, __b, __p) __arm_vmaxnmavq_p_f16(__a, __b, __p) | |
1086 | #define vmaxnmvq_p_f16(__a, __b, __p) __arm_vmaxnmvq_p_f16(__a, __b, __p) | |
1087 | #define vminnmaq_m_f16(__a, __b, __p) __arm_vminnmaq_m_f16(__a, __b, __p) | |
1088 | #define vminnmavq_p_f16(__a, __b, __p) __arm_vminnmavq_p_f16(__a, __b, __p) | |
1089 | #define vminnmvq_p_f16(__a, __b, __p) __arm_vminnmvq_p_f16(__a, __b, __p) | |
1090 | #define vmlaldavq_p_s16(__a, __b, __p) __arm_vmlaldavq_p_s16(__a, __b, __p) | |
1091 | #define vmlaldavxq_p_s16(__a, __b, __p) __arm_vmlaldavxq_p_s16(__a, __b, __p) | |
1092 | #define vmlsldavq_p_s16(__a, __b, __p) __arm_vmlsldavq_p_s16(__a, __b, __p) | |
1093 | #define vmlsldavxq_p_s16(__a, __b, __p) __arm_vmlsldavxq_p_s16(__a, __b, __p) | |
1094 | #define vmovlbq_m_s8(__inactive, __a, __p) __arm_vmovlbq_m_s8(__inactive, __a, __p) | |
1095 | #define vmovltq_m_s8(__inactive, __a, __p) __arm_vmovltq_m_s8(__inactive, __a, __p) | |
1096 | #define vmovnbq_m_s16(__a, __b, __p) __arm_vmovnbq_m_s16(__a, __b, __p) | |
1097 | #define vmovntq_m_s16(__a, __b, __p) __arm_vmovntq_m_s16(__a, __b, __p) | |
1098 | #define vnegq_m_f16(__inactive, __a, __p) __arm_vnegq_m_f16(__inactive, __a, __p) | |
1099 | #define vpselq_f16(__a, __b, __p) __arm_vpselq_f16(__a, __b, __p) | |
1100 | #define vqmovnbq_m_s16(__a, __b, __p) __arm_vqmovnbq_m_s16(__a, __b, __p) | |
1101 | #define vqmovntq_m_s16(__a, __b, __p) __arm_vqmovntq_m_s16(__a, __b, __p) | |
1102 | #define vrev32q_m_s8(__inactive, __a, __p) __arm_vrev32q_m_s8(__inactive, __a, __p) | |
1103 | #define vrev64q_m_f16(__inactive, __a, __p) __arm_vrev64q_m_f16(__inactive, __a, __p) | |
1104 | #define vrndaq_m_f16(__inactive, __a, __p) __arm_vrndaq_m_f16(__inactive, __a, __p) | |
1105 | #define vrndmq_m_f16(__inactive, __a, __p) __arm_vrndmq_m_f16(__inactive, __a, __p) | |
1106 | #define vrndnq_m_f16(__inactive, __a, __p) __arm_vrndnq_m_f16(__inactive, __a, __p) | |
1107 | #define vrndpq_m_f16(__inactive, __a, __p) __arm_vrndpq_m_f16(__inactive, __a, __p) | |
1108 | #define vrndq_m_f16(__inactive, __a, __p) __arm_vrndq_m_f16(__inactive, __a, __p) | |
1109 | #define vrndxq_m_f16(__inactive, __a, __p) __arm_vrndxq_m_f16(__inactive, __a, __p) | |
1110 | #define vcmpeqq_m_n_f16(__a, __b, __p) __arm_vcmpeqq_m_n_f16(__a, __b, __p) | |
1111 | #define vcmpgeq_m_f16(__a, __b, __p) __arm_vcmpgeq_m_f16(__a, __b, __p) | |
1112 | #define vcmpgeq_m_n_f16(__a, __b, __p) __arm_vcmpgeq_m_n_f16(__a, __b, __p) | |
1113 | #define vcmpgtq_m_f16(__a, __b, __p) __arm_vcmpgtq_m_f16(__a, __b, __p) | |
1114 | #define vcmpgtq_m_n_f16(__a, __b, __p) __arm_vcmpgtq_m_n_f16(__a, __b, __p) | |
1115 | #define vcmpleq_m_f16(__a, __b, __p) __arm_vcmpleq_m_f16(__a, __b, __p) | |
1116 | #define vcmpleq_m_n_f16(__a, __b, __p) __arm_vcmpleq_m_n_f16(__a, __b, __p) | |
1117 | #define vcmpltq_m_f16(__a, __b, __p) __arm_vcmpltq_m_f16(__a, __b, __p) | |
1118 | #define vcmpltq_m_n_f16(__a, __b, __p) __arm_vcmpltq_m_n_f16(__a, __b, __p) | |
1119 | #define vcmpneq_m_f16(__a, __b, __p) __arm_vcmpneq_m_f16(__a, __b, __p) | |
1120 | #define vcmpneq_m_n_f16(__a, __b, __p) __arm_vcmpneq_m_n_f16(__a, __b, __p) | |
1121 | #define vmvnq_m_n_u16(__inactive, __imm, __p) __arm_vmvnq_m_n_u16(__inactive, __imm, __p) | |
1122 | #define vorrq_m_n_u16(__a, __imm, __p) __arm_vorrq_m_n_u16(__a, __imm, __p) | |
1123 | #define vqrshruntq_n_s16(__a, __b, __imm) __arm_vqrshruntq_n_s16(__a, __b, __imm) | |
1124 | #define vqshrunbq_n_s16(__a, __b, __imm) __arm_vqshrunbq_n_s16(__a, __b, __imm) | |
1125 | #define vqshruntq_n_s16(__a, __b, __imm) __arm_vqshruntq_n_s16(__a, __b, __imm) | |
1126 | #define vcvtmq_m_u16_f16(__inactive, __a, __p) __arm_vcvtmq_m_u16_f16(__inactive, __a, __p) | |
1127 | #define vcvtnq_m_u16_f16(__inactive, __a, __p) __arm_vcvtnq_m_u16_f16(__inactive, __a, __p) | |
1128 | #define vcvtpq_m_u16_f16(__inactive, __a, __p) __arm_vcvtpq_m_u16_f16(__inactive, __a, __p) | |
1129 | #define vcvtq_m_u16_f16(__inactive, __a, __p) __arm_vcvtq_m_u16_f16(__inactive, __a, __p) | |
1130 | #define vqmovunbq_m_s16(__a, __b, __p) __arm_vqmovunbq_m_s16(__a, __b, __p) | |
1131 | #define vqmovuntq_m_s16(__a, __b, __p) __arm_vqmovuntq_m_s16(__a, __b, __p) | |
1132 | #define vqrshrntq_n_u16(__a, __b, __imm) __arm_vqrshrntq_n_u16(__a, __b, __imm) | |
1133 | #define vqshrnbq_n_u16(__a, __b, __imm) __arm_vqshrnbq_n_u16(__a, __b, __imm) | |
1134 | #define vqshrntq_n_u16(__a, __b, __imm) __arm_vqshrntq_n_u16(__a, __b, __imm) | |
1135 | #define vrshrnbq_n_u16(__a, __b, __imm) __arm_vrshrnbq_n_u16(__a, __b, __imm) | |
1136 | #define vrshrntq_n_u16(__a, __b, __imm) __arm_vrshrntq_n_u16(__a, __b, __imm) | |
1137 | #define vshrnbq_n_u16(__a, __b, __imm) __arm_vshrnbq_n_u16(__a, __b, __imm) | |
1138 | #define vshrntq_n_u16(__a, __b, __imm) __arm_vshrntq_n_u16(__a, __b, __imm) | |
1139 | #define vmlaldavaq_u16(__a, __b, __c) __arm_vmlaldavaq_u16(__a, __b, __c) | |
1140 | #define vmlaldavq_p_u16(__a, __b, __p) __arm_vmlaldavq_p_u16(__a, __b, __p) | |
1141 | #define vmovlbq_m_u8(__inactive, __a, __p) __arm_vmovlbq_m_u8(__inactive, __a, __p) | |
1142 | #define vmovltq_m_u8(__inactive, __a, __p) __arm_vmovltq_m_u8(__inactive, __a, __p) | |
1143 | #define vmovnbq_m_u16(__a, __b, __p) __arm_vmovnbq_m_u16(__a, __b, __p) | |
1144 | #define vmovntq_m_u16(__a, __b, __p) __arm_vmovntq_m_u16(__a, __b, __p) | |
1145 | #define vqmovnbq_m_u16(__a, __b, __p) __arm_vqmovnbq_m_u16(__a, __b, __p) | |
1146 | #define vqmovntq_m_u16(__a, __b, __p) __arm_vqmovntq_m_u16(__a, __b, __p) | |
1147 | #define vrev32q_m_u8(__inactive, __a, __p) __arm_vrev32q_m_u8(__inactive, __a, __p) | |
1148 | #define vmvnq_m_n_s32(__inactive, __imm, __p) __arm_vmvnq_m_n_s32(__inactive, __imm, __p) | |
1149 | #define vorrq_m_n_s32(__a, __imm, __p) __arm_vorrq_m_n_s32(__a, __imm, __p) | |
1150 | #define vqrshrntq_n_s32(__a, __b, __imm) __arm_vqrshrntq_n_s32(__a, __b, __imm) | |
1151 | #define vqshrnbq_n_s32(__a, __b, __imm) __arm_vqshrnbq_n_s32(__a, __b, __imm) | |
1152 | #define vqshrntq_n_s32(__a, __b, __imm) __arm_vqshrntq_n_s32(__a, __b, __imm) | |
1153 | #define vrshrnbq_n_s32(__a, __b, __imm) __arm_vrshrnbq_n_s32(__a, __b, __imm) | |
1154 | #define vrshrntq_n_s32(__a, __b, __imm) __arm_vrshrntq_n_s32(__a, __b, __imm) | |
1155 | #define vshrnbq_n_s32(__a, __b, __imm) __arm_vshrnbq_n_s32(__a, __b, __imm) | |
1156 | #define vshrntq_n_s32(__a, __b, __imm) __arm_vshrntq_n_s32(__a, __b, __imm) | |
1157 | #define vcmlaq_f32(__a, __b, __c) __arm_vcmlaq_f32(__a, __b, __c) | |
1158 | #define vcmlaq_rot180_f32(__a, __b, __c) __arm_vcmlaq_rot180_f32(__a, __b, __c) | |
1159 | #define vcmlaq_rot270_f32(__a, __b, __c) __arm_vcmlaq_rot270_f32(__a, __b, __c) | |
1160 | #define vcmlaq_rot90_f32(__a, __b, __c) __arm_vcmlaq_rot90_f32(__a, __b, __c) | |
1161 | #define vfmaq_f32(__a, __b, __c) __arm_vfmaq_f32(__a, __b, __c) | |
1162 | #define vfmaq_n_f32(__a, __b, __c) __arm_vfmaq_n_f32(__a, __b, __c) | |
1163 | #define vfmasq_n_f32(__a, __b, __c) __arm_vfmasq_n_f32(__a, __b, __c) | |
1164 | #define vfmsq_f32(__a, __b, __c) __arm_vfmsq_f32(__a, __b, __c) | |
1165 | #define vmlaldavaq_s32(__a, __b, __c) __arm_vmlaldavaq_s32(__a, __b, __c) | |
1166 | #define vmlaldavaxq_s32(__a, __b, __c) __arm_vmlaldavaxq_s32(__a, __b, __c) | |
1167 | #define vmlsldavaq_s32(__a, __b, __c) __arm_vmlsldavaq_s32(__a, __b, __c) | |
1168 | #define vmlsldavaxq_s32(__a, __b, __c) __arm_vmlsldavaxq_s32(__a, __b, __c) | |
1169 | #define vabsq_m_f32(__inactive, __a, __p) __arm_vabsq_m_f32(__inactive, __a, __p) | |
1170 | #define vcvtmq_m_s32_f32(__inactive, __a, __p) __arm_vcvtmq_m_s32_f32(__inactive, __a, __p) | |
1171 | #define vcvtnq_m_s32_f32(__inactive, __a, __p) __arm_vcvtnq_m_s32_f32(__inactive, __a, __p) | |
1172 | #define vcvtpq_m_s32_f32(__inactive, __a, __p) __arm_vcvtpq_m_s32_f32(__inactive, __a, __p) | |
1173 | #define vcvtq_m_s32_f32(__inactive, __a, __p) __arm_vcvtq_m_s32_f32(__inactive, __a, __p) | |
1174 | #define vdupq_m_n_f32(__inactive, __a, __p) __arm_vdupq_m_n_f32(__inactive, __a, __p) | |
1175 | #define vmaxnmaq_m_f32(__a, __b, __p) __arm_vmaxnmaq_m_f32(__a, __b, __p) | |
1176 | #define vmaxnmavq_p_f32(__a, __b, __p) __arm_vmaxnmavq_p_f32(__a, __b, __p) | |
1177 | #define vmaxnmvq_p_f32(__a, __b, __p) __arm_vmaxnmvq_p_f32(__a, __b, __p) | |
1178 | #define vminnmaq_m_f32(__a, __b, __p) __arm_vminnmaq_m_f32(__a, __b, __p) | |
1179 | #define vminnmavq_p_f32(__a, __b, __p) __arm_vminnmavq_p_f32(__a, __b, __p) | |
1180 | #define vminnmvq_p_f32(__a, __b, __p) __arm_vminnmvq_p_f32(__a, __b, __p) | |
1181 | #define vmlaldavq_p_s32(__a, __b, __p) __arm_vmlaldavq_p_s32(__a, __b, __p) | |
1182 | #define vmlaldavxq_p_s32(__a, __b, __p) __arm_vmlaldavxq_p_s32(__a, __b, __p) | |
1183 | #define vmlsldavq_p_s32(__a, __b, __p) __arm_vmlsldavq_p_s32(__a, __b, __p) | |
1184 | #define vmlsldavxq_p_s32(__a, __b, __p) __arm_vmlsldavxq_p_s32(__a, __b, __p) | |
1185 | #define vmovlbq_m_s16(__inactive, __a, __p) __arm_vmovlbq_m_s16(__inactive, __a, __p) | |
1186 | #define vmovltq_m_s16(__inactive, __a, __p) __arm_vmovltq_m_s16(__inactive, __a, __p) | |
1187 | #define vmovnbq_m_s32(__a, __b, __p) __arm_vmovnbq_m_s32(__a, __b, __p) | |
1188 | #define vmovntq_m_s32(__a, __b, __p) __arm_vmovntq_m_s32(__a, __b, __p) | |
1189 | #define vnegq_m_f32(__inactive, __a, __p) __arm_vnegq_m_f32(__inactive, __a, __p) | |
1190 | #define vpselq_f32(__a, __b, __p) __arm_vpselq_f32(__a, __b, __p) | |
1191 | #define vqmovnbq_m_s32(__a, __b, __p) __arm_vqmovnbq_m_s32(__a, __b, __p) | |
1192 | #define vqmovntq_m_s32(__a, __b, __p) __arm_vqmovntq_m_s32(__a, __b, __p) | |
1193 | #define vrev32q_m_s16(__inactive, __a, __p) __arm_vrev32q_m_s16(__inactive, __a, __p) | |
1194 | #define vrev64q_m_f32(__inactive, __a, __p) __arm_vrev64q_m_f32(__inactive, __a, __p) | |
1195 | #define vrndaq_m_f32(__inactive, __a, __p) __arm_vrndaq_m_f32(__inactive, __a, __p) | |
1196 | #define vrndmq_m_f32(__inactive, __a, __p) __arm_vrndmq_m_f32(__inactive, __a, __p) | |
1197 | #define vrndnq_m_f32(__inactive, __a, __p) __arm_vrndnq_m_f32(__inactive, __a, __p) | |
1198 | #define vrndpq_m_f32(__inactive, __a, __p) __arm_vrndpq_m_f32(__inactive, __a, __p) | |
1199 | #define vrndq_m_f32(__inactive, __a, __p) __arm_vrndq_m_f32(__inactive, __a, __p) | |
1200 | #define vrndxq_m_f32(__inactive, __a, __p) __arm_vrndxq_m_f32(__inactive, __a, __p) | |
1201 | #define vcmpeqq_m_n_f32(__a, __b, __p) __arm_vcmpeqq_m_n_f32(__a, __b, __p) | |
1202 | #define vcmpgeq_m_f32(__a, __b, __p) __arm_vcmpgeq_m_f32(__a, __b, __p) | |
1203 | #define vcmpgeq_m_n_f32(__a, __b, __p) __arm_vcmpgeq_m_n_f32(__a, __b, __p) | |
1204 | #define vcmpgtq_m_f32(__a, __b, __p) __arm_vcmpgtq_m_f32(__a, __b, __p) | |
1205 | #define vcmpgtq_m_n_f32(__a, __b, __p) __arm_vcmpgtq_m_n_f32(__a, __b, __p) | |
1206 | #define vcmpleq_m_f32(__a, __b, __p) __arm_vcmpleq_m_f32(__a, __b, __p) | |
1207 | #define vcmpleq_m_n_f32(__a, __b, __p) __arm_vcmpleq_m_n_f32(__a, __b, __p) | |
1208 | #define vcmpltq_m_f32(__a, __b, __p) __arm_vcmpltq_m_f32(__a, __b, __p) | |
1209 | #define vcmpltq_m_n_f32(__a, __b, __p) __arm_vcmpltq_m_n_f32(__a, __b, __p) | |
1210 | #define vcmpneq_m_f32(__a, __b, __p) __arm_vcmpneq_m_f32(__a, __b, __p) | |
1211 | #define vcmpneq_m_n_f32(__a, __b, __p) __arm_vcmpneq_m_n_f32(__a, __b, __p) | |
1212 | #define vmvnq_m_n_u32(__inactive, __imm, __p) __arm_vmvnq_m_n_u32(__inactive, __imm, __p) | |
1213 | #define vorrq_m_n_u32(__a, __imm, __p) __arm_vorrq_m_n_u32(__a, __imm, __p) | |
1214 | #define vqrshruntq_n_s32(__a, __b, __imm) __arm_vqrshruntq_n_s32(__a, __b, __imm) | |
1215 | #define vqshrunbq_n_s32(__a, __b, __imm) __arm_vqshrunbq_n_s32(__a, __b, __imm) | |
1216 | #define vqshruntq_n_s32(__a, __b, __imm) __arm_vqshruntq_n_s32(__a, __b, __imm) | |
1217 | #define vcvtmq_m_u32_f32(__inactive, __a, __p) __arm_vcvtmq_m_u32_f32(__inactive, __a, __p) | |
1218 | #define vcvtnq_m_u32_f32(__inactive, __a, __p) __arm_vcvtnq_m_u32_f32(__inactive, __a, __p) | |
1219 | #define vcvtpq_m_u32_f32(__inactive, __a, __p) __arm_vcvtpq_m_u32_f32(__inactive, __a, __p) | |
1220 | #define vcvtq_m_u32_f32(__inactive, __a, __p) __arm_vcvtq_m_u32_f32(__inactive, __a, __p) | |
1221 | #define vqmovunbq_m_s32(__a, __b, __p) __arm_vqmovunbq_m_s32(__a, __b, __p) | |
1222 | #define vqmovuntq_m_s32(__a, __b, __p) __arm_vqmovuntq_m_s32(__a, __b, __p) | |
1223 | #define vqrshrntq_n_u32(__a, __b, __imm) __arm_vqrshrntq_n_u32(__a, __b, __imm) | |
1224 | #define vqshrnbq_n_u32(__a, __b, __imm) __arm_vqshrnbq_n_u32(__a, __b, __imm) | |
1225 | #define vqshrntq_n_u32(__a, __b, __imm) __arm_vqshrntq_n_u32(__a, __b, __imm) | |
1226 | #define vrshrnbq_n_u32(__a, __b, __imm) __arm_vrshrnbq_n_u32(__a, __b, __imm) | |
1227 | #define vrshrntq_n_u32(__a, __b, __imm) __arm_vrshrntq_n_u32(__a, __b, __imm) | |
1228 | #define vshrnbq_n_u32(__a, __b, __imm) __arm_vshrnbq_n_u32(__a, __b, __imm) | |
1229 | #define vshrntq_n_u32(__a, __b, __imm) __arm_vshrntq_n_u32(__a, __b, __imm) | |
1230 | #define vmlaldavaq_u32(__a, __b, __c) __arm_vmlaldavaq_u32(__a, __b, __c) | |
1231 | #define vmlaldavq_p_u32(__a, __b, __p) __arm_vmlaldavq_p_u32(__a, __b, __p) | |
1232 | #define vmovlbq_m_u16(__inactive, __a, __p) __arm_vmovlbq_m_u16(__inactive, __a, __p) | |
1233 | #define vmovltq_m_u16(__inactive, __a, __p) __arm_vmovltq_m_u16(__inactive, __a, __p) | |
1234 | #define vmovnbq_m_u32(__a, __b, __p) __arm_vmovnbq_m_u32(__a, __b, __p) | |
1235 | #define vmovntq_m_u32(__a, __b, __p) __arm_vmovntq_m_u32(__a, __b, __p) | |
1236 | #define vqmovnbq_m_u32(__a, __b, __p) __arm_vqmovnbq_m_u32(__a, __b, __p) | |
1237 | #define vqmovntq_m_u32(__a, __b, __p) __arm_vqmovntq_m_u32(__a, __b, __p) | |
1238 | #define vrev32q_m_u16(__inactive, __a, __p) __arm_vrev32q_m_u16(__inactive, __a, __p) | |
db5db9d2 SP |
1239 | #define vsriq_m_n_s8(__a, __b, __imm, __p) __arm_vsriq_m_n_s8(__a, __b, __imm, __p) |
1240 | #define vsubq_m_s8(__inactive, __a, __b, __p) __arm_vsubq_m_s8(__inactive, __a, __b, __p) | |
1241 | #define vcvtq_m_n_f16_u16(__inactive, __a, __imm6, __p) __arm_vcvtq_m_n_f16_u16(__inactive, __a, __imm6, __p) | |
1242 | #define vqshluq_m_n_s8(__inactive, __a, __imm, __p) __arm_vqshluq_m_n_s8(__inactive, __a, __imm, __p) | |
1243 | #define vabavq_p_s8(__a, __b, __c, __p) __arm_vabavq_p_s8(__a, __b, __c, __p) | |
1244 | #define vsriq_m_n_u8(__a, __b, __imm, __p) __arm_vsriq_m_n_u8(__a, __b, __imm, __p) | |
1245 | #define vshlq_m_u8(__inactive, __a, __b, __p) __arm_vshlq_m_u8(__inactive, __a, __b, __p) | |
1246 | #define vsubq_m_u8(__inactive, __a, __b, __p) __arm_vsubq_m_u8(__inactive, __a, __b, __p) | |
1247 | #define vabavq_p_u8(__a, __b, __c, __p) __arm_vabavq_p_u8(__a, __b, __c, __p) | |
1248 | #define vshlq_m_s8(__inactive, __a, __b, __p) __arm_vshlq_m_s8(__inactive, __a, __b, __p) | |
1249 | #define vcvtq_m_n_f16_s16(__inactive, __a, __imm6, __p) __arm_vcvtq_m_n_f16_s16(__inactive, __a, __imm6, __p) | |
1250 | #define vsriq_m_n_s16(__a, __b, __imm, __p) __arm_vsriq_m_n_s16(__a, __b, __imm, __p) | |
1251 | #define vsubq_m_s16(__inactive, __a, __b, __p) __arm_vsubq_m_s16(__inactive, __a, __b, __p) | |
1252 | #define vcvtq_m_n_f32_u32(__inactive, __a, __imm6, __p) __arm_vcvtq_m_n_f32_u32(__inactive, __a, __imm6, __p) | |
1253 | #define vqshluq_m_n_s16(__inactive, __a, __imm, __p) __arm_vqshluq_m_n_s16(__inactive, __a, __imm, __p) | |
1254 | #define vabavq_p_s16(__a, __b, __c, __p) __arm_vabavq_p_s16(__a, __b, __c, __p) | |
1255 | #define vsriq_m_n_u16(__a, __b, __imm, __p) __arm_vsriq_m_n_u16(__a, __b, __imm, __p) | |
1256 | #define vshlq_m_u16(__inactive, __a, __b, __p) __arm_vshlq_m_u16(__inactive, __a, __b, __p) | |
1257 | #define vsubq_m_u16(__inactive, __a, __b, __p) __arm_vsubq_m_u16(__inactive, __a, __b, __p) | |
1258 | #define vabavq_p_u16(__a, __b, __c, __p) __arm_vabavq_p_u16(__a, __b, __c, __p) | |
1259 | #define vshlq_m_s16(__inactive, __a, __b, __p) __arm_vshlq_m_s16(__inactive, __a, __b, __p) | |
1260 | #define vcvtq_m_n_f32_s32(__inactive, __a, __imm6, __p) __arm_vcvtq_m_n_f32_s32(__inactive, __a, __imm6, __p) | |
1261 | #define vsriq_m_n_s32(__a, __b, __imm, __p) __arm_vsriq_m_n_s32(__a, __b, __imm, __p) | |
1262 | #define vsubq_m_s32(__inactive, __a, __b, __p) __arm_vsubq_m_s32(__inactive, __a, __b, __p) | |
1263 | #define vqshluq_m_n_s32(__inactive, __a, __imm, __p) __arm_vqshluq_m_n_s32(__inactive, __a, __imm, __p) | |
1264 | #define vabavq_p_s32(__a, __b, __c, __p) __arm_vabavq_p_s32(__a, __b, __c, __p) | |
1265 | #define vsriq_m_n_u32(__a, __b, __imm, __p) __arm_vsriq_m_n_u32(__a, __b, __imm, __p) | |
/* Predicated (merging, "_m"/"_p") MVE intrinsic aliases: each public
   intrinsic name is a macro forwarding its arguments unchanged to the
   corresponding compiler-internal __arm_* implementation.  __inactive
   supplies the result lanes where the predicate __p is false; _n variants
   take a scalar operand __b/__c; _n shift variants take an immediate
   __imm.  */
#define vshlq_m_u32(__inactive, __a, __b, __p) __arm_vshlq_m_u32(__inactive, __a, __b, __p)
#define vsubq_m_u32(__inactive, __a, __b, __p) __arm_vsubq_m_u32(__inactive, __a, __b, __p)
#define vabavq_p_u32(__a, __b, __c, __p) __arm_vabavq_p_u32(__a, __b, __c, __p)
#define vshlq_m_s32(__inactive, __a, __b, __p) __arm_vshlq_m_s32(__inactive, __a, __b, __p)
#define vabdq_m_s8(__inactive, __a, __b, __p) __arm_vabdq_m_s8(__inactive, __a, __b, __p)
#define vabdq_m_s32(__inactive, __a, __b, __p) __arm_vabdq_m_s32(__inactive, __a, __b, __p)
#define vabdq_m_s16(__inactive, __a, __b, __p) __arm_vabdq_m_s16(__inactive, __a, __b, __p)
#define vabdq_m_u8(__inactive, __a, __b, __p) __arm_vabdq_m_u8(__inactive, __a, __b, __p)
#define vabdq_m_u32(__inactive, __a, __b, __p) __arm_vabdq_m_u32(__inactive, __a, __b, __p)
#define vabdq_m_u16(__inactive, __a, __b, __p) __arm_vabdq_m_u16(__inactive, __a, __b, __p)
#define vaddq_m_n_s8(__inactive, __a, __b, __p) __arm_vaddq_m_n_s8(__inactive, __a, __b, __p)
#define vaddq_m_n_s32(__inactive, __a, __b, __p) __arm_vaddq_m_n_s32(__inactive, __a, __b, __p)
#define vaddq_m_n_s16(__inactive, __a, __b, __p) __arm_vaddq_m_n_s16(__inactive, __a, __b, __p)
#define vaddq_m_n_u8(__inactive, __a, __b, __p) __arm_vaddq_m_n_u8(__inactive, __a, __b, __p)
#define vaddq_m_n_u32(__inactive, __a, __b, __p) __arm_vaddq_m_n_u32(__inactive, __a, __b, __p)
#define vaddq_m_n_u16(__inactive, __a, __b, __p) __arm_vaddq_m_n_u16(__inactive, __a, __b, __p)
#define vaddq_m_s8(__inactive, __a, __b, __p) __arm_vaddq_m_s8(__inactive, __a, __b, __p)
#define vaddq_m_s32(__inactive, __a, __b, __p) __arm_vaddq_m_s32(__inactive, __a, __b, __p)
#define vaddq_m_s16(__inactive, __a, __b, __p) __arm_vaddq_m_s16(__inactive, __a, __b, __p)
#define vaddq_m_u8(__inactive, __a, __b, __p) __arm_vaddq_m_u8(__inactive, __a, __b, __p)
#define vaddq_m_u32(__inactive, __a, __b, __p) __arm_vaddq_m_u32(__inactive, __a, __b, __p)
#define vaddq_m_u16(__inactive, __a, __b, __p) __arm_vaddq_m_u16(__inactive, __a, __b, __p)
#define vandq_m_s8(__inactive, __a, __b, __p) __arm_vandq_m_s8(__inactive, __a, __b, __p)
#define vandq_m_s32(__inactive, __a, __b, __p) __arm_vandq_m_s32(__inactive, __a, __b, __p)
#define vandq_m_s16(__inactive, __a, __b, __p) __arm_vandq_m_s16(__inactive, __a, __b, __p)
#define vandq_m_u8(__inactive, __a, __b, __p) __arm_vandq_m_u8(__inactive, __a, __b, __p)
#define vandq_m_u32(__inactive, __a, __b, __p) __arm_vandq_m_u32(__inactive, __a, __b, __p)
#define vandq_m_u16(__inactive, __a, __b, __p) __arm_vandq_m_u16(__inactive, __a, __b, __p)
#define vbicq_m_s8(__inactive, __a, __b, __p) __arm_vbicq_m_s8(__inactive, __a, __b, __p)
#define vbicq_m_s32(__inactive, __a, __b, __p) __arm_vbicq_m_s32(__inactive, __a, __b, __p)
#define vbicq_m_s16(__inactive, __a, __b, __p) __arm_vbicq_m_s16(__inactive, __a, __b, __p)
#define vbicq_m_u8(__inactive, __a, __b, __p) __arm_vbicq_m_u8(__inactive, __a, __b, __p)
#define vbicq_m_u32(__inactive, __a, __b, __p) __arm_vbicq_m_u32(__inactive, __a, __b, __p)
#define vbicq_m_u16(__inactive, __a, __b, __p) __arm_vbicq_m_u16(__inactive, __a, __b, __p)
#define vbrsrq_m_n_s8(__inactive, __a, __b, __p) __arm_vbrsrq_m_n_s8(__inactive, __a, __b, __p)
#define vbrsrq_m_n_s32(__inactive, __a, __b, __p) __arm_vbrsrq_m_n_s32(__inactive, __a, __b, __p)
#define vbrsrq_m_n_s16(__inactive, __a, __b, __p) __arm_vbrsrq_m_n_s16(__inactive, __a, __b, __p)
#define vbrsrq_m_n_u8(__inactive, __a, __b, __p) __arm_vbrsrq_m_n_u8(__inactive, __a, __b, __p)
#define vbrsrq_m_n_u32(__inactive, __a, __b, __p) __arm_vbrsrq_m_n_u32(__inactive, __a, __b, __p)
#define vbrsrq_m_n_u16(__inactive, __a, __b, __p) __arm_vbrsrq_m_n_u16(__inactive, __a, __b, __p)
#define vcaddq_rot270_m_s8(__inactive, __a, __b, __p) __arm_vcaddq_rot270_m_s8(__inactive, __a, __b, __p)
#define vcaddq_rot270_m_s32(__inactive, __a, __b, __p) __arm_vcaddq_rot270_m_s32(__inactive, __a, __b, __p)
#define vcaddq_rot270_m_s16(__inactive, __a, __b, __p) __arm_vcaddq_rot270_m_s16(__inactive, __a, __b, __p)
#define vcaddq_rot270_m_u8(__inactive, __a, __b, __p) __arm_vcaddq_rot270_m_u8(__inactive, __a, __b, __p)
#define vcaddq_rot270_m_u32(__inactive, __a, __b, __p) __arm_vcaddq_rot270_m_u32(__inactive, __a, __b, __p)
#define vcaddq_rot270_m_u16(__inactive, __a, __b, __p) __arm_vcaddq_rot270_m_u16(__inactive, __a, __b, __p)
#define vcaddq_rot90_m_s8(__inactive, __a, __b, __p) __arm_vcaddq_rot90_m_s8(__inactive, __a, __b, __p)
#define vcaddq_rot90_m_s32(__inactive, __a, __b, __p) __arm_vcaddq_rot90_m_s32(__inactive, __a, __b, __p)
#define vcaddq_rot90_m_s16(__inactive, __a, __b, __p) __arm_vcaddq_rot90_m_s16(__inactive, __a, __b, __p)
#define vcaddq_rot90_m_u8(__inactive, __a, __b, __p) __arm_vcaddq_rot90_m_u8(__inactive, __a, __b, __p)
#define vcaddq_rot90_m_u32(__inactive, __a, __b, __p) __arm_vcaddq_rot90_m_u32(__inactive, __a, __b, __p)
#define vcaddq_rot90_m_u16(__inactive, __a, __b, __p) __arm_vcaddq_rot90_m_u16(__inactive, __a, __b, __p)
#define veorq_m_s8(__inactive, __a, __b, __p) __arm_veorq_m_s8(__inactive, __a, __b, __p)
#define veorq_m_s32(__inactive, __a, __b, __p) __arm_veorq_m_s32(__inactive, __a, __b, __p)
#define veorq_m_s16(__inactive, __a, __b, __p) __arm_veorq_m_s16(__inactive, __a, __b, __p)
#define veorq_m_u8(__inactive, __a, __b, __p) __arm_veorq_m_u8(__inactive, __a, __b, __p)
#define veorq_m_u32(__inactive, __a, __b, __p) __arm_veorq_m_u32(__inactive, __a, __b, __p)
#define veorq_m_u16(__inactive, __a, __b, __p) __arm_veorq_m_u16(__inactive, __a, __b, __p)
#define vhaddq_m_n_s8(__inactive, __a, __b, __p) __arm_vhaddq_m_n_s8(__inactive, __a, __b, __p)
#define vhaddq_m_n_s32(__inactive, __a, __b, __p) __arm_vhaddq_m_n_s32(__inactive, __a, __b, __p)
#define vhaddq_m_n_s16(__inactive, __a, __b, __p) __arm_vhaddq_m_n_s16(__inactive, __a, __b, __p)
#define vhaddq_m_n_u8(__inactive, __a, __b, __p) __arm_vhaddq_m_n_u8(__inactive, __a, __b, __p)
#define vhaddq_m_n_u32(__inactive, __a, __b, __p) __arm_vhaddq_m_n_u32(__inactive, __a, __b, __p)
#define vhaddq_m_n_u16(__inactive, __a, __b, __p) __arm_vhaddq_m_n_u16(__inactive, __a, __b, __p)
#define vhaddq_m_s8(__inactive, __a, __b, __p) __arm_vhaddq_m_s8(__inactive, __a, __b, __p)
#define vhaddq_m_s32(__inactive, __a, __b, __p) __arm_vhaddq_m_s32(__inactive, __a, __b, __p)
#define vhaddq_m_s16(__inactive, __a, __b, __p) __arm_vhaddq_m_s16(__inactive, __a, __b, __p)
#define vhaddq_m_u8(__inactive, __a, __b, __p) __arm_vhaddq_m_u8(__inactive, __a, __b, __p)
#define vhaddq_m_u32(__inactive, __a, __b, __p) __arm_vhaddq_m_u32(__inactive, __a, __b, __p)
#define vhaddq_m_u16(__inactive, __a, __b, __p) __arm_vhaddq_m_u16(__inactive, __a, __b, __p)
#define vhcaddq_rot270_m_s8(__inactive, __a, __b, __p) __arm_vhcaddq_rot270_m_s8(__inactive, __a, __b, __p)
#define vhcaddq_rot270_m_s32(__inactive, __a, __b, __p) __arm_vhcaddq_rot270_m_s32(__inactive, __a, __b, __p)
#define vhcaddq_rot270_m_s16(__inactive, __a, __b, __p) __arm_vhcaddq_rot270_m_s16(__inactive, __a, __b, __p)
#define vhcaddq_rot90_m_s8(__inactive, __a, __b, __p) __arm_vhcaddq_rot90_m_s8(__inactive, __a, __b, __p)
#define vhcaddq_rot90_m_s32(__inactive, __a, __b, __p) __arm_vhcaddq_rot90_m_s32(__inactive, __a, __b, __p)
#define vhcaddq_rot90_m_s16(__inactive, __a, __b, __p) __arm_vhcaddq_rot90_m_s16(__inactive, __a, __b, __p)
#define vhsubq_m_n_s8(__inactive, __a, __b, __p) __arm_vhsubq_m_n_s8(__inactive, __a, __b, __p)
#define vhsubq_m_n_s32(__inactive, __a, __b, __p) __arm_vhsubq_m_n_s32(__inactive, __a, __b, __p)
#define vhsubq_m_n_s16(__inactive, __a, __b, __p) __arm_vhsubq_m_n_s16(__inactive, __a, __b, __p)
#define vhsubq_m_n_u8(__inactive, __a, __b, __p) __arm_vhsubq_m_n_u8(__inactive, __a, __b, __p)
#define vhsubq_m_n_u32(__inactive, __a, __b, __p) __arm_vhsubq_m_n_u32(__inactive, __a, __b, __p)
#define vhsubq_m_n_u16(__inactive, __a, __b, __p) __arm_vhsubq_m_n_u16(__inactive, __a, __b, __p)
#define vhsubq_m_s8(__inactive, __a, __b, __p) __arm_vhsubq_m_s8(__inactive, __a, __b, __p)
#define vhsubq_m_s32(__inactive, __a, __b, __p) __arm_vhsubq_m_s32(__inactive, __a, __b, __p)
#define vhsubq_m_s16(__inactive, __a, __b, __p) __arm_vhsubq_m_s16(__inactive, __a, __b, __p)
#define vhsubq_m_u8(__inactive, __a, __b, __p) __arm_vhsubq_m_u8(__inactive, __a, __b, __p)
#define vhsubq_m_u32(__inactive, __a, __b, __p) __arm_vhsubq_m_u32(__inactive, __a, __b, __p)
#define vhsubq_m_u16(__inactive, __a, __b, __p) __arm_vhsubq_m_u16(__inactive, __a, __b, __p)
#define vmaxq_m_s8(__inactive, __a, __b, __p) __arm_vmaxq_m_s8(__inactive, __a, __b, __p)
#define vmaxq_m_s32(__inactive, __a, __b, __p) __arm_vmaxq_m_s32(__inactive, __a, __b, __p)
#define vmaxq_m_s16(__inactive, __a, __b, __p) __arm_vmaxq_m_s16(__inactive, __a, __b, __p)
#define vmaxq_m_u8(__inactive, __a, __b, __p) __arm_vmaxq_m_u8(__inactive, __a, __b, __p)
#define vmaxq_m_u32(__inactive, __a, __b, __p) __arm_vmaxq_m_u32(__inactive, __a, __b, __p)
#define vmaxq_m_u16(__inactive, __a, __b, __p) __arm_vmaxq_m_u16(__inactive, __a, __b, __p)
#define vminq_m_s8(__inactive, __a, __b, __p) __arm_vminq_m_s8(__inactive, __a, __b, __p)
#define vminq_m_s32(__inactive, __a, __b, __p) __arm_vminq_m_s32(__inactive, __a, __b, __p)
#define vminq_m_s16(__inactive, __a, __b, __p) __arm_vminq_m_s16(__inactive, __a, __b, __p)
#define vminq_m_u8(__inactive, __a, __b, __p) __arm_vminq_m_u8(__inactive, __a, __b, __p)
#define vminq_m_u32(__inactive, __a, __b, __p) __arm_vminq_m_u32(__inactive, __a, __b, __p)
#define vminq_m_u16(__inactive, __a, __b, __p) __arm_vminq_m_u16(__inactive, __a, __b, __p)
#define vmladavaq_p_s8(__a, __b, __c, __p) __arm_vmladavaq_p_s8(__a, __b, __c, __p)
#define vmladavaq_p_s32(__a, __b, __c, __p) __arm_vmladavaq_p_s32(__a, __b, __c, __p)
#define vmladavaq_p_s16(__a, __b, __c, __p) __arm_vmladavaq_p_s16(__a, __b, __c, __p)
#define vmladavaq_p_u8(__a, __b, __c, __p) __arm_vmladavaq_p_u8(__a, __b, __c, __p)
#define vmladavaq_p_u32(__a, __b, __c, __p) __arm_vmladavaq_p_u32(__a, __b, __c, __p)
#define vmladavaq_p_u16(__a, __b, __c, __p) __arm_vmladavaq_p_u16(__a, __b, __c, __p)
#define vmladavaxq_p_s8(__a, __b, __c, __p) __arm_vmladavaxq_p_s8(__a, __b, __c, __p)
#define vmladavaxq_p_s32(__a, __b, __c, __p) __arm_vmladavaxq_p_s32(__a, __b, __c, __p)
#define vmladavaxq_p_s16(__a, __b, __c, __p) __arm_vmladavaxq_p_s16(__a, __b, __c, __p)
#define vmlaq_m_n_s8(__a, __b, __c, __p) __arm_vmlaq_m_n_s8(__a, __b, __c, __p)
#define vmlaq_m_n_s32(__a, __b, __c, __p) __arm_vmlaq_m_n_s32(__a, __b, __c, __p)
#define vmlaq_m_n_s16(__a, __b, __c, __p) __arm_vmlaq_m_n_s16(__a, __b, __c, __p)
#define vmlaq_m_n_u8(__a, __b, __c, __p) __arm_vmlaq_m_n_u8(__a, __b, __c, __p)
#define vmlaq_m_n_u32(__a, __b, __c, __p) __arm_vmlaq_m_n_u32(__a, __b, __c, __p)
#define vmlaq_m_n_u16(__a, __b, __c, __p) __arm_vmlaq_m_n_u16(__a, __b, __c, __p)
#define vmlasq_m_n_s8(__a, __b, __c, __p) __arm_vmlasq_m_n_s8(__a, __b, __c, __p)
#define vmlasq_m_n_s32(__a, __b, __c, __p) __arm_vmlasq_m_n_s32(__a, __b, __c, __p)
#define vmlasq_m_n_s16(__a, __b, __c, __p) __arm_vmlasq_m_n_s16(__a, __b, __c, __p)
#define vmlasq_m_n_u8(__a, __b, __c, __p) __arm_vmlasq_m_n_u8(__a, __b, __c, __p)
#define vmlasq_m_n_u32(__a, __b, __c, __p) __arm_vmlasq_m_n_u32(__a, __b, __c, __p)
#define vmlasq_m_n_u16(__a, __b, __c, __p) __arm_vmlasq_m_n_u16(__a, __b, __c, __p)
#define vmlsdavaq_p_s8(__a, __b, __c, __p) __arm_vmlsdavaq_p_s8(__a, __b, __c, __p)
#define vmlsdavaq_p_s32(__a, __b, __c, __p) __arm_vmlsdavaq_p_s32(__a, __b, __c, __p)
#define vmlsdavaq_p_s16(__a, __b, __c, __p) __arm_vmlsdavaq_p_s16(__a, __b, __c, __p)
#define vmlsdavaxq_p_s8(__a, __b, __c, __p) __arm_vmlsdavaxq_p_s8(__a, __b, __c, __p)
#define vmlsdavaxq_p_s32(__a, __b, __c, __p) __arm_vmlsdavaxq_p_s32(__a, __b, __c, __p)
#define vmlsdavaxq_p_s16(__a, __b, __c, __p) __arm_vmlsdavaxq_p_s16(__a, __b, __c, __p)
#define vmulhq_m_s8(__inactive, __a, __b, __p) __arm_vmulhq_m_s8(__inactive, __a, __b, __p)
#define vmulhq_m_s32(__inactive, __a, __b, __p) __arm_vmulhq_m_s32(__inactive, __a, __b, __p)
#define vmulhq_m_s16(__inactive, __a, __b, __p) __arm_vmulhq_m_s16(__inactive, __a, __b, __p)
#define vmulhq_m_u8(__inactive, __a, __b, __p) __arm_vmulhq_m_u8(__inactive, __a, __b, __p)
#define vmulhq_m_u32(__inactive, __a, __b, __p) __arm_vmulhq_m_u32(__inactive, __a, __b, __p)
#define vmulhq_m_u16(__inactive, __a, __b, __p) __arm_vmulhq_m_u16(__inactive, __a, __b, __p)
#define vmullbq_int_m_s8(__inactive, __a, __b, __p) __arm_vmullbq_int_m_s8(__inactive, __a, __b, __p)
#define vmullbq_int_m_s32(__inactive, __a, __b, __p) __arm_vmullbq_int_m_s32(__inactive, __a, __b, __p)
#define vmullbq_int_m_s16(__inactive, __a, __b, __p) __arm_vmullbq_int_m_s16(__inactive, __a, __b, __p)
#define vmullbq_int_m_u8(__inactive, __a, __b, __p) __arm_vmullbq_int_m_u8(__inactive, __a, __b, __p)
#define vmullbq_int_m_u32(__inactive, __a, __b, __p) __arm_vmullbq_int_m_u32(__inactive, __a, __b, __p)
#define vmullbq_int_m_u16(__inactive, __a, __b, __p) __arm_vmullbq_int_m_u16(__inactive, __a, __b, __p)
#define vmulltq_int_m_s8(__inactive, __a, __b, __p) __arm_vmulltq_int_m_s8(__inactive, __a, __b, __p)
#define vmulltq_int_m_s32(__inactive, __a, __b, __p) __arm_vmulltq_int_m_s32(__inactive, __a, __b, __p)
#define vmulltq_int_m_s16(__inactive, __a, __b, __p) __arm_vmulltq_int_m_s16(__inactive, __a, __b, __p)
#define vmulltq_int_m_u8(__inactive, __a, __b, __p) __arm_vmulltq_int_m_u8(__inactive, __a, __b, __p)
#define vmulltq_int_m_u32(__inactive, __a, __b, __p) __arm_vmulltq_int_m_u32(__inactive, __a, __b, __p)
#define vmulltq_int_m_u16(__inactive, __a, __b, __p) __arm_vmulltq_int_m_u16(__inactive, __a, __b, __p)
#define vmulq_m_n_s8(__inactive, __a, __b, __p) __arm_vmulq_m_n_s8(__inactive, __a, __b, __p)
#define vmulq_m_n_s32(__inactive, __a, __b, __p) __arm_vmulq_m_n_s32(__inactive, __a, __b, __p)
#define vmulq_m_n_s16(__inactive, __a, __b, __p) __arm_vmulq_m_n_s16(__inactive, __a, __b, __p)
#define vmulq_m_n_u8(__inactive, __a, __b, __p) __arm_vmulq_m_n_u8(__inactive, __a, __b, __p)
#define vmulq_m_n_u32(__inactive, __a, __b, __p) __arm_vmulq_m_n_u32(__inactive, __a, __b, __p)
#define vmulq_m_n_u16(__inactive, __a, __b, __p) __arm_vmulq_m_n_u16(__inactive, __a, __b, __p)
#define vmulq_m_s8(__inactive, __a, __b, __p) __arm_vmulq_m_s8(__inactive, __a, __b, __p)
#define vmulq_m_s32(__inactive, __a, __b, __p) __arm_vmulq_m_s32(__inactive, __a, __b, __p)
#define vmulq_m_s16(__inactive, __a, __b, __p) __arm_vmulq_m_s16(__inactive, __a, __b, __p)
#define vmulq_m_u8(__inactive, __a, __b, __p) __arm_vmulq_m_u8(__inactive, __a, __b, __p)
#define vmulq_m_u32(__inactive, __a, __b, __p) __arm_vmulq_m_u32(__inactive, __a, __b, __p)
#define vmulq_m_u16(__inactive, __a, __b, __p) __arm_vmulq_m_u16(__inactive, __a, __b, __p)
#define vornq_m_s8(__inactive, __a, __b, __p) __arm_vornq_m_s8(__inactive, __a, __b, __p)
#define vornq_m_s32(__inactive, __a, __b, __p) __arm_vornq_m_s32(__inactive, __a, __b, __p)
#define vornq_m_s16(__inactive, __a, __b, __p) __arm_vornq_m_s16(__inactive, __a, __b, __p)
#define vornq_m_u8(__inactive, __a, __b, __p) __arm_vornq_m_u8(__inactive, __a, __b, __p)
#define vornq_m_u32(__inactive, __a, __b, __p) __arm_vornq_m_u32(__inactive, __a, __b, __p)
#define vornq_m_u16(__inactive, __a, __b, __p) __arm_vornq_m_u16(__inactive, __a, __b, __p)
#define vorrq_m_s8(__inactive, __a, __b, __p) __arm_vorrq_m_s8(__inactive, __a, __b, __p)
#define vorrq_m_s32(__inactive, __a, __b, __p) __arm_vorrq_m_s32(__inactive, __a, __b, __p)
#define vorrq_m_s16(__inactive, __a, __b, __p) __arm_vorrq_m_s16(__inactive, __a, __b, __p)
#define vorrq_m_u8(__inactive, __a, __b, __p) __arm_vorrq_m_u8(__inactive, __a, __b, __p)
#define vorrq_m_u32(__inactive, __a, __b, __p) __arm_vorrq_m_u32(__inactive, __a, __b, __p)
#define vorrq_m_u16(__inactive, __a, __b, __p) __arm_vorrq_m_u16(__inactive, __a, __b, __p)
#define vqaddq_m_n_s8(__inactive, __a, __b, __p) __arm_vqaddq_m_n_s8(__inactive, __a, __b, __p)
#define vqaddq_m_n_s32(__inactive, __a, __b, __p) __arm_vqaddq_m_n_s32(__inactive, __a, __b, __p)
#define vqaddq_m_n_s16(__inactive, __a, __b, __p) __arm_vqaddq_m_n_s16(__inactive, __a, __b, __p)
#define vqaddq_m_n_u8(__inactive, __a, __b, __p) __arm_vqaddq_m_n_u8(__inactive, __a, __b, __p)
#define vqaddq_m_n_u32(__inactive, __a, __b, __p) __arm_vqaddq_m_n_u32(__inactive, __a, __b, __p)
#define vqaddq_m_n_u16(__inactive, __a, __b, __p) __arm_vqaddq_m_n_u16(__inactive, __a, __b, __p)
#define vqaddq_m_s8(__inactive, __a, __b, __p) __arm_vqaddq_m_s8(__inactive, __a, __b, __p)
#define vqaddq_m_s32(__inactive, __a, __b, __p) __arm_vqaddq_m_s32(__inactive, __a, __b, __p)
#define vqaddq_m_s16(__inactive, __a, __b, __p) __arm_vqaddq_m_s16(__inactive, __a, __b, __p)
#define vqaddq_m_u8(__inactive, __a, __b, __p) __arm_vqaddq_m_u8(__inactive, __a, __b, __p)
#define vqaddq_m_u32(__inactive, __a, __b, __p) __arm_vqaddq_m_u32(__inactive, __a, __b, __p)
#define vqaddq_m_u16(__inactive, __a, __b, __p) __arm_vqaddq_m_u16(__inactive, __a, __b, __p)
#define vqdmladhq_m_s8(__inactive, __a, __b, __p) __arm_vqdmladhq_m_s8(__inactive, __a, __b, __p)
#define vqdmladhq_m_s32(__inactive, __a, __b, __p) __arm_vqdmladhq_m_s32(__inactive, __a, __b, __p)
#define vqdmladhq_m_s16(__inactive, __a, __b, __p) __arm_vqdmladhq_m_s16(__inactive, __a, __b, __p)
#define vqdmladhxq_m_s8(__inactive, __a, __b, __p) __arm_vqdmladhxq_m_s8(__inactive, __a, __b, __p)
#define vqdmladhxq_m_s32(__inactive, __a, __b, __p) __arm_vqdmladhxq_m_s32(__inactive, __a, __b, __p)
#define vqdmladhxq_m_s16(__inactive, __a, __b, __p) __arm_vqdmladhxq_m_s16(__inactive, __a, __b, __p)
#define vqdmlahq_m_n_s8(__a, __b, __c, __p) __arm_vqdmlahq_m_n_s8(__a, __b, __c, __p)
#define vqdmlahq_m_n_s32(__a, __b, __c, __p) __arm_vqdmlahq_m_n_s32(__a, __b, __c, __p)
#define vqdmlahq_m_n_s16(__a, __b, __c, __p) __arm_vqdmlahq_m_n_s16(__a, __b, __c, __p)
#define vqdmlsdhq_m_s8(__inactive, __a, __b, __p) __arm_vqdmlsdhq_m_s8(__inactive, __a, __b, __p)
#define vqdmlsdhq_m_s32(__inactive, __a, __b, __p) __arm_vqdmlsdhq_m_s32(__inactive, __a, __b, __p)
#define vqdmlsdhq_m_s16(__inactive, __a, __b, __p) __arm_vqdmlsdhq_m_s16(__inactive, __a, __b, __p)
#define vqdmlsdhxq_m_s8(__inactive, __a, __b, __p) __arm_vqdmlsdhxq_m_s8(__inactive, __a, __b, __p)
#define vqdmlsdhxq_m_s32(__inactive, __a, __b, __p) __arm_vqdmlsdhxq_m_s32(__inactive, __a, __b, __p)
#define vqdmlsdhxq_m_s16(__inactive, __a, __b, __p) __arm_vqdmlsdhxq_m_s16(__inactive, __a, __b, __p)
#define vqdmulhq_m_n_s8(__inactive, __a, __b, __p) __arm_vqdmulhq_m_n_s8(__inactive, __a, __b, __p)
#define vqdmulhq_m_n_s32(__inactive, __a, __b, __p) __arm_vqdmulhq_m_n_s32(__inactive, __a, __b, __p)
#define vqdmulhq_m_n_s16(__inactive, __a, __b, __p) __arm_vqdmulhq_m_n_s16(__inactive, __a, __b, __p)
#define vqdmulhq_m_s8(__inactive, __a, __b, __p) __arm_vqdmulhq_m_s8(__inactive, __a, __b, __p)
#define vqdmulhq_m_s32(__inactive, __a, __b, __p) __arm_vqdmulhq_m_s32(__inactive, __a, __b, __p)
#define vqdmulhq_m_s16(__inactive, __a, __b, __p) __arm_vqdmulhq_m_s16(__inactive, __a, __b, __p)
#define vqrdmladhq_m_s8(__inactive, __a, __b, __p) __arm_vqrdmladhq_m_s8(__inactive, __a, __b, __p)
#define vqrdmladhq_m_s32(__inactive, __a, __b, __p) __arm_vqrdmladhq_m_s32(__inactive, __a, __b, __p)
#define vqrdmladhq_m_s16(__inactive, __a, __b, __p) __arm_vqrdmladhq_m_s16(__inactive, __a, __b, __p)
#define vqrdmladhxq_m_s8(__inactive, __a, __b, __p) __arm_vqrdmladhxq_m_s8(__inactive, __a, __b, __p)
#define vqrdmladhxq_m_s32(__inactive, __a, __b, __p) __arm_vqrdmladhxq_m_s32(__inactive, __a, __b, __p)
#define vqrdmladhxq_m_s16(__inactive, __a, __b, __p) __arm_vqrdmladhxq_m_s16(__inactive, __a, __b, __p)
#define vqrdmlahq_m_n_s8(__a, __b, __c, __p) __arm_vqrdmlahq_m_n_s8(__a, __b, __c, __p)
#define vqrdmlahq_m_n_s32(__a, __b, __c, __p) __arm_vqrdmlahq_m_n_s32(__a, __b, __c, __p)
#define vqrdmlahq_m_n_s16(__a, __b, __c, __p) __arm_vqrdmlahq_m_n_s16(__a, __b, __c, __p)
#define vqrdmlashq_m_n_s8(__a, __b, __c, __p) __arm_vqrdmlashq_m_n_s8(__a, __b, __c, __p)
#define vqrdmlashq_m_n_s32(__a, __b, __c, __p) __arm_vqrdmlashq_m_n_s32(__a, __b, __c, __p)
#define vqrdmlashq_m_n_s16(__a, __b, __c, __p) __arm_vqrdmlashq_m_n_s16(__a, __b, __c, __p)
#define vqrdmlsdhq_m_s8(__inactive, __a, __b, __p) __arm_vqrdmlsdhq_m_s8(__inactive, __a, __b, __p)
#define vqrdmlsdhq_m_s32(__inactive, __a, __b, __p) __arm_vqrdmlsdhq_m_s32(__inactive, __a, __b, __p)
#define vqrdmlsdhq_m_s16(__inactive, __a, __b, __p) __arm_vqrdmlsdhq_m_s16(__inactive, __a, __b, __p)
#define vqrdmlsdhxq_m_s8(__inactive, __a, __b, __p) __arm_vqrdmlsdhxq_m_s8(__inactive, __a, __b, __p)
#define vqrdmlsdhxq_m_s32(__inactive, __a, __b, __p) __arm_vqrdmlsdhxq_m_s32(__inactive, __a, __b, __p)
#define vqrdmlsdhxq_m_s16(__inactive, __a, __b, __p) __arm_vqrdmlsdhxq_m_s16(__inactive, __a, __b, __p)
#define vqrdmulhq_m_n_s8(__inactive, __a, __b, __p) __arm_vqrdmulhq_m_n_s8(__inactive, __a, __b, __p)
#define vqrdmulhq_m_n_s32(__inactive, __a, __b, __p) __arm_vqrdmulhq_m_n_s32(__inactive, __a, __b, __p)
#define vqrdmulhq_m_n_s16(__inactive, __a, __b, __p) __arm_vqrdmulhq_m_n_s16(__inactive, __a, __b, __p)
#define vqrdmulhq_m_s8(__inactive, __a, __b, __p) __arm_vqrdmulhq_m_s8(__inactive, __a, __b, __p)
#define vqrdmulhq_m_s32(__inactive, __a, __b, __p) __arm_vqrdmulhq_m_s32(__inactive, __a, __b, __p)
#define vqrdmulhq_m_s16(__inactive, __a, __b, __p) __arm_vqrdmulhq_m_s16(__inactive, __a, __b, __p)
#define vqrshlq_m_s8(__inactive, __a, __b, __p) __arm_vqrshlq_m_s8(__inactive, __a, __b, __p)
#define vqrshlq_m_s32(__inactive, __a, __b, __p) __arm_vqrshlq_m_s32(__inactive, __a, __b, __p)
#define vqrshlq_m_s16(__inactive, __a, __b, __p) __arm_vqrshlq_m_s16(__inactive, __a, __b, __p)
#define vqrshlq_m_u8(__inactive, __a, __b, __p) __arm_vqrshlq_m_u8(__inactive, __a, __b, __p)
#define vqrshlq_m_u32(__inactive, __a, __b, __p) __arm_vqrshlq_m_u32(__inactive, __a, __b, __p)
#define vqrshlq_m_u16(__inactive, __a, __b, __p) __arm_vqrshlq_m_u16(__inactive, __a, __b, __p)
#define vqshlq_m_n_s8(__inactive, __a, __imm, __p) __arm_vqshlq_m_n_s8(__inactive, __a, __imm, __p)
#define vqshlq_m_n_s32(__inactive, __a, __imm, __p) __arm_vqshlq_m_n_s32(__inactive, __a, __imm, __p)
#define vqshlq_m_n_s16(__inactive, __a, __imm, __p) __arm_vqshlq_m_n_s16(__inactive, __a, __imm, __p)
#define vqshlq_m_n_u8(__inactive, __a, __imm, __p) __arm_vqshlq_m_n_u8(__inactive, __a, __imm, __p)
#define vqshlq_m_n_u32(__inactive, __a, __imm, __p) __arm_vqshlq_m_n_u32(__inactive, __a, __imm, __p)
#define vqshlq_m_n_u16(__inactive, __a, __imm, __p) __arm_vqshlq_m_n_u16(__inactive, __a, __imm, __p)
#define vqshlq_m_s8(__inactive, __a, __b, __p) __arm_vqshlq_m_s8(__inactive, __a, __b, __p)
#define vqshlq_m_s32(__inactive, __a, __b, __p) __arm_vqshlq_m_s32(__inactive, __a, __b, __p)
#define vqshlq_m_s16(__inactive, __a, __b, __p) __arm_vqshlq_m_s16(__inactive, __a, __b, __p)
#define vqshlq_m_u8(__inactive, __a, __b, __p) __arm_vqshlq_m_u8(__inactive, __a, __b, __p)
#define vqshlq_m_u32(__inactive, __a, __b, __p) __arm_vqshlq_m_u32(__inactive, __a, __b, __p)
#define vqshlq_m_u16(__inactive, __a, __b, __p) __arm_vqshlq_m_u16(__inactive, __a, __b, __p)
#define vqsubq_m_n_s8(__inactive, __a, __b, __p) __arm_vqsubq_m_n_s8(__inactive, __a, __b, __p)
#define vqsubq_m_n_s32(__inactive, __a, __b, __p) __arm_vqsubq_m_n_s32(__inactive, __a, __b, __p)
#define vqsubq_m_n_s16(__inactive, __a, __b, __p) __arm_vqsubq_m_n_s16(__inactive, __a, __b, __p)
#define vqsubq_m_n_u8(__inactive, __a, __b, __p) __arm_vqsubq_m_n_u8(__inactive, __a, __b, __p)
#define vqsubq_m_n_u32(__inactive, __a, __b, __p) __arm_vqsubq_m_n_u32(__inactive, __a, __b, __p)
#define vqsubq_m_n_u16(__inactive, __a, __b, __p) __arm_vqsubq_m_n_u16(__inactive, __a, __b, __p)
#define vqsubq_m_s8(__inactive, __a, __b, __p) __arm_vqsubq_m_s8(__inactive, __a, __b, __p)
#define vqsubq_m_s32(__inactive, __a, __b, __p) __arm_vqsubq_m_s32(__inactive, __a, __b, __p)
#define vqsubq_m_s16(__inactive, __a, __b, __p) __arm_vqsubq_m_s16(__inactive, __a, __b, __p)
#define vqsubq_m_u8(__inactive, __a, __b, __p) __arm_vqsubq_m_u8(__inactive, __a, __b, __p)
#define vqsubq_m_u32(__inactive, __a, __b, __p) __arm_vqsubq_m_u32(__inactive, __a, __b, __p)
#define vqsubq_m_u16(__inactive, __a, __b, __p) __arm_vqsubq_m_u16(__inactive, __a, __b, __p)
#define vrhaddq_m_s8(__inactive, __a, __b, __p) __arm_vrhaddq_m_s8(__inactive, __a, __b, __p)
#define vrhaddq_m_s32(__inactive, __a, __b, __p) __arm_vrhaddq_m_s32(__inactive, __a, __b, __p)
#define vrhaddq_m_s16(__inactive, __a, __b, __p) __arm_vrhaddq_m_s16(__inactive, __a, __b, __p)
#define vrhaddq_m_u8(__inactive, __a, __b, __p) __arm_vrhaddq_m_u8(__inactive, __a, __b, __p)
#define vrhaddq_m_u32(__inactive, __a, __b, __p) __arm_vrhaddq_m_u32(__inactive, __a, __b, __p)
#define vrhaddq_m_u16(__inactive, __a, __b, __p) __arm_vrhaddq_m_u16(__inactive, __a, __b, __p)
#define vrmulhq_m_s8(__inactive, __a, __b, __p) __arm_vrmulhq_m_s8(__inactive, __a, __b, __p)
#define vrmulhq_m_s32(__inactive, __a, __b, __p) __arm_vrmulhq_m_s32(__inactive, __a, __b, __p)
#define vrmulhq_m_s16(__inactive, __a, __b, __p) __arm_vrmulhq_m_s16(__inactive, __a, __b, __p)
#define vrmulhq_m_u8(__inactive, __a, __b, __p) __arm_vrmulhq_m_u8(__inactive, __a, __b, __p)
#define vrmulhq_m_u32(__inactive, __a, __b, __p) __arm_vrmulhq_m_u32(__inactive, __a, __b, __p)
#define vrmulhq_m_u16(__inactive, __a, __b, __p) __arm_vrmulhq_m_u16(__inactive, __a, __b, __p)
#define vrshlq_m_s8(__inactive, __a, __b, __p) __arm_vrshlq_m_s8(__inactive, __a, __b, __p)
#define vrshlq_m_s32(__inactive, __a, __b, __p) __arm_vrshlq_m_s32(__inactive, __a, __b, __p)
#define vrshlq_m_s16(__inactive, __a, __b, __p) __arm_vrshlq_m_s16(__inactive, __a, __b, __p)
#define vrshlq_m_u8(__inactive, __a, __b, __p) __arm_vrshlq_m_u8(__inactive, __a, __b, __p)
#define vrshlq_m_u32(__inactive, __a, __b, __p) __arm_vrshlq_m_u32(__inactive, __a, __b, __p)
#define vrshlq_m_u16(__inactive, __a, __b, __p) __arm_vrshlq_m_u16(__inactive, __a, __b, __p)
#define vrshrq_m_n_s8(__inactive, __a, __imm, __p) __arm_vrshrq_m_n_s8(__inactive, __a, __imm, __p)
#define vrshrq_m_n_s32(__inactive, __a, __imm, __p) __arm_vrshrq_m_n_s32(__inactive, __a, __imm, __p)
#define vrshrq_m_n_s16(__inactive, __a, __imm, __p) __arm_vrshrq_m_n_s16(__inactive, __a, __imm, __p)
#define vrshrq_m_n_u8(__inactive, __a, __imm, __p) __arm_vrshrq_m_n_u8(__inactive, __a, __imm, __p)
#define vrshrq_m_n_u32(__inactive, __a, __imm, __p) __arm_vrshrq_m_n_u32(__inactive, __a, __imm, __p)
#define vrshrq_m_n_u16(__inactive, __a, __imm, __p) __arm_vrshrq_m_n_u16(__inactive, __a, __imm, __p)
#define vshlq_m_n_s8(__inactive, __a, __imm, __p) __arm_vshlq_m_n_s8(__inactive, __a, __imm, __p)
#define vshlq_m_n_s32(__inactive, __a, __imm, __p) __arm_vshlq_m_n_s32(__inactive, __a, __imm, __p)
#define vshlq_m_n_s16(__inactive, __a, __imm, __p) __arm_vshlq_m_n_s16(__inactive, __a, __imm, __p)
#define vshlq_m_n_u8(__inactive, __a, __imm, __p) __arm_vshlq_m_n_u8(__inactive, __a, __imm, __p)
#define vshlq_m_n_u32(__inactive, __a, __imm, __p) __arm_vshlq_m_n_u32(__inactive, __a, __imm, __p)
#define vshlq_m_n_u16(__inactive, __a, __imm, __p) __arm_vshlq_m_n_u16(__inactive, __a, __imm, __p)
#define vshrq_m_n_s8(__inactive, __a, __imm, __p) __arm_vshrq_m_n_s8(__inactive, __a, __imm, __p)
#define vshrq_m_n_s32(__inactive, __a, __imm, __p) __arm_vshrq_m_n_s32(__inactive, __a, __imm, __p)
#define vshrq_m_n_s16(__inactive, __a, __imm, __p) __arm_vshrq_m_n_s16(__inactive, __a, __imm, __p)
#define vshrq_m_n_u8(__inactive, __a, __imm, __p) __arm_vshrq_m_n_u8(__inactive, __a, __imm, __p)
#define vshrq_m_n_u32(__inactive, __a, __imm, __p) __arm_vshrq_m_n_u32(__inactive, __a, __imm, __p)
#define vshrq_m_n_u16(__inactive, __a, __imm, __p) __arm_vshrq_m_n_u16(__inactive, __a, __imm, __p)
#define vsliq_m_n_s8(__a, __b, __imm, __p) __arm_vsliq_m_n_s8(__a, __b, __imm, __p)
#define vsliq_m_n_s32(__a, __b, __imm, __p) __arm_vsliq_m_n_s32(__a, __b, __imm, __p)
#define vsliq_m_n_s16(__a, __b, __imm, __p) __arm_vsliq_m_n_s16(__a, __b, __imm, __p)
#define vsliq_m_n_u8(__a, __b, __imm, __p) __arm_vsliq_m_n_u8(__a, __b, __imm, __p)
#define vsliq_m_n_u32(__a, __b, __imm, __p) __arm_vsliq_m_n_u32(__a, __b, __imm, __p)
#define vsliq_m_n_u16(__a, __b, __imm, __p) __arm_vsliq_m_n_u16(__a, __b, __imm, __p)
#define vsubq_m_n_s8(__inactive, __a, __b, __p) __arm_vsubq_m_n_s8(__inactive, __a, __b, __p)
#define vsubq_m_n_s32(__inactive, __a, __b, __p) __arm_vsubq_m_n_s32(__inactive, __a, __b, __p)
#define vsubq_m_n_s16(__inactive, __a, __b, __p) __arm_vsubq_m_n_s16(__inactive, __a, __b, __p)
#define vsubq_m_n_u8(__inactive, __a, __b, __p) __arm_vsubq_m_n_u8(__inactive, __a, __b, __p)
#define vsubq_m_n_u32(__inactive, __a, __b, __p) __arm_vsubq_m_n_u32(__inactive, __a, __b, __p)
#define vsubq_m_n_u16(__inactive, __a, __b, __p) __arm_vsubq_m_n_u16(__inactive, __a, __b, __p)
#define vmlaldavaq_p_s32(__a, __b, __c, __p) __arm_vmlaldavaq_p_s32(__a, __b, __c, __p)
#define vmlaldavaq_p_s16(__a, __b, __c, __p) __arm_vmlaldavaq_p_s16(__a, __b, __c, __p)
#define vmlaldavaq_p_u32(__a, __b, __c, __p) __arm_vmlaldavaq_p_u32(__a, __b, __c, __p)
#define vmlaldavaq_p_u16(__a, __b, __c, __p) __arm_vmlaldavaq_p_u16(__a, __b, __c, __p)
#define vmlaldavaxq_p_s32(__a, __b, __c, __p) __arm_vmlaldavaxq_p_s32(__a, __b, __c, __p)
#define vmlaldavaxq_p_s16(__a, __b, __c, __p) __arm_vmlaldavaxq_p_s16(__a, __b, __c, __p)
#define vmlaldavaxq_p_u32(__a, __b, __c, __p) __arm_vmlaldavaxq_p_u32(__a, __b, __c, __p)
#define vmlaldavaxq_p_u16(__a, __b, __c, __p) __arm_vmlaldavaxq_p_u16(__a, __b, __c, __p)
#define vmlsldavaq_p_s32(__a, __b, __c, __p) __arm_vmlsldavaq_p_s32(__a, __b, __c, __p)
#define vmlsldavaq_p_s16(__a, __b, __c, __p) __arm_vmlsldavaq_p_s16(__a, __b, __c, __p)
#define vmlsldavaxq_p_s32(__a, __b, __c, __p) __arm_vmlsldavaxq_p_s32(__a, __b, __c, __p)
#define vmlsldavaxq_p_s16(__a, __b, __c, __p) __arm_vmlsldavaxq_p_s16(__a, __b, __c, __p)
#define vmullbq_poly_m_p8(__inactive, __a, __b, __p) __arm_vmullbq_poly_m_p8(__inactive, __a, __b, __p)
#define vmullbq_poly_m_p16(__inactive, __a, __b, __p) __arm_vmullbq_poly_m_p16(__inactive, __a, __b, __p)
#define vmulltq_poly_m_p8(__inactive, __a, __b, __p) __arm_vmulltq_poly_m_p8(__inactive, __a, __b, __p)
#define vmulltq_poly_m_p16(__inactive, __a, __b, __p) __arm_vmulltq_poly_m_p16(__inactive, __a, __b, __p)
#define vqdmullbq_m_n_s32(__inactive, __a, __b, __p) __arm_vqdmullbq_m_n_s32(__inactive, __a, __b, __p)
#define vqdmullbq_m_n_s16(__inactive, __a, __b, __p) __arm_vqdmullbq_m_n_s16(__inactive, __a, __b, __p)
#define vqdmullbq_m_s32(__inactive, __a, __b, __p) __arm_vqdmullbq_m_s32(__inactive, __a, __b, __p)
#define vqdmullbq_m_s16(__inactive, __a, __b, __p) __arm_vqdmullbq_m_s16(__inactive, __a, __b, __p)
#define vqdmulltq_m_n_s32(__inactive, __a, __b, __p) __arm_vqdmulltq_m_n_s32(__inactive, __a, __b, __p)
#define vqdmulltq_m_n_s16(__inactive, __a, __b, __p) __arm_vqdmulltq_m_n_s16(__inactive, __a, __b, __p)
#define vqdmulltq_m_s32(__inactive, __a, __b, __p) __arm_vqdmulltq_m_s32(__inactive, __a, __b, __p)
#define vqdmulltq_m_s16(__inactive, __a, __b, __p) __arm_vqdmulltq_m_s16(__inactive, __a, __b, __p)
#define vqrshrnbq_m_n_s32(__a, __b, __imm, __p) __arm_vqrshrnbq_m_n_s32(__a, __b, __imm, __p)
#define vqrshrnbq_m_n_s16(__a, __b, __imm, __p) __arm_vqrshrnbq_m_n_s16(__a, __b, __imm, __p)
#define vqrshrnbq_m_n_u32(__a, __b, __imm, __p) __arm_vqrshrnbq_m_n_u32(__a, __b, __imm, __p)
#define vqrshrnbq_m_n_u16(__a, __b, __imm, __p) __arm_vqrshrnbq_m_n_u16(__a, __b, __imm, __p)
#define vqrshrntq_m_n_s32(__a, __b, __imm, __p) __arm_vqrshrntq_m_n_s32(__a, __b, __imm, __p)
#define vqrshrntq_m_n_s16(__a, __b, __imm, __p) __arm_vqrshrntq_m_n_s16(__a, __b, __imm, __p)
#define vqrshrntq_m_n_u32(__a, __b, __imm, __p) __arm_vqrshrntq_m_n_u32(__a, __b, __imm, __p)
#define vqrshrntq_m_n_u16(__a, __b, __imm, __p) __arm_vqrshrntq_m_n_u16(__a, __b, __imm, __p)
#define vqrshrunbq_m_n_s32(__a, __b, __imm, __p) __arm_vqrshrunbq_m_n_s32(__a, __b, __imm, __p)
#define vqrshrunbq_m_n_s16(__a, __b, __imm, __p) __arm_vqrshrunbq_m_n_s16(__a, __b, __imm, __p)
#define vqrshruntq_m_n_s32(__a, __b, __imm, __p) __arm_vqrshruntq_m_n_s32(__a, __b, __imm, __p)
#define vqrshruntq_m_n_s16(__a, __b, __imm, __p) __arm_vqrshruntq_m_n_s16(__a, __b, __imm, __p)
#define vqshrnbq_m_n_s32(__a, __b, __imm, __p) __arm_vqshrnbq_m_n_s32(__a, __b, __imm, __p)
1607 | #define vqshrnbq_m_n_s16(__a, __b, __imm, __p) __arm_vqshrnbq_m_n_s16(__a, __b, __imm, __p) | |
1608 | #define vqshrnbq_m_n_u32(__a, __b, __imm, __p) __arm_vqshrnbq_m_n_u32(__a, __b, __imm, __p) | |
1609 | #define vqshrnbq_m_n_u16(__a, __b, __imm, __p) __arm_vqshrnbq_m_n_u16(__a, __b, __imm, __p) | |
1610 | #define vqshrntq_m_n_s32(__a, __b, __imm, __p) __arm_vqshrntq_m_n_s32(__a, __b, __imm, __p) | |
1611 | #define vqshrntq_m_n_s16(__a, __b, __imm, __p) __arm_vqshrntq_m_n_s16(__a, __b, __imm, __p) | |
1612 | #define vqshrntq_m_n_u32(__a, __b, __imm, __p) __arm_vqshrntq_m_n_u32(__a, __b, __imm, __p) | |
1613 | #define vqshrntq_m_n_u16(__a, __b, __imm, __p) __arm_vqshrntq_m_n_u16(__a, __b, __imm, __p) | |
1614 | #define vqshrunbq_m_n_s32(__a, __b, __imm, __p) __arm_vqshrunbq_m_n_s32(__a, __b, __imm, __p) | |
1615 | #define vqshrunbq_m_n_s16(__a, __b, __imm, __p) __arm_vqshrunbq_m_n_s16(__a, __b, __imm, __p) | |
1616 | #define vqshruntq_m_n_s32(__a, __b, __imm, __p) __arm_vqshruntq_m_n_s32(__a, __b, __imm, __p) | |
1617 | #define vqshruntq_m_n_s16(__a, __b, __imm, __p) __arm_vqshruntq_m_n_s16(__a, __b, __imm, __p) | |
1618 | #define vrmlaldavhaq_p_s32(__a, __b, __c, __p) __arm_vrmlaldavhaq_p_s32(__a, __b, __c, __p) | |
1619 | #define vrmlaldavhaq_p_u32(__a, __b, __c, __p) __arm_vrmlaldavhaq_p_u32(__a, __b, __c, __p) | |
1620 | #define vrmlaldavhaxq_p_s32(__a, __b, __c, __p) __arm_vrmlaldavhaxq_p_s32(__a, __b, __c, __p) | |
1621 | #define vrmlsldavhaq_p_s32(__a, __b, __c, __p) __arm_vrmlsldavhaq_p_s32(__a, __b, __c, __p) | |
1622 | #define vrmlsldavhaxq_p_s32(__a, __b, __c, __p) __arm_vrmlsldavhaxq_p_s32(__a, __b, __c, __p) | |
1623 | #define vrshrnbq_m_n_s32(__a, __b, __imm, __p) __arm_vrshrnbq_m_n_s32(__a, __b, __imm, __p) | |
1624 | #define vrshrnbq_m_n_s16(__a, __b, __imm, __p) __arm_vrshrnbq_m_n_s16(__a, __b, __imm, __p) | |
1625 | #define vrshrnbq_m_n_u32(__a, __b, __imm, __p) __arm_vrshrnbq_m_n_u32(__a, __b, __imm, __p) | |
1626 | #define vrshrnbq_m_n_u16(__a, __b, __imm, __p) __arm_vrshrnbq_m_n_u16(__a, __b, __imm, __p) | |
1627 | #define vrshrntq_m_n_s32(__a, __b, __imm, __p) __arm_vrshrntq_m_n_s32(__a, __b, __imm, __p) | |
1628 | #define vrshrntq_m_n_s16(__a, __b, __imm, __p) __arm_vrshrntq_m_n_s16(__a, __b, __imm, __p) | |
1629 | #define vrshrntq_m_n_u32(__a, __b, __imm, __p) __arm_vrshrntq_m_n_u32(__a, __b, __imm, __p) | |
1630 | #define vrshrntq_m_n_u16(__a, __b, __imm, __p) __arm_vrshrntq_m_n_u16(__a, __b, __imm, __p) | |
1631 | #define vshllbq_m_n_s8(__inactive, __a, __imm, __p) __arm_vshllbq_m_n_s8(__inactive, __a, __imm, __p) | |
1632 | #define vshllbq_m_n_s16(__inactive, __a, __imm, __p) __arm_vshllbq_m_n_s16(__inactive, __a, __imm, __p) | |
1633 | #define vshllbq_m_n_u8(__inactive, __a, __imm, __p) __arm_vshllbq_m_n_u8(__inactive, __a, __imm, __p) | |
1634 | #define vshllbq_m_n_u16(__inactive, __a, __imm, __p) __arm_vshllbq_m_n_u16(__inactive, __a, __imm, __p) | |
1635 | #define vshlltq_m_n_s8(__inactive, __a, __imm, __p) __arm_vshlltq_m_n_s8(__inactive, __a, __imm, __p) | |
1636 | #define vshlltq_m_n_s16(__inactive, __a, __imm, __p) __arm_vshlltq_m_n_s16(__inactive, __a, __imm, __p) | |
1637 | #define vshlltq_m_n_u8(__inactive, __a, __imm, __p) __arm_vshlltq_m_n_u8(__inactive, __a, __imm, __p) | |
1638 | #define vshlltq_m_n_u16(__inactive, __a, __imm, __p) __arm_vshlltq_m_n_u16(__inactive, __a, __imm, __p) | |
1639 | #define vshrnbq_m_n_s32(__a, __b, __imm, __p) __arm_vshrnbq_m_n_s32(__a, __b, __imm, __p) | |
1640 | #define vshrnbq_m_n_s16(__a, __b, __imm, __p) __arm_vshrnbq_m_n_s16(__a, __b, __imm, __p) | |
1641 | #define vshrnbq_m_n_u32(__a, __b, __imm, __p) __arm_vshrnbq_m_n_u32(__a, __b, __imm, __p) | |
1642 | #define vshrnbq_m_n_u16(__a, __b, __imm, __p) __arm_vshrnbq_m_n_u16(__a, __b, __imm, __p) | |
1643 | #define vshrntq_m_n_s32(__a, __b, __imm, __p) __arm_vshrntq_m_n_s32(__a, __b, __imm, __p) | |
1644 | #define vshrntq_m_n_s16(__a, __b, __imm, __p) __arm_vshrntq_m_n_s16(__a, __b, __imm, __p) | |
1645 | #define vshrntq_m_n_u32(__a, __b, __imm, __p) __arm_vshrntq_m_n_u32(__a, __b, __imm, __p) | |
1646 | #define vshrntq_m_n_u16(__a, __b, __imm, __p) __arm_vshrntq_m_n_u16(__a, __b, __imm, __p) | |
/* Predicated (merging) floating-point MVE intrinsics (f16/f32).
   Pure forwarding macros onto the __arm_* builtin wrappers.  */
#define vabdq_m_f32(__inactive, __a, __b, __p) __arm_vabdq_m_f32(__inactive, __a, __b, __p)
#define vabdq_m_f16(__inactive, __a, __b, __p) __arm_vabdq_m_f16(__inactive, __a, __b, __p)
#define vaddq_m_f32(__inactive, __a, __b, __p) __arm_vaddq_m_f32(__inactive, __a, __b, __p)
#define vaddq_m_f16(__inactive, __a, __b, __p) __arm_vaddq_m_f16(__inactive, __a, __b, __p)
#define vaddq_m_n_f32(__inactive, __a, __b, __p) __arm_vaddq_m_n_f32(__inactive, __a, __b, __p)
#define vaddq_m_n_f16(__inactive, __a, __b, __p) __arm_vaddq_m_n_f16(__inactive, __a, __b, __p)
#define vandq_m_f32(__inactive, __a, __b, __p) __arm_vandq_m_f32(__inactive, __a, __b, __p)
#define vandq_m_f16(__inactive, __a, __b, __p) __arm_vandq_m_f16(__inactive, __a, __b, __p)
#define vbicq_m_f32(__inactive, __a, __b, __p) __arm_vbicq_m_f32(__inactive, __a, __b, __p)
#define vbicq_m_f16(__inactive, __a, __b, __p) __arm_vbicq_m_f16(__inactive, __a, __b, __p)
#define vbrsrq_m_n_f32(__inactive, __a, __b, __p) __arm_vbrsrq_m_n_f32(__inactive, __a, __b, __p)
#define vbrsrq_m_n_f16(__inactive, __a, __b, __p) __arm_vbrsrq_m_n_f16(__inactive, __a, __b, __p)
#define vcaddq_rot270_m_f32(__inactive, __a, __b, __p) __arm_vcaddq_rot270_m_f32(__inactive, __a, __b, __p)
#define vcaddq_rot270_m_f16(__inactive, __a, __b, __p) __arm_vcaddq_rot270_m_f16(__inactive, __a, __b, __p)
#define vcaddq_rot90_m_f32(__inactive, __a, __b, __p) __arm_vcaddq_rot90_m_f32(__inactive, __a, __b, __p)
#define vcaddq_rot90_m_f16(__inactive, __a, __b, __p) __arm_vcaddq_rot90_m_f16(__inactive, __a, __b, __p)
#define vcmlaq_m_f32(__a, __b, __c, __p) __arm_vcmlaq_m_f32(__a, __b, __c, __p)
#define vcmlaq_m_f16(__a, __b, __c, __p) __arm_vcmlaq_m_f16(__a, __b, __c, __p)
#define vcmlaq_rot180_m_f32(__a, __b, __c, __p) __arm_vcmlaq_rot180_m_f32(__a, __b, __c, __p)
#define vcmlaq_rot180_m_f16(__a, __b, __c, __p) __arm_vcmlaq_rot180_m_f16(__a, __b, __c, __p)
#define vcmlaq_rot270_m_f32(__a, __b, __c, __p) __arm_vcmlaq_rot270_m_f32(__a, __b, __c, __p)
#define vcmlaq_rot270_m_f16(__a, __b, __c, __p) __arm_vcmlaq_rot270_m_f16(__a, __b, __c, __p)
#define vcmlaq_rot90_m_f32(__a, __b, __c, __p) __arm_vcmlaq_rot90_m_f32(__a, __b, __c, __p)
#define vcmlaq_rot90_m_f16(__a, __b, __c, __p) __arm_vcmlaq_rot90_m_f16(__a, __b, __c, __p)
#define vcmulq_m_f32(__inactive, __a, __b, __p) __arm_vcmulq_m_f32(__inactive, __a, __b, __p)
#define vcmulq_m_f16(__inactive, __a, __b, __p) __arm_vcmulq_m_f16(__inactive, __a, __b, __p)
#define vcmulq_rot180_m_f32(__inactive, __a, __b, __p) __arm_vcmulq_rot180_m_f32(__inactive, __a, __b, __p)
#define vcmulq_rot180_m_f16(__inactive, __a, __b, __p) __arm_vcmulq_rot180_m_f16(__inactive, __a, __b, __p)
#define vcmulq_rot270_m_f32(__inactive, __a, __b, __p) __arm_vcmulq_rot270_m_f32(__inactive, __a, __b, __p)
#define vcmulq_rot270_m_f16(__inactive, __a, __b, __p) __arm_vcmulq_rot270_m_f16(__inactive, __a, __b, __p)
#define vcmulq_rot90_m_f32(__inactive, __a, __b, __p) __arm_vcmulq_rot90_m_f32(__inactive, __a, __b, __p)
#define vcmulq_rot90_m_f16(__inactive, __a, __b, __p) __arm_vcmulq_rot90_m_f16(__inactive, __a, __b, __p)
#define vcvtq_m_n_s32_f32(__inactive, __a, __imm6, __p) __arm_vcvtq_m_n_s32_f32(__inactive, __a, __imm6, __p)
#define vcvtq_m_n_s16_f16(__inactive, __a, __imm6, __p) __arm_vcvtq_m_n_s16_f16(__inactive, __a, __imm6, __p)
#define vcvtq_m_n_u32_f32(__inactive, __a, __imm6, __p) __arm_vcvtq_m_n_u32_f32(__inactive, __a, __imm6, __p)
#define vcvtq_m_n_u16_f16(__inactive, __a, __imm6, __p) __arm_vcvtq_m_n_u16_f16(__inactive, __a, __imm6, __p)
#define veorq_m_f32(__inactive, __a, __b, __p) __arm_veorq_m_f32(__inactive, __a, __b, __p)
#define veorq_m_f16(__inactive, __a, __b, __p) __arm_veorq_m_f16(__inactive, __a, __b, __p)
#define vfmaq_m_f32(__a, __b, __c, __p) __arm_vfmaq_m_f32(__a, __b, __c, __p)
#define vfmaq_m_f16(__a, __b, __c, __p) __arm_vfmaq_m_f16(__a, __b, __c, __p)
#define vfmaq_m_n_f32(__a, __b, __c, __p) __arm_vfmaq_m_n_f32(__a, __b, __c, __p)
#define vfmaq_m_n_f16(__a, __b, __c, __p) __arm_vfmaq_m_n_f16(__a, __b, __c, __p)
#define vfmasq_m_n_f32(__a, __b, __c, __p) __arm_vfmasq_m_n_f32(__a, __b, __c, __p)
#define vfmasq_m_n_f16(__a, __b, __c, __p) __arm_vfmasq_m_n_f16(__a, __b, __c, __p)
#define vfmsq_m_f32(__a, __b, __c, __p) __arm_vfmsq_m_f32(__a, __b, __c, __p)
#define vfmsq_m_f16(__a, __b, __c, __p) __arm_vfmsq_m_f16(__a, __b, __c, __p)
#define vmaxnmq_m_f32(__inactive, __a, __b, __p) __arm_vmaxnmq_m_f32(__inactive, __a, __b, __p)
#define vmaxnmq_m_f16(__inactive, __a, __b, __p) __arm_vmaxnmq_m_f16(__inactive, __a, __b, __p)
#define vminnmq_m_f32(__inactive, __a, __b, __p) __arm_vminnmq_m_f32(__inactive, __a, __b, __p)
#define vminnmq_m_f16(__inactive, __a, __b, __p) __arm_vminnmq_m_f16(__inactive, __a, __b, __p)
#define vmulq_m_f32(__inactive, __a, __b, __p) __arm_vmulq_m_f32(__inactive, __a, __b, __p)
#define vmulq_m_f16(__inactive, __a, __b, __p) __arm_vmulq_m_f16(__inactive, __a, __b, __p)
#define vmulq_m_n_f32(__inactive, __a, __b, __p) __arm_vmulq_m_n_f32(__inactive, __a, __b, __p)
#define vmulq_m_n_f16(__inactive, __a, __b, __p) __arm_vmulq_m_n_f16(__inactive, __a, __b, __p)
#define vornq_m_f32(__inactive, __a, __b, __p) __arm_vornq_m_f32(__inactive, __a, __b, __p)
#define vornq_m_f16(__inactive, __a, __b, __p) __arm_vornq_m_f16(__inactive, __a, __b, __p)
#define vorrq_m_f32(__inactive, __a, __b, __p) __arm_vorrq_m_f32(__inactive, __a, __b, __p)
#define vorrq_m_f16(__inactive, __a, __b, __p) __arm_vorrq_m_f16(__inactive, __a, __b, __p)
#define vsubq_m_f32(__inactive, __a, __b, __p) __arm_vsubq_m_f32(__inactive, __a, __b, __p)
#define vsubq_m_f16(__inactive, __a, __b, __p) __arm_vsubq_m_f16(__inactive, __a, __b, __p)
#define vsubq_m_n_f32(__inactive, __a, __b, __p) __arm_vsubq_m_n_f32(__inactive, __a, __b, __p)
#define vsubq_m_n_f16(__inactive, __a, __b, __p) __arm_vsubq_m_n_f16(__inactive, __a, __b, __p)
/* Unpredicated byte stores (vstrbq), byte scatter-offset stores, and
   word scatter-base stores.  Forwarding macros onto __arm_* wrappers.  */
#define vstrbq_s8( __addr, __value) __arm_vstrbq_s8( __addr, __value)
#define vstrbq_u8( __addr, __value) __arm_vstrbq_u8( __addr, __value)
#define vstrbq_u16( __addr, __value) __arm_vstrbq_u16( __addr, __value)
#define vstrbq_scatter_offset_s8( __base, __offset, __value) __arm_vstrbq_scatter_offset_s8( __base, __offset, __value)
#define vstrbq_scatter_offset_u8( __base, __offset, __value) __arm_vstrbq_scatter_offset_u8( __base, __offset, __value)
#define vstrbq_scatter_offset_u16( __base, __offset, __value) __arm_vstrbq_scatter_offset_u16( __base, __offset, __value)
#define vstrbq_s16( __addr, __value) __arm_vstrbq_s16( __addr, __value)
#define vstrbq_u32( __addr, __value) __arm_vstrbq_u32( __addr, __value)
#define vstrbq_scatter_offset_s16( __base, __offset, __value) __arm_vstrbq_scatter_offset_s16( __base, __offset, __value)
#define vstrbq_scatter_offset_u32( __base, __offset, __value) __arm_vstrbq_scatter_offset_u32( __base, __offset, __value)
#define vstrbq_s32( __addr, __value) __arm_vstrbq_s32( __addr, __value)
#define vstrbq_scatter_offset_s32( __base, __offset, __value) __arm_vstrbq_scatter_offset_s32( __base, __offset, __value)
#define vstrwq_scatter_base_s32(__addr, __offset, __value) __arm_vstrwq_scatter_base_s32(__addr, __offset, __value)
#define vstrwq_scatter_base_u32(__addr, __offset, __value) __arm_vstrwq_scatter_base_u32(__addr, __offset, __value)
/* Unpredicated byte loads (vldrbq), byte gather-offset loads, and word
   gather-base loads.  Forwarding macros onto __arm_* wrappers.  */
#define vldrbq_gather_offset_u8(__base, __offset) __arm_vldrbq_gather_offset_u8(__base, __offset)
#define vldrbq_gather_offset_s8(__base, __offset) __arm_vldrbq_gather_offset_s8(__base, __offset)
#define vldrbq_s8(__base) __arm_vldrbq_s8(__base)
#define vldrbq_u8(__base) __arm_vldrbq_u8(__base)
#define vldrbq_gather_offset_u16(__base, __offset) __arm_vldrbq_gather_offset_u16(__base, __offset)
#define vldrbq_gather_offset_s16(__base, __offset) __arm_vldrbq_gather_offset_s16(__base, __offset)
#define vldrbq_s16(__base) __arm_vldrbq_s16(__base)
#define vldrbq_u16(__base) __arm_vldrbq_u16(__base)
#define vldrbq_gather_offset_u32(__base, __offset) __arm_vldrbq_gather_offset_u32(__base, __offset)
#define vldrbq_gather_offset_s32(__base, __offset) __arm_vldrbq_gather_offset_s32(__base, __offset)
#define vldrbq_s32(__base) __arm_vldrbq_s32(__base)
#define vldrbq_u32(__base) __arm_vldrbq_u32(__base)
#define vldrwq_gather_base_s32(__addr, __offset) __arm_vldrwq_gather_base_s32(__addr, __offset)
#define vldrwq_gather_base_u32(__addr, __offset) __arm_vldrwq_gather_base_u32(__addr, __offset)
/* Predicated byte stores (vstrbq_p), predicated byte scatter-offset
   stores, and predicated word scatter-base stores.  */
#define vstrbq_p_s8( __addr, __value, __p) __arm_vstrbq_p_s8( __addr, __value, __p)
#define vstrbq_p_s32( __addr, __value, __p) __arm_vstrbq_p_s32( __addr, __value, __p)
#define vstrbq_p_s16( __addr, __value, __p) __arm_vstrbq_p_s16( __addr, __value, __p)
#define vstrbq_p_u8( __addr, __value, __p) __arm_vstrbq_p_u8( __addr, __value, __p)
#define vstrbq_p_u32( __addr, __value, __p) __arm_vstrbq_p_u32( __addr, __value, __p)
#define vstrbq_p_u16( __addr, __value, __p) __arm_vstrbq_p_u16( __addr, __value, __p)
#define vstrbq_scatter_offset_p_s8( __base, __offset, __value, __p) __arm_vstrbq_scatter_offset_p_s8( __base, __offset, __value, __p)
#define vstrbq_scatter_offset_p_s32( __base, __offset, __value, __p) __arm_vstrbq_scatter_offset_p_s32( __base, __offset, __value, __p)
#define vstrbq_scatter_offset_p_s16( __base, __offset, __value, __p) __arm_vstrbq_scatter_offset_p_s16( __base, __offset, __value, __p)
#define vstrbq_scatter_offset_p_u8( __base, __offset, __value, __p) __arm_vstrbq_scatter_offset_p_u8( __base, __offset, __value, __p)
#define vstrbq_scatter_offset_p_u32( __base, __offset, __value, __p) __arm_vstrbq_scatter_offset_p_u32( __base, __offset, __value, __p)
#define vstrbq_scatter_offset_p_u16( __base, __offset, __value, __p) __arm_vstrbq_scatter_offset_p_u16( __base, __offset, __value, __p)
#define vstrwq_scatter_base_p_s32(__addr, __offset, __value, __p) __arm_vstrwq_scatter_base_p_s32(__addr, __offset, __value, __p)
#define vstrwq_scatter_base_p_u32(__addr, __offset, __value, __p) __arm_vstrwq_scatter_base_p_u32(__addr, __offset, __value, __p)
/* Zero-predicated (_z) byte loads, byte gather-offset loads, and word
   gather-base loads.  Forwarding macros onto __arm_* wrappers.  */
#define vldrbq_gather_offset_z_s16(__base, __offset, __p) __arm_vldrbq_gather_offset_z_s16(__base, __offset, __p)
#define vldrbq_gather_offset_z_u8(__base, __offset, __p) __arm_vldrbq_gather_offset_z_u8(__base, __offset, __p)
#define vldrbq_gather_offset_z_s32(__base, __offset, __p) __arm_vldrbq_gather_offset_z_s32(__base, __offset, __p)
#define vldrbq_gather_offset_z_u16(__base, __offset, __p) __arm_vldrbq_gather_offset_z_u16(__base, __offset, __p)
#define vldrbq_gather_offset_z_u32(__base, __offset, __p) __arm_vldrbq_gather_offset_z_u32(__base, __offset, __p)
#define vldrbq_gather_offset_z_s8(__base, __offset, __p) __arm_vldrbq_gather_offset_z_s8(__base, __offset, __p)
#define vldrbq_z_s16(__base, __p) __arm_vldrbq_z_s16(__base, __p)
#define vldrbq_z_u8(__base, __p) __arm_vldrbq_z_u8(__base, __p)
#define vldrbq_z_s8(__base, __p) __arm_vldrbq_z_s8(__base, __p)
#define vldrbq_z_s32(__base, __p) __arm_vldrbq_z_s32(__base, __p)
#define vldrbq_z_u16(__base, __p) __arm_vldrbq_z_u16(__base, __p)
#define vldrbq_z_u32(__base, __p) __arm_vldrbq_z_u32(__base, __p)
#define vldrwq_gather_base_z_u32(__addr, __offset, __p) __arm_vldrwq_gather_base_z_u32(__addr, __offset, __p)
#define vldrwq_gather_base_z_s32(__addr, __offset, __p) __arm_vldrwq_gather_base_z_s32(__addr, __offset, __p)
/* Contiguous loads (vld1q), halfword loads/gathers (vldrhq) and word
   loads (vldrwq), including _z predicated and float variants.  */
#define vld1q_s8(__base) __arm_vld1q_s8(__base)
#define vld1q_s32(__base) __arm_vld1q_s32(__base)
#define vld1q_s16(__base) __arm_vld1q_s16(__base)
#define vld1q_u8(__base) __arm_vld1q_u8(__base)
#define vld1q_u32(__base) __arm_vld1q_u32(__base)
#define vld1q_u16(__base) __arm_vld1q_u16(__base)
#define vldrhq_gather_offset_s32(__base, __offset) __arm_vldrhq_gather_offset_s32(__base, __offset)
#define vldrhq_gather_offset_s16(__base, __offset) __arm_vldrhq_gather_offset_s16(__base, __offset)
#define vldrhq_gather_offset_u32(__base, __offset) __arm_vldrhq_gather_offset_u32(__base, __offset)
#define vldrhq_gather_offset_u16(__base, __offset) __arm_vldrhq_gather_offset_u16(__base, __offset)
#define vldrhq_gather_offset_z_s32(__base, __offset, __p) __arm_vldrhq_gather_offset_z_s32(__base, __offset, __p)
#define vldrhq_gather_offset_z_s16(__base, __offset, __p) __arm_vldrhq_gather_offset_z_s16(__base, __offset, __p)
#define vldrhq_gather_offset_z_u32(__base, __offset, __p) __arm_vldrhq_gather_offset_z_u32(__base, __offset, __p)
#define vldrhq_gather_offset_z_u16(__base, __offset, __p) __arm_vldrhq_gather_offset_z_u16(__base, __offset, __p)
#define vldrhq_gather_shifted_offset_s32(__base, __offset) __arm_vldrhq_gather_shifted_offset_s32(__base, __offset)
#define vldrhq_gather_shifted_offset_s16(__base, __offset) __arm_vldrhq_gather_shifted_offset_s16(__base, __offset)
#define vldrhq_gather_shifted_offset_u32(__base, __offset) __arm_vldrhq_gather_shifted_offset_u32(__base, __offset)
#define vldrhq_gather_shifted_offset_u16(__base, __offset) __arm_vldrhq_gather_shifted_offset_u16(__base, __offset)
#define vldrhq_gather_shifted_offset_z_s32(__base, __offset, __p) __arm_vldrhq_gather_shifted_offset_z_s32(__base, __offset, __p)
#define vldrhq_gather_shifted_offset_z_s16(__base, __offset, __p) __arm_vldrhq_gather_shifted_offset_z_s16(__base, __offset, __p)
#define vldrhq_gather_shifted_offset_z_u32(__base, __offset, __p) __arm_vldrhq_gather_shifted_offset_z_u32(__base, __offset, __p)
#define vldrhq_gather_shifted_offset_z_u16(__base, __offset, __p) __arm_vldrhq_gather_shifted_offset_z_u16(__base, __offset, __p)
#define vldrhq_s32(__base) __arm_vldrhq_s32(__base)
#define vldrhq_s16(__base) __arm_vldrhq_s16(__base)
#define vldrhq_u32(__base) __arm_vldrhq_u32(__base)
#define vldrhq_u16(__base) __arm_vldrhq_u16(__base)
#define vldrhq_z_s32(__base, __p) __arm_vldrhq_z_s32(__base, __p)
#define vldrhq_z_s16(__base, __p) __arm_vldrhq_z_s16(__base, __p)
#define vldrhq_z_u32(__base, __p) __arm_vldrhq_z_u32(__base, __p)
#define vldrhq_z_u16(__base, __p) __arm_vldrhq_z_u16(__base, __p)
#define vldrwq_s32(__base) __arm_vldrwq_s32(__base)
#define vldrwq_u32(__base) __arm_vldrwq_u32(__base)
#define vldrwq_z_s32(__base, __p) __arm_vldrwq_z_s32(__base, __p)
#define vldrwq_z_u32(__base, __p) __arm_vldrwq_z_u32(__base, __p)
#define vld1q_f32(__base) __arm_vld1q_f32(__base)
#define vld1q_f16(__base) __arm_vld1q_f16(__base)
#define vldrhq_f16(__base) __arm_vldrhq_f16(__base)
#define vldrhq_z_f16(__base, __p) __arm_vldrhq_z_f16(__base, __p)
#define vldrwq_f32(__base) __arm_vldrwq_f32(__base)
#define vldrwq_z_f32(__base, __p) __arm_vldrwq_z_f32(__base, __p)
/* Doubleword gather loads (vldrdq), float halfword/word gathers, and
   word gather-offset loads, with _z predicated variants.  */
#define vldrdq_gather_base_s64(__addr, __offset) __arm_vldrdq_gather_base_s64(__addr, __offset)
#define vldrdq_gather_base_u64(__addr, __offset) __arm_vldrdq_gather_base_u64(__addr, __offset)
#define vldrdq_gather_base_z_s64(__addr, __offset, __p) __arm_vldrdq_gather_base_z_s64(__addr, __offset, __p)
#define vldrdq_gather_base_z_u64(__addr, __offset, __p) __arm_vldrdq_gather_base_z_u64(__addr, __offset, __p)
#define vldrdq_gather_offset_s64(__base, __offset) __arm_vldrdq_gather_offset_s64(__base, __offset)
#define vldrdq_gather_offset_u64(__base, __offset) __arm_vldrdq_gather_offset_u64(__base, __offset)
#define vldrdq_gather_offset_z_s64(__base, __offset, __p) __arm_vldrdq_gather_offset_z_s64(__base, __offset, __p)
#define vldrdq_gather_offset_z_u64(__base, __offset, __p) __arm_vldrdq_gather_offset_z_u64(__base, __offset, __p)
#define vldrdq_gather_shifted_offset_s64(__base, __offset) __arm_vldrdq_gather_shifted_offset_s64(__base, __offset)
#define vldrdq_gather_shifted_offset_u64(__base, __offset) __arm_vldrdq_gather_shifted_offset_u64(__base, __offset)
#define vldrdq_gather_shifted_offset_z_s64(__base, __offset, __p) __arm_vldrdq_gather_shifted_offset_z_s64(__base, __offset, __p)
#define vldrdq_gather_shifted_offset_z_u64(__base, __offset, __p) __arm_vldrdq_gather_shifted_offset_z_u64(__base, __offset, __p)
#define vldrhq_gather_offset_f16(__base, __offset) __arm_vldrhq_gather_offset_f16(__base, __offset)
#define vldrhq_gather_offset_z_f16(__base, __offset, __p) __arm_vldrhq_gather_offset_z_f16(__base, __offset, __p)
#define vldrhq_gather_shifted_offset_f16(__base, __offset) __arm_vldrhq_gather_shifted_offset_f16(__base, __offset)
#define vldrhq_gather_shifted_offset_z_f16(__base, __offset, __p) __arm_vldrhq_gather_shifted_offset_z_f16(__base, __offset, __p)
#define vldrwq_gather_base_f32(__addr, __offset) __arm_vldrwq_gather_base_f32(__addr, __offset)
#define vldrwq_gather_base_z_f32(__addr, __offset, __p) __arm_vldrwq_gather_base_z_f32(__addr, __offset, __p)
#define vldrwq_gather_offset_f32(__base, __offset) __arm_vldrwq_gather_offset_f32(__base, __offset)
#define vldrwq_gather_offset_s32(__base, __offset) __arm_vldrwq_gather_offset_s32(__base, __offset)
#define vldrwq_gather_offset_u32(__base, __offset) __arm_vldrwq_gather_offset_u32(__base, __offset)
#define vldrwq_gather_offset_z_f32(__base, __offset, __p) __arm_vldrwq_gather_offset_z_f32(__base, __offset, __p)
#define vldrwq_gather_offset_z_s32(__base, __offset, __p) __arm_vldrwq_gather_offset_z_s32(__base, __offset, __p)
#define vldrwq_gather_offset_z_u32(__base, __offset, __p) __arm_vldrwq_gather_offset_z_u32(__base, __offset, __p)
#define vldrwq_gather_shifted_offset_f32(__base, __offset) __arm_vldrwq_gather_shifted_offset_f32(__base, __offset)
#define vldrwq_gather_shifted_offset_s32(__base, __offset) __arm_vldrwq_gather_shifted_offset_s32(__base, __offset)
#define vldrwq_gather_shifted_offset_u32(__base, __offset) __arm_vldrwq_gather_shifted_offset_u32(__base, __offset)
#define vldrwq_gather_shifted_offset_z_f32(__base, __offset, __p) __arm_vldrwq_gather_shifted_offset_z_f32(__base, __offset, __p)
#define vldrwq_gather_shifted_offset_z_s32(__base, __offset, __p) __arm_vldrwq_gather_shifted_offset_z_s32(__base, __offset, __p)
#define vldrwq_gather_shifted_offset_z_u32(__base, __offset, __p) __arm_vldrwq_gather_shifted_offset_z_u32(__base, __offset, __p)
/* Contiguous stores (vst1q), halfword stores/scatters (vstrhq) and word
   stores (vstrwq), including _p predicated and float variants.  */
#define vst1q_f32(__addr, __value) __arm_vst1q_f32(__addr, __value)
#define vst1q_f16(__addr, __value) __arm_vst1q_f16(__addr, __value)
#define vst1q_s8(__addr, __value) __arm_vst1q_s8(__addr, __value)
#define vst1q_s32(__addr, __value) __arm_vst1q_s32(__addr, __value)
#define vst1q_s16(__addr, __value) __arm_vst1q_s16(__addr, __value)
#define vst1q_u8(__addr, __value) __arm_vst1q_u8(__addr, __value)
#define vst1q_u32(__addr, __value) __arm_vst1q_u32(__addr, __value)
#define vst1q_u16(__addr, __value) __arm_vst1q_u16(__addr, __value)
#define vstrhq_f16(__addr, __value) __arm_vstrhq_f16(__addr, __value)
#define vstrhq_scatter_offset_s32( __base, __offset, __value) __arm_vstrhq_scatter_offset_s32( __base, __offset, __value)
#define vstrhq_scatter_offset_s16( __base, __offset, __value) __arm_vstrhq_scatter_offset_s16( __base, __offset, __value)
#define vstrhq_scatter_offset_u32( __base, __offset, __value) __arm_vstrhq_scatter_offset_u32( __base, __offset, __value)
#define vstrhq_scatter_offset_u16( __base, __offset, __value) __arm_vstrhq_scatter_offset_u16( __base, __offset, __value)
#define vstrhq_scatter_offset_p_s32( __base, __offset, __value, __p) __arm_vstrhq_scatter_offset_p_s32( __base, __offset, __value, __p)
#define vstrhq_scatter_offset_p_s16( __base, __offset, __value, __p) __arm_vstrhq_scatter_offset_p_s16( __base, __offset, __value, __p)
#define vstrhq_scatter_offset_p_u32( __base, __offset, __value, __p) __arm_vstrhq_scatter_offset_p_u32( __base, __offset, __value, __p)
#define vstrhq_scatter_offset_p_u16( __base, __offset, __value, __p) __arm_vstrhq_scatter_offset_p_u16( __base, __offset, __value, __p)
#define vstrhq_scatter_shifted_offset_s32( __base, __offset, __value) __arm_vstrhq_scatter_shifted_offset_s32( __base, __offset, __value)
#define vstrhq_scatter_shifted_offset_s16( __base, __offset, __value) __arm_vstrhq_scatter_shifted_offset_s16( __base, __offset, __value)
#define vstrhq_scatter_shifted_offset_u32( __base, __offset, __value) __arm_vstrhq_scatter_shifted_offset_u32( __base, __offset, __value)
#define vstrhq_scatter_shifted_offset_u16( __base, __offset, __value) __arm_vstrhq_scatter_shifted_offset_u16( __base, __offset, __value)
#define vstrhq_scatter_shifted_offset_p_s32( __base, __offset, __value, __p) __arm_vstrhq_scatter_shifted_offset_p_s32( __base, __offset, __value, __p)
#define vstrhq_scatter_shifted_offset_p_s16( __base, __offset, __value, __p) __arm_vstrhq_scatter_shifted_offset_p_s16( __base, __offset, __value, __p)
#define vstrhq_scatter_shifted_offset_p_u32( __base, __offset, __value, __p) __arm_vstrhq_scatter_shifted_offset_p_u32( __base, __offset, __value, __p)
#define vstrhq_scatter_shifted_offset_p_u16( __base, __offset, __value, __p) __arm_vstrhq_scatter_shifted_offset_p_u16( __base, __offset, __value, __p)
#define vstrhq_s32(__addr, __value) __arm_vstrhq_s32(__addr, __value)
#define vstrhq_s16(__addr, __value) __arm_vstrhq_s16(__addr, __value)
#define vstrhq_u32(__addr, __value) __arm_vstrhq_u32(__addr, __value)
#define vstrhq_u16(__addr, __value) __arm_vstrhq_u16(__addr, __value)
#define vstrhq_p_f16(__addr, __value, __p) __arm_vstrhq_p_f16(__addr, __value, __p)
#define vstrhq_p_s32(__addr, __value, __p) __arm_vstrhq_p_s32(__addr, __value, __p)
#define vstrhq_p_s16(__addr, __value, __p) __arm_vstrhq_p_s16(__addr, __value, __p)
#define vstrhq_p_u32(__addr, __value, __p) __arm_vstrhq_p_u32(__addr, __value, __p)
#define vstrhq_p_u16(__addr, __value, __p) __arm_vstrhq_p_u16(__addr, __value, __p)
#define vstrwq_f32(__addr, __value) __arm_vstrwq_f32(__addr, __value)
#define vstrwq_s32(__addr, __value) __arm_vstrwq_s32(__addr, __value)
#define vstrwq_u32(__addr, __value) __arm_vstrwq_u32(__addr, __value)
#define vstrwq_p_f32(__addr, __value, __p) __arm_vstrwq_p_f32(__addr, __value, __p)
#define vstrwq_p_s32(__addr, __value, __p) __arm_vstrwq_p_s32(__addr, __value, __p)
#define vstrwq_p_u32(__addr, __value, __p) __arm_vstrwq_p_u32(__addr, __value, __p)
/* Scatter-store intrinsics.  '_base' forms store relative to a vector of
   base addresses; '_offset' / '_shifted_offset' forms store to __base plus
   a vector of (optionally shifted) offsets; '_p' forms are predicated.  */
#define vstrdq_scatter_base_p_s64(__addr, __offset, __value, __p) __arm_vstrdq_scatter_base_p_s64(__addr, __offset, __value, __p)
#define vstrdq_scatter_base_p_u64(__addr, __offset, __value, __p) __arm_vstrdq_scatter_base_p_u64(__addr, __offset, __value, __p)
#define vstrdq_scatter_base_s64(__addr, __offset, __value) __arm_vstrdq_scatter_base_s64(__addr, __offset, __value)
#define vstrdq_scatter_base_u64(__addr, __offset, __value) __arm_vstrdq_scatter_base_u64(__addr, __offset, __value)
#define vstrdq_scatter_offset_p_s64(__base, __offset, __value, __p) __arm_vstrdq_scatter_offset_p_s64(__base, __offset, __value, __p)
#define vstrdq_scatter_offset_p_u64(__base, __offset, __value, __p) __arm_vstrdq_scatter_offset_p_u64(__base, __offset, __value, __p)
#define vstrdq_scatter_offset_s64(__base, __offset, __value) __arm_vstrdq_scatter_offset_s64(__base, __offset, __value)
#define vstrdq_scatter_offset_u64(__base, __offset, __value) __arm_vstrdq_scatter_offset_u64(__base, __offset, __value)
#define vstrdq_scatter_shifted_offset_p_s64(__base, __offset, __value, __p) __arm_vstrdq_scatter_shifted_offset_p_s64(__base, __offset, __value, __p)
#define vstrdq_scatter_shifted_offset_p_u64(__base, __offset, __value, __p) __arm_vstrdq_scatter_shifted_offset_p_u64(__base, __offset, __value, __p)
#define vstrdq_scatter_shifted_offset_s64(__base, __offset, __value) __arm_vstrdq_scatter_shifted_offset_s64(__base, __offset, __value)
#define vstrdq_scatter_shifted_offset_u64(__base, __offset, __value) __arm_vstrdq_scatter_shifted_offset_u64(__base, __offset, __value)
#define vstrhq_scatter_offset_f16(__base, __offset, __value) __arm_vstrhq_scatter_offset_f16(__base, __offset, __value)
#define vstrhq_scatter_offset_p_f16(__base, __offset, __value, __p) __arm_vstrhq_scatter_offset_p_f16(__base, __offset, __value, __p)
#define vstrhq_scatter_shifted_offset_f16(__base, __offset, __value) __arm_vstrhq_scatter_shifted_offset_f16(__base, __offset, __value)
#define vstrhq_scatter_shifted_offset_p_f16(__base, __offset, __value, __p) __arm_vstrhq_scatter_shifted_offset_p_f16(__base, __offset, __value, __p)
#define vstrwq_scatter_base_f32(__addr, __offset, __value) __arm_vstrwq_scatter_base_f32(__addr, __offset, __value)
#define vstrwq_scatter_base_p_f32(__addr, __offset, __value, __p) __arm_vstrwq_scatter_base_p_f32(__addr, __offset, __value, __p)
#define vstrwq_scatter_offset_f32(__base, __offset, __value) __arm_vstrwq_scatter_offset_f32(__base, __offset, __value)
#define vstrwq_scatter_offset_p_f32(__base, __offset, __value, __p) __arm_vstrwq_scatter_offset_p_f32(__base, __offset, __value, __p)
#define vstrwq_scatter_offset_p_s32(__base, __offset, __value, __p) __arm_vstrwq_scatter_offset_p_s32(__base, __offset, __value, __p)
#define vstrwq_scatter_offset_p_u32(__base, __offset, __value, __p) __arm_vstrwq_scatter_offset_p_u32(__base, __offset, __value, __p)
#define vstrwq_scatter_offset_s32(__base, __offset, __value) __arm_vstrwq_scatter_offset_s32(__base, __offset, __value)
#define vstrwq_scatter_offset_u32(__base, __offset, __value) __arm_vstrwq_scatter_offset_u32(__base, __offset, __value)
#define vstrwq_scatter_shifted_offset_f32(__base, __offset, __value) __arm_vstrwq_scatter_shifted_offset_f32(__base, __offset, __value)
#define vstrwq_scatter_shifted_offset_p_f32(__base, __offset, __value, __p) __arm_vstrwq_scatter_shifted_offset_p_f32(__base, __offset, __value, __p)
#define vstrwq_scatter_shifted_offset_p_s32(__base, __offset, __value, __p) __arm_vstrwq_scatter_shifted_offset_p_s32(__base, __offset, __value, __p)
#define vstrwq_scatter_shifted_offset_p_u32(__base, __offset, __value, __p) __arm_vstrwq_scatter_shifted_offset_p_u32(__base, __offset, __value, __p)
#define vstrwq_scatter_shifted_offset_s32(__base, __offset, __value) __arm_vstrwq_scatter_shifted_offset_s32(__base, __offset, __value)
#define vstrwq_scatter_shifted_offset_u32(__base, __offset, __value) __arm_vstrwq_scatter_shifted_offset_u32(__base, __offset, __value)
/* Vector addition intrinsics (type-suffixed entry points).  */
#define vaddq_s8(__a, __b) __arm_vaddq_s8(__a, __b)
#define vaddq_s16(__a, __b) __arm_vaddq_s16(__a, __b)
#define vaddq_s32(__a, __b) __arm_vaddq_s32(__a, __b)
#define vaddq_u8(__a, __b) __arm_vaddq_u8(__a, __b)
#define vaddq_u16(__a, __b) __arm_vaddq_u16(__a, __b)
#define vaddq_u32(__a, __b) __arm_vaddq_u32(__a, __b)
#define vaddq_f16(__a, __b) __arm_vaddq_f16(__a, __b)
#define vaddq_f32(__a, __b) __arm_vaddq_f32(__a, __b)
/* Bit-pattern reinterpretation intrinsics: vreinterpretq_<dst>_<src>.  */
#define vreinterpretq_s16_s32(__a) __arm_vreinterpretq_s16_s32(__a)
#define vreinterpretq_s16_s64(__a) __arm_vreinterpretq_s16_s64(__a)
#define vreinterpretq_s16_s8(__a) __arm_vreinterpretq_s16_s8(__a)
#define vreinterpretq_s16_u16(__a) __arm_vreinterpretq_s16_u16(__a)
#define vreinterpretq_s16_u32(__a) __arm_vreinterpretq_s16_u32(__a)
#define vreinterpretq_s16_u64(__a) __arm_vreinterpretq_s16_u64(__a)
#define vreinterpretq_s16_u8(__a) __arm_vreinterpretq_s16_u8(__a)
#define vreinterpretq_s32_s16(__a) __arm_vreinterpretq_s32_s16(__a)
#define vreinterpretq_s32_s64(__a) __arm_vreinterpretq_s32_s64(__a)
#define vreinterpretq_s32_s8(__a) __arm_vreinterpretq_s32_s8(__a)
#define vreinterpretq_s32_u16(__a) __arm_vreinterpretq_s32_u16(__a)
#define vreinterpretq_s32_u32(__a) __arm_vreinterpretq_s32_u32(__a)
#define vreinterpretq_s32_u64(__a) __arm_vreinterpretq_s32_u64(__a)
#define vreinterpretq_s32_u8(__a) __arm_vreinterpretq_s32_u8(__a)
#define vreinterpretq_s64_s16(__a) __arm_vreinterpretq_s64_s16(__a)
#define vreinterpretq_s64_s32(__a) __arm_vreinterpretq_s64_s32(__a)
#define vreinterpretq_s64_s8(__a) __arm_vreinterpretq_s64_s8(__a)
#define vreinterpretq_s64_u16(__a) __arm_vreinterpretq_s64_u16(__a)
#define vreinterpretq_s64_u32(__a) __arm_vreinterpretq_s64_u32(__a)
#define vreinterpretq_s64_u64(__a) __arm_vreinterpretq_s64_u64(__a)
#define vreinterpretq_s64_u8(__a) __arm_vreinterpretq_s64_u8(__a)
#define vreinterpretq_s8_s16(__a) __arm_vreinterpretq_s8_s16(__a)
#define vreinterpretq_s8_s32(__a) __arm_vreinterpretq_s8_s32(__a)
#define vreinterpretq_s8_s64(__a) __arm_vreinterpretq_s8_s64(__a)
#define vreinterpretq_s8_u16(__a) __arm_vreinterpretq_s8_u16(__a)
#define vreinterpretq_s8_u32(__a) __arm_vreinterpretq_s8_u32(__a)
#define vreinterpretq_s8_u64(__a) __arm_vreinterpretq_s8_u64(__a)
#define vreinterpretq_s8_u8(__a) __arm_vreinterpretq_s8_u8(__a)
#define vreinterpretq_u16_s16(__a) __arm_vreinterpretq_u16_s16(__a)
#define vreinterpretq_u16_s32(__a) __arm_vreinterpretq_u16_s32(__a)
#define vreinterpretq_u16_s64(__a) __arm_vreinterpretq_u16_s64(__a)
#define vreinterpretq_u16_s8(__a) __arm_vreinterpretq_u16_s8(__a)
#define vreinterpretq_u16_u32(__a) __arm_vreinterpretq_u16_u32(__a)
#define vreinterpretq_u16_u64(__a) __arm_vreinterpretq_u16_u64(__a)
#define vreinterpretq_u16_u8(__a) __arm_vreinterpretq_u16_u8(__a)
#define vreinterpretq_u32_s16(__a) __arm_vreinterpretq_u32_s16(__a)
#define vreinterpretq_u32_s32(__a) __arm_vreinterpretq_u32_s32(__a)
#define vreinterpretq_u32_s64(__a) __arm_vreinterpretq_u32_s64(__a)
#define vreinterpretq_u32_s8(__a) __arm_vreinterpretq_u32_s8(__a)
#define vreinterpretq_u32_u16(__a) __arm_vreinterpretq_u32_u16(__a)
#define vreinterpretq_u32_u64(__a) __arm_vreinterpretq_u32_u64(__a)
#define vreinterpretq_u32_u8(__a) __arm_vreinterpretq_u32_u8(__a)
#define vreinterpretq_u64_s16(__a) __arm_vreinterpretq_u64_s16(__a)
#define vreinterpretq_u64_s32(__a) __arm_vreinterpretq_u64_s32(__a)
#define vreinterpretq_u64_s64(__a) __arm_vreinterpretq_u64_s64(__a)
#define vreinterpretq_u64_s8(__a) __arm_vreinterpretq_u64_s8(__a)
#define vreinterpretq_u64_u16(__a) __arm_vreinterpretq_u64_u16(__a)
#define vreinterpretq_u64_u32(__a) __arm_vreinterpretq_u64_u32(__a)
#define vreinterpretq_u64_u8(__a) __arm_vreinterpretq_u64_u8(__a)
#define vreinterpretq_u8_s16(__a) __arm_vreinterpretq_u8_s16(__a)
#define vreinterpretq_u8_s32(__a) __arm_vreinterpretq_u8_s32(__a)
#define vreinterpretq_u8_s64(__a) __arm_vreinterpretq_u8_s64(__a)
#define vreinterpretq_u8_s8(__a) __arm_vreinterpretq_u8_s8(__a)
#define vreinterpretq_u8_u16(__a) __arm_vreinterpretq_u8_u16(__a)
#define vreinterpretq_u8_u32(__a) __arm_vreinterpretq_u8_u32(__a)
#define vreinterpretq_u8_u64(__a) __arm_vreinterpretq_u8_u64(__a)
/* Floating-point <-> integer reinterpretations.  */
#define vreinterpretq_s32_f16(__a) __arm_vreinterpretq_s32_f16(__a)
#define vreinterpretq_s32_f32(__a) __arm_vreinterpretq_s32_f32(__a)
#define vreinterpretq_u16_f16(__a) __arm_vreinterpretq_u16_f16(__a)
#define vreinterpretq_u16_f32(__a) __arm_vreinterpretq_u16_f32(__a)
#define vreinterpretq_u32_f16(__a) __arm_vreinterpretq_u32_f16(__a)
#define vreinterpretq_u32_f32(__a) __arm_vreinterpretq_u32_f32(__a)
#define vreinterpretq_u64_f16(__a) __arm_vreinterpretq_u64_f16(__a)
#define vreinterpretq_u64_f32(__a) __arm_vreinterpretq_u64_f32(__a)
#define vreinterpretq_u8_f16(__a) __arm_vreinterpretq_u8_f16(__a)
#define vreinterpretq_u8_f32(__a) __arm_vreinterpretq_u8_f32(__a)
#define vreinterpretq_f16_f32(__a) __arm_vreinterpretq_f16_f32(__a)
#define vreinterpretq_f16_s16(__a) __arm_vreinterpretq_f16_s16(__a)
#define vreinterpretq_f16_s32(__a) __arm_vreinterpretq_f16_s32(__a)
#define vreinterpretq_f16_s64(__a) __arm_vreinterpretq_f16_s64(__a)
#define vreinterpretq_f16_s8(__a) __arm_vreinterpretq_f16_s8(__a)
#define vreinterpretq_f16_u16(__a) __arm_vreinterpretq_f16_u16(__a)
#define vreinterpretq_f16_u32(__a) __arm_vreinterpretq_f16_u32(__a)
#define vreinterpretq_f16_u64(__a) __arm_vreinterpretq_f16_u64(__a)
#define vreinterpretq_f16_u8(__a) __arm_vreinterpretq_f16_u8(__a)
#define vreinterpretq_f32_f16(__a) __arm_vreinterpretq_f32_f16(__a)
#define vreinterpretq_f32_s16(__a) __arm_vreinterpretq_f32_s16(__a)
#define vreinterpretq_f32_s32(__a) __arm_vreinterpretq_f32_s32(__a)
#define vreinterpretq_f32_s64(__a) __arm_vreinterpretq_f32_s64(__a)
#define vreinterpretq_f32_s8(__a) __arm_vreinterpretq_f32_s8(__a)
#define vreinterpretq_f32_u16(__a) __arm_vreinterpretq_f32_u16(__a)
#define vreinterpretq_f32_u32(__a) __arm_vreinterpretq_f32_u32(__a)
#define vreinterpretq_f32_u64(__a) __arm_vreinterpretq_f32_u64(__a)
#define vreinterpretq_f32_u8(__a) __arm_vreinterpretq_f32_u8(__a)
#define vreinterpretq_s16_f16(__a) __arm_vreinterpretq_s16_f16(__a)
#define vreinterpretq_s16_f32(__a) __arm_vreinterpretq_s16_f32(__a)
#define vreinterpretq_s64_f16(__a) __arm_vreinterpretq_s64_f16(__a)
#define vreinterpretq_s64_f32(__a) __arm_vreinterpretq_s64_f32(__a)
#define vreinterpretq_s8_f16(__a) __arm_vreinterpretq_s8_f16(__a)
#define vreinterpretq_s8_f32(__a) __arm_vreinterpretq_s8_f32(__a)
/* Uninitialized-vector intrinsics.  NOTE(review): upstream deliberately
   declares these with a literal `void` macro parameter so the user writes
   vuninitializedq_u8(void); kept verbatim to match the __arm_* callees.  */
#define vuninitializedq_u8(void) __arm_vuninitializedq_u8(void)
#define vuninitializedq_u16(void) __arm_vuninitializedq_u16(void)
#define vuninitializedq_u32(void) __arm_vuninitializedq_u32(void)
#define vuninitializedq_u64(void) __arm_vuninitializedq_u64(void)
#define vuninitializedq_s8(void) __arm_vuninitializedq_s8(void)
#define vuninitializedq_s16(void) __arm_vuninitializedq_s16(void)
#define vuninitializedq_s32(void) __arm_vuninitializedq_s32(void)
#define vuninitializedq_s64(void) __arm_vuninitializedq_s64(void)
#define vuninitializedq_f16(void) __arm_vuninitializedq_f16(void)
#define vuninitializedq_f32(void) __arm_vuninitializedq_f32(void)
/* Incrementing/decrementing duplicate intrinsics (vidup/vddup and the
   wrapping vdwdup/viwdup forms).  '_m' = merging predicated, '_n' = scalar
   start value, '_wb' = write-back pointer form.  */
#define vddupq_m_n_u8(__inactive, __a, __imm, __p) __arm_vddupq_m_n_u8(__inactive, __a, __imm, __p)
#define vddupq_m_n_u32(__inactive, __a, __imm, __p) __arm_vddupq_m_n_u32(__inactive, __a, __imm, __p)
#define vddupq_m_n_u16(__inactive, __a, __imm, __p) __arm_vddupq_m_n_u16(__inactive, __a, __imm, __p)
#define vddupq_m_wb_u8(__inactive, __a, __imm, __p) __arm_vddupq_m_wb_u8(__inactive, __a, __imm, __p)
#define vddupq_m_wb_u16(__inactive, __a, __imm, __p) __arm_vddupq_m_wb_u16(__inactive, __a, __imm, __p)
#define vddupq_m_wb_u32(__inactive, __a, __imm, __p) __arm_vddupq_m_wb_u32(__inactive, __a, __imm, __p)
#define vddupq_n_u8(__a, __imm) __arm_vddupq_n_u8(__a, __imm)
#define vddupq_n_u32(__a, __imm) __arm_vddupq_n_u32(__a, __imm)
#define vddupq_n_u16(__a, __imm) __arm_vddupq_n_u16(__a, __imm)
#define vddupq_wb_u8( __a, __imm) __arm_vddupq_wb_u8( __a, __imm)
#define vddupq_wb_u16( __a, __imm) __arm_vddupq_wb_u16( __a, __imm)
#define vddupq_wb_u32( __a, __imm) __arm_vddupq_wb_u32( __a, __imm)
#define vdwdupq_m_n_u8(__inactive, __a, __b, __imm, __p) __arm_vdwdupq_m_n_u8(__inactive, __a, __b, __imm, __p)
#define vdwdupq_m_n_u32(__inactive, __a, __b, __imm, __p) __arm_vdwdupq_m_n_u32(__inactive, __a, __b, __imm, __p)
#define vdwdupq_m_n_u16(__inactive, __a, __b, __imm, __p) __arm_vdwdupq_m_n_u16(__inactive, __a, __b, __imm, __p)
#define vdwdupq_m_wb_u8(__inactive, __a, __b, __imm, __p) __arm_vdwdupq_m_wb_u8(__inactive, __a, __b, __imm, __p)
#define vdwdupq_m_wb_u32(__inactive, __a, __b, __imm, __p) __arm_vdwdupq_m_wb_u32(__inactive, __a, __b, __imm, __p)
#define vdwdupq_m_wb_u16(__inactive, __a, __b, __imm, __p) __arm_vdwdupq_m_wb_u16(__inactive, __a, __b, __imm, __p)
#define vdwdupq_n_u8(__a, __b, __imm) __arm_vdwdupq_n_u8(__a, __b, __imm)
#define vdwdupq_n_u32(__a, __b, __imm) __arm_vdwdupq_n_u32(__a, __b, __imm)
#define vdwdupq_n_u16(__a, __b, __imm) __arm_vdwdupq_n_u16(__a, __b, __imm)
#define vdwdupq_wb_u8( __a, __b, __imm) __arm_vdwdupq_wb_u8( __a, __b, __imm)
#define vdwdupq_wb_u32( __a, __b, __imm) __arm_vdwdupq_wb_u32( __a, __b, __imm)
#define vdwdupq_wb_u16( __a, __b, __imm) __arm_vdwdupq_wb_u16( __a, __b, __imm)
#define vidupq_m_n_u8(__inactive, __a, __imm, __p) __arm_vidupq_m_n_u8(__inactive, __a, __imm, __p)
#define vidupq_m_n_u32(__inactive, __a, __imm, __p) __arm_vidupq_m_n_u32(__inactive, __a, __imm, __p)
#define vidupq_m_n_u16(__inactive, __a, __imm, __p) __arm_vidupq_m_n_u16(__inactive, __a, __imm, __p)
#define vidupq_m_wb_u8(__inactive, __a, __imm, __p) __arm_vidupq_m_wb_u8(__inactive, __a, __imm, __p)
#define vidupq_m_wb_u16(__inactive, __a, __imm, __p) __arm_vidupq_m_wb_u16(__inactive, __a, __imm, __p)
#define vidupq_m_wb_u32(__inactive, __a, __imm, __p) __arm_vidupq_m_wb_u32(__inactive, __a, __imm, __p)
#define vidupq_n_u8(__a, __imm) __arm_vidupq_n_u8(__a, __imm)
#define vidupq_n_u32(__a, __imm) __arm_vidupq_n_u32(__a, __imm)
#define vidupq_n_u16(__a, __imm) __arm_vidupq_n_u16(__a, __imm)
#define vidupq_wb_u8( __a, __imm) __arm_vidupq_wb_u8( __a, __imm)
#define vidupq_wb_u16( __a, __imm) __arm_vidupq_wb_u16( __a, __imm)
#define vidupq_wb_u32( __a, __imm) __arm_vidupq_wb_u32( __a, __imm)
#define viwdupq_m_n_u8(__inactive, __a, __b, __imm, __p) __arm_viwdupq_m_n_u8(__inactive, __a, __b, __imm, __p)
#define viwdupq_m_n_u32(__inactive, __a, __b, __imm, __p) __arm_viwdupq_m_n_u32(__inactive, __a, __b, __imm, __p)
#define viwdupq_m_n_u16(__inactive, __a, __b, __imm, __p) __arm_viwdupq_m_n_u16(__inactive, __a, __b, __imm, __p)
#define viwdupq_m_wb_u8(__inactive, __a, __b, __imm, __p) __arm_viwdupq_m_wb_u8(__inactive, __a, __b, __imm, __p)
#define viwdupq_m_wb_u32(__inactive, __a, __b, __imm, __p) __arm_viwdupq_m_wb_u32(__inactive, __a, __b, __imm, __p)
#define viwdupq_m_wb_u16(__inactive, __a, __b, __imm, __p) __arm_viwdupq_m_wb_u16(__inactive, __a, __b, __imm, __p)
#define viwdupq_n_u8(__a, __b, __imm) __arm_viwdupq_n_u8(__a, __b, __imm)
#define viwdupq_n_u32(__a, __b, __imm) __arm_viwdupq_n_u32(__a, __b, __imm)
#define viwdupq_n_u16(__a, __b, __imm) __arm_viwdupq_n_u16(__a, __b, __imm)
#define viwdupq_wb_u8( __a, __b, __imm) __arm_viwdupq_wb_u8( __a, __b, __imm)
#define viwdupq_wb_u32( __a, __b, __imm) __arm_viwdupq_wb_u32( __a, __b, __imm)
#define viwdupq_wb_u16( __a, __b, __imm) __arm_viwdupq_wb_u16( __a, __b, __imm)
/* Write-back ('_wb') gather-load and scatter-store intrinsics; '_z' loads
   are zeroing predicated, '_p' stores are predicated.  */
#define vldrdq_gather_base_wb_s64(__addr, __offset) __arm_vldrdq_gather_base_wb_s64(__addr, __offset)
#define vldrdq_gather_base_wb_u64(__addr, __offset) __arm_vldrdq_gather_base_wb_u64(__addr, __offset)
#define vldrdq_gather_base_wb_z_s64(__addr, __offset, __p) __arm_vldrdq_gather_base_wb_z_s64(__addr, __offset, __p)
#define vldrdq_gather_base_wb_z_u64(__addr, __offset, __p) __arm_vldrdq_gather_base_wb_z_u64(__addr, __offset, __p)
#define vldrwq_gather_base_wb_f32(__addr, __offset) __arm_vldrwq_gather_base_wb_f32(__addr, __offset)
#define vldrwq_gather_base_wb_s32(__addr, __offset) __arm_vldrwq_gather_base_wb_s32(__addr, __offset)
#define vldrwq_gather_base_wb_u32(__addr, __offset) __arm_vldrwq_gather_base_wb_u32(__addr, __offset)
#define vldrwq_gather_base_wb_z_f32(__addr, __offset, __p) __arm_vldrwq_gather_base_wb_z_f32(__addr, __offset, __p)
#define vldrwq_gather_base_wb_z_s32(__addr, __offset, __p) __arm_vldrwq_gather_base_wb_z_s32(__addr, __offset, __p)
#define vldrwq_gather_base_wb_z_u32(__addr, __offset, __p) __arm_vldrwq_gather_base_wb_z_u32(__addr, __offset, __p)
#define vstrdq_scatter_base_wb_p_s64(__addr, __offset, __value, __p) __arm_vstrdq_scatter_base_wb_p_s64(__addr, __offset, __value, __p)
#define vstrdq_scatter_base_wb_p_u64(__addr, __offset, __value, __p) __arm_vstrdq_scatter_base_wb_p_u64(__addr, __offset, __value, __p)
#define vstrdq_scatter_base_wb_s64(__addr, __offset, __value) __arm_vstrdq_scatter_base_wb_s64(__addr, __offset, __value)
#define vstrdq_scatter_base_wb_u64(__addr, __offset, __value) __arm_vstrdq_scatter_base_wb_u64(__addr, __offset, __value)
#define vstrwq_scatter_base_wb_p_s32(__addr, __offset, __value, __p) __arm_vstrwq_scatter_base_wb_p_s32(__addr, __offset, __value, __p)
#define vstrwq_scatter_base_wb_p_f32(__addr, __offset, __value, __p) __arm_vstrwq_scatter_base_wb_p_f32(__addr, __offset, __value, __p)
#define vstrwq_scatter_base_wb_p_u32(__addr, __offset, __value, __p) __arm_vstrwq_scatter_base_wb_p_u32(__addr, __offset, __value, __p)
#define vstrwq_scatter_base_wb_s32(__addr, __offset, __value) __arm_vstrwq_scatter_base_wb_s32(__addr, __offset, __value)
#define vstrwq_scatter_base_wb_u32(__addr, __offset, __value) __arm_vstrwq_scatter_base_wb_u32(__addr, __offset, __value)
#define vstrwq_scatter_base_wb_f32(__addr, __offset, __value) __arm_vstrwq_scatter_base_wb_f32(__addr, __offset, __value)
/* '_x' (dont-care predicated) intrinsic variants: each takes a predicate
   __p as its final argument and forwards to its __arm_* implementation.  */
#define vddupq_x_n_u8(__a, __imm, __p) __arm_vddupq_x_n_u8(__a, __imm, __p)
#define vddupq_x_n_u16(__a, __imm, __p) __arm_vddupq_x_n_u16(__a, __imm, __p)
#define vddupq_x_n_u32(__a, __imm, __p) __arm_vddupq_x_n_u32(__a, __imm, __p)
#define vddupq_x_wb_u8(__a, __imm, __p) __arm_vddupq_x_wb_u8(__a, __imm, __p)
#define vddupq_x_wb_u16(__a, __imm, __p) __arm_vddupq_x_wb_u16(__a, __imm, __p)
#define vddupq_x_wb_u32(__a, __imm, __p) __arm_vddupq_x_wb_u32(__a, __imm, __p)
#define vdwdupq_x_n_u8(__a, __b, __imm, __p) __arm_vdwdupq_x_n_u8(__a, __b, __imm, __p)
#define vdwdupq_x_n_u16(__a, __b, __imm, __p) __arm_vdwdupq_x_n_u16(__a, __b, __imm, __p)
#define vdwdupq_x_n_u32(__a, __b, __imm, __p) __arm_vdwdupq_x_n_u32(__a, __b, __imm, __p)
#define vdwdupq_x_wb_u8(__a, __b, __imm, __p) __arm_vdwdupq_x_wb_u8(__a, __b, __imm, __p)
#define vdwdupq_x_wb_u16(__a, __b, __imm, __p) __arm_vdwdupq_x_wb_u16(__a, __b, __imm, __p)
#define vdwdupq_x_wb_u32(__a, __b, __imm, __p) __arm_vdwdupq_x_wb_u32(__a, __b, __imm, __p)
#define vidupq_x_n_u8(__a, __imm, __p) __arm_vidupq_x_n_u8(__a, __imm, __p)
#define vidupq_x_n_u16(__a, __imm, __p) __arm_vidupq_x_n_u16(__a, __imm, __p)
#define vidupq_x_n_u32(__a, __imm, __p) __arm_vidupq_x_n_u32(__a, __imm, __p)
#define vidupq_x_wb_u8(__a, __imm, __p) __arm_vidupq_x_wb_u8(__a, __imm, __p)
#define vidupq_x_wb_u16(__a, __imm, __p) __arm_vidupq_x_wb_u16(__a, __imm, __p)
#define vidupq_x_wb_u32(__a, __imm, __p) __arm_vidupq_x_wb_u32(__a, __imm, __p)
#define viwdupq_x_n_u8(__a, __b, __imm, __p) __arm_viwdupq_x_n_u8(__a, __b, __imm, __p)
#define viwdupq_x_n_u16(__a, __b, __imm, __p) __arm_viwdupq_x_n_u16(__a, __b, __imm, __p)
#define viwdupq_x_n_u32(__a, __b, __imm, __p) __arm_viwdupq_x_n_u32(__a, __b, __imm, __p)
#define viwdupq_x_wb_u8(__a, __b, __imm, __p) __arm_viwdupq_x_wb_u8(__a, __b, __imm, __p)
#define viwdupq_x_wb_u16(__a, __b, __imm, __p) __arm_viwdupq_x_wb_u16(__a, __b, __imm, __p)
#define viwdupq_x_wb_u32(__a, __b, __imm, __p) __arm_viwdupq_x_wb_u32(__a, __b, __imm, __p)
#define vdupq_x_n_s8(__a, __p) __arm_vdupq_x_n_s8(__a, __p)
#define vdupq_x_n_s16(__a, __p) __arm_vdupq_x_n_s16(__a, __p)
#define vdupq_x_n_s32(__a, __p) __arm_vdupq_x_n_s32(__a, __p)
#define vdupq_x_n_u8(__a, __p) __arm_vdupq_x_n_u8(__a, __p)
#define vdupq_x_n_u16(__a, __p) __arm_vdupq_x_n_u16(__a, __p)
#define vdupq_x_n_u32(__a, __p) __arm_vdupq_x_n_u32(__a, __p)
#define vminq_x_s8(__a, __b, __p) __arm_vminq_x_s8(__a, __b, __p)
#define vminq_x_s16(__a, __b, __p) __arm_vminq_x_s16(__a, __b, __p)
#define vminq_x_s32(__a, __b, __p) __arm_vminq_x_s32(__a, __b, __p)
#define vminq_x_u8(__a, __b, __p) __arm_vminq_x_u8(__a, __b, __p)
#define vminq_x_u16(__a, __b, __p) __arm_vminq_x_u16(__a, __b, __p)
#define vminq_x_u32(__a, __b, __p) __arm_vminq_x_u32(__a, __b, __p)
#define vmaxq_x_s8(__a, __b, __p) __arm_vmaxq_x_s8(__a, __b, __p)
#define vmaxq_x_s16(__a, __b, __p) __arm_vmaxq_x_s16(__a, __b, __p)
#define vmaxq_x_s32(__a, __b, __p) __arm_vmaxq_x_s32(__a, __b, __p)
#define vmaxq_x_u8(__a, __b, __p) __arm_vmaxq_x_u8(__a, __b, __p)
#define vmaxq_x_u16(__a, __b, __p) __arm_vmaxq_x_u16(__a, __b, __p)
#define vmaxq_x_u32(__a, __b, __p) __arm_vmaxq_x_u32(__a, __b, __p)
#define vabdq_x_s8(__a, __b, __p) __arm_vabdq_x_s8(__a, __b, __p)
#define vabdq_x_s16(__a, __b, __p) __arm_vabdq_x_s16(__a, __b, __p)
#define vabdq_x_s32(__a, __b, __p) __arm_vabdq_x_s32(__a, __b, __p)
#define vabdq_x_u8(__a, __b, __p) __arm_vabdq_x_u8(__a, __b, __p)
#define vabdq_x_u16(__a, __b, __p) __arm_vabdq_x_u16(__a, __b, __p)
#define vabdq_x_u32(__a, __b, __p) __arm_vabdq_x_u32(__a, __b, __p)
#define vabsq_x_s8(__a, __p) __arm_vabsq_x_s8(__a, __p)
#define vabsq_x_s16(__a, __p) __arm_vabsq_x_s16(__a, __p)
#define vabsq_x_s32(__a, __p) __arm_vabsq_x_s32(__a, __p)
#define vaddq_x_s8(__a, __b, __p) __arm_vaddq_x_s8(__a, __b, __p)
#define vaddq_x_s16(__a, __b, __p) __arm_vaddq_x_s16(__a, __b, __p)
#define vaddq_x_s32(__a, __b, __p) __arm_vaddq_x_s32(__a, __b, __p)
#define vaddq_x_n_s8(__a, __b, __p) __arm_vaddq_x_n_s8(__a, __b, __p)
#define vaddq_x_n_s16(__a, __b, __p) __arm_vaddq_x_n_s16(__a, __b, __p)
#define vaddq_x_n_s32(__a, __b, __p) __arm_vaddq_x_n_s32(__a, __b, __p)
#define vaddq_x_u8(__a, __b, __p) __arm_vaddq_x_u8(__a, __b, __p)
#define vaddq_x_u16(__a, __b, __p) __arm_vaddq_x_u16(__a, __b, __p)
#define vaddq_x_u32(__a, __b, __p) __arm_vaddq_x_u32(__a, __b, __p)
#define vaddq_x_n_u8(__a, __b, __p) __arm_vaddq_x_n_u8(__a, __b, __p)
#define vaddq_x_n_u16(__a, __b, __p) __arm_vaddq_x_n_u16(__a, __b, __p)
#define vaddq_x_n_u32(__a, __b, __p) __arm_vaddq_x_n_u32(__a, __b, __p)
#define vclsq_x_s8(__a, __p) __arm_vclsq_x_s8(__a, __p)
#define vclsq_x_s16(__a, __p) __arm_vclsq_x_s16(__a, __p)
#define vclsq_x_s32(__a, __p) __arm_vclsq_x_s32(__a, __p)
#define vclzq_x_s8(__a, __p) __arm_vclzq_x_s8(__a, __p)
#define vclzq_x_s16(__a, __p) __arm_vclzq_x_s16(__a, __p)
#define vclzq_x_s32(__a, __p) __arm_vclzq_x_s32(__a, __p)
#define vclzq_x_u8(__a, __p) __arm_vclzq_x_u8(__a, __p)
#define vclzq_x_u16(__a, __p) __arm_vclzq_x_u16(__a, __p)
#define vclzq_x_u32(__a, __p) __arm_vclzq_x_u32(__a, __p)
#define vnegq_x_s8(__a, __p) __arm_vnegq_x_s8(__a, __p)
#define vnegq_x_s16(__a, __p) __arm_vnegq_x_s16(__a, __p)
#define vnegq_x_s32(__a, __p) __arm_vnegq_x_s32(__a, __p)
#define vmulhq_x_s8(__a, __b, __p) __arm_vmulhq_x_s8(__a, __b, __p)
#define vmulhq_x_s16(__a, __b, __p) __arm_vmulhq_x_s16(__a, __b, __p)
#define vmulhq_x_s32(__a, __b, __p) __arm_vmulhq_x_s32(__a, __b, __p)
#define vmulhq_x_u8(__a, __b, __p) __arm_vmulhq_x_u8(__a, __b, __p)
#define vmulhq_x_u16(__a, __b, __p) __arm_vmulhq_x_u16(__a, __b, __p)
#define vmulhq_x_u32(__a, __b, __p) __arm_vmulhq_x_u32(__a, __b, __p)
#define vmullbq_poly_x_p8(__a, __b, __p) __arm_vmullbq_poly_x_p8(__a, __b, __p)
#define vmullbq_poly_x_p16(__a, __b, __p) __arm_vmullbq_poly_x_p16(__a, __b, __p)
#define vmullbq_int_x_s8(__a, __b, __p) __arm_vmullbq_int_x_s8(__a, __b, __p)
#define vmullbq_int_x_s16(__a, __b, __p) __arm_vmullbq_int_x_s16(__a, __b, __p)
#define vmullbq_int_x_s32(__a, __b, __p) __arm_vmullbq_int_x_s32(__a, __b, __p)
#define vmullbq_int_x_u8(__a, __b, __p) __arm_vmullbq_int_x_u8(__a, __b, __p)
#define vmullbq_int_x_u16(__a, __b, __p) __arm_vmullbq_int_x_u16(__a, __b, __p)
#define vmullbq_int_x_u32(__a, __b, __p) __arm_vmullbq_int_x_u32(__a, __b, __p)
#define vmulltq_poly_x_p8(__a, __b, __p) __arm_vmulltq_poly_x_p8(__a, __b, __p)
#define vmulltq_poly_x_p16(__a, __b, __p) __arm_vmulltq_poly_x_p16(__a, __b, __p)
#define vmulltq_int_x_s8(__a, __b, __p) __arm_vmulltq_int_x_s8(__a, __b, __p)
#define vmulltq_int_x_s16(__a, __b, __p) __arm_vmulltq_int_x_s16(__a, __b, __p)
#define vmulltq_int_x_s32(__a, __b, __p) __arm_vmulltq_int_x_s32(__a, __b, __p)
#define vmulltq_int_x_u8(__a, __b, __p) __arm_vmulltq_int_x_u8(__a, __b, __p)
#define vmulltq_int_x_u16(__a, __b, __p) __arm_vmulltq_int_x_u16(__a, __b, __p)
#define vmulltq_int_x_u32(__a, __b, __p) __arm_vmulltq_int_x_u32(__a, __b, __p)
#define vmulq_x_s8(__a, __b, __p) __arm_vmulq_x_s8(__a, __b, __p)
#define vmulq_x_s16(__a, __b, __p) __arm_vmulq_x_s16(__a, __b, __p)
#define vmulq_x_s32(__a, __b, __p) __arm_vmulq_x_s32(__a, __b, __p)
#define vmulq_x_n_s8(__a, __b, __p) __arm_vmulq_x_n_s8(__a, __b, __p)
#define vmulq_x_n_s16(__a, __b, __p) __arm_vmulq_x_n_s16(__a, __b, __p)
#define vmulq_x_n_s32(__a, __b, __p) __arm_vmulq_x_n_s32(__a, __b, __p)
#define vmulq_x_u8(__a, __b, __p) __arm_vmulq_x_u8(__a, __b, __p)
#define vmulq_x_u16(__a, __b, __p) __arm_vmulq_x_u16(__a, __b, __p)
#define vmulq_x_u32(__a, __b, __p) __arm_vmulq_x_u32(__a, __b, __p)
#define vmulq_x_n_u8(__a, __b, __p) __arm_vmulq_x_n_u8(__a, __b, __p)
#define vmulq_x_n_u16(__a, __b, __p) __arm_vmulq_x_n_u16(__a, __b, __p)
#define vmulq_x_n_u32(__a, __b, __p) __arm_vmulq_x_n_u32(__a, __b, __p)
#define vsubq_x_s8(__a, __b, __p) __arm_vsubq_x_s8(__a, __b, __p)
#define vsubq_x_s16(__a, __b, __p) __arm_vsubq_x_s16(__a, __b, __p)
#define vsubq_x_s32(__a, __b, __p) __arm_vsubq_x_s32(__a, __b, __p)
#define vsubq_x_n_s8(__a, __b, __p) __arm_vsubq_x_n_s8(__a, __b, __p)
#define vsubq_x_n_s16(__a, __b, __p) __arm_vsubq_x_n_s16(__a, __b, __p)
#define vsubq_x_n_s32(__a, __b, __p) __arm_vsubq_x_n_s32(__a, __b, __p)
#define vsubq_x_u8(__a, __b, __p) __arm_vsubq_x_u8(__a, __b, __p)
#define vsubq_x_u16(__a, __b, __p) __arm_vsubq_x_u16(__a, __b, __p)
#define vsubq_x_u32(__a, __b, __p) __arm_vsubq_x_u32(__a, __b, __p)
#define vsubq_x_n_u8(__a, __b, __p) __arm_vsubq_x_n_u8(__a, __b, __p)
#define vsubq_x_n_u16(__a, __b, __p) __arm_vsubq_x_n_u16(__a, __b, __p)
#define vsubq_x_n_u32(__a, __b, __p) __arm_vsubq_x_n_u32(__a, __b, __p)
#define vcaddq_rot90_x_s8(__a, __b, __p) __arm_vcaddq_rot90_x_s8(__a, __b, __p)
#define vcaddq_rot90_x_s16(__a, __b, __p) __arm_vcaddq_rot90_x_s16(__a, __b, __p)
#define vcaddq_rot90_x_s32(__a, __b, __p) __arm_vcaddq_rot90_x_s32(__a, __b, __p)
#define vcaddq_rot90_x_u8(__a, __b, __p) __arm_vcaddq_rot90_x_u8(__a, __b, __p)
#define vcaddq_rot90_x_u16(__a, __b, __p) __arm_vcaddq_rot90_x_u16(__a, __b, __p)
#define vcaddq_rot90_x_u32(__a, __b, __p) __arm_vcaddq_rot90_x_u32(__a, __b, __p)
#define vcaddq_rot270_x_s8(__a, __b, __p) __arm_vcaddq_rot270_x_s8(__a, __b, __p)
#define vcaddq_rot270_x_s16(__a, __b, __p) __arm_vcaddq_rot270_x_s16(__a, __b, __p)
#define vcaddq_rot270_x_s32(__a, __b, __p) __arm_vcaddq_rot270_x_s32(__a, __b, __p)
#define vcaddq_rot270_x_u8(__a, __b, __p) __arm_vcaddq_rot270_x_u8(__a, __b, __p)
#define vcaddq_rot270_x_u16(__a, __b, __p) __arm_vcaddq_rot270_x_u16(__a, __b, __p)
#define vcaddq_rot270_x_u32(__a, __b, __p) __arm_vcaddq_rot270_x_u32(__a, __b, __p)
#define vhaddq_x_n_s8(__a, __b, __p) __arm_vhaddq_x_n_s8(__a, __b, __p)
#define vhaddq_x_n_s16(__a, __b, __p) __arm_vhaddq_x_n_s16(__a, __b, __p)
#define vhaddq_x_n_s32(__a, __b, __p) __arm_vhaddq_x_n_s32(__a, __b, __p)
#define vhaddq_x_n_u8(__a, __b, __p) __arm_vhaddq_x_n_u8(__a, __b, __p)
#define vhaddq_x_n_u16(__a, __b, __p) __arm_vhaddq_x_n_u16(__a, __b, __p)
#define vhaddq_x_n_u32(__a, __b, __p) __arm_vhaddq_x_n_u32(__a, __b, __p)
#define vhaddq_x_s8(__a, __b, __p) __arm_vhaddq_x_s8(__a, __b, __p)
#define vhaddq_x_s16(__a, __b, __p) __arm_vhaddq_x_s16(__a, __b, __p)
#define vhaddq_x_s32(__a, __b, __p) __arm_vhaddq_x_s32(__a, __b, __p)
#define vhaddq_x_u8(__a, __b, __p) __arm_vhaddq_x_u8(__a, __b, __p)
#define vhaddq_x_u16(__a, __b, __p) __arm_vhaddq_x_u16(__a, __b, __p)
#define vhaddq_x_u32(__a, __b, __p) __arm_vhaddq_x_u32(__a, __b, __p)
#define vhcaddq_rot90_x_s8(__a, __b, __p) __arm_vhcaddq_rot90_x_s8(__a, __b, __p)
#define vhcaddq_rot90_x_s16(__a, __b, __p) __arm_vhcaddq_rot90_x_s16(__a, __b, __p)
#define vhcaddq_rot90_x_s32(__a, __b, __p) __arm_vhcaddq_rot90_x_s32(__a, __b, __p)
#define vhcaddq_rot270_x_s8(__a, __b, __p) __arm_vhcaddq_rot270_x_s8(__a, __b, __p)
#define vhcaddq_rot270_x_s16(__a, __b, __p) __arm_vhcaddq_rot270_x_s16(__a, __b, __p)
#define vhcaddq_rot270_x_s32(__a, __b, __p) __arm_vhcaddq_rot270_x_s32(__a, __b, __p)
#define vhsubq_x_n_s8(__a, __b, __p) __arm_vhsubq_x_n_s8(__a, __b, __p)
#define vhsubq_x_n_s16(__a, __b, __p) __arm_vhsubq_x_n_s16(__a, __b, __p)
#define vhsubq_x_n_s32(__a, __b, __p) __arm_vhsubq_x_n_s32(__a, __b, __p)
#define vhsubq_x_n_u8(__a, __b, __p) __arm_vhsubq_x_n_u8(__a, __b, __p)
#define vhsubq_x_n_u16(__a, __b, __p) __arm_vhsubq_x_n_u16(__a, __b, __p)
#define vhsubq_x_n_u32(__a, __b, __p) __arm_vhsubq_x_n_u32(__a, __b, __p)
#define vhsubq_x_s8(__a, __b, __p) __arm_vhsubq_x_s8(__a, __b, __p)
#define vhsubq_x_s16(__a, __b, __p) __arm_vhsubq_x_s16(__a, __b, __p)
#define vhsubq_x_s32(__a, __b, __p) __arm_vhsubq_x_s32(__a, __b, __p)
#define vhsubq_x_u8(__a, __b, __p) __arm_vhsubq_x_u8(__a, __b, __p)
#define vhsubq_x_u16(__a, __b, __p) __arm_vhsubq_x_u16(__a, __b, __p)
#define vhsubq_x_u32(__a, __b, __p) __arm_vhsubq_x_u32(__a, __b, __p)
#define vrhaddq_x_s8(__a, __b, __p) __arm_vrhaddq_x_s8(__a, __b, __p)
#define vrhaddq_x_s16(__a, __b, __p) __arm_vrhaddq_x_s16(__a, __b, __p)
#define vrhaddq_x_s32(__a, __b, __p) __arm_vrhaddq_x_s32(__a, __b, __p)
#define vrhaddq_x_u8(__a, __b, __p) __arm_vrhaddq_x_u8(__a, __b, __p)
#define vrhaddq_x_u16(__a, __b, __p) __arm_vrhaddq_x_u16(__a, __b, __p)
#define vrhaddq_x_u32(__a, __b, __p) __arm_vrhaddq_x_u32(__a, __b, __p)
#define vrmulhq_x_s8(__a, __b, __p) __arm_vrmulhq_x_s8(__a, __b, __p)
#define vrmulhq_x_s16(__a, __b, __p) __arm_vrmulhq_x_s16(__a, __b, __p)
#define vrmulhq_x_s32(__a, __b, __p) __arm_vrmulhq_x_s32(__a, __b, __p)
#define vrmulhq_x_u8(__a, __b, __p) __arm_vrmulhq_x_u8(__a, __b, __p)
#define vrmulhq_x_u16(__a, __b, __p) __arm_vrmulhq_x_u16(__a, __b, __p)
#define vrmulhq_x_u32(__a, __b, __p) __arm_vrmulhq_x_u32(__a, __b, __p)
#define vandq_x_s8(__a, __b, __p) __arm_vandq_x_s8(__a, __b, __p)
#define vandq_x_s16(__a, __b, __p) __arm_vandq_x_s16(__a, __b, __p)
#define vandq_x_s32(__a, __b, __p) __arm_vandq_x_s32(__a, __b, __p)
#define vandq_x_u8(__a, __b, __p) __arm_vandq_x_u8(__a, __b, __p)
#define vandq_x_u16(__a, __b, __p) __arm_vandq_x_u16(__a, __b, __p)
#define vandq_x_u32(__a, __b, __p) __arm_vandq_x_u32(__a, __b, __p)
#define vbicq_x_s8(__a, __b, __p) __arm_vbicq_x_s8(__a, __b, __p)
#define vbicq_x_s16(__a, __b, __p) __arm_vbicq_x_s16(__a, __b, __p)
#define vbicq_x_s32(__a, __b, __p) __arm_vbicq_x_s32(__a, __b, __p)
2265 | #define vbicq_x_u8(__a, __b, __p) __arm_vbicq_x_u8(__a, __b, __p) | |
2266 | #define vbicq_x_u16(__a, __b, __p) __arm_vbicq_x_u16(__a, __b, __p) | |
2267 | #define vbicq_x_u32(__a, __b, __p) __arm_vbicq_x_u32(__a, __b, __p) | |
2268 | #define vbrsrq_x_n_s8(__a, __b, __p) __arm_vbrsrq_x_n_s8(__a, __b, __p) | |
2269 | #define vbrsrq_x_n_s16(__a, __b, __p) __arm_vbrsrq_x_n_s16(__a, __b, __p) | |
2270 | #define vbrsrq_x_n_s32(__a, __b, __p) __arm_vbrsrq_x_n_s32(__a, __b, __p) | |
2271 | #define vbrsrq_x_n_u8(__a, __b, __p) __arm_vbrsrq_x_n_u8(__a, __b, __p) | |
2272 | #define vbrsrq_x_n_u16(__a, __b, __p) __arm_vbrsrq_x_n_u16(__a, __b, __p) | |
2273 | #define vbrsrq_x_n_u32(__a, __b, __p) __arm_vbrsrq_x_n_u32(__a, __b, __p) | |
2274 | #define veorq_x_s8(__a, __b, __p) __arm_veorq_x_s8(__a, __b, __p) | |
2275 | #define veorq_x_s16(__a, __b, __p) __arm_veorq_x_s16(__a, __b, __p) | |
2276 | #define veorq_x_s32(__a, __b, __p) __arm_veorq_x_s32(__a, __b, __p) | |
2277 | #define veorq_x_u8(__a, __b, __p) __arm_veorq_x_u8(__a, __b, __p) | |
2278 | #define veorq_x_u16(__a, __b, __p) __arm_veorq_x_u16(__a, __b, __p) | |
2279 | #define veorq_x_u32(__a, __b, __p) __arm_veorq_x_u32(__a, __b, __p) | |
2280 | #define vmovlbq_x_s8(__a, __p) __arm_vmovlbq_x_s8(__a, __p) | |
2281 | #define vmovlbq_x_s16(__a, __p) __arm_vmovlbq_x_s16(__a, __p) | |
2282 | #define vmovlbq_x_u8(__a, __p) __arm_vmovlbq_x_u8(__a, __p) | |
2283 | #define vmovlbq_x_u16(__a, __p) __arm_vmovlbq_x_u16(__a, __p) | |
2284 | #define vmovltq_x_s8(__a, __p) __arm_vmovltq_x_s8(__a, __p) | |
2285 | #define vmovltq_x_s16(__a, __p) __arm_vmovltq_x_s16(__a, __p) | |
2286 | #define vmovltq_x_u8(__a, __p) __arm_vmovltq_x_u8(__a, __p) | |
2287 | #define vmovltq_x_u16(__a, __p) __arm_vmovltq_x_u16(__a, __p) | |
2288 | #define vmvnq_x_s8(__a, __p) __arm_vmvnq_x_s8(__a, __p) | |
2289 | #define vmvnq_x_s16(__a, __p) __arm_vmvnq_x_s16(__a, __p) | |
2290 | #define vmvnq_x_s32(__a, __p) __arm_vmvnq_x_s32(__a, __p) | |
2291 | #define vmvnq_x_u8(__a, __p) __arm_vmvnq_x_u8(__a, __p) | |
2292 | #define vmvnq_x_u16(__a, __p) __arm_vmvnq_x_u16(__a, __p) | |
2293 | #define vmvnq_x_u32(__a, __p) __arm_vmvnq_x_u32(__a, __p) | |
2294 | #define vmvnq_x_n_s16( __imm, __p) __arm_vmvnq_x_n_s16( __imm, __p) | |
2295 | #define vmvnq_x_n_s32( __imm, __p) __arm_vmvnq_x_n_s32( __imm, __p) | |
2296 | #define vmvnq_x_n_u16( __imm, __p) __arm_vmvnq_x_n_u16( __imm, __p) | |
2297 | #define vmvnq_x_n_u32( __imm, __p) __arm_vmvnq_x_n_u32( __imm, __p) | |
2298 | #define vornq_x_s8(__a, __b, __p) __arm_vornq_x_s8(__a, __b, __p) | |
2299 | #define vornq_x_s16(__a, __b, __p) __arm_vornq_x_s16(__a, __b, __p) | |
2300 | #define vornq_x_s32(__a, __b, __p) __arm_vornq_x_s32(__a, __b, __p) | |
2301 | #define vornq_x_u8(__a, __b, __p) __arm_vornq_x_u8(__a, __b, __p) | |
2302 | #define vornq_x_u16(__a, __b, __p) __arm_vornq_x_u16(__a, __b, __p) | |
2303 | #define vornq_x_u32(__a, __b, __p) __arm_vornq_x_u32(__a, __b, __p) | |
2304 | #define vorrq_x_s8(__a, __b, __p) __arm_vorrq_x_s8(__a, __b, __p) | |
2305 | #define vorrq_x_s16(__a, __b, __p) __arm_vorrq_x_s16(__a, __b, __p) | |
2306 | #define vorrq_x_s32(__a, __b, __p) __arm_vorrq_x_s32(__a, __b, __p) | |
2307 | #define vorrq_x_u8(__a, __b, __p) __arm_vorrq_x_u8(__a, __b, __p) | |
2308 | #define vorrq_x_u16(__a, __b, __p) __arm_vorrq_x_u16(__a, __b, __p) | |
2309 | #define vorrq_x_u32(__a, __b, __p) __arm_vorrq_x_u32(__a, __b, __p) | |
2310 | #define vrev16q_x_s8(__a, __p) __arm_vrev16q_x_s8(__a, __p) | |
2311 | #define vrev16q_x_u8(__a, __p) __arm_vrev16q_x_u8(__a, __p) | |
2312 | #define vrev32q_x_s8(__a, __p) __arm_vrev32q_x_s8(__a, __p) | |
2313 | #define vrev32q_x_s16(__a, __p) __arm_vrev32q_x_s16(__a, __p) | |
2314 | #define vrev32q_x_u8(__a, __p) __arm_vrev32q_x_u8(__a, __p) | |
2315 | #define vrev32q_x_u16(__a, __p) __arm_vrev32q_x_u16(__a, __p) | |
2316 | #define vrev64q_x_s8(__a, __p) __arm_vrev64q_x_s8(__a, __p) | |
2317 | #define vrev64q_x_s16(__a, __p) __arm_vrev64q_x_s16(__a, __p) | |
2318 | #define vrev64q_x_s32(__a, __p) __arm_vrev64q_x_s32(__a, __p) | |
2319 | #define vrev64q_x_u8(__a, __p) __arm_vrev64q_x_u8(__a, __p) | |
2320 | #define vrev64q_x_u16(__a, __p) __arm_vrev64q_x_u16(__a, __p) | |
2321 | #define vrev64q_x_u32(__a, __p) __arm_vrev64q_x_u32(__a, __p) | |
2322 | #define vrshlq_x_s8(__a, __b, __p) __arm_vrshlq_x_s8(__a, __b, __p) | |
2323 | #define vrshlq_x_s16(__a, __b, __p) __arm_vrshlq_x_s16(__a, __b, __p) | |
2324 | #define vrshlq_x_s32(__a, __b, __p) __arm_vrshlq_x_s32(__a, __b, __p) | |
2325 | #define vrshlq_x_u8(__a, __b, __p) __arm_vrshlq_x_u8(__a, __b, __p) | |
2326 | #define vrshlq_x_u16(__a, __b, __p) __arm_vrshlq_x_u16(__a, __b, __p) | |
2327 | #define vrshlq_x_u32(__a, __b, __p) __arm_vrshlq_x_u32(__a, __b, __p) | |
2328 | #define vshllbq_x_n_s8(__a, __imm, __p) __arm_vshllbq_x_n_s8(__a, __imm, __p) | |
2329 | #define vshllbq_x_n_s16(__a, __imm, __p) __arm_vshllbq_x_n_s16(__a, __imm, __p) | |
2330 | #define vshllbq_x_n_u8(__a, __imm, __p) __arm_vshllbq_x_n_u8(__a, __imm, __p) | |
2331 | #define vshllbq_x_n_u16(__a, __imm, __p) __arm_vshllbq_x_n_u16(__a, __imm, __p) | |
2332 | #define vshlltq_x_n_s8(__a, __imm, __p) __arm_vshlltq_x_n_s8(__a, __imm, __p) | |
2333 | #define vshlltq_x_n_s16(__a, __imm, __p) __arm_vshlltq_x_n_s16(__a, __imm, __p) | |
2334 | #define vshlltq_x_n_u8(__a, __imm, __p) __arm_vshlltq_x_n_u8(__a, __imm, __p) | |
2335 | #define vshlltq_x_n_u16(__a, __imm, __p) __arm_vshlltq_x_n_u16(__a, __imm, __p) | |
2336 | #define vshlq_x_s8(__a, __b, __p) __arm_vshlq_x_s8(__a, __b, __p) | |
2337 | #define vshlq_x_s16(__a, __b, __p) __arm_vshlq_x_s16(__a, __b, __p) | |
2338 | #define vshlq_x_s32(__a, __b, __p) __arm_vshlq_x_s32(__a, __b, __p) | |
2339 | #define vshlq_x_u8(__a, __b, __p) __arm_vshlq_x_u8(__a, __b, __p) | |
2340 | #define vshlq_x_u16(__a, __b, __p) __arm_vshlq_x_u16(__a, __b, __p) | |
2341 | #define vshlq_x_u32(__a, __b, __p) __arm_vshlq_x_u32(__a, __b, __p) | |
2342 | #define vshlq_x_n_s8(__a, __imm, __p) __arm_vshlq_x_n_s8(__a, __imm, __p) | |
2343 | #define vshlq_x_n_s16(__a, __imm, __p) __arm_vshlq_x_n_s16(__a, __imm, __p) | |
2344 | #define vshlq_x_n_s32(__a, __imm, __p) __arm_vshlq_x_n_s32(__a, __imm, __p) | |
2345 | #define vshlq_x_n_u8(__a, __imm, __p) __arm_vshlq_x_n_u8(__a, __imm, __p) | |
2346 | #define vshlq_x_n_u16(__a, __imm, __p) __arm_vshlq_x_n_u16(__a, __imm, __p) | |
2347 | #define vshlq_x_n_u32(__a, __imm, __p) __arm_vshlq_x_n_u32(__a, __imm, __p) | |
2348 | #define vrshrq_x_n_s8(__a, __imm, __p) __arm_vrshrq_x_n_s8(__a, __imm, __p) | |
2349 | #define vrshrq_x_n_s16(__a, __imm, __p) __arm_vrshrq_x_n_s16(__a, __imm, __p) | |
2350 | #define vrshrq_x_n_s32(__a, __imm, __p) __arm_vrshrq_x_n_s32(__a, __imm, __p) | |
2351 | #define vrshrq_x_n_u8(__a, __imm, __p) __arm_vrshrq_x_n_u8(__a, __imm, __p) | |
2352 | #define vrshrq_x_n_u16(__a, __imm, __p) __arm_vrshrq_x_n_u16(__a, __imm, __p) | |
2353 | #define vrshrq_x_n_u32(__a, __imm, __p) __arm_vrshrq_x_n_u32(__a, __imm, __p) | |
2354 | #define vshrq_x_n_s8(__a, __imm, __p) __arm_vshrq_x_n_s8(__a, __imm, __p) | |
2355 | #define vshrq_x_n_s16(__a, __imm, __p) __arm_vshrq_x_n_s16(__a, __imm, __p) | |
2356 | #define vshrq_x_n_s32(__a, __imm, __p) __arm_vshrq_x_n_s32(__a, __imm, __p) | |
2357 | #define vshrq_x_n_u8(__a, __imm, __p) __arm_vshrq_x_n_u8(__a, __imm, __p) | |
2358 | #define vshrq_x_n_u16(__a, __imm, __p) __arm_vshrq_x_n_u16(__a, __imm, __p) | |
2359 | #define vshrq_x_n_u32(__a, __imm, __p) __arm_vshrq_x_n_u32(__a, __imm, __p) | |
2360 | #define vdupq_x_n_f16(__a, __p) __arm_vdupq_x_n_f16(__a, __p) | |
2361 | #define vdupq_x_n_f32(__a, __p) __arm_vdupq_x_n_f32(__a, __p) | |
2362 | #define vminnmq_x_f16(__a, __b, __p) __arm_vminnmq_x_f16(__a, __b, __p) | |
2363 | #define vminnmq_x_f32(__a, __b, __p) __arm_vminnmq_x_f32(__a, __b, __p) | |
2364 | #define vmaxnmq_x_f16(__a, __b, __p) __arm_vmaxnmq_x_f16(__a, __b, __p) | |
2365 | #define vmaxnmq_x_f32(__a, __b, __p) __arm_vmaxnmq_x_f32(__a, __b, __p) | |
2366 | #define vabdq_x_f16(__a, __b, __p) __arm_vabdq_x_f16(__a, __b, __p) | |
2367 | #define vabdq_x_f32(__a, __b, __p) __arm_vabdq_x_f32(__a, __b, __p) | |
2368 | #define vabsq_x_f16(__a, __p) __arm_vabsq_x_f16(__a, __p) | |
2369 | #define vabsq_x_f32(__a, __p) __arm_vabsq_x_f32(__a, __p) | |
2370 | #define vaddq_x_f16(__a, __b, __p) __arm_vaddq_x_f16(__a, __b, __p) | |
2371 | #define vaddq_x_f32(__a, __b, __p) __arm_vaddq_x_f32(__a, __b, __p) | |
2372 | #define vaddq_x_n_f16(__a, __b, __p) __arm_vaddq_x_n_f16(__a, __b, __p) | |
2373 | #define vaddq_x_n_f32(__a, __b, __p) __arm_vaddq_x_n_f32(__a, __b, __p) | |
2374 | #define vnegq_x_f16(__a, __p) __arm_vnegq_x_f16(__a, __p) | |
2375 | #define vnegq_x_f32(__a, __p) __arm_vnegq_x_f32(__a, __p) | |
2376 | #define vmulq_x_f16(__a, __b, __p) __arm_vmulq_x_f16(__a, __b, __p) | |
2377 | #define vmulq_x_f32(__a, __b, __p) __arm_vmulq_x_f32(__a, __b, __p) | |
2378 | #define vmulq_x_n_f16(__a, __b, __p) __arm_vmulq_x_n_f16(__a, __b, __p) | |
2379 | #define vmulq_x_n_f32(__a, __b, __p) __arm_vmulq_x_n_f32(__a, __b, __p) | |
2380 | #define vsubq_x_f16(__a, __b, __p) __arm_vsubq_x_f16(__a, __b, __p) | |
2381 | #define vsubq_x_f32(__a, __b, __p) __arm_vsubq_x_f32(__a, __b, __p) | |
2382 | #define vsubq_x_n_f16(__a, __b, __p) __arm_vsubq_x_n_f16(__a, __b, __p) | |
2383 | #define vsubq_x_n_f32(__a, __b, __p) __arm_vsubq_x_n_f32(__a, __b, __p) | |
2384 | #define vcaddq_rot90_x_f16(__a, __b, __p) __arm_vcaddq_rot90_x_f16(__a, __b, __p) | |
2385 | #define vcaddq_rot90_x_f32(__a, __b, __p) __arm_vcaddq_rot90_x_f32(__a, __b, __p) | |
2386 | #define vcaddq_rot270_x_f16(__a, __b, __p) __arm_vcaddq_rot270_x_f16(__a, __b, __p) | |
2387 | #define vcaddq_rot270_x_f32(__a, __b, __p) __arm_vcaddq_rot270_x_f32(__a, __b, __p) | |
2388 | #define vcmulq_x_f16(__a, __b, __p) __arm_vcmulq_x_f16(__a, __b, __p) | |
2389 | #define vcmulq_x_f32(__a, __b, __p) __arm_vcmulq_x_f32(__a, __b, __p) | |
2390 | #define vcmulq_rot90_x_f16(__a, __b, __p) __arm_vcmulq_rot90_x_f16(__a, __b, __p) | |
2391 | #define vcmulq_rot90_x_f32(__a, __b, __p) __arm_vcmulq_rot90_x_f32(__a, __b, __p) | |
2392 | #define vcmulq_rot180_x_f16(__a, __b, __p) __arm_vcmulq_rot180_x_f16(__a, __b, __p) | |
2393 | #define vcmulq_rot180_x_f32(__a, __b, __p) __arm_vcmulq_rot180_x_f32(__a, __b, __p) | |
2394 | #define vcmulq_rot270_x_f16(__a, __b, __p) __arm_vcmulq_rot270_x_f16(__a, __b, __p) | |
2395 | #define vcmulq_rot270_x_f32(__a, __b, __p) __arm_vcmulq_rot270_x_f32(__a, __b, __p) | |
2396 | #define vcvtaq_x_s16_f16(__a, __p) __arm_vcvtaq_x_s16_f16(__a, __p) | |
2397 | #define vcvtaq_x_s32_f32(__a, __p) __arm_vcvtaq_x_s32_f32(__a, __p) | |
2398 | #define vcvtaq_x_u16_f16(__a, __p) __arm_vcvtaq_x_u16_f16(__a, __p) | |
2399 | #define vcvtaq_x_u32_f32(__a, __p) __arm_vcvtaq_x_u32_f32(__a, __p) | |
2400 | #define vcvtnq_x_s16_f16(__a, __p) __arm_vcvtnq_x_s16_f16(__a, __p) | |
2401 | #define vcvtnq_x_s32_f32(__a, __p) __arm_vcvtnq_x_s32_f32(__a, __p) | |
2402 | #define vcvtnq_x_u16_f16(__a, __p) __arm_vcvtnq_x_u16_f16(__a, __p) | |
2403 | #define vcvtnq_x_u32_f32(__a, __p) __arm_vcvtnq_x_u32_f32(__a, __p) | |
2404 | #define vcvtpq_x_s16_f16(__a, __p) __arm_vcvtpq_x_s16_f16(__a, __p) | |
2405 | #define vcvtpq_x_s32_f32(__a, __p) __arm_vcvtpq_x_s32_f32(__a, __p) | |
2406 | #define vcvtpq_x_u16_f16(__a, __p) __arm_vcvtpq_x_u16_f16(__a, __p) | |
2407 | #define vcvtpq_x_u32_f32(__a, __p) __arm_vcvtpq_x_u32_f32(__a, __p) | |
2408 | #define vcvtmq_x_s16_f16(__a, __p) __arm_vcvtmq_x_s16_f16(__a, __p) | |
2409 | #define vcvtmq_x_s32_f32(__a, __p) __arm_vcvtmq_x_s32_f32(__a, __p) | |
2410 | #define vcvtmq_x_u16_f16(__a, __p) __arm_vcvtmq_x_u16_f16(__a, __p) | |
2411 | #define vcvtmq_x_u32_f32(__a, __p) __arm_vcvtmq_x_u32_f32(__a, __p) | |
2412 | #define vcvtbq_x_f32_f16(__a, __p) __arm_vcvtbq_x_f32_f16(__a, __p) | |
2413 | #define vcvttq_x_f32_f16(__a, __p) __arm_vcvttq_x_f32_f16(__a, __p) | |
2414 | #define vcvtq_x_f16_u16(__a, __p) __arm_vcvtq_x_f16_u16(__a, __p) | |
2415 | #define vcvtq_x_f16_s16(__a, __p) __arm_vcvtq_x_f16_s16(__a, __p) | |
2416 | #define vcvtq_x_f32_s32(__a, __p) __arm_vcvtq_x_f32_s32(__a, __p) | |
2417 | #define vcvtq_x_f32_u32(__a, __p) __arm_vcvtq_x_f32_u32(__a, __p) | |
2418 | #define vcvtq_x_n_f16_s16(__a, __imm6, __p) __arm_vcvtq_x_n_f16_s16(__a, __imm6, __p) | |
2419 | #define vcvtq_x_n_f16_u16(__a, __imm6, __p) __arm_vcvtq_x_n_f16_u16(__a, __imm6, __p) | |
2420 | #define vcvtq_x_n_f32_s32(__a, __imm6, __p) __arm_vcvtq_x_n_f32_s32(__a, __imm6, __p) | |
2421 | #define vcvtq_x_n_f32_u32(__a, __imm6, __p) __arm_vcvtq_x_n_f32_u32(__a, __imm6, __p) | |
2422 | #define vcvtq_x_s16_f16(__a, __p) __arm_vcvtq_x_s16_f16(__a, __p) | |
2423 | #define vcvtq_x_s32_f32(__a, __p) __arm_vcvtq_x_s32_f32(__a, __p) | |
2424 | #define vcvtq_x_u16_f16(__a, __p) __arm_vcvtq_x_u16_f16(__a, __p) | |
2425 | #define vcvtq_x_u32_f32(__a, __p) __arm_vcvtq_x_u32_f32(__a, __p) | |
2426 | #define vcvtq_x_n_s16_f16(__a, __imm6, __p) __arm_vcvtq_x_n_s16_f16(__a, __imm6, __p) | |
2427 | #define vcvtq_x_n_s32_f32(__a, __imm6, __p) __arm_vcvtq_x_n_s32_f32(__a, __imm6, __p) | |
2428 | #define vcvtq_x_n_u16_f16(__a, __imm6, __p) __arm_vcvtq_x_n_u16_f16(__a, __imm6, __p) | |
2429 | #define vcvtq_x_n_u32_f32(__a, __imm6, __p) __arm_vcvtq_x_n_u32_f32(__a, __imm6, __p) | |
2430 | #define vrndq_x_f16(__a, __p) __arm_vrndq_x_f16(__a, __p) | |
2431 | #define vrndq_x_f32(__a, __p) __arm_vrndq_x_f32(__a, __p) | |
2432 | #define vrndnq_x_f16(__a, __p) __arm_vrndnq_x_f16(__a, __p) | |
2433 | #define vrndnq_x_f32(__a, __p) __arm_vrndnq_x_f32(__a, __p) | |
2434 | #define vrndmq_x_f16(__a, __p) __arm_vrndmq_x_f16(__a, __p) | |
2435 | #define vrndmq_x_f32(__a, __p) __arm_vrndmq_x_f32(__a, __p) | |
2436 | #define vrndpq_x_f16(__a, __p) __arm_vrndpq_x_f16(__a, __p) | |
2437 | #define vrndpq_x_f32(__a, __p) __arm_vrndpq_x_f32(__a, __p) | |
2438 | #define vrndaq_x_f16(__a, __p) __arm_vrndaq_x_f16(__a, __p) | |
2439 | #define vrndaq_x_f32(__a, __p) __arm_vrndaq_x_f32(__a, __p) | |
2440 | #define vrndxq_x_f16(__a, __p) __arm_vrndxq_x_f16(__a, __p) | |
2441 | #define vrndxq_x_f32(__a, __p) __arm_vrndxq_x_f32(__a, __p) | |
2442 | #define vandq_x_f16(__a, __b, __p) __arm_vandq_x_f16(__a, __b, __p) | |
2443 | #define vandq_x_f32(__a, __b, __p) __arm_vandq_x_f32(__a, __b, __p) | |
2444 | #define vbicq_x_f16(__a, __b, __p) __arm_vbicq_x_f16(__a, __b, __p) | |
2445 | #define vbicq_x_f32(__a, __b, __p) __arm_vbicq_x_f32(__a, __b, __p) | |
2446 | #define vbrsrq_x_n_f16(__a, __b, __p) __arm_vbrsrq_x_n_f16(__a, __b, __p) | |
2447 | #define vbrsrq_x_n_f32(__a, __b, __p) __arm_vbrsrq_x_n_f32(__a, __b, __p) | |
2448 | #define veorq_x_f16(__a, __b, __p) __arm_veorq_x_f16(__a, __b, __p) | |
2449 | #define veorq_x_f32(__a, __b, __p) __arm_veorq_x_f32(__a, __b, __p) | |
2450 | #define vornq_x_f16(__a, __b, __p) __arm_vornq_x_f16(__a, __b, __p) | |
2451 | #define vornq_x_f32(__a, __b, __p) __arm_vornq_x_f32(__a, __b, __p) | |
2452 | #define vorrq_x_f16(__a, __b, __p) __arm_vorrq_x_f16(__a, __b, __p) | |
2453 | #define vorrq_x_f32(__a, __b, __p) __arm_vorrq_x_f32(__a, __b, __p) | |
2454 | #define vrev32q_x_f16(__a, __p) __arm_vrev32q_x_f16(__a, __p) | |
2455 | #define vrev64q_x_f16(__a, __p) __arm_vrev64q_x_f16(__a, __p) | |
2456 | #define vrev64q_x_f32(__a, __p) __arm_vrev64q_x_f32(__a, __p) | |
/* Add/subtract with carry (VADC/VADCI/VSBC/VSBCI) aliases.  The __carry /
   __carry_out pointer receives the resulting carry flag.  */
#define vadciq_s32(__a, __b, __carry_out) __arm_vadciq_s32(__a, __b, __carry_out)
#define vadciq_u32(__a, __b, __carry_out) __arm_vadciq_u32(__a, __b, __carry_out)
#define vadciq_m_s32(__inactive, __a, __b, __carry_out, __p) __arm_vadciq_m_s32(__inactive, __a, __b, __carry_out, __p)
#define vadciq_m_u32(__inactive, __a, __b, __carry_out, __p) __arm_vadciq_m_u32(__inactive, __a, __b, __carry_out, __p)
#define vadcq_s32(__a, __b, __carry) __arm_vadcq_s32(__a, __b, __carry)
#define vadcq_u32(__a, __b, __carry) __arm_vadcq_u32(__a, __b, __carry)
#define vadcq_m_s32(__inactive, __a, __b, __carry, __p) __arm_vadcq_m_s32(__inactive, __a, __b, __carry, __p)
#define vadcq_m_u32(__inactive, __a, __b, __carry, __p) __arm_vadcq_m_u32(__inactive, __a, __b, __carry, __p)
#define vsbciq_s32(__a, __b, __carry_out) __arm_vsbciq_s32(__a, __b, __carry_out)
#define vsbciq_u32(__a, __b, __carry_out) __arm_vsbciq_u32(__a, __b, __carry_out)
#define vsbciq_m_s32(__inactive, __a, __b, __carry_out, __p) __arm_vsbciq_m_s32(__inactive, __a, __b, __carry_out, __p)
#define vsbciq_m_u32(__inactive, __a, __b, __carry_out, __p) __arm_vsbciq_m_u32(__inactive, __a, __b, __carry_out, __p)
#define vsbcq_s32(__a, __b, __carry) __arm_vsbcq_s32(__a, __b, __carry)
#define vsbcq_u32(__a, __b, __carry) __arm_vsbcq_u32(__a, __b, __carry)
#define vsbcq_m_s32(__inactive, __a, __b, __carry, __p) __arm_vsbcq_m_s32(__inactive, __a, __b, __carry, __p)
#define vsbcq_m_u32(__inactive, __a, __b, __carry, __p) __arm_vsbcq_m_u32(__inactive, __a, __b, __carry, __p)
/* Contiguous load/store aliases: predicated single-vector forms
   (vst1q_p / vld1q_z) and 2-/4-vector interleaving forms (vst2q, vld2q,
   vld4q).  */
#define vst1q_p_u8(__addr, __value, __p) __arm_vst1q_p_u8(__addr, __value, __p)
#define vst1q_p_s8(__addr, __value, __p) __arm_vst1q_p_s8(__addr, __value, __p)
#define vst2q_s8(__addr, __value) __arm_vst2q_s8(__addr, __value)
#define vst2q_u8(__addr, __value) __arm_vst2q_u8(__addr, __value)
#define vld1q_z_u8(__base, __p) __arm_vld1q_z_u8(__base, __p)
#define vld1q_z_s8(__base, __p) __arm_vld1q_z_s8(__base, __p)
#define vld2q_s8(__addr) __arm_vld2q_s8(__addr)
#define vld2q_u8(__addr) __arm_vld2q_u8(__addr)
#define vld4q_s8(__addr) __arm_vld4q_s8(__addr)
#define vld4q_u8(__addr) __arm_vld4q_u8(__addr)
#define vst1q_p_u16(__addr, __value, __p) __arm_vst1q_p_u16(__addr, __value, __p)
#define vst1q_p_s16(__addr, __value, __p) __arm_vst1q_p_s16(__addr, __value, __p)
#define vst2q_s16(__addr, __value) __arm_vst2q_s16(__addr, __value)
#define vst2q_u16(__addr, __value) __arm_vst2q_u16(__addr, __value)
#define vld1q_z_u16(__base, __p) __arm_vld1q_z_u16(__base, __p)
#define vld1q_z_s16(__base, __p) __arm_vld1q_z_s16(__base, __p)
#define vld2q_s16(__addr) __arm_vld2q_s16(__addr)
#define vld2q_u16(__addr) __arm_vld2q_u16(__addr)
#define vld4q_s16(__addr) __arm_vld4q_s16(__addr)
#define vld4q_u16(__addr) __arm_vld4q_u16(__addr)
#define vst1q_p_u32(__addr, __value, __p) __arm_vst1q_p_u32(__addr, __value, __p)
#define vst1q_p_s32(__addr, __value, __p) __arm_vst1q_p_s32(__addr, __value, __p)
#define vst2q_s32(__addr, __value) __arm_vst2q_s32(__addr, __value)
#define vst2q_u32(__addr, __value) __arm_vst2q_u32(__addr, __value)
#define vld1q_z_u32(__base, __p) __arm_vld1q_z_u32(__base, __p)
#define vld1q_z_s32(__base, __p) __arm_vld1q_z_s32(__base, __p)
#define vld2q_s32(__addr) __arm_vld2q_s32(__addr)
#define vld2q_u32(__addr) __arm_vld2q_u32(__addr)
#define vld4q_s32(__addr) __arm_vld4q_s32(__addr)
#define vld4q_u32(__addr) __arm_vld4q_u32(__addr)
#define vld4q_f16(__addr) __arm_vld4q_f16(__addr)
#define vld2q_f16(__addr) __arm_vld2q_f16(__addr)
#define vld1q_z_f16(__base, __p) __arm_vld1q_z_f16(__base, __p)
#define vst2q_f16(__addr, __value) __arm_vst2q_f16(__addr, __value)
#define vst1q_p_f16(__addr, __value, __p) __arm_vst1q_p_f16(__addr, __value, __p)
#define vld4q_f32(__addr) __arm_vld4q_f32(__addr)
#define vld2q_f32(__addr) __arm_vld2q_f32(__addr)
#define vld1q_z_f32(__base, __p) __arm_vld1q_z_f32(__base, __p)
#define vst2q_f32(__addr, __value) __arm_vst2q_f32(__addr, __value)
#define vst1q_p_f32(__addr, __value, __p) __arm_vst1q_p_f32(__addr, __value, __p)
/* Scalar lane insert/extract aliases (vsetq_lane / vgetq_lane); __idx is a
   compile-time lane index.  */
#define vsetq_lane_f16(__a, __b, __idx) __arm_vsetq_lane_f16(__a, __b, __idx)
#define vsetq_lane_f32(__a, __b, __idx) __arm_vsetq_lane_f32(__a, __b, __idx)
#define vsetq_lane_s16(__a, __b, __idx) __arm_vsetq_lane_s16(__a, __b, __idx)
#define vsetq_lane_s32(__a, __b, __idx) __arm_vsetq_lane_s32(__a, __b, __idx)
#define vsetq_lane_s8(__a, __b, __idx) __arm_vsetq_lane_s8(__a, __b, __idx)
#define vsetq_lane_s64(__a, __b, __idx) __arm_vsetq_lane_s64(__a, __b, __idx)
#define vsetq_lane_u8(__a, __b, __idx) __arm_vsetq_lane_u8(__a, __b, __idx)
#define vsetq_lane_u16(__a, __b, __idx) __arm_vsetq_lane_u16(__a, __b, __idx)
#define vsetq_lane_u32(__a, __b, __idx) __arm_vsetq_lane_u32(__a, __b, __idx)
#define vsetq_lane_u64(__a, __b, __idx) __arm_vsetq_lane_u64(__a, __b, __idx)
#define vgetq_lane_f16(__a, __idx) __arm_vgetq_lane_f16(__a, __idx)
#define vgetq_lane_f32(__a, __idx) __arm_vgetq_lane_f32(__a, __idx)
#define vgetq_lane_s16(__a, __idx) __arm_vgetq_lane_s16(__a, __idx)
#define vgetq_lane_s32(__a, __idx) __arm_vgetq_lane_s32(__a, __idx)
#define vgetq_lane_s8(__a, __idx) __arm_vgetq_lane_s8(__a, __idx)
#define vgetq_lane_s64(__a, __idx) __arm_vgetq_lane_s64(__a, __idx)
#define vgetq_lane_u8(__a, __idx) __arm_vgetq_lane_u8(__a, __idx)
#define vgetq_lane_u16(__a, __idx) __arm_vgetq_lane_u16(__a, __idx)
#define vgetq_lane_u32(__a, __idx) __arm_vgetq_lane_u32(__a, __idx)
#define vgetq_lane_u64(__a, __idx) __arm_vgetq_lane_u64(__a, __idx)
/* Scalar (non-vector) saturating/rounding shift aliases operating on
   32-bit and 64-bit general-purpose register values.  */
#define sqrshr(__p0, __p1) __arm_sqrshr(__p0, __p1)
#define sqrshrl(__p0, __p1) __arm_sqrshrl(__p0, __p1)
#define sqrshrl_sat48(__p0, __p1) __arm_sqrshrl_sat48(__p0, __p1)
#define sqshl(__p0, __p1) __arm_sqshl(__p0, __p1)
#define sqshll(__p0, __p1) __arm_sqshll(__p0, __p1)
#define srshr(__p0, __p1) __arm_srshr(__p0, __p1)
#define srshrl(__p0, __p1) __arm_srshrl(__p0, __p1)
#define uqrshl(__p0, __p1) __arm_uqrshl(__p0, __p1)
#define uqrshll(__p0, __p1) __arm_uqrshll(__p0, __p1)
#define uqrshll_sat48(__p0, __p1) __arm_uqrshll_sat48(__p0, __p1)
#define uqshl(__p0, __p1) __arm_uqshl(__p0, __p1)
#define uqshll(__p0, __p1) __arm_uqshll(__p0, __p1)
#define urshr(__p0, __p1) __arm_urshr(__p0, __p1)
#define urshrl(__p0, __p1) __arm_urshrl(__p0, __p1)
#define lsll(__p0, __p1) __arm_lsll(__p0, __p1)
#define asrl(__p0, __p1) __arm_asrl(__p0, __p1)
/* Predicated whole-vector shift-left-with-carry (VSHLC) aliases; __b points
   to the 32-bit carry value updated by the operation.  */
#define vshlcq_m_s8(__a, __b, __imm, __p) __arm_vshlcq_m_s8(__a, __b, __imm, __p)
#define vshlcq_m_u8(__a, __b, __imm, __p) __arm_vshlcq_m_u8(__a, __b, __imm, __p)
#define vshlcq_m_s16(__a, __b, __imm, __p) __arm_vshlcq_m_s16(__a, __b, __imm, __p)
#define vshlcq_m_u16(__a, __b, __imm, __p) __arm_vshlcq_m_u16(__a, __b, __imm, __p)
#define vshlcq_m_s32(__a, __b, __imm, __p) __arm_vshlcq_m_s32(__a, __b, __imm, __p)
#define vshlcq_m_u32(__a, __b, __imm, __p) __arm_vshlcq_m_u32(__a, __b, __imm, __p)
14782c81 SP |
2555 | #endif |
2556 | ||
1a5c27b1 SP |
2557 | /* For big-endian, GCC's vector indices are reversed within each 64 bits |
2558 | compared to the architectural lane indices used by MVE intrinsics. */ | |
2559 | #define __ARM_NUM_LANES(__v) (sizeof (__v) / sizeof (__v[0])) | |
2560 | #ifdef __ARM_BIG_ENDIAN | |
2561 | #define __ARM_LANEQ(__vec, __idx) (__idx ^ (__ARM_NUM_LANES(__vec)/2 - 1)) | |
2562 | #else | |
2563 | #define __ARM_LANEQ(__vec, __idx) __idx | |
2564 | #endif | |
2565 | #define __ARM_CHECK_LANEQ(__vec, __idx) \ | |
2566 | __builtin_arm_lane_check (__ARM_NUM_LANES(__vec), \ | |
2567 | __ARM_LANEQ(__vec, __idx)) | |
2568 | ||
14782c81 SP |
2569 | __extension__ extern __inline void |
2570 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2571 | __arm_vst4q_s8 (int8_t * __addr, int8x16x4_t __value) | |
2572 | { | |
2573 | union { int8x16x4_t __i; __builtin_neon_xi __o; } __rv; | |
2574 | __rv.__i = __value; | |
2575 | __builtin_mve_vst4qv16qi ((__builtin_neon_qi *) __addr, __rv.__o); | |
2576 | } | |
2577 | ||
2578 | __extension__ extern __inline void | |
2579 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2580 | __arm_vst4q_s16 (int16_t * __addr, int16x8x4_t __value) | |
2581 | { | |
2582 | union { int16x8x4_t __i; __builtin_neon_xi __o; } __rv; | |
2583 | __rv.__i = __value; | |
2584 | __builtin_mve_vst4qv8hi ((__builtin_neon_hi *) __addr, __rv.__o); | |
2585 | } | |
2586 | ||
2587 | __extension__ extern __inline void | |
2588 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2589 | __arm_vst4q_s32 (int32_t * __addr, int32x4x4_t __value) | |
2590 | { | |
2591 | union { int32x4x4_t __i; __builtin_neon_xi __o; } __rv; | |
2592 | __rv.__i = __value; | |
2593 | __builtin_mve_vst4qv4si ((__builtin_neon_si *) __addr, __rv.__o); | |
2594 | } | |
2595 | ||
2596 | __extension__ extern __inline void | |
2597 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2598 | __arm_vst4q_u8 (uint8_t * __addr, uint8x16x4_t __value) | |
2599 | { | |
2600 | union { uint8x16x4_t __i; __builtin_neon_xi __o; } __rv; | |
2601 | __rv.__i = __value; | |
2602 | __builtin_mve_vst4qv16qi ((__builtin_neon_qi *) __addr, __rv.__o); | |
2603 | } | |
2604 | ||
2605 | __extension__ extern __inline void | |
2606 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2607 | __arm_vst4q_u16 (uint16_t * __addr, uint16x8x4_t __value) | |
2608 | { | |
2609 | union { uint16x8x4_t __i; __builtin_neon_xi __o; } __rv; | |
2610 | __rv.__i = __value; | |
2611 | __builtin_mve_vst4qv8hi ((__builtin_neon_hi *) __addr, __rv.__o); | |
2612 | } | |
2613 | ||
2614 | __extension__ extern __inline void | |
2615 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2616 | __arm_vst4q_u32 (uint32_t * __addr, uint32x4x4_t __value) | |
2617 | { | |
2618 | union { uint32x4x4_t __i; __builtin_neon_xi __o; } __rv; | |
2619 | __rv.__i = __value; | |
2620 | __builtin_mve_vst4qv4si ((__builtin_neon_si *) __addr, __rv.__o); | |
2621 | } | |
2622 | ||
6df4618c SP |
2623 | __extension__ extern __inline int8x16_t |
2624 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2625 | __arm_vdupq_n_s8 (int8_t __a) | |
2626 | { | |
2627 | return __builtin_mve_vdupq_n_sv16qi (__a); | |
2628 | } | |
2629 | ||
2630 | __extension__ extern __inline int16x8_t | |
2631 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2632 | __arm_vdupq_n_s16 (int16_t __a) | |
2633 | { | |
2634 | return __builtin_mve_vdupq_n_sv8hi (__a); | |
2635 | } | |
2636 | ||
2637 | __extension__ extern __inline int32x4_t | |
2638 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2639 | __arm_vdupq_n_s32 (int32_t __a) | |
2640 | { | |
2641 | return __builtin_mve_vdupq_n_sv4si (__a); | |
2642 | } | |
2643 | ||
2644 | __extension__ extern __inline int8x16_t | |
2645 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2646 | __arm_vabsq_s8 (int8x16_t __a) | |
2647 | { | |
2648 | return __builtin_mve_vabsq_sv16qi (__a); | |
2649 | } | |
2650 | ||
2651 | __extension__ extern __inline int16x8_t | |
2652 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2653 | __arm_vabsq_s16 (int16x8_t __a) | |
2654 | { | |
2655 | return __builtin_mve_vabsq_sv8hi (__a); | |
2656 | } | |
2657 | ||
2658 | __extension__ extern __inline int32x4_t | |
2659 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2660 | __arm_vabsq_s32 (int32x4_t __a) | |
2661 | { | |
2662 | return __builtin_mve_vabsq_sv4si (__a); | |
2663 | } | |
2664 | ||
2665 | __extension__ extern __inline int8x16_t | |
2666 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2667 | __arm_vclsq_s8 (int8x16_t __a) | |
2668 | { | |
2669 | return __builtin_mve_vclsq_sv16qi (__a); | |
2670 | } | |
2671 | ||
2672 | __extension__ extern __inline int16x8_t | |
2673 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2674 | __arm_vclsq_s16 (int16x8_t __a) | |
2675 | { | |
2676 | return __builtin_mve_vclsq_sv8hi (__a); | |
2677 | } | |
2678 | ||
2679 | __extension__ extern __inline int32x4_t | |
2680 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2681 | __arm_vclsq_s32 (int32x4_t __a) | |
2682 | { | |
2683 | return __builtin_mve_vclsq_sv4si (__a); | |
2684 | } | |
2685 | ||
2686 | __extension__ extern __inline int8x16_t | |
2687 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2688 | __arm_vclzq_s8 (int8x16_t __a) | |
2689 | { | |
2690 | return __builtin_mve_vclzq_sv16qi (__a); | |
2691 | } | |
2692 | ||
2693 | __extension__ extern __inline int16x8_t | |
2694 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2695 | __arm_vclzq_s16 (int16x8_t __a) | |
2696 | { | |
2697 | return __builtin_mve_vclzq_sv8hi (__a); | |
2698 | } | |
2699 | ||
2700 | __extension__ extern __inline int32x4_t | |
2701 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2702 | __arm_vclzq_s32 (int32x4_t __a) | |
2703 | { | |
2704 | return __builtin_mve_vclzq_sv4si (__a); | |
2705 | } | |
2706 | ||
2707 | __extension__ extern __inline int8x16_t | |
2708 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2709 | __arm_vnegq_s8 (int8x16_t __a) | |
2710 | { | |
2711 | return __builtin_mve_vnegq_sv16qi (__a); | |
2712 | } | |
2713 | ||
2714 | __extension__ extern __inline int16x8_t | |
2715 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2716 | __arm_vnegq_s16 (int16x8_t __a) | |
2717 | { | |
2718 | return __builtin_mve_vnegq_sv8hi (__a); | |
2719 | } | |
2720 | ||
2721 | __extension__ extern __inline int32x4_t | |
2722 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2723 | __arm_vnegq_s32 (int32x4_t __a) | |
2724 | { | |
2725 | return __builtin_mve_vnegq_sv4si (__a); | |
2726 | } | |
2727 | ||
2728 | __extension__ extern __inline int64_t | |
2729 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2730 | __arm_vaddlvq_s32 (int32x4_t __a) | |
2731 | { | |
2732 | return __builtin_mve_vaddlvq_sv4si (__a); | |
2733 | } | |
2734 | ||
2735 | __extension__ extern __inline int32_t | |
2736 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2737 | __arm_vaddvq_s8 (int8x16_t __a) | |
2738 | { | |
2739 | return __builtin_mve_vaddvq_sv16qi (__a); | |
2740 | } | |
2741 | ||
2742 | __extension__ extern __inline int32_t | |
2743 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2744 | __arm_vaddvq_s16 (int16x8_t __a) | |
2745 | { | |
2746 | return __builtin_mve_vaddvq_sv8hi (__a); | |
2747 | } | |
2748 | ||
2749 | __extension__ extern __inline int32_t | |
2750 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2751 | __arm_vaddvq_s32 (int32x4_t __a) | |
2752 | { | |
2753 | return __builtin_mve_vaddvq_sv4si (__a); | |
2754 | } | |
2755 | ||
2756 | __extension__ extern __inline int16x8_t | |
2757 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2758 | __arm_vmovlbq_s8 (int8x16_t __a) | |
2759 | { | |
2760 | return __builtin_mve_vmovlbq_sv16qi (__a); | |
2761 | } | |
2762 | ||
2763 | __extension__ extern __inline int32x4_t | |
2764 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2765 | __arm_vmovlbq_s16 (int16x8_t __a) | |
2766 | { | |
2767 | return __builtin_mve_vmovlbq_sv8hi (__a); | |
2768 | } | |
2769 | ||
2770 | __extension__ extern __inline int16x8_t | |
2771 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2772 | __arm_vmovltq_s8 (int8x16_t __a) | |
2773 | { | |
2774 | return __builtin_mve_vmovltq_sv16qi (__a); | |
2775 | } | |
2776 | ||
2777 | __extension__ extern __inline int32x4_t | |
2778 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2779 | __arm_vmovltq_s16 (int16x8_t __a) | |
2780 | { | |
2781 | return __builtin_mve_vmovltq_sv8hi (__a); | |
2782 | } | |
2783 | ||
2784 | __extension__ extern __inline int8x16_t | |
2785 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2786 | __arm_vmvnq_s8 (int8x16_t __a) | |
2787 | { | |
2788 | return __builtin_mve_vmvnq_sv16qi (__a); | |
2789 | } | |
2790 | ||
2791 | __extension__ extern __inline int16x8_t | |
2792 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2793 | __arm_vmvnq_s16 (int16x8_t __a) | |
2794 | { | |
2795 | return __builtin_mve_vmvnq_sv8hi (__a); | |
2796 | } | |
2797 | ||
2798 | __extension__ extern __inline int32x4_t | |
2799 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2800 | __arm_vmvnq_s32 (int32x4_t __a) | |
2801 | { | |
2802 | return __builtin_mve_vmvnq_sv4si (__a); | |
2803 | } | |
2804 | ||
5db0eb95 SP |
2805 | __extension__ extern __inline int16x8_t |
2806 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2807 | __arm_vmvnq_n_s16 (const int16_t __imm) | |
2808 | { | |
2809 | return __builtin_mve_vmvnq_n_sv8hi (__imm); | |
2810 | } | |
2811 | ||
2812 | __extension__ extern __inline int32x4_t | |
2813 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2814 | __arm_vmvnq_n_s32 (const int32_t __imm) | |
2815 | { | |
2816 | return __builtin_mve_vmvnq_n_sv4si (__imm); | |
2817 | } | |
2818 | ||
6df4618c SP |
2819 | __extension__ extern __inline int8x16_t |
2820 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2821 | __arm_vrev16q_s8 (int8x16_t __a) | |
2822 | { | |
2823 | return __builtin_mve_vrev16q_sv16qi (__a); | |
2824 | } | |
2825 | ||
2826 | __extension__ extern __inline int8x16_t | |
2827 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2828 | __arm_vrev32q_s8 (int8x16_t __a) | |
2829 | { | |
2830 | return __builtin_mve_vrev32q_sv16qi (__a); | |
2831 | } | |
2832 | ||
2833 | __extension__ extern __inline int16x8_t | |
2834 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2835 | __arm_vrev32q_s16 (int16x8_t __a) | |
2836 | { | |
2837 | return __builtin_mve_vrev32q_sv8hi (__a); | |
2838 | } | |
2839 | ||
5db0eb95 SP |
2840 | __extension__ extern __inline int8x16_t |
2841 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2842 | __arm_vrev64q_s8 (int8x16_t __a) | |
2843 | { | |
2844 | return __builtin_mve_vrev64q_sv16qi (__a); | |
2845 | } | |
2846 | ||
2847 | __extension__ extern __inline int16x8_t | |
2848 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2849 | __arm_vrev64q_s16 (int16x8_t __a) | |
2850 | { | |
2851 | return __builtin_mve_vrev64q_sv8hi (__a); | |
2852 | } | |
2853 | ||
2854 | __extension__ extern __inline int32x4_t | |
2855 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2856 | __arm_vrev64q_s32 (int32x4_t __a) | |
2857 | { | |
2858 | return __builtin_mve_vrev64q_sv4si (__a); | |
2859 | } | |
2860 | ||
6df4618c SP |
2861 | __extension__ extern __inline int8x16_t |
2862 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2863 | __arm_vqabsq_s8 (int8x16_t __a) | |
2864 | { | |
2865 | return __builtin_mve_vqabsq_sv16qi (__a); | |
2866 | } | |
2867 | ||
2868 | __extension__ extern __inline int16x8_t | |
2869 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2870 | __arm_vqabsq_s16 (int16x8_t __a) | |
2871 | { | |
2872 | return __builtin_mve_vqabsq_sv8hi (__a); | |
2873 | } | |
2874 | ||
2875 | __extension__ extern __inline int32x4_t | |
2876 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2877 | __arm_vqabsq_s32 (int32x4_t __a) | |
2878 | { | |
2879 | return __builtin_mve_vqabsq_sv4si (__a); | |
2880 | } | |
2881 | ||
2882 | __extension__ extern __inline int8x16_t | |
2883 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2884 | __arm_vqnegq_s8 (int8x16_t __a) | |
2885 | { | |
2886 | return __builtin_mve_vqnegq_sv16qi (__a); | |
2887 | } | |
2888 | ||
2889 | __extension__ extern __inline int16x8_t | |
2890 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2891 | __arm_vqnegq_s16 (int16x8_t __a) | |
2892 | { | |
2893 | return __builtin_mve_vqnegq_sv8hi (__a); | |
2894 | } | |
2895 | ||
2896 | __extension__ extern __inline int32x4_t | |
2897 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2898 | __arm_vqnegq_s32 (int32x4_t __a) | |
2899 | { | |
2900 | return __builtin_mve_vqnegq_sv4si (__a); | |
2901 | } | |
2902 | ||
5db0eb95 SP |
2903 | __extension__ extern __inline uint8x16_t |
2904 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2905 | __arm_vrev64q_u8 (uint8x16_t __a) | |
2906 | { | |
2907 | return __builtin_mve_vrev64q_uv16qi (__a); | |
2908 | } | |
2909 | ||
2910 | __extension__ extern __inline uint16x8_t | |
2911 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2912 | __arm_vrev64q_u16 (uint16x8_t __a) | |
2913 | { | |
2914 | return __builtin_mve_vrev64q_uv8hi (__a); | |
2915 | } | |
2916 | ||
2917 | __extension__ extern __inline uint32x4_t | |
2918 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2919 | __arm_vrev64q_u32 (uint32x4_t __a) | |
2920 | { | |
2921 | return __builtin_mve_vrev64q_uv4si (__a); | |
2922 | } | |
2923 | ||
6df4618c SP |
2924 | __extension__ extern __inline uint8x16_t |
2925 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2926 | __arm_vmvnq_u8 (uint8x16_t __a) | |
2927 | { | |
2928 | return __builtin_mve_vmvnq_uv16qi (__a); | |
2929 | } | |
2930 | ||
2931 | __extension__ extern __inline uint16x8_t | |
2932 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2933 | __arm_vmvnq_u16 (uint16x8_t __a) | |
2934 | { | |
2935 | return __builtin_mve_vmvnq_uv8hi (__a); | |
2936 | } | |
2937 | ||
2938 | __extension__ extern __inline uint32x4_t | |
2939 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2940 | __arm_vmvnq_u32 (uint32x4_t __a) | |
2941 | { | |
2942 | return __builtin_mve_vmvnq_uv4si (__a); | |
2943 | } | |
2944 | ||
2945 | __extension__ extern __inline uint8x16_t | |
2946 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2947 | __arm_vdupq_n_u8 (uint8_t __a) | |
2948 | { | |
2949 | return __builtin_mve_vdupq_n_uv16qi (__a); | |
2950 | } | |
2951 | ||
2952 | __extension__ extern __inline uint16x8_t | |
2953 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2954 | __arm_vdupq_n_u16 (uint16_t __a) | |
2955 | { | |
2956 | return __builtin_mve_vdupq_n_uv8hi (__a); | |
2957 | } | |
2958 | ||
2959 | __extension__ extern __inline uint32x4_t | |
2960 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2961 | __arm_vdupq_n_u32 (uint32_t __a) | |
2962 | { | |
2963 | return __builtin_mve_vdupq_n_uv4si (__a); | |
2964 | } | |
2965 | ||
2966 | __extension__ extern __inline uint8x16_t | |
2967 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2968 | __arm_vclzq_u8 (uint8x16_t __a) | |
2969 | { | |
2970 | return __builtin_mve_vclzq_uv16qi (__a); | |
2971 | } | |
2972 | ||
2973 | __extension__ extern __inline uint16x8_t | |
2974 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2975 | __arm_vclzq_u16 (uint16x8_t __a) | |
2976 | { | |
2977 | return __builtin_mve_vclzq_uv8hi (__a); | |
2978 | } | |
2979 | ||
2980 | __extension__ extern __inline uint32x4_t | |
2981 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2982 | __arm_vclzq_u32 (uint32x4_t __a) | |
2983 | { | |
2984 | return __builtin_mve_vclzq_uv4si (__a); | |
2985 | } | |
2986 | ||
2987 | __extension__ extern __inline uint32_t | |
2988 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2989 | __arm_vaddvq_u8 (uint8x16_t __a) | |
2990 | { | |
2991 | return __builtin_mve_vaddvq_uv16qi (__a); | |
2992 | } | |
2993 | ||
2994 | __extension__ extern __inline uint32_t | |
2995 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2996 | __arm_vaddvq_u16 (uint16x8_t __a) | |
2997 | { | |
2998 | return __builtin_mve_vaddvq_uv8hi (__a); | |
2999 | } | |
3000 | ||
3001 | __extension__ extern __inline uint32_t | |
3002 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3003 | __arm_vaddvq_u32 (uint32x4_t __a) | |
3004 | { | |
3005 | return __builtin_mve_vaddvq_uv4si (__a); | |
3006 | } | |
3007 | ||
3008 | __extension__ extern __inline uint8x16_t | |
3009 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3010 | __arm_vrev32q_u8 (uint8x16_t __a) | |
3011 | { | |
3012 | return __builtin_mve_vrev32q_uv16qi (__a); | |
3013 | } | |
3014 | ||
3015 | __extension__ extern __inline uint16x8_t | |
3016 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3017 | __arm_vrev32q_u16 (uint16x8_t __a) | |
3018 | { | |
3019 | return __builtin_mve_vrev32q_uv8hi (__a); | |
3020 | } | |
3021 | ||
3022 | __extension__ extern __inline uint16x8_t | |
3023 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3024 | __arm_vmovltq_u8 (uint8x16_t __a) | |
3025 | { | |
3026 | return __builtin_mve_vmovltq_uv16qi (__a); | |
3027 | } | |
3028 | ||
3029 | __extension__ extern __inline uint32x4_t | |
3030 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3031 | __arm_vmovltq_u16 (uint16x8_t __a) | |
3032 | { | |
3033 | return __builtin_mve_vmovltq_uv8hi (__a); | |
3034 | } | |
3035 | ||
3036 | __extension__ extern __inline uint16x8_t | |
3037 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3038 | __arm_vmovlbq_u8 (uint8x16_t __a) | |
3039 | { | |
3040 | return __builtin_mve_vmovlbq_uv16qi (__a); | |
3041 | } | |
3042 | ||
3043 | __extension__ extern __inline uint32x4_t | |
3044 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3045 | __arm_vmovlbq_u16 (uint16x8_t __a) | |
3046 | { | |
3047 | return __builtin_mve_vmovlbq_uv8hi (__a); | |
3048 | } | |
3049 | ||
5db0eb95 SP |
3050 | __extension__ extern __inline uint16x8_t |
3051 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3052 | __arm_vmvnq_n_u16 (const int __imm) | |
3053 | { | |
3054 | return __builtin_mve_vmvnq_n_uv8hi (__imm); | |
3055 | } | |
3056 | ||
3057 | __extension__ extern __inline uint32x4_t | |
3058 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3059 | __arm_vmvnq_n_u32 (const int __imm) | |
3060 | { | |
3061 | return __builtin_mve_vmvnq_n_uv4si (__imm); | |
3062 | } | |
3063 | ||
6df4618c SP |
3064 | __extension__ extern __inline uint8x16_t |
3065 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3066 | __arm_vrev16q_u8 (uint8x16_t __a) | |
3067 | { | |
3068 | return __builtin_mve_vrev16q_uv16qi (__a); | |
3069 | } | |
3070 | ||
3071 | __extension__ extern __inline uint64_t | |
3072 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3073 | __arm_vaddlvq_u32 (uint32x4_t __a) | |
3074 | { | |
3075 | return __builtin_mve_vaddlvq_uv4si (__a); | |
3076 | } | |
3077 | ||
a475f153 SP |
3078 | __extension__ extern __inline int64_t |
3079 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3080 | __arm_vctp16q (uint32_t __a) | |
3081 | { | |
3082 | return __builtin_mve_vctp16qhi (__a); | |
3083 | } | |
3084 | ||
3085 | __extension__ extern __inline mve_pred16_t | |
3086 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3087 | __arm_vctp32q (uint32_t __a) | |
3088 | { | |
3089 | return __builtin_mve_vctp32qhi (__a); | |
3090 | } | |
3091 | ||
3092 | __extension__ extern __inline mve_pred16_t | |
3093 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3094 | __arm_vctp64q (uint32_t __a) | |
3095 | { | |
3096 | return __builtin_mve_vctp64qhi (__a); | |
3097 | } | |
3098 | ||
3099 | __extension__ extern __inline mve_pred16_t | |
3100 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3101 | __arm_vctp8q (uint32_t __a) | |
3102 | { | |
3103 | return __builtin_mve_vctp8qhi (__a); | |
3104 | } | |
3105 | ||
3106 | __extension__ extern __inline mve_pred16_t | |
3107 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3108 | __arm_vpnot (mve_pred16_t __a) | |
3109 | { | |
3110 | return __builtin_mve_vpnothi (__a); | |
3111 | } | |
3112 | ||
f166a8cd SP |
3113 | __extension__ extern __inline uint8x16_t |
3114 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3115 | __arm_vcreateq_u8 (uint64_t __a, uint64_t __b) | |
3116 | { | |
3117 | return __builtin_mve_vcreateq_uv16qi (__a, __b); | |
3118 | } | |
3119 | ||
3120 | __extension__ extern __inline uint16x8_t | |
3121 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3122 | __arm_vcreateq_u16 (uint64_t __a, uint64_t __b) | |
3123 | { | |
3124 | return __builtin_mve_vcreateq_uv8hi (__a, __b); | |
3125 | } | |
3126 | ||
3127 | __extension__ extern __inline uint32x4_t | |
3128 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3129 | __arm_vcreateq_u32 (uint64_t __a, uint64_t __b) | |
3130 | { | |
3131 | return __builtin_mve_vcreateq_uv4si (__a, __b); | |
3132 | } | |
3133 | ||
3134 | __extension__ extern __inline uint64x2_t | |
3135 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3136 | __arm_vcreateq_u64 (uint64_t __a, uint64_t __b) | |
3137 | { | |
3138 | return __builtin_mve_vcreateq_uv2di (__a, __b); | |
3139 | } | |
3140 | ||
3141 | __extension__ extern __inline int8x16_t | |
3142 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3143 | __arm_vcreateq_s8 (uint64_t __a, uint64_t __b) | |
3144 | { | |
3145 | return __builtin_mve_vcreateq_sv16qi (__a, __b); | |
3146 | } | |
3147 | ||
3148 | __extension__ extern __inline int16x8_t | |
3149 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3150 | __arm_vcreateq_s16 (uint64_t __a, uint64_t __b) | |
3151 | { | |
3152 | return __builtin_mve_vcreateq_sv8hi (__a, __b); | |
3153 | } | |
3154 | ||
3155 | __extension__ extern __inline int32x4_t | |
3156 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3157 | __arm_vcreateq_s32 (uint64_t __a, uint64_t __b) | |
3158 | { | |
3159 | return __builtin_mve_vcreateq_sv4si (__a, __b); | |
3160 | } | |
3161 | ||
3162 | __extension__ extern __inline int64x2_t | |
3163 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3164 | __arm_vcreateq_s64 (uint64_t __a, uint64_t __b) | |
3165 | { | |
3166 | return __builtin_mve_vcreateq_sv2di (__a, __b); | |
3167 | } | |
3168 | ||
3169 | __extension__ extern __inline int8x16_t | |
3170 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3171 | __arm_vshrq_n_s8 (int8x16_t __a, const int __imm) | |
3172 | { | |
3173 | return __builtin_mve_vshrq_n_sv16qi (__a, __imm); | |
3174 | } | |
3175 | ||
3176 | __extension__ extern __inline int16x8_t | |
3177 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3178 | __arm_vshrq_n_s16 (int16x8_t __a, const int __imm) | |
3179 | { | |
3180 | return __builtin_mve_vshrq_n_sv8hi (__a, __imm); | |
3181 | } | |
3182 | ||
3183 | __extension__ extern __inline int32x4_t | |
3184 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3185 | __arm_vshrq_n_s32 (int32x4_t __a, const int __imm) | |
3186 | { | |
3187 | return __builtin_mve_vshrq_n_sv4si (__a, __imm); | |
3188 | } | |
3189 | ||
3190 | __extension__ extern __inline uint8x16_t | |
3191 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3192 | __arm_vshrq_n_u8 (uint8x16_t __a, const int __imm) | |
3193 | { | |
3194 | return __builtin_mve_vshrq_n_uv16qi (__a, __imm); | |
3195 | } | |
3196 | ||
3197 | __extension__ extern __inline uint16x8_t | |
3198 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3199 | __arm_vshrq_n_u16 (uint16x8_t __a, const int __imm) | |
3200 | { | |
3201 | return __builtin_mve_vshrq_n_uv8hi (__a, __imm); | |
3202 | } | |
3203 | ||
3204 | __extension__ extern __inline uint32x4_t | |
3205 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3206 | __arm_vshrq_n_u32 (uint32x4_t __a, const int __imm) | |
3207 | { | |
3208 | return __builtin_mve_vshrq_n_uv4si (__a, __imm); | |
3209 | } | |
d71dba7b SP |
3210 | __extension__ extern __inline int64_t |
3211 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3212 | __arm_vaddlvq_p_s32 (int32x4_t __a, mve_pred16_t __p) | |
3213 | { | |
3214 | return __builtin_mve_vaddlvq_p_sv4si (__a, __p); | |
3215 | } | |
3216 | ||
3217 | __extension__ extern __inline uint64_t | |
3218 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3219 | __arm_vaddlvq_p_u32 (uint32x4_t __a, mve_pred16_t __p) | |
3220 | { | |
3221 | return __builtin_mve_vaddlvq_p_uv4si (__a, __p); | |
3222 | } | |
3223 | ||
3224 | __extension__ extern __inline int32_t | |
3225 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3226 | __arm_vcmpneq_s8 (int8x16_t __a, int8x16_t __b) | |
3227 | { | |
3228 | return __builtin_mve_vcmpneq_sv16qi (__a, __b); | |
3229 | } | |
3230 | ||
3231 | __extension__ extern __inline mve_pred16_t | |
3232 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3233 | __arm_vcmpneq_s16 (int16x8_t __a, int16x8_t __b) | |
3234 | { | |
3235 | return __builtin_mve_vcmpneq_sv8hi (__a, __b); | |
3236 | } | |
3237 | ||
3238 | __extension__ extern __inline mve_pred16_t | |
3239 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3240 | __arm_vcmpneq_s32 (int32x4_t __a, int32x4_t __b) | |
3241 | { | |
3242 | return __builtin_mve_vcmpneq_sv4si (__a, __b); | |
3243 | } | |
3244 | ||
3245 | __extension__ extern __inline mve_pred16_t | |
3246 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3247 | __arm_vcmpneq_u8 (uint8x16_t __a, uint8x16_t __b) | |
3248 | { | |
3249 | return __builtin_mve_vcmpneq_uv16qi (__a, __b); | |
3250 | } | |
3251 | ||
3252 | __extension__ extern __inline mve_pred16_t | |
3253 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3254 | __arm_vcmpneq_u16 (uint16x8_t __a, uint16x8_t __b) | |
3255 | { | |
3256 | return __builtin_mve_vcmpneq_uv8hi (__a, __b); | |
3257 | } | |
3258 | ||
3259 | __extension__ extern __inline mve_pred16_t | |
3260 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3261 | __arm_vcmpneq_u32 (uint32x4_t __a, uint32x4_t __b) | |
3262 | { | |
3263 | return __builtin_mve_vcmpneq_uv4si (__a, __b); | |
3264 | } | |
3265 | ||
3266 | __extension__ extern __inline int8x16_t | |
3267 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3268 | __arm_vshlq_s8 (int8x16_t __a, int8x16_t __b) | |
3269 | { | |
3270 | return __builtin_mve_vshlq_sv16qi (__a, __b); | |
3271 | } | |
3272 | ||
3273 | __extension__ extern __inline int16x8_t | |
3274 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3275 | __arm_vshlq_s16 (int16x8_t __a, int16x8_t __b) | |
3276 | { | |
3277 | return __builtin_mve_vshlq_sv8hi (__a, __b); | |
3278 | } | |
3279 | ||
3280 | __extension__ extern __inline int32x4_t | |
3281 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3282 | __arm_vshlq_s32 (int32x4_t __a, int32x4_t __b) | |
3283 | { | |
3284 | return __builtin_mve_vshlq_sv4si (__a, __b); | |
3285 | } | |
3286 | ||
3287 | __extension__ extern __inline uint8x16_t | |
3288 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3289 | __arm_vshlq_u8 (uint8x16_t __a, int8x16_t __b) | |
3290 | { | |
3291 | return __builtin_mve_vshlq_uv16qi (__a, __b); | |
3292 | } | |
3293 | ||
3294 | __extension__ extern __inline uint16x8_t | |
3295 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3296 | __arm_vshlq_u16 (uint16x8_t __a, int16x8_t __b) | |
3297 | { | |
3298 | return __builtin_mve_vshlq_uv8hi (__a, __b); | |
3299 | } | |
3300 | ||
3301 | __extension__ extern __inline uint32x4_t | |
3302 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3303 | __arm_vshlq_u32 (uint32x4_t __a, int32x4_t __b) | |
3304 | { | |
3305 | return __builtin_mve_vshlq_uv4si (__a, __b); | |
3306 | } | |
33203b4c SP |
3307 | __extension__ extern __inline uint8x16_t |
3308 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3309 | __arm_vsubq_u8 (uint8x16_t __a, uint8x16_t __b) | |
3310 | { | |
3311 | return __builtin_mve_vsubq_uv16qi (__a, __b); | |
3312 | } | |
3313 | ||
3314 | __extension__ extern __inline uint8x16_t | |
3315 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3316 | __arm_vsubq_n_u8 (uint8x16_t __a, uint8_t __b) | |
3317 | { | |
3318 | return __builtin_mve_vsubq_n_uv16qi (__a, __b); | |
3319 | } | |
3320 | ||
3321 | __extension__ extern __inline uint8x16_t | |
3322 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3323 | __arm_vrmulhq_u8 (uint8x16_t __a, uint8x16_t __b) | |
3324 | { | |
3325 | return __builtin_mve_vrmulhq_uv16qi (__a, __b); | |
3326 | } | |
3327 | ||
3328 | __extension__ extern __inline uint8x16_t | |
3329 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3330 | __arm_vrhaddq_u8 (uint8x16_t __a, uint8x16_t __b) | |
3331 | { | |
3332 | return __builtin_mve_vrhaddq_uv16qi (__a, __b); | |
3333 | } | |
3334 | ||
3335 | __extension__ extern __inline uint8x16_t | |
3336 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3337 | __arm_vqsubq_u8 (uint8x16_t __a, uint8x16_t __b) | |
3338 | { | |
3339 | return __builtin_mve_vqsubq_uv16qi (__a, __b); | |
3340 | } | |
3341 | ||
3342 | __extension__ extern __inline uint8x16_t | |
3343 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3344 | __arm_vqsubq_n_u8 (uint8x16_t __a, uint8_t __b) | |
3345 | { | |
3346 | return __builtin_mve_vqsubq_n_uv16qi (__a, __b); | |
3347 | } | |
3348 | ||
3349 | __extension__ extern __inline uint8x16_t | |
3350 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3351 | __arm_vqaddq_u8 (uint8x16_t __a, uint8x16_t __b) | |
3352 | { | |
3353 | return __builtin_mve_vqaddq_uv16qi (__a, __b); | |
3354 | } | |
3355 | ||
3356 | __extension__ extern __inline uint8x16_t | |
3357 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3358 | __arm_vqaddq_n_u8 (uint8x16_t __a, uint8_t __b) | |
3359 | { | |
3360 | return __builtin_mve_vqaddq_n_uv16qi (__a, __b); | |
3361 | } | |
3362 | ||
3363 | __extension__ extern __inline uint8x16_t | |
3364 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3365 | __arm_vorrq_u8 (uint8x16_t __a, uint8x16_t __b) | |
3366 | { | |
3367 | return __builtin_mve_vorrq_uv16qi (__a, __b); | |
3368 | } | |
3369 | ||
3370 | __extension__ extern __inline uint8x16_t | |
3371 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3372 | __arm_vornq_u8 (uint8x16_t __a, uint8x16_t __b) | |
3373 | { | |
3374 | return __builtin_mve_vornq_uv16qi (__a, __b); | |
3375 | } | |
3376 | ||
3377 | __extension__ extern __inline uint8x16_t | |
3378 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3379 | __arm_vmulq_u8 (uint8x16_t __a, uint8x16_t __b) | |
3380 | { | |
3381 | return __builtin_mve_vmulq_uv16qi (__a, __b); | |
3382 | } | |
3383 | ||
3384 | __extension__ extern __inline uint8x16_t | |
3385 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3386 | __arm_vmulq_n_u8 (uint8x16_t __a, uint8_t __b) | |
3387 | { | |
3388 | return __builtin_mve_vmulq_n_uv16qi (__a, __b); | |
3389 | } | |
3390 | ||
3391 | __extension__ extern __inline uint16x8_t | |
3392 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3393 | __arm_vmulltq_int_u8 (uint8x16_t __a, uint8x16_t __b) | |
3394 | { | |
3395 | return __builtin_mve_vmulltq_int_uv16qi (__a, __b); | |
3396 | } | |
3397 | ||
3398 | __extension__ extern __inline uint16x8_t | |
3399 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3400 | __arm_vmullbq_int_u8 (uint8x16_t __a, uint8x16_t __b) | |
3401 | { | |
3402 | return __builtin_mve_vmullbq_int_uv16qi (__a, __b); | |
3403 | } | |
3404 | ||
3405 | __extension__ extern __inline uint8x16_t | |
3406 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3407 | __arm_vmulhq_u8 (uint8x16_t __a, uint8x16_t __b) | |
3408 | { | |
3409 | return __builtin_mve_vmulhq_uv16qi (__a, __b); | |
3410 | } | |
3411 | ||
3412 | __extension__ extern __inline uint32_t | |
3413 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3414 | __arm_vmladavq_u8 (uint8x16_t __a, uint8x16_t __b) | |
3415 | { | |
3416 | return __builtin_mve_vmladavq_uv16qi (__a, __b); | |
3417 | } | |
3418 | ||
3419 | __extension__ extern __inline uint8_t | |
3420 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3421 | __arm_vminvq_u8 (uint8_t __a, uint8x16_t __b) | |
3422 | { | |
3423 | return __builtin_mve_vminvq_uv16qi (__a, __b); | |
3424 | } | |
3425 | ||
3426 | __extension__ extern __inline uint8x16_t | |
3427 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3428 | __arm_vminq_u8 (uint8x16_t __a, uint8x16_t __b) | |
3429 | { | |
3430 | return __builtin_mve_vminq_uv16qi (__a, __b); | |
3431 | } | |
3432 | ||
3433 | __extension__ extern __inline uint8_t | |
3434 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3435 | __arm_vmaxvq_u8 (uint8_t __a, uint8x16_t __b) | |
3436 | { | |
3437 | return __builtin_mve_vmaxvq_uv16qi (__a, __b); | |
3438 | } | |
3439 | ||
3440 | __extension__ extern __inline uint8x16_t | |
3441 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3442 | __arm_vmaxq_u8 (uint8x16_t __a, uint8x16_t __b) | |
3443 | { | |
3444 | return __builtin_mve_vmaxq_uv16qi (__a, __b); | |
3445 | } | |
3446 | ||
3447 | __extension__ extern __inline uint8x16_t | |
3448 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3449 | __arm_vhsubq_u8 (uint8x16_t __a, uint8x16_t __b) | |
3450 | { | |
3451 | return __builtin_mve_vhsubq_uv16qi (__a, __b); | |
3452 | } | |
3453 | ||
3454 | __extension__ extern __inline uint8x16_t | |
3455 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3456 | __arm_vhsubq_n_u8 (uint8x16_t __a, uint8_t __b) | |
3457 | { | |
3458 | return __builtin_mve_vhsubq_n_uv16qi (__a, __b); | |
3459 | } | |
3460 | ||
3461 | __extension__ extern __inline uint8x16_t | |
3462 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3463 | __arm_vhaddq_u8 (uint8x16_t __a, uint8x16_t __b) | |
3464 | { | |
3465 | return __builtin_mve_vhaddq_uv16qi (__a, __b); | |
3466 | } | |
3467 | ||
3468 | __extension__ extern __inline uint8x16_t | |
3469 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3470 | __arm_vhaddq_n_u8 (uint8x16_t __a, uint8_t __b) | |
3471 | { | |
3472 | return __builtin_mve_vhaddq_n_uv16qi (__a, __b); | |
3473 | } | |
3474 | ||
3475 | __extension__ extern __inline uint8x16_t | |
3476 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3477 | __arm_veorq_u8 (uint8x16_t __a, uint8x16_t __b) | |
3478 | { | |
3479 | return __builtin_mve_veorq_uv16qi (__a, __b); | |
3480 | } | |
3481 | ||
/* Comparisons on uint8x16_t vectors.  Each wrapper forwards directly to
   the matching GCC builtin and returns an mve_pred16_t lane predicate.
   Plain variants compare two vectors lane-wise; the "_n_" variants
   compare every lane of __a against the scalar __b.  "ne"/"eq" are
   (in)equality, "hi" is unsigned-higher and "cs" unsigned higher-or-same
   (carry set) -- see the Arm ACLE MVE specification for the exact
   per-lane predicate semantics.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_n_u8 (uint8x16_t __a, uint8_t __b)
{
  return __builtin_mve_vcmpneq_n_uv16qi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmphiq_u8 (uint8x16_t __a, uint8x16_t __b)
{
  return __builtin_mve_vcmphiq_uv16qi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmphiq_n_u8 (uint8x16_t __a, uint8_t __b)
{
  return __builtin_mve_vcmphiq_n_uv16qi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_u8 (uint8x16_t __a, uint8x16_t __b)
{
  return __builtin_mve_vcmpeqq_uv16qi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_n_u8 (uint8x16_t __a, uint8_t __b)
{
  return __builtin_mve_vcmpeqq_n_uv16qi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpcsq_u8 (uint8x16_t __a, uint8x16_t __b)
{
  return __builtin_mve_vcmpcsq_uv16qi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpcsq_n_u8 (uint8x16_t __a, uint8_t __b)
{
  return __builtin_mve_vcmpcsq_n_uv16qi (__a, __b);
}
3530 | ||
/* uint8x16_t complex-add (vcadd, with 90/270 degree rotation), bitwise
   (vbic = AND-NOT, vand), add-reduction (vaddv: reduce to uint32_t;
   "_p_" variant is predicated, "a" variant accumulates into __a),
   scalar add (vaddq_n) and absolute-difference (vabd) wrappers.  Each
   simply forwards to the corresponding builtin; instruction semantics
   are as defined by the Arm ACLE for MVE.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_u8 (uint8x16_t __a, uint8x16_t __b)
{
  return __builtin_mve_vcaddq_rot90_uv16qi (__a, __b);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_u8 (uint8x16_t __a, uint8x16_t __b)
{
  return __builtin_mve_vcaddq_rot270_uv16qi (__a, __b);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_u8 (uint8x16_t __a, uint8x16_t __b)
{
  return __builtin_mve_vbicq_uv16qi (__a, __b);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_u8 (uint8x16_t __a, uint8x16_t __b)
{
  return __builtin_mve_vandq_uv16qi (__a, __b);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvq_p_u8 (uint8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vaddvq_p_uv16qi (__a, __p);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvaq_u8 (uint32_t __a, uint8x16_t __b)
{
  return __builtin_mve_vaddvaq_uv16qi (__a, __b);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_n_u8 (uint8x16_t __a, uint8_t __b)
{
  return __builtin_mve_vaddq_n_uv16qi (__a, __b);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq_u8 (uint8x16_t __a, uint8x16_t __b)
{
  return __builtin_mve_vabdq_uv16qi (__a, __b);
}
3586 | ||
/* uint8x16_t shift wrappers.  Naming follows the MVE intrinsic
   convention: "_r_" takes a single scalar shift count (int32_t)
   applied to all lanes; the plain vector variants take a per-lane
   signed shift vector (int8x16_t); "r" in the mnemonic adds rounding
   and "q" adds saturation.  Note the "_n_" variants here also take a
   runtime int32_t count, mirroring the builtin signature.  All
   wrappers forward unchanged to the builtins (semantics per the Arm
   ACLE).  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_r_u8 (uint8x16_t __a, int32_t __b)
{
  return __builtin_mve_vshlq_r_uv16qi (__a, __b);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_u8 (uint8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vrshlq_uv16qi (__a, __b);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_n_u8 (uint8x16_t __a, int32_t __b)
{
  return __builtin_mve_vrshlq_n_uv16qi (__a, __b);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_u8 (uint8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vqshlq_uv16qi (__a, __b);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_r_u8 (uint8x16_t __a, int32_t __b)
{
  return __builtin_mve_vqshlq_r_uv16qi (__a, __b);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_u8 (uint8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vqrshlq_uv16qi (__a, __b);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_n_u8 (uint8x16_t __a, int32_t __b)
{
  return __builtin_mve_vqrshlq_n_uv16qi (__a, __b);
}
3635 | ||
/* Signed-input min/max "absolute" wrappers: these take signed int8x16_t
   data but produce unsigned results, matching the vmina/vmaxa family of
   MVE instructions.  The "v" forms (vminavq/vmaxavq) reduce across
   lanes against the scalar __a; the vector forms (vminaq/vmaxaq)
   operate lane-wise against the unsigned vector __a.  Exact absolute/
   unsigned semantics are specified by the Arm ACLE; each wrapper just
   forwards to the builtin.  */
__extension__ extern __inline uint8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminavq_s8 (uint8_t __a, int8x16_t __b)
{
  return __builtin_mve_vminavq_sv16qi (__a, __b);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminaq_s8 (uint8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vminaq_sv16qi (__a, __b);
}

__extension__ extern __inline uint8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxavq_s8 (uint8_t __a, int8x16_t __b)
{
  return __builtin_mve_vmaxavq_sv16qi (__a, __b);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxaq_s8 (uint8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vmaxaq_sv16qi (__a, __b);
}
3663 | ||
/* uint8x16_t vbrsr (bit-reverse-and-shift-right per the VBRSR
   instruction -- see the Arm ACLE for the precise definition) plus the
   immediate-count shift wrappers.  Unlike the "_r_" forms above, these
   shift variants take "const int __imm": the builtin requires a
   compile-time-constant shift amount.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq_n_u8 (uint8x16_t __a, int32_t __b)
{
  return __builtin_mve_vbrsrq_n_uv16qi (__a, __b);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_n_u8 (uint8x16_t __a, const int __imm)
{
  return __builtin_mve_vshlq_n_uv16qi (__a, __imm);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrq_n_u8 (uint8x16_t __a, const int __imm)
{
  return __builtin_mve_vrshrq_n_uv16qi (__a, __imm);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_n_u8 (uint8x16_t __a, const int __imm)
{
  return __builtin_mve_vqshlq_n_uv16qi (__a, __imm);
}
3691 | ||
/* Comparisons on int8x16_t vectors, returning an mve_pred16_t lane
   predicate.  Signed relations lt/le/gt/ge plus eq/ne; "_n_" variants
   compare every lane of __a against the scalar __b.  Each wrapper
   forwards to the matching builtin (per-lane semantics per the Arm
   ACLE).  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_n_s8 (int8x16_t __a, int8_t __b)
{
  return __builtin_mve_vcmpneq_n_sv16qi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vcmpltq_sv16qi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq_n_s8 (int8x16_t __a, int8_t __b)
{
  return __builtin_mve_vcmpltq_n_sv16qi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vcmpleq_sv16qi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq_n_s8 (int8x16_t __a, int8_t __b)
{
  return __builtin_mve_vcmpleq_n_sv16qi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vcmpgtq_sv16qi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq_n_s8 (int8x16_t __a, int8_t __b)
{
  return __builtin_mve_vcmpgtq_n_sv16qi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vcmpgeq_sv16qi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq_n_s8 (int8x16_t __a, int8_t __b)
{
  return __builtin_mve_vcmpgeq_n_sv16qi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vcmpeqq_sv16qi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_n_s8 (int8x16_t __a, int8_t __b)
{
  return __builtin_mve_vcmpeqq_n_sv16qi (__a, __b);
}
3768 | ||
/* vqshlu: saturating left shift of signed input by immediate, yielding
   an unsigned result (VQSHLU); vaddvq_p: predicated across-lane add
   reduction of a signed vector to int32_t.  Semantics per the Arm
   ACLE; both wrappers forward directly to the builtins.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshluq_n_s8 (int8x16_t __a, const int __imm)
{
  return __builtin_mve_vqshluq_n_sv16qi (__a, __imm);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvq_p_s8 (int8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vaddvq_p_sv16qi (__a, __p);
}
3782 | ||
/* int8x16_t subtract, shift, rounding-multiply-high (vrmulh) and
   rounding-halving-add (vrhadd) wrappers.  Conventions as elsewhere in
   this header: "_n_" = scalar second operand, "_r_" = scalar shift
   count in a register, "q" prefix in the mnemonic = saturating, "r" =
   rounding.  Each wrapper forwards to the matching builtin (semantics
   per the Arm ACLE).  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vsubq_sv16qi (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_n_s8 (int8x16_t __a, int8_t __b)
{
  return __builtin_mve_vsubq_n_sv16qi (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_r_s8 (int8x16_t __a, int32_t __b)
{
  return __builtin_mve_vshlq_r_sv16qi (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vrshlq_sv16qi (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_n_s8 (int8x16_t __a, int32_t __b)
{
  return __builtin_mve_vrshlq_n_sv16qi (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vrmulhq_sv16qi (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vrhaddq_sv16qi (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vqsubq_sv16qi (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_n_s8 (int8x16_t __a, int8_t __b)
{
  return __builtin_mve_vqsubq_n_sv16qi (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vqshlq_sv16qi (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_r_s8 (int8x16_t __a, int32_t __b)
{
  return __builtin_mve_vqshlq_r_sv16qi (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vqrshlq_sv16qi (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_n_s8 (int8x16_t __a, int32_t __b)
{
  return __builtin_mve_vqrshlq_n_sv16qi (__a, __b);
}
3873 | ||
/* int8x16_t saturating doubling-multiply-high (vqdmulh; vqrdmulh adds
   rounding), saturating add (vqadd), bitwise OR / OR-NOT (vorr, vorn)
   and plain multiply wrappers.  "_n_" variants take a scalar second
   operand.  All forward unchanged to the builtins (semantics per the
   Arm ACLE).  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmulhq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vqrdmulhq_sv16qi (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmulhq_n_s8 (int8x16_t __a, int8_t __b)
{
  return __builtin_mve_vqrdmulhq_n_sv16qi (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulhq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vqdmulhq_sv16qi (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulhq_n_s8 (int8x16_t __a, int8_t __b)
{
  return __builtin_mve_vqdmulhq_n_sv16qi (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vqaddq_sv16qi (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_n_s8 (int8x16_t __a, int8_t __b)
{
  return __builtin_mve_vqaddq_n_sv16qi (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vorrq_sv16qi (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vornq_sv16qi (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vmulq_sv16qi (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_n_s8 (int8x16_t __a, int8_t __b)
{
  return __builtin_mve_vmulq_n_sv16qi (__a, __b);
}
3943 | ||
/* int8x16_t widening multiplies -- vmullt/vmullb multiply the top/
   bottom half of the lanes and return the doubled-width int16x8_t;
   vmulh returns the high half of each product -- plus the multiply-
   accumulate-across-lanes reductions vmladav ("add") and vmlsdav
   ("subtract"), whose "x" variants use the exchanged operand pairing
   defined by the Arm ACLE.  Each wrapper forwards to the builtin.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vmulltq_int_sv16qi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_int_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vmullbq_int_sv16qi (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vmulhq_sv16qi (__a, __b);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavxq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vmlsdavxq_sv16qi (__a, __b);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vmlsdavq_sv16qi (__a, __b);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavxq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vmladavxq_sv16qi (__a, __b);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vmladavq_sv16qi (__a, __b);
}
3992 | ||
/* int8x16_t min/max.  The "v" forms (vminvq/vmaxvq) reduce across
   lanes, folding in the scalar __a; the vector forms (vminq/vmaxq)
   select per lane.  Each wrapper forwards to the builtin.  */
__extension__ extern __inline int8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminvq_s8 (int8_t __a, int8x16_t __b)
{
  return __builtin_mve_vminvq_sv16qi (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vminq_sv16qi (__a, __b);
}

__extension__ extern __inline int8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxvq_s8 (int8_t __a, int8x16_t __b)
{
  return __builtin_mve_vmaxvq_sv16qi (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vmaxq_sv16qi (__a, __b);
}
4020 | ||
/* int8x16_t halving subtract/add (vhsub/vhadd: result halved, per the
   Arm ACLE), halving complex add with rotation (vhcadd), exclusive-or
   (veor), complex add (vcadd) and vbrsr wrappers.  "_n_" variants take
   a scalar second operand.  All forward unchanged to the builtins.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vhsubq_sv16qi (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_n_s8 (int8x16_t __a, int8_t __b)
{
  return __builtin_mve_vhsubq_n_sv16qi (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhcaddq_rot90_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vhcaddq_rot90_sv16qi (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhcaddq_rot270_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vhcaddq_rot270_sv16qi (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vhaddq_sv16qi (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_n_s8 (int8x16_t __a, int8_t __b)
{
  return __builtin_mve_vhaddq_n_sv16qi (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_veorq_sv16qi (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vcaddq_rot90_sv16qi (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vcaddq_rot270_sv16qi (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq_n_s8 (int8x16_t __a, int32_t __b)
{
  return __builtin_mve_vbrsrq_n_sv16qi (__a, __b);
}
4090 | ||
/* int8x16_t bitwise AND-NOT (vbic) and AND, across-lane add with
   accumulator (vaddva: folds the reduction into the int32_t __a),
   scalar add, absolute difference, and the immediate-count shifts
   (compile-time-constant "const int __imm").  Each wrapper forwards to
   the builtin.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vbicq_sv16qi (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vandq_sv16qi (__a, __b);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvaq_s8 (int32_t __a, int8x16_t __b)
{
  return __builtin_mve_vaddvaq_sv16qi (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_n_s8 (int8x16_t __a, int8_t __b)
{
  return __builtin_mve_vaddq_n_sv16qi (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vabdq_sv16qi (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_n_s8 (int8x16_t __a, const int __imm)
{
  return __builtin_mve_vshlq_n_sv16qi (__a, __imm);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrq_n_s8 (int8x16_t __a, const int __imm)
{
  return __builtin_mve_vrshrq_n_sv16qi (__a, __imm);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_n_s8 (int8x16_t __a, const int __imm)
{
  return __builtin_mve_vqshlq_n_sv16qi (__a, __imm);
}
4146 | ||
/* uint16x8_t element-size variants of the arithmetic/bitwise wrappers
   defined above for uint8x16_t: subtract, rounding-multiply-high,
   rounding-halving-add, saturating sub/add, bitwise OR / OR-NOT, and
   multiply.  "_n_" variants take a scalar second operand; the builtin
   suffix changes from v16qi to v8hi for the 8x16-bit lane layout.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_mve_vsubq_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_n_u16 (uint16x8_t __a, uint16_t __b)
{
  return __builtin_mve_vsubq_n_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_mve_vrmulhq_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_mve_vrhaddq_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_mve_vqsubq_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_n_u16 (uint16x8_t __a, uint16_t __b)
{
  return __builtin_mve_vqsubq_n_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_mve_vqaddq_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_n_u16 (uint16x8_t __a, uint16_t __b)
{
  return __builtin_mve_vqaddq_n_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_mve_vorrq_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_mve_vornq_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_mve_vmulq_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_n_u16 (uint16x8_t __a, uint16_t __b)
{
  return __builtin_mve_vmulq_n_uv8hi (__a, __b);
}
4230 | ||
/* uint16x8_t widening multiplies (vmullt/vmullb: top/bottom lanes,
   doubled-width uint32x4_t result), multiply-high, multiply-accumulate
   reduction (vmladav -> uint32_t) and min/max wrappers.  The "v"
   reduction forms fold in the scalar __a; vector forms select per
   lane.  Each forwards to the builtin (semantics per the Arm ACLE).  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_mve_vmulltq_int_uv8hi (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_int_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_mve_vmullbq_int_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_mve_vmulhq_uv8hi (__a, __b);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_mve_vmladavq_uv8hi (__a, __b);
}

__extension__ extern __inline uint16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminvq_u16 (uint16_t __a, uint16x8_t __b)
{
  return __builtin_mve_vminvq_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_mve_vminq_uv8hi (__a, __b);
}

__extension__ extern __inline uint16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxvq_u16 (uint16_t __a, uint16x8_t __b)
{
  return __builtin_mve_vmaxvq_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_mve_vmaxq_uv8hi (__a, __b);
}
4286 | ||
/* uint16x8_t halving subtract/add (result halved, per the Arm ACLE)
   and exclusive-or wrappers; "_n_" variants take a scalar second
   operand.  Each forwards to the builtin.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_mve_vhsubq_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_n_u16 (uint16x8_t __a, uint16_t __b)
{
  return __builtin_mve_vhsubq_n_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_mve_vhaddq_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_n_u16 (uint16x8_t __a, uint16_t __b)
{
  return __builtin_mve_vhaddq_n_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_mve_veorq_uv8hi (__a, __b);
}
4321 | ||
4322 | __extension__ extern __inline mve_pred16_t | |
4323 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4324 | __arm_vcmpneq_n_u16 (uint16x8_t __a, uint16_t __b) | |
4325 | { | |
4326 | return __builtin_mve_vcmpneq_n_uv8hi (__a, __b); | |
4327 | } | |
4328 | ||
4329 | __extension__ extern __inline mve_pred16_t | |
4330 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4331 | __arm_vcmphiq_u16 (uint16x8_t __a, uint16x8_t __b) | |
4332 | { | |
4333 | return __builtin_mve_vcmphiq_uv8hi (__a, __b); | |
4334 | } | |
4335 | ||
4336 | __extension__ extern __inline mve_pred16_t | |
4337 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4338 | __arm_vcmphiq_n_u16 (uint16x8_t __a, uint16_t __b) | |
4339 | { | |
4340 | return __builtin_mve_vcmphiq_n_uv8hi (__a, __b); | |
4341 | } | |
4342 | ||
4343 | __extension__ extern __inline mve_pred16_t | |
4344 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4345 | __arm_vcmpeqq_u16 (uint16x8_t __a, uint16x8_t __b) | |
4346 | { | |
4347 | return __builtin_mve_vcmpeqq_uv8hi (__a, __b); | |
4348 | } | |
4349 | ||
4350 | __extension__ extern __inline mve_pred16_t | |
4351 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4352 | __arm_vcmpeqq_n_u16 (uint16x8_t __a, uint16_t __b) | |
4353 | { | |
4354 | return __builtin_mve_vcmpeqq_n_uv8hi (__a, __b); | |
4355 | } | |
4356 | ||
4357 | __extension__ extern __inline mve_pred16_t | |
4358 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4359 | __arm_vcmpcsq_u16 (uint16x8_t __a, uint16x8_t __b) | |
4360 | { | |
4361 | return __builtin_mve_vcmpcsq_uv8hi (__a, __b); | |
4362 | } | |
4363 | ||
4364 | __extension__ extern __inline mve_pred16_t | |
4365 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4366 | __arm_vcmpcsq_n_u16 (uint16x8_t __a, uint16_t __b) | |
4367 | { | |
4368 | return __builtin_mve_vcmpcsq_n_uv8hi (__a, __b); | |
4369 | } | |
4370 | ||
4371 | __extension__ extern __inline uint16x8_t | |
4372 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4373 | __arm_vcaddq_rot90_u16 (uint16x8_t __a, uint16x8_t __b) | |
4374 | { | |
4375 | return __builtin_mve_vcaddq_rot90_uv8hi (__a, __b); | |
4376 | } | |
4377 | ||
4378 | __extension__ extern __inline uint16x8_t | |
4379 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4380 | __arm_vcaddq_rot270_u16 (uint16x8_t __a, uint16x8_t __b) | |
4381 | { | |
4382 | return __builtin_mve_vcaddq_rot270_uv8hi (__a, __b); | |
4383 | } | |
4384 | ||
4385 | __extension__ extern __inline uint16x8_t | |
4386 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4387 | __arm_vbicq_u16 (uint16x8_t __a, uint16x8_t __b) | |
4388 | { | |
4389 | return __builtin_mve_vbicq_uv8hi (__a, __b); | |
4390 | } | |
4391 | ||
4392 | __extension__ extern __inline uint16x8_t | |
4393 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4394 | __arm_vandq_u16 (uint16x8_t __a, uint16x8_t __b) | |
4395 | { | |
4396 | return __builtin_mve_vandq_uv8hi (__a, __b); | |
4397 | } | |
4398 | ||
4399 | __extension__ extern __inline uint32_t | |
4400 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4401 | __arm_vaddvq_p_u16 (uint16x8_t __a, mve_pred16_t __p) | |
4402 | { | |
4403 | return __builtin_mve_vaddvq_p_uv8hi (__a, __p); | |
4404 | } | |
4405 | ||
4406 | __extension__ extern __inline uint32_t | |
4407 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4408 | __arm_vaddvaq_u16 (uint32_t __a, uint16x8_t __b) | |
4409 | { | |
4410 | return __builtin_mve_vaddvaq_uv8hi (__a, __b); | |
4411 | } | |
4412 | ||
4413 | __extension__ extern __inline uint16x8_t | |
4414 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4415 | __arm_vaddq_n_u16 (uint16x8_t __a, uint16_t __b) | |
4416 | { | |
4417 | return __builtin_mve_vaddq_n_uv8hi (__a, __b); | |
4418 | } | |
4419 | ||
4420 | __extension__ extern __inline uint16x8_t | |
4421 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4422 | __arm_vabdq_u16 (uint16x8_t __a, uint16x8_t __b) | |
4423 | { | |
4424 | return __builtin_mve_vabdq_uv8hi (__a, __b); | |
4425 | } | |
4426 | ||
4427 | __extension__ extern __inline uint16x8_t | |
4428 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4429 | __arm_vshlq_r_u16 (uint16x8_t __a, int32_t __b) | |
4430 | { | |
4431 | return __builtin_mve_vshlq_r_uv8hi (__a, __b); | |
4432 | } | |
4433 | ||
4434 | __extension__ extern __inline uint16x8_t | |
4435 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4436 | __arm_vrshlq_u16 (uint16x8_t __a, int16x8_t __b) | |
4437 | { | |
4438 | return __builtin_mve_vrshlq_uv8hi (__a, __b); | |
4439 | } | |
4440 | ||
4441 | __extension__ extern __inline uint16x8_t | |
4442 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4443 | __arm_vrshlq_n_u16 (uint16x8_t __a, int32_t __b) | |
4444 | { | |
4445 | return __builtin_mve_vrshlq_n_uv8hi (__a, __b); | |
4446 | } | |
4447 | ||
4448 | __extension__ extern __inline uint16x8_t | |
4449 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4450 | __arm_vqshlq_u16 (uint16x8_t __a, int16x8_t __b) | |
4451 | { | |
4452 | return __builtin_mve_vqshlq_uv8hi (__a, __b); | |
4453 | } | |
4454 | ||
4455 | __extension__ extern __inline uint16x8_t | |
4456 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4457 | __arm_vqshlq_r_u16 (uint16x8_t __a, int32_t __b) | |
4458 | { | |
4459 | return __builtin_mve_vqshlq_r_uv8hi (__a, __b); | |
4460 | } | |
4461 | ||
4462 | __extension__ extern __inline uint16x8_t | |
4463 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4464 | __arm_vqrshlq_u16 (uint16x8_t __a, int16x8_t __b) | |
4465 | { | |
4466 | return __builtin_mve_vqrshlq_uv8hi (__a, __b); | |
4467 | } | |
4468 | ||
4469 | __extension__ extern __inline uint16x8_t | |
4470 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4471 | __arm_vqrshlq_n_u16 (uint16x8_t __a, int32_t __b) | |
4472 | { | |
4473 | return __builtin_mve_vqrshlq_n_uv8hi (__a, __b); | |
4474 | } | |
4475 | ||
4476 | __extension__ extern __inline uint16_t | |
4477 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4478 | __arm_vminavq_s16 (uint16_t __a, int16x8_t __b) | |
4479 | { | |
4480 | return __builtin_mve_vminavq_sv8hi (__a, __b); | |
4481 | } | |
4482 | ||
4483 | __extension__ extern __inline uint16x8_t | |
4484 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4485 | __arm_vminaq_s16 (uint16x8_t __a, int16x8_t __b) | |
4486 | { | |
4487 | return __builtin_mve_vminaq_sv8hi (__a, __b); | |
4488 | } | |
4489 | ||
4490 | __extension__ extern __inline uint16_t | |
4491 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4492 | __arm_vmaxavq_s16 (uint16_t __a, int16x8_t __b) | |
4493 | { | |
4494 | return __builtin_mve_vmaxavq_sv8hi (__a, __b); | |
4495 | } | |
4496 | ||
4497 | __extension__ extern __inline uint16x8_t | |
4498 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4499 | __arm_vmaxaq_s16 (uint16x8_t __a, int16x8_t __b) | |
4500 | { | |
4501 | return __builtin_mve_vmaxaq_sv8hi (__a, __b); | |
4502 | } | |
4503 | ||
4504 | __extension__ extern __inline uint16x8_t | |
4505 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4506 | __arm_vbrsrq_n_u16 (uint16x8_t __a, int32_t __b) | |
4507 | { | |
4508 | return __builtin_mve_vbrsrq_n_uv8hi (__a, __b); | |
4509 | } | |
4510 | ||
4511 | __extension__ extern __inline uint16x8_t | |
4512 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4513 | __arm_vshlq_n_u16 (uint16x8_t __a, const int __imm) | |
4514 | { | |
4515 | return __builtin_mve_vshlq_n_uv8hi (__a, __imm); | |
4516 | } | |
4517 | ||
4518 | __extension__ extern __inline uint16x8_t | |
4519 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4520 | __arm_vrshrq_n_u16 (uint16x8_t __a, const int __imm) | |
4521 | { | |
4522 | return __builtin_mve_vrshrq_n_uv8hi (__a, __imm); | |
4523 | } | |
4524 | ||
4525 | __extension__ extern __inline uint16x8_t | |
4526 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4527 | __arm_vqshlq_n_u16 (uint16x8_t __a, const int __imm) | |
4528 | { | |
4529 | return __builtin_mve_vqshlq_n_uv8hi (__a, __imm); | |
4530 | } | |
4531 | ||
/* Signed 16-bit comparisons; each is a thin always-inline wrapper over
   the matching sv8hi builtin and returns an mve_pred16_t predicate
   value.  "_n" variants compare against a scalar __b.  */

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_n_s16 (int16x8_t __a, int16_t __b)
{
  return __builtin_mve_vcmpneq_n_sv8hi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vcmpltq_sv8hi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq_n_s16 (int16x8_t __a, int16_t __b)
{
  return __builtin_mve_vcmpltq_n_sv8hi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vcmpleq_sv8hi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq_n_s16 (int16x8_t __a, int16_t __b)
{
  return __builtin_mve_vcmpleq_n_sv8hi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vcmpgtq_sv8hi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq_n_s16 (int16x8_t __a, int16_t __b)
{
  return __builtin_mve_vcmpgtq_n_sv8hi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vcmpgeq_sv8hi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq_n_s16 (int16x8_t __a, int16_t __b)
{
  return __builtin_mve_vcmpgeq_n_sv8hi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vcmpeqq_sv8hi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_n_s16 (int16x8_t __a, int16_t __b)
{
  return __builtin_mve_vcmpeqq_n_sv8hi (__a, __b);
}

/* Saturating shift-left-unsigned: signed input, unsigned result;
   __imm must be a compile-time constant.  */

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshluq_n_s16 (int16x8_t __a, const int __imm)
{
  return __builtin_mve_vqshluq_n_sv8hi (__a, __imm);
}
4615 | ||
/* Signed 16-bit (int16x8_t) MVE arithmetic intrinsics.  As elsewhere in
   this header, every function is a trivial always-inline forwarder to
   the GCC builtin with the same name ("sv8hi" = signed vector of eight
   half-words).  */

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvq_p_s16 (int16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vaddvq_p_sv8hi (__a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vsubq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_n_s16 (int16x8_t __a, int16_t __b)
{
  return __builtin_mve_vsubq_n_sv8hi (__a, __b);
}

/* Shifts: register-count ("_r"), rounding ("vr"), saturating ("vq"),
   and rounding-saturating ("vqr") forms.  */

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_r_s16 (int16x8_t __a, int32_t __b)
{
  return __builtin_mve_vshlq_r_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vrshlq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_n_s16 (int16x8_t __a, int32_t __b)
{
  return __builtin_mve_vrshlq_n_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vrmulhq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vrhaddq_sv8hi (__a, __b);
}

/* Saturating subtract/add and shifts.  */

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vqsubq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_n_s16 (int16x8_t __a, int16_t __b)
{
  return __builtin_mve_vqsubq_n_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vqshlq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_r_s16 (int16x8_t __a, int32_t __b)
{
  return __builtin_mve_vqshlq_r_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vqrshlq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_n_s16 (int16x8_t __a, int32_t __b)
{
  return __builtin_mve_vqrshlq_n_sv8hi (__a, __b);
}

/* Saturating (rounding) doubling multiply-high, vector and scalar
   second-operand forms.  */

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmulhq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vqrdmulhq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmulhq_n_s16 (int16x8_t __a, int16_t __b)
{
  return __builtin_mve_vqrdmulhq_n_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulhq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vqdmulhq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulhq_n_s16 (int16x8_t __a, int16_t __b)
{
  return __builtin_mve_vqdmulhq_n_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vqaddq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_n_s16 (int16x8_t __a, int16_t __b)
{
  return __builtin_mve_vqaddq_n_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vorrq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vornq_sv8hi (__a, __b);
}

/* Multiplies.  vmull{t,b}q are widening: 16-bit lane inputs produce an
   int32x4_t result vector, per the signatures below.  */

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vmulq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_n_s16 (int16x8_t __a, int16_t __b)
{
  return __builtin_mve_vmulq_n_sv8hi (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vmulltq_int_sv8hi (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_int_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vmullbq_int_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vmulhq_sv8hi (__a, __b);
}

/* Multiply-accumulate/subtract dot-product style reductions, yielding a
   32-bit scalar.  */

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavxq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vmlsdavxq_sv8hi (__a, __b);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vmlsdavq_sv8hi (__a, __b);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavxq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vmladavxq_sv8hi (__a, __b);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vmladavq_sv8hi (__a, __b);
}

/* Lanewise min/max and scalar-accumulating min/max reductions.  */

__extension__ extern __inline int16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminvq_s16 (int16_t __a, int16x8_t __b)
{
  return __builtin_mve_vminvq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vminq_sv8hi (__a, __b);
}

__extension__ extern __inline int16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxvq_s16 (int16_t __a, int16x8_t __b)
{
  return __builtin_mve_vmaxvq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vmaxq_sv8hi (__a, __b);
}

/* Halving add/subtract, plus the halving complex-add rotations.  */

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vhsubq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_n_s16 (int16x8_t __a, int16_t __b)
{
  return __builtin_mve_vhsubq_n_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhcaddq_rot90_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vhcaddq_rot90_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhcaddq_rot270_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vhcaddq_rot270_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vhaddq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_n_s16 (int16x8_t __a, int16_t __b)
{
  return __builtin_mve_vhaddq_n_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_veorq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vcaddq_rot90_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vcaddq_rot270_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq_n_s16 (int16x8_t __a, int32_t __b)
{
  return __builtin_mve_vbrsrq_n_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vbicq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vandq_sv8hi (__a, __b);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvaq_s16 (int32_t __a, int16x8_t __b)
{
  return __builtin_mve_vaddvaq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_n_s16 (int16x8_t __a, int16_t __b)
{
  return __builtin_mve_vaddq_n_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vabdq_sv8hi (__a, __b);
}

/* Immediate-count shifts: __imm must be a compile-time constant.  */

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_n_s16 (int16x8_t __a, const int __imm)
{
  return __builtin_mve_vshlq_n_sv8hi (__a, __imm);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrq_n_s16 (int16x8_t __a, const int __imm)
{
  return __builtin_mve_vrshrq_n_sv8hi (__a, __imm);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_n_s16 (int16x8_t __a, const int __imm)
{
  return __builtin_mve_vqshlq_n_sv8hi (__a, __imm);
}
4986 | ||
/* Unsigned 32-bit (uint32x4_t) MVE intrinsics.  Same pattern as the
   16-bit groups above: trivial always-inline forwarders to the GCC
   builtins ("uv4si" = unsigned vector of four words).  */

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vsubq_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_n_u32 (uint32x4_t __a, uint32_t __b)
{
  return __builtin_mve_vsubq_n_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vrmulhq_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vrhaddq_uv4si (__a, __b);
}

/* Saturating subtract/add; "_n" variants take a scalar __b.  */

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vqsubq_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_n_u32 (uint32x4_t __a, uint32_t __b)
{
  return __builtin_mve_vqsubq_n_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vqaddq_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_n_u32 (uint32x4_t __a, uint32_t __b)
{
  return __builtin_mve_vqaddq_n_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vorrq_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vornq_uv4si (__a, __b);
}

/* Multiplies.  vmull{t,b}q are widening: 32-bit lane inputs produce a
   uint64x2_t result vector, per the signatures below.  */

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vmulq_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_n_u32 (uint32x4_t __a, uint32_t __b)
{
  return __builtin_mve_vmulq_n_uv4si (__a, __b);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vmulltq_int_uv4si (__a, __b);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_int_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vmullbq_int_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vmulhq_uv4si (__a, __b);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vmladavq_uv4si (__a, __b);
}

/* Min/max reductions (scalar accumulator __a) and lanewise min/max.  */

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminvq_u32 (uint32_t __a, uint32x4_t __b)
{
  return __builtin_mve_vminvq_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vminq_uv4si (__a, __b);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxvq_u32 (uint32_t __a, uint32x4_t __b)
{
  return __builtin_mve_vmaxvq_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vmaxq_uv4si (__a, __b);
}

/* Halving subtract/add.  */

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vhsubq_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_n_u32 (uint32x4_t __a, uint32_t __b)
{
  return __builtin_mve_vhsubq_n_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vhaddq_uv4si (__a, __b);
}
5147 | ||
5148 | __extension__ extern __inline uint32x4_t | |
5149 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5150 | __arm_vhaddq_n_u32 (uint32x4_t __a, uint32_t __b) | |
5151 | { | |
5152 | return __builtin_mve_vhaddq_n_uv4si (__a, __b); | |
5153 | } | |
5154 | ||
5155 | __extension__ extern __inline uint32x4_t | |
5156 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5157 | __arm_veorq_u32 (uint32x4_t __a, uint32x4_t __b) | |
5158 | { | |
5159 | return __builtin_mve_veorq_uv4si (__a, __b); | |
5160 | } | |
5161 | ||
/* MVE uint32x4_t vector compares.  Each produces an mve_pred16_t lane
   predicate mask (VCMP): hi = unsigned greater-than, cs = unsigned
   greater-or-same; "_n" variants compare against a scalar.  */

/* VCMP.I32 ne: lanes where __a != scalar __b.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_n_u32 (uint32x4_t __a, uint32_t __b)
{
  return __builtin_mve_vcmpneq_n_uv4si (__a, __b);
}

/* VCMP.U32 hi: lanes where unsigned __a > __b.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmphiq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vcmphiq_uv4si (__a, __b);
}

/* VCMP.U32 hi (scalar second operand).  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmphiq_n_u32 (uint32x4_t __a, uint32_t __b)
{
  return __builtin_mve_vcmphiq_n_uv4si (__a, __b);
}

/* VCMP.I32 eq: lanes where __a == __b.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vcmpeqq_uv4si (__a, __b);
}

/* VCMP.I32 eq (scalar second operand).  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_n_u32 (uint32x4_t __a, uint32_t __b)
{
  return __builtin_mve_vcmpeqq_n_uv4si (__a, __b);
}

/* VCMP.U32 cs: lanes where unsigned __a >= __b.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpcsq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vcmpcsq_uv4si (__a, __b);
}

/* VCMP.U32 cs (scalar second operand).  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpcsq_n_u32 (uint32x4_t __a, uint32_t __b)
{
  return __builtin_mve_vcmpcsq_n_uv4si (__a, __b);
}
5210 | ||
/* MVE uint32x4_t intrinsics: complex add, bitwise ops, across-vector
   add reductions, and register-operand shifts.  For the shift
   wrappers the shift amount is signed; ACLE defines a negative
   amount as a shift in the opposite direction.  */

/* VCADD.I32 #90: complex add, __b rotated by 90 degrees.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vcaddq_rot90_uv4si (__a, __b);
}

/* VCADD.I32 #270: complex add, __b rotated by 270 degrees.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vcaddq_rot270_uv4si (__a, __b);
}

/* VBIC: per-lane __a & ~__b.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vbicq_uv4si (__a, __b);
}

/* VAND: per-lane bitwise AND.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vandq_uv4si (__a, __b);
}

/* VADDV.U32 (predicated): sum of the lanes selected by __p.  */
__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvq_p_u32 (uint32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vaddvq_p_uv4si (__a, __p);
}

/* VADDVA.U32: sum of lanes of __b added to scalar accumulator __a.  */
__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvaq_u32 (uint32_t __a, uint32x4_t __b)
{
  return __builtin_mve_vaddvaq_uv4si (__a, __b);
}

/* VADD.I32 (scalar): add __b to every lane of __a.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_n_u32 (uint32x4_t __a, uint32_t __b)
{
  return __builtin_mve_vaddq_n_uv4si (__a, __b);
}

/* VABD.U32: per-lane absolute difference.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vabdq_uv4si (__a, __b);
}

/* VSHL.U32 (register): shift every lane by signed scalar __b.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_r_u32 (uint32x4_t __a, int32_t __b)
{
  return __builtin_mve_vshlq_r_uv4si (__a, __b);
}

/* VRSHL.U32: rounding shift by per-lane signed amounts in __b.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_u32 (uint32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vrshlq_uv4si (__a, __b);
}

/* VRSHL.U32 (register scalar shift amount).  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_n_u32 (uint32x4_t __a, int32_t __b)
{
  return __builtin_mve_vrshlq_n_uv4si (__a, __b);
}

/* VQSHL.U32: saturating shift by per-lane signed amounts in __b.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_u32 (uint32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vqshlq_uv4si (__a, __b);
}

/* VQSHL.U32 (register scalar shift amount).  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_r_u32 (uint32x4_t __a, int32_t __b)
{
  return __builtin_mve_vqshlq_r_uv4si (__a, __b);
}

/* VQRSHL.U32: saturating rounding shift by per-lane amounts.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_u32 (uint32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vqrshlq_uv4si (__a, __b);
}

/* VQRSHL.U32 (register scalar shift amount).  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_n_u32 (uint32x4_t __a, int32_t __b)
{
  return __builtin_mve_vqrshlq_n_uv4si (__a, __b);
}
5315 | ||
/* MVE absolute min/max intrinsics on signed input with unsigned
   result ("a" = absolute variants), plus VBRSR.  */

/* VMINAV.S32: minimum of scalar __a and |lane| over __b (unsigned result).  */
__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminavq_s32 (uint32_t __a, int32x4_t __b)
{
  return __builtin_mve_vminavq_sv4si (__a, __b);
}

/* VMINA.S32: per-lane minimum of __a and |__b|.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminaq_s32 (uint32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vminaq_sv4si (__a, __b);
}

/* VMAXAV.S32: maximum of scalar __a and |lane| over __b (unsigned result).  */
__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxavq_s32 (uint32_t __a, int32x4_t __b)
{
  return __builtin_mve_vmaxavq_sv4si (__a, __b);
}

/* VMAXA.S32: per-lane maximum of __a and |__b|.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxaq_s32 (uint32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vmaxaq_sv4si (__a, __b);
}

/* VBRSR.32: bit-reverse the low __b bits of each lane of __a.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq_n_u32 (uint32x4_t __a, int32_t __b)
{
  return __builtin_mve_vbrsrq_n_uv4si (__a, __b);
}
5350 | ||
/* MVE uint32x4_t immediate-shift intrinsics; __imm must be a
   compile-time constant in the instruction's valid range.  */

/* VSHL.U32 (immediate): shift every lane left by __imm.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_n_u32 (uint32x4_t __a, const int __imm)
{
  return __builtin_mve_vshlq_n_uv4si (__a, __imm);
}

/* VRSHR.U32 (immediate): rounding shift right by __imm.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrq_n_u32 (uint32x4_t __a, const int __imm)
{
  return __builtin_mve_vrshrq_n_uv4si (__a, __imm);
}

/* VQSHL.U32 (immediate): saturating shift left by __imm.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_n_u32 (uint32x4_t __a, const int __imm)
{
  return __builtin_mve_vqshlq_n_uv4si (__a, __imm);
}
5371 | ||
/* MVE int32x4_t signed compares; each yields an mve_pred16_t lane
   predicate mask (VCMP.S32); "_n" variants compare against a scalar.  */

/* VCMP.I32 ne: lanes where __a != scalar __b.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_n_s32 (int32x4_t __a, int32_t __b)
{
  return __builtin_mve_vcmpneq_n_sv4si (__a, __b);
}

/* VCMP.S32 lt: lanes where __a < __b.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vcmpltq_sv4si (__a, __b);
}

/* VCMP.S32 lt (scalar second operand).  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq_n_s32 (int32x4_t __a, int32_t __b)
{
  return __builtin_mve_vcmpltq_n_sv4si (__a, __b);
}

/* VCMP.S32 le: lanes where __a <= __b.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vcmpleq_sv4si (__a, __b);
}

/* VCMP.S32 le (scalar second operand).  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq_n_s32 (int32x4_t __a, int32_t __b)
{
  return __builtin_mve_vcmpleq_n_sv4si (__a, __b);
}

/* VCMP.S32 gt: lanes where __a > __b.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vcmpgtq_sv4si (__a, __b);
}

/* VCMP.S32 gt (scalar second operand).  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq_n_s32 (int32x4_t __a, int32_t __b)
{
  return __builtin_mve_vcmpgtq_n_sv4si (__a, __b);
}

/* VCMP.S32 ge: lanes where __a >= __b.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vcmpgeq_sv4si (__a, __b);
}

/* VCMP.S32 ge (scalar second operand).  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq_n_s32 (int32x4_t __a, int32_t __b)
{
  return __builtin_mve_vcmpgeq_n_sv4si (__a, __b);
}

/* VCMP.I32 eq: lanes where __a == __b.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vcmpeqq_sv4si (__a, __b);
}

/* VCMP.I32 eq (scalar second operand).  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_n_s32 (int32x4_t __a, int32_t __b)
{
  return __builtin_mve_vcmpeqq_n_sv4si (__a, __b);
}
5448 | ||
/* MVE int32x4_t intrinsics: saturating ("q"), rounding ("r"),
   doubling ("d"), halving ("h") arithmetic, multiply-accumulate
   reductions, logic, complex add and shifts.  All are thin
   always-inline wrappers over the matching GCC MVE builtins.  */

/* VQSHLU.S32 (immediate): saturating shift left, signed in, unsigned out.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshluq_n_s32 (int32x4_t __a, const int __imm)
{
  return __builtin_mve_vqshluq_n_sv4si (__a, __imm);
}

/* VADDV.S32 (predicated): sum of the lanes selected by __p.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvq_p_s32 (int32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vaddvq_p_sv4si (__a, __p);
}

/* VSUB.I32: per-lane subtract.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vsubq_sv4si (__a, __b);
}

/* VSUB.I32 (scalar second operand).  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_n_s32 (int32x4_t __a, int32_t __b)
{
  return __builtin_mve_vsubq_n_sv4si (__a, __b);
}

/* VSHL.S32 (register): shift every lane by signed scalar __b.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_r_s32 (int32x4_t __a, int32_t __b)
{
  return __builtin_mve_vshlq_r_sv4si (__a, __b);
}

/* VRSHL.S32: rounding shift by per-lane signed amounts in __b.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vrshlq_sv4si (__a, __b);
}

/* VRSHL.S32 (register scalar shift amount).  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_n_s32 (int32x4_t __a, int32_t __b)
{
  return __builtin_mve_vrshlq_n_sv4si (__a, __b);
}

/* VRMULH.S32: rounding high-half of each 64-bit lane product.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vrmulhq_sv4si (__a, __b);
}

/* VRHADD.S32: per-lane rounding halving add.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vrhaddq_sv4si (__a, __b);
}

/* VQSUB.S32: per-lane saturating subtract.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vqsubq_sv4si (__a, __b);
}

/* VQSUB.S32 (scalar second operand).  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_n_s32 (int32x4_t __a, int32_t __b)
{
  return __builtin_mve_vqsubq_n_sv4si (__a, __b);
}

/* VQSHL.S32: saturating shift by per-lane signed amounts in __b.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vqshlq_sv4si (__a, __b);
}

/* VQSHL.S32 (register scalar shift amount).  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_r_s32 (int32x4_t __a, int32_t __b)
{
  return __builtin_mve_vqshlq_r_sv4si (__a, __b);
}

/* VQRSHL.S32: saturating rounding shift by per-lane amounts.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vqrshlq_sv4si (__a, __b);
}

/* VQRSHL.S32 (register scalar shift amount).  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_n_s32 (int32x4_t __a, int32_t __b)
{
  return __builtin_mve_vqrshlq_n_sv4si (__a, __b);
}

/* VQRDMULH.S32: saturating rounding doubling multiply, high half.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmulhq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vqrdmulhq_sv4si (__a, __b);
}

/* VQRDMULH.S32 (scalar second operand).  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmulhq_n_s32 (int32x4_t __a, int32_t __b)
{
  return __builtin_mve_vqrdmulhq_n_sv4si (__a, __b);
}

/* VQDMULH.S32: saturating doubling multiply, high half.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulhq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vqdmulhq_sv4si (__a, __b);
}

/* VQDMULH.S32 (scalar second operand).  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulhq_n_s32 (int32x4_t __a, int32_t __b)
{
  return __builtin_mve_vqdmulhq_n_sv4si (__a, __b);
}

/* VQADD.S32: per-lane saturating add.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vqaddq_sv4si (__a, __b);
}

/* VQADD.S32 (scalar second operand).  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_n_s32 (int32x4_t __a, int32_t __b)
{
  return __builtin_mve_vqaddq_n_sv4si (__a, __b);
}

/* VORR: per-lane bitwise OR.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vorrq_sv4si (__a, __b);
}

/* VORN: per-lane __a | ~__b.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vornq_sv4si (__a, __b);
}

/* VMUL.I32: per-lane multiply (low 32 bits of each product).  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vmulq_sv4si (__a, __b);
}

/* VMUL.I32 (scalar second operand).  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_n_s32 (int32x4_t __a, int32_t __b)
{
  return __builtin_mve_vmulq_n_sv4si (__a, __b);
}

/* VMULLT.S32: widening multiply of the top (odd-numbered) lanes.  */
__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vmulltq_int_sv4si (__a, __b);
}

/* VMULLB.S32: widening multiply of the bottom (even-numbered) lanes.  */
__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_int_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vmullbq_int_sv4si (__a, __b);
}

/* VMULH.S32: high 32 bits of each 64-bit lane product.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vmulhq_sv4si (__a, __b);
}

/* VMLSDAVX.S32: multiply-subtract dual accumulate across, exchanged.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavxq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vmlsdavxq_sv4si (__a, __b);
}

/* VMLSDAV.S32: multiply-subtract dual accumulate across the vector.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vmlsdavq_sv4si (__a, __b);
}

/* VMLADAVX.S32: multiply-add dual accumulate across, exchanged.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavxq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vmladavxq_sv4si (__a, __b);
}

/* VMLADAV.S32: multiply lanes pairwise and sum across the vector.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vmladavq_sv4si (__a, __b);
}

/* VMINV.S32: minimum across the vector, seeded with scalar __a.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminvq_s32 (int32_t __a, int32x4_t __b)
{
  return __builtin_mve_vminvq_sv4si (__a, __b);
}

/* VMIN.S32: per-lane minimum.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vminq_sv4si (__a, __b);
}

/* VMAXV.S32: maximum across the vector, seeded with scalar __a.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxvq_s32 (int32_t __a, int32x4_t __b)
{
  return __builtin_mve_vmaxvq_sv4si (__a, __b);
}

/* VMAX.S32: per-lane maximum.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vmaxq_sv4si (__a, __b);
}

/* VHSUB.S32: per-lane halving subtract.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vhsubq_sv4si (__a, __b);
}

/* VHSUB.S32 (scalar second operand).  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_n_s32 (int32x4_t __a, int32_t __b)
{
  return __builtin_mve_vhsubq_n_sv4si (__a, __b);
}

/* VHCADD.S32 #90: halving complex add, __b rotated by 90 degrees.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhcaddq_rot90_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vhcaddq_rot90_sv4si (__a, __b);
}

/* VHCADD.S32 #270: halving complex add, __b rotated by 270 degrees.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhcaddq_rot270_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vhcaddq_rot270_sv4si (__a, __b);
}

/* VHADD.S32: per-lane halving add.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vhaddq_sv4si (__a, __b);
}

/* VHADD.S32 (scalar second operand).  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_n_s32 (int32x4_t __a, int32_t __b)
{
  return __builtin_mve_vhaddq_n_sv4si (__a, __b);
}

/* VEOR: per-lane exclusive OR.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_veorq_sv4si (__a, __b);
}

/* VCADD.I32 #90: complex add, __b rotated by 90 degrees.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vcaddq_rot90_sv4si (__a, __b);
}

/* VCADD.I32 #270: complex add, __b rotated by 270 degrees.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vcaddq_rot270_sv4si (__a, __b);
}

/* VBRSR.32: bit-reverse the low __b bits of each lane of __a.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq_n_s32 (int32x4_t __a, int32_t __b)
{
  return __builtin_mve_vbrsrq_n_sv4si (__a, __b);
}

/* VBIC: per-lane __a & ~__b.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vbicq_sv4si (__a, __b);
}

/* VAND: per-lane bitwise AND.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vandq_sv4si (__a, __b);
}

/* VADDVA.S32: sum of lanes of __b added to scalar accumulator __a.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvaq_s32 (int32_t __a, int32x4_t __b)
{
  return __builtin_mve_vaddvaq_sv4si (__a, __b);
}

/* VADD.I32 (scalar): add __b to every lane of __a.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_n_s32 (int32x4_t __a, int32_t __b)
{
  return __builtin_mve_vaddq_n_sv4si (__a, __b);
}

/* VABD.S32: per-lane absolute difference.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vabdq_sv4si (__a, __b);
}

/* VSHL.S32 (immediate): shift every lane left by __imm.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_n_s32 (int32x4_t __a, const int __imm)
{
  return __builtin_mve_vshlq_n_sv4si (__a, __imm);
}

/* VRSHR.S32 (immediate): rounding shift right by __imm.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrq_n_s32 (int32x4_t __a, const int __imm)
{
  return __builtin_mve_vrshrq_n_sv4si (__a, __imm);
}

/* VQSHL.S32 (immediate): saturating shift left by __imm.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_n_s32 (int32x4_t __a, const int __imm)
{
  return __builtin_mve_vqshlq_n_sv4si (__a, __imm);
}
f166a8cd | 5826 | |
/* MVE narrowing, widening and polynomial intrinsics on 8/16-bit
   lanes.  "t"/"b" suffixes write or read the top (odd) or bottom
   (even) lanes of the wider vector respectively.  */

/* VQMOVNT.U16: saturating narrow __b into the top lanes of __a.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqmovntq_u16 (uint8x16_t __a, uint16x8_t __b)
{
  return __builtin_mve_vqmovntq_uv8hi (__a, __b);
}

/* VQMOVNB.U16: saturating narrow __b into the bottom lanes of __a.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqmovnbq_u16 (uint8x16_t __a, uint16x8_t __b)
{
  return __builtin_mve_vqmovnbq_uv8hi (__a, __b);
}

/* VMULLT.P8: polynomial widening multiply of the top lanes.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_poly_p8 (uint8x16_t __a, uint8x16_t __b)
{
  return __builtin_mve_vmulltq_poly_pv16qi (__a, __b);
}

/* VMULLB.P8: polynomial widening multiply of the bottom lanes.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_poly_p8 (uint8x16_t __a, uint8x16_t __b)
{
  return __builtin_mve_vmullbq_poly_pv16qi (__a, __b);
}

/* VMOVNT.I16: truncating narrow __b into the top lanes of __a.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovntq_u16 (uint8x16_t __a, uint16x8_t __b)
{
  return __builtin_mve_vmovntq_uv8hi (__a, __b);
}

/* VMOVNB.I16: truncating narrow __b into the bottom lanes of __a.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovnbq_u16 (uint8x16_t __a, uint16x8_t __b)
{
  return __builtin_mve_vmovnbq_uv8hi (__a, __b);
}

/* VMLALDAV.U16: widening multiply-accumulate across the vector.  */
__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaldavq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_mve_vmlaldavq_uv8hi (__a, __b);
}

/* VQMOVUNT.S16: saturating narrow signed __b to unsigned, top lanes.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqmovuntq_s16 (uint8x16_t __a, int16x8_t __b)
{
  return __builtin_mve_vqmovuntq_sv8hi (__a, __b);
}

/* VQMOVUNB.S16: saturating narrow signed __b to unsigned, bottom lanes.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqmovunbq_s16 (uint8x16_t __a, int16x8_t __b)
{
  return __builtin_mve_vqmovunbq_sv8hi (__a, __b);
}

/* VSHLLT.U8: widening shift left of the top lanes by __imm.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlltq_n_u8 (uint8x16_t __a, const int __imm)
{
  return __builtin_mve_vshlltq_n_uv16qi (__a, __imm);
}

/* VSHLLB.U8: widening shift left of the bottom lanes by __imm.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshllbq_n_u8 (uint8x16_t __a, const int __imm)
{
  return __builtin_mve_vshllbq_n_uv16qi (__a, __imm);
}

/* VORR.I16 (immediate): OR each lane with the encoded immediate.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_n_u16 (uint16x8_t __a, const int __imm)
{
  return __builtin_mve_vorrq_n_uv8hi (__a, __imm);
}
5910 | ||
f9355dee | 5911 | __extension__ extern __inline uint16x8_t |
a50f6abf | 5912 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 5913 | __arm_vbicq_n_u16 (uint16x8_t __a, const int __imm) |
a50f6abf | 5914 | { |
f9355dee | 5915 | return __builtin_mve_vbicq_n_uv8hi (__a, __imm); |
a50f6abf SP |
5916 | } |
5917 | ||
f9355dee | 5918 | __extension__ extern __inline int8x16_t |
a50f6abf | 5919 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 5920 | __arm_vqmovntq_s16 (int8x16_t __a, int16x8_t __b) |
a50f6abf | 5921 | { |
f9355dee | 5922 | return __builtin_mve_vqmovntq_sv8hi (__a, __b); |
a50f6abf SP |
5923 | } |
5924 | ||
f9355dee | 5925 | __extension__ extern __inline int8x16_t |
a50f6abf | 5926 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 5927 | __arm_vqmovnbq_s16 (int8x16_t __a, int16x8_t __b) |
a50f6abf | 5928 | { |
f9355dee | 5929 | return __builtin_mve_vqmovnbq_sv8hi (__a, __b); |
a50f6abf SP |
5930 | } |
5931 | ||
f9355dee | 5932 | __extension__ extern __inline int32x4_t |
a50f6abf | 5933 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 5934 | __arm_vqdmulltq_s16 (int16x8_t __a, int16x8_t __b) |
a50f6abf | 5935 | { |
f9355dee | 5936 | return __builtin_mve_vqdmulltq_sv8hi (__a, __b); |
a50f6abf SP |
5937 | } |
5938 | ||
f9355dee | 5939 | __extension__ extern __inline int32x4_t |
a50f6abf | 5940 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 5941 | __arm_vqdmulltq_n_s16 (int16x8_t __a, int16_t __b) |
a50f6abf | 5942 | { |
f9355dee | 5943 | return __builtin_mve_vqdmulltq_n_sv8hi (__a, __b); |
a50f6abf SP |
5944 | } |
5945 | ||
f9355dee | 5946 | __extension__ extern __inline int32x4_t |
a50f6abf | 5947 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 5948 | __arm_vqdmullbq_s16 (int16x8_t __a, int16x8_t __b) |
a50f6abf | 5949 | { |
f9355dee | 5950 | return __builtin_mve_vqdmullbq_sv8hi (__a, __b); |
a50f6abf SP |
5951 | } |
5952 | ||
f9355dee | 5953 | __extension__ extern __inline int32x4_t |
a50f6abf | 5954 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 5955 | __arm_vqdmullbq_n_s16 (int16x8_t __a, int16_t __b) |
a50f6abf | 5956 | { |
f9355dee | 5957 | return __builtin_mve_vqdmullbq_n_sv8hi (__a, __b); |
a50f6abf SP |
5958 | } |
5959 | ||
f9355dee | 5960 | __extension__ extern __inline int8x16_t |
a50f6abf | 5961 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 5962 | __arm_vmovntq_s16 (int8x16_t __a, int16x8_t __b) |
a50f6abf | 5963 | { |
f9355dee | 5964 | return __builtin_mve_vmovntq_sv8hi (__a, __b); |
a50f6abf SP |
5965 | } |
5966 | ||
f9355dee | 5967 | __extension__ extern __inline int8x16_t |
a50f6abf | 5968 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 5969 | __arm_vmovnbq_s16 (int8x16_t __a, int16x8_t __b) |
a50f6abf | 5970 | { |
f9355dee | 5971 | return __builtin_mve_vmovnbq_sv8hi (__a, __b); |
a50f6abf SP |
5972 | } |
5973 | ||
f9355dee | 5974 | __extension__ extern __inline int64_t |
a50f6abf | 5975 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 5976 | __arm_vmlsldavxq_s16 (int16x8_t __a, int16x8_t __b) |
a50f6abf | 5977 | { |
f9355dee | 5978 | return __builtin_mve_vmlsldavxq_sv8hi (__a, __b); |
a50f6abf SP |
5979 | } |
5980 | ||
f9355dee | 5981 | __extension__ extern __inline int64_t |
a50f6abf | 5982 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 5983 | __arm_vmlsldavq_s16 (int16x8_t __a, int16x8_t __b) |
a50f6abf | 5984 | { |
f9355dee | 5985 | return __builtin_mve_vmlsldavq_sv8hi (__a, __b); |
a50f6abf SP |
5986 | } |
5987 | ||
f9355dee | 5988 | __extension__ extern __inline int64_t |
a50f6abf | 5989 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 5990 | __arm_vmlaldavxq_s16 (int16x8_t __a, int16x8_t __b) |
a50f6abf | 5991 | { |
f9355dee | 5992 | return __builtin_mve_vmlaldavxq_sv8hi (__a, __b); |
a50f6abf SP |
5993 | } |
5994 | ||
f9355dee | 5995 | __extension__ extern __inline int64_t |
a50f6abf | 5996 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 5997 | __arm_vmlaldavq_s16 (int16x8_t __a, int16x8_t __b) |
a50f6abf | 5998 | { |
f9355dee | 5999 | return __builtin_mve_vmlaldavq_sv8hi (__a, __b); |
a50f6abf SP |
6000 | } |
6001 | ||
f9355dee | 6002 | __extension__ extern __inline int16x8_t |
a50f6abf | 6003 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6004 | __arm_vshlltq_n_s8 (int8x16_t __a, const int __imm) |
a50f6abf | 6005 | { |
f9355dee | 6006 | return __builtin_mve_vshlltq_n_sv16qi (__a, __imm); |
a50f6abf SP |
6007 | } |
6008 | ||
f9355dee | 6009 | __extension__ extern __inline int16x8_t |
a50f6abf | 6010 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6011 | __arm_vshllbq_n_s8 (int8x16_t __a, const int __imm) |
a50f6abf | 6012 | { |
f9355dee | 6013 | return __builtin_mve_vshllbq_n_sv16qi (__a, __imm); |
a50f6abf SP |
6014 | } |
6015 | ||
f9355dee | 6016 | __extension__ extern __inline int16x8_t |
a50f6abf | 6017 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6018 | __arm_vorrq_n_s16 (int16x8_t __a, const int __imm) |
a50f6abf | 6019 | { |
f9355dee | 6020 | return __builtin_mve_vorrq_n_sv8hi (__a, __imm); |
a50f6abf SP |
6021 | } |
6022 | ||
f9355dee | 6023 | __extension__ extern __inline int16x8_t |
a50f6abf | 6024 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6025 | __arm_vbicq_n_s16 (int16x8_t __a, const int __imm) |
a50f6abf | 6026 | { |
f9355dee | 6027 | return __builtin_mve_vbicq_n_sv8hi (__a, __imm); |
a50f6abf SP |
6028 | } |
6029 | ||
f9355dee | 6030 | __extension__ extern __inline uint16x8_t |
5db0eb95 | 6031 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6032 | __arm_vqmovntq_u32 (uint16x8_t __a, uint32x4_t __b) |
5db0eb95 | 6033 | { |
f9355dee | 6034 | return __builtin_mve_vqmovntq_uv4si (__a, __b); |
5db0eb95 SP |
6035 | } |
6036 | ||
f9355dee | 6037 | __extension__ extern __inline uint16x8_t |
5db0eb95 | 6038 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6039 | __arm_vqmovnbq_u32 (uint16x8_t __a, uint32x4_t __b) |
5db0eb95 | 6040 | { |
f9355dee | 6041 | return __builtin_mve_vqmovnbq_uv4si (__a, __b); |
5db0eb95 SP |
6042 | } |
6043 | ||
f9355dee | 6044 | __extension__ extern __inline uint32x4_t |
5db0eb95 | 6045 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6046 | __arm_vmulltq_poly_p16 (uint16x8_t __a, uint16x8_t __b) |
5db0eb95 | 6047 | { |
f9355dee | 6048 | return __builtin_mve_vmulltq_poly_pv8hi (__a, __b); |
5db0eb95 SP |
6049 | } |
6050 | ||
6051 | __extension__ extern __inline uint32x4_t | |
6052 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
f9355dee | 6053 | __arm_vmullbq_poly_p16 (uint16x8_t __a, uint16x8_t __b) |
5db0eb95 | 6054 | { |
f9355dee | 6055 | return __builtin_mve_vmullbq_poly_pv8hi (__a, __b); |
5db0eb95 SP |
6056 | } |
6057 | ||
6df4618c SP |
6058 | __extension__ extern __inline uint16x8_t |
6059 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
f9355dee | 6060 | __arm_vmovntq_u32 (uint16x8_t __a, uint32x4_t __b) |
6df4618c | 6061 | { |
f9355dee | 6062 | return __builtin_mve_vmovntq_uv4si (__a, __b); |
6df4618c SP |
6063 | } |
6064 | ||
f9355dee | 6065 | __extension__ extern __inline uint16x8_t |
6df4618c | 6066 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6067 | __arm_vmovnbq_u32 (uint16x8_t __a, uint32x4_t __b) |
6df4618c | 6068 | { |
f9355dee SP |
6069 | return __builtin_mve_vmovnbq_uv4si (__a, __b); |
6070 | } | |
6071 | ||
6072 | __extension__ extern __inline uint64_t | |
6073 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6074 | __arm_vmlaldavq_u32 (uint32x4_t __a, uint32x4_t __b) | |
6075 | { | |
6076 | return __builtin_mve_vmlaldavq_uv4si (__a, __b); | |
6df4618c SP |
6077 | } |
6078 | ||
6079 | __extension__ extern __inline uint16x8_t | |
6080 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
f9355dee | 6081 | __arm_vqmovuntq_s32 (uint16x8_t __a, int32x4_t __b) |
6df4618c | 6082 | { |
f9355dee | 6083 | return __builtin_mve_vqmovuntq_sv4si (__a, __b); |
6df4618c SP |
6084 | } |
6085 | ||
6086 | __extension__ extern __inline uint16x8_t | |
6087 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
f9355dee | 6088 | __arm_vqmovunbq_s32 (uint16x8_t __a, int32x4_t __b) |
6df4618c | 6089 | { |
f9355dee | 6090 | return __builtin_mve_vqmovunbq_sv4si (__a, __b); |
6df4618c SP |
6091 | } |
6092 | ||
6093 | __extension__ extern __inline uint32x4_t | |
6094 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
f9355dee | 6095 | __arm_vshlltq_n_u16 (uint16x8_t __a, const int __imm) |
6df4618c | 6096 | { |
f9355dee | 6097 | return __builtin_mve_vshlltq_n_uv8hi (__a, __imm); |
6df4618c SP |
6098 | } |
6099 | ||
f9355dee | 6100 | __extension__ extern __inline uint32x4_t |
6df4618c | 6101 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6102 | __arm_vshllbq_n_u16 (uint16x8_t __a, const int __imm) |
6df4618c | 6103 | { |
f9355dee | 6104 | return __builtin_mve_vshllbq_n_uv8hi (__a, __imm); |
6df4618c SP |
6105 | } |
6106 | ||
6107 | __extension__ extern __inline uint32x4_t | |
6108 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
f9355dee | 6109 | __arm_vorrq_n_u32 (uint32x4_t __a, const int __imm) |
6df4618c | 6110 | { |
f9355dee | 6111 | return __builtin_mve_vorrq_n_uv4si (__a, __imm); |
6df4618c SP |
6112 | } |
6113 | ||
f9355dee SP |
6114 | __extension__ extern __inline uint32x4_t |
6115 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6116 | __arm_vbicq_n_u32 (uint32x4_t __a, const int __imm) | |
6df4618c | 6117 | { |
f9355dee | 6118 | return __builtin_mve_vbicq_n_uv4si (__a, __imm); |
6df4618c SP |
6119 | } |
6120 | ||
f9355dee | 6121 | __extension__ extern __inline int16x8_t |
6df4618c | 6122 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6123 | __arm_vqmovntq_s32 (int16x8_t __a, int32x4_t __b) |
6df4618c | 6124 | { |
f9355dee | 6125 | return __builtin_mve_vqmovntq_sv4si (__a, __b); |
6df4618c SP |
6126 | } |
6127 | ||
6128 | __extension__ extern __inline int16x8_t | |
6129 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
f9355dee | 6130 | __arm_vqmovnbq_s32 (int16x8_t __a, int32x4_t __b) |
6df4618c | 6131 | { |
f9355dee | 6132 | return __builtin_mve_vqmovnbq_sv4si (__a, __b); |
6df4618c SP |
6133 | } |
6134 | ||
f9355dee | 6135 | __extension__ extern __inline int64x2_t |
6df4618c | 6136 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6137 | __arm_vqdmulltq_s32 (int32x4_t __a, int32x4_t __b) |
6df4618c | 6138 | { |
f9355dee | 6139 | return __builtin_mve_vqdmulltq_sv4si (__a, __b); |
6df4618c SP |
6140 | } |
6141 | ||
f9355dee | 6142 | __extension__ extern __inline int64x2_t |
6df4618c | 6143 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6144 | __arm_vqdmulltq_n_s32 (int32x4_t __a, int32_t __b) |
6df4618c | 6145 | { |
f9355dee | 6146 | return __builtin_mve_vqdmulltq_n_sv4si (__a, __b); |
6df4618c SP |
6147 | } |
6148 | ||
f9355dee | 6149 | __extension__ extern __inline int64x2_t |
6df4618c | 6150 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6151 | __arm_vqdmullbq_s32 (int32x4_t __a, int32x4_t __b) |
6df4618c | 6152 | { |
f9355dee SP |
6153 | return __builtin_mve_vqdmullbq_sv4si (__a, __b); |
6154 | } | |
6155 | ||
6156 | __extension__ extern __inline int64x2_t | |
6157 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6158 | __arm_vqdmullbq_n_s32 (int32x4_t __a, int32_t __b) | |
6159 | { | |
6160 | return __builtin_mve_vqdmullbq_n_sv4si (__a, __b); | |
6df4618c SP |
6161 | } |
6162 | ||
6163 | __extension__ extern __inline int16x8_t | |
6164 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
f9355dee | 6165 | __arm_vmovntq_s32 (int16x8_t __a, int32x4_t __b) |
6df4618c | 6166 | { |
f9355dee | 6167 | return __builtin_mve_vmovntq_sv4si (__a, __b); |
6df4618c SP |
6168 | } |
6169 | ||
f9355dee | 6170 | __extension__ extern __inline int16x8_t |
6df4618c | 6171 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6172 | __arm_vmovnbq_s32 (int16x8_t __a, int32x4_t __b) |
6df4618c | 6173 | { |
f9355dee | 6174 | return __builtin_mve_vmovnbq_sv4si (__a, __b); |
6df4618c SP |
6175 | } |
6176 | ||
f9355dee | 6177 | __extension__ extern __inline int64_t |
4be8cf77 | 6178 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6179 | __arm_vmlsldavxq_s32 (int32x4_t __a, int32x4_t __b) |
4be8cf77 | 6180 | { |
f9355dee | 6181 | return __builtin_mve_vmlsldavxq_sv4si (__a, __b); |
4be8cf77 SP |
6182 | } |
6183 | ||
f9355dee | 6184 | __extension__ extern __inline int64_t |
4be8cf77 | 6185 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6186 | __arm_vmlsldavq_s32 (int32x4_t __a, int32x4_t __b) |
4be8cf77 | 6187 | { |
f9355dee | 6188 | return __builtin_mve_vmlsldavq_sv4si (__a, __b); |
4be8cf77 SP |
6189 | } |
6190 | ||
f9355dee | 6191 | __extension__ extern __inline int64_t |
4be8cf77 | 6192 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6193 | __arm_vmlaldavxq_s32 (int32x4_t __a, int32x4_t __b) |
4be8cf77 | 6194 | { |
f9355dee | 6195 | return __builtin_mve_vmlaldavxq_sv4si (__a, __b); |
4be8cf77 SP |
6196 | } |
6197 | ||
f9355dee | 6198 | __extension__ extern __inline int64_t |
4be8cf77 | 6199 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6200 | __arm_vmlaldavq_s32 (int32x4_t __a, int32x4_t __b) |
4be8cf77 | 6201 | { |
f9355dee | 6202 | return __builtin_mve_vmlaldavq_sv4si (__a, __b); |
4be8cf77 SP |
6203 | } |
6204 | ||
f9355dee | 6205 | __extension__ extern __inline int32x4_t |
4be8cf77 | 6206 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6207 | __arm_vshlltq_n_s16 (int16x8_t __a, const int __imm) |
4be8cf77 | 6208 | { |
f9355dee | 6209 | return __builtin_mve_vshlltq_n_sv8hi (__a, __imm); |
4be8cf77 SP |
6210 | } |
6211 | ||
f9355dee | 6212 | __extension__ extern __inline int32x4_t |
4be8cf77 | 6213 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6214 | __arm_vshllbq_n_s16 (int16x8_t __a, const int __imm) |
4be8cf77 | 6215 | { |
f9355dee | 6216 | return __builtin_mve_vshllbq_n_sv8hi (__a, __imm); |
4be8cf77 SP |
6217 | } |
6218 | ||
f9355dee | 6219 | __extension__ extern __inline int32x4_t |
4be8cf77 | 6220 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6221 | __arm_vorrq_n_s32 (int32x4_t __a, const int __imm) |
4be8cf77 | 6222 | { |
f9355dee | 6223 | return __builtin_mve_vorrq_n_sv4si (__a, __imm); |
4be8cf77 SP |
6224 | } |
6225 | ||
f9355dee | 6226 | __extension__ extern __inline int32x4_t |
4be8cf77 | 6227 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6228 | __arm_vbicq_n_s32 (int32x4_t __a, const int __imm) |
4be8cf77 | 6229 | { |
f9355dee | 6230 | return __builtin_mve_vbicq_n_sv4si (__a, __imm); |
4be8cf77 SP |
6231 | } |
6232 | ||
f9355dee | 6233 | __extension__ extern __inline uint64_t |
4be8cf77 | 6234 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6235 | __arm_vrmlaldavhq_u32 (uint32x4_t __a, uint32x4_t __b) |
4be8cf77 | 6236 | { |
f9355dee | 6237 | return __builtin_mve_vrmlaldavhq_uv4si (__a, __b); |
4be8cf77 SP |
6238 | } |
6239 | ||
f9355dee | 6240 | __extension__ extern __inline mve_pred16_t |
4be8cf77 | 6241 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6242 | __arm_vctp8q_m (uint32_t __a, mve_pred16_t __p) |
4be8cf77 | 6243 | { |
f9355dee | 6244 | return __builtin_mve_vctp8q_mhi (__a, __p); |
4be8cf77 SP |
6245 | } |
6246 | ||
f9355dee | 6247 | __extension__ extern __inline mve_pred16_t |
f166a8cd | 6248 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6249 | __arm_vctp64q_m (uint32_t __a, mve_pred16_t __p) |
f166a8cd | 6250 | { |
f9355dee | 6251 | return __builtin_mve_vctp64q_mhi (__a, __p); |
f166a8cd SP |
6252 | } |
6253 | ||
f9355dee | 6254 | __extension__ extern __inline mve_pred16_t |
f166a8cd | 6255 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6256 | __arm_vctp32q_m (uint32_t __a, mve_pred16_t __p) |
f166a8cd | 6257 | { |
f9355dee | 6258 | return __builtin_mve_vctp32q_mhi (__a, __p); |
f166a8cd SP |
6259 | } |
6260 | ||
f9355dee | 6261 | __extension__ extern __inline mve_pred16_t |
f166a8cd | 6262 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6263 | __arm_vctp16q_m (uint32_t __a, mve_pred16_t __p) |
f166a8cd | 6264 | { |
f9355dee | 6265 | return __builtin_mve_vctp16q_mhi (__a, __p); |
f166a8cd SP |
6266 | } |
6267 | ||
f9355dee | 6268 | __extension__ extern __inline uint64_t |
f166a8cd | 6269 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6270 | __arm_vaddlvaq_u32 (uint64_t __a, uint32x4_t __b) |
f166a8cd | 6271 | { |
f9355dee | 6272 | return __builtin_mve_vaddlvaq_uv4si (__a, __b); |
f166a8cd SP |
6273 | } |
6274 | ||
f9355dee SP |
6275 | __extension__ extern __inline int64_t |
6276 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6277 | __arm_vrmlsldavhxq_s32 (int32x4_t __a, int32x4_t __b) | |
6278 | { | |
6279 | return __builtin_mve_vrmlsldavhxq_sv4si (__a, __b); | |
6280 | } | |
14782c81 | 6281 | |
f9355dee SP |
6282 | __extension__ extern __inline int64_t |
6283 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6284 | __arm_vrmlsldavhq_s32 (int32x4_t __a, int32x4_t __b) | |
6285 | { | |
6286 | return __builtin_mve_vrmlsldavhq_sv4si (__a, __b); | |
6287 | } | |
6288 | ||
6289 | __extension__ extern __inline int64_t | |
6290 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6291 | __arm_vrmlaldavhxq_s32 (int32x4_t __a, int32x4_t __b) | |
6292 | { | |
6293 | return __builtin_mve_vrmlaldavhxq_sv4si (__a, __b); | |
6294 | } | |
6295 | ||
6296 | __extension__ extern __inline int64_t | |
6297 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6298 | __arm_vrmlaldavhq_s32 (int32x4_t __a, int32x4_t __b) | |
6299 | { | |
6300 | return __builtin_mve_vrmlaldavhq_sv4si (__a, __b); | |
6301 | } | |
6302 | ||
6303 | __extension__ extern __inline int64_t | |
6304 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6305 | __arm_vaddlvaq_s32 (int64_t __a, int32x4_t __b) | |
6306 | { | |
6307 | return __builtin_mve_vaddlvaq_sv4si (__a, __b); | |
6308 | } | |
6309 | ||
0dad5b33 SP |
6310 | __extension__ extern __inline uint32_t |
6311 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6312 | __arm_vabavq_s8 (uint32_t __a, int8x16_t __b, int8x16_t __c) | |
6313 | { | |
6314 | return __builtin_mve_vabavq_sv16qi (__a, __b, __c); | |
6315 | } | |
6316 | ||
6317 | __extension__ extern __inline uint32_t | |
6318 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6319 | __arm_vabavq_s16 (uint32_t __a, int16x8_t __b, int16x8_t __c) | |
6320 | { | |
6321 | return __builtin_mve_vabavq_sv8hi (__a, __b, __c); | |
6322 | } | |
6323 | ||
6324 | __extension__ extern __inline uint32_t | |
6325 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6326 | __arm_vabavq_s32 (uint32_t __a, int32x4_t __b, int32x4_t __c) | |
6327 | { | |
6328 | return __builtin_mve_vabavq_sv4si (__a, __b, __c); | |
6329 | } | |
6330 | ||
6331 | __extension__ extern __inline uint32_t | |
6332 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6333 | __arm_vabavq_u8 (uint32_t __a, uint8x16_t __b, uint8x16_t __c) | |
6334 | { | |
6335 | return __builtin_mve_vabavq_uv16qi(__a, __b, __c); | |
6336 | } | |
6337 | ||
6338 | __extension__ extern __inline uint32_t | |
6339 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6340 | __arm_vabavq_u16 (uint32_t __a, uint16x8_t __b, uint16x8_t __c) | |
6341 | { | |
6342 | return __builtin_mve_vabavq_uv8hi(__a, __b, __c); | |
6343 | } | |
6344 | ||
6345 | __extension__ extern __inline uint32_t | |
6346 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6347 | __arm_vabavq_u32 (uint32_t __a, uint32x4_t __b, uint32x4_t __c) | |
6348 | { | |
6349 | return __builtin_mve_vabavq_uv4si(__a, __b, __c); | |
6350 | } | |
6351 | ||
6352 | __extension__ extern __inline int16x8_t | |
6353 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6354 | __arm_vbicq_m_n_s16 (int16x8_t __a, const int __imm, mve_pred16_t __p) | |
6355 | { | |
6356 | return __builtin_mve_vbicq_m_n_sv8hi (__a, __imm, __p); | |
6357 | } | |
6358 | ||
6359 | __extension__ extern __inline int32x4_t | |
6360 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6361 | __arm_vbicq_m_n_s32 (int32x4_t __a, const int __imm, mve_pred16_t __p) | |
6362 | { | |
6363 | return __builtin_mve_vbicq_m_n_sv4si (__a, __imm, __p); | |
6364 | } | |
6365 | ||
6366 | __extension__ extern __inline uint16x8_t | |
6367 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6368 | __arm_vbicq_m_n_u16 (uint16x8_t __a, const int __imm, mve_pred16_t __p) | |
6369 | { | |
6370 | return __builtin_mve_vbicq_m_n_uv8hi (__a, __imm, __p); | |
6371 | } | |
6372 | ||
6373 | __extension__ extern __inline uint32x4_t | |
6374 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6375 | __arm_vbicq_m_n_u32 (uint32x4_t __a, const int __imm, mve_pred16_t __p) | |
6376 | { | |
6377 | return __builtin_mve_vbicq_m_n_uv4si (__a, __imm, __p); | |
6378 | } | |
6379 | ||
6380 | __extension__ extern __inline int8x16_t | |
6381 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6382 | __arm_vqrshrnbq_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm) | |
6383 | { | |
6384 | return __builtin_mve_vqrshrnbq_n_sv8hi (__a, __b, __imm); | |
6385 | } | |
6386 | ||
6387 | __extension__ extern __inline uint8x16_t | |
6388 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6389 | __arm_vqrshrnbq_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm) | |
6390 | { | |
6391 | return __builtin_mve_vqrshrnbq_n_uv8hi (__a, __b, __imm); | |
6392 | } | |
6393 | ||
6394 | __extension__ extern __inline int16x8_t | |
6395 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6396 | __arm_vqrshrnbq_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm) | |
6397 | { | |
6398 | return __builtin_mve_vqrshrnbq_n_sv4si (__a, __b, __imm); | |
6399 | } | |
6400 | ||
6401 | __extension__ extern __inline uint16x8_t | |
6402 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6403 | __arm_vqrshrnbq_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm) | |
6404 | { | |
6405 | return __builtin_mve_vqrshrnbq_n_uv4si (__a, __b, __imm); | |
6406 | } | |
6407 | ||
6408 | __extension__ extern __inline uint8x16_t | |
6409 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6410 | __arm_vqrshrunbq_n_s16 (uint8x16_t __a, int16x8_t __b, const int __imm) | |
6411 | { | |
6412 | return __builtin_mve_vqrshrunbq_n_sv8hi (__a, __b, __imm); | |
6413 | } | |
6414 | ||
6415 | __extension__ extern __inline uint16x8_t | |
6416 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6417 | __arm_vqrshrunbq_n_s32 (uint16x8_t __a, int32x4_t __b, const int __imm) | |
6418 | { | |
6419 | return __builtin_mve_vqrshrunbq_n_sv4si (__a, __b, __imm); | |
6420 | } | |
6421 | ||
6422 | __extension__ extern __inline int64_t | |
6423 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6424 | __arm_vrmlaldavhaq_s32 (int64_t __a, int32x4_t __b, int32x4_t __c) | |
6425 | { | |
6426 | return __builtin_mve_vrmlaldavhaq_sv4si (__a, __b, __c); | |
6427 | } | |
6428 | ||
6429 | __extension__ extern __inline uint64_t | |
6430 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6431 | __arm_vrmlaldavhaq_u32 (uint64_t __a, uint32x4_t __b, uint32x4_t __c) | |
6432 | { | |
6433 | return __builtin_mve_vrmlaldavhaq_uv4si (__a, __b, __c); | |
6434 | } | |
6435 | ||
6436 | __extension__ extern __inline int8x16_t | |
6437 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6438 | __arm_vshlcq_s8 (int8x16_t __a, uint32_t * __b, const int __imm) | |
6439 | { | |
6440 | int8x16_t __res = __builtin_mve_vshlcq_vec_sv16qi (__a, *__b, __imm); | |
6441 | *__b = __builtin_mve_vshlcq_carry_sv16qi (__a, *__b, __imm); | |
6442 | return __res; | |
6443 | } | |
6444 | ||
6445 | __extension__ extern __inline uint8x16_t | |
6446 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6447 | __arm_vshlcq_u8 (uint8x16_t __a, uint32_t * __b, const int __imm) | |
6448 | { | |
6449 | uint8x16_t __res = __builtin_mve_vshlcq_vec_uv16qi (__a, *__b, __imm); | |
6450 | *__b = __builtin_mve_vshlcq_carry_uv16qi (__a, *__b, __imm); | |
6451 | return __res; | |
6452 | } | |
6453 | ||
6454 | __extension__ extern __inline int16x8_t | |
6455 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6456 | __arm_vshlcq_s16 (int16x8_t __a, uint32_t * __b, const int __imm) | |
6457 | { | |
6458 | int16x8_t __res = __builtin_mve_vshlcq_vec_sv8hi (__a, *__b, __imm); | |
6459 | *__b = __builtin_mve_vshlcq_carry_sv8hi (__a, *__b, __imm); | |
6460 | return __res; | |
6461 | } | |
6462 | ||
6463 | __extension__ extern __inline uint16x8_t | |
6464 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6465 | __arm_vshlcq_u16 (uint16x8_t __a, uint32_t * __b, const int __imm) | |
6466 | { | |
6467 | uint16x8_t __res = __builtin_mve_vshlcq_vec_uv8hi (__a, *__b, __imm); | |
6468 | *__b = __builtin_mve_vshlcq_carry_uv8hi (__a, *__b, __imm); | |
6469 | return __res; | |
6470 | } | |
6471 | ||
6472 | __extension__ extern __inline int32x4_t | |
6473 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6474 | __arm_vshlcq_s32 (int32x4_t __a, uint32_t * __b, const int __imm) | |
6475 | { | |
6476 | int32x4_t __res = __builtin_mve_vshlcq_vec_sv4si (__a, *__b, __imm); | |
6477 | *__b = __builtin_mve_vshlcq_carry_sv4si (__a, *__b, __imm); | |
6478 | return __res; | |
6479 | } | |
6480 | ||
6481 | __extension__ extern __inline uint32x4_t | |
6482 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6483 | __arm_vshlcq_u32 (uint32x4_t __a, uint32_t * __b, const int __imm) | |
6484 | { | |
6485 | uint32x4_t __res = __builtin_mve_vshlcq_vec_uv4si (__a, *__b, __imm); | |
6486 | *__b = __builtin_mve_vshlcq_carry_uv4si (__a, *__b, __imm); | |
6487 | return __res; | |
6488 | } | |
6489 | ||
8165795c SP |
6490 | __extension__ extern __inline uint8x16_t |
6491 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6492 | __arm_vpselq_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
6493 | { | |
6494 | return __builtin_mve_vpselq_uv16qi (__a, __b, __p); | |
6495 | } | |
6496 | ||
6497 | __extension__ extern __inline int8x16_t | |
6498 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6499 | __arm_vpselq_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
6500 | { | |
6501 | return __builtin_mve_vpselq_sv16qi (__a, __b, __p); | |
6502 | } | |
6503 | ||
6504 | __extension__ extern __inline uint8x16_t | |
6505 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6506 | __arm_vrev64q_m_u8 (uint8x16_t __inactive, uint8x16_t __a, mve_pred16_t __p) | |
6507 | { | |
6508 | return __builtin_mve_vrev64q_m_uv16qi (__inactive, __a, __p); | |
6509 | } | |
6510 | ||
6511 | __extension__ extern __inline uint8x16_t | |
6512 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6513 | __arm_vqrdmlashq_n_u8 (uint8x16_t __a, uint8x16_t __b, uint8_t __c) | |
6514 | { | |
6515 | return __builtin_mve_vqrdmlashq_n_uv16qi (__a, __b, __c); | |
6516 | } | |
6517 | ||
6518 | __extension__ extern __inline uint8x16_t | |
6519 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6520 | __arm_vqrdmlahq_n_u8 (uint8x16_t __a, uint8x16_t __b, uint8_t __c) | |
6521 | { | |
6522 | return __builtin_mve_vqrdmlahq_n_uv16qi (__a, __b, __c); | |
6523 | } | |
6524 | ||
6525 | __extension__ extern __inline uint8x16_t | |
6526 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6527 | __arm_vqdmlahq_n_u8 (uint8x16_t __a, uint8x16_t __b, uint8_t __c) | |
6528 | { | |
6529 | return __builtin_mve_vqdmlahq_n_uv16qi (__a, __b, __c); | |
6530 | } | |
6531 | ||
6532 | __extension__ extern __inline uint8x16_t | |
6533 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6534 | __arm_vmvnq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, mve_pred16_t __p) | |
6535 | { | |
6536 | return __builtin_mve_vmvnq_m_uv16qi (__inactive, __a, __p); | |
6537 | } | |
6538 | ||
6539 | __extension__ extern __inline uint8x16_t | |
6540 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6541 | __arm_vmlasq_n_u8 (uint8x16_t __a, uint8x16_t __b, uint8_t __c) | |
6542 | { | |
6543 | return __builtin_mve_vmlasq_n_uv16qi (__a, __b, __c); | |
6544 | } | |
6545 | ||
6546 | __extension__ extern __inline uint8x16_t | |
6547 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6548 | __arm_vmlaq_n_u8 (uint8x16_t __a, uint8x16_t __b, uint8_t __c) | |
6549 | { | |
6550 | return __builtin_mve_vmlaq_n_uv16qi (__a, __b, __c); | |
6551 | } | |
6552 | ||
/* Unsigned 8-bit predicated reductions ("_p": only lanes enabled by __p
   participate) and the merging scalar duplicate.  */

/* VMLADAV (predicated): sum of per-lane products of __a and __b, as uint32.  */
__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavq_p_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmladavq_p_uv16qi (__a, __b, __p);
}

/* VMLADAVA: dot product of __b and __c accumulated onto scalar __a.  */
__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavaq_u8 (uint32_t __a, uint8x16_t __b, uint8x16_t __c)
{
  return __builtin_mve_vmladavaq_uv16qi (__a, __b, __c);
}

/* VMINV (predicated): minimum across enabled lanes of __b and scalar __a.  */
__extension__ extern __inline uint8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminvq_p_u8 (uint8_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminvq_p_uv16qi (__a, __b, __p);
}

/* VMAXV (predicated): maximum across enabled lanes of __b and scalar __a.  */
__extension__ extern __inline uint8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxvq_p_u8 (uint8_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxvq_p_uv16qi (__a, __b, __p);
}

/* VDUP (merging): broadcast scalar __a; disabled lanes keep __inactive.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdupq_m_n_u8 (uint8x16_t __inactive, uint8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vdupq_m_n_uv16qi (__inactive, __a, __p);
}
6587 | ||
/* Unsigned 8-bit predicated compares.  Each produces an mve_pred16_t lane
   mask; the input predicate __p gates which lanes are compared.  "_n" forms
   compare every lane of __a against the broadcast scalar __b.  */

/* VCMP.NE (vector, predicated).  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_m_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpneq_m_uv16qi (__a, __b, __p);
}

/* VCMP.NE (scalar, predicated).  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_m_n_u8 (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpneq_m_n_uv16qi (__a, __b, __p);
}

/* VCMP.HI — unsigned higher (vector, predicated).  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmphiq_m_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmphiq_m_uv16qi (__a, __b, __p);
}

/* VCMP.HI — unsigned higher (scalar, predicated).  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmphiq_m_n_u8 (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmphiq_m_n_uv16qi (__a, __b, __p);
}

/* VCMP.EQ (vector, predicated).  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_m_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpeqq_m_uv16qi (__a, __b, __p);
}

/* VCMP.EQ (scalar, predicated).  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_m_n_u8 (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpeqq_m_n_uv16qi (__a, __b, __p);
}

/* VCMP.CS — unsigned higher-or-same (vector, predicated).  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpcsq_m_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpcsq_m_uv16qi (__a, __b, __p);
}

/* VCMP.CS — unsigned higher-or-same (scalar, predicated).  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpcsq_m_n_u8 (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpcsq_m_n_uv16qi (__a, __b, __p);
}
6643 | ||
/* VCLZ (merging): count leading zeros per lane; disabled lanes keep __inactive.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclzq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vclzq_m_uv16qi (__inactive, __a, __p);
}

/* VADDVA (predicated): sum enabled lanes of __b into accumulator __a.  */
__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvaq_p_u8 (uint32_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vaddvaq_p_uv16qi (__a, __b, __p);
}
6657 | ||
/* Unsigned 8-bit shift/insert wrappers.  "_r"/"_m_n" register-shift forms
   take a single scalar shift amount in __b that applies to every lane.  */

/* VSRI: shift __b right by __imm and insert into __a (immediate form).  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsriq_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __imm)
{
  return __builtin_mve_vsriq_n_uv16qi (__a, __b, __imm);
}

/* VSLI: shift __b left by __imm and insert into __a (immediate form).  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsliq_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __imm)
{
  return __builtin_mve_vsliq_n_uv16qi (__a, __b, __imm);
}

/* VSHL (predicated, register): shift lanes of __a by scalar amount __b.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_m_r_u8 (uint8x16_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vshlq_m_r_uv16qi (__a, __b, __p);
}

/* VRSHL (predicated, register): rounding shift by scalar amount __b.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_m_n_u8 (uint8x16_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrshlq_m_n_uv16qi (__a, __b, __p);
}

/* VQSHL (predicated, register): saturating shift by scalar amount __b.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_r_u8 (uint8x16_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqshlq_m_r_uv16qi (__a, __b, __p);
}

/* VQRSHL (predicated, register): saturating rounding shift by scalar __b.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_m_n_u8 (uint8x16_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqrshlq_m_n_uv16qi (__a, __b, __p);
}
6699 | ||
/* Signed-input, unsigned-result "absolute" extrema wrappers: these operate on
   |lane| of the signed vector and produce/keep unsigned values.  */

/* VMINAV (predicated): min of __a and absolute values of enabled lanes.  */
__extension__ extern __inline uint8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminavq_p_s8 (uint8_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminavq_p_sv16qi (__a, __b, __p);
}

/* VMINA (predicated): per-lane min of __a and |__b| in enabled lanes.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminaq_m_s8 (uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminaq_m_sv16qi (__a, __b, __p);
}

/* VMAXAV (predicated): max of __a and absolute values of enabled lanes.  */
__extension__ extern __inline uint8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxavq_p_s8 (uint8_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxavq_p_sv16qi (__a, __b, __p);
}

/* VMAXA (predicated): per-lane max of __a and |__b| in enabled lanes.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxaq_m_s8 (uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxaq_m_sv16qi (__a, __b, __p);
}
6727 | ||
/* Signed 8-bit predicated compares; result is an mve_pred16_t lane mask and
   __p gates which lanes are compared.  "_n" forms broadcast scalar __b.  */

/* VCMP.NE (vector, predicated).  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_m_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpneq_m_sv16qi (__a, __b, __p);
}

/* VCMP.NE (scalar, predicated).  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_m_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpneq_m_n_sv16qi (__a, __b, __p);
}

/* VCMP.LT — signed less-than (vector, predicated).  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq_m_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpltq_m_sv16qi (__a, __b, __p);
}

/* VCMP.LT — signed less-than (scalar, predicated).  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq_m_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpltq_m_n_sv16qi (__a, __b, __p);
}

/* VCMP.LE — signed less-or-equal (vector, predicated).  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq_m_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpleq_m_sv16qi (__a, __b, __p);
}

/* VCMP.LE — signed less-or-equal (scalar, predicated).  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq_m_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpleq_m_n_sv16qi (__a, __b, __p);
}

/* VCMP.GT — signed greater-than (vector, predicated).  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq_m_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpgtq_m_sv16qi (__a, __b, __p);
}

/* VCMP.GT — signed greater-than (scalar, predicated).  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq_m_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpgtq_m_n_sv16qi (__a, __b, __p);
}

/* VCMP.GE — signed greater-or-equal (vector, predicated).  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq_m_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpgeq_m_sv16qi (__a, __b, __p);
}

/* VCMP.GE — signed greater-or-equal (scalar, predicated).  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq_m_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpgeq_m_n_sv16qi (__a, __b, __p);
}

/* VCMP.EQ (vector, predicated).  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_m_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpeqq_m_sv16qi (__a, __b, __p);
}

/* VCMP.EQ (scalar, predicated).  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_m_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpeqq_m_n_sv16qi (__a, __b, __p);
}
6811 | ||
/* Signed 8-bit register-shift wrappers and merging unary operations.  "_m"
   unary forms keep lanes of __inactive where the predicate __p is clear.  */

/* VSHL (predicated, register): shift lanes of __a by scalar amount __b.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_m_r_s8 (int8x16_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vshlq_m_r_sv16qi (__a, __b, __p);
}

/* VRSHL (predicated, register): rounding shift by scalar amount __b.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_m_n_s8 (int8x16_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrshlq_m_n_sv16qi (__a, __b, __p);
}

/* VREV64 (merging): reverse byte order within 64-bit doublewords.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_m_s8 (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrev64q_m_sv16qi (__inactive, __a, __p);
}

/* VQSHL (predicated, register): saturating shift by scalar amount __b.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_r_s8 (int8x16_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqshlq_m_r_sv16qi (__a, __b, __p);
}

/* VQRSHL (predicated, register): saturating rounding shift by scalar __b.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_m_n_s8 (int8x16_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqrshlq_m_n_sv16qi (__a, __b, __p);
}

/* VQNEG (merging): saturating negate per lane.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqnegq_m_s8 (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vqnegq_m_sv16qi (__inactive, __a, __p);
}

/* VQABS (merging): saturating absolute value per lane.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqabsq_m_s8 (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vqabsq_m_sv16qi (__inactive, __a, __p);
}

/* VNEG (merging): negate per lane.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vnegq_m_s8 (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vnegq_m_sv16qi (__inactive, __a, __p);
}

/* VMVN (merging): bitwise NOT per lane.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_m_s8 (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vmvnq_m_sv16qi (__inactive, __a, __p);
}
6875 | ||
/* Signed 8-bit predicated reductions ("x" variants use the exchanged/crossed
   pairing of the multiply operands).  Only lanes enabled by __p contribute.  */

/* VMLSDAVX (predicated): multiply-subtract dot product, exchanged operands.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavxq_p_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmlsdavxq_p_sv16qi (__a, __b, __p);
}

/* VMLSDAV (predicated): multiply-subtract dot product.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavq_p_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmlsdavq_p_sv16qi (__a, __b, __p);
}

/* VMLADAVX (predicated): multiply-add dot product, exchanged operands.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavxq_p_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmladavxq_p_sv16qi (__a, __b, __p);
}

/* VMLADAV (predicated): multiply-add dot product.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavq_p_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmladavq_p_sv16qi (__a, __b, __p);
}

/* VMINV (predicated): minimum across enabled lanes of __b and scalar __a.  */
__extension__ extern __inline int8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminvq_p_s8 (int8_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminvq_p_sv16qi (__a, __b, __p);
}

/* VMAXV (predicated): maximum across enabled lanes of __b and scalar __a.  */
__extension__ extern __inline int8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxvq_p_s8 (int8_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxvq_p_sv16qi (__a, __b, __p);
}
6917 | ||
/* VDUP (merging): broadcast scalar __a; disabled lanes keep __inactive.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdupq_m_n_s8 (int8x16_t __inactive, int8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vdupq_m_n_sv16qi (__inactive, __a, __p);
}

/* VCLZ (merging): count leading zeros per lane.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclzq_m_s8 (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vclzq_m_sv16qi (__inactive, __a, __p);
}

/* VCLS (merging): count leading sign bits per lane.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclsq_m_s8 (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vclsq_m_sv16qi (__inactive, __a, __p);
}

/* VADDVA (predicated): sum enabled lanes of __b into accumulator __a.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvaq_p_s8 (int32_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vaddvaq_p_sv16qi (__a, __b, __p);
}
6945 | ||
/* Merging absolute value plus the signed 8-bit saturating (doubling)
   multiply-accumulate family.  Naming: "qrd" = saturating rounding doubling,
   "qd" = saturating doubling (no rounding); "dh" variants combine lane pairs,
   "x" marks the exchanged operand pairing; "_n" forms take a scalar.  */

/* VABS (merging): absolute value per lane; disabled lanes keep __inactive.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabsq_m_s8 (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vabsq_m_sv16qi (__inactive, __a, __p);
}

/* VQRDMLSDHX: saturating rounding doubling multiply-subtract dual, exchanged.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlsdhxq_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vqrdmlsdhxq_sv16qi (__inactive, __a, __b);
}

/* VQRDMLSDH: saturating rounding doubling multiply-subtract dual.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlsdhq_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vqrdmlsdhq_sv16qi (__inactive, __a, __b);
}

/* VQRDMLASH: saturating rounding doubling multiply-accumulate, scalar addend.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlashq_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c)
{
  return __builtin_mve_vqrdmlashq_n_sv16qi (__a, __b, __c);
}

/* VQRDMLAH: saturating rounding doubling multiply-accumulate by scalar __c.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlahq_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c)
{
  return __builtin_mve_vqrdmlahq_n_sv16qi (__a, __b, __c);
}

/* VQRDMLADHX: saturating rounding doubling multiply-add dual, exchanged.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmladhxq_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vqrdmladhxq_sv16qi (__inactive, __a, __b);
}

/* VQRDMLADH: saturating rounding doubling multiply-add dual.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmladhq_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vqrdmladhq_sv16qi (__inactive, __a, __b);
}

/* VQDMLSDHX: saturating doubling multiply-subtract dual, exchanged.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlsdhxq_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vqdmlsdhxq_sv16qi (__inactive, __a, __b);
}

/* VQDMLSDH: saturating doubling multiply-subtract dual.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlsdhq_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vqdmlsdhq_sv16qi (__inactive, __a, __b);
}

/* VQDMLAH: saturating doubling multiply-accumulate by scalar __c.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlahq_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c)
{
  return __builtin_mve_vqdmlahq_n_sv16qi (__a, __b, __c);
}

/* VQDMLADHX: saturating doubling multiply-add dual, exchanged.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmladhxq_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vqdmladhxq_sv16qi (__inactive, __a, __b);
}

/* VQDMLADH: saturating doubling multiply-add dual.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmladhq_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vqdmladhq_sv16qi (__inactive, __a, __b);
}
7029 | ||
/* Signed 8-bit accumulating dot products, multiply-accumulate by scalar, and
   shift-insert wrappers.  */

/* VMLSDAVAX: multiply-subtract dot product accumulated onto __a, exchanged.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavaxq_s8 (int32_t __a, int8x16_t __b, int8x16_t __c)
{
  return __builtin_mve_vmlsdavaxq_sv16qi (__a, __b, __c);
}

/* VMLSDAVA: multiply-subtract dot product accumulated onto __a.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavaq_s8 (int32_t __a, int8x16_t __b, int8x16_t __c)
{
  return __builtin_mve_vmlsdavaq_sv16qi (__a, __b, __c);
}

/* VMLAS: multiply-accumulate with scalar addend.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlasq_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c)
{
  return __builtin_mve_vmlasq_n_sv16qi (__a, __b, __c);
}

/* VMLA: multiply by scalar __c and accumulate into __a.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaq_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c)
{
  return __builtin_mve_vmlaq_n_sv16qi (__a, __b, __c);
}

/* VMLADAVAX: multiply-add dot product accumulated onto __a, exchanged.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavaxq_s8 (int32_t __a, int8x16_t __b, int8x16_t __c)
{
  return __builtin_mve_vmladavaxq_sv16qi (__a, __b, __c);
}

/* VMLADAVA: multiply-add dot product accumulated onto __a.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavaq_s8 (int32_t __a, int8x16_t __b, int8x16_t __c)
{
  return __builtin_mve_vmladavaq_sv16qi (__a, __b, __c);
}

/* VSRI: shift __b right by __imm and insert into __a.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsriq_n_s8 (int8x16_t __a, int8x16_t __b, const int __imm)
{
  return __builtin_mve_vsriq_n_sv16qi (__a, __b, __imm);
}

/* VSLI: shift __b left by __imm and insert into __a.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsliq_n_s8 (int8x16_t __a, int8x16_t __b, const int __imm)
{
  return __builtin_mve_vsliq_n_sv16qi (__a, __b, __imm);
}
7085 | ||
/* VPSEL: per-lane select — lanes of __a where the predicate bit is set,
   lanes of __b elsewhere.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vpselq_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vpselq_uv8hi (__a, __b, __p);
}

/* VPSEL (signed 16-bit): per-lane select between __a and __b by __p.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vpselq_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vpselq_sv8hi (__a, __b, __p);
}
7099 | ||
/* Unsigned 16-bit merging unary ops and scalar multiply-accumulate wrappers
   (same family as the u8 versions above, on 8 halfword lanes).  */

/* VREV64 (merging): reverse halfwords within 64-bit doublewords.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_m_u16 (uint16x8_t __inactive, uint16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrev64q_m_uv8hi (__inactive, __a, __p);
}

/* VQRDMLASH: saturating rounding doubling multiply-accumulate, scalar addend.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlashq_n_u16 (uint16x8_t __a, uint16x8_t __b, uint16_t __c)
{
  return __builtin_mve_vqrdmlashq_n_uv8hi (__a, __b, __c);
}

/* VQRDMLAH: saturating rounding doubling multiply-accumulate by scalar __c.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlahq_n_u16 (uint16x8_t __a, uint16x8_t __b, uint16_t __c)
{
  return __builtin_mve_vqrdmlahq_n_uv8hi (__a, __b, __c);
}

/* VQDMLAH: saturating doubling multiply-accumulate by scalar __c.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlahq_n_u16 (uint16x8_t __a, uint16x8_t __b, uint16_t __c)
{
  return __builtin_mve_vqdmlahq_n_uv8hi (__a, __b, __c);
}

/* VMVN (merging): bitwise NOT per lane; disabled lanes keep __inactive.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vmvnq_m_uv8hi (__inactive, __a, __p);
}

/* VMLAS: multiply-accumulate with scalar addend.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlasq_n_u16 (uint16x8_t __a, uint16x8_t __b, uint16_t __c)
{
  return __builtin_mve_vmlasq_n_uv8hi (__a, __b, __c);
}

/* VMLA: multiply by scalar __c and accumulate into __a.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaq_n_u16 (uint16x8_t __a, uint16x8_t __b, uint16_t __c)
{
  return __builtin_mve_vmlaq_n_uv8hi (__a, __b, __c);
}
7148 | ||
/* Unsigned 16-bit predicated reductions and the merging scalar duplicate.  */

/* VMLADAV (predicated): sum of per-lane products of __a and __b, as uint32.  */
__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavq_p_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmladavq_p_uv8hi (__a, __b, __p);
}

/* VMLADAVA: dot product of __b and __c accumulated onto scalar __a.  */
__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavaq_u16 (uint32_t __a, uint16x8_t __b, uint16x8_t __c)
{
  return __builtin_mve_vmladavaq_uv8hi (__a, __b, __c);
}

/* VMINV (predicated): minimum across enabled lanes of __b and scalar __a.  */
__extension__ extern __inline uint16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminvq_p_u16 (uint16_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminvq_p_uv8hi (__a, __b, __p);
}

/* VMAXV (predicated): maximum across enabled lanes of __b and scalar __a.  */
__extension__ extern __inline uint16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxvq_p_u16 (uint16_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxvq_p_uv8hi (__a, __b, __p);
}

/* VDUP (merging): broadcast scalar __a; disabled lanes keep __inactive.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdupq_m_n_u16 (uint16x8_t __inactive, uint16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vdupq_m_n_uv8hi (__inactive, __a, __p);
}
7183 | ||
/* Unsigned 16-bit predicated compares; result is an mve_pred16_t lane mask
   and __p gates which lanes are compared.  "_n" forms broadcast scalar __b.  */

/* VCMP.NE (vector, predicated).  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_m_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpneq_m_uv8hi (__a, __b, __p);
}

/* VCMP.NE (scalar, predicated).  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_m_n_u16 (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpneq_m_n_uv8hi (__a, __b, __p);
}

/* VCMP.HI — unsigned higher (vector, predicated).  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmphiq_m_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmphiq_m_uv8hi (__a, __b, __p);
}

/* VCMP.HI — unsigned higher (scalar, predicated).  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmphiq_m_n_u16 (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmphiq_m_n_uv8hi (__a, __b, __p);
}

/* VCMP.EQ (vector, predicated).  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_m_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpeqq_m_uv8hi (__a, __b, __p);
}

/* VCMP.EQ (scalar, predicated).  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_m_n_u16 (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpeqq_m_n_uv8hi (__a, __b, __p);
}

/* VCMP.CS — unsigned higher-or-same (vector, predicated).  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpcsq_m_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpcsq_m_uv8hi (__a, __b, __p);
}

/* VCMP.CS — unsigned higher-or-same (scalar, predicated).  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpcsq_m_n_u16 (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpcsq_m_n_uv8hi (__a, __b, __p);
}
7239 | ||
/* VCLZ (merging): count leading zeros per lane; disabled lanes keep __inactive.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclzq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vclzq_m_uv8hi (__inactive, __a, __p);
}

/* VADDVA (predicated): sum enabled lanes of __b into accumulator __a.  */
__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvaq_p_u16 (uint32_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vaddvaq_p_uv8hi (__a, __b, __p);
}
7253 | ||
/* Unsigned 16-bit shift/insert wrappers; register-shift forms take a single
   scalar shift amount in __b applied to every lane.  */

/* VSRI: shift __b right by __imm and insert into __a (immediate form).  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsriq_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __imm)
{
  return __builtin_mve_vsriq_n_uv8hi (__a, __b, __imm);
}

/* VSLI: shift __b left by __imm and insert into __a (immediate form).  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsliq_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __imm)
{
  return __builtin_mve_vsliq_n_uv8hi (__a, __b, __imm);
}

/* VSHL (predicated, register): shift lanes of __a by scalar amount __b.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_m_r_u16 (uint16x8_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vshlq_m_r_uv8hi (__a, __b, __p);
}

/* VRSHL (predicated, register): rounding shift by scalar amount __b.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_m_n_u16 (uint16x8_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrshlq_m_n_uv8hi (__a, __b, __p);
}

/* VQSHL (predicated, register): saturating shift by scalar amount __b.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_r_u16 (uint16x8_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqshlq_m_r_uv8hi (__a, __b, __p);
}

/* VQRSHL (predicated, register): saturating rounding shift by scalar __b.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_m_n_u16 (uint16x8_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqrshlq_m_n_uv8hi (__a, __b, __p);
}
7295 | ||
/* MVE intrinsics for 8 x int16_t vectors (v8hi): min/max-absolute
   reductions followed by the full set of predicated compares.  Each
   wrapper forwards unchanged to the matching __builtin_mve_* builtin;
   semantics are those of the Arm MVE ACLE (TODO confirm against spec).  */

/* Predicated minimum across |lanes of __b| and scalar __a.  */
__extension__ extern __inline uint16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminavq_p_s16 (uint16_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminavq_p_sv8hi (__a, __b, __p);
}

/* Predicated lane-wise minimum of __a and |__b|.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminaq_m_s16 (uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminaq_m_sv8hi (__a, __b, __p);
}

/* Predicated maximum across |lanes of __b| and scalar __a.  */
__extension__ extern __inline uint16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxavq_p_s16 (uint16_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxavq_p_sv8hi (__a, __b, __p);
}

/* Predicated lane-wise maximum of __a and |__b|.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxaq_m_s16 (uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxaq_m_sv8hi (__a, __b, __p);
}

/* Predicated signed compares on int16x8_t, each returning an
   mve_pred16_t lane mask.  Vector/vector and vector/scalar (_n) forms
   are provided for !=, <, <=, >, >= and ==.  */

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_m_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpneq_m_sv8hi (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_m_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpneq_m_n_sv8hi (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq_m_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpltq_m_sv8hi (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq_m_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpltq_m_n_sv8hi (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq_m_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpleq_m_sv8hi (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq_m_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpleq_m_n_sv8hi (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq_m_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpgtq_m_sv8hi (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq_m_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpgtq_m_n_sv8hi (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq_m_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpgeq_m_sv8hi (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq_m_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpgeq_m_n_sv8hi (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_m_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpeqq_m_sv8hi (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_m_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpeqq_m_n_sv8hi (__a, __b, __p);
}
7407 | ||
/* MVE intrinsics for 8 x int16_t vectors (v8hi): predicated shifts,
   predicated unary operations (with an explicit __inactive vector
   supplying false-predicated lanes), and predicated dot-product /
   reduction operations.  All are direct forwards to __builtin_mve_*;
   semantics per the Arm MVE ACLE (TODO confirm against spec).  */

/* Predicated shift left by register count __b.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_m_r_s16 (int16x8_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vshlq_m_r_sv8hi (__a, __b, __p);
}

/* Predicated rounding shift left by scalar __b.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_m_n_s16 (int16x8_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrshlq_m_n_sv8hi (__a, __b, __p);
}

/* Predicated reversal of elements within 64-bit halves.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_m_s16 (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrev64q_m_sv8hi (__inactive, __a, __p);
}

/* Predicated saturating shift left by register count __b.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_r_s16 (int16x8_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqshlq_m_r_sv8hi (__a, __b, __p);
}

/* Predicated saturating rounding shift left by scalar __b.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_m_n_s16 (int16x8_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqrshlq_m_n_sv8hi (__a, __b, __p);
}

/* Predicated saturating negate.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqnegq_m_s16 (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vqnegq_m_sv8hi (__inactive, __a, __p);
}

/* Predicated saturating absolute value.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqabsq_m_s16 (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vqabsq_m_sv8hi (__inactive, __a, __p);
}

/* Predicated negate.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vnegq_m_s16 (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vnegq_m_sv8hi (__inactive, __a, __p);
}

/* Predicated bitwise NOT.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_m_s16 (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vmvnq_m_sv8hi (__inactive, __a, __p);
}

/* Predicated multiply-subtract dot product, exchanged pairing.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavxq_p_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmlsdavxq_p_sv8hi (__a, __b, __p);
}

/* Predicated multiply-subtract dot product.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavq_p_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmlsdavq_p_sv8hi (__a, __b, __p);
}

/* Predicated multiply-add dot product, exchanged pairing.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavxq_p_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmladavxq_p_sv8hi (__a, __b, __p);
}

/* Predicated multiply-add dot product.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavq_p_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmladavq_p_sv8hi (__a, __b, __p);
}

/* Predicated minimum across vector lanes and scalar __a.  */
__extension__ extern __inline int16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminvq_p_s16 (int16_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminvq_p_sv8hi (__a, __b, __p);
}

/* Predicated maximum across vector lanes and scalar __a.  */
__extension__ extern __inline int16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxvq_p_s16 (int16_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxvq_p_sv8hi (__a, __b, __p);
}

/* Predicated broadcast of scalar __a.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdupq_m_n_s16 (int16x8_t __inactive, int16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vdupq_m_n_sv8hi (__inactive, __a, __p);
}

/* Predicated count-leading-zeros.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclzq_m_s16 (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vclzq_m_sv8hi (__inactive, __a, __p);
}

/* Predicated count-leading-sign-bits.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclsq_m_s16 (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vclsq_m_sv8hi (__inactive, __a, __p);
}

/* Predicated add-across-vector, accumulating into scalar __a.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvaq_p_s16 (int32_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vaddvaq_p_sv8hi (__a, __b, __p);
}

/* Predicated absolute value.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabsq_m_s16 (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vabsq_m_sv8hi (__inactive, __a, __p);
}
7547 | ||
/* MVE multiply-accumulate intrinsics for 8 x int16_t vectors (v8hi).
   Name decoding (Arm MVE ACLE — TODO confirm against spec):
     vq...   saturating        r (after vq) rounding
     dh/dhx  "dual"/"dual exchanged" lane pairing
     mla/mls multiply-add / multiply-subtract
     dav/a   dot-product accumulate across vector / with accumulator
     _n      scalar multiplier operand
   All wrappers forward directly to the corresponding builtin.  */

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlsdhxq_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vqrdmlsdhxq_sv8hi (__inactive, __a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlsdhq_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vqrdmlsdhq_sv8hi (__inactive, __a, __b);
}

/* Saturating rounding multiply-accumulate-subtract with scalar __c.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlashq_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c)
{
  return __builtin_mve_vqrdmlashq_n_sv8hi (__a, __b, __c);
}

/* Saturating rounding multiply-accumulate with scalar __c.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlahq_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c)
{
  return __builtin_mve_vqrdmlahq_n_sv8hi (__a, __b, __c);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmladhxq_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vqrdmladhxq_sv8hi (__inactive, __a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmladhq_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vqrdmladhq_sv8hi (__inactive, __a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlsdhxq_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vqdmlsdhxq_sv8hi (__inactive, __a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlsdhq_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vqdmlsdhq_sv8hi (__inactive, __a, __b);
}

/* Saturating doubling multiply-accumulate with scalar __c.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlahq_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c)
{
  return __builtin_mve_vqdmlahq_n_sv8hi (__a, __b, __c);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmladhxq_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vqdmladhxq_sv8hi (__inactive, __a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmladhq_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vqdmladhq_sv8hi (__inactive, __a, __b);
}

/* Multiply-subtract dot product, exchanged pairing, accumulating
   into scalar __a.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavaxq_s16 (int32_t __a, int16x8_t __b, int16x8_t __c)
{
  return __builtin_mve_vmlsdavaxq_sv8hi (__a, __b, __c);
}

/* Multiply-subtract dot product accumulating into scalar __a.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavaq_s16 (int32_t __a, int16x8_t __b, int16x8_t __c)
{
  return __builtin_mve_vmlsdavaq_sv8hi (__a, __b, __c);
}

/* Multiply vector by vector, add scalar __c (per lane).  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlasq_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c)
{
  return __builtin_mve_vmlasq_n_sv8hi (__a, __b, __c);
}

/* Multiply vector by scalar __c, add vector (per lane).  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaq_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c)
{
  return __builtin_mve_vmlaq_n_sv8hi (__a, __b, __c);
}

/* Multiply-add dot product, exchanged pairing, accumulating into __a.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavaxq_s16 (int32_t __a, int16x8_t __b, int16x8_t __c)
{
  return __builtin_mve_vmladavaxq_sv8hi (__a, __b, __c);
}

/* Multiply-add dot product accumulating into scalar __a.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavaq_s16 (int32_t __a, int16x8_t __b, int16x8_t __c)
{
  return __builtin_mve_vmladavaq_sv8hi (__a, __b, __c);
}

/* Shift-right-and-insert of __b into __a by immediate __imm.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsriq_n_s16 (int16x8_t __a, int16x8_t __b, const int __imm)
{
  return __builtin_mve_vsriq_n_sv8hi (__a, __b, __imm);
}

/* Shift-left-and-insert of __b into __a by immediate __imm.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsliq_n_s16 (int16x8_t __a, int16x8_t __b, const int __imm)
{
  return __builtin_mve_vsliq_n_sv8hi (__a, __b, __imm);
}
7680 | ||
/* MVE intrinsics for 4 x 32-bit vectors (v4si mode): predicate-based
   select, predicated unary/MAC operations, reductions and compares on
   uint32x4_t.  Each wrapper forwards directly to the corresponding
   __builtin_mve_* builtin; semantics follow the Arm MVE ACLE
   (TODO confirm against spec).  */

/* Predicate-controlled lane select between __a and __b (unsigned).  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vpselq_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vpselq_uv4si (__a, __b, __p);
}

/* Predicate-controlled lane select between __a and __b (signed).  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vpselq_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vpselq_sv4si (__a, __b, __p);
}

/* Predicated reversal of elements within 64-bit halves.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_m_u32 (uint32x4_t __inactive, uint32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrev64q_m_uv4si (__inactive, __a, __p);
}

/* Saturating rounding multiply-accumulate-subtract with scalar __c.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlashq_n_u32 (uint32x4_t __a, uint32x4_t __b, uint32_t __c)
{
  return __builtin_mve_vqrdmlashq_n_uv4si (__a, __b, __c);
}

/* Saturating rounding multiply-accumulate with scalar __c.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlahq_n_u32 (uint32x4_t __a, uint32x4_t __b, uint32_t __c)
{
  return __builtin_mve_vqrdmlahq_n_uv4si (__a, __b, __c);
}

/* Saturating doubling multiply-accumulate with scalar __c.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlahq_n_u32 (uint32x4_t __a, uint32x4_t __b, uint32_t __c)
{
  return __builtin_mve_vqdmlahq_n_uv4si (__a, __b, __c);
}

/* Predicated bitwise NOT.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vmvnq_m_uv4si (__inactive, __a, __p);
}

/* Multiply vector by vector, add scalar __c (per lane).  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlasq_n_u32 (uint32x4_t __a, uint32x4_t __b, uint32_t __c)
{
  return __builtin_mve_vmlasq_n_uv4si (__a, __b, __c);
}

/* Multiply vector by scalar __c, add vector (per lane).  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaq_n_u32 (uint32x4_t __a, uint32x4_t __b, uint32_t __c)
{
  return __builtin_mve_vmlaq_n_uv4si (__a, __b, __c);
}

/* Predicated multiply-add dot product across the vector.  */
__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavq_p_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmladavq_p_uv4si (__a, __b, __p);
}

/* Multiply-add dot product accumulating into scalar __a.  */
__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavaq_u32 (uint32_t __a, uint32x4_t __b, uint32x4_t __c)
{
  return __builtin_mve_vmladavaq_uv4si (__a, __b, __c);
}

/* Predicated minimum across vector lanes and scalar __a.  */
__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminvq_p_u32 (uint32_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminvq_p_uv4si (__a, __b, __p);
}

/* Predicated maximum across vector lanes and scalar __a.  */
__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxvq_p_u32 (uint32_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxvq_p_uv4si (__a, __b, __p);
}

/* Predicated broadcast of scalar __a.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdupq_m_n_u32 (uint32x4_t __inactive, uint32_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vdupq_m_n_uv4si (__inactive, __a, __p);
}

/* Predicated unsigned compares on uint32x4_t, each returning an
   mve_pred16_t lane mask; vector/vector and vector/scalar (_n) forms
   for !=, "higher" (>), == and "carry set" (>=).  */

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_m_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpneq_m_uv4si (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_m_n_u32 (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpneq_m_n_uv4si (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmphiq_m_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmphiq_m_uv4si (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmphiq_m_n_u32 (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmphiq_m_n_uv4si (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_m_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpeqq_m_uv4si (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_m_n_u32 (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpeqq_m_n_uv4si (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpcsq_m_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpcsq_m_uv4si (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpcsq_m_n_u32 (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpcsq_m_n_uv4si (__a, __b, __p);
}

/* Predicated count-leading-zeros.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclzq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vclzq_m_uv4si (__inactive, __a, __p);
}

/* Predicated add-across-vector, accumulating into scalar __a.  */
__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvaq_p_u32 (uint32_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vaddvaq_p_uv4si (__a, __b, __p);
}

/* Shift-right-and-insert of __b into __a by immediate __imm.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsriq_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __imm)
{
  return __builtin_mve_vsriq_n_uv4si (__a, __b, __imm);
}

/* Shift-left-and-insert of __b into __a by immediate __imm.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsliq_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __imm)
{
  return __builtin_mve_vsliq_n_uv4si (__a, __b, __imm);
}
7862 | ||
/* MVE intrinsics for 4 x 32-bit vectors (v4si mode): predicated u32
   shifts, s32 min/max-absolute reductions, the predicated s32 compare
   set, and predicated s32 shifts.  All wrappers forward directly to
   __builtin_mve_* builtins; semantics per the Arm MVE ACLE
   (TODO confirm against spec).  */

/* Predicated shift left by register count __b.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_m_r_u32 (uint32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vshlq_m_r_uv4si (__a, __b, __p);
}

/* Predicated rounding shift left by scalar __b.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_m_n_u32 (uint32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrshlq_m_n_uv4si (__a, __b, __p);
}

/* Predicated saturating shift left by register count __b.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_r_u32 (uint32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqshlq_m_r_uv4si (__a, __b, __p);
}

/* Predicated saturating rounding shift left by scalar __b.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_m_n_u32 (uint32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqrshlq_m_n_uv4si (__a, __b, __p);
}

/* Predicated minimum across |lanes of __b| and scalar __a.  */
__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminavq_p_s32 (uint32_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminavq_p_sv4si (__a, __b, __p);
}

/* Predicated lane-wise minimum of __a and |__b|.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminaq_m_s32 (uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminaq_m_sv4si (__a, __b, __p);
}

/* Predicated maximum across |lanes of __b| and scalar __a.  */
__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxavq_p_s32 (uint32_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxavq_p_sv4si (__a, __b, __p);
}

/* Predicated lane-wise maximum of __a and |__b|.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxaq_m_s32 (uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxaq_m_sv4si (__a, __b, __p);
}

/* Predicated signed compares on int32x4_t, each returning an
   mve_pred16_t lane mask; vector/vector and vector/scalar (_n) forms
   for !=, <, <=, >, >= and ==.  */

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_m_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpneq_m_sv4si (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_m_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpneq_m_n_sv4si (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq_m_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpltq_m_sv4si (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq_m_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpltq_m_n_sv4si (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq_m_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpleq_m_sv4si (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq_m_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpleq_m_n_sv4si (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq_m_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpgtq_m_sv4si (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq_m_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpgtq_m_n_sv4si (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq_m_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpgeq_m_sv4si (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq_m_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpgeq_m_n_sv4si (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_m_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpeqq_m_sv4si (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_m_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpeqq_m_n_sv4si (__a, __b, __p);
}

/* Predicated shift left by register count __b.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_m_r_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vshlq_m_r_sv4si (__a, __b, __p);
}

/* Predicated rounding shift left by scalar __b.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_m_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrshlq_m_n_sv4si (__a, __b, __p);
}

/* Predicated reversal of elements within 64-bit halves.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_m_s32 (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrev64q_m_sv4si (__inactive, __a, __p);
}
8023 | ||
8024 | __extension__ extern __inline int32x4_t | |
8025 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8026 | __arm_vqshlq_m_r_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p) | |
8027 | { | |
8028 | return __builtin_mve_vqshlq_m_r_sv4si (__a, __b, __p); | |
8029 | } | |
8030 | ||
8031 | __extension__ extern __inline int32x4_t | |
8032 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8033 | __arm_vqrshlq_m_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p) | |
8034 | { | |
8035 | return __builtin_mve_vqrshlq_m_n_sv4si (__a, __b, __p); | |
8036 | } | |
8037 | ||
8038 | __extension__ extern __inline int32x4_t | |
8039 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8040 | __arm_vqnegq_m_s32 (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p) | |
8041 | { | |
8042 | return __builtin_mve_vqnegq_m_sv4si (__inactive, __a, __p); | |
8043 | } | |
8044 | ||
8045 | __extension__ extern __inline int32x4_t | |
8046 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8047 | __arm_vqabsq_m_s32 (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p) | |
8048 | { | |
8049 | return __builtin_mve_vqabsq_m_sv4si (__inactive, __a, __p); | |
8050 | } | |
8051 | ||
8052 | __extension__ extern __inline int32x4_t | |
8053 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8054 | __arm_vnegq_m_s32 (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p) | |
8055 | { | |
8056 | return __builtin_mve_vnegq_m_sv4si (__inactive, __a, __p); | |
8057 | } | |
8058 | ||
8059 | __extension__ extern __inline int32x4_t | |
8060 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8061 | __arm_vmvnq_m_s32 (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p) | |
8062 | { | |
8063 | return __builtin_mve_vmvnq_m_sv4si (__inactive, __a, __p); | |
8064 | } | |
8065 | ||
8066 | __extension__ extern __inline int32_t | |
8067 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8068 | __arm_vmlsdavxq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
8069 | { | |
8070 | return __builtin_mve_vmlsdavxq_p_sv4si (__a, __b, __p); | |
8071 | } | |
8072 | ||
8073 | __extension__ extern __inline int32_t | |
8074 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8075 | __arm_vmlsdavq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
8076 | { | |
8077 | return __builtin_mve_vmlsdavq_p_sv4si (__a, __b, __p); | |
8078 | } | |
8079 | ||
8080 | __extension__ extern __inline int32_t | |
8081 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8082 | __arm_vmladavxq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
8083 | { | |
8084 | return __builtin_mve_vmladavxq_p_sv4si (__a, __b, __p); | |
8085 | } | |
8086 | ||
8087 | __extension__ extern __inline int32_t | |
8088 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8089 | __arm_vmladavq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
8090 | { | |
8091 | return __builtin_mve_vmladavq_p_sv4si (__a, __b, __p); | |
8092 | } | |
8093 | ||
8094 | __extension__ extern __inline int32_t | |
8095 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8096 | __arm_vminvq_p_s32 (int32_t __a, int32x4_t __b, mve_pred16_t __p) | |
8097 | { | |
8098 | return __builtin_mve_vminvq_p_sv4si (__a, __b, __p); | |
8099 | } | |
8100 | ||
8101 | __extension__ extern __inline int32_t | |
8102 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8103 | __arm_vmaxvq_p_s32 (int32_t __a, int32x4_t __b, mve_pred16_t __p) | |
8104 | { | |
8105 | return __builtin_mve_vmaxvq_p_sv4si (__a, __b, __p); | |
8106 | } | |
8107 | ||
8108 | __extension__ extern __inline int32x4_t | |
8109 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8110 | __arm_vdupq_m_n_s32 (int32x4_t __inactive, int32_t __a, mve_pred16_t __p) | |
8111 | { | |
8112 | return __builtin_mve_vdupq_m_n_sv4si (__inactive, __a, __p); | |
8113 | } | |
8114 | ||
8115 | __extension__ extern __inline int32x4_t | |
8116 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8117 | __arm_vclzq_m_s32 (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p) | |
8118 | { | |
8119 | return __builtin_mve_vclzq_m_sv4si (__inactive, __a, __p); | |
8120 | } | |
8121 | ||
8122 | __extension__ extern __inline int32x4_t | |
8123 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8124 | __arm_vclsq_m_s32 (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p) | |
8125 | { | |
8126 | return __builtin_mve_vclsq_m_sv4si (__inactive, __a, __p); | |
8127 | } | |
8128 | ||
8129 | __extension__ extern __inline int32_t | |
8130 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8131 | __arm_vaddvaq_p_s32 (int32_t __a, int32x4_t __b, mve_pred16_t __p) | |
8132 | { | |
8133 | return __builtin_mve_vaddvaq_p_sv4si (__a, __b, __p); | |
8134 | } | |
8135 | ||
8136 | __extension__ extern __inline int32x4_t | |
8137 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8138 | __arm_vabsq_m_s32 (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p) | |
8139 | { | |
8140 | return __builtin_mve_vabsq_m_sv4si (__inactive, __a, __p); | |
8141 | } | |
8142 | ||
8143 | __extension__ extern __inline int32x4_t | |
8144 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8145 | __arm_vqrdmlsdhxq_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b) | |
8146 | { | |
8147 | return __builtin_mve_vqrdmlsdhxq_sv4si (__inactive, __a, __b); | |
8148 | } | |
8149 | ||
8150 | __extension__ extern __inline int32x4_t | |
8151 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8152 | __arm_vqrdmlsdhq_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b) | |
8153 | { | |
8154 | return __builtin_mve_vqrdmlsdhq_sv4si (__inactive, __a, __b); | |
8155 | } | |
8156 | ||
8157 | __extension__ extern __inline int32x4_t | |
8158 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8159 | __arm_vqrdmlashq_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c) | |
8160 | { | |
8161 | return __builtin_mve_vqrdmlashq_n_sv4si (__a, __b, __c); | |
8162 | } | |
8163 | ||
8164 | __extension__ extern __inline int32x4_t | |
8165 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8166 | __arm_vqrdmlahq_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c) | |
8167 | { | |
8168 | return __builtin_mve_vqrdmlahq_n_sv4si (__a, __b, __c); | |
8169 | } | |
8170 | ||
8171 | __extension__ extern __inline int32x4_t | |
8172 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8173 | __arm_vqrdmladhxq_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b) | |
8174 | { | |
8175 | return __builtin_mve_vqrdmladhxq_sv4si (__inactive, __a, __b); | |
8176 | } | |
8177 | ||
8178 | __extension__ extern __inline int32x4_t | |
8179 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8180 | __arm_vqrdmladhq_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b) | |
8181 | { | |
8182 | return __builtin_mve_vqrdmladhq_sv4si (__inactive, __a, __b); | |
8183 | } | |
8184 | ||
8185 | __extension__ extern __inline int32x4_t | |
8186 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8187 | __arm_vqdmlsdhxq_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b) | |
8188 | { | |
8189 | return __builtin_mve_vqdmlsdhxq_sv4si (__inactive, __a, __b); | |
8190 | } | |
8191 | ||
8192 | __extension__ extern __inline int32x4_t | |
8193 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8194 | __arm_vqdmlsdhq_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b) | |
8195 | { | |
8196 | return __builtin_mve_vqdmlsdhq_sv4si (__inactive, __a, __b); | |
8197 | } | |
8198 | ||
8199 | __extension__ extern __inline int32x4_t | |
8200 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8201 | __arm_vqdmlahq_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c) | |
8202 | { | |
8203 | return __builtin_mve_vqdmlahq_n_sv4si (__a, __b, __c); | |
8204 | } | |
8205 | ||
8206 | __extension__ extern __inline int32x4_t | |
8207 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8208 | __arm_vqdmladhxq_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b) | |
8209 | { | |
8210 | return __builtin_mve_vqdmladhxq_sv4si (__inactive, __a, __b); | |
8211 | } | |
8212 | ||
8213 | __extension__ extern __inline int32x4_t | |
8214 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8215 | __arm_vqdmladhq_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b) | |
8216 | { | |
8217 | return __builtin_mve_vqdmladhq_sv4si (__inactive, __a, __b); | |
8218 | } | |
8219 | ||
8220 | __extension__ extern __inline int32_t | |
8221 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8222 | __arm_vmlsdavaxq_s32 (int32_t __a, int32x4_t __b, int32x4_t __c) | |
8223 | { | |
8224 | return __builtin_mve_vmlsdavaxq_sv4si (__a, __b, __c); | |
8225 | } | |
8226 | ||
8227 | __extension__ extern __inline int32_t | |
8228 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8229 | __arm_vmlsdavaq_s32 (int32_t __a, int32x4_t __b, int32x4_t __c) | |
8230 | { | |
8231 | return __builtin_mve_vmlsdavaq_sv4si (__a, __b, __c); | |
8232 | } | |
8233 | ||
8234 | __extension__ extern __inline int32x4_t | |
8235 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8236 | __arm_vmlasq_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c) | |
8237 | { | |
8238 | return __builtin_mve_vmlasq_n_sv4si (__a, __b, __c); | |
8239 | } | |
8240 | ||
8241 | __extension__ extern __inline int32x4_t | |
8242 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8243 | __arm_vmlaq_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c) | |
8244 | { | |
8245 | return __builtin_mve_vmlaq_n_sv4si (__a, __b, __c); | |
8246 | } | |
8247 | ||
8248 | __extension__ extern __inline int32_t | |
8249 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8250 | __arm_vmladavaxq_s32 (int32_t __a, int32x4_t __b, int32x4_t __c) | |
8251 | { | |
8252 | return __builtin_mve_vmladavaxq_sv4si (__a, __b, __c); | |
8253 | } | |
8254 | ||
8255 | __extension__ extern __inline int32_t | |
8256 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8257 | __arm_vmladavaq_s32 (int32_t __a, int32x4_t __b, int32x4_t __c) | |
8258 | { | |
8259 | return __builtin_mve_vmladavaq_sv4si (__a, __b, __c); | |
8260 | } | |
8261 | ||
8262 | __extension__ extern __inline int32x4_t | |
8263 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8264 | __arm_vsriq_n_s32 (int32x4_t __a, int32x4_t __b, const int __imm) | |
8265 | { | |
8266 | return __builtin_mve_vsriq_n_sv4si (__a, __b, __imm); | |
8267 | } | |
8268 | ||
8269 | __extension__ extern __inline int32x4_t | |
8270 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8271 | __arm_vsliq_n_s32 (int32x4_t __a, int32x4_t __b, const int __imm) | |
8272 | { | |
8273 | return __builtin_mve_vsliq_n_sv4si (__a, __b, __imm); | |
8274 | } | |
8275 | ||
8276 | __extension__ extern __inline uint64x2_t | |
8277 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8278 | __arm_vpselq_u64 (uint64x2_t __a, uint64x2_t __b, mve_pred16_t __p) | |
8279 | { | |
8280 | return __builtin_mve_vpselq_uv2di (__a, __b, __p); | |
8281 | } | |
8282 | ||
8283 | __extension__ extern __inline int64x2_t | |
8284 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8285 | __arm_vpselq_s64 (int64x2_t __a, int64x2_t __b, mve_pred16_t __p) | |
8286 | { | |
8287 | return __builtin_mve_vpselq_sv2di (__a, __b, __p); | |
8288 | } | |
f9355dee | 8289 | |
e3678b44 | 8290 | __extension__ extern __inline int64_t |
f9355dee | 8291 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8292 | __arm_vrmlaldavhaxq_s32 (int64_t __a, int32x4_t __b, int32x4_t __c) |
f9355dee | 8293 | { |
e3678b44 | 8294 | return __builtin_mve_vrmlaldavhaxq_sv4si (__a, __b, __c); |
f9355dee SP |
8295 | } |
8296 | ||
e3678b44 | 8297 | __extension__ extern __inline int64_t |
f9355dee | 8298 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8299 | __arm_vrmlsldavhaq_s32 (int64_t __a, int32x4_t __b, int32x4_t __c) |
f9355dee | 8300 | { |
e3678b44 | 8301 | return __builtin_mve_vrmlsldavhaq_sv4si (__a, __b, __c); |
f9355dee SP |
8302 | } |
8303 | ||
e3678b44 | 8304 | __extension__ extern __inline int64_t |
f9355dee | 8305 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8306 | __arm_vrmlsldavhaxq_s32 (int64_t __a, int32x4_t __b, int32x4_t __c) |
f9355dee | 8307 | { |
e3678b44 | 8308 | return __builtin_mve_vrmlsldavhaxq_sv4si (__a, __b, __c); |
f9355dee SP |
8309 | } |
8310 | ||
e3678b44 | 8311 | __extension__ extern __inline int64_t |
f9355dee | 8312 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8313 | __arm_vaddlvaq_p_s32 (int64_t __a, int32x4_t __b, mve_pred16_t __p) |
f9355dee | 8314 | { |
e3678b44 | 8315 | return __builtin_mve_vaddlvaq_p_sv4si (__a, __b, __p); |
f9355dee SP |
8316 | } |
8317 | ||
e3678b44 | 8318 | __extension__ extern __inline int8x16_t |
f9355dee | 8319 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8320 | __arm_vrev16q_m_s8 (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p) |
f9355dee | 8321 | { |
e3678b44 | 8322 | return __builtin_mve_vrev16q_m_sv16qi (__inactive, __a, __p); |
f9355dee SP |
8323 | } |
8324 | ||
e3678b44 | 8325 | __extension__ extern __inline int64_t |
f9355dee | 8326 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8327 | __arm_vrmlaldavhq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) |
f9355dee | 8328 | { |
e3678b44 | 8329 | return __builtin_mve_vrmlaldavhq_p_sv4si (__a, __b, __p); |
f9355dee SP |
8330 | } |
8331 | ||
e3678b44 | 8332 | __extension__ extern __inline int64_t |
f9355dee | 8333 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8334 | __arm_vrmlaldavhxq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) |
f9355dee | 8335 | { |
e3678b44 | 8336 | return __builtin_mve_vrmlaldavhxq_p_sv4si (__a, __b, __p); |
f9355dee SP |
8337 | } |
8338 | ||
e3678b44 | 8339 | __extension__ extern __inline int64_t |
f9355dee | 8340 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8341 | __arm_vrmlsldavhq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) |
f9355dee | 8342 | { |
e3678b44 | 8343 | return __builtin_mve_vrmlsldavhq_p_sv4si (__a, __b, __p); |
f9355dee SP |
8344 | } |
8345 | ||
e3678b44 | 8346 | __extension__ extern __inline int64_t |
f9355dee | 8347 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8348 | __arm_vrmlsldavhxq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) |
f9355dee | 8349 | { |
e3678b44 | 8350 | return __builtin_mve_vrmlsldavhxq_p_sv4si (__a, __b, __p); |
f9355dee SP |
8351 | } |
8352 | ||
e3678b44 | 8353 | __extension__ extern __inline uint64_t |
f9355dee | 8354 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8355 | __arm_vaddlvaq_p_u32 (uint64_t __a, uint32x4_t __b, mve_pred16_t __p) |
f9355dee | 8356 | { |
e3678b44 | 8357 | return __builtin_mve_vaddlvaq_p_uv4si (__a, __b, __p); |
f9355dee SP |
8358 | } |
8359 | ||
e3678b44 | 8360 | __extension__ extern __inline uint8x16_t |
f9355dee | 8361 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8362 | __arm_vrev16q_m_u8 (uint8x16_t __inactive, uint8x16_t __a, mve_pred16_t __p) |
f9355dee | 8363 | { |
e3678b44 | 8364 | return __builtin_mve_vrev16q_m_uv16qi (__inactive, __a, __p); |
f9355dee SP |
8365 | } |
8366 | ||
e3678b44 | 8367 | __extension__ extern __inline uint64_t |
f9355dee | 8368 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8369 | __arm_vrmlaldavhq_p_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) |
f9355dee | 8370 | { |
e3678b44 | 8371 | return __builtin_mve_vrmlaldavhq_p_uv4si (__a, __b, __p); |
f9355dee SP |
8372 | } |
8373 | ||
e3678b44 | 8374 | __extension__ extern __inline int16x8_t |
f9355dee | 8375 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8376 | __arm_vmvnq_m_n_s16 (int16x8_t __inactive, const int __imm, mve_pred16_t __p) |
f9355dee | 8377 | { |
e3678b44 | 8378 | return __builtin_mve_vmvnq_m_n_sv8hi (__inactive, __imm, __p); |
f9355dee SP |
8379 | } |
8380 | ||
e3678b44 | 8381 | __extension__ extern __inline int16x8_t |
f9355dee | 8382 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8383 | __arm_vorrq_m_n_s16 (int16x8_t __a, const int __imm, mve_pred16_t __p) |
f9355dee | 8384 | { |
e3678b44 | 8385 | return __builtin_mve_vorrq_m_n_sv8hi (__a, __imm, __p); |
f9355dee SP |
8386 | } |
8387 | ||
e3678b44 | 8388 | __extension__ extern __inline int8x16_t |
f9355dee | 8389 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8390 | __arm_vqrshrntq_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm) |
f9355dee | 8391 | { |
e3678b44 | 8392 | return __builtin_mve_vqrshrntq_n_sv8hi (__a, __b, __imm); |
f9355dee SP |
8393 | } |
8394 | ||
e3678b44 | 8395 | __extension__ extern __inline int8x16_t |
f9355dee | 8396 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8397 | __arm_vqshrnbq_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm) |
f9355dee | 8398 | { |
e3678b44 | 8399 | return __builtin_mve_vqshrnbq_n_sv8hi (__a, __b, __imm); |
f9355dee SP |
8400 | } |
8401 | ||
e3678b44 | 8402 | __extension__ extern __inline int8x16_t |
f9355dee | 8403 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8404 | __arm_vqshrntq_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm) |
f9355dee | 8405 | { |
e3678b44 | 8406 | return __builtin_mve_vqshrntq_n_sv8hi (__a, __b, __imm); |
f9355dee SP |
8407 | } |
8408 | ||
e3678b44 | 8409 | __extension__ extern __inline int8x16_t |
f9355dee | 8410 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8411 | __arm_vrshrnbq_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm) |
f9355dee | 8412 | { |
e3678b44 | 8413 | return __builtin_mve_vrshrnbq_n_sv8hi (__a, __b, __imm); |
f9355dee SP |
8414 | } |
8415 | ||
e3678b44 | 8416 | __extension__ extern __inline int8x16_t |
f9355dee | 8417 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8418 | __arm_vrshrntq_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm) |
f9355dee | 8419 | { |
e3678b44 | 8420 | return __builtin_mve_vrshrntq_n_sv8hi (__a, __b, __imm); |
f9355dee SP |
8421 | } |
8422 | ||
e3678b44 | 8423 | __extension__ extern __inline int8x16_t |
f9355dee | 8424 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8425 | __arm_vshrnbq_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm) |
f9355dee | 8426 | { |
e3678b44 | 8427 | return __builtin_mve_vshrnbq_n_sv8hi (__a, __b, __imm); |
f9355dee SP |
8428 | } |
8429 | ||
e3678b44 | 8430 | __extension__ extern __inline int8x16_t |
f9355dee | 8431 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8432 | __arm_vshrntq_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm) |
f9355dee | 8433 | { |
e3678b44 | 8434 | return __builtin_mve_vshrntq_n_sv8hi (__a, __b, __imm); |
f9355dee SP |
8435 | } |
8436 | ||
e3678b44 | 8437 | __extension__ extern __inline int64_t |
f9355dee | 8438 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8439 | __arm_vmlaldavaq_s16 (int64_t __a, int16x8_t __b, int16x8_t __c) |
f9355dee | 8440 | { |
e3678b44 | 8441 | return __builtin_mve_vmlaldavaq_sv8hi (__a, __b, __c); |
f9355dee SP |
8442 | } |
8443 | ||
e3678b44 | 8444 | __extension__ extern __inline int64_t |
f9355dee | 8445 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8446 | __arm_vmlaldavaxq_s16 (int64_t __a, int16x8_t __b, int16x8_t __c) |
f9355dee | 8447 | { |
e3678b44 | 8448 | return __builtin_mve_vmlaldavaxq_sv8hi (__a, __b, __c); |
f9355dee SP |
8449 | } |
8450 | ||
e3678b44 | 8451 | __extension__ extern __inline int64_t |
f9355dee | 8452 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8453 | __arm_vmlsldavaq_s16 (int64_t __a, int16x8_t __b, int16x8_t __c) |
f9355dee | 8454 | { |
e3678b44 | 8455 | return __builtin_mve_vmlsldavaq_sv8hi (__a, __b, __c); |
f9355dee SP |
8456 | } |
8457 | ||
e3678b44 | 8458 | __extension__ extern __inline int64_t |
f9355dee | 8459 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8460 | __arm_vmlsldavaxq_s16 (int64_t __a, int16x8_t __b, int16x8_t __c) |
f9355dee | 8461 | { |
e3678b44 | 8462 | return __builtin_mve_vmlsldavaxq_sv8hi (__a, __b, __c); |
f9355dee SP |
8463 | } |
8464 | ||
e3678b44 | 8465 | __extension__ extern __inline int64_t |
f9355dee | 8466 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8467 | __arm_vmlaldavq_p_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) |
f9355dee | 8468 | { |
e3678b44 | 8469 | return __builtin_mve_vmlaldavq_p_sv8hi (__a, __b, __p); |
f9355dee SP |
8470 | } |
8471 | ||
e3678b44 | 8472 | __extension__ extern __inline int64_t |
f9355dee | 8473 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8474 | __arm_vmlaldavxq_p_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) |
f9355dee | 8475 | { |
e3678b44 | 8476 | return __builtin_mve_vmlaldavxq_p_sv8hi (__a, __b, __p); |
f9355dee SP |
8477 | } |
8478 | ||
e3678b44 | 8479 | __extension__ extern __inline int64_t |
f9355dee | 8480 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8481 | __arm_vmlsldavq_p_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) |
f9355dee | 8482 | { |
e3678b44 | 8483 | return __builtin_mve_vmlsldavq_p_sv8hi (__a, __b, __p); |
f9355dee SP |
8484 | } |
8485 | ||
e3678b44 | 8486 | __extension__ extern __inline int64_t |
f9355dee | 8487 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8488 | __arm_vmlsldavxq_p_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) |
f9355dee | 8489 | { |
e3678b44 | 8490 | return __builtin_mve_vmlsldavxq_p_sv8hi (__a, __b, __p); |
f9355dee SP |
8491 | } |
8492 | ||
8493 | __extension__ extern __inline int16x8_t | |
8494 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
e3678b44 | 8495 | __arm_vmovlbq_m_s8 (int16x8_t __inactive, int8x16_t __a, mve_pred16_t __p) |
f9355dee | 8496 | { |
e3678b44 | 8497 | return __builtin_mve_vmovlbq_m_sv16qi (__inactive, __a, __p); |
f9355dee SP |
8498 | } |
8499 | ||
e3678b44 | 8500 | __extension__ extern __inline int16x8_t |
f9355dee | 8501 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8502 | __arm_vmovltq_m_s8 (int16x8_t __inactive, int8x16_t __a, mve_pred16_t __p) |
f9355dee | 8503 | { |
e3678b44 | 8504 | return __builtin_mve_vmovltq_m_sv16qi (__inactive, __a, __p); |
f9355dee SP |
8505 | } |
8506 | ||
e3678b44 | 8507 | __extension__ extern __inline int8x16_t |
f9355dee | 8508 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8509 | __arm_vmovnbq_m_s16 (int8x16_t __a, int16x8_t __b, mve_pred16_t __p) |
f9355dee | 8510 | { |
e3678b44 | 8511 | return __builtin_mve_vmovnbq_m_sv8hi (__a, __b, __p); |
f9355dee SP |
8512 | } |
8513 | ||
e3678b44 | 8514 | __extension__ extern __inline int8x16_t |
f9355dee | 8515 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8516 | __arm_vmovntq_m_s16 (int8x16_t __a, int16x8_t __b, mve_pred16_t __p) |
f9355dee | 8517 | { |
e3678b44 | 8518 | return __builtin_mve_vmovntq_m_sv8hi (__a, __b, __p); |
f9355dee SP |
8519 | } |
8520 | ||
e3678b44 | 8521 | __extension__ extern __inline int8x16_t |
f9355dee | 8522 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8523 | __arm_vqmovnbq_m_s16 (int8x16_t __a, int16x8_t __b, mve_pred16_t __p) |
f9355dee | 8524 | { |
e3678b44 | 8525 | return __builtin_mve_vqmovnbq_m_sv8hi (__a, __b, __p); |
f9355dee SP |
8526 | } |
8527 | ||
e3678b44 | 8528 | __extension__ extern __inline int8x16_t |
f9355dee | 8529 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8530 | __arm_vqmovntq_m_s16 (int8x16_t __a, int16x8_t __b, mve_pred16_t __p) |
f9355dee | 8531 | { |
e3678b44 | 8532 | return __builtin_mve_vqmovntq_m_sv8hi (__a, __b, __p); |
f9355dee SP |
8533 | } |
8534 | ||
e3678b44 | 8535 | __extension__ extern __inline int8x16_t |
f9355dee | 8536 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8537 | __arm_vrev32q_m_s8 (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p) |
f9355dee | 8538 | { |
e3678b44 | 8539 | return __builtin_mve_vrev32q_m_sv16qi (__inactive, __a, __p); |
f9355dee SP |
8540 | } |
8541 | ||
8542 | __extension__ extern __inline uint16x8_t | |
8543 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
e3678b44 | 8544 | __arm_vmvnq_m_n_u16 (uint16x8_t __inactive, const int __imm, mve_pred16_t __p) |
f9355dee | 8545 | { |
e3678b44 | 8546 | return __builtin_mve_vmvnq_m_n_uv8hi (__inactive, __imm, __p); |
f9355dee SP |
8547 | } |
8548 | ||
e3678b44 | 8549 | __extension__ extern __inline uint16x8_t |
f9355dee | 8550 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8551 | __arm_vorrq_m_n_u16 (uint16x8_t __a, const int __imm, mve_pred16_t __p) |
f9355dee | 8552 | { |
e3678b44 | 8553 | return __builtin_mve_vorrq_m_n_uv8hi (__a, __imm, __p); |
f9355dee SP |
8554 | } |
8555 | ||
e3678b44 | 8556 | __extension__ extern __inline uint8x16_t |
f9355dee | 8557 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8558 | __arm_vqrshruntq_n_s16 (uint8x16_t __a, int16x8_t __b, const int __imm) |
f9355dee | 8559 | { |
e3678b44 | 8560 | return __builtin_mve_vqrshruntq_n_sv8hi (__a, __b, __imm); |
f9355dee SP |
8561 | } |
8562 | ||
e3678b44 | 8563 | __extension__ extern __inline uint8x16_t |
f9355dee | 8564 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8565 | __arm_vqshrunbq_n_s16 (uint8x16_t __a, int16x8_t __b, const int __imm) |
f9355dee | 8566 | { |
e3678b44 | 8567 | return __builtin_mve_vqshrunbq_n_sv8hi (__a, __b, __imm); |
f9355dee SP |
8568 | } |
8569 | ||
e3678b44 | 8570 | __extension__ extern __inline uint8x16_t |
f9355dee | 8571 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8572 | __arm_vqshruntq_n_s16 (uint8x16_t __a, int16x8_t __b, const int __imm) |
f9355dee | 8573 | { |
e3678b44 | 8574 | return __builtin_mve_vqshruntq_n_sv8hi (__a, __b, __imm); |
f9355dee SP |
8575 | } |
8576 | ||
e3678b44 | 8577 | __extension__ extern __inline uint8x16_t |
f9355dee | 8578 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8579 | __arm_vqmovunbq_m_s16 (uint8x16_t __a, int16x8_t __b, mve_pred16_t __p) |
f9355dee | 8580 | { |
e3678b44 | 8581 | return __builtin_mve_vqmovunbq_m_sv8hi (__a, __b, __p); |
f9355dee SP |
8582 | } |
8583 | ||
e3678b44 | 8584 | __extension__ extern __inline uint8x16_t |
f9355dee | 8585 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8586 | __arm_vqmovuntq_m_s16 (uint8x16_t __a, int16x8_t __b, mve_pred16_t __p) |
f9355dee | 8587 | { |
e3678b44 | 8588 | return __builtin_mve_vqmovuntq_m_sv8hi (__a, __b, __p); |
f9355dee SP |
8589 | } |
8590 | ||
e3678b44 | 8591 | __extension__ extern __inline uint8x16_t |
f9355dee | 8592 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8593 | __arm_vqrshrntq_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm) |
f9355dee | 8594 | { |
e3678b44 | 8595 | return __builtin_mve_vqrshrntq_n_uv8hi (__a, __b, __imm); |
f9355dee SP |
8596 | } |
8597 | ||
e3678b44 | 8598 | __extension__ extern __inline uint8x16_t |
f9355dee | 8599 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8600 | __arm_vqshrnbq_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm) |
f9355dee | 8601 | { |
e3678b44 | 8602 | return __builtin_mve_vqshrnbq_n_uv8hi (__a, __b, __imm); |
f9355dee SP |
8603 | } |
8604 | ||
e3678b44 | 8605 | __extension__ extern __inline uint8x16_t |
f9355dee | 8606 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8607 | __arm_vqshrntq_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm) |
f9355dee | 8608 | { |
e3678b44 | 8609 | return __builtin_mve_vqshrntq_n_uv8hi (__a, __b, __imm); |
f9355dee SP |
8610 | } |
8611 | ||
e3678b44 | 8612 | __extension__ extern __inline uint8x16_t |
f9355dee | 8613 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8614 | __arm_vrshrnbq_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm) |
f9355dee | 8615 | { |
e3678b44 | 8616 | return __builtin_mve_vrshrnbq_n_uv8hi (__a, __b, __imm); |
f9355dee SP |
8617 | } |
8618 | ||
e3678b44 | 8619 | __extension__ extern __inline uint8x16_t |
f9355dee | 8620 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8621 | __arm_vrshrntq_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm) |
f9355dee | 8622 | { |
e3678b44 | 8623 | return __builtin_mve_vrshrntq_n_uv8hi (__a, __b, __imm); |
f9355dee SP |
8624 | } |
8625 | ||
e3678b44 | 8626 | __extension__ extern __inline uint8x16_t |
f9355dee | 8627 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8628 | __arm_vshrnbq_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm) |
f9355dee | 8629 | { |
e3678b44 | 8630 | return __builtin_mve_vshrnbq_n_uv8hi (__a, __b, __imm); |
f9355dee SP |
8631 | } |
8632 | ||
e3678b44 | 8633 | __extension__ extern __inline uint8x16_t |
f9355dee | 8634 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8635 | __arm_vshrntq_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm) |
f9355dee | 8636 | { |
e3678b44 | 8637 | return __builtin_mve_vshrntq_n_uv8hi (__a, __b, __imm); |
f9355dee SP |
8638 | } |
8639 | ||
e3678b44 | 8640 | __extension__ extern __inline uint64_t |
f9355dee | 8641 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8642 | __arm_vmlaldavaq_u16 (uint64_t __a, uint16x8_t __b, uint16x8_t __c) |
f9355dee | 8643 | { |
e3678b44 | 8644 | return __builtin_mve_vmlaldavaq_uv8hi (__a, __b, __c); |
f9355dee SP |
8645 | } |
8646 | ||
e3678b44 | 8647 | __extension__ extern __inline uint64_t |
f9355dee | 8648 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8649 | __arm_vmlaldavq_p_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) |
f9355dee | 8650 | { |
e3678b44 | 8651 | return __builtin_mve_vmlaldavq_p_uv8hi (__a, __b, __p); |
f9355dee SP |
8652 | } |
8653 | ||
e3678b44 | 8654 | __extension__ extern __inline uint16x8_t |
f9355dee | 8655 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8656 | __arm_vmovlbq_m_u8 (uint16x8_t __inactive, uint8x16_t __a, mve_pred16_t __p) |
f9355dee | 8657 | { |
e3678b44 | 8658 | return __builtin_mve_vmovlbq_m_uv16qi (__inactive, __a, __p); |
f9355dee SP |
8659 | } |
8660 | ||
e3678b44 | 8661 | __extension__ extern __inline uint16x8_t |
f9355dee | 8662 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8663 | __arm_vmovltq_m_u8 (uint16x8_t __inactive, uint8x16_t __a, mve_pred16_t __p) |
f9355dee | 8664 | { |
e3678b44 | 8665 | return __builtin_mve_vmovltq_m_uv16qi (__inactive, __a, __p); |
f9355dee SP |
8666 | } |
8667 | ||
e3678b44 | 8668 | __extension__ extern __inline uint8x16_t |
f9355dee | 8669 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8670 | __arm_vmovnbq_m_u16 (uint8x16_t __a, uint16x8_t __b, mve_pred16_t __p) |
f9355dee | 8671 | { |
e3678b44 | 8672 | return __builtin_mve_vmovnbq_m_uv8hi (__a, __b, __p); |
f9355dee SP |
8673 | } |
8674 | ||
e3678b44 | 8675 | __extension__ extern __inline uint8x16_t |
f9355dee | 8676 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8677 | __arm_vmovntq_m_u16 (uint8x16_t __a, uint16x8_t __b, mve_pred16_t __p) |
f9355dee | 8678 | { |
e3678b44 | 8679 | return __builtin_mve_vmovntq_m_uv8hi (__a, __b, __p); |
f9355dee SP |
8680 | } |
8681 | ||
e3678b44 | 8682 | __extension__ extern __inline uint8x16_t |
f9355dee | 8683 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8684 | __arm_vqmovnbq_m_u16 (uint8x16_t __a, uint16x8_t __b, mve_pred16_t __p) |
f9355dee | 8685 | { |
e3678b44 | 8686 | return __builtin_mve_vqmovnbq_m_uv8hi (__a, __b, __p); |
f9355dee SP |
8687 | } |
8688 | ||
e3678b44 | 8689 | __extension__ extern __inline uint8x16_t |
f9355dee | 8690 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8691 | __arm_vqmovntq_m_u16 (uint8x16_t __a, uint16x8_t __b, mve_pred16_t __p) |
f9355dee | 8692 | { |
e3678b44 | 8693 | return __builtin_mve_vqmovntq_m_uv8hi (__a, __b, __p); |
f9355dee SP |
8694 | } |
8695 | ||
e3678b44 | 8696 | __extension__ extern __inline uint8x16_t |
f9355dee | 8697 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8698 | __arm_vrev32q_m_u8 (uint8x16_t __inactive, uint8x16_t __a, mve_pred16_t __p) |
f9355dee | 8699 | { |
e3678b44 | 8700 | return __builtin_mve_vrev32q_m_uv16qi (__inactive, __a, __p); |
f9355dee SP |
8701 | } |
8702 | ||
8703 | __extension__ extern __inline int32x4_t | |
8704 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
e3678b44 | 8705 | __arm_vmvnq_m_n_s32 (int32x4_t __inactive, const int __imm, mve_pred16_t __p) |
f9355dee | 8706 | { |
e3678b44 | 8707 | return __builtin_mve_vmvnq_m_n_sv4si (__inactive, __imm, __p); |
f9355dee SP |
8708 | } |
8709 | ||
e3678b44 | 8710 | __extension__ extern __inline int32x4_t |
f9355dee | 8711 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8712 | __arm_vorrq_m_n_s32 (int32x4_t __a, const int __imm, mve_pred16_t __p) |
f9355dee | 8713 | { |
e3678b44 | 8714 | return __builtin_mve_vorrq_m_n_sv4si (__a, __imm, __p); |
f9355dee SP |
8715 | } |
8716 | ||
e3678b44 | 8717 | __extension__ extern __inline int16x8_t |
f9355dee | 8718 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8719 | __arm_vqrshrntq_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm) |
f9355dee | 8720 | { |
e3678b44 | 8721 | return __builtin_mve_vqrshrntq_n_sv4si (__a, __b, __imm); |
f9355dee SP |
8722 | } |
8723 | ||
e3678b44 | 8724 | __extension__ extern __inline int16x8_t |
f9355dee | 8725 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8726 | __arm_vqshrnbq_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm) |
f9355dee | 8727 | { |
e3678b44 | 8728 | return __builtin_mve_vqshrnbq_n_sv4si (__a, __b, __imm); |
f9355dee SP |
8729 | } |
8730 | ||
e3678b44 | 8731 | __extension__ extern __inline int16x8_t |
f9355dee | 8732 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8733 | __arm_vqshrntq_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm) |
f9355dee | 8734 | { |
e3678b44 | 8735 | return __builtin_mve_vqshrntq_n_sv4si (__a, __b, __imm); |
f9355dee SP |
8736 | } |
8737 | ||
e3678b44 | 8738 | __extension__ extern __inline int16x8_t |
f9355dee | 8739 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8740 | __arm_vrshrnbq_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm) |
f9355dee | 8741 | { |
e3678b44 | 8742 | return __builtin_mve_vrshrnbq_n_sv4si (__a, __b, __imm); |
f9355dee SP |
8743 | } |
8744 | ||
e3678b44 | 8745 | __extension__ extern __inline int16x8_t |
f9355dee | 8746 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8747 | __arm_vrshrntq_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm) |
f9355dee | 8748 | { |
e3678b44 | 8749 | return __builtin_mve_vrshrntq_n_sv4si (__a, __b, __imm); |
f9355dee SP |
8750 | } |
8751 | ||
e3678b44 | 8752 | __extension__ extern __inline int16x8_t |
f9355dee | 8753 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8754 | __arm_vshrnbq_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm) |
f9355dee | 8755 | { |
e3678b44 | 8756 | return __builtin_mve_vshrnbq_n_sv4si (__a, __b, __imm); |
f9355dee SP |
8757 | } |
8758 | ||
e3678b44 | 8759 | __extension__ extern __inline int16x8_t |
f9355dee | 8760 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8761 | __arm_vshrntq_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm) |
f9355dee | 8762 | { |
e3678b44 | 8763 | return __builtin_mve_vshrntq_n_sv4si (__a, __b, __imm); |
f9355dee SP |
8764 | } |
8765 | ||
e3678b44 | 8766 | __extension__ extern __inline int64_t |
f9355dee | 8767 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8768 | __arm_vmlaldavaq_s32 (int64_t __a, int32x4_t __b, int32x4_t __c) |
f9355dee | 8769 | { |
e3678b44 | 8770 | return __builtin_mve_vmlaldavaq_sv4si (__a, __b, __c); |
f9355dee SP |
8771 | } |
8772 | ||
e3678b44 | 8773 | __extension__ extern __inline int64_t |
f9355dee | 8774 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8775 | __arm_vmlaldavaxq_s32 (int64_t __a, int32x4_t __b, int32x4_t __c) |
f9355dee | 8776 | { |
e3678b44 | 8777 | return __builtin_mve_vmlaldavaxq_sv4si (__a, __b, __c); |
f9355dee SP |
8778 | } |
8779 | ||
e3678b44 | 8780 | __extension__ extern __inline int64_t |
f9355dee | 8781 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8782 | __arm_vmlsldavaq_s32 (int64_t __a, int32x4_t __b, int32x4_t __c) |
f9355dee | 8783 | { |
e3678b44 | 8784 | return __builtin_mve_vmlsldavaq_sv4si (__a, __b, __c); |
f9355dee SP |
8785 | } |
8786 | ||
e3678b44 | 8787 | __extension__ extern __inline int64_t |
f9355dee | 8788 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8789 | __arm_vmlsldavaxq_s32 (int64_t __a, int32x4_t __b, int32x4_t __c) |
f9355dee | 8790 | { |
e3678b44 | 8791 | return __builtin_mve_vmlsldavaxq_sv4si (__a, __b, __c); |
f9355dee SP |
8792 | } |
8793 | ||
e3678b44 | 8794 | __extension__ extern __inline int64_t |
f9355dee | 8795 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8796 | __arm_vmlaldavq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) |
f9355dee | 8797 | { |
e3678b44 | 8798 | return __builtin_mve_vmlaldavq_p_sv4si (__a, __b, __p); |
f9355dee SP |
8799 | } |
8800 | ||
e3678b44 | 8801 | __extension__ extern __inline int64_t |
f9355dee | 8802 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8803 | __arm_vmlaldavxq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) |
f9355dee | 8804 | { |
e3678b44 | 8805 | return __builtin_mve_vmlaldavxq_p_sv4si (__a, __b, __p); |
f9355dee SP |
8806 | } |
8807 | ||
e3678b44 | 8808 | __extension__ extern __inline int64_t |
f9355dee | 8809 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8810 | __arm_vmlsldavq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) |
f9355dee | 8811 | { |
e3678b44 | 8812 | return __builtin_mve_vmlsldavq_p_sv4si (__a, __b, __p); |
f9355dee SP |
8813 | } |
8814 | ||
e3678b44 | 8815 | __extension__ extern __inline int64_t |
f9355dee | 8816 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8817 | __arm_vmlsldavxq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) |
f9355dee | 8818 | { |
e3678b44 | 8819 | return __builtin_mve_vmlsldavxq_p_sv4si (__a, __b, __p); |
f9355dee SP |
8820 | } |
8821 | ||
e3678b44 | 8822 | __extension__ extern __inline int32x4_t |
f9355dee | 8823 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8824 | __arm_vmovlbq_m_s16 (int32x4_t __inactive, int16x8_t __a, mve_pred16_t __p) |
f9355dee | 8825 | { |
e3678b44 | 8826 | return __builtin_mve_vmovlbq_m_sv8hi (__inactive, __a, __p); |
f9355dee SP |
8827 | } |
8828 | ||
e3678b44 | 8829 | __extension__ extern __inline int32x4_t |
f9355dee | 8830 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8831 | __arm_vmovltq_m_s16 (int32x4_t __inactive, int16x8_t __a, mve_pred16_t __p) |
f9355dee | 8832 | { |
e3678b44 | 8833 | return __builtin_mve_vmovltq_m_sv8hi (__inactive, __a, __p); |
f9355dee SP |
8834 | } |
8835 | ||
e3678b44 | 8836 | __extension__ extern __inline int16x8_t |
f9355dee | 8837 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8838 | __arm_vmovnbq_m_s32 (int16x8_t __a, int32x4_t __b, mve_pred16_t __p) |
f9355dee | 8839 | { |
e3678b44 | 8840 | return __builtin_mve_vmovnbq_m_sv4si (__a, __b, __p); |
f9355dee SP |
8841 | } |
8842 | ||
e3678b44 | 8843 | __extension__ extern __inline int16x8_t |
f9355dee | 8844 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8845 | __arm_vmovntq_m_s32 (int16x8_t __a, int32x4_t __b, mve_pred16_t __p) |
f9355dee | 8846 | { |
e3678b44 | 8847 | return __builtin_mve_vmovntq_m_sv4si (__a, __b, __p); |
f9355dee SP |
8848 | } |
8849 | ||
e3678b44 | 8850 | __extension__ extern __inline int16x8_t |
f9355dee | 8851 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8852 | __arm_vqmovnbq_m_s32 (int16x8_t __a, int32x4_t __b, mve_pred16_t __p) |
f9355dee | 8853 | { |
e3678b44 | 8854 | return __builtin_mve_vqmovnbq_m_sv4si (__a, __b, __p); |
f9355dee SP |
8855 | } |
8856 | ||
e3678b44 | 8857 | __extension__ extern __inline int16x8_t |
f9355dee | 8858 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8859 | __arm_vqmovntq_m_s32 (int16x8_t __a, int32x4_t __b, mve_pred16_t __p) |
f9355dee | 8860 | { |
e3678b44 | 8861 | return __builtin_mve_vqmovntq_m_sv4si (__a, __b, __p); |
f9355dee SP |
8862 | } |
8863 | ||
e3678b44 | 8864 | __extension__ extern __inline int16x8_t |
f9355dee | 8865 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8866 | __arm_vrev32q_m_s16 (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p) |
f9355dee | 8867 | { |
e3678b44 | 8868 | return __builtin_mve_vrev32q_m_sv8hi (__inactive, __a, __p); |
f9355dee SP |
8869 | } |
8870 | ||
e3678b44 | 8871 | __extension__ extern __inline uint32x4_t |
f9355dee | 8872 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8873 | __arm_vmvnq_m_n_u32 (uint32x4_t __inactive, const int __imm, mve_pred16_t __p) |
f9355dee | 8874 | { |
e3678b44 | 8875 | return __builtin_mve_vmvnq_m_n_uv4si (__inactive, __imm, __p); |
f9355dee SP |
8876 | } |
8877 | ||
e3678b44 | 8878 | __extension__ extern __inline uint32x4_t |
f9355dee | 8879 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8880 | __arm_vorrq_m_n_u32 (uint32x4_t __a, const int __imm, mve_pred16_t __p) |
f9355dee | 8881 | { |
e3678b44 | 8882 | return __builtin_mve_vorrq_m_n_uv4si (__a, __imm, __p); |
f9355dee SP |
8883 | } |
8884 | ||
e3678b44 | 8885 | __extension__ extern __inline uint16x8_t |
f9355dee | 8886 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8887 | __arm_vqrshruntq_n_s32 (uint16x8_t __a, int32x4_t __b, const int __imm) |
f9355dee | 8888 | { |
e3678b44 | 8889 | return __builtin_mve_vqrshruntq_n_sv4si (__a, __b, __imm); |
f9355dee SP |
8890 | } |
8891 | ||
e3678b44 | 8892 | __extension__ extern __inline uint16x8_t |
f9355dee | 8893 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8894 | __arm_vqshrunbq_n_s32 (uint16x8_t __a, int32x4_t __b, const int __imm) |
f9355dee | 8895 | { |
e3678b44 | 8896 | return __builtin_mve_vqshrunbq_n_sv4si (__a, __b, __imm); |
f9355dee SP |
8897 | } |
8898 | ||
e3678b44 | 8899 | __extension__ extern __inline uint16x8_t |
f9355dee | 8900 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8901 | __arm_vqshruntq_n_s32 (uint16x8_t __a, int32x4_t __b, const int __imm) |
f9355dee | 8902 | { |
e3678b44 | 8903 | return __builtin_mve_vqshruntq_n_sv4si (__a, __b, __imm); |
f9355dee SP |
8904 | } |
8905 | ||
e3678b44 | 8906 | __extension__ extern __inline uint16x8_t |
f9355dee | 8907 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8908 | __arm_vqmovunbq_m_s32 (uint16x8_t __a, int32x4_t __b, mve_pred16_t __p) |
f9355dee | 8909 | { |
e3678b44 | 8910 | return __builtin_mve_vqmovunbq_m_sv4si (__a, __b, __p); |
f9355dee SP |
8911 | } |
8912 | ||
e3678b44 | 8913 | __extension__ extern __inline uint16x8_t |
f9355dee | 8914 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8915 | __arm_vqmovuntq_m_s32 (uint16x8_t __a, int32x4_t __b, mve_pred16_t __p) |
f9355dee | 8916 | { |
e3678b44 | 8917 | return __builtin_mve_vqmovuntq_m_sv4si (__a, __b, __p); |
f9355dee SP |
8918 | } |
8919 | ||
e3678b44 | 8920 | __extension__ extern __inline uint16x8_t |
f9355dee | 8921 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8922 | __arm_vqrshrntq_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm) |
f9355dee | 8923 | { |
e3678b44 | 8924 | return __builtin_mve_vqrshrntq_n_uv4si (__a, __b, __imm); |
f9355dee SP |
8925 | } |
8926 | ||
e3678b44 | 8927 | __extension__ extern __inline uint16x8_t |
f9355dee | 8928 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8929 | __arm_vqshrnbq_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm) |
f9355dee | 8930 | { |
e3678b44 | 8931 | return __builtin_mve_vqshrnbq_n_uv4si (__a, __b, __imm); |
f9355dee SP |
8932 | } |
8933 | ||
e3678b44 | 8934 | __extension__ extern __inline uint16x8_t |
f9355dee | 8935 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8936 | __arm_vqshrntq_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm) |
f9355dee | 8937 | { |
e3678b44 | 8938 | return __builtin_mve_vqshrntq_n_uv4si (__a, __b, __imm); |
f9355dee SP |
8939 | } |
8940 | ||
e3678b44 | 8941 | __extension__ extern __inline uint16x8_t |
f9355dee | 8942 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8943 | __arm_vrshrnbq_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm) |
f9355dee | 8944 | { |
e3678b44 | 8945 | return __builtin_mve_vrshrnbq_n_uv4si (__a, __b, __imm); |
f9355dee SP |
8946 | } |
8947 | ||
e3678b44 | 8948 | __extension__ extern __inline uint16x8_t |
f9355dee | 8949 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8950 | __arm_vrshrntq_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm) |
f9355dee | 8951 | { |
e3678b44 | 8952 | return __builtin_mve_vrshrntq_n_uv4si (__a, __b, __imm); |
f9355dee SP |
8953 | } |
8954 | ||
e3678b44 | 8955 | __extension__ extern __inline uint16x8_t |
f9355dee | 8956 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8957 | __arm_vshrnbq_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm) |
f9355dee | 8958 | { |
e3678b44 | 8959 | return __builtin_mve_vshrnbq_n_uv4si (__a, __b, __imm); |
f9355dee SP |
8960 | } |
8961 | ||
e3678b44 | 8962 | __extension__ extern __inline uint16x8_t |
f9355dee | 8963 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8964 | __arm_vshrntq_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm) |
f9355dee | 8965 | { |
e3678b44 | 8966 | return __builtin_mve_vshrntq_n_uv4si (__a, __b, __imm); |
f9355dee SP |
8967 | } |
8968 | ||
e3678b44 | 8969 | __extension__ extern __inline uint64_t |
f9355dee | 8970 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8971 | __arm_vmlaldavaq_u32 (uint64_t __a, uint32x4_t __b, uint32x4_t __c) |
f9355dee | 8972 | { |
e3678b44 | 8973 | return __builtin_mve_vmlaldavaq_uv4si (__a, __b, __c); |
f9355dee SP |
8974 | } |
8975 | ||
e3678b44 | 8976 | __extension__ extern __inline uint64_t |
f9355dee | 8977 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8978 | __arm_vmlaldavq_p_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) |
f9355dee | 8979 | { |
e3678b44 | 8980 | return __builtin_mve_vmlaldavq_p_uv4si (__a, __b, __p); |
f9355dee SP |
8981 | } |
8982 | ||
e3678b44 | 8983 | __extension__ extern __inline uint32x4_t |
f9355dee | 8984 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8985 | __arm_vmovlbq_m_u16 (uint32x4_t __inactive, uint16x8_t __a, mve_pred16_t __p) |
f9355dee | 8986 | { |
e3678b44 | 8987 | return __builtin_mve_vmovlbq_m_uv8hi (__inactive, __a, __p); |
f9355dee SP |
8988 | } |
8989 | ||
e3678b44 | 8990 | __extension__ extern __inline uint32x4_t |
f9355dee | 8991 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8992 | __arm_vmovltq_m_u16 (uint32x4_t __inactive, uint16x8_t __a, mve_pred16_t __p) |
f9355dee | 8993 | { |
e3678b44 | 8994 | return __builtin_mve_vmovltq_m_uv8hi (__inactive, __a, __p); |
f9355dee SP |
8995 | } |
8996 | ||
e3678b44 | 8997 | __extension__ extern __inline uint16x8_t |
f9355dee | 8998 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8999 | __arm_vmovnbq_m_u32 (uint16x8_t __a, uint32x4_t __b, mve_pred16_t __p) |
f9355dee | 9000 | { |
e3678b44 | 9001 | return __builtin_mve_vmovnbq_m_uv4si (__a, __b, __p); |
f9355dee SP |
9002 | } |
9003 | ||
e3678b44 | 9004 | __extension__ extern __inline uint16x8_t |
f9355dee | 9005 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 9006 | __arm_vmovntq_m_u32 (uint16x8_t __a, uint32x4_t __b, mve_pred16_t __p) |
f9355dee | 9007 | { |
e3678b44 | 9008 | return __builtin_mve_vmovntq_m_uv4si (__a, __b, __p); |
f9355dee SP |
9009 | } |
9010 | ||
e3678b44 | 9011 | __extension__ extern __inline uint16x8_t |
f9355dee | 9012 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 9013 | __arm_vqmovnbq_m_u32 (uint16x8_t __a, uint32x4_t __b, mve_pred16_t __p) |
f9355dee | 9014 | { |
e3678b44 | 9015 | return __builtin_mve_vqmovnbq_m_uv4si (__a, __b, __p); |
f9355dee SP |
9016 | } |
9017 | ||
e3678b44 | 9018 | __extension__ extern __inline uint16x8_t |
f9355dee | 9019 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 9020 | __arm_vqmovntq_m_u32 (uint16x8_t __a, uint32x4_t __b, mve_pred16_t __p) |
f9355dee | 9021 | { |
e3678b44 | 9022 | return __builtin_mve_vqmovntq_m_uv4si (__a, __b, __p); |
f9355dee SP |
9023 | } |
9024 | ||
e3678b44 | 9025 | __extension__ extern __inline uint16x8_t |
f9355dee | 9026 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 9027 | __arm_vrev32q_m_u16 (uint16x8_t __inactive, uint16x8_t __a, mve_pred16_t __p) |
f9355dee | 9028 | { |
e3678b44 | 9029 | return __builtin_mve_vrev32q_m_uv8hi (__inactive, __a, __p); |
f9355dee | 9030 | } |
db5db9d2 SP |
9031 | |
9032 | __extension__ extern __inline int8x16_t | |
9033 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9034 | __arm_vsriq_m_n_s8 (int8x16_t __a, int8x16_t __b, const int __imm, mve_pred16_t __p) | |
9035 | { | |
9036 | return __builtin_mve_vsriq_m_n_sv16qi (__a, __b, __imm, __p); | |
9037 | } | |
9038 | ||
9039 | __extension__ extern __inline int8x16_t | |
9040 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9041 | __arm_vsubq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
9042 | { | |
9043 | return __builtin_mve_vsubq_m_sv16qi (__inactive, __a, __b, __p); | |
9044 | } | |
9045 | ||
9046 | __extension__ extern __inline uint8x16_t | |
9047 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9048 | __arm_vqshluq_m_n_s8 (uint8x16_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p) | |
9049 | { | |
9050 | return __builtin_mve_vqshluq_m_n_sv16qi (__inactive, __a, __imm, __p); | |
9051 | } | |
9052 | ||
9053 | __extension__ extern __inline uint32_t | |
9054 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9055 | __arm_vabavq_p_s8 (uint32_t __a, int8x16_t __b, int8x16_t __c, mve_pred16_t __p) | |
9056 | { | |
9057 | return __builtin_mve_vabavq_p_sv16qi (__a, __b, __c, __p); | |
9058 | } | |
9059 | ||
9060 | __extension__ extern __inline uint8x16_t | |
9061 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9062 | __arm_vsriq_m_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __imm, mve_pred16_t __p) | |
9063 | { | |
9064 | return __builtin_mve_vsriq_m_n_uv16qi (__a, __b, __imm, __p); | |
9065 | } | |
9066 | ||
9067 | __extension__ extern __inline uint8x16_t | |
9068 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9069 | __arm_vshlq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
9070 | { | |
9071 | return __builtin_mve_vshlq_m_uv16qi (__inactive, __a, __b, __p); | |
9072 | } | |
9073 | ||
9074 | __extension__ extern __inline uint8x16_t | |
9075 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9076 | __arm_vsubq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
9077 | { | |
9078 | return __builtin_mve_vsubq_m_uv16qi (__inactive, __a, __b, __p); | |
9079 | } | |
9080 | ||
9081 | __extension__ extern __inline uint32_t | |
9082 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9083 | __arm_vabavq_p_u8 (uint32_t __a, uint8x16_t __b, uint8x16_t __c, mve_pred16_t __p) | |
9084 | { | |
9085 | return __builtin_mve_vabavq_p_uv16qi (__a, __b, __c, __p); | |
9086 | } | |
9087 | ||
9088 | __extension__ extern __inline int8x16_t | |
9089 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9090 | __arm_vshlq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
9091 | { | |
9092 | return __builtin_mve_vshlq_m_sv16qi (__inactive, __a, __b, __p); | |
9093 | } | |
9094 | ||
9095 | __extension__ extern __inline int16x8_t | |
9096 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9097 | __arm_vsriq_m_n_s16 (int16x8_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p) | |
9098 | { | |
9099 | return __builtin_mve_vsriq_m_n_sv8hi (__a, __b, __imm, __p); | |
9100 | } | |
9101 | ||
9102 | __extension__ extern __inline int16x8_t | |
9103 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9104 | __arm_vsubq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
9105 | { | |
9106 | return __builtin_mve_vsubq_m_sv8hi (__inactive, __a, __b, __p); | |
9107 | } | |
9108 | ||
9109 | __extension__ extern __inline uint16x8_t | |
9110 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9111 | __arm_vqshluq_m_n_s16 (uint16x8_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p) | |
9112 | { | |
9113 | return __builtin_mve_vqshluq_m_n_sv8hi (__inactive, __a, __imm, __p); | |
9114 | } | |
9115 | ||
9116 | __extension__ extern __inline uint32_t | |
9117 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9118 | __arm_vabavq_p_s16 (uint32_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p) | |
9119 | { | |
9120 | return __builtin_mve_vabavq_p_sv8hi (__a, __b, __c, __p); | |
9121 | } | |
9122 | ||
9123 | __extension__ extern __inline uint16x8_t | |
9124 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9125 | __arm_vsriq_m_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p) | |
9126 | { | |
9127 | return __builtin_mve_vsriq_m_n_uv8hi (__a, __b, __imm, __p); | |
9128 | } | |
9129 | ||
9130 | __extension__ extern __inline uint16x8_t | |
9131 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9132 | __arm_vshlq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
9133 | { | |
9134 | return __builtin_mve_vshlq_m_uv8hi (__inactive, __a, __b, __p); | |
9135 | } | |
9136 | ||
9137 | __extension__ extern __inline uint16x8_t | |
9138 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9139 | __arm_vsubq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
9140 | { | |
9141 | return __builtin_mve_vsubq_m_uv8hi (__inactive, __a, __b, __p); | |
9142 | } | |
9143 | ||
9144 | __extension__ extern __inline uint32_t | |
9145 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9146 | __arm_vabavq_p_u16 (uint32_t __a, uint16x8_t __b, uint16x8_t __c, mve_pred16_t __p) | |
9147 | { | |
9148 | return __builtin_mve_vabavq_p_uv8hi (__a, __b, __c, __p); | |
9149 | } | |
9150 | ||
9151 | __extension__ extern __inline int16x8_t | |
9152 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9153 | __arm_vshlq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
9154 | { | |
9155 | return __builtin_mve_vshlq_m_sv8hi (__inactive, __a, __b, __p); | |
9156 | } | |
9157 | ||
9158 | __extension__ extern __inline int32x4_t | |
9159 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9160 | __arm_vsriq_m_n_s32 (int32x4_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p) | |
9161 | { | |
9162 | return __builtin_mve_vsriq_m_n_sv4si (__a, __b, __imm, __p); | |
9163 | } | |
9164 | ||
9165 | __extension__ extern __inline int32x4_t | |
9166 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9167 | __arm_vsubq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
9168 | { | |
9169 | return __builtin_mve_vsubq_m_sv4si (__inactive, __a, __b, __p); | |
9170 | } | |
9171 | ||
9172 | __extension__ extern __inline uint32x4_t | |
9173 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9174 | __arm_vqshluq_m_n_s32 (uint32x4_t __inactive, int32x4_t __a, const int __imm, mve_pred16_t __p) | |
9175 | { | |
9176 | return __builtin_mve_vqshluq_m_n_sv4si (__inactive, __a, __imm, __p); | |
9177 | } | |
9178 | ||
9179 | __extension__ extern __inline uint32_t | |
9180 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9181 | __arm_vabavq_p_s32 (uint32_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p) | |
9182 | { | |
9183 | return __builtin_mve_vabavq_p_sv4si (__a, __b, __c, __p); | |
9184 | } | |
9185 | ||
9186 | __extension__ extern __inline uint32x4_t | |
9187 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9188 | __arm_vsriq_m_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p) | |
9189 | { | |
9190 | return __builtin_mve_vsriq_m_n_uv4si (__a, __b, __imm, __p); | |
9191 | } | |
9192 | ||
9193 | __extension__ extern __inline uint32x4_t | |
9194 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9195 | __arm_vshlq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
9196 | { | |
9197 | return __builtin_mve_vshlq_m_uv4si (__inactive, __a, __b, __p); | |
9198 | } | |
9199 | ||
9200 | __extension__ extern __inline uint32x4_t | |
9201 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9202 | __arm_vsubq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
9203 | { | |
9204 | return __builtin_mve_vsubq_m_uv4si (__inactive, __a, __b, __p); | |
9205 | } | |
9206 | ||
9207 | __extension__ extern __inline uint32_t | |
9208 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9209 | __arm_vabavq_p_u32 (uint32_t __a, uint32x4_t __b, uint32x4_t __c, mve_pred16_t __p) | |
9210 | { | |
9211 | return __builtin_mve_vabavq_p_uv4si (__a, __b, __c, __p); | |
9212 | } | |
9213 | ||
9214 | __extension__ extern __inline int32x4_t | |
9215 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9216 | __arm_vshlq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
9217 | { | |
9218 | return __builtin_mve_vshlq_m_sv4si (__inactive, __a, __b, __p); | |
9219 | } | |
9220 | ||
8eb3b6b9 SP |
9221 | __extension__ extern __inline int8x16_t |
9222 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9223 | __arm_vabdq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
9224 | { | |
9225 | return __builtin_mve_vabdq_m_sv16qi (__inactive, __a, __b, __p); | |
9226 | } | |
9227 | ||
9228 | __extension__ extern __inline int32x4_t | |
9229 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9230 | __arm_vabdq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
9231 | { | |
9232 | return __builtin_mve_vabdq_m_sv4si (__inactive, __a, __b, __p); | |
9233 | } | |
9234 | ||
9235 | __extension__ extern __inline int16x8_t | |
9236 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9237 | __arm_vabdq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
9238 | { | |
9239 | return __builtin_mve_vabdq_m_sv8hi (__inactive, __a, __b, __p); | |
9240 | } | |
9241 | ||
9242 | __extension__ extern __inline uint8x16_t | |
9243 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9244 | __arm_vabdq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
9245 | { | |
9246 | return __builtin_mve_vabdq_m_uv16qi (__inactive, __a, __b, __p); | |
9247 | } | |
9248 | ||
9249 | __extension__ extern __inline uint32x4_t | |
9250 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9251 | __arm_vabdq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
9252 | { | |
9253 | return __builtin_mve_vabdq_m_uv4si (__inactive, __a, __b, __p); | |
9254 | } | |
9255 | ||
9256 | __extension__ extern __inline uint16x8_t | |
9257 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9258 | __arm_vabdq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
9259 | { | |
9260 | return __builtin_mve_vabdq_m_uv8hi (__inactive, __a, __b, __p); | |
9261 | } | |
9262 | ||
9263 | __extension__ extern __inline int8x16_t | |
9264 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9265 | __arm_vaddq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p) | |
9266 | { | |
9267 | return __builtin_mve_vaddq_m_n_sv16qi (__inactive, __a, __b, __p); | |
9268 | } | |
9269 | ||
9270 | __extension__ extern __inline int32x4_t | |
9271 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9272 | __arm_vaddq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p) | |
9273 | { | |
9274 | return __builtin_mve_vaddq_m_n_sv4si (__inactive, __a, __b, __p); | |
9275 | } | |
9276 | ||
9277 | __extension__ extern __inline int16x8_t | |
9278 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9279 | __arm_vaddq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p) | |
9280 | { | |
9281 | return __builtin_mve_vaddq_m_n_sv8hi (__inactive, __a, __b, __p); | |
9282 | } | |
9283 | ||
9284 | __extension__ extern __inline uint8x16_t | |
9285 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9286 | __arm_vaddq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p) | |
9287 | { | |
9288 | return __builtin_mve_vaddq_m_n_uv16qi (__inactive, __a, __b, __p); | |
9289 | } | |
9290 | ||
9291 | __extension__ extern __inline uint32x4_t | |
9292 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9293 | __arm_vaddq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p) | |
9294 | { | |
9295 | return __builtin_mve_vaddq_m_n_uv4si (__inactive, __a, __b, __p); | |
9296 | } | |
9297 | ||
9298 | __extension__ extern __inline uint16x8_t | |
9299 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9300 | __arm_vaddq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p) | |
9301 | { | |
9302 | return __builtin_mve_vaddq_m_n_uv8hi (__inactive, __a, __b, __p); | |
9303 | } | |
9304 | ||
9305 | __extension__ extern __inline int8x16_t | |
9306 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9307 | __arm_vaddq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
9308 | { | |
9309 | return __builtin_mve_vaddq_m_sv16qi (__inactive, __a, __b, __p); | |
9310 | } | |
9311 | ||
9312 | __extension__ extern __inline int32x4_t | |
9313 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9314 | __arm_vaddq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
9315 | { | |
9316 | return __builtin_mve_vaddq_m_sv4si (__inactive, __a, __b, __p); | |
9317 | } | |
9318 | ||
9319 | __extension__ extern __inline int16x8_t | |
9320 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9321 | __arm_vaddq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
9322 | { | |
9323 | return __builtin_mve_vaddq_m_sv8hi (__inactive, __a, __b, __p); | |
9324 | } | |
9325 | ||
9326 | __extension__ extern __inline uint8x16_t | |
9327 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9328 | __arm_vaddq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
9329 | { | |
9330 | return __builtin_mve_vaddq_m_uv16qi (__inactive, __a, __b, __p); | |
9331 | } | |
9332 | ||
9333 | __extension__ extern __inline uint32x4_t | |
9334 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9335 | __arm_vaddq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
9336 | { | |
9337 | return __builtin_mve_vaddq_m_uv4si (__inactive, __a, __b, __p); | |
9338 | } | |
9339 | ||
9340 | __extension__ extern __inline uint16x8_t | |
9341 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9342 | __arm_vaddq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
9343 | { | |
9344 | return __builtin_mve_vaddq_m_uv8hi (__inactive, __a, __b, __p); | |
9345 | } | |
9346 | ||
9347 | __extension__ extern __inline int8x16_t | |
9348 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9349 | __arm_vandq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
9350 | { | |
9351 | return __builtin_mve_vandq_m_sv16qi (__inactive, __a, __b, __p); | |
9352 | } | |
9353 | ||
9354 | __extension__ extern __inline int32x4_t | |
9355 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9356 | __arm_vandq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
9357 | { | |
9358 | return __builtin_mve_vandq_m_sv4si (__inactive, __a, __b, __p); | |
9359 | } | |
9360 | ||
9361 | __extension__ extern __inline int16x8_t | |
9362 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9363 | __arm_vandq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
9364 | { | |
9365 | return __builtin_mve_vandq_m_sv8hi (__inactive, __a, __b, __p); | |
9366 | } | |
9367 | ||
9368 | __extension__ extern __inline uint8x16_t | |
9369 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9370 | __arm_vandq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
9371 | { | |
9372 | return __builtin_mve_vandq_m_uv16qi (__inactive, __a, __b, __p); | |
9373 | } | |
9374 | ||
9375 | __extension__ extern __inline uint32x4_t | |
9376 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9377 | __arm_vandq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
9378 | { | |
9379 | return __builtin_mve_vandq_m_uv4si (__inactive, __a, __b, __p); | |
9380 | } | |
9381 | ||
9382 | __extension__ extern __inline uint16x8_t | |
9383 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9384 | __arm_vandq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
9385 | { | |
9386 | return __builtin_mve_vandq_m_uv8hi (__inactive, __a, __b, __p); | |
9387 | } | |
9388 | ||
9389 | __extension__ extern __inline int8x16_t | |
9390 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9391 | __arm_vbicq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
9392 | { | |
9393 | return __builtin_mve_vbicq_m_sv16qi (__inactive, __a, __b, __p); | |
9394 | } | |
9395 | ||
9396 | __extension__ extern __inline int32x4_t | |
9397 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9398 | __arm_vbicq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
9399 | { | |
9400 | return __builtin_mve_vbicq_m_sv4si (__inactive, __a, __b, __p); | |
9401 | } | |
9402 | ||
9403 | __extension__ extern __inline int16x8_t | |
9404 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9405 | __arm_vbicq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
9406 | { | |
9407 | return __builtin_mve_vbicq_m_sv8hi (__inactive, __a, __b, __p); | |
9408 | } | |
9409 | ||
9410 | __extension__ extern __inline uint8x16_t | |
9411 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9412 | __arm_vbicq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
9413 | { | |
9414 | return __builtin_mve_vbicq_m_uv16qi (__inactive, __a, __b, __p); | |
9415 | } | |
9416 | ||
9417 | __extension__ extern __inline uint32x4_t | |
9418 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9419 | __arm_vbicq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
9420 | { | |
9421 | return __builtin_mve_vbicq_m_uv4si (__inactive, __a, __b, __p); | |
9422 | } | |
9423 | ||
9424 | __extension__ extern __inline uint16x8_t | |
9425 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9426 | __arm_vbicq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
9427 | { | |
9428 | return __builtin_mve_vbicq_m_uv8hi (__inactive, __a, __b, __p); | |
9429 | } | |
9430 | ||
9431 | __extension__ extern __inline int8x16_t | |
9432 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9433 | __arm_vbrsrq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, int32_t __b, mve_pred16_t __p) | |
9434 | { | |
9435 | return __builtin_mve_vbrsrq_m_n_sv16qi (__inactive, __a, __b, __p); | |
9436 | } | |
9437 | ||
9438 | __extension__ extern __inline int32x4_t | |
9439 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9440 | __arm_vbrsrq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p) | |
9441 | { | |
9442 | return __builtin_mve_vbrsrq_m_n_sv4si (__inactive, __a, __b, __p); | |
9443 | } | |
9444 | ||
9445 | __extension__ extern __inline int16x8_t | |
9446 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9447 | __arm_vbrsrq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, int32_t __b, mve_pred16_t __p) | |
9448 | { | |
9449 | return __builtin_mve_vbrsrq_m_n_sv8hi (__inactive, __a, __b, __p); | |
9450 | } | |
9451 | ||
9452 | __extension__ extern __inline uint8x16_t | |
9453 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9454 | __arm_vbrsrq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, int32_t __b, mve_pred16_t __p) | |
9455 | { | |
9456 | return __builtin_mve_vbrsrq_m_n_uv16qi (__inactive, __a, __b, __p); | |
9457 | } | |
9458 | ||
9459 | __extension__ extern __inline uint32x4_t | |
9460 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9461 | __arm_vbrsrq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, int32_t __b, mve_pred16_t __p) | |
9462 | { | |
9463 | return __builtin_mve_vbrsrq_m_n_uv4si (__inactive, __a, __b, __p); | |
9464 | } | |
9465 | ||
9466 | __extension__ extern __inline uint16x8_t | |
9467 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9468 | __arm_vbrsrq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, int32_t __b, mve_pred16_t __p) | |
9469 | { | |
9470 | return __builtin_mve_vbrsrq_m_n_uv8hi (__inactive, __a, __b, __p); | |
9471 | } | |
9472 | ||
9473 | __extension__ extern __inline int8x16_t | |
9474 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9475 | __arm_vcaddq_rot270_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
9476 | { | |
9477 | return __builtin_mve_vcaddq_rot270_m_sv16qi (__inactive, __a, __b, __p); | |
9478 | } | |
9479 | ||
9480 | __extension__ extern __inline int32x4_t | |
9481 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9482 | __arm_vcaddq_rot270_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
9483 | { | |
9484 | return __builtin_mve_vcaddq_rot270_m_sv4si (__inactive, __a, __b, __p); | |
9485 | } | |
9486 | ||
9487 | __extension__ extern __inline int16x8_t | |
9488 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9489 | __arm_vcaddq_rot270_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
9490 | { | |
9491 | return __builtin_mve_vcaddq_rot270_m_sv8hi (__inactive, __a, __b, __p); | |
9492 | } | |
9493 | ||
9494 | __extension__ extern __inline uint8x16_t | |
9495 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9496 | __arm_vcaddq_rot270_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
9497 | { | |
9498 | return __builtin_mve_vcaddq_rot270_m_uv16qi (__inactive, __a, __b, __p); | |
9499 | } | |
9500 | ||
9501 | __extension__ extern __inline uint32x4_t | |
9502 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9503 | __arm_vcaddq_rot270_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
9504 | { | |
9505 | return __builtin_mve_vcaddq_rot270_m_uv4si (__inactive, __a, __b, __p); | |
9506 | } | |
9507 | ||
9508 | __extension__ extern __inline uint16x8_t | |
9509 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9510 | __arm_vcaddq_rot270_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
9511 | { | |
9512 | return __builtin_mve_vcaddq_rot270_m_uv8hi (__inactive, __a, __b, __p); | |
9513 | } | |
9514 | ||
9515 | __extension__ extern __inline int8x16_t | |
9516 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9517 | __arm_vcaddq_rot90_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
9518 | { | |
9519 | return __builtin_mve_vcaddq_rot90_m_sv16qi (__inactive, __a, __b, __p); | |
9520 | } | |
9521 | ||
9522 | __extension__ extern __inline int32x4_t | |
9523 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9524 | __arm_vcaddq_rot90_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
9525 | { | |
9526 | return __builtin_mve_vcaddq_rot90_m_sv4si (__inactive, __a, __b, __p); | |
9527 | } | |
9528 | ||
9529 | __extension__ extern __inline int16x8_t | |
9530 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9531 | __arm_vcaddq_rot90_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
9532 | { | |
9533 | return __builtin_mve_vcaddq_rot90_m_sv8hi (__inactive, __a, __b, __p); | |
9534 | } | |
9535 | ||
9536 | __extension__ extern __inline uint8x16_t | |
9537 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9538 | __arm_vcaddq_rot90_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
9539 | { | |
9540 | return __builtin_mve_vcaddq_rot90_m_uv16qi (__inactive, __a, __b, __p); | |
9541 | } | |
9542 | ||
9543 | __extension__ extern __inline uint32x4_t | |
9544 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9545 | __arm_vcaddq_rot90_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
9546 | { | |
9547 | return __builtin_mve_vcaddq_rot90_m_uv4si (__inactive, __a, __b, __p); | |
9548 | } | |
9549 | ||
9550 | __extension__ extern __inline uint16x8_t | |
9551 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9552 | __arm_vcaddq_rot90_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
9553 | { | |
9554 | return __builtin_mve_vcaddq_rot90_m_uv8hi (__inactive, __a, __b, __p); | |
9555 | } | |
9556 | ||
9557 | __extension__ extern __inline int8x16_t | |
9558 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9559 | __arm_veorq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
9560 | { | |
9561 | return __builtin_mve_veorq_m_sv16qi (__inactive, __a, __b, __p); | |
9562 | } | |
9563 | ||
9564 | __extension__ extern __inline int32x4_t | |
9565 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9566 | __arm_veorq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
9567 | { | |
9568 | return __builtin_mve_veorq_m_sv4si (__inactive, __a, __b, __p); | |
9569 | } | |
9570 | ||
9571 | __extension__ extern __inline int16x8_t | |
9572 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9573 | __arm_veorq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
9574 | { | |
9575 | return __builtin_mve_veorq_m_sv8hi (__inactive, __a, __b, __p); | |
9576 | } | |
9577 | ||
9578 | __extension__ extern __inline uint8x16_t | |
9579 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9580 | __arm_veorq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
9581 | { | |
9582 | return __builtin_mve_veorq_m_uv16qi (__inactive, __a, __b, __p); | |
9583 | } | |
9584 | ||
9585 | __extension__ extern __inline uint32x4_t | |
9586 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9587 | __arm_veorq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
9588 | { | |
9589 | return __builtin_mve_veorq_m_uv4si (__inactive, __a, __b, __p); | |
9590 | } | |
9591 | ||
9592 | __extension__ extern __inline uint16x8_t | |
9593 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9594 | __arm_veorq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
9595 | { | |
9596 | return __builtin_mve_veorq_m_uv8hi (__inactive, __a, __b, __p); | |
9597 | } | |
9598 | ||
9599 | __extension__ extern __inline int8x16_t | |
9600 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9601 | __arm_vhaddq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p) | |
9602 | { | |
9603 | return __builtin_mve_vhaddq_m_n_sv16qi (__inactive, __a, __b, __p); | |
9604 | } | |
9605 | ||
9606 | __extension__ extern __inline int32x4_t | |
9607 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9608 | __arm_vhaddq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p) | |
9609 | { | |
9610 | return __builtin_mve_vhaddq_m_n_sv4si (__inactive, __a, __b, __p); | |
9611 | } | |
9612 | ||
9613 | __extension__ extern __inline int16x8_t | |
9614 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9615 | __arm_vhaddq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p) | |
9616 | { | |
9617 | return __builtin_mve_vhaddq_m_n_sv8hi (__inactive, __a, __b, __p); | |
9618 | } | |
9619 | ||
9620 | __extension__ extern __inline uint8x16_t | |
9621 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9622 | __arm_vhaddq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p) | |
9623 | { | |
9624 | return __builtin_mve_vhaddq_m_n_uv16qi (__inactive, __a, __b, __p); | |
9625 | } | |
9626 | ||
9627 | __extension__ extern __inline uint32x4_t | |
9628 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9629 | __arm_vhaddq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p) | |
9630 | { | |
9631 | return __builtin_mve_vhaddq_m_n_uv4si (__inactive, __a, __b, __p); | |
9632 | } | |
9633 | ||
9634 | __extension__ extern __inline uint16x8_t | |
9635 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9636 | __arm_vhaddq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p) | |
9637 | { | |
9638 | return __builtin_mve_vhaddq_m_n_uv8hi (__inactive, __a, __b, __p); | |
9639 | } | |
9640 | ||
9641 | __extension__ extern __inline int8x16_t | |
9642 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9643 | __arm_vhaddq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
9644 | { | |
9645 | return __builtin_mve_vhaddq_m_sv16qi (__inactive, __a, __b, __p); | |
9646 | } | |
9647 | ||
9648 | __extension__ extern __inline int32x4_t | |
9649 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9650 | __arm_vhaddq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
9651 | { | |
9652 | return __builtin_mve_vhaddq_m_sv4si (__inactive, __a, __b, __p); | |
9653 | } | |
9654 | ||
9655 | __extension__ extern __inline int16x8_t | |
9656 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9657 | __arm_vhaddq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
9658 | { | |
9659 | return __builtin_mve_vhaddq_m_sv8hi (__inactive, __a, __b, __p); | |
9660 | } | |
9661 | ||
9662 | __extension__ extern __inline uint8x16_t | |
9663 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9664 | __arm_vhaddq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
9665 | { | |
9666 | return __builtin_mve_vhaddq_m_uv16qi (__inactive, __a, __b, __p); | |
9667 | } | |
9668 | ||
9669 | __extension__ extern __inline uint32x4_t | |
9670 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9671 | __arm_vhaddq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
9672 | { | |
9673 | return __builtin_mve_vhaddq_m_uv4si (__inactive, __a, __b, __p); | |
9674 | } | |
9675 | ||
9676 | __extension__ extern __inline uint16x8_t | |
9677 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9678 | __arm_vhaddq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
9679 | { | |
9680 | return __builtin_mve_vhaddq_m_uv8hi (__inactive, __a, __b, __p); | |
9681 | } | |
9682 | ||
9683 | __extension__ extern __inline int8x16_t | |
9684 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9685 | __arm_vhcaddq_rot270_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
9686 | { | |
9687 | return __builtin_mve_vhcaddq_rot270_m_sv16qi (__inactive, __a, __b, __p); | |
9688 | } | |
9689 | ||
9690 | __extension__ extern __inline int32x4_t | |
9691 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9692 | __arm_vhcaddq_rot270_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
9693 | { | |
9694 | return __builtin_mve_vhcaddq_rot270_m_sv4si (__inactive, __a, __b, __p); | |
9695 | } | |
9696 | ||
9697 | __extension__ extern __inline int16x8_t | |
9698 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9699 | __arm_vhcaddq_rot270_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
9700 | { | |
9701 | return __builtin_mve_vhcaddq_rot270_m_sv8hi (__inactive, __a, __b, __p); | |
9702 | } | |
9703 | ||
9704 | __extension__ extern __inline int8x16_t | |
9705 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9706 | __arm_vhcaddq_rot90_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
9707 | { | |
9708 | return __builtin_mve_vhcaddq_rot90_m_sv16qi (__inactive, __a, __b, __p); | |
9709 | } | |
9710 | ||
9711 | __extension__ extern __inline int32x4_t | |
9712 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9713 | __arm_vhcaddq_rot90_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
9714 | { | |
9715 | return __builtin_mve_vhcaddq_rot90_m_sv4si (__inactive, __a, __b, __p); | |
9716 | } | |
9717 | ||
9718 | __extension__ extern __inline int16x8_t | |
9719 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9720 | __arm_vhcaddq_rot90_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
9721 | { | |
9722 | return __builtin_mve_vhcaddq_rot90_m_sv8hi (__inactive, __a, __b, __p); | |
9723 | } | |
9724 | ||
9725 | __extension__ extern __inline int8x16_t | |
9726 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9727 | __arm_vhsubq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p) | |
9728 | { | |
9729 | return __builtin_mve_vhsubq_m_n_sv16qi (__inactive, __a, __b, __p); | |
9730 | } | |
9731 | ||
9732 | __extension__ extern __inline int32x4_t | |
9733 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9734 | __arm_vhsubq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p) | |
9735 | { | |
9736 | return __builtin_mve_vhsubq_m_n_sv4si (__inactive, __a, __b, __p); | |
9737 | } | |
9738 | ||
9739 | __extension__ extern __inline int16x8_t | |
9740 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9741 | __arm_vhsubq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p) | |
9742 | { | |
9743 | return __builtin_mve_vhsubq_m_n_sv8hi (__inactive, __a, __b, __p); | |
9744 | } | |
9745 | ||
9746 | __extension__ extern __inline uint8x16_t | |
9747 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9748 | __arm_vhsubq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p) | |
9749 | { | |
9750 | return __builtin_mve_vhsubq_m_n_uv16qi (__inactive, __a, __b, __p); | |
9751 | } | |
9752 | ||
9753 | __extension__ extern __inline uint32x4_t | |
9754 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9755 | __arm_vhsubq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p) | |
9756 | { | |
9757 | return __builtin_mve_vhsubq_m_n_uv4si (__inactive, __a, __b, __p); | |
9758 | } | |
9759 | ||
9760 | __extension__ extern __inline uint16x8_t | |
9761 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9762 | __arm_vhsubq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p) | |
9763 | { | |
9764 | return __builtin_mve_vhsubq_m_n_uv8hi (__inactive, __a, __b, __p); | |
9765 | } | |
9766 | ||
9767 | __extension__ extern __inline int8x16_t | |
9768 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9769 | __arm_vhsubq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
9770 | { | |
9771 | return __builtin_mve_vhsubq_m_sv16qi (__inactive, __a, __b, __p); | |
9772 | } | |
9773 | ||
9774 | __extension__ extern __inline int32x4_t | |
9775 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9776 | __arm_vhsubq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
9777 | { | |
9778 | return __builtin_mve_vhsubq_m_sv4si (__inactive, __a, __b, __p); | |
9779 | } | |
9780 | ||
9781 | __extension__ extern __inline int16x8_t | |
9782 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9783 | __arm_vhsubq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
9784 | { | |
9785 | return __builtin_mve_vhsubq_m_sv8hi (__inactive, __a, __b, __p); | |
9786 | } | |
9787 | ||
9788 | __extension__ extern __inline uint8x16_t | |
9789 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9790 | __arm_vhsubq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
9791 | { | |
9792 | return __builtin_mve_vhsubq_m_uv16qi (__inactive, __a, __b, __p); | |
9793 | } | |
9794 | ||
9795 | __extension__ extern __inline uint32x4_t | |
9796 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9797 | __arm_vhsubq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
9798 | { | |
9799 | return __builtin_mve_vhsubq_m_uv4si (__inactive, __a, __b, __p); | |
9800 | } | |
9801 | ||
9802 | __extension__ extern __inline uint16x8_t | |
9803 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9804 | __arm_vhsubq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
9805 | { | |
9806 | return __builtin_mve_vhsubq_m_uv8hi (__inactive, __a, __b, __p); | |
9807 | } | |
9808 | ||
9809 | __extension__ extern __inline int8x16_t | |
9810 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9811 | __arm_vmaxq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
9812 | { | |
9813 | return __builtin_mve_vmaxq_m_sv16qi (__inactive, __a, __b, __p); | |
9814 | } | |
9815 | ||
9816 | __extension__ extern __inline int32x4_t | |
9817 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9818 | __arm_vmaxq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
9819 | { | |
9820 | return __builtin_mve_vmaxq_m_sv4si (__inactive, __a, __b, __p); | |
9821 | } | |
9822 | ||
9823 | __extension__ extern __inline int16x8_t | |
9824 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9825 | __arm_vmaxq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
9826 | { | |
9827 | return __builtin_mve_vmaxq_m_sv8hi (__inactive, __a, __b, __p); | |
9828 | } | |
9829 | ||
9830 | __extension__ extern __inline uint8x16_t | |
9831 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9832 | __arm_vmaxq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
9833 | { | |
9834 | return __builtin_mve_vmaxq_m_uv16qi (__inactive, __a, __b, __p); | |
9835 | } | |
9836 | ||
9837 | __extension__ extern __inline uint32x4_t | |
9838 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9839 | __arm_vmaxq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
9840 | { | |
9841 | return __builtin_mve_vmaxq_m_uv4si (__inactive, __a, __b, __p); | |
9842 | } | |
9843 | ||
9844 | __extension__ extern __inline uint16x8_t | |
9845 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9846 | __arm_vmaxq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
9847 | { | |
9848 | return __builtin_mve_vmaxq_m_uv8hi (__inactive, __a, __b, __p); | |
9849 | } | |
9850 | ||
9851 | __extension__ extern __inline int8x16_t | |
9852 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9853 | __arm_vminq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
9854 | { | |
9855 | return __builtin_mve_vminq_m_sv16qi (__inactive, __a, __b, __p); | |
9856 | } | |
9857 | ||
9858 | __extension__ extern __inline int32x4_t | |
9859 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9860 | __arm_vminq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
9861 | { | |
9862 | return __builtin_mve_vminq_m_sv4si (__inactive, __a, __b, __p); | |
9863 | } | |
9864 | ||
9865 | __extension__ extern __inline int16x8_t | |
9866 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9867 | __arm_vminq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
9868 | { | |
9869 | return __builtin_mve_vminq_m_sv8hi (__inactive, __a, __b, __p); | |
9870 | } | |
9871 | ||
9872 | __extension__ extern __inline uint8x16_t | |
9873 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9874 | __arm_vminq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
9875 | { | |
9876 | return __builtin_mve_vminq_m_uv16qi (__inactive, __a, __b, __p); | |
9877 | } | |
9878 | ||
9879 | __extension__ extern __inline uint32x4_t | |
9880 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9881 | __arm_vminq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
9882 | { | |
9883 | return __builtin_mve_vminq_m_uv4si (__inactive, __a, __b, __p); | |
9884 | } | |
9885 | ||
9886 | __extension__ extern __inline uint16x8_t | |
9887 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9888 | __arm_vminq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
9889 | { | |
9890 | return __builtin_mve_vminq_m_uv8hi (__inactive, __a, __b, __p); | |
9891 | } | |
9892 | ||
9893 | __extension__ extern __inline int32_t | |
9894 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9895 | __arm_vmladavaq_p_s8 (int32_t __a, int8x16_t __b, int8x16_t __c, mve_pred16_t __p) | |
9896 | { | |
9897 | return __builtin_mve_vmladavaq_p_sv16qi (__a, __b, __c, __p); | |
9898 | } | |
9899 | ||
9900 | __extension__ extern __inline int32_t | |
9901 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9902 | __arm_vmladavaq_p_s32 (int32_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p) | |
9903 | { | |
9904 | return __builtin_mve_vmladavaq_p_sv4si (__a, __b, __c, __p); | |
9905 | } | |
9906 | ||
9907 | __extension__ extern __inline int32_t | |
9908 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9909 | __arm_vmladavaq_p_s16 (int32_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p) | |
9910 | { | |
9911 | return __builtin_mve_vmladavaq_p_sv8hi (__a, __b, __c, __p); | |
9912 | } | |
9913 | ||
9914 | __extension__ extern __inline uint32_t | |
9915 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9916 | __arm_vmladavaq_p_u8 (uint32_t __a, uint8x16_t __b, uint8x16_t __c, mve_pred16_t __p) | |
9917 | { | |
9918 | return __builtin_mve_vmladavaq_p_uv16qi (__a, __b, __c, __p); | |
9919 | } | |
9920 | ||
9921 | __extension__ extern __inline uint32_t | |
9922 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9923 | __arm_vmladavaq_p_u32 (uint32_t __a, uint32x4_t __b, uint32x4_t __c, mve_pred16_t __p) | |
9924 | { | |
9925 | return __builtin_mve_vmladavaq_p_uv4si (__a, __b, __c, __p); | |
9926 | } | |
9927 | ||
9928 | __extension__ extern __inline uint32_t | |
9929 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9930 | __arm_vmladavaq_p_u16 (uint32_t __a, uint16x8_t __b, uint16x8_t __c, mve_pred16_t __p) | |
9931 | { | |
9932 | return __builtin_mve_vmladavaq_p_uv8hi (__a, __b, __c, __p); | |
9933 | } | |
9934 | ||
9935 | __extension__ extern __inline int32_t | |
9936 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9937 | __arm_vmladavaxq_p_s8 (int32_t __a, int8x16_t __b, int8x16_t __c, mve_pred16_t __p) | |
9938 | { | |
9939 | return __builtin_mve_vmladavaxq_p_sv16qi (__a, __b, __c, __p); | |
9940 | } | |
9941 | ||
9942 | __extension__ extern __inline int32_t | |
9943 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9944 | __arm_vmladavaxq_p_s32 (int32_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p) | |
9945 | { | |
9946 | return __builtin_mve_vmladavaxq_p_sv4si (__a, __b, __c, __p); | |
9947 | } | |
9948 | ||
9949 | __extension__ extern __inline int32_t | |
9950 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9951 | __arm_vmladavaxq_p_s16 (int32_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p) | |
9952 | { | |
9953 | return __builtin_mve_vmladavaxq_p_sv8hi (__a, __b, __c, __p); | |
9954 | } | |
9955 | ||
9956 | __extension__ extern __inline int8x16_t | |
9957 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9958 | __arm_vmlaq_m_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c, mve_pred16_t __p) | |
9959 | { | |
9960 | return __builtin_mve_vmlaq_m_n_sv16qi (__a, __b, __c, __p); | |
9961 | } | |
9962 | ||
9963 | __extension__ extern __inline int32x4_t | |
9964 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9965 | __arm_vmlaq_m_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c, mve_pred16_t __p) | |
9966 | { | |
9967 | return __builtin_mve_vmlaq_m_n_sv4si (__a, __b, __c, __p); | |
9968 | } | |
9969 | ||
9970 | __extension__ extern __inline int16x8_t | |
9971 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9972 | __arm_vmlaq_m_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c, mve_pred16_t __p) | |
9973 | { | |
9974 | return __builtin_mve_vmlaq_m_n_sv8hi (__a, __b, __c, __p); | |
9975 | } | |
9976 | ||
9977 | __extension__ extern __inline uint8x16_t | |
9978 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9979 | __arm_vmlaq_m_n_u8 (uint8x16_t __a, uint8x16_t __b, uint8_t __c, mve_pred16_t __p) | |
9980 | { | |
9981 | return __builtin_mve_vmlaq_m_n_uv16qi (__a, __b, __c, __p); | |
9982 | } | |
9983 | ||
9984 | __extension__ extern __inline uint32x4_t | |
9985 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9986 | __arm_vmlaq_m_n_u32 (uint32x4_t __a, uint32x4_t __b, uint32_t __c, mve_pred16_t __p) | |
9987 | { | |
9988 | return __builtin_mve_vmlaq_m_n_uv4si (__a, __b, __c, __p); | |
9989 | } | |
9990 | ||
9991 | __extension__ extern __inline uint16x8_t | |
9992 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9993 | __arm_vmlaq_m_n_u16 (uint16x8_t __a, uint16x8_t __b, uint16_t __c, mve_pred16_t __p) | |
9994 | { | |
9995 | return __builtin_mve_vmlaq_m_n_uv8hi (__a, __b, __c, __p); | |
9996 | } | |
9997 | ||
9998 | __extension__ extern __inline int8x16_t | |
9999 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10000 | __arm_vmlasq_m_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c, mve_pred16_t __p) | |
10001 | { | |
10002 | return __builtin_mve_vmlasq_m_n_sv16qi (__a, __b, __c, __p); | |
10003 | } | |
10004 | ||
10005 | __extension__ extern __inline int32x4_t | |
10006 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10007 | __arm_vmlasq_m_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c, mve_pred16_t __p) | |
10008 | { | |
10009 | return __builtin_mve_vmlasq_m_n_sv4si (__a, __b, __c, __p); | |
10010 | } | |
10011 | ||
10012 | __extension__ extern __inline int16x8_t | |
10013 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10014 | __arm_vmlasq_m_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c, mve_pred16_t __p) | |
10015 | { | |
10016 | return __builtin_mve_vmlasq_m_n_sv8hi (__a, __b, __c, __p); | |
10017 | } | |
10018 | ||
10019 | __extension__ extern __inline uint8x16_t | |
10020 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10021 | __arm_vmlasq_m_n_u8 (uint8x16_t __a, uint8x16_t __b, uint8_t __c, mve_pred16_t __p) | |
10022 | { | |
10023 | return __builtin_mve_vmlasq_m_n_uv16qi (__a, __b, __c, __p); | |
10024 | } | |
10025 | ||
10026 | __extension__ extern __inline uint32x4_t | |
10027 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10028 | __arm_vmlasq_m_n_u32 (uint32x4_t __a, uint32x4_t __b, uint32_t __c, mve_pred16_t __p) | |
10029 | { | |
10030 | return __builtin_mve_vmlasq_m_n_uv4si (__a, __b, __c, __p); | |
10031 | } | |
10032 | ||
10033 | __extension__ extern __inline uint16x8_t | |
10034 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10035 | __arm_vmlasq_m_n_u16 (uint16x8_t __a, uint16x8_t __b, uint16_t __c, mve_pred16_t __p) | |
10036 | { | |
10037 | return __builtin_mve_vmlasq_m_n_uv8hi (__a, __b, __c, __p); | |
10038 | } | |
10039 | ||
10040 | __extension__ extern __inline int32_t | |
10041 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10042 | __arm_vmlsdavaq_p_s8 (int32_t __a, int8x16_t __b, int8x16_t __c, mve_pred16_t __p) | |
10043 | { | |
10044 | return __builtin_mve_vmlsdavaq_p_sv16qi (__a, __b, __c, __p); | |
10045 | } | |
10046 | ||
10047 | __extension__ extern __inline int32_t | |
10048 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10049 | __arm_vmlsdavaq_p_s32 (int32_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p) | |
10050 | { | |
10051 | return __builtin_mve_vmlsdavaq_p_sv4si (__a, __b, __c, __p); | |
10052 | } | |
10053 | ||
10054 | __extension__ extern __inline int32_t | |
10055 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10056 | __arm_vmlsdavaq_p_s16 (int32_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p) | |
10057 | { | |
10058 | return __builtin_mve_vmlsdavaq_p_sv8hi (__a, __b, __c, __p); | |
10059 | } | |
10060 | ||
10061 | __extension__ extern __inline int32_t | |
10062 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10063 | __arm_vmlsdavaxq_p_s8 (int32_t __a, int8x16_t __b, int8x16_t __c, mve_pred16_t __p) | |
10064 | { | |
10065 | return __builtin_mve_vmlsdavaxq_p_sv16qi (__a, __b, __c, __p); | |
10066 | } | |
10067 | ||
10068 | __extension__ extern __inline int32_t | |
10069 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10070 | __arm_vmlsdavaxq_p_s32 (int32_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p) | |
10071 | { | |
10072 | return __builtin_mve_vmlsdavaxq_p_sv4si (__a, __b, __c, __p); | |
10073 | } | |
10074 | ||
10075 | __extension__ extern __inline int32_t | |
10076 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10077 | __arm_vmlsdavaxq_p_s16 (int32_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p) | |
10078 | { | |
10079 | return __builtin_mve_vmlsdavaxq_p_sv8hi (__a, __b, __c, __p); | |
10080 | } | |
10081 | ||
10082 | __extension__ extern __inline int8x16_t | |
10083 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10084 | __arm_vmulhq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
10085 | { | |
10086 | return __builtin_mve_vmulhq_m_sv16qi (__inactive, __a, __b, __p); | |
10087 | } | |
10088 | ||
10089 | __extension__ extern __inline int32x4_t | |
10090 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10091 | __arm_vmulhq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
10092 | { | |
10093 | return __builtin_mve_vmulhq_m_sv4si (__inactive, __a, __b, __p); | |
10094 | } | |
10095 | ||
10096 | __extension__ extern __inline int16x8_t | |
10097 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10098 | __arm_vmulhq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
10099 | { | |
10100 | return __builtin_mve_vmulhq_m_sv8hi (__inactive, __a, __b, __p); | |
10101 | } | |
10102 | ||
10103 | __extension__ extern __inline uint8x16_t | |
10104 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10105 | __arm_vmulhq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
10106 | { | |
10107 | return __builtin_mve_vmulhq_m_uv16qi (__inactive, __a, __b, __p); | |
10108 | } | |
10109 | ||
10110 | __extension__ extern __inline uint32x4_t | |
10111 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10112 | __arm_vmulhq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
10113 | { | |
10114 | return __builtin_mve_vmulhq_m_uv4si (__inactive, __a, __b, __p); | |
10115 | } | |
10116 | ||
10117 | __extension__ extern __inline uint16x8_t | |
10118 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10119 | __arm_vmulhq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
10120 | { | |
10121 | return __builtin_mve_vmulhq_m_uv8hi (__inactive, __a, __b, __p); | |
10122 | } | |
10123 | ||
10124 | __extension__ extern __inline int16x8_t | |
10125 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10126 | __arm_vmullbq_int_m_s8 (int16x8_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
10127 | { | |
10128 | return __builtin_mve_vmullbq_int_m_sv16qi (__inactive, __a, __b, __p); | |
10129 | } | |
10130 | ||
10131 | __extension__ extern __inline int64x2_t | |
10132 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10133 | __arm_vmullbq_int_m_s32 (int64x2_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
10134 | { | |
10135 | return __builtin_mve_vmullbq_int_m_sv4si (__inactive, __a, __b, __p); | |
10136 | } | |
10137 | ||
10138 | __extension__ extern __inline int32x4_t | |
10139 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10140 | __arm_vmullbq_int_m_s16 (int32x4_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
10141 | { | |
10142 | return __builtin_mve_vmullbq_int_m_sv8hi (__inactive, __a, __b, __p); | |
10143 | } | |
10144 | ||
10145 | __extension__ extern __inline uint16x8_t | |
10146 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10147 | __arm_vmullbq_int_m_u8 (uint16x8_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
10148 | { | |
10149 | return __builtin_mve_vmullbq_int_m_uv16qi (__inactive, __a, __b, __p); | |
10150 | } | |
10151 | ||
10152 | __extension__ extern __inline uint64x2_t | |
10153 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10154 | __arm_vmullbq_int_m_u32 (uint64x2_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
10155 | { | |
10156 | return __builtin_mve_vmullbq_int_m_uv4si (__inactive, __a, __b, __p); | |
10157 | } | |
10158 | ||
10159 | __extension__ extern __inline uint32x4_t | |
10160 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10161 | __arm_vmullbq_int_m_u16 (uint32x4_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
10162 | { | |
10163 | return __builtin_mve_vmullbq_int_m_uv8hi (__inactive, __a, __b, __p); | |
10164 | } | |
10165 | ||
10166 | __extension__ extern __inline int16x8_t | |
10167 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10168 | __arm_vmulltq_int_m_s8 (int16x8_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
10169 | { | |
10170 | return __builtin_mve_vmulltq_int_m_sv16qi (__inactive, __a, __b, __p); | |
10171 | } | |
10172 | ||
10173 | __extension__ extern __inline int64x2_t | |
10174 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10175 | __arm_vmulltq_int_m_s32 (int64x2_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
10176 | { | |
10177 | return __builtin_mve_vmulltq_int_m_sv4si (__inactive, __a, __b, __p); | |
10178 | } | |
10179 | ||
10180 | __extension__ extern __inline int32x4_t | |
10181 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10182 | __arm_vmulltq_int_m_s16 (int32x4_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
10183 | { | |
10184 | return __builtin_mve_vmulltq_int_m_sv8hi (__inactive, __a, __b, __p); | |
10185 | } | |
10186 | ||
10187 | __extension__ extern __inline uint16x8_t | |
10188 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10189 | __arm_vmulltq_int_m_u8 (uint16x8_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
10190 | { | |
10191 | return __builtin_mve_vmulltq_int_m_uv16qi (__inactive, __a, __b, __p); | |
10192 | } | |
10193 | ||
10194 | __extension__ extern __inline uint64x2_t | |
10195 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10196 | __arm_vmulltq_int_m_u32 (uint64x2_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
10197 | { | |
10198 | return __builtin_mve_vmulltq_int_m_uv4si (__inactive, __a, __b, __p); | |
10199 | } | |
10200 | ||
10201 | __extension__ extern __inline uint32x4_t | |
10202 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10203 | __arm_vmulltq_int_m_u16 (uint32x4_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
10204 | { | |
10205 | return __builtin_mve_vmulltq_int_m_uv8hi (__inactive, __a, __b, __p); | |
10206 | } | |
10207 | ||
10208 | __extension__ extern __inline int8x16_t | |
10209 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10210 | __arm_vmulq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p) | |
10211 | { | |
10212 | return __builtin_mve_vmulq_m_n_sv16qi (__inactive, __a, __b, __p); | |
10213 | } | |
10214 | ||
10215 | __extension__ extern __inline int32x4_t | |
10216 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10217 | __arm_vmulq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p) | |
10218 | { | |
10219 | return __builtin_mve_vmulq_m_n_sv4si (__inactive, __a, __b, __p); | |
10220 | } | |
10221 | ||
10222 | __extension__ extern __inline int16x8_t | |
10223 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10224 | __arm_vmulq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p) | |
10225 | { | |
10226 | return __builtin_mve_vmulq_m_n_sv8hi (__inactive, __a, __b, __p); | |
10227 | } | |
10228 | ||
10229 | __extension__ extern __inline uint8x16_t | |
10230 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10231 | __arm_vmulq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p) | |
10232 | { | |
10233 | return __builtin_mve_vmulq_m_n_uv16qi (__inactive, __a, __b, __p); | |
10234 | } | |
10235 | ||
10236 | __extension__ extern __inline uint32x4_t | |
10237 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10238 | __arm_vmulq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p) | |
10239 | { | |
10240 | return __builtin_mve_vmulq_m_n_uv4si (__inactive, __a, __b, __p); | |
10241 | } | |
10242 | ||
10243 | __extension__ extern __inline uint16x8_t | |
10244 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10245 | __arm_vmulq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p) | |
10246 | { | |
10247 | return __builtin_mve_vmulq_m_n_uv8hi (__inactive, __a, __b, __p); | |
10248 | } | |
10249 | ||
10250 | __extension__ extern __inline int8x16_t | |
10251 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10252 | __arm_vmulq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
10253 | { | |
10254 | return __builtin_mve_vmulq_m_sv16qi (__inactive, __a, __b, __p); | |
10255 | } | |
10256 | ||
10257 | __extension__ extern __inline int32x4_t | |
10258 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10259 | __arm_vmulq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
10260 | { | |
10261 | return __builtin_mve_vmulq_m_sv4si (__inactive, __a, __b, __p); | |
10262 | } | |
10263 | ||
10264 | __extension__ extern __inline int16x8_t | |
10265 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10266 | __arm_vmulq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
10267 | { | |
10268 | return __builtin_mve_vmulq_m_sv8hi (__inactive, __a, __b, __p); | |
10269 | } | |
10270 | ||
10271 | __extension__ extern __inline uint8x16_t | |
10272 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10273 | __arm_vmulq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
10274 | { | |
10275 | return __builtin_mve_vmulq_m_uv16qi (__inactive, __a, __b, __p); | |
10276 | } | |
10277 | ||
10278 | __extension__ extern __inline uint32x4_t | |
10279 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10280 | __arm_vmulq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
10281 | { | |
10282 | return __builtin_mve_vmulq_m_uv4si (__inactive, __a, __b, __p); | |
10283 | } | |
10284 | ||
10285 | __extension__ extern __inline uint16x8_t | |
10286 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10287 | __arm_vmulq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
10288 | { | |
10289 | return __builtin_mve_vmulq_m_uv8hi (__inactive, __a, __b, __p); | |
10290 | } | |
10291 | ||
10292 | __extension__ extern __inline int8x16_t | |
10293 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10294 | __arm_vornq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
10295 | { | |
10296 | return __builtin_mve_vornq_m_sv16qi (__inactive, __a, __b, __p); | |
10297 | } | |
10298 | ||
10299 | __extension__ extern __inline int32x4_t | |
10300 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10301 | __arm_vornq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
10302 | { | |
10303 | return __builtin_mve_vornq_m_sv4si (__inactive, __a, __b, __p); | |
10304 | } | |
10305 | ||
10306 | __extension__ extern __inline int16x8_t | |
10307 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10308 | __arm_vornq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
10309 | { | |
10310 | return __builtin_mve_vornq_m_sv8hi (__inactive, __a, __b, __p); | |
10311 | } | |
10312 | ||
10313 | __extension__ extern __inline uint8x16_t | |
10314 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10315 | __arm_vornq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
10316 | { | |
10317 | return __builtin_mve_vornq_m_uv16qi (__inactive, __a, __b, __p); | |
10318 | } | |
10319 | ||
10320 | __extension__ extern __inline uint32x4_t | |
10321 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10322 | __arm_vornq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
10323 | { | |
10324 | return __builtin_mve_vornq_m_uv4si (__inactive, __a, __b, __p); | |
10325 | } | |
10326 | ||
10327 | __extension__ extern __inline uint16x8_t | |
10328 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10329 | __arm_vornq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
10330 | { | |
10331 | return __builtin_mve_vornq_m_uv8hi (__inactive, __a, __b, __p); | |
10332 | } | |
10333 | ||
10334 | __extension__ extern __inline int8x16_t | |
10335 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10336 | __arm_vorrq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
10337 | { | |
10338 | return __builtin_mve_vorrq_m_sv16qi (__inactive, __a, __b, __p); | |
10339 | } | |
10340 | ||
10341 | __extension__ extern __inline int32x4_t | |
10342 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10343 | __arm_vorrq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
10344 | { | |
10345 | return __builtin_mve_vorrq_m_sv4si (__inactive, __a, __b, __p); | |
10346 | } | |
10347 | ||
10348 | __extension__ extern __inline int16x8_t | |
10349 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10350 | __arm_vorrq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
10351 | { | |
10352 | return __builtin_mve_vorrq_m_sv8hi (__inactive, __a, __b, __p); | |
10353 | } | |
10354 | ||
10355 | __extension__ extern __inline uint8x16_t | |
10356 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10357 | __arm_vorrq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
10358 | { | |
10359 | return __builtin_mve_vorrq_m_uv16qi (__inactive, __a, __b, __p); | |
10360 | } | |
10361 | ||
10362 | __extension__ extern __inline uint32x4_t | |
10363 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10364 | __arm_vorrq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
10365 | { | |
10366 | return __builtin_mve_vorrq_m_uv4si (__inactive, __a, __b, __p); | |
10367 | } | |
10368 | ||
10369 | __extension__ extern __inline uint16x8_t | |
10370 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10371 | __arm_vorrq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
10372 | { | |
10373 | return __builtin_mve_vorrq_m_uv8hi (__inactive, __a, __b, __p); | |
10374 | } | |
10375 | ||
10376 | __extension__ extern __inline int8x16_t | |
10377 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10378 | __arm_vqaddq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p) | |
10379 | { | |
10380 | return __builtin_mve_vqaddq_m_n_sv16qi (__inactive, __a, __b, __p); | |
10381 | } | |
10382 | ||
10383 | __extension__ extern __inline int32x4_t | |
10384 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10385 | __arm_vqaddq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p) | |
10386 | { | |
10387 | return __builtin_mve_vqaddq_m_n_sv4si (__inactive, __a, __b, __p); | |
10388 | } | |
10389 | ||
10390 | __extension__ extern __inline int16x8_t | |
10391 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10392 | __arm_vqaddq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p) | |
10393 | { | |
10394 | return __builtin_mve_vqaddq_m_n_sv8hi (__inactive, __a, __b, __p); | |
10395 | } | |
10396 | ||
10397 | __extension__ extern __inline uint8x16_t | |
10398 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10399 | __arm_vqaddq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p) | |
10400 | { | |
10401 | return __builtin_mve_vqaddq_m_n_uv16qi (__inactive, __a, __b, __p); | |
10402 | } | |
10403 | ||
10404 | __extension__ extern __inline uint32x4_t | |
10405 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10406 | __arm_vqaddq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p) | |
10407 | { | |
10408 | return __builtin_mve_vqaddq_m_n_uv4si (__inactive, __a, __b, __p); | |
10409 | } | |
10410 | ||
10411 | __extension__ extern __inline uint16x8_t | |
10412 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10413 | __arm_vqaddq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p) | |
10414 | { | |
10415 | return __builtin_mve_vqaddq_m_n_uv8hi (__inactive, __a, __b, __p); | |
10416 | } | |
10417 | ||
10418 | __extension__ extern __inline int8x16_t | |
10419 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10420 | __arm_vqaddq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
10421 | { | |
10422 | return __builtin_mve_vqaddq_m_sv16qi (__inactive, __a, __b, __p); | |
10423 | } | |
10424 | ||
10425 | __extension__ extern __inline int32x4_t | |
10426 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10427 | __arm_vqaddq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
10428 | { | |
10429 | return __builtin_mve_vqaddq_m_sv4si (__inactive, __a, __b, __p); | |
10430 | } | |
10431 | ||
10432 | __extension__ extern __inline int16x8_t | |
10433 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10434 | __arm_vqaddq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
10435 | { | |
10436 | return __builtin_mve_vqaddq_m_sv8hi (__inactive, __a, __b, __p); | |
10437 | } | |
10438 | ||
10439 | __extension__ extern __inline uint8x16_t | |
10440 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10441 | __arm_vqaddq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
10442 | { | |
10443 | return __builtin_mve_vqaddq_m_uv16qi (__inactive, __a, __b, __p); | |
10444 | } | |
10445 | ||
10446 | __extension__ extern __inline uint32x4_t | |
10447 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10448 | __arm_vqaddq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
10449 | { | |
10450 | return __builtin_mve_vqaddq_m_uv4si (__inactive, __a, __b, __p); | |
10451 | } | |
10452 | ||
10453 | __extension__ extern __inline uint16x8_t | |
10454 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10455 | __arm_vqaddq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
10456 | { | |
10457 | return __builtin_mve_vqaddq_m_uv8hi (__inactive, __a, __b, __p); | |
10458 | } | |
10459 | ||
10460 | __extension__ extern __inline int8x16_t | |
10461 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10462 | __arm_vqdmladhq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
10463 | { | |
10464 | return __builtin_mve_vqdmladhq_m_sv16qi (__inactive, __a, __b, __p); | |
10465 | } | |
10466 | ||
10467 | __extension__ extern __inline int32x4_t | |
10468 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10469 | __arm_vqdmladhq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
10470 | { | |
10471 | return __builtin_mve_vqdmladhq_m_sv4si (__inactive, __a, __b, __p); | |
10472 | } | |
10473 | ||
10474 | __extension__ extern __inline int16x8_t | |
10475 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10476 | __arm_vqdmladhq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
10477 | { | |
10478 | return __builtin_mve_vqdmladhq_m_sv8hi (__inactive, __a, __b, __p); | |
10479 | } | |
10480 | ||
10481 | __extension__ extern __inline int8x16_t | |
10482 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10483 | __arm_vqdmladhxq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
10484 | { | |
10485 | return __builtin_mve_vqdmladhxq_m_sv16qi (__inactive, __a, __b, __p); | |
10486 | } | |
10487 | ||
10488 | __extension__ extern __inline int32x4_t | |
10489 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10490 | __arm_vqdmladhxq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
10491 | { | |
10492 | return __builtin_mve_vqdmladhxq_m_sv4si (__inactive, __a, __b, __p); | |
10493 | } | |
10494 | ||
10495 | __extension__ extern __inline int16x8_t | |
10496 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10497 | __arm_vqdmladhxq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
10498 | { | |
10499 | return __builtin_mve_vqdmladhxq_m_sv8hi (__inactive, __a, __b, __p); | |
10500 | } | |
10501 | ||
10502 | __extension__ extern __inline int8x16_t | |
10503 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10504 | __arm_vqdmlahq_m_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c, mve_pred16_t __p) | |
10505 | { | |
10506 | return __builtin_mve_vqdmlahq_m_n_sv16qi (__a, __b, __c, __p); | |
10507 | } | |
10508 | ||
10509 | __extension__ extern __inline int32x4_t | |
10510 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10511 | __arm_vqdmlahq_m_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c, mve_pred16_t __p) | |
10512 | { | |
10513 | return __builtin_mve_vqdmlahq_m_n_sv4si (__a, __b, __c, __p); | |
10514 | } | |
10515 | ||
10516 | __extension__ extern __inline int16x8_t | |
10517 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10518 | __arm_vqdmlahq_m_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c, mve_pred16_t __p) | |
10519 | { | |
10520 | return __builtin_mve_vqdmlahq_m_n_sv8hi (__a, __b, __c, __p); | |
10521 | } | |
10522 | ||
10523 | __extension__ extern __inline int8x16_t | |
10524 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10525 | __arm_vqdmlsdhq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
10526 | { | |
10527 | return __builtin_mve_vqdmlsdhq_m_sv16qi (__inactive, __a, __b, __p); | |
10528 | } | |
10529 | ||
10530 | __extension__ extern __inline int32x4_t | |
10531 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10532 | __arm_vqdmlsdhq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
10533 | { | |
10534 | return __builtin_mve_vqdmlsdhq_m_sv4si (__inactive, __a, __b, __p); | |
10535 | } | |
10536 | ||
10537 | __extension__ extern __inline int16x8_t | |
10538 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10539 | __arm_vqdmlsdhq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
10540 | { | |
10541 | return __builtin_mve_vqdmlsdhq_m_sv8hi (__inactive, __a, __b, __p); | |
10542 | } | |
10543 | ||
10544 | __extension__ extern __inline int8x16_t | |
10545 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10546 | __arm_vqdmlsdhxq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
10547 | { | |
10548 | return __builtin_mve_vqdmlsdhxq_m_sv16qi (__inactive, __a, __b, __p); | |
10549 | } | |
10550 | ||
10551 | __extension__ extern __inline int32x4_t | |
10552 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10553 | __arm_vqdmlsdhxq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
10554 | { | |
10555 | return __builtin_mve_vqdmlsdhxq_m_sv4si (__inactive, __a, __b, __p); | |
10556 | } | |
10557 | ||
10558 | __extension__ extern __inline int16x8_t | |
10559 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10560 | __arm_vqdmlsdhxq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
10561 | { | |
10562 | return __builtin_mve_vqdmlsdhxq_m_sv8hi (__inactive, __a, __b, __p); | |
10563 | } | |
10564 | ||
10565 | __extension__ extern __inline int8x16_t | |
10566 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10567 | __arm_vqdmulhq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p) | |
10568 | { | |
10569 | return __builtin_mve_vqdmulhq_m_n_sv16qi (__inactive, __a, __b, __p); | |
10570 | } | |
10571 | ||
10572 | __extension__ extern __inline int32x4_t | |
10573 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10574 | __arm_vqdmulhq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p) | |
10575 | { | |
10576 | return __builtin_mve_vqdmulhq_m_n_sv4si (__inactive, __a, __b, __p); | |
10577 | } | |
10578 | ||
10579 | __extension__ extern __inline int16x8_t | |
10580 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10581 | __arm_vqdmulhq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p) | |
10582 | { | |
10583 | return __builtin_mve_vqdmulhq_m_n_sv8hi (__inactive, __a, __b, __p); | |
10584 | } | |
10585 | ||
10586 | __extension__ extern __inline int8x16_t | |
10587 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10588 | __arm_vqdmulhq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
10589 | { | |
10590 | return __builtin_mve_vqdmulhq_m_sv16qi (__inactive, __a, __b, __p); | |
10591 | } | |
10592 | ||
10593 | __extension__ extern __inline int32x4_t | |
10594 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10595 | __arm_vqdmulhq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
10596 | { | |
10597 | return __builtin_mve_vqdmulhq_m_sv4si (__inactive, __a, __b, __p); | |
10598 | } | |
10599 | ||
10600 | __extension__ extern __inline int16x8_t | |
10601 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10602 | __arm_vqdmulhq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
10603 | { | |
10604 | return __builtin_mve_vqdmulhq_m_sv8hi (__inactive, __a, __b, __p); | |
10605 | } | |
10606 | ||
10607 | __extension__ extern __inline int8x16_t | |
10608 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10609 | __arm_vqrdmladhq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
10610 | { | |
10611 | return __builtin_mve_vqrdmladhq_m_sv16qi (__inactive, __a, __b, __p); | |
10612 | } | |
10613 | ||
10614 | __extension__ extern __inline int32x4_t | |
10615 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10616 | __arm_vqrdmladhq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
10617 | { | |
10618 | return __builtin_mve_vqrdmladhq_m_sv4si (__inactive, __a, __b, __p); | |
10619 | } | |
10620 | ||
10621 | __extension__ extern __inline int16x8_t | |
10622 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10623 | __arm_vqrdmladhq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
10624 | { | |
10625 | return __builtin_mve_vqrdmladhq_m_sv8hi (__inactive, __a, __b, __p); | |
10626 | } | |
10627 | ||
10628 | __extension__ extern __inline int8x16_t | |
10629 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10630 | __arm_vqrdmladhxq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
10631 | { | |
10632 | return __builtin_mve_vqrdmladhxq_m_sv16qi (__inactive, __a, __b, __p); | |
10633 | } | |
10634 | ||
10635 | __extension__ extern __inline int32x4_t | |
10636 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10637 | __arm_vqrdmladhxq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
10638 | { | |
10639 | return __builtin_mve_vqrdmladhxq_m_sv4si (__inactive, __a, __b, __p); | |
10640 | } | |
10641 | ||
10642 | __extension__ extern __inline int16x8_t | |
10643 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10644 | __arm_vqrdmladhxq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
10645 | { | |
10646 | return __builtin_mve_vqrdmladhxq_m_sv8hi (__inactive, __a, __b, __p); | |
10647 | } | |
10648 | ||
10649 | __extension__ extern __inline int8x16_t | |
10650 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10651 | __arm_vqrdmlahq_m_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c, mve_pred16_t __p) | |
10652 | { | |
10653 | return __builtin_mve_vqrdmlahq_m_n_sv16qi (__a, __b, __c, __p); | |
10654 | } | |
10655 | ||
10656 | __extension__ extern __inline int32x4_t | |
10657 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10658 | __arm_vqrdmlahq_m_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c, mve_pred16_t __p) | |
10659 | { | |
10660 | return __builtin_mve_vqrdmlahq_m_n_sv4si (__a, __b, __c, __p); | |
10661 | } | |
10662 | ||
10663 | __extension__ extern __inline int16x8_t | |
10664 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10665 | __arm_vqrdmlahq_m_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c, mve_pred16_t __p) | |
10666 | { | |
10667 | return __builtin_mve_vqrdmlahq_m_n_sv8hi (__a, __b, __c, __p); | |
10668 | } | |
10669 | ||
10670 | __extension__ extern __inline int8x16_t | |
10671 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10672 | __arm_vqrdmlashq_m_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c, mve_pred16_t __p) | |
10673 | { | |
10674 | return __builtin_mve_vqrdmlashq_m_n_sv16qi (__a, __b, __c, __p); | |
10675 | } | |
10676 | ||
10677 | __extension__ extern __inline int32x4_t | |
10678 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10679 | __arm_vqrdmlashq_m_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c, mve_pred16_t __p) | |
10680 | { | |
10681 | return __builtin_mve_vqrdmlashq_m_n_sv4si (__a, __b, __c, __p); | |
10682 | } | |
10683 | ||
10684 | __extension__ extern __inline int16x8_t | |
10685 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10686 | __arm_vqrdmlashq_m_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c, mve_pred16_t __p) | |
10687 | { | |
10688 | return __builtin_mve_vqrdmlashq_m_n_sv8hi (__a, __b, __c, __p); | |
10689 | } | |
10690 | ||
10691 | __extension__ extern __inline int8x16_t | |
10692 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10693 | __arm_vqrdmlsdhq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
10694 | { | |
10695 | return __builtin_mve_vqrdmlsdhq_m_sv16qi (__inactive, __a, __b, __p); | |
10696 | } | |
10697 | ||
10698 | __extension__ extern __inline int32x4_t | |
10699 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10700 | __arm_vqrdmlsdhq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
10701 | { | |
10702 | return __builtin_mve_vqrdmlsdhq_m_sv4si (__inactive, __a, __b, __p); | |
10703 | } | |
10704 | ||
10705 | __extension__ extern __inline int16x8_t | |
10706 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10707 | __arm_vqrdmlsdhq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
10708 | { | |
10709 | return __builtin_mve_vqrdmlsdhq_m_sv8hi (__inactive, __a, __b, __p); | |
10710 | } | |
10711 | ||
10712 | __extension__ extern __inline int8x16_t | |
10713 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10714 | __arm_vqrdmlsdhxq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
10715 | { | |
10716 | return __builtin_mve_vqrdmlsdhxq_m_sv16qi (__inactive, __a, __b, __p); | |
10717 | } | |
10718 | ||
10719 | __extension__ extern __inline int32x4_t | |
10720 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10721 | __arm_vqrdmlsdhxq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
10722 | { | |
10723 | return __builtin_mve_vqrdmlsdhxq_m_sv4si (__inactive, __a, __b, __p); | |
10724 | } | |
10725 | ||
10726 | __extension__ extern __inline int16x8_t | |
10727 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10728 | __arm_vqrdmlsdhxq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
10729 | { | |
10730 | return __builtin_mve_vqrdmlsdhxq_m_sv8hi (__inactive, __a, __b, __p); | |
10731 | } | |
10732 | ||
10733 | __extension__ extern __inline int8x16_t | |
10734 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10735 | __arm_vqrdmulhq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p) | |
10736 | { | |
10737 | return __builtin_mve_vqrdmulhq_m_n_sv16qi (__inactive, __a, __b, __p); | |
10738 | } | |
10739 | ||
10740 | __extension__ extern __inline int32x4_t | |
10741 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10742 | __arm_vqrdmulhq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p) | |
10743 | { | |
10744 | return __builtin_mve_vqrdmulhq_m_n_sv4si (__inactive, __a, __b, __p); | |
10745 | } | |
10746 | ||
10747 | __extension__ extern __inline int16x8_t | |
10748 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10749 | __arm_vqrdmulhq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p) | |
10750 | { | |
10751 | return __builtin_mve_vqrdmulhq_m_n_sv8hi (__inactive, __a, __b, __p); | |
10752 | } | |
10753 | ||
10754 | __extension__ extern __inline int8x16_t | |
10755 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10756 | __arm_vqrdmulhq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
10757 | { | |
10758 | return __builtin_mve_vqrdmulhq_m_sv16qi (__inactive, __a, __b, __p); | |
10759 | } | |
10760 | ||
10761 | __extension__ extern __inline int32x4_t | |
10762 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10763 | __arm_vqrdmulhq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
10764 | { | |
10765 | return __builtin_mve_vqrdmulhq_m_sv4si (__inactive, __a, __b, __p); | |
10766 | } | |
10767 | ||
10768 | __extension__ extern __inline int16x8_t | |
10769 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10770 | __arm_vqrdmulhq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
10771 | { | |
10772 | return __builtin_mve_vqrdmulhq_m_sv8hi (__inactive, __a, __b, __p); | |
10773 | } | |
10774 | ||
10775 | __extension__ extern __inline int8x16_t | |
10776 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10777 | __arm_vqrshlq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
10778 | { | |
10779 | return __builtin_mve_vqrshlq_m_sv16qi (__inactive, __a, __b, __p); | |
10780 | } | |
10781 | ||
10782 | __extension__ extern __inline int32x4_t | |
10783 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10784 | __arm_vqrshlq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
10785 | { | |
10786 | return __builtin_mve_vqrshlq_m_sv4si (__inactive, __a, __b, __p); | |
10787 | } | |
10788 | ||
10789 | __extension__ extern __inline int16x8_t | |
10790 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10791 | __arm_vqrshlq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
10792 | { | |
10793 | return __builtin_mve_vqrshlq_m_sv8hi (__inactive, __a, __b, __p); | |
10794 | } | |
10795 | ||
10796 | __extension__ extern __inline uint8x16_t | |
10797 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10798 | __arm_vqrshlq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
10799 | { | |
10800 | return __builtin_mve_vqrshlq_m_uv16qi (__inactive, __a, __b, __p); | |
10801 | } | |
10802 | ||
10803 | __extension__ extern __inline uint32x4_t | |
10804 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10805 | __arm_vqrshlq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
10806 | { | |
10807 | return __builtin_mve_vqrshlq_m_uv4si (__inactive, __a, __b, __p); | |
10808 | } | |
10809 | ||
10810 | __extension__ extern __inline uint16x8_t | |
10811 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10812 | __arm_vqrshlq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
10813 | { | |
10814 | return __builtin_mve_vqrshlq_m_uv8hi (__inactive, __a, __b, __p); | |
10815 | } | |
10816 | ||
10817 | __extension__ extern __inline int8x16_t | |
10818 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10819 | __arm_vqshlq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p) | |
10820 | { | |
10821 | return __builtin_mve_vqshlq_m_n_sv16qi (__inactive, __a, __imm, __p); | |
10822 | } | |
10823 | ||
10824 | __extension__ extern __inline int32x4_t | |
10825 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10826 | __arm_vqshlq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, const int __imm, mve_pred16_t __p) | |
10827 | { | |
10828 | return __builtin_mve_vqshlq_m_n_sv4si (__inactive, __a, __imm, __p); | |
10829 | } | |
10830 | ||
10831 | __extension__ extern __inline int16x8_t | |
10832 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10833 | __arm_vqshlq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p) | |
10834 | { | |
10835 | return __builtin_mve_vqshlq_m_n_sv8hi (__inactive, __a, __imm, __p); | |
10836 | } | |
10837 | ||
10838 | __extension__ extern __inline uint8x16_t | |
10839 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10840 | __arm_vqshlq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, const int __imm, mve_pred16_t __p) | |
10841 | { | |
10842 | return __builtin_mve_vqshlq_m_n_uv16qi (__inactive, __a, __imm, __p); | |
10843 | } | |
10844 | ||
10845 | __extension__ extern __inline uint32x4_t | |
10846 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10847 | __arm_vqshlq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, const int __imm, mve_pred16_t __p) | |
10848 | { | |
10849 | return __builtin_mve_vqshlq_m_n_uv4si (__inactive, __a, __imm, __p); | |
10850 | } | |
10851 | ||
10852 | __extension__ extern __inline uint16x8_t | |
10853 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10854 | __arm_vqshlq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, const int __imm, mve_pred16_t __p) | |
10855 | { | |
10856 | return __builtin_mve_vqshlq_m_n_uv8hi (__inactive, __a, __imm, __p); | |
10857 | } | |
10858 | ||
10859 | __extension__ extern __inline int8x16_t | |
10860 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10861 | __arm_vqshlq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
10862 | { | |
10863 | return __builtin_mve_vqshlq_m_sv16qi (__inactive, __a, __b, __p); | |
10864 | } | |
10865 | ||
10866 | __extension__ extern __inline int32x4_t | |
10867 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10868 | __arm_vqshlq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
10869 | { | |
10870 | return __builtin_mve_vqshlq_m_sv4si (__inactive, __a, __b, __p); | |
10871 | } | |
10872 | ||
10873 | __extension__ extern __inline int16x8_t | |
10874 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10875 | __arm_vqshlq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
10876 | { | |
10877 | return __builtin_mve_vqshlq_m_sv8hi (__inactive, __a, __b, __p); | |
10878 | } | |
10879 | ||
10880 | __extension__ extern __inline uint8x16_t | |
10881 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10882 | __arm_vqshlq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
10883 | { | |
10884 | return __builtin_mve_vqshlq_m_uv16qi (__inactive, __a, __b, __p); | |
10885 | } | |
10886 | ||
10887 | __extension__ extern __inline uint32x4_t | |
10888 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10889 | __arm_vqshlq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
10890 | { | |
10891 | return __builtin_mve_vqshlq_m_uv4si (__inactive, __a, __b, __p); | |
10892 | } | |
10893 | ||
10894 | __extension__ extern __inline uint16x8_t | |
10895 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10896 | __arm_vqshlq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
10897 | { | |
10898 | return __builtin_mve_vqshlq_m_uv8hi (__inactive, __a, __b, __p); | |
10899 | } | |
10900 | ||
10901 | __extension__ extern __inline int8x16_t | |
10902 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10903 | __arm_vqsubq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p) | |
10904 | { | |
10905 | return __builtin_mve_vqsubq_m_n_sv16qi (__inactive, __a, __b, __p); | |
10906 | } | |
10907 | ||
10908 | __extension__ extern __inline int32x4_t | |
10909 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10910 | __arm_vqsubq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p) | |
10911 | { | |
10912 | return __builtin_mve_vqsubq_m_n_sv4si (__inactive, __a, __b, __p); | |
10913 | } | |
10914 | ||
10915 | __extension__ extern __inline int16x8_t | |
10916 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10917 | __arm_vqsubq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p) | |
10918 | { | |
10919 | return __builtin_mve_vqsubq_m_n_sv8hi (__inactive, __a, __b, __p); | |
10920 | } | |
10921 | ||
10922 | __extension__ extern __inline uint8x16_t | |
10923 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10924 | __arm_vqsubq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p) | |
10925 | { | |
10926 | return __builtin_mve_vqsubq_m_n_uv16qi (__inactive, __a, __b, __p); | |
10927 | } | |
10928 | ||
10929 | __extension__ extern __inline uint32x4_t | |
10930 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10931 | __arm_vqsubq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p) | |
10932 | { | |
10933 | return __builtin_mve_vqsubq_m_n_uv4si (__inactive, __a, __b, __p); | |
10934 | } | |
10935 | ||
10936 | __extension__ extern __inline uint16x8_t | |
10937 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10938 | __arm_vqsubq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p) | |
10939 | { | |
10940 | return __builtin_mve_vqsubq_m_n_uv8hi (__inactive, __a, __b, __p); | |
10941 | } | |
10942 | ||
10943 | __extension__ extern __inline int8x16_t | |
10944 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10945 | __arm_vqsubq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
10946 | { | |
10947 | return __builtin_mve_vqsubq_m_sv16qi (__inactive, __a, __b, __p); | |
10948 | } | |
10949 | ||
10950 | __extension__ extern __inline int32x4_t | |
10951 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10952 | __arm_vqsubq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
10953 | { | |
10954 | return __builtin_mve_vqsubq_m_sv4si (__inactive, __a, __b, __p); | |
10955 | } | |
10956 | ||
10957 | __extension__ extern __inline int16x8_t | |
10958 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10959 | __arm_vqsubq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
10960 | { | |
10961 | return __builtin_mve_vqsubq_m_sv8hi (__inactive, __a, __b, __p); | |
10962 | } | |
10963 | ||
10964 | __extension__ extern __inline uint8x16_t | |
10965 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10966 | __arm_vqsubq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
10967 | { | |
10968 | return __builtin_mve_vqsubq_m_uv16qi (__inactive, __a, __b, __p); | |
10969 | } | |
10970 | ||
10971 | __extension__ extern __inline uint32x4_t | |
10972 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10973 | __arm_vqsubq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
10974 | { | |
10975 | return __builtin_mve_vqsubq_m_uv4si (__inactive, __a, __b, __p); | |
10976 | } | |
10977 | ||
10978 | __extension__ extern __inline uint16x8_t | |
10979 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10980 | __arm_vqsubq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
10981 | { | |
10982 | return __builtin_mve_vqsubq_m_uv8hi (__inactive, __a, __b, __p); | |
10983 | } | |
10984 | ||
10985 | __extension__ extern __inline int8x16_t | |
10986 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10987 | __arm_vrhaddq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
10988 | { | |
10989 | return __builtin_mve_vrhaddq_m_sv16qi (__inactive, __a, __b, __p); | |
10990 | } | |
10991 | ||
10992 | __extension__ extern __inline int32x4_t | |
10993 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10994 | __arm_vrhaddq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
10995 | { | |
10996 | return __builtin_mve_vrhaddq_m_sv4si (__inactive, __a, __b, __p); | |
10997 | } | |
10998 | ||
10999 | __extension__ extern __inline int16x8_t | |
11000 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11001 | __arm_vrhaddq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
11002 | { | |
11003 | return __builtin_mve_vrhaddq_m_sv8hi (__inactive, __a, __b, __p); | |
11004 | } | |
11005 | ||
11006 | __extension__ extern __inline uint8x16_t | |
11007 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11008 | __arm_vrhaddq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
11009 | { | |
11010 | return __builtin_mve_vrhaddq_m_uv16qi (__inactive, __a, __b, __p); | |
11011 | } | |
11012 | ||
11013 | __extension__ extern __inline uint32x4_t | |
11014 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11015 | __arm_vrhaddq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
11016 | { | |
11017 | return __builtin_mve_vrhaddq_m_uv4si (__inactive, __a, __b, __p); | |
11018 | } | |
11019 | ||
11020 | __extension__ extern __inline uint16x8_t | |
11021 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11022 | __arm_vrhaddq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
11023 | { | |
11024 | return __builtin_mve_vrhaddq_m_uv8hi (__inactive, __a, __b, __p); | |
11025 | } | |
11026 | ||
11027 | __extension__ extern __inline int8x16_t | |
11028 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11029 | __arm_vrmulhq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
11030 | { | |
11031 | return __builtin_mve_vrmulhq_m_sv16qi (__inactive, __a, __b, __p); | |
11032 | } | |
11033 | ||
11034 | __extension__ extern __inline int32x4_t | |
11035 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11036 | __arm_vrmulhq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
11037 | { | |
11038 | return __builtin_mve_vrmulhq_m_sv4si (__inactive, __a, __b, __p); | |
11039 | } | |
11040 | ||
11041 | __extension__ extern __inline int16x8_t | |
11042 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11043 | __arm_vrmulhq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
11044 | { | |
11045 | return __builtin_mve_vrmulhq_m_sv8hi (__inactive, __a, __b, __p); | |
11046 | } | |
11047 | ||
11048 | __extension__ extern __inline uint8x16_t | |
11049 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11050 | __arm_vrmulhq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
11051 | { | |
11052 | return __builtin_mve_vrmulhq_m_uv16qi (__inactive, __a, __b, __p); | |
11053 | } | |
11054 | ||
11055 | __extension__ extern __inline uint32x4_t | |
11056 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11057 | __arm_vrmulhq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
11058 | { | |
11059 | return __builtin_mve_vrmulhq_m_uv4si (__inactive, __a, __b, __p); | |
11060 | } | |
11061 | ||
11062 | __extension__ extern __inline uint16x8_t | |
11063 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11064 | __arm_vrmulhq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
11065 | { | |
11066 | return __builtin_mve_vrmulhq_m_uv8hi (__inactive, __a, __b, __p); | |
11067 | } | |
11068 | ||
11069 | __extension__ extern __inline int8x16_t | |
11070 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11071 | __arm_vrshlq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
11072 | { | |
11073 | return __builtin_mve_vrshlq_m_sv16qi (__inactive, __a, __b, __p); | |
11074 | } | |
11075 | ||
11076 | __extension__ extern __inline int32x4_t | |
11077 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11078 | __arm_vrshlq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
11079 | { | |
11080 | return __builtin_mve_vrshlq_m_sv4si (__inactive, __a, __b, __p); | |
11081 | } | |
11082 | ||
11083 | __extension__ extern __inline int16x8_t | |
11084 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11085 | __arm_vrshlq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
11086 | { | |
11087 | return __builtin_mve_vrshlq_m_sv8hi (__inactive, __a, __b, __p); | |
11088 | } | |
11089 | ||
11090 | __extension__ extern __inline uint8x16_t | |
11091 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11092 | __arm_vrshlq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
11093 | { | |
11094 | return __builtin_mve_vrshlq_m_uv16qi (__inactive, __a, __b, __p); | |
11095 | } | |
11096 | ||
11097 | __extension__ extern __inline uint32x4_t | |
11098 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11099 | __arm_vrshlq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
11100 | { | |
11101 | return __builtin_mve_vrshlq_m_uv4si (__inactive, __a, __b, __p); | |
11102 | } | |
11103 | ||
11104 | __extension__ extern __inline uint16x8_t | |
11105 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11106 | __arm_vrshlq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
11107 | { | |
11108 | return __builtin_mve_vrshlq_m_uv8hi (__inactive, __a, __b, __p); | |
11109 | } | |
11110 | ||
11111 | __extension__ extern __inline int8x16_t | |
11112 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11113 | __arm_vrshrq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p) | |
11114 | { | |
11115 | return __builtin_mve_vrshrq_m_n_sv16qi (__inactive, __a, __imm, __p); | |
11116 | } | |
11117 | ||
11118 | __extension__ extern __inline int32x4_t | |
11119 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11120 | __arm_vrshrq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, const int __imm, mve_pred16_t __p) | |
11121 | { | |
11122 | return __builtin_mve_vrshrq_m_n_sv4si (__inactive, __a, __imm, __p); | |
11123 | } | |
11124 | ||
11125 | __extension__ extern __inline int16x8_t | |
11126 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11127 | __arm_vrshrq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p) | |
11128 | { | |
11129 | return __builtin_mve_vrshrq_m_n_sv8hi (__inactive, __a, __imm, __p); | |
11130 | } | |
11131 | ||
11132 | __extension__ extern __inline uint8x16_t | |
11133 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11134 | __arm_vrshrq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, const int __imm, mve_pred16_t __p) | |
11135 | { | |
11136 | return __builtin_mve_vrshrq_m_n_uv16qi (__inactive, __a, __imm, __p); | |
11137 | } | |
11138 | ||
11139 | __extension__ extern __inline uint32x4_t | |
11140 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11141 | __arm_vrshrq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, const int __imm, mve_pred16_t __p) | |
11142 | { | |
11143 | return __builtin_mve_vrshrq_m_n_uv4si (__inactive, __a, __imm, __p); | |
11144 | } | |
11145 | ||
11146 | __extension__ extern __inline uint16x8_t | |
11147 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11148 | __arm_vrshrq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, const int __imm, mve_pred16_t __p) | |
11149 | { | |
11150 | return __builtin_mve_vrshrq_m_n_uv8hi (__inactive, __a, __imm, __p); | |
11151 | } | |
11152 | ||
/* Predicated (merging) shift left by immediate (vshlq_m_n family).
   Active lanes of __a are shifted left by __imm; lanes whose predicate
   bit in __p is clear are taken from __inactive.  One wrapper per
   element type, each forwarding to the matching MVE builtin.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshlq_m_n_sv16qi (__inactive, __a, __imm, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshlq_m_n_sv4si (__inactive, __a, __imm, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshlq_m_n_sv8hi (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshlq_m_n_uv16qi (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshlq_m_n_uv4si (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshlq_m_n_uv8hi (__inactive, __a, __imm, __p);
}
11194 | ||
/* Predicated (merging) shift right by immediate (vshrq_m_n family).
   Active lanes of __a are shifted right by __imm (arithmetic for the
   signed variants, logical for the unsigned ones — per the ACLE spec);
   inactive lanes come from __inactive.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshrq_m_n_sv16qi (__inactive, __a, __imm, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshrq_m_n_sv4si (__inactive, __a, __imm, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshrq_m_n_sv8hi (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshrq_m_n_uv16qi (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshrq_m_n_uv4si (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshrq_m_n_uv8hi (__inactive, __a, __imm, __p);
}
11236 | ||
/* Predicated shift-left-and-insert (vsliq_m_n family, VSLI).  Per the
   ACLE spec: each active lane of __b is shifted left by __imm and
   inserted into __a, preserving the low __imm bits of __a; lanes whose
   predicate bit in __p is clear keep the original value of __a (note
   there is no separate __inactive operand for this insert form).  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsliq_m_n_s8 (int8x16_t __a, int8x16_t __b, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vsliq_m_n_sv16qi (__a, __b, __imm, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsliq_m_n_s32 (int32x4_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vsliq_m_n_sv4si (__a, __b, __imm, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsliq_m_n_s16 (int16x8_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vsliq_m_n_sv8hi (__a, __b, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsliq_m_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vsliq_m_n_uv16qi (__a, __b, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsliq_m_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vsliq_m_n_uv4si (__a, __b, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsliq_m_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vsliq_m_n_uv8hi (__a, __b, __imm, __p);
}
11278 | ||
/* Predicated (merging) vector-minus-scalar subtract (vsubq_m_n family).
   Active lanes compute __a[i] - __b (scalar __b broadcast to all
   lanes); inactive lanes come from __inactive.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vsubq_m_n_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vsubq_m_n_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vsubq_m_n_sv8hi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vsubq_m_n_uv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vsubq_m_n_uv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vsubq_m_n_uv8hi (__inactive, __a, __b, __p);
}
11320 | ||
f2170a37 SP |
/* Predicated multiply-accumulate-long across vector with 64-bit scalar
   accumulator (vmlaldavaq_p family).  Per the ACLE spec: sums the lane
   products of __b and __c for active lanes and adds the running total
   to __a; inactive lanes contribute nothing.  */
__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaldavaq_p_s32 (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmlaldavaq_p_sv4si (__a, __b, __c, __p);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaldavaq_p_s16 (int64_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmlaldavaq_p_sv8hi (__a, __b, __c, __p);
}

__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaldavaq_p_u32 (uint64_t __a, uint32x4_t __b, uint32x4_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmlaldavaq_p_uv4si (__a, __b, __c, __p);
}

__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaldavaq_p_u16 (uint64_t __a, uint16x8_t __b, uint16x8_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmlaldavaq_p_uv8hi (__a, __b, __c, __p);
}
11348 | ||
/* Predicated multiply-accumulate-long across vector, exchanged-pair
   ("x") variant with 64-bit scalar accumulator (vmlaldavaxq_p family).
   Same contract as vmlaldavaq_p but pairs lanes of __b with the
   exchanged lanes of __c, per the ACLE spec.  */
__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaldavaxq_p_s32 (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmlaldavaxq_p_sv4si (__a, __b, __c, __p);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaldavaxq_p_s16 (int64_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmlaldavaxq_p_sv8hi (__a, __b, __c, __p);
}

__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaldavaxq_p_u32 (uint64_t __a, uint32x4_t __b, uint32x4_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmlaldavaxq_p_uv4si (__a, __b, __c, __p);
}

__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaldavaxq_p_u16 (uint64_t __a, uint16x8_t __b, uint16x8_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmlaldavaxq_p_uv8hi (__a, __b, __c, __p);
}
11376 | ||
/* Predicated multiply-subtract-long across vector with 64-bit
   accumulator (vmlsldavaq_p, signed only — no unsigned form exists in
   MVE).  Forwards straight to the matching builtin.  */
__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsldavaq_p_s32 (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmlsldavaq_p_sv4si (__a, __b, __c, __p);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsldavaq_p_s16 (int64_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmlsldavaq_p_sv8hi (__a, __b, __c, __p);
}
11390 | ||
/* Predicated multiply-subtract-long across vector, exchanged-pair
   variant (vmlsldavaxq_p, signed only).  Forwards straight to the
   matching builtin.  */
__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsldavaxq_p_s32 (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmlsldavaxq_p_sv4si (__a, __b, __c, __p);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsldavaxq_p_s16 (int64_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmlsldavaxq_p_sv8hi (__a, __b, __c, __p);
}
11404 | ||
/* Predicated (merging) polynomial multiply-long of the BOTTOM (even)
   lanes (vmullbq_poly_m).  Result lanes are twice the input element
   width; inactive lanes come from __inactive.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_poly_m_p8 (uint16x8_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmullbq_poly_m_pv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_poly_m_p16 (uint32x4_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmullbq_poly_m_pv8hi (__inactive, __a, __b, __p);
}
11418 | ||
/* Predicated (merging) polynomial multiply-long of the TOP (odd)
   lanes (vmulltq_poly_m) — companion to vmullbq_poly_m above in the
   file's b/t (bottom/top) naming convention.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_poly_m_p8 (uint16x8_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulltq_poly_m_pv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_poly_m_p16 (uint32x4_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulltq_poly_m_pv8hi (__inactive, __a, __b, __p);
}
11432 | ||
/* Predicated (merging) saturating doubling multiply-long, BOTTOM
   lanes (vqdmullbq_m).  "_n" forms broadcast scalar __b; the vector
   forms multiply lane-wise.  Results are double-width (32->64,
   16->32); inactive lanes come from __inactive.  */
__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmullbq_m_n_s32 (int64x2_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqdmullbq_m_n_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmullbq_m_n_s16 (int32x4_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqdmullbq_m_n_sv8hi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmullbq_m_s32 (int64x2_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqdmullbq_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmullbq_m_s16 (int32x4_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqdmullbq_m_sv8hi (__inactive, __a, __b, __p);
}
11460 | ||
/* Predicated (merging) saturating doubling multiply-long, TOP lanes
   (vqdmulltq_m) — companion to vqdmullbq_m above; same scalar ("_n")
   and vector forms.  */
__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulltq_m_n_s32 (int64x2_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqdmulltq_m_n_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulltq_m_n_s16 (int32x4_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqdmulltq_m_n_sv8hi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulltq_m_s32 (int64x2_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqdmulltq_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulltq_m_s16 (int32x4_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqdmulltq_m_sv8hi (__inactive, __a, __b, __p);
}
11488 | ||
/* Predicated saturating rounding shift right and narrow, writing the
   BOTTOM (even) halves of destination __a (vqrshrnbq_m_n).  __b holds
   double-width lanes shifted right by __imm; lanes whose predicate bit
   in __p is clear leave __a unchanged.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshrnbq_m_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vqrshrnbq_m_n_sv4si (__a, __b, __imm, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshrnbq_m_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vqrshrnbq_m_n_sv8hi (__a, __b, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshrnbq_m_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vqrshrnbq_m_n_uv4si (__a, __b, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshrnbq_m_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vqrshrnbq_m_n_uv8hi (__a, __b, __imm, __p);
}
11516 | ||
/* Predicated saturating rounding shift right and narrow, writing the
   TOP (odd) halves of destination __a (vqrshrntq_m_n) — companion to
   vqrshrnbq_m_n above.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshrntq_m_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vqrshrntq_m_n_sv4si (__a, __b, __imm, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshrntq_m_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vqrshrntq_m_n_sv8hi (__a, __b, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshrntq_m_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vqrshrntq_m_n_uv4si (__a, __b, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshrntq_m_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vqrshrntq_m_n_uv8hi (__a, __b, __imm, __p);
}
11544 | ||
/* Predicated saturating rounding shift right, UNSIGNED narrow
   (signed double-width input, unsigned half-width result): the "unb"
   forms write the bottom halves of __a and the "unt" forms the top
   halves; predicate-clear lanes leave __a unchanged.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshrunbq_m_n_s32 (uint16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vqrshrunbq_m_n_sv4si (__a, __b, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshrunbq_m_n_s16 (uint8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vqrshrunbq_m_n_sv8hi (__a, __b, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshruntq_m_n_s32 (uint16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vqrshruntq_m_n_sv4si (__a, __b, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshruntq_m_n_s16 (uint8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vqrshruntq_m_n_sv8hi (__a, __b, __imm, __p);
}
11572 | ||
/* Predicated saturating (non-rounding) shift right and narrow,
   BOTTOM halves of __a (vqshrnbq_m_n).  Same shape as vqrshrnbq_m_n
   but without rounding, per the r/no-r naming convention.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshrnbq_m_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vqshrnbq_m_n_sv4si (__a, __b, __imm, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshrnbq_m_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vqshrnbq_m_n_sv8hi (__a, __b, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshrnbq_m_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vqshrnbq_m_n_uv4si (__a, __b, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshrnbq_m_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vqshrnbq_m_n_uv8hi (__a, __b, __imm, __p);
}
11600 | ||
/* Predicated saturating (non-rounding) shift right and narrow, TOP
   halves of __a (vqshrntq_m_n) — companion to vqshrnbq_m_n above.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshrntq_m_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vqshrntq_m_n_sv4si (__a, __b, __imm, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshrntq_m_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vqshrntq_m_n_sv8hi (__a, __b, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshrntq_m_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vqshrntq_m_n_uv4si (__a, __b, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshrntq_m_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vqshrntq_m_n_uv8hi (__a, __b, __imm, __p);
}
11628 | ||
/* Predicated saturating (non-rounding) shift right, UNSIGNED narrow:
   signed double-width input, unsigned half-width result.  "unb" forms
   write the bottom halves of __a, "unt" forms the top halves.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshrunbq_m_n_s32 (uint16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vqshrunbq_m_n_sv4si (__a, __b, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshrunbq_m_n_s16 (uint8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vqshrunbq_m_n_sv8hi (__a, __b, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshruntq_m_n_s32 (uint16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vqshruntq_m_n_sv4si (__a, __b, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshruntq_m_n_s16 (uint8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vqshruntq_m_n_sv8hi (__a, __b, __imm, __p);
}
11656 | ||
/* Predicated rounding multiply-accumulate-long-high across vector
   with 64-bit scalar accumulator (vrmlaldavha*/vrmlsldavha* family,
   32-bit elements only).  "x" = exchanged lane pairing, "s" in
   vrmlsldavha = subtract instead of add — see the ACLE MVE spec.  */
__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmlaldavhaq_p_s32 (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vrmlaldavhaq_p_sv4si (__a, __b, __c, __p);
}

__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmlaldavhaq_p_u32 (uint64_t __a, uint32x4_t __b, uint32x4_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vrmlaldavhaq_p_uv4si (__a, __b, __c, __p);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmlaldavhaxq_p_s32 (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vrmlaldavhaxq_p_sv4si (__a, __b, __c, __p);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmlsldavhaq_p_s32 (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vrmlsldavhaq_p_sv4si (__a, __b, __c, __p);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmlsldavhaxq_p_s32 (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vrmlsldavhaxq_p_sv4si (__a, __b, __c, __p);
}
11691 | ||
/* Predicated rounding (non-saturating) shift right and narrow,
   BOTTOM halves of destination __a (vrshrnbq_m_n).  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrnbq_m_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vrshrnbq_m_n_sv4si (__a, __b, __imm, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrnbq_m_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vrshrnbq_m_n_sv8hi (__a, __b, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrnbq_m_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vrshrnbq_m_n_uv4si (__a, __b, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrnbq_m_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vrshrnbq_m_n_uv8hi (__a, __b, __imm, __p);
}
11719 | ||
/* Predicated rounding (non-saturating) shift right and narrow, TOP
   halves of destination __a (vrshrntq_m_n) — companion to
   vrshrnbq_m_n above.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrntq_m_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vrshrntq_m_n_sv4si (__a, __b, __imm, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrntq_m_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vrshrntq_m_n_sv8hi (__a, __b, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrntq_m_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vrshrntq_m_n_uv4si (__a, __b, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrntq_m_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vrshrntq_m_n_uv8hi (__a, __b, __imm, __p);
}
11747 | ||
/* Predicated (merging) shift left long of the BOTTOM (even) lanes
   (vshllbq_m_n): widens each selected lane to double width and shifts
   left by __imm; inactive lanes come from __inactive.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshllbq_m_n_s8 (int16x8_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshllbq_m_n_sv16qi (__inactive, __a, __imm, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshllbq_m_n_s16 (int32x4_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshllbq_m_n_sv8hi (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshllbq_m_n_u8 (uint16x8_t __inactive, uint8x16_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshllbq_m_n_uv16qi (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshllbq_m_n_u16 (uint32x4_t __inactive, uint16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshllbq_m_n_uv8hi (__inactive, __a, __imm, __p);
}
11775 | ||
/* Predicated (merging) shift left long of the TOP (odd) lanes
   (vshlltq_m_n) — companion to vshllbq_m_n above.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlltq_m_n_s8 (int16x8_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshlltq_m_n_sv16qi (__inactive, __a, __imm, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlltq_m_n_s16 (int32x4_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshlltq_m_n_sv8hi (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlltq_m_n_u8 (uint16x8_t __inactive, uint8x16_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshlltq_m_n_uv16qi (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlltq_m_n_u16 (uint32x4_t __inactive, uint16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshlltq_m_n_uv8hi (__inactive, __a, __imm, __p);
}
11803 | ||
/* Predicated plain (non-rounding, non-saturating) shift right and
   narrow, BOTTOM halves of destination __a (vshrnbq_m_n).  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrnbq_m_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshrnbq_m_n_sv4si (__a, __b, __imm, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrnbq_m_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshrnbq_m_n_sv8hi (__a, __b, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrnbq_m_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshrnbq_m_n_uv4si (__a, __b, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrnbq_m_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshrnbq_m_n_uv8hi (__a, __b, __imm, __p);
}
11831 | ||
11832 | __extension__ extern __inline int16x8_t | |
11833 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11834 | __arm_vshrntq_m_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p) | |
11835 | { | |
11836 | return __builtin_mve_vshrntq_m_n_sv4si (__a, __b, __imm, __p); | |
11837 | } | |
11838 | ||
11839 | __extension__ extern __inline int8x16_t | |
11840 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11841 | __arm_vshrntq_m_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p) | |
11842 | { | |
11843 | return __builtin_mve_vshrntq_m_n_sv8hi (__a, __b, __imm, __p); | |
11844 | } | |
11845 | ||
11846 | __extension__ extern __inline uint16x8_t | |
11847 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11848 | __arm_vshrntq_m_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p) | |
11849 | { | |
11850 | return __builtin_mve_vshrntq_m_n_uv4si (__a, __b, __imm, __p); | |
11851 | } | |
11852 | ||
11853 | __extension__ extern __inline uint8x16_t | |
11854 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11855 | __arm_vshrntq_m_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p) | |
11856 | { | |
11857 | return __builtin_mve_vshrntq_m_n_uv8hi (__a, __b, __imm, __p); | |
11858 | } | |
11859 | ||
4ff68575 SP |
11860 | __extension__ extern __inline void |
11861 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11862 | __arm_vstrbq_scatter_offset_s8 (int8_t * __base, uint8x16_t __offset, int8x16_t __value) | |
11863 | { | |
11864 | __builtin_mve_vstrbq_scatter_offset_sv16qi ((__builtin_neon_qi *) __base, __offset, __value); | |
11865 | } | |
11866 | ||
11867 | __extension__ extern __inline void | |
11868 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11869 | __arm_vstrbq_scatter_offset_s32 (int8_t * __base, uint32x4_t __offset, int32x4_t __value) | |
11870 | { | |
11871 | __builtin_mve_vstrbq_scatter_offset_sv4si ((__builtin_neon_qi *) __base, __offset, __value); | |
11872 | } | |
11873 | ||
11874 | __extension__ extern __inline void | |
11875 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11876 | __arm_vstrbq_scatter_offset_s16 (int8_t * __base, uint16x8_t __offset, int16x8_t __value) | |
11877 | { | |
11878 | __builtin_mve_vstrbq_scatter_offset_sv8hi ((__builtin_neon_qi *) __base, __offset, __value); | |
11879 | } | |
11880 | ||
11881 | __extension__ extern __inline void | |
11882 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11883 | __arm_vstrbq_scatter_offset_u8 (uint8_t * __base, uint8x16_t __offset, uint8x16_t __value) | |
11884 | { | |
11885 | __builtin_mve_vstrbq_scatter_offset_uv16qi ((__builtin_neon_qi *) __base, __offset, __value); | |
11886 | } | |
11887 | ||
11888 | __extension__ extern __inline void | |
11889 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11890 | __arm_vstrbq_scatter_offset_u32 (uint8_t * __base, uint32x4_t __offset, uint32x4_t __value) | |
11891 | { | |
11892 | __builtin_mve_vstrbq_scatter_offset_uv4si ((__builtin_neon_qi *) __base, __offset, __value); | |
11893 | } | |
11894 | ||
11895 | __extension__ extern __inline void | |
11896 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11897 | __arm_vstrbq_scatter_offset_u16 (uint8_t * __base, uint16x8_t __offset, uint16x8_t __value) | |
11898 | { | |
11899 | __builtin_mve_vstrbq_scatter_offset_uv8hi ((__builtin_neon_qi *) __base, __offset, __value); | |
11900 | } | |
11901 | ||
11902 | __extension__ extern __inline void | |
11903 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11904 | __arm_vstrbq_s8 (int8_t * __addr, int8x16_t __value) | |
11905 | { | |
11906 | __builtin_mve_vstrbq_sv16qi ((__builtin_neon_qi *) __addr, __value); | |
11907 | } | |
11908 | ||
11909 | __extension__ extern __inline void | |
11910 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11911 | __arm_vstrbq_s32 (int8_t * __addr, int32x4_t __value) | |
11912 | { | |
11913 | __builtin_mve_vstrbq_sv4si ((__builtin_neon_qi *) __addr, __value); | |
11914 | } | |
11915 | ||
11916 | __extension__ extern __inline void | |
11917 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11918 | __arm_vstrbq_s16 (int8_t * __addr, int16x8_t __value) | |
11919 | { | |
11920 | __builtin_mve_vstrbq_sv8hi ((__builtin_neon_qi *) __addr, __value); | |
11921 | } | |
11922 | ||
11923 | __extension__ extern __inline void | |
11924 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11925 | __arm_vstrbq_u8 (uint8_t * __addr, uint8x16_t __value) | |
11926 | { | |
11927 | __builtin_mve_vstrbq_uv16qi ((__builtin_neon_qi *) __addr, __value); | |
11928 | } | |
11929 | ||
11930 | __extension__ extern __inline void | |
11931 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11932 | __arm_vstrbq_u32 (uint8_t * __addr, uint32x4_t __value) | |
11933 | { | |
11934 | __builtin_mve_vstrbq_uv4si ((__builtin_neon_qi *) __addr, __value); | |
11935 | } | |
11936 | ||
11937 | __extension__ extern __inline void | |
11938 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11939 | __arm_vstrbq_u16 (uint8_t * __addr, uint16x8_t __value) | |
11940 | { | |
11941 | __builtin_mve_vstrbq_uv8hi ((__builtin_neon_qi *) __addr, __value); | |
11942 | } | |
11943 | ||
11944 | __extension__ extern __inline void | |
11945 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11946 | __arm_vstrwq_scatter_base_s32 (uint32x4_t __addr, const int __offset, int32x4_t __value) | |
11947 | { | |
11948 | __builtin_mve_vstrwq_scatter_base_sv4si (__addr, __offset, __value); | |
11949 | } | |
11950 | ||
11951 | __extension__ extern __inline void | |
11952 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11953 | __arm_vstrwq_scatter_base_u32 (uint32x4_t __addr, const int __offset, uint32x4_t __value) | |
11954 | { | |
11955 | __builtin_mve_vstrwq_scatter_base_uv4si (__addr, __offset, __value); | |
11956 | } | |
535a8645 SP |
11957 | |
11958 | __extension__ extern __inline uint8x16_t | |
11959 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11960 | __arm_vldrbq_gather_offset_u8 (uint8_t const * __base, uint8x16_t __offset) | |
11961 | { | |
11962 | return __builtin_mve_vldrbq_gather_offset_uv16qi ((__builtin_neon_qi *) __base, __offset); | |
11963 | } | |
11964 | ||
11965 | __extension__ extern __inline int8x16_t | |
11966 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11967 | __arm_vldrbq_gather_offset_s8 (int8_t const * __base, uint8x16_t __offset) | |
11968 | { | |
11969 | return __builtin_mve_vldrbq_gather_offset_sv16qi ((__builtin_neon_qi *) __base, __offset); | |
11970 | } | |
11971 | ||
11972 | __extension__ extern __inline int8x16_t | |
11973 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11974 | __arm_vldrbq_s8 (int8_t const * __base) | |
11975 | { | |
11976 | return __builtin_mve_vldrbq_sv16qi ((__builtin_neon_qi *) __base); | |
11977 | } | |
11978 | ||
11979 | __extension__ extern __inline uint8x16_t | |
11980 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11981 | __arm_vldrbq_u8 (uint8_t const * __base) | |
11982 | { | |
11983 | return __builtin_mve_vldrbq_uv16qi ((__builtin_neon_qi *) __base); | |
11984 | } | |
11985 | ||
11986 | __extension__ extern __inline uint16x8_t | |
11987 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11988 | __arm_vldrbq_gather_offset_u16 (uint8_t const * __base, uint16x8_t __offset) | |
11989 | { | |
11990 | return __builtin_mve_vldrbq_gather_offset_uv8hi ((__builtin_neon_qi *) __base, __offset); | |
11991 | } | |
11992 | ||
11993 | __extension__ extern __inline int16x8_t | |
11994 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11995 | __arm_vldrbq_gather_offset_s16 (int8_t const * __base, uint16x8_t __offset) | |
11996 | { | |
11997 | return __builtin_mve_vldrbq_gather_offset_sv8hi ((__builtin_neon_qi *) __base, __offset); | |
11998 | } | |
11999 | ||
12000 | __extension__ extern __inline int16x8_t | |
12001 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12002 | __arm_vldrbq_s16 (int8_t const * __base) | |
12003 | { | |
12004 | return __builtin_mve_vldrbq_sv8hi ((__builtin_neon_qi *) __base); | |
12005 | } | |
12006 | ||
12007 | __extension__ extern __inline uint16x8_t | |
12008 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12009 | __arm_vldrbq_u16 (uint8_t const * __base) | |
12010 | { | |
12011 | return __builtin_mve_vldrbq_uv8hi ((__builtin_neon_qi *) __base); | |
12012 | } | |
12013 | ||
12014 | __extension__ extern __inline uint32x4_t | |
12015 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12016 | __arm_vldrbq_gather_offset_u32 (uint8_t const * __base, uint32x4_t __offset) | |
12017 | { | |
12018 | return __builtin_mve_vldrbq_gather_offset_uv4si ((__builtin_neon_qi *) __base, __offset); | |
12019 | } | |
12020 | ||
12021 | __extension__ extern __inline int32x4_t | |
12022 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12023 | __arm_vldrbq_gather_offset_s32 (int8_t const * __base, uint32x4_t __offset) | |
12024 | { | |
12025 | return __builtin_mve_vldrbq_gather_offset_sv4si ((__builtin_neon_qi *) __base, __offset); | |
12026 | } | |
12027 | ||
12028 | __extension__ extern __inline int32x4_t | |
12029 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12030 | __arm_vldrbq_s32 (int8_t const * __base) | |
12031 | { | |
12032 | return __builtin_mve_vldrbq_sv4si ((__builtin_neon_qi *) __base); | |
12033 | } | |
12034 | ||
12035 | __extension__ extern __inline uint32x4_t | |
12036 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12037 | __arm_vldrbq_u32 (uint8_t const * __base) | |
12038 | { | |
12039 | return __builtin_mve_vldrbq_uv4si ((__builtin_neon_qi *) __base); | |
12040 | } | |
12041 | ||
12042 | __extension__ extern __inline int32x4_t | |
12043 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12044 | __arm_vldrwq_gather_base_s32 (uint32x4_t __addr, const int __offset) | |
12045 | { | |
12046 | return __builtin_mve_vldrwq_gather_base_sv4si (__addr, __offset); | |
12047 | } | |
12048 | ||
12049 | __extension__ extern __inline uint32x4_t | |
12050 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12051 | __arm_vldrwq_gather_base_u32 (uint32x4_t __addr, const int __offset) | |
12052 | { | |
12053 | return __builtin_mve_vldrwq_gather_base_uv4si (__addr, __offset); | |
12054 | } | |
12055 | ||
405e918c SP |
12056 | __extension__ extern __inline void |
12057 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12058 | __arm_vstrbq_p_s8 (int8_t * __addr, int8x16_t __value, mve_pred16_t __p) | |
12059 | { | |
12060 | __builtin_mve_vstrbq_p_sv16qi ((__builtin_neon_qi *) __addr, __value, __p); | |
12061 | } | |
12062 | ||
12063 | __extension__ extern __inline void | |
12064 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12065 | __arm_vstrbq_p_s32 (int8_t * __addr, int32x4_t __value, mve_pred16_t __p) | |
12066 | { | |
12067 | __builtin_mve_vstrbq_p_sv4si ((__builtin_neon_qi *) __addr, __value, __p); | |
12068 | } | |
12069 | ||
12070 | __extension__ extern __inline void | |
12071 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12072 | __arm_vstrbq_p_s16 (int8_t * __addr, int16x8_t __value, mve_pred16_t __p) | |
12073 | { | |
12074 | __builtin_mve_vstrbq_p_sv8hi ((__builtin_neon_qi *) __addr, __value, __p); | |
12075 | } | |
12076 | ||
12077 | __extension__ extern __inline void | |
12078 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12079 | __arm_vstrbq_p_u8 (uint8_t * __addr, uint8x16_t __value, mve_pred16_t __p) | |
12080 | { | |
12081 | __builtin_mve_vstrbq_p_uv16qi ((__builtin_neon_qi *) __addr, __value, __p); | |
12082 | } | |
12083 | ||
12084 | __extension__ extern __inline void | |
12085 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12086 | __arm_vstrbq_p_u32 (uint8_t * __addr, uint32x4_t __value, mve_pred16_t __p) | |
12087 | { | |
12088 | __builtin_mve_vstrbq_p_uv4si ((__builtin_neon_qi *) __addr, __value, __p); | |
12089 | } | |
12090 | ||
12091 | __extension__ extern __inline void | |
12092 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12093 | __arm_vstrbq_p_u16 (uint8_t * __addr, uint16x8_t __value, mve_pred16_t __p) | |
12094 | { | |
12095 | __builtin_mve_vstrbq_p_uv8hi ((__builtin_neon_qi *) __addr, __value, __p); | |
12096 | } | |
12097 | ||
12098 | __extension__ extern __inline void | |
12099 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12100 | __arm_vstrbq_scatter_offset_p_s8 (int8_t * __base, uint8x16_t __offset, int8x16_t __value, mve_pred16_t __p) | |
12101 | { | |
12102 | __builtin_mve_vstrbq_scatter_offset_p_sv16qi ((__builtin_neon_qi *) __base, __offset, __value, __p); | |
12103 | } | |
12104 | ||
12105 | __extension__ extern __inline void | |
12106 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12107 | __arm_vstrbq_scatter_offset_p_s32 (int8_t * __base, uint32x4_t __offset, int32x4_t __value, mve_pred16_t __p) | |
12108 | { | |
12109 | __builtin_mve_vstrbq_scatter_offset_p_sv4si ((__builtin_neon_qi *) __base, __offset, __value, __p); | |
12110 | } | |
12111 | ||
12112 | __extension__ extern __inline void | |
12113 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12114 | __arm_vstrbq_scatter_offset_p_s16 (int8_t * __base, uint16x8_t __offset, int16x8_t __value, mve_pred16_t __p) | |
12115 | { | |
12116 | __builtin_mve_vstrbq_scatter_offset_p_sv8hi ((__builtin_neon_qi *) __base, __offset, __value, __p); | |
12117 | } | |
12118 | ||
12119 | __extension__ extern __inline void | |
12120 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12121 | __arm_vstrbq_scatter_offset_p_u8 (uint8_t * __base, uint8x16_t __offset, uint8x16_t __value, mve_pred16_t __p) | |
12122 | { | |
12123 | __builtin_mve_vstrbq_scatter_offset_p_uv16qi ((__builtin_neon_qi *) __base, __offset, __value, __p); | |
12124 | } | |
12125 | ||
12126 | __extension__ extern __inline void | |
12127 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12128 | __arm_vstrbq_scatter_offset_p_u32 (uint8_t * __base, uint32x4_t __offset, uint32x4_t __value, mve_pred16_t __p) | |
12129 | { | |
12130 | __builtin_mve_vstrbq_scatter_offset_p_uv4si ((__builtin_neon_qi *) __base, __offset, __value, __p); | |
12131 | } | |
12132 | ||
12133 | __extension__ extern __inline void | |
12134 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12135 | __arm_vstrbq_scatter_offset_p_u16 (uint8_t * __base, uint16x8_t __offset, uint16x8_t __value, mve_pred16_t __p) | |
12136 | { | |
12137 | __builtin_mve_vstrbq_scatter_offset_p_uv8hi ((__builtin_neon_qi *) __base, __offset, __value, __p); | |
12138 | } | |
12139 | ||
12140 | __extension__ extern __inline void | |
12141 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12142 | __arm_vstrwq_scatter_base_p_s32 (uint32x4_t __addr, const int __offset, int32x4_t __value, mve_pred16_t __p) | |
12143 | { | |
12144 | __builtin_mve_vstrwq_scatter_base_p_sv4si (__addr, __offset, __value, __p); | |
12145 | } | |
12146 | ||
12147 | __extension__ extern __inline void | |
12148 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12149 | __arm_vstrwq_scatter_base_p_u32 (uint32x4_t __addr, const int __offset, uint32x4_t __value, mve_pred16_t __p) | |
12150 | { | |
12151 | __builtin_mve_vstrwq_scatter_base_p_uv4si (__addr, __offset, __value, __p); | |
12152 | } | |
429d607b SP |
12153 | |
12154 | __extension__ extern __inline int8x16_t | |
12155 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12156 | __arm_vldrbq_gather_offset_z_s8 (int8_t const * __base, uint8x16_t __offset, mve_pred16_t __p) | |
12157 | { | |
12158 | return __builtin_mve_vldrbq_gather_offset_z_sv16qi ((__builtin_neon_qi *) __base, __offset, __p); | |
12159 | } | |
12160 | ||
12161 | __extension__ extern __inline int32x4_t | |
12162 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12163 | __arm_vldrbq_gather_offset_z_s32 (int8_t const * __base, uint32x4_t __offset, mve_pred16_t __p) | |
12164 | { | |
12165 | return __builtin_mve_vldrbq_gather_offset_z_sv4si ((__builtin_neon_qi *) __base, __offset, __p); | |
12166 | } | |
12167 | ||
12168 | __extension__ extern __inline int16x8_t | |
12169 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12170 | __arm_vldrbq_gather_offset_z_s16 (int8_t const * __base, uint16x8_t __offset, mve_pred16_t __p) | |
12171 | { | |
12172 | return __builtin_mve_vldrbq_gather_offset_z_sv8hi ((__builtin_neon_qi *) __base, __offset, __p); | |
12173 | } | |
12174 | ||
12175 | __extension__ extern __inline uint8x16_t | |
12176 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12177 | __arm_vldrbq_gather_offset_z_u8 (uint8_t const * __base, uint8x16_t __offset, mve_pred16_t __p) | |
12178 | { | |
12179 | return __builtin_mve_vldrbq_gather_offset_z_uv16qi ((__builtin_neon_qi *) __base, __offset, __p); | |
12180 | } | |
12181 | ||
12182 | __extension__ extern __inline uint32x4_t | |
12183 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12184 | __arm_vldrbq_gather_offset_z_u32 (uint8_t const * __base, uint32x4_t __offset, mve_pred16_t __p) | |
12185 | { | |
12186 | return __builtin_mve_vldrbq_gather_offset_z_uv4si ((__builtin_neon_qi *) __base, __offset, __p); | |
12187 | } | |
12188 | ||
12189 | __extension__ extern __inline uint16x8_t | |
12190 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12191 | __arm_vldrbq_gather_offset_z_u16 (uint8_t const * __base, uint16x8_t __offset, mve_pred16_t __p) | |
12192 | { | |
12193 | return __builtin_mve_vldrbq_gather_offset_z_uv8hi ((__builtin_neon_qi *) __base, __offset, __p); | |
12194 | } | |
12195 | ||
12196 | __extension__ extern __inline int8x16_t | |
12197 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12198 | __arm_vldrbq_z_s8 (int8_t const * __base, mve_pred16_t __p) | |
12199 | { | |
12200 | return __builtin_mve_vldrbq_z_sv16qi ((__builtin_neon_qi *) __base, __p); | |
12201 | } | |
12202 | ||
12203 | __extension__ extern __inline int32x4_t | |
12204 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12205 | __arm_vldrbq_z_s32 (int8_t const * __base, mve_pred16_t __p) | |
12206 | { | |
12207 | return __builtin_mve_vldrbq_z_sv4si ((__builtin_neon_qi *) __base, __p); | |
12208 | } | |
12209 | ||
12210 | __extension__ extern __inline int16x8_t | |
12211 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12212 | __arm_vldrbq_z_s16 (int8_t const * __base, mve_pred16_t __p) | |
12213 | { | |
12214 | return __builtin_mve_vldrbq_z_sv8hi ((__builtin_neon_qi *) __base, __p); | |
12215 | } | |
12216 | ||
12217 | __extension__ extern __inline uint8x16_t | |
12218 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12219 | __arm_vldrbq_z_u8 (uint8_t const * __base, mve_pred16_t __p) | |
12220 | { | |
12221 | return __builtin_mve_vldrbq_z_uv16qi ((__builtin_neon_qi *) __base, __p); | |
12222 | } | |
12223 | ||
12224 | __extension__ extern __inline uint32x4_t | |
12225 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12226 | __arm_vldrbq_z_u32 (uint8_t const * __base, mve_pred16_t __p) | |
12227 | { | |
12228 | return __builtin_mve_vldrbq_z_uv4si ((__builtin_neon_qi *) __base, __p); | |
12229 | } | |
12230 | ||
12231 | __extension__ extern __inline uint16x8_t | |
12232 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12233 | __arm_vldrbq_z_u16 (uint8_t const * __base, mve_pred16_t __p) | |
12234 | { | |
12235 | return __builtin_mve_vldrbq_z_uv8hi ((__builtin_neon_qi *) __base, __p); | |
12236 | } | |
12237 | ||
12238 | __extension__ extern __inline int32x4_t | |
12239 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12240 | __arm_vldrwq_gather_base_z_s32 (uint32x4_t __addr, const int __offset, mve_pred16_t __p) | |
12241 | { | |
12242 | return __builtin_mve_vldrwq_gather_base_z_sv4si (__addr, __offset, __p); | |
12243 | } | |
12244 | ||
12245 | __extension__ extern __inline uint32x4_t | |
12246 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12247 | __arm_vldrwq_gather_base_z_u32 (uint32x4_t __addr, const int __offset, mve_pred16_t __p) | |
12248 | { | |
12249 | return __builtin_mve_vldrwq_gather_base_z_uv4si (__addr, __offset, __p); | |
12250 | } | |
12251 | ||
bf1e3d5a SP |
12252 | __extension__ extern __inline int8x16_t |
12253 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12254 | __arm_vld1q_s8 (int8_t const * __base) | |
12255 | { | |
12256 | return __builtin_mve_vld1q_sv16qi ((__builtin_neon_qi *) __base); | |
12257 | } | |
12258 | ||
12259 | __extension__ extern __inline int32x4_t | |
12260 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12261 | __arm_vld1q_s32 (int32_t const * __base) | |
12262 | { | |
12263 | return __builtin_mve_vld1q_sv4si ((__builtin_neon_si *) __base); | |
12264 | } | |
12265 | ||
12266 | __extension__ extern __inline int16x8_t | |
12267 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12268 | __arm_vld1q_s16 (int16_t const * __base) | |
12269 | { | |
12270 | return __builtin_mve_vld1q_sv8hi ((__builtin_neon_hi *) __base); | |
12271 | } | |
12272 | ||
12273 | __extension__ extern __inline uint8x16_t | |
12274 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12275 | __arm_vld1q_u8 (uint8_t const * __base) | |
12276 | { | |
12277 | return __builtin_mve_vld1q_uv16qi ((__builtin_neon_qi *) __base); | |
12278 | } | |
12279 | ||
12280 | __extension__ extern __inline uint32x4_t | |
12281 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12282 | __arm_vld1q_u32 (uint32_t const * __base) | |
12283 | { | |
12284 | return __builtin_mve_vld1q_uv4si ((__builtin_neon_si *) __base); | |
12285 | } | |
12286 | ||
12287 | __extension__ extern __inline uint16x8_t | |
12288 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12289 | __arm_vld1q_u16 (uint16_t const * __base) | |
12290 | { | |
12291 | return __builtin_mve_vld1q_uv8hi ((__builtin_neon_hi *) __base); | |
12292 | } | |
12293 | ||
12294 | __extension__ extern __inline int32x4_t | |
12295 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12296 | __arm_vldrhq_gather_offset_s32 (int16_t const * __base, uint32x4_t __offset) | |
12297 | { | |
12298 | return __builtin_mve_vldrhq_gather_offset_sv4si ((__builtin_neon_hi *) __base, __offset); | |
12299 | } | |
12300 | ||
12301 | __extension__ extern __inline int16x8_t | |
12302 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12303 | __arm_vldrhq_gather_offset_s16 (int16_t const * __base, uint16x8_t __offset) | |
12304 | { | |
12305 | return __builtin_mve_vldrhq_gather_offset_sv8hi ((__builtin_neon_hi *) __base, __offset); | |
12306 | } | |
12307 | ||
12308 | __extension__ extern __inline uint32x4_t | |
12309 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12310 | __arm_vldrhq_gather_offset_u32 (uint16_t const * __base, uint32x4_t __offset) | |
12311 | { | |
12312 | return __builtin_mve_vldrhq_gather_offset_uv4si ((__builtin_neon_hi *) __base, __offset); | |
12313 | } | |
12314 | ||
12315 | __extension__ extern __inline uint16x8_t | |
12316 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12317 | __arm_vldrhq_gather_offset_u16 (uint16_t const * __base, uint16x8_t __offset) | |
12318 | { | |
12319 | return __builtin_mve_vldrhq_gather_offset_uv8hi ((__builtin_neon_hi *) __base, __offset); | |
12320 | } | |
12321 | ||
12322 | __extension__ extern __inline int32x4_t | |
12323 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12324 | __arm_vldrhq_gather_offset_z_s32 (int16_t const * __base, uint32x4_t __offset, mve_pred16_t __p) | |
12325 | { | |
12326 | return __builtin_mve_vldrhq_gather_offset_z_sv4si ((__builtin_neon_hi *) __base, __offset, __p); | |
12327 | } | |
12328 | ||
12329 | __extension__ extern __inline int16x8_t | |
12330 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12331 | __arm_vldrhq_gather_offset_z_s16 (int16_t const * __base, uint16x8_t __offset, mve_pred16_t __p) | |
12332 | { | |
12333 | return __builtin_mve_vldrhq_gather_offset_z_sv8hi ((__builtin_neon_hi *) __base, __offset, __p); | |
12334 | } | |
12335 | ||
12336 | __extension__ extern __inline uint32x4_t | |
12337 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12338 | __arm_vldrhq_gather_offset_z_u32 (uint16_t const * __base, uint32x4_t __offset, mve_pred16_t __p) | |
12339 | { | |
12340 | return __builtin_mve_vldrhq_gather_offset_z_uv4si ((__builtin_neon_hi *) __base, __offset, __p); | |
12341 | } | |
12342 | ||
12343 | __extension__ extern __inline uint16x8_t | |
12344 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12345 | __arm_vldrhq_gather_offset_z_u16 (uint16_t const * __base, uint16x8_t __offset, mve_pred16_t __p) | |
12346 | { | |
12347 | return __builtin_mve_vldrhq_gather_offset_z_uv8hi ((__builtin_neon_hi *) __base, __offset, __p); | |
12348 | } | |
12349 | ||
12350 | __extension__ extern __inline int32x4_t | |
12351 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12352 | __arm_vldrhq_gather_shifted_offset_s32 (int16_t const * __base, uint32x4_t __offset) | |
12353 | { | |
12354 | return __builtin_mve_vldrhq_gather_shifted_offset_sv4si ((__builtin_neon_hi *) __base, __offset); | |
12355 | } | |
12356 | ||
12357 | __extension__ extern __inline int16x8_t | |
12358 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12359 | __arm_vldrhq_gather_shifted_offset_s16 (int16_t const * __base, uint16x8_t __offset) | |
12360 | { | |
12361 | return __builtin_mve_vldrhq_gather_shifted_offset_sv8hi ((__builtin_neon_hi *) __base, __offset); | |
12362 | } | |
12363 | ||
12364 | __extension__ extern __inline uint32x4_t | |
12365 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12366 | __arm_vldrhq_gather_shifted_offset_u32 (uint16_t const * __base, uint32x4_t __offset) | |
12367 | { | |
12368 | return __builtin_mve_vldrhq_gather_shifted_offset_uv4si ((__builtin_neon_hi *) __base, __offset); | |
12369 | } | |
12370 | ||
12371 | __extension__ extern __inline uint16x8_t | |
12372 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12373 | __arm_vldrhq_gather_shifted_offset_u16 (uint16_t const * __base, uint16x8_t __offset) | |
12374 | { | |
12375 | return __builtin_mve_vldrhq_gather_shifted_offset_uv8hi ((__builtin_neon_hi *) __base, __offset); | |
12376 | } | |
12377 | ||
12378 | __extension__ extern __inline int32x4_t | |
12379 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12380 | __arm_vldrhq_gather_shifted_offset_z_s32 (int16_t const * __base, uint32x4_t __offset, mve_pred16_t __p) | |
12381 | { | |
12382 | return __builtin_mve_vldrhq_gather_shifted_offset_z_sv4si ((__builtin_neon_hi *) __base, __offset, __p); | |
12383 | } | |
12384 | ||
12385 | __extension__ extern __inline int16x8_t | |
12386 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12387 | __arm_vldrhq_gather_shifted_offset_z_s16 (int16_t const * __base, uint16x8_t __offset, mve_pred16_t __p) | |
12388 | { | |
12389 | return __builtin_mve_vldrhq_gather_shifted_offset_z_sv8hi ((__builtin_neon_hi *) __base, __offset, __p); | |
12390 | } | |
12391 | ||
12392 | __extension__ extern __inline uint32x4_t | |
12393 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12394 | __arm_vldrhq_gather_shifted_offset_z_u32 (uint16_t const * __base, uint32x4_t __offset, mve_pred16_t __p) | |
12395 | { | |
12396 | return __builtin_mve_vldrhq_gather_shifted_offset_z_uv4si ((__builtin_neon_hi *) __base, __offset, __p); | |
12397 | } | |
12398 | ||
12399 | __extension__ extern __inline uint16x8_t | |
12400 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12401 | __arm_vldrhq_gather_shifted_offset_z_u16 (uint16_t const * __base, uint16x8_t __offset, mve_pred16_t __p) | |
12402 | { | |
12403 | return __builtin_mve_vldrhq_gather_shifted_offset_z_uv8hi ((__builtin_neon_hi *) __base, __offset, __p); | |
12404 | } | |
12405 | ||
12406 | __extension__ extern __inline int32x4_t | |
12407 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12408 | __arm_vldrhq_s32 (int16_t const * __base) | |
12409 | { | |
12410 | return __builtin_mve_vldrhq_sv4si ((__builtin_neon_hi *) __base); | |
12411 | } | |
12412 | ||
12413 | __extension__ extern __inline int16x8_t | |
12414 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12415 | __arm_vldrhq_s16 (int16_t const * __base) | |
12416 | { | |
12417 | return __builtin_mve_vldrhq_sv8hi ((__builtin_neon_hi *) __base); | |
12418 | } | |
12419 | ||
12420 | __extension__ extern __inline uint32x4_t | |
12421 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12422 | __arm_vldrhq_u32 (uint16_t const * __base) | |
12423 | { | |
12424 | return __builtin_mve_vldrhq_uv4si ((__builtin_neon_hi *) __base); | |
12425 | } | |
12426 | ||
12427 | __extension__ extern __inline uint16x8_t | |
12428 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12429 | __arm_vldrhq_u16 (uint16_t const * __base) | |
12430 | { | |
12431 | return __builtin_mve_vldrhq_uv8hi ((__builtin_neon_hi *) __base); | |
12432 | } | |
12433 | ||
/* Predicated (_z) halfword loads: only the lanes enabled by predicate
   __p are loaded; disabled lanes are zeroed (ACLE "zeroing" semantics
   of the _z suffix).  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_z_s32 (int16_t const * __base, mve_pred16_t __p)
{
  return __builtin_mve_vldrhq_z_sv4si ((__builtin_neon_hi *) __base, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_z_s16 (int16_t const * __base, mve_pred16_t __p)
{
  return __builtin_mve_vldrhq_z_sv8hi ((__builtin_neon_hi *) __base, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_z_u32 (uint16_t const * __base, mve_pred16_t __p)
{
  return __builtin_mve_vldrhq_z_uv4si ((__builtin_neon_hi *) __base, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_z_u16 (uint16_t const * __base, mve_pred16_t __p)
{
  return __builtin_mve_vldrhq_z_uv8hi ((__builtin_neon_hi *) __base, __p);
}
12461 | ||
/* Contiguous word loads (vldrwq): load four 32-bit elements from __base.
   The _z variants are predicated; lanes disabled in __p are zeroed.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrwq_s32 (int32_t const * __base)
{
  return __builtin_mve_vldrwq_sv4si ((__builtin_neon_si *) __base);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrwq_u32 (uint32_t const * __base)
{
  return __builtin_mve_vldrwq_uv4si ((__builtin_neon_si *) __base);
}


__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrwq_z_s32 (int32_t const * __base, mve_pred16_t __p)
{
  return __builtin_mve_vldrwq_z_sv4si ((__builtin_neon_si *) __base, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrwq_z_u32 (uint32_t const * __base, mve_pred16_t __p)
{
  return __builtin_mve_vldrwq_z_uv4si ((__builtin_neon_si *) __base, __p);
}
12490 | ||
4cc23303 SP |
/* Gather loads of 64-bit elements using a vector of base addresses:
   each lane of __addr supplies an address, __offset is an immediate
   byte offset added to every lane.  _z variants are predicated, with
   disabled lanes zeroed.  */
__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrdq_gather_base_s64 (uint64x2_t __addr, const int __offset)
{
  return __builtin_mve_vldrdq_gather_base_sv2di (__addr, __offset);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrdq_gather_base_u64 (uint64x2_t __addr, const int __offset)
{
  return __builtin_mve_vldrdq_gather_base_uv2di (__addr, __offset);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrdq_gather_base_z_s64 (uint64x2_t __addr, const int __offset, mve_pred16_t __p)
{
  return __builtin_mve_vldrdq_gather_base_z_sv2di (__addr, __offset, __p);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrdq_gather_base_z_u64 (uint64x2_t __addr, const int __offset, mve_pred16_t __p)
{
  return __builtin_mve_vldrdq_gather_base_z_uv2di (__addr, __offset, __p);
}
12518 | ||
/* Gather loads of 64-bit elements from a scalar base plus a vector of
   per-lane offsets.  "_shifted_" variants scale each offset by the
   element size before adding (ACLE shifted-offset addressing); "_z"
   variants are predicated, zeroing disabled lanes.  __base is cast to
   the builtin's __builtin_neon_di * pointer type.  */
__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrdq_gather_offset_s64 (int64_t const * __base, uint64x2_t __offset)
{
  return __builtin_mve_vldrdq_gather_offset_sv2di ((__builtin_neon_di *) __base, __offset);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrdq_gather_offset_u64 (uint64_t const * __base, uint64x2_t __offset)
{
  return __builtin_mve_vldrdq_gather_offset_uv2di ((__builtin_neon_di *) __base, __offset);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrdq_gather_offset_z_s64 (int64_t const * __base, uint64x2_t __offset, mve_pred16_t __p)
{
  return __builtin_mve_vldrdq_gather_offset_z_sv2di ((__builtin_neon_di *) __base, __offset, __p);
}


__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrdq_gather_offset_z_u64 (uint64_t const * __base, uint64x2_t __offset, mve_pred16_t __p)
{
  return __builtin_mve_vldrdq_gather_offset_z_uv2di ((__builtin_neon_di *) __base, __offset, __p);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrdq_gather_shifted_offset_s64 (int64_t const * __base, uint64x2_t __offset)
{
  return __builtin_mve_vldrdq_gather_shifted_offset_sv2di ((__builtin_neon_di *) __base, __offset);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrdq_gather_shifted_offset_u64 (uint64_t const * __base, uint64x2_t __offset)
{
  return __builtin_mve_vldrdq_gather_shifted_offset_uv2di ((__builtin_neon_di *) __base, __offset);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrdq_gather_shifted_offset_z_s64 (int64_t const * __base, uint64x2_t __offset, mve_pred16_t __p)
{
  return __builtin_mve_vldrdq_gather_shifted_offset_z_sv2di ((__builtin_neon_di *) __base, __offset, __p);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrdq_gather_shifted_offset_z_u64 (uint64_t const * __base, uint64x2_t __offset, mve_pred16_t __p)
{
  return __builtin_mve_vldrdq_gather_shifted_offset_z_uv2di ((__builtin_neon_di *) __base, __offset, __p);
}
12575 | ||
/* Gather loads of 32-bit elements from a scalar base plus a vector of
   per-lane offsets.  "_shifted_" variants scale each offset by the
   element size; "_z" variants are predicated, zeroing disabled lanes.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrwq_gather_offset_s32 (int32_t const * __base, uint32x4_t __offset)
{
  return __builtin_mve_vldrwq_gather_offset_sv4si ((__builtin_neon_si *) __base, __offset);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrwq_gather_offset_u32 (uint32_t const * __base, uint32x4_t __offset)
{
  return __builtin_mve_vldrwq_gather_offset_uv4si ((__builtin_neon_si *) __base, __offset);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrwq_gather_offset_z_s32 (int32_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
{
  return __builtin_mve_vldrwq_gather_offset_z_sv4si ((__builtin_neon_si *) __base, __offset, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrwq_gather_offset_z_u32 (uint32_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
{
  return __builtin_mve_vldrwq_gather_offset_z_uv4si ((__builtin_neon_si *) __base, __offset, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrwq_gather_shifted_offset_s32 (int32_t const * __base, uint32x4_t __offset)
{
  return __builtin_mve_vldrwq_gather_shifted_offset_sv4si ((__builtin_neon_si *) __base, __offset);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrwq_gather_shifted_offset_u32 (uint32_t const * __base, uint32x4_t __offset)
{
  return __builtin_mve_vldrwq_gather_shifted_offset_uv4si ((__builtin_neon_si *) __base, __offset);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrwq_gather_shifted_offset_z_s32 (int32_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
{
  return __builtin_mve_vldrwq_gather_shifted_offset_z_sv4si ((__builtin_neon_si *) __base, __offset, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrwq_gather_shifted_offset_z_u32 (uint32_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
{
  return __builtin_mve_vldrwq_gather_shifted_offset_z_uv4si ((__builtin_neon_si *) __base, __offset, __p);
}
12631 | ||
5cad47e0 SP |
/* Contiguous vector stores (vst1q): store the whole 128-bit __value to
   __addr.  The pointer is cast to the builtin's __builtin_neon_* element
   type; the integer signedness distinction is carried by the builtin
   suffix, not the pointer type.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst1q_s8 (int8_t * __addr, int8x16_t __value)
{
  __builtin_mve_vst1q_sv16qi ((__builtin_neon_qi *) __addr, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst1q_s32 (int32_t * __addr, int32x4_t __value)
{
  __builtin_mve_vst1q_sv4si ((__builtin_neon_si *) __addr, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst1q_s16 (int16_t * __addr, int16x8_t __value)
{
  __builtin_mve_vst1q_sv8hi ((__builtin_neon_hi *) __addr, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst1q_u8 (uint8_t * __addr, uint8x16_t __value)
{
  __builtin_mve_vst1q_uv16qi ((__builtin_neon_qi *) __addr, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst1q_u32 (uint32_t * __addr, uint32x4_t __value)
{
  __builtin_mve_vst1q_uv4si ((__builtin_neon_si *) __addr, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst1q_u16 (uint16_t * __addr, uint16x8_t __value)
{
  __builtin_mve_vst1q_uv8hi ((__builtin_neon_hi *) __addr, __value);
}
12673 | ||
/* Scatter stores of 16-bit elements: each lane of __value is stored at
   __base plus the corresponding lane of __offset.  The _s32/_u32
   variants store the low halfword of each 32-bit lane.  "_shifted_"
   variants scale each offset by the element size; "_p" variants store
   only the lanes enabled by predicate __p.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_scatter_offset_s32 (int16_t * __base, uint32x4_t __offset, int32x4_t __value)
{
  __builtin_mve_vstrhq_scatter_offset_sv4si ((__builtin_neon_hi *) __base, __offset, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_scatter_offset_s16 (int16_t * __base, uint16x8_t __offset, int16x8_t __value)
{
  __builtin_mve_vstrhq_scatter_offset_sv8hi ((__builtin_neon_hi *) __base, __offset, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_scatter_offset_u32 (uint16_t * __base, uint32x4_t __offset, uint32x4_t __value)
{
  __builtin_mve_vstrhq_scatter_offset_uv4si ((__builtin_neon_hi *) __base, __offset, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_scatter_offset_u16 (uint16_t * __base, uint16x8_t __offset, uint16x8_t __value)
{
  __builtin_mve_vstrhq_scatter_offset_uv8hi ((__builtin_neon_hi *) __base, __offset, __value);
}

/* Predicated scatter stores (plain offsets).  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_scatter_offset_p_s32 (int16_t * __base, uint32x4_t __offset, int32x4_t __value, mve_pred16_t __p)
{
  __builtin_mve_vstrhq_scatter_offset_p_sv4si ((__builtin_neon_hi *) __base, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_scatter_offset_p_s16 (int16_t * __base, uint16x8_t __offset, int16x8_t __value, mve_pred16_t __p)
{
  __builtin_mve_vstrhq_scatter_offset_p_sv8hi ((__builtin_neon_hi *) __base, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_scatter_offset_p_u32 (uint16_t * __base, uint32x4_t __offset, uint32x4_t __value, mve_pred16_t __p)
{
  __builtin_mve_vstrhq_scatter_offset_p_uv4si ((__builtin_neon_hi *) __base, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_scatter_offset_p_u16 (uint16_t * __base, uint16x8_t __offset, uint16x8_t __value, mve_pred16_t __p)
{
  __builtin_mve_vstrhq_scatter_offset_p_uv8hi ((__builtin_neon_hi *) __base, __offset, __value, __p);
}

/* Shifted-offset scatter stores: offsets are scaled by the halfword
   element size before being added to __base.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_scatter_shifted_offset_s32 (int16_t * __base, uint32x4_t __offset, int32x4_t __value)
{
  __builtin_mve_vstrhq_scatter_shifted_offset_sv4si ((__builtin_neon_hi *) __base, __offset, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_scatter_shifted_offset_s16 (int16_t * __base, uint16x8_t __offset, int16x8_t __value)
{
  __builtin_mve_vstrhq_scatter_shifted_offset_sv8hi ((__builtin_neon_hi *) __base, __offset, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_scatter_shifted_offset_u32 (uint16_t * __base, uint32x4_t __offset, uint32x4_t __value)
{
  __builtin_mve_vstrhq_scatter_shifted_offset_uv4si ((__builtin_neon_hi *) __base, __offset, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_scatter_shifted_offset_u16 (uint16_t * __base, uint16x8_t __offset, uint16x8_t __value)
{
  __builtin_mve_vstrhq_scatter_shifted_offset_uv8hi ((__builtin_neon_hi *) __base, __offset, __value);
}

/* Predicated shifted-offset scatter stores.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_scatter_shifted_offset_p_s32 (int16_t * __base, uint32x4_t __offset, int32x4_t __value, mve_pred16_t __p)
{
  __builtin_mve_vstrhq_scatter_shifted_offset_p_sv4si ((__builtin_neon_hi *) __base, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_scatter_shifted_offset_p_s16 (int16_t * __base, uint16x8_t __offset, int16x8_t __value, mve_pred16_t __p)
{
  __builtin_mve_vstrhq_scatter_shifted_offset_p_sv8hi ((__builtin_neon_hi *) __base, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_scatter_shifted_offset_p_u32 (uint16_t * __base, uint32x4_t __offset, uint32x4_t __value, mve_pred16_t __p)
{
  __builtin_mve_vstrhq_scatter_shifted_offset_p_uv4si ((__builtin_neon_hi *) __base, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_scatter_shifted_offset_p_u16 (uint16_t * __base, uint16x8_t __offset, uint16x8_t __value, mve_pred16_t __p)
{
  __builtin_mve_vstrhq_scatter_shifted_offset_p_uv8hi ((__builtin_neon_hi *) __base, __offset, __value, __p);
}
12785 | ||
/* Contiguous halfword stores (vstrhq): store 16-bit elements to __addr.
   The _s32/_u32 variants store the low halfword of each 32-bit lane;
   "_p" variants store only the lanes enabled by predicate __p.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_s32 (int16_t * __addr, int32x4_t __value)
{
  __builtin_mve_vstrhq_sv4si ((__builtin_neon_hi *) __addr, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_s16 (int16_t * __addr, int16x8_t __value)
{
  __builtin_mve_vstrhq_sv8hi ((__builtin_neon_hi *) __addr, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_u32 (uint16_t * __addr, uint32x4_t __value)
{
  __builtin_mve_vstrhq_uv4si ((__builtin_neon_hi *) __addr, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_u16 (uint16_t * __addr, uint16x8_t __value)
{
  __builtin_mve_vstrhq_uv8hi ((__builtin_neon_hi *) __addr, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_p_s32 (int16_t * __addr, int32x4_t __value, mve_pred16_t __p)
{
  __builtin_mve_vstrhq_p_sv4si ((__builtin_neon_hi *) __addr, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_p_s16 (int16_t * __addr, int16x8_t __value, mve_pred16_t __p)
{
  __builtin_mve_vstrhq_p_sv8hi ((__builtin_neon_hi *) __addr, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_p_u32 (uint16_t * __addr, uint32x4_t __value, mve_pred16_t __p)
{
  __builtin_mve_vstrhq_p_uv4si ((__builtin_neon_hi *) __addr, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_p_u16 (uint16_t * __addr, uint16x8_t __value, mve_pred16_t __p)
{
  __builtin_mve_vstrhq_p_uv8hi ((__builtin_neon_hi *) __addr, __value, __p);
}
12841 | ||
/* Contiguous word stores (vstrwq): store four 32-bit elements to __addr.
   "_p" variants store only the lanes enabled by predicate __p.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_s32 (int32_t * __addr, int32x4_t __value)
{
  __builtin_mve_vstrwq_sv4si ((__builtin_neon_si *) __addr, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_u32 (uint32_t * __addr, uint32x4_t __value)
{
  __builtin_mve_vstrwq_uv4si ((__builtin_neon_si *) __addr, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_p_s32 (int32_t * __addr, int32x4_t __value, mve_pred16_t __p)
{
  __builtin_mve_vstrwq_p_sv4si ((__builtin_neon_si *) __addr, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_p_u32 (uint32_t * __addr, uint32x4_t __value, mve_pred16_t __p)
{
  __builtin_mve_vstrwq_p_uv4si ((__builtin_neon_si *) __addr, __value, __p);
}
12869 | ||
7a5fffa5 SP |
/* Scatter stores of 64-bit elements using a vector of base addresses:
   each lane of __value is stored at the corresponding lane of __addr
   plus the immediate byte offset __offset.  "_p" variants store only
   the lanes enabled by predicate __p.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrdq_scatter_base_p_s64 (uint64x2_t __addr, const int __offset, int64x2_t __value, mve_pred16_t __p)
{
  __builtin_mve_vstrdq_scatter_base_p_sv2di (__addr, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrdq_scatter_base_p_u64 (uint64x2_t __addr, const int __offset, uint64x2_t __value, mve_pred16_t __p)
{
  __builtin_mve_vstrdq_scatter_base_p_uv2di (__addr, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrdq_scatter_base_s64 (uint64x2_t __addr, const int __offset, int64x2_t __value)
{
  __builtin_mve_vstrdq_scatter_base_sv2di (__addr, __offset, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrdq_scatter_base_u64 (uint64x2_t __addr, const int __offset, uint64x2_t __value)
{
  __builtin_mve_vstrdq_scatter_base_uv2di (__addr, __offset, __value);
}
12897 | ||
12898 | __extension__ extern __inline void | |
12899 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12900 | __arm_vstrdq_scatter_offset_p_s64 (int64_t * __base, uint64x2_t __offset, int64x2_t __value, mve_pred16_t __p) | |
12901 | { | |
12902 | __builtin_mve_vstrdq_scatter_offset_p_sv2di (__base, __offset, __value, __p); | |
12903 | } | |
12904 | ||
12905 | __extension__ extern __inline void | |
12906 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12907 | __arm_vstrdq_scatter_offset_p_u64 (uint64_t * __base, uint64x2_t __offset, uint64x2_t __value, mve_pred16_t __p) | |
12908 | { | |
12909 | __builtin_mve_vstrdq_scatter_offset_p_uv2di (__base, __offset, __value, __p); | |
12910 | } | |
12911 | ||
12912 | __extension__ extern __inline void | |
12913 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12914 | __arm_vstrdq_scatter_offset_s64 (int64_t * __base, uint64x2_t __offset, int64x2_t __value) | |
12915 | { | |
12916 | __builtin_mve_vstrdq_scatter_offset_sv2di (__base, __offset, __value); | |
12917 | } | |
12918 | ||
12919 | __extension__ extern __inline void | |
12920 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12921 | __arm_vstrdq_scatter_offset_u64 (uint64_t * __base, uint64x2_t __offset, uint64x2_t __value) | |
12922 | { | |
12923 | __builtin_mve_vstrdq_scatter_offset_uv2di (__base, __offset, __value); | |
12924 | } | |
12925 | ||
12926 | __extension__ extern __inline void | |
12927 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12928 | __arm_vstrdq_scatter_shifted_offset_p_s64 (int64_t * __base, uint64x2_t __offset, int64x2_t __value, mve_pred16_t __p) | |
12929 | { | |
12930 | __builtin_mve_vstrdq_scatter_shifted_offset_p_sv2di (__base, __offset, __value, __p); | |
12931 | } | |
12932 | ||
12933 | __extension__ extern __inline void | |
12934 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12935 | __arm_vstrdq_scatter_shifted_offset_p_u64 (uint64_t * __base, uint64x2_t __offset, uint64x2_t __value, mve_pred16_t __p) | |
12936 | { | |
12937 | __builtin_mve_vstrdq_scatter_shifted_offset_p_uv2di (__base, __offset, __value, __p); | |
12938 | } | |
12939 | ||
12940 | __extension__ extern __inline void | |
12941 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12942 | __arm_vstrdq_scatter_shifted_offset_s64 (int64_t * __base, uint64x2_t __offset, int64x2_t __value) | |
12943 | { | |
12944 | __builtin_mve_vstrdq_scatter_shifted_offset_sv2di (__base, __offset, __value); | |
12945 | } | |
12946 | ||
12947 | __extension__ extern __inline void | |
12948 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12949 | __arm_vstrdq_scatter_shifted_offset_u64 (uint64_t * __base, uint64x2_t __offset, uint64x2_t __value) | |
12950 | { | |
12951 | __builtin_mve_vstrdq_scatter_shifted_offset_uv2di (__base, __offset, __value); | |
12952 | } | |
12953 | ||
/* Scatter stores of 32-bit elements to a scalar base plus a vector of
   per-lane offsets.  "_shifted_" variants scale each offset by the
   element size; "_p" variants store only the lanes enabled by
   predicate __p.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_offset_p_s32 (int32_t * __base, uint32x4_t __offset, int32x4_t __value, mve_pred16_t __p)
{
  __builtin_mve_vstrwq_scatter_offset_p_sv4si ((__builtin_neon_si *) __base, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_offset_p_u32 (uint32_t * __base, uint32x4_t __offset, uint32x4_t __value, mve_pred16_t __p)
{
  __builtin_mve_vstrwq_scatter_offset_p_uv4si ((__builtin_neon_si *) __base, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_offset_s32 (int32_t * __base, uint32x4_t __offset, int32x4_t __value)
{
  __builtin_mve_vstrwq_scatter_offset_sv4si ((__builtin_neon_si *) __base, __offset, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_offset_u32 (uint32_t * __base, uint32x4_t __offset, uint32x4_t __value)
{
  __builtin_mve_vstrwq_scatter_offset_uv4si ((__builtin_neon_si *) __base, __offset, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_shifted_offset_p_s32 (int32_t * __base, uint32x4_t __offset, int32x4_t __value, mve_pred16_t __p)
{
  __builtin_mve_vstrwq_scatter_shifted_offset_p_sv4si ((__builtin_neon_si *) __base, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_shifted_offset_p_u32 (uint32_t * __base, uint32x4_t __offset, uint32x4_t __value, mve_pred16_t __p)
{
  __builtin_mve_vstrwq_scatter_shifted_offset_p_uv4si ((__builtin_neon_si *) __base, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_shifted_offset_s32 (int32_t * __base, uint32x4_t __offset, int32x4_t __value)
{
  __builtin_mve_vstrwq_scatter_shifted_offset_sv4si ((__builtin_neon_si *) __base, __offset, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_shifted_offset_u32 (uint32_t * __base, uint32x4_t __offset, uint32x4_t __value)
{
  __builtin_mve_vstrwq_scatter_shifted_offset_uv4si ((__builtin_neon_si *) __base, __offset, __value);
}
13009 | ||
3eff57aa SP |
13010 | __extension__ extern __inline int8x16_t |
13011 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13012 | __arm_vaddq_s8 (int8x16_t __a, int8x16_t __b) | |
13013 | { | |
13014 | return __a + __b; | |
13015 | } | |
13016 | ||
13017 | __extension__ extern __inline int16x8_t | |
13018 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13019 | __arm_vaddq_s16 (int16x8_t __a, int16x8_t __b) | |
13020 | { | |
13021 | return __a + __b; | |
13022 | } | |
13023 | ||
13024 | __extension__ extern __inline int32x4_t | |
13025 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13026 | __arm_vaddq_s32 (int32x4_t __a, int32x4_t __b) | |
13027 | { | |
13028 | return __a + __b; | |
13029 | } | |
13030 | ||
13031 | __extension__ extern __inline uint8x16_t | |
13032 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13033 | __arm_vaddq_u8 (uint8x16_t __a, uint8x16_t __b) | |
13034 | { | |
13035 | return __a + __b; | |
13036 | } | |
13037 | ||
13038 | __extension__ extern __inline uint16x8_t | |
13039 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13040 | __arm_vaddq_u16 (uint16x8_t __a, uint16x8_t __b) | |
13041 | { | |
13042 | return __a + __b; | |
13043 | } | |
13044 | ||
13045 | __extension__ extern __inline uint32x4_t | |
13046 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13047 | __arm_vaddq_u32 (uint32x4_t __a, uint32x4_t __b) | |
13048 | { | |
13049 | return __a + __b; | |
13050 | } | |
13051 | ||
85a94e87 SP |
/* Return a deliberately uninitialized vector of each type.  The empty
   asm with an output constraint marks __uninit as written, which
   suppresses "used uninitialized" diagnostics without emitting any
   code; the value is indeterminate by design.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vuninitializedq_u8 (void)
{
  uint8x16_t __uninit;
  __asm__ ("": "=w"(__uninit));
  return __uninit;
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vuninitializedq_u16 (void)
{
  uint16x8_t __uninit;
  __asm__ ("": "=w"(__uninit));
  return __uninit;
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vuninitializedq_u32 (void)
{
  uint32x4_t __uninit;
  __asm__ ("": "=w"(__uninit));
  return __uninit;
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vuninitializedq_u64 (void)
{
  uint64x2_t __uninit;
  __asm__ ("": "=w"(__uninit));
  return __uninit;
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vuninitializedq_s8 (void)
{
  int8x16_t __uninit;
  __asm__ ("": "=w"(__uninit));
  return __uninit;
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vuninitializedq_s16 (void)
{
  int16x8_t __uninit;
  __asm__ ("": "=w"(__uninit));
  return __uninit;
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vuninitializedq_s32 (void)
{
  int32x4_t __uninit;
  __asm__ ("": "=w"(__uninit));
  return __uninit;
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vuninitializedq_s64 (void)
{
  int64x2_t __uninit;
  __asm__ ("": "=w"(__uninit));
  return __uninit;
}
13123 | ||
13124 | __extension__ extern __inline int16x8_t | |
13125 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13126 | __arm_vreinterpretq_s16_s32 (int32x4_t __a) | |
13127 | { | |
13128 | return (int16x8_t) __a; | |
13129 | } | |
13130 | ||
13131 | __extension__ extern __inline int16x8_t | |
13132 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13133 | __arm_vreinterpretq_s16_s64 (int64x2_t __a) | |
13134 | { | |
13135 | return (int16x8_t) __a; | |
13136 | } | |
13137 | ||
13138 | __extension__ extern __inline int16x8_t | |
13139 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13140 | __arm_vreinterpretq_s16_s8 (int8x16_t __a) | |
13141 | { | |
13142 | return (int16x8_t) __a; | |
13143 | } | |
13144 | ||
13145 | __extension__ extern __inline int16x8_t | |
13146 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13147 | __arm_vreinterpretq_s16_u16 (uint16x8_t __a) | |
13148 | { | |
13149 | return (int16x8_t) __a; | |
13150 | } | |
13151 | ||
13152 | __extension__ extern __inline int16x8_t | |
13153 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13154 | __arm_vreinterpretq_s16_u32 (uint32x4_t __a) | |
13155 | { | |
13156 | return (int16x8_t) __a; | |
13157 | } | |
13158 | ||
13159 | __extension__ extern __inline int16x8_t | |
13160 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13161 | __arm_vreinterpretq_s16_u64 (uint64x2_t __a) | |
13162 | { | |
13163 | return (int16x8_t) __a; | |
13164 | } | |
13165 | ||
13166 | __extension__ extern __inline int16x8_t | |
13167 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13168 | __arm_vreinterpretq_s16_u8 (uint8x16_t __a) | |
13169 | { | |
13170 | return (int16x8_t) __a; | |
13171 | } | |
13172 | ||
13173 | __extension__ extern __inline int32x4_t | |
13174 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13175 | __arm_vreinterpretq_s32_s16 (int16x8_t __a) | |
13176 | { | |
13177 | return (int32x4_t) __a; | |
13178 | } | |
13179 | ||
13180 | __extension__ extern __inline int32x4_t | |
13181 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13182 | __arm_vreinterpretq_s32_s64 (int64x2_t __a) | |
13183 | { | |
13184 | return (int32x4_t) __a; | |
13185 | } | |
13186 | ||
13187 | __extension__ extern __inline int32x4_t | |
13188 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13189 | __arm_vreinterpretq_s32_s8 (int8x16_t __a) | |
13190 | { | |
13191 | return (int32x4_t) __a; | |
13192 | } | |
13193 | ||
13194 | __extension__ extern __inline int32x4_t | |
13195 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13196 | __arm_vreinterpretq_s32_u16 (uint16x8_t __a) | |
13197 | { | |
13198 | return (int32x4_t) __a; | |
13199 | } | |
13200 | ||
13201 | __extension__ extern __inline int32x4_t | |
13202 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13203 | __arm_vreinterpretq_s32_u32 (uint32x4_t __a) | |
13204 | { | |
13205 | return (int32x4_t) __a; | |
13206 | } | |
13207 | ||
13208 | __extension__ extern __inline int32x4_t | |
13209 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13210 | __arm_vreinterpretq_s32_u64 (uint64x2_t __a) | |
13211 | { | |
13212 | return (int32x4_t) __a; | |
13213 | } | |
13214 | ||
13215 | __extension__ extern __inline int32x4_t | |
13216 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13217 | __arm_vreinterpretq_s32_u8 (uint8x16_t __a) | |
13218 | { | |
13219 | return (int32x4_t) __a; | |
13220 | } | |
13221 | ||
13222 | __extension__ extern __inline int64x2_t | |
13223 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13224 | __arm_vreinterpretq_s64_s16 (int16x8_t __a) | |
13225 | { | |
13226 | return (int64x2_t) __a; | |
13227 | } | |
13228 | ||
13229 | __extension__ extern __inline int64x2_t | |
13230 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13231 | __arm_vreinterpretq_s64_s32 (int32x4_t __a) | |
13232 | { | |
13233 | return (int64x2_t) __a; | |
13234 | } | |
13235 | ||
13236 | __extension__ extern __inline int64x2_t | |
13237 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13238 | __arm_vreinterpretq_s64_s8 (int8x16_t __a) | |
13239 | { | |
13240 | return (int64x2_t) __a; | |
13241 | } | |
13242 | ||
13243 | __extension__ extern __inline int64x2_t | |
13244 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13245 | __arm_vreinterpretq_s64_u16 (uint16x8_t __a) | |
13246 | { | |
13247 | return (int64x2_t) __a; | |
13248 | } | |
13249 | ||
13250 | __extension__ extern __inline int64x2_t | |
13251 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13252 | __arm_vreinterpretq_s64_u32 (uint32x4_t __a) | |
13253 | { | |
13254 | return (int64x2_t) __a; | |
13255 | } | |
13256 | ||
13257 | __extension__ extern __inline int64x2_t | |
13258 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13259 | __arm_vreinterpretq_s64_u64 (uint64x2_t __a) | |
13260 | { | |
13261 | return (int64x2_t) __a; | |
13262 | } | |
13263 | ||
13264 | __extension__ extern __inline int64x2_t | |
13265 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13266 | __arm_vreinterpretq_s64_u8 (uint8x16_t __a) | |
13267 | { | |
13268 | return (int64x2_t) __a; | |
13269 | } | |
13270 | ||
13271 | __extension__ extern __inline int8x16_t | |
13272 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13273 | __arm_vreinterpretq_s8_s16 (int16x8_t __a) | |
13274 | { | |
13275 | return (int8x16_t) __a; | |
13276 | } | |
13277 | ||
13278 | __extension__ extern __inline int8x16_t | |
13279 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13280 | __arm_vreinterpretq_s8_s32 (int32x4_t __a) | |
13281 | { | |
13282 | return (int8x16_t) __a; | |
13283 | } | |
13284 | ||
13285 | __extension__ extern __inline int8x16_t | |
13286 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13287 | __arm_vreinterpretq_s8_s64 (int64x2_t __a) | |
13288 | { | |
13289 | return (int8x16_t) __a; | |
13290 | } | |
13291 | ||
13292 | __extension__ extern __inline int8x16_t | |
13293 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13294 | __arm_vreinterpretq_s8_u16 (uint16x8_t __a) | |
13295 | { | |
13296 | return (int8x16_t) __a; | |
13297 | } | |
13298 | ||
13299 | __extension__ extern __inline int8x16_t | |
13300 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13301 | __arm_vreinterpretq_s8_u32 (uint32x4_t __a) | |
13302 | { | |
13303 | return (int8x16_t) __a; | |
13304 | } | |
13305 | ||
13306 | __extension__ extern __inline int8x16_t | |
13307 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13308 | __arm_vreinterpretq_s8_u64 (uint64x2_t __a) | |
13309 | { | |
13310 | return (int8x16_t) __a; | |
13311 | } | |
13312 | ||
13313 | __extension__ extern __inline int8x16_t | |
13314 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13315 | __arm_vreinterpretq_s8_u8 (uint8x16_t __a) | |
13316 | { | |
13317 | return (int8x16_t) __a; | |
13318 | } | |
13319 | ||
13320 | __extension__ extern __inline uint16x8_t | |
13321 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13322 | __arm_vreinterpretq_u16_s16 (int16x8_t __a) | |
13323 | { | |
13324 | return (uint16x8_t) __a; | |
13325 | } | |
13326 | ||
13327 | __extension__ extern __inline uint16x8_t | |
13328 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13329 | __arm_vreinterpretq_u16_s32 (int32x4_t __a) | |
13330 | { | |
13331 | return (uint16x8_t) __a; | |
13332 | } | |
13333 | ||
13334 | __extension__ extern __inline uint16x8_t | |
13335 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13336 | __arm_vreinterpretq_u16_s64 (int64x2_t __a) | |
13337 | { | |
13338 | return (uint16x8_t) __a; | |
13339 | } | |
13340 | ||
13341 | __extension__ extern __inline uint16x8_t | |
13342 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13343 | __arm_vreinterpretq_u16_s8 (int8x16_t __a) | |
13344 | { | |
13345 | return (uint16x8_t) __a; | |
13346 | } | |
13347 | ||
13348 | __extension__ extern __inline uint16x8_t | |
13349 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13350 | __arm_vreinterpretq_u16_u32 (uint32x4_t __a) | |
13351 | { | |
13352 | return (uint16x8_t) __a; | |
13353 | } | |
13354 | ||
13355 | __extension__ extern __inline uint16x8_t | |
13356 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13357 | __arm_vreinterpretq_u16_u64 (uint64x2_t __a) | |
13358 | { | |
13359 | return (uint16x8_t) __a; | |
13360 | } | |
13361 | ||
13362 | __extension__ extern __inline uint16x8_t | |
13363 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13364 | __arm_vreinterpretq_u16_u8 (uint8x16_t __a) | |
13365 | { | |
13366 | return (uint16x8_t) __a; | |
13367 | } | |
13368 | ||
13369 | ||
13370 | __extension__ extern __inline uint32x4_t | |
13371 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13372 | __arm_vreinterpretq_u32_s16 (int16x8_t __a) | |
13373 | { | |
13374 | return (uint32x4_t) __a; | |
13375 | } | |
13376 | ||
13377 | __extension__ extern __inline uint32x4_t | |
13378 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13379 | __arm_vreinterpretq_u32_s32 (int32x4_t __a) | |
13380 | { | |
13381 | return (uint32x4_t) __a; | |
13382 | } | |
13383 | ||
13384 | __extension__ extern __inline uint32x4_t | |
13385 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13386 | __arm_vreinterpretq_u32_s64 (int64x2_t __a) | |
13387 | { | |
13388 | return (uint32x4_t) __a; | |
13389 | } | |
13390 | ||
13391 | __extension__ extern __inline uint32x4_t | |
13392 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13393 | __arm_vreinterpretq_u32_s8 (int8x16_t __a) | |
13394 | { | |
13395 | return (uint32x4_t) __a; | |
13396 | } | |
13397 | ||
13398 | __extension__ extern __inline uint32x4_t | |
13399 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13400 | __arm_vreinterpretq_u32_u16 (uint16x8_t __a) | |
13401 | { | |
13402 | return (uint32x4_t) __a; | |
13403 | } | |
13404 | ||
13405 | __extension__ extern __inline uint32x4_t | |
13406 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13407 | __arm_vreinterpretq_u32_u64 (uint64x2_t __a) | |
13408 | { | |
13409 | return (uint32x4_t) __a; | |
13410 | } | |
13411 | ||
13412 | __extension__ extern __inline uint32x4_t | |
13413 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13414 | __arm_vreinterpretq_u32_u8 (uint8x16_t __a) | |
13415 | { | |
13416 | return (uint32x4_t) __a; | |
13417 | } | |
13418 | ||
13419 | __extension__ extern __inline uint64x2_t | |
13420 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13421 | __arm_vreinterpretq_u64_s16 (int16x8_t __a) | |
13422 | { | |
13423 | return (uint64x2_t) __a; | |
13424 | } | |
13425 | ||
13426 | __extension__ extern __inline uint64x2_t | |
13427 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13428 | __arm_vreinterpretq_u64_s32 (int32x4_t __a) | |
13429 | { | |
13430 | return (uint64x2_t) __a; | |
13431 | } | |
13432 | ||
13433 | __extension__ extern __inline uint64x2_t | |
13434 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13435 | __arm_vreinterpretq_u64_s64 (int64x2_t __a) | |
13436 | { | |
13437 | return (uint64x2_t) __a; | |
13438 | } | |
13439 | ||
13440 | __extension__ extern __inline uint64x2_t | |
13441 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13442 | __arm_vreinterpretq_u64_s8 (int8x16_t __a) | |
13443 | { | |
13444 | return (uint64x2_t) __a; | |
13445 | } | |
13446 | ||
13447 | __extension__ extern __inline uint64x2_t | |
13448 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13449 | __arm_vreinterpretq_u64_u16 (uint16x8_t __a) | |
13450 | { | |
13451 | return (uint64x2_t) __a; | |
13452 | } | |
13453 | ||
13454 | __extension__ extern __inline uint64x2_t | |
13455 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13456 | __arm_vreinterpretq_u64_u32 (uint32x4_t __a) | |
13457 | { | |
13458 | return (uint64x2_t) __a; | |
13459 | } | |
13460 | ||
13461 | __extension__ extern __inline uint64x2_t | |
13462 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13463 | __arm_vreinterpretq_u64_u8 (uint8x16_t __a) | |
13464 | { | |
13465 | return (uint64x2_t) __a; | |
13466 | } | |
13467 | ||
13468 | __extension__ extern __inline uint8x16_t | |
13469 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13470 | __arm_vreinterpretq_u8_s16 (int16x8_t __a) | |
13471 | { | |
13472 | return (uint8x16_t) __a; | |
13473 | } | |
13474 | ||
13475 | __extension__ extern __inline uint8x16_t | |
13476 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13477 | __arm_vreinterpretq_u8_s32 (int32x4_t __a) | |
13478 | { | |
13479 | return (uint8x16_t) __a; | |
13480 | } | |
13481 | ||
13482 | __extension__ extern __inline uint8x16_t | |
13483 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13484 | __arm_vreinterpretq_u8_s64 (int64x2_t __a) | |
13485 | { | |
13486 | return (uint8x16_t) __a; | |
13487 | } | |
13488 | ||
13489 | __extension__ extern __inline uint8x16_t | |
13490 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13491 | __arm_vreinterpretq_u8_s8 (int8x16_t __a) | |
13492 | { | |
13493 | return (uint8x16_t) __a; | |
13494 | } | |
13495 | ||
13496 | __extension__ extern __inline uint8x16_t | |
13497 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13498 | __arm_vreinterpretq_u8_u16 (uint16x8_t __a) | |
13499 | { | |
13500 | return (uint8x16_t) __a; | |
13501 | } | |
13502 | ||
13503 | __extension__ extern __inline uint8x16_t | |
13504 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13505 | __arm_vreinterpretq_u8_u32 (uint32x4_t __a) | |
13506 | { | |
13507 | return (uint8x16_t) __a; | |
13508 | } | |
13509 | ||
13510 | __extension__ extern __inline uint8x16_t | |
13511 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13512 | __arm_vreinterpretq_u8_u64 (uint64x2_t __a) | |
13513 | { | |
13514 | return (uint8x16_t) __a; | |
13515 | } | |
13516 | ||
92f80065 SP |
13517 | __extension__ extern __inline uint8x16_t |
13518 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13519 | __arm_vddupq_m_n_u8 (uint8x16_t __inactive, uint32_t __a, const int __imm, mve_pred16_t __p) | |
13520 | { | |
13521 | return __builtin_mve_vddupq_m_n_uv16qi (__inactive, __a, __imm, __p); | |
13522 | } | |
13523 | ||
13524 | __extension__ extern __inline uint32x4_t | |
13525 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13526 | __arm_vddupq_m_n_u32 (uint32x4_t __inactive, uint32_t __a, const int __imm, mve_pred16_t __p) | |
13527 | { | |
13528 | return __builtin_mve_vddupq_m_n_uv4si (__inactive, __a, __imm, __p); | |
13529 | } | |
13530 | ||
13531 | __extension__ extern __inline uint16x8_t | |
13532 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13533 | __arm_vddupq_m_n_u16 (uint16x8_t __inactive, uint32_t __a, const int __imm, mve_pred16_t __p) | |
13534 | { | |
13535 | return __builtin_mve_vddupq_m_n_uv8hi (__inactive, __a, __imm, __p); | |
13536 | } | |
13537 | ||
13538 | __extension__ extern __inline uint8x16_t | |
13539 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13540 | __arm_vddupq_m_wb_u8 (uint8x16_t __inactive, uint32_t * __a, const int __imm, mve_pred16_t __p) | |
13541 | { | |
13542 | uint8x16_t __res = __builtin_mve_vddupq_m_n_uv16qi (__inactive, * __a, __imm, __p); | |
13543 | *__a -= __imm * 16u; | |
13544 | return __res; | |
13545 | } | |
13546 | ||
13547 | __extension__ extern __inline uint16x8_t | |
13548 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13549 | __arm_vddupq_m_wb_u16 (uint16x8_t __inactive, uint32_t * __a, const int __imm, mve_pred16_t __p) | |
13550 | { | |
13551 | uint16x8_t __res = __builtin_mve_vddupq_m_n_uv8hi (__inactive, *__a, __imm, __p); | |
13552 | *__a -= __imm * 8u; | |
13553 | return __res; | |
13554 | } | |
13555 | ||
13556 | __extension__ extern __inline uint32x4_t | |
13557 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13558 | __arm_vddupq_m_wb_u32 (uint32x4_t __inactive, uint32_t * __a, const int __imm, mve_pred16_t __p) | |
13559 | { | |
13560 | uint32x4_t __res = __builtin_mve_vddupq_m_n_uv4si (__inactive, *__a, __imm, __p); | |
13561 | *__a -= __imm * 4u; | |
13562 | return __res; | |
13563 | } | |
13564 | ||
13565 | __extension__ extern __inline uint8x16_t | |
13566 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13567 | __arm_vddupq_n_u8 (uint32_t __a, const int __imm) | |
13568 | { | |
13569 | return __builtin_mve_vddupq_n_uv16qi (__a, __imm); | |
13570 | } | |
13571 | ||
13572 | __extension__ extern __inline uint32x4_t | |
13573 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13574 | __arm_vddupq_n_u32 (uint32_t __a, const int __imm) | |
13575 | { | |
13576 | return __builtin_mve_vddupq_n_uv4si (__a, __imm); | |
13577 | } | |
13578 | ||
13579 | __extension__ extern __inline uint16x8_t | |
13580 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13581 | __arm_vddupq_n_u16 (uint32_t __a, const int __imm) | |
13582 | { | |
13583 | return __builtin_mve_vddupq_n_uv8hi (__a, __imm); | |
13584 | } | |
13585 | ||
13586 | __extension__ extern __inline uint8x16_t | |
13587 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13588 | __arm_vdwdupq_m_n_u8 (uint8x16_t __inactive, uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p) | |
13589 | { | |
13590 | return __builtin_mve_vdwdupq_m_n_uv16qi (__inactive, __a, __b, __imm, __p); | |
13591 | } | |
13592 | ||
13593 | __extension__ extern __inline uint32x4_t | |
13594 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13595 | __arm_vdwdupq_m_n_u32 (uint32x4_t __inactive, uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p) | |
13596 | { | |
13597 | return __builtin_mve_vdwdupq_m_n_uv4si (__inactive, __a, __b, __imm, __p); | |
13598 | } | |
13599 | ||
13600 | __extension__ extern __inline uint16x8_t | |
13601 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13602 | __arm_vdwdupq_m_n_u16 (uint16x8_t __inactive, uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p) | |
13603 | { | |
13604 | return __builtin_mve_vdwdupq_m_n_uv8hi (__inactive, __a, __b, __imm, __p); | |
13605 | } | |
13606 | ||
13607 | __extension__ extern __inline uint8x16_t | |
13608 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13609 | __arm_vdwdupq_m_wb_u8 (uint8x16_t __inactive, uint32_t * __a, uint32_t __b, const int __imm, mve_pred16_t __p) | |
13610 | { | |
13611 | uint8x16_t __res = __builtin_mve_vdwdupq_m_n_uv16qi (__inactive, *__a, __b, __imm, __p); | |
13612 | *__a = __builtin_mve_vdwdupq_m_wb_uv16qi (__inactive, *__a, __b, __imm, __p); | |
13613 | return __res; | |
13614 | } | |
13615 | ||
13616 | __extension__ extern __inline uint32x4_t | |
13617 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13618 | __arm_vdwdupq_m_wb_u32 (uint32x4_t __inactive, uint32_t * __a, uint32_t __b, const int __imm, mve_pred16_t __p) | |
13619 | { | |
13620 | uint32x4_t __res = __builtin_mve_vdwdupq_m_n_uv4si (__inactive, *__a, __b, __imm, __p); | |
13621 | *__a = __builtin_mve_vdwdupq_m_wb_uv4si (__inactive, *__a, __b, __imm, __p); | |
13622 | return __res; | |
13623 | } | |
13624 | ||
13625 | __extension__ extern __inline uint16x8_t | |
13626 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13627 | __arm_vdwdupq_m_wb_u16 (uint16x8_t __inactive, uint32_t * __a, uint32_t __b, const int __imm, mve_pred16_t __p) | |
13628 | { | |
13629 | uint16x8_t __res = __builtin_mve_vdwdupq_m_n_uv8hi (__inactive, *__a, __b, __imm, __p); | |
13630 | *__a = __builtin_mve_vdwdupq_m_wb_uv8hi (__inactive, *__a, __b, __imm, __p); | |
13631 | return __res; | |
13632 | } | |
13633 | ||
13634 | __extension__ extern __inline uint8x16_t | |
13635 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13636 | __arm_vdwdupq_n_u8 (uint32_t __a, uint32_t __b, const int __imm) | |
13637 | { | |
13638 | return __builtin_mve_vdwdupq_n_uv16qi (__a, __b, __imm); | |
13639 | } | |
13640 | ||
13641 | __extension__ extern __inline uint32x4_t | |
13642 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13643 | __arm_vdwdupq_n_u32 (uint32_t __a, uint32_t __b, const int __imm) | |
13644 | { | |
13645 | return __builtin_mve_vdwdupq_n_uv4si (__a, __b, __imm); | |
13646 | } | |
13647 | ||
13648 | __extension__ extern __inline uint16x8_t | |
13649 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13650 | __arm_vdwdupq_n_u16 (uint32_t __a, uint32_t __b, const int __imm) | |
13651 | { | |
13652 | return __builtin_mve_vdwdupq_n_uv8hi (__a, __b, __imm); | |
13653 | } | |
13654 | ||
13655 | __extension__ extern __inline uint8x16_t | |
13656 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13657 | __arm_vdwdupq_wb_u8 (uint32_t * __a, uint32_t __b, const int __imm) | |
13658 | { | |
13659 | uint8x16_t __res = __builtin_mve_vdwdupq_n_uv16qi (*__a, __b, __imm); | |
13660 | *__a = __builtin_mve_vdwdupq_wb_uv16qi (*__a, __b, __imm); | |
13661 | return __res; | |
13662 | } | |
13663 | ||
13664 | __extension__ extern __inline uint32x4_t | |
13665 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13666 | __arm_vdwdupq_wb_u32 (uint32_t * __a, uint32_t __b, const int __imm) | |
13667 | { | |
13668 | uint32x4_t __res = __builtin_mve_vdwdupq_n_uv4si (*__a, __b, __imm); | |
13669 | *__a = __builtin_mve_vdwdupq_wb_uv4si (*__a, __b, __imm); | |
13670 | return __res; | |
13671 | } | |
13672 | ||
13673 | __extension__ extern __inline uint16x8_t | |
13674 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13675 | __arm_vdwdupq_wb_u16 (uint32_t * __a, uint32_t __b, const int __imm) | |
13676 | { | |
13677 | uint16x8_t __res = __builtin_mve_vdwdupq_n_uv8hi (*__a, __b, __imm); | |
13678 | *__a = __builtin_mve_vdwdupq_wb_uv8hi (*__a, __b, __imm); | |
13679 | return __res; | |
13680 | } | |
13681 | ||
13682 | __extension__ extern __inline uint8x16_t | |
13683 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13684 | __arm_vidupq_m_n_u8 (uint8x16_t __inactive, uint32_t __a, const int __imm, mve_pred16_t __p) | |
13685 | { | |
13686 | return __builtin_mve_vidupq_m_n_uv16qi (__inactive, __a, __imm, __p); | |
13687 | } | |
13688 | ||
13689 | __extension__ extern __inline uint32x4_t | |
13690 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13691 | __arm_vidupq_m_n_u32 (uint32x4_t __inactive, uint32_t __a, const int __imm, mve_pred16_t __p) | |
13692 | { | |
13693 | return __builtin_mve_vidupq_m_n_uv4si (__inactive, __a, __imm, __p); | |
13694 | } | |
13695 | ||
13696 | __extension__ extern __inline uint16x8_t | |
13697 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13698 | __arm_vidupq_m_n_u16 (uint16x8_t __inactive, uint32_t __a, const int __imm, mve_pred16_t __p) | |
13699 | { | |
13700 | return __builtin_mve_vidupq_m_n_uv8hi (__inactive, __a, __imm, __p); | |
13701 | } | |
13702 | ||
13703 | __extension__ extern __inline uint8x16_t | |
13704 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13705 | __arm_vidupq_n_u8 (uint32_t __a, const int __imm) | |
13706 | { | |
13707 | return __builtin_mve_vidupq_n_uv16qi (__a, __imm); | |
13708 | } | |
13709 | ||
13710 | __extension__ extern __inline uint8x16_t | |
13711 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13712 | __arm_vidupq_m_wb_u8 (uint8x16_t __inactive, uint32_t * __a, const int __imm, mve_pred16_t __p) | |
13713 | { | |
13714 | uint8x16_t __res = __builtin_mve_vidupq_m_n_uv16qi (__inactive, *__a, __imm, __p); | |
13715 | *__a += __imm * 16u; | |
13716 | return __res; | |
13717 | } | |
13718 | ||
13719 | __extension__ extern __inline uint16x8_t | |
13720 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13721 | __arm_vidupq_m_wb_u16 (uint16x8_t __inactive, uint32_t * __a, const int __imm, mve_pred16_t __p) | |
13722 | { | |
13723 | uint16x8_t __res = __builtin_mve_vidupq_m_n_uv8hi (__inactive, *__a, __imm, __p); | |
13724 | *__a += __imm * 8u; | |
13725 | return __res; | |
13726 | } | |
13727 | ||
13728 | __extension__ extern __inline uint32x4_t | |
13729 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13730 | __arm_vidupq_m_wb_u32 (uint32x4_t __inactive, uint32_t * __a, const int __imm, mve_pred16_t __p) | |
13731 | { | |
13732 | uint32x4_t __res = __builtin_mve_vidupq_m_n_uv4si (__inactive, *__a, __imm, __p); | |
13733 | *__a += __imm * 4u; | |
13734 | return __res; | |
13735 | } | |
13736 | ||
13737 | __extension__ extern __inline uint32x4_t | |
13738 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13739 | __arm_vidupq_n_u32 (uint32_t __a, const int __imm) | |
13740 | { | |
13741 | return __builtin_mve_vidupq_n_uv4si (__a, __imm); | |
13742 | } | |
13743 | ||
13744 | __extension__ extern __inline uint16x8_t | |
13745 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13746 | __arm_vidupq_n_u16 (uint32_t __a, const int __imm) | |
13747 | { | |
13748 | return __builtin_mve_vidupq_n_uv8hi (__a, __imm); | |
13749 | } | |
13750 | ||
13751 | __extension__ extern __inline uint8x16_t | |
13752 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13753 | __arm_vidupq_wb_u8 (uint32_t * __a, const int __imm) | |
13754 | { | |
13755 | uint8x16_t __res = __builtin_mve_vidupq_n_uv16qi (*__a, __imm); | |
13756 | *__a += __imm * 16u; | |
13757 | return __res; | |
13758 | } | |
13759 | ||
13760 | __extension__ extern __inline uint16x8_t | |
13761 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13762 | __arm_vidupq_wb_u16 (uint32_t * __a, const int __imm) | |
13763 | { | |
13764 | uint16x8_t __res = __builtin_mve_vidupq_n_uv8hi (*__a, __imm); | |
13765 | *__a += __imm * 8u; | |
13766 | return __res; | |
13767 | } | |
13768 | ||
13769 | __extension__ extern __inline uint32x4_t | |
13770 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13771 | __arm_vidupq_wb_u32 (uint32_t * __a, const int __imm) | |
13772 | { | |
13773 | uint32x4_t __res = __builtin_mve_vidupq_n_uv4si (*__a, __imm); | |
13774 | *__a += __imm * 4u; | |
13775 | return __res; | |
13776 | } | |
13777 | ||
13778 | __extension__ extern __inline uint8x16_t | |
13779 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13780 | __arm_vddupq_wb_u8 (uint32_t * __a, const int __imm) | |
13781 | { | |
13782 | uint8x16_t __res = __builtin_mve_vddupq_n_uv16qi (*__a, __imm); | |
13783 | *__a -= __imm * 16u; | |
13784 | return __res; | |
13785 | } | |
13786 | ||
13787 | __extension__ extern __inline uint16x8_t | |
13788 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13789 | __arm_vddupq_wb_u16 (uint32_t * __a, const int __imm) | |
13790 | { | |
13791 | uint16x8_t __res = __builtin_mve_vddupq_n_uv8hi (*__a, __imm); | |
13792 | *__a -= __imm * 8u; | |
13793 | return __res; | |
13794 | } | |
13795 | ||
13796 | __extension__ extern __inline uint32x4_t | |
13797 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13798 | __arm_vddupq_wb_u32 (uint32_t * __a, const int __imm) | |
13799 | { | |
13800 | uint32x4_t __res = __builtin_mve_vddupq_n_uv4si (*__a, __imm); | |
13801 | *__a -= __imm * 4u; | |
13802 | return __res; | |
13803 | } | |
13804 | ||
13805 | __extension__ extern __inline uint8x16_t | |
13806 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13807 | __arm_viwdupq_m_n_u8 (uint8x16_t __inactive, uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p) | |
13808 | { | |
13809 | return __builtin_mve_viwdupq_m_n_uv16qi (__inactive, __a, __b, __imm, __p); | |
13810 | } | |
13811 | ||
13812 | __extension__ extern __inline uint32x4_t | |
13813 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13814 | __arm_viwdupq_m_n_u32 (uint32x4_t __inactive, uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p) | |
13815 | { | |
13816 | return __builtin_mve_viwdupq_m_n_uv4si (__inactive, __a, __b, __imm, __p); | |
13817 | } | |
13818 | ||
13819 | __extension__ extern __inline uint16x8_t | |
13820 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13821 | __arm_viwdupq_m_n_u16 (uint16x8_t __inactive, uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p) | |
13822 | { | |
13823 | return __builtin_mve_viwdupq_m_n_uv8hi (__inactive, __a, __b, __imm, __p); | |
13824 | } | |
13825 | ||
13826 | __extension__ extern __inline uint8x16_t | |
13827 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13828 | __arm_viwdupq_m_wb_u8 (uint8x16_t __inactive, uint32_t * __a, uint32_t __b, const int __imm, mve_pred16_t __p) | |
13829 | { | |
13830 | uint8x16_t __res = __builtin_mve_viwdupq_m_n_uv16qi (__inactive, *__a, __b, __imm, __p); | |
13831 | *__a = __builtin_mve_viwdupq_m_wb_uv16qi (__inactive, *__a, __b, __imm, __p); | |
13832 | return __res; | |
13833 | } | |
13834 | ||
13835 | __extension__ extern __inline uint32x4_t | |
13836 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13837 | __arm_viwdupq_m_wb_u32 (uint32x4_t __inactive, uint32_t * __a, uint32_t __b, const int __imm, mve_pred16_t __p) | |
13838 | { | |
13839 | uint32x4_t __res = __builtin_mve_viwdupq_m_n_uv4si (__inactive, *__a, __b, __imm, __p); | |
13840 | *__a = __builtin_mve_viwdupq_m_wb_uv4si (__inactive, *__a, __b, __imm, __p); | |
13841 | return __res; | |
13842 | } | |
13843 | ||
13844 | __extension__ extern __inline uint16x8_t | |
13845 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13846 | __arm_viwdupq_m_wb_u16 (uint16x8_t __inactive, uint32_t * __a, uint32_t __b, const int __imm, mve_pred16_t __p) | |
13847 | { | |
13848 | uint16x8_t __res = __builtin_mve_viwdupq_m_n_uv8hi (__inactive, *__a, __b, __imm, __p); | |
13849 | *__a = __builtin_mve_viwdupq_m_wb_uv8hi (__inactive, *__a, __b, __imm, __p); | |
13850 | return __res; | |
13851 | } | |
13852 | ||
13853 | __extension__ extern __inline uint8x16_t | |
13854 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13855 | __arm_viwdupq_n_u8 (uint32_t __a, uint32_t __b, const int __imm) | |
13856 | { | |
13857 | return __builtin_mve_viwdupq_n_uv16qi (__a, __b, __imm); | |
13858 | } | |
13859 | ||
13860 | __extension__ extern __inline uint32x4_t | |
13861 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13862 | __arm_viwdupq_n_u32 (uint32_t __a, uint32_t __b, const int __imm) | |
13863 | { | |
13864 | return __builtin_mve_viwdupq_n_uv4si (__a, __b, __imm); | |
13865 | } | |
13866 | ||
13867 | __extension__ extern __inline uint16x8_t | |
13868 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13869 | __arm_viwdupq_n_u16 (uint32_t __a, uint32_t __b, const int __imm) | |
13870 | { | |
13871 | return __builtin_mve_viwdupq_n_uv8hi (__a, __b, __imm); | |
13872 | } | |
13873 | ||
13874 | __extension__ extern __inline uint8x16_t | |
13875 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13876 | __arm_viwdupq_wb_u8 (uint32_t * __a, uint32_t __b, const int __imm) | |
13877 | { | |
13878 | uint8x16_t __res = __builtin_mve_viwdupq_n_uv16qi (*__a, __b, __imm); | |
13879 | *__a = __builtin_mve_viwdupq_wb_uv16qi (*__a, __b, __imm); | |
13880 | return __res; | |
13881 | } | |
13882 | ||
13883 | __extension__ extern __inline uint32x4_t | |
13884 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13885 | __arm_viwdupq_wb_u32 (uint32_t * __a, uint32_t __b, const int __imm) | |
13886 | { | |
13887 | uint32x4_t __res = __builtin_mve_viwdupq_n_uv4si (*__a, __b, __imm); | |
13888 | *__a = __builtin_mve_viwdupq_wb_uv4si (*__a, __b, __imm); | |
13889 | return __res; | |
13890 | } | |
13891 | ||
13892 | __extension__ extern __inline uint16x8_t | |
13893 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13894 | __arm_viwdupq_wb_u16 (uint32_t * __a, uint32_t __b, const int __imm) | |
13895 | { | |
13896 | uint16x8_t __res = __builtin_mve_viwdupq_n_uv8hi (*__a, __b, __imm); | |
13897 | *__a = __builtin_mve_viwdupq_wb_uv8hi (*__a, __b, __imm); | |
13898 | return __res; | |
13899 | } | |
13900 | ||
41e1a7ff SP |
13901 | __extension__ extern __inline int64x2_t |
13902 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13903 | __arm_vldrdq_gather_base_wb_s64 (uint64x2_t * __addr, const int __offset) | |
13904 | { | |
13905 | int64x2_t | |
ff825b81 SP |
13906 | result = __builtin_mve_vldrdq_gather_base_nowb_sv2di (*__addr, __offset); |
13907 | *__addr = __builtin_mve_vldrdq_gather_base_wb_sv2di (*__addr, __offset); | |
41e1a7ff SP |
13908 | return result; |
13909 | } | |
13910 | ||
13911 | __extension__ extern __inline uint64x2_t | |
13912 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13913 | __arm_vldrdq_gather_base_wb_u64 (uint64x2_t * __addr, const int __offset) | |
13914 | { | |
13915 | uint64x2_t | |
ff825b81 SP |
13916 | result = __builtin_mve_vldrdq_gather_base_nowb_uv2di (*__addr, __offset); |
13917 | *__addr = __builtin_mve_vldrdq_gather_base_wb_uv2di (*__addr, __offset); | |
41e1a7ff SP |
13918 | return result; |
13919 | } | |
13920 | ||
13921 | __extension__ extern __inline int64x2_t | |
13922 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13923 | __arm_vldrdq_gather_base_wb_z_s64 (uint64x2_t * __addr, const int __offset, mve_pred16_t __p) | |
13924 | { | |
13925 | int64x2_t | |
ff825b81 SP |
13926 | result = __builtin_mve_vldrdq_gather_base_nowb_z_sv2di (*__addr, __offset, __p); |
13927 | *__addr = __builtin_mve_vldrdq_gather_base_wb_z_sv2di (*__addr, __offset, __p); | |
41e1a7ff SP |
13928 | return result; |
13929 | } | |
13930 | ||
13931 | __extension__ extern __inline uint64x2_t | |
13932 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13933 | __arm_vldrdq_gather_base_wb_z_u64 (uint64x2_t * __addr, const int __offset, mve_pred16_t __p) | |
13934 | { | |
13935 | uint64x2_t | |
ff825b81 SP |
13936 | result = __builtin_mve_vldrdq_gather_base_nowb_z_uv2di (*__addr, __offset, __p); |
13937 | *__addr = __builtin_mve_vldrdq_gather_base_wb_z_uv2di (*__addr, __offset, __p); | |
41e1a7ff SP |
13938 | return result; |
13939 | } | |
13940 | ||
13941 | __extension__ extern __inline int32x4_t | |
13942 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13943 | __arm_vldrwq_gather_base_wb_s32 (uint32x4_t * __addr, const int __offset) | |
13944 | { | |
13945 | int32x4_t | |
ff825b81 SP |
13946 | result = __builtin_mve_vldrwq_gather_base_nowb_sv4si (*__addr, __offset); |
13947 | *__addr = __builtin_mve_vldrwq_gather_base_wb_sv4si (*__addr, __offset); | |
41e1a7ff SP |
13948 | return result; |
13949 | } | |
13950 | ||
13951 | __extension__ extern __inline uint32x4_t | |
13952 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13953 | __arm_vldrwq_gather_base_wb_u32 (uint32x4_t * __addr, const int __offset) | |
13954 | { | |
13955 | uint32x4_t | |
ff825b81 SP |
13956 | result = __builtin_mve_vldrwq_gather_base_nowb_uv4si (*__addr, __offset); |
13957 | *__addr = __builtin_mve_vldrwq_gather_base_wb_uv4si (*__addr, __offset); | |
41e1a7ff SP |
13958 | return result; |
13959 | } | |
13960 | ||
13961 | __extension__ extern __inline int32x4_t | |
13962 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13963 | __arm_vldrwq_gather_base_wb_z_s32 (uint32x4_t * __addr, const int __offset, mve_pred16_t __p) | |
13964 | { | |
13965 | int32x4_t | |
ff825b81 SP |
13966 | result = __builtin_mve_vldrwq_gather_base_nowb_z_sv4si (*__addr, __offset, __p); |
13967 | *__addr = __builtin_mve_vldrwq_gather_base_wb_z_sv4si (*__addr, __offset, __p); | |
41e1a7ff SP |
13968 | return result; |
13969 | } | |
13970 | ||
13971 | __extension__ extern __inline uint32x4_t | |
13972 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13973 | __arm_vldrwq_gather_base_wb_z_u32 (uint32x4_t * __addr, const int __offset, mve_pred16_t __p) | |
13974 | { | |
13975 | uint32x4_t | |
ff825b81 SP |
13976 | result = __builtin_mve_vldrwq_gather_base_nowb_z_uv4si (*__addr, __offset, __p); |
13977 | *__addr = __builtin_mve_vldrwq_gather_base_wb_z_uv4si (*__addr, __offset, __p); | |
41e1a7ff SP |
13978 | return result; |
13979 | } | |
13980 | ||
13981 | __extension__ extern __inline void | |
13982 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13983 | __arm_vstrdq_scatter_base_wb_s64 (uint64x2_t * __addr, const int __offset, int64x2_t __value) | |
13984 | { | |
13985 | __builtin_mve_vstrdq_scatter_base_wb_sv2di (*__addr, __offset, __value); | |
13986 | __builtin_mve_vstrdq_scatter_base_wb_add_sv2di (*__addr, __offset, *__addr); | |
13987 | } | |
13988 | ||
13989 | __extension__ extern __inline void | |
13990 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13991 | __arm_vstrdq_scatter_base_wb_u64 (uint64x2_t * __addr, const int __offset, uint64x2_t __value) | |
13992 | { | |
13993 | __builtin_mve_vstrdq_scatter_base_wb_uv2di (*__addr, __offset, __value); | |
13994 | __builtin_mve_vstrdq_scatter_base_wb_add_uv2di (*__addr, __offset, *__addr); | |
13995 | } | |
13996 | ||
13997 | __extension__ extern __inline void | |
13998 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13999 | __arm_vstrdq_scatter_base_wb_p_s64 (uint64x2_t * __addr, const int __offset, int64x2_t __value, mve_pred16_t __p) | |
14000 | { | |
14001 | __builtin_mve_vstrdq_scatter_base_wb_p_sv2di (*__addr, __offset, __value, __p); | |
14002 | __builtin_mve_vstrdq_scatter_base_wb_p_add_sv2di (*__addr, __offset, *__addr, __p); | |
14003 | } | |
14004 | ||
14005 | __extension__ extern __inline void | |
14006 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
14007 | __arm_vstrdq_scatter_base_wb_p_u64 (uint64x2_t * __addr, const int __offset, uint64x2_t __value, mve_pred16_t __p) | |
14008 | { | |
14009 | __builtin_mve_vstrdq_scatter_base_wb_p_uv2di (*__addr, __offset, __value, __p); | |
14010 | __builtin_mve_vstrdq_scatter_base_wb_p_add_uv2di (*__addr, __offset, *__addr, __p); | |
14011 | } | |
14012 | ||
14013 | __extension__ extern __inline void | |
14014 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
14015 | __arm_vstrwq_scatter_base_wb_p_s32 (uint32x4_t * __addr, const int __offset, int32x4_t __value, mve_pred16_t __p) | |
14016 | { | |
14017 | __builtin_mve_vstrwq_scatter_base_wb_p_sv4si (*__addr, __offset, __value, __p); | |
14018 | __builtin_mve_vstrwq_scatter_base_wb_p_add_sv4si (*__addr, __offset, *__addr, __p); | |
14019 | } | |
14020 | ||
14021 | __extension__ extern __inline void | |
14022 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
14023 | __arm_vstrwq_scatter_base_wb_p_u32 (uint32x4_t * __addr, const int __offset, uint32x4_t __value, mve_pred16_t __p) | |
14024 | { | |
14025 | __builtin_mve_vstrwq_scatter_base_wb_p_uv4si (*__addr, __offset, __value, __p); | |
14026 | __builtin_mve_vstrwq_scatter_base_wb_p_add_uv4si (*__addr, __offset, *__addr, __p); | |
14027 | } | |
14028 | ||
14029 | __extension__ extern __inline void | |
14030 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
14031 | __arm_vstrwq_scatter_base_wb_s32 (uint32x4_t * __addr, const int __offset, int32x4_t __value) | |
14032 | { | |
14033 | __builtin_mve_vstrwq_scatter_base_wb_sv4si (*__addr, __offset, __value); | |
14034 | __builtin_mve_vstrwq_scatter_base_wb_add_sv4si (*__addr, __offset, *__addr); | |
14035 | } | |
14036 | ||
14037 | __extension__ extern __inline void | |
14038 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
14039 | __arm_vstrwq_scatter_base_wb_u32 (uint32x4_t * __addr, const int __offset, uint32x4_t __value) | |
14040 | { | |
14041 | __builtin_mve_vstrwq_scatter_base_wb_uv4si (*__addr, __offset, __value); | |
14042 | __builtin_mve_vstrwq_scatter_base_wb_add_uv4si (*__addr, __offset, *__addr); | |
14043 | } | |
14044 | ||
261014a1 SP |
14045 | __extension__ extern __inline uint8x16_t |
14046 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
14047 | __arm_vddupq_x_n_u8 (uint32_t __a, const int __imm, mve_pred16_t __p) | |
14048 | { | |
14049 | return __builtin_mve_vddupq_m_n_uv16qi (vuninitializedq_u8 (), __a, __imm, __p); | |
14050 | } | |
f9355dee | 14051 | |
261014a1 | 14052 | __extension__ extern __inline uint16x8_t |
f9355dee | 14053 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14054 | __arm_vddupq_x_n_u16 (uint32_t __a, const int __imm, mve_pred16_t __p) |
f9355dee | 14055 | { |
261014a1 | 14056 | return __builtin_mve_vddupq_m_n_uv8hi (vuninitializedq_u16 (), __a, __imm, __p); |
f9355dee SP |
14057 | } |
14058 | ||
261014a1 | 14059 | __extension__ extern __inline uint32x4_t |
f9355dee | 14060 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14061 | __arm_vddupq_x_n_u32 (uint32_t __a, const int __imm, mve_pred16_t __p) |
f9355dee | 14062 | { |
261014a1 | 14063 | return __builtin_mve_vddupq_m_n_uv4si (vuninitializedq_u32 (), __a, __imm, __p); |
f9355dee SP |
14064 | } |
14065 | ||
261014a1 | 14066 | __extension__ extern __inline uint8x16_t |
f9355dee | 14067 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14068 | __arm_vddupq_x_wb_u8 (uint32_t *__a, const int __imm, mve_pred16_t __p) |
f9355dee | 14069 | { |
261014a1 SP |
14070 | uint8x16_t __arg1 = vuninitializedq_u8 (); |
14071 | uint8x16_t __res = __builtin_mve_vddupq_m_n_uv16qi (__arg1, * __a, __imm, __p); | |
14072 | *__a -= __imm * 16u; | |
14073 | return __res; | |
f9355dee SP |
14074 | } |
14075 | ||
261014a1 | 14076 | __extension__ extern __inline uint16x8_t |
f9355dee | 14077 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14078 | __arm_vddupq_x_wb_u16 (uint32_t *__a, const int __imm, mve_pred16_t __p) |
f9355dee | 14079 | { |
261014a1 SP |
14080 | uint16x8_t __arg1 = vuninitializedq_u16 (); |
14081 | uint16x8_t __res = __builtin_mve_vddupq_m_n_uv8hi (__arg1, *__a, __imm, __p); | |
14082 | *__a -= __imm * 8u; | |
14083 | return __res; | |
f9355dee SP |
14084 | } |
14085 | ||
261014a1 | 14086 | __extension__ extern __inline uint32x4_t |
f9355dee | 14087 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14088 | __arm_vddupq_x_wb_u32 (uint32_t *__a, const int __imm, mve_pred16_t __p) |
f9355dee | 14089 | { |
261014a1 SP |
14090 | uint32x4_t __arg1 = vuninitializedq_u32 (); |
14091 | uint32x4_t __res = __builtin_mve_vddupq_m_n_uv4si (__arg1, *__a, __imm, __p); | |
14092 | *__a -= __imm * 4u; | |
14093 | return __res; | |
f9355dee SP |
14094 | } |
14095 | ||
261014a1 | 14096 | __extension__ extern __inline uint8x16_t |
f9355dee | 14097 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14098 | __arm_vdwdupq_x_n_u8 (uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p) |
f9355dee | 14099 | { |
261014a1 | 14100 | return __builtin_mve_vdwdupq_m_n_uv16qi (vuninitializedq_u8 (), __a, __b, __imm, __p); |
f9355dee SP |
14101 | } |
14102 | ||
261014a1 | 14103 | __extension__ extern __inline uint16x8_t |
f9355dee | 14104 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14105 | __arm_vdwdupq_x_n_u16 (uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p) |
f9355dee | 14106 | { |
261014a1 | 14107 | return __builtin_mve_vdwdupq_m_n_uv8hi (vuninitializedq_u16 (), __a, __b, __imm, __p); |
f9355dee SP |
14108 | } |
14109 | ||
261014a1 | 14110 | __extension__ extern __inline uint32x4_t |
f9355dee | 14111 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14112 | __arm_vdwdupq_x_n_u32 (uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p) |
f9355dee | 14113 | { |
261014a1 | 14114 | return __builtin_mve_vdwdupq_m_n_uv4si (vuninitializedq_u32 (), __a, __b, __imm, __p); |
f9355dee SP |
14115 | } |
14116 | ||
261014a1 | 14117 | __extension__ extern __inline uint8x16_t |
f9355dee | 14118 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14119 | __arm_vdwdupq_x_wb_u8 (uint32_t *__a, uint32_t __b, const int __imm, mve_pred16_t __p) |
f9355dee | 14120 | { |
261014a1 SP |
14121 | uint8x16_t __arg1 = vuninitializedq_u8 (); |
14122 | uint8x16_t __res = __builtin_mve_vdwdupq_m_n_uv16qi (__arg1, *__a, __b, __imm, __p); | |
14123 | *__a = __builtin_mve_vdwdupq_m_wb_uv16qi (__arg1, *__a, __b, __imm, __p); | |
14124 | return __res; | |
f9355dee SP |
14125 | } |
14126 | ||
261014a1 | 14127 | __extension__ extern __inline uint16x8_t |
f9355dee | 14128 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14129 | __arm_vdwdupq_x_wb_u16 (uint32_t *__a, uint32_t __b, const int __imm, mve_pred16_t __p) |
f9355dee | 14130 | { |
261014a1 SP |
14131 | uint16x8_t __arg1 = vuninitializedq_u16 (); |
14132 | uint16x8_t __res = __builtin_mve_vdwdupq_m_n_uv8hi (__arg1, *__a, __b, __imm, __p); | |
14133 | *__a = __builtin_mve_vdwdupq_m_wb_uv8hi (__arg1, *__a, __b, __imm, __p); | |
14134 | return __res; | |
f9355dee SP |
14135 | } |
14136 | ||
261014a1 | 14137 | __extension__ extern __inline uint32x4_t |
f9355dee | 14138 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14139 | __arm_vdwdupq_x_wb_u32 (uint32_t *__a, uint32_t __b, const int __imm, mve_pred16_t __p) |
f9355dee | 14140 | { |
261014a1 SP |
14141 | uint32x4_t __arg1 = vuninitializedq_u32 (); |
14142 | uint32x4_t __res = __builtin_mve_vdwdupq_m_n_uv4si (__arg1, *__a, __b, __imm, __p); | |
14143 | *__a = __builtin_mve_vdwdupq_m_wb_uv4si (__arg1, *__a, __b, __imm, __p); | |
14144 | return __res; | |
f9355dee SP |
14145 | } |
14146 | ||
261014a1 | 14147 | __extension__ extern __inline uint8x16_t |
f9355dee | 14148 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14149 | __arm_vidupq_x_n_u8 (uint32_t __a, const int __imm, mve_pred16_t __p) |
f9355dee | 14150 | { |
261014a1 | 14151 | return __builtin_mve_vidupq_m_n_uv16qi (vuninitializedq_u8 (), __a, __imm, __p); |
f9355dee SP |
14152 | } |
14153 | ||
261014a1 | 14154 | __extension__ extern __inline uint16x8_t |
f9355dee | 14155 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14156 | __arm_vidupq_x_n_u16 (uint32_t __a, const int __imm, mve_pred16_t __p) |
f9355dee | 14157 | { |
261014a1 | 14158 | return __builtin_mve_vidupq_m_n_uv8hi (vuninitializedq_u16 (), __a, __imm, __p); |
f9355dee SP |
14159 | } |
14160 | ||
261014a1 | 14161 | __extension__ extern __inline uint32x4_t |
f9355dee | 14162 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14163 | __arm_vidupq_x_n_u32 (uint32_t __a, const int __imm, mve_pred16_t __p) |
f9355dee | 14164 | { |
261014a1 | 14165 | return __builtin_mve_vidupq_m_n_uv4si (vuninitializedq_u32 (), __a, __imm, __p); |
f9355dee SP |
14166 | } |
14167 | ||
261014a1 | 14168 | __extension__ extern __inline uint8x16_t |
f9355dee | 14169 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14170 | __arm_vidupq_x_wb_u8 (uint32_t *__a, const int __imm, mve_pred16_t __p) |
f9355dee | 14171 | { |
261014a1 SP |
14172 | uint8x16_t __arg1 = vuninitializedq_u8 (); |
14173 | uint8x16_t __res = __builtin_mve_vidupq_m_n_uv16qi (__arg1, *__a, __imm, __p); | |
14174 | *__a += __imm * 16u; | |
14175 | return __res; | |
f9355dee SP |
14176 | } |
14177 | ||
261014a1 | 14178 | __extension__ extern __inline uint16x8_t |
f9355dee | 14179 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14180 | __arm_vidupq_x_wb_u16 (uint32_t *__a, const int __imm, mve_pred16_t __p) |
f9355dee | 14181 | { |
261014a1 SP |
14182 | uint16x8_t __arg1 = vuninitializedq_u16 (); |
14183 | uint16x8_t __res = __builtin_mve_vidupq_m_n_uv8hi (__arg1, *__a, __imm, __p); | |
14184 | *__a += __imm * 8u; | |
14185 | return __res; | |
f9355dee SP |
14186 | } |
14187 | ||
261014a1 | 14188 | __extension__ extern __inline uint32x4_t |
f9355dee | 14189 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14190 | __arm_vidupq_x_wb_u32 (uint32_t *__a, const int __imm, mve_pred16_t __p) |
f9355dee | 14191 | { |
261014a1 SP |
14192 | uint32x4_t __arg1 = vuninitializedq_u32 (); |
14193 | uint32x4_t __res = __builtin_mve_vidupq_m_n_uv4si (__arg1, *__a, __imm, __p); | |
14194 | *__a += __imm * 4u; | |
14195 | return __res; | |
f9355dee SP |
14196 | } |
14197 | ||
261014a1 | 14198 | __extension__ extern __inline uint8x16_t |
f9355dee | 14199 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14200 | __arm_viwdupq_x_n_u8 (uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p) |
f9355dee | 14201 | { |
261014a1 | 14202 | return __builtin_mve_viwdupq_m_n_uv16qi (vuninitializedq_u8 (), __a, __b, __imm, __p); |
f9355dee SP |
14203 | } |
14204 | ||
261014a1 | 14205 | __extension__ extern __inline uint16x8_t |
f9355dee | 14206 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14207 | __arm_viwdupq_x_n_u16 (uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p) |
f9355dee | 14208 | { |
261014a1 | 14209 | return __builtin_mve_viwdupq_m_n_uv8hi (vuninitializedq_u16 (), __a, __b, __imm, __p); |
f9355dee SP |
14210 | } |
14211 | ||
261014a1 | 14212 | __extension__ extern __inline uint32x4_t |
f9355dee | 14213 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14214 | __arm_viwdupq_x_n_u32 (uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p) |
f9355dee | 14215 | { |
261014a1 | 14216 | return __builtin_mve_viwdupq_m_n_uv4si (vuninitializedq_u32 (), __a, __b, __imm, __p); |
f9355dee SP |
14217 | } |
14218 | ||
261014a1 | 14219 | __extension__ extern __inline uint8x16_t |
f9355dee | 14220 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14221 | __arm_viwdupq_x_wb_u8 (uint32_t *__a, uint32_t __b, const int __imm, mve_pred16_t __p) |
f9355dee | 14222 | { |
261014a1 SP |
14223 | uint8x16_t __arg1 = vuninitializedq_u8 (); |
14224 | uint8x16_t __res = __builtin_mve_viwdupq_m_n_uv16qi (__arg1, *__a, __b, __imm, __p); | |
14225 | *__a = __builtin_mve_viwdupq_m_wb_uv16qi (__arg1, *__a, __b, __imm, __p); | |
14226 | return __res; | |
f9355dee SP |
14227 | } |
14228 | ||
261014a1 | 14229 | __extension__ extern __inline uint16x8_t |
f9355dee | 14230 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14231 | __arm_viwdupq_x_wb_u16 (uint32_t *__a, uint32_t __b, const int __imm, mve_pred16_t __p) |
f9355dee | 14232 | { |
261014a1 SP |
14233 | uint16x8_t __arg1 = vuninitializedq_u16 (); |
14234 | uint16x8_t __res = __builtin_mve_viwdupq_m_n_uv8hi (__arg1, *__a, __b, __imm, __p); | |
14235 | *__a = __builtin_mve_viwdupq_m_wb_uv8hi (__arg1, *__a, __b, __imm, __p); | |
14236 | return __res; | |
f9355dee SP |
14237 | } |
14238 | ||
261014a1 | 14239 | __extension__ extern __inline uint32x4_t |
f9355dee | 14240 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14241 | __arm_viwdupq_x_wb_u32 (uint32_t *__a, uint32_t __b, const int __imm, mve_pred16_t __p) |
f9355dee | 14242 | { |
261014a1 SP |
14243 | uint32x4_t __arg1 = vuninitializedq_u32 (); |
14244 | uint32x4_t __res = __builtin_mve_viwdupq_m_n_uv4si (__arg1, *__a, __b, __imm, __p); | |
14245 | *__a = __builtin_mve_viwdupq_m_wb_uv4si (__arg1, *__a, __b, __imm, __p); | |
14246 | return __res; | |
f9355dee SP |
14247 | } |
14248 | ||
261014a1 | 14249 | __extension__ extern __inline int8x16_t |
f9355dee | 14250 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14251 | __arm_vdupq_x_n_s8 (int8_t __a, mve_pred16_t __p) |
f9355dee | 14252 | { |
261014a1 | 14253 | return __builtin_mve_vdupq_m_n_sv16qi (vuninitializedq_s8 (), __a, __p); |
f9355dee SP |
14254 | } |
14255 | ||
261014a1 | 14256 | __extension__ extern __inline int16x8_t |
f9355dee | 14257 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14258 | __arm_vdupq_x_n_s16 (int16_t __a, mve_pred16_t __p) |
f9355dee | 14259 | { |
261014a1 | 14260 | return __builtin_mve_vdupq_m_n_sv8hi (vuninitializedq_s16 (), __a, __p); |
f9355dee SP |
14261 | } |
14262 | ||
261014a1 | 14263 | __extension__ extern __inline int32x4_t |
f9355dee | 14264 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14265 | __arm_vdupq_x_n_s32 (int32_t __a, mve_pred16_t __p) |
f9355dee | 14266 | { |
261014a1 | 14267 | return __builtin_mve_vdupq_m_n_sv4si (vuninitializedq_s32 (), __a, __p); |
f9355dee SP |
14268 | } |
14269 | ||
261014a1 | 14270 | __extension__ extern __inline uint8x16_t |
f9355dee | 14271 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14272 | __arm_vdupq_x_n_u8 (uint8_t __a, mve_pred16_t __p) |
f9355dee | 14273 | { |
261014a1 | 14274 | return __builtin_mve_vdupq_m_n_uv16qi (vuninitializedq_u8 (), __a, __p); |
f9355dee SP |
14275 | } |
14276 | ||
261014a1 | 14277 | __extension__ extern __inline uint16x8_t |
f9355dee | 14278 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14279 | __arm_vdupq_x_n_u16 (uint16_t __a, mve_pred16_t __p) |
f9355dee | 14280 | { |
261014a1 | 14281 | return __builtin_mve_vdupq_m_n_uv8hi (vuninitializedq_u16 (), __a, __p); |
f9355dee SP |
14282 | } |
14283 | ||
261014a1 | 14284 | __extension__ extern __inline uint32x4_t |
f9355dee | 14285 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14286 | __arm_vdupq_x_n_u32 (uint32_t __a, mve_pred16_t __p) |
f9355dee | 14287 | { |
261014a1 | 14288 | return __builtin_mve_vdupq_m_n_uv4si (vuninitializedq_u32 (), __a, __p); |
f9355dee SP |
14289 | } |
14290 | ||
261014a1 | 14291 | __extension__ extern __inline int8x16_t |
0dad5b33 | 14292 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14293 | __arm_vminq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) |
0dad5b33 | 14294 | { |
261014a1 | 14295 | return __builtin_mve_vminq_m_sv16qi (vuninitializedq_s8 (), __a, __b, __p); |
0dad5b33 SP |
14296 | } |
14297 | ||
261014a1 | 14298 | __extension__ extern __inline int16x8_t |
0dad5b33 | 14299 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14300 | __arm_vminq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) |
0dad5b33 | 14301 | { |
261014a1 | 14302 | return __builtin_mve_vminq_m_sv8hi (vuninitializedq_s16 (), __a, __b, __p); |
0dad5b33 SP |
14303 | } |
14304 | ||
261014a1 | 14305 | __extension__ extern __inline int32x4_t |
0dad5b33 | 14306 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14307 | __arm_vminq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) |
0dad5b33 | 14308 | { |
261014a1 | 14309 | return __builtin_mve_vminq_m_sv4si (vuninitializedq_s32 (), __a, __b, __p); |
e3678b44 SP |
14310 | } |
14311 | ||
261014a1 | 14312 | __extension__ extern __inline uint8x16_t |
e3678b44 | 14313 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14314 | __arm_vminq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) |
e3678b44 | 14315 | { |
261014a1 | 14316 | return __builtin_mve_vminq_m_uv16qi (vuninitializedq_u8 (), __a, __b, __p); |
0dad5b33 SP |
14317 | } |
14318 | ||
14319 | __extension__ extern __inline uint16x8_t | |
14320 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 14321 | __arm_vminq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) |
0dad5b33 | 14322 | { |
261014a1 | 14323 | return __builtin_mve_vminq_m_uv8hi (vuninitializedq_u16 (), __a, __b, __p); |
0dad5b33 SP |
14324 | } |
14325 | ||
e3678b44 | 14326 | __extension__ extern __inline uint32x4_t |
0dad5b33 | 14327 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14328 | __arm_vminq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) |
0dad5b33 | 14329 | { |
261014a1 | 14330 | return __builtin_mve_vminq_m_uv4si (vuninitializedq_u32 (), __a, __b, __p); |
e3678b44 SP |
14331 | } |
14332 | ||
261014a1 | 14333 | __extension__ extern __inline int8x16_t |
e3678b44 | 14334 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14335 | __arm_vmaxq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) |
e3678b44 | 14336 | { |
261014a1 | 14337 | return __builtin_mve_vmaxq_m_sv16qi (vuninitializedq_s8 (), __a, __b, __p); |
e3678b44 SP |
14338 | } |
14339 | ||
261014a1 | 14340 | __extension__ extern __inline int16x8_t |
e3678b44 | 14341 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14342 | __arm_vmaxq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) |
e3678b44 | 14343 | { |
261014a1 | 14344 | return __builtin_mve_vmaxq_m_sv8hi (vuninitializedq_s16 (), __a, __b, __p); |
0dad5b33 SP |
14345 | } |
14346 | ||
261014a1 | 14347 | __extension__ extern __inline int32x4_t |
0dad5b33 | 14348 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14349 | __arm_vmaxq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) |
0dad5b33 | 14350 | { |
261014a1 SP |
14351 | return __builtin_mve_vmaxq_m_sv4si (vuninitializedq_s32 (), __a, __b, __p); |
14352 | } | |
14353 | ||
14354 | __extension__ extern __inline uint8x16_t | |
14355 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
14356 | __arm_vmaxq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
14357 | { | |
14358 | return __builtin_mve_vmaxq_m_uv16qi (vuninitializedq_u8 (), __a, __b, __p); | |
e3678b44 SP |
14359 | } |
14360 | ||
14361 | __extension__ extern __inline uint16x8_t | |
14362 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 14363 | __arm_vmaxq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) |
e3678b44 | 14364 | { |
261014a1 | 14365 | return __builtin_mve_vmaxq_m_uv8hi (vuninitializedq_u16 (), __a, __b, __p); |
e3678b44 SP |
14366 | } |
14367 | ||
14368 | __extension__ extern __inline uint32x4_t | |
14369 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 14370 | __arm_vmaxq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) |
e3678b44 | 14371 | { |
261014a1 SP |
14372 | return __builtin_mve_vmaxq_m_uv4si (vuninitializedq_u32 (), __a, __b, __p); |
14373 | } | |
14374 | ||
14375 | __extension__ extern __inline int8x16_t | |
14376 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
14377 | __arm_vabdq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
14378 | { | |
14379 | return __builtin_mve_vabdq_m_sv16qi (vuninitializedq_s8 (), __a, __b, __p); | |
e3678b44 SP |
14380 | } |
14381 | ||
14382 | __extension__ extern __inline int16x8_t | |
14383 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 14384 | __arm_vabdq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) |
e3678b44 | 14385 | { |
261014a1 | 14386 | return __builtin_mve_vabdq_m_sv8hi (vuninitializedq_s16 (), __a, __b, __p); |
e3678b44 SP |
14387 | } |
14388 | ||
14389 | __extension__ extern __inline int32x4_t | |
14390 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 14391 | __arm_vabdq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) |
e3678b44 | 14392 | { |
261014a1 | 14393 | return __builtin_mve_vabdq_m_sv4si (vuninitializedq_s32 (), __a, __b, __p); |
e3678b44 SP |
14394 | } |
14395 | ||
261014a1 | 14396 | __extension__ extern __inline uint8x16_t |
e3678b44 | 14397 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14398 | __arm_vabdq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) |
e3678b44 | 14399 | { |
261014a1 | 14400 | return __builtin_mve_vabdq_m_uv16qi (vuninitializedq_u8 (), __a, __b, __p); |
e3678b44 SP |
14401 | } |
14402 | ||
261014a1 | 14403 | __extension__ extern __inline uint16x8_t |
e3678b44 | 14404 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14405 | __arm_vabdq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) |
e3678b44 | 14406 | { |
261014a1 | 14407 | return __builtin_mve_vabdq_m_uv8hi (vuninitializedq_u16 (), __a, __b, __p); |
e3678b44 SP |
14408 | } |
14409 | ||
261014a1 | 14410 | __extension__ extern __inline uint32x4_t |
e3678b44 | 14411 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14412 | __arm_vabdq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) |
e3678b44 | 14413 | { |
261014a1 | 14414 | return __builtin_mve_vabdq_m_uv4si (vuninitializedq_u32 (), __a, __b, __p); |
e3678b44 SP |
14415 | } |
14416 | ||
261014a1 | 14417 | __extension__ extern __inline int8x16_t |
e3678b44 | 14418 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14419 | __arm_vabsq_x_s8 (int8x16_t __a, mve_pred16_t __p) |
e3678b44 | 14420 | { |
261014a1 | 14421 | return __builtin_mve_vabsq_m_sv16qi (vuninitializedq_s8 (), __a, __p); |
e3678b44 SP |
14422 | } |
14423 | ||
14424 | __extension__ extern __inline int16x8_t | |
14425 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 14426 | __arm_vabsq_x_s16 (int16x8_t __a, mve_pred16_t __p) |
e3678b44 | 14427 | { |
261014a1 | 14428 | return __builtin_mve_vabsq_m_sv8hi (vuninitializedq_s16 (), __a, __p); |
e3678b44 SP |
14429 | } |
14430 | ||
14431 | __extension__ extern __inline int32x4_t | |
14432 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 14433 | __arm_vabsq_x_s32 (int32x4_t __a, mve_pred16_t __p) |
e3678b44 | 14434 | { |
261014a1 | 14435 | return __builtin_mve_vabsq_m_sv4si (vuninitializedq_s32 (), __a, __p); |
0dad5b33 SP |
14436 | } |
14437 | ||
261014a1 | 14438 | __extension__ extern __inline int8x16_t |
0dad5b33 | 14439 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14440 | __arm_vaddq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) |
0dad5b33 | 14441 | { |
261014a1 | 14442 | return __builtin_mve_vaddq_m_sv16qi (vuninitializedq_s8 (), __a, __b, __p); |
e3678b44 SP |
14443 | } |
14444 | ||
261014a1 | 14445 | __extension__ extern __inline int16x8_t |
e3678b44 | 14446 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14447 | __arm_vaddq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) |
e3678b44 | 14448 | { |
261014a1 | 14449 | return __builtin_mve_vaddq_m_sv8hi (vuninitializedq_s16 (), __a, __b, __p); |
0dad5b33 SP |
14450 | } |
14451 | ||
261014a1 | 14452 | __extension__ extern __inline int32x4_t |
0dad5b33 | 14453 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14454 | __arm_vaddq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) |
0dad5b33 | 14455 | { |
261014a1 | 14456 | return __builtin_mve_vaddq_m_sv4si (vuninitializedq_s32 (), __a, __b, __p); |
0dad5b33 SP |
14457 | } |
14458 | ||
261014a1 | 14459 | __extension__ extern __inline int8x16_t |
0dad5b33 | 14460 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14461 | __arm_vaddq_x_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p) |
0dad5b33 | 14462 | { |
261014a1 | 14463 | return __builtin_mve_vaddq_m_n_sv16qi (vuninitializedq_s8 (), __a, __b, __p); |
e3678b44 SP |
14464 | } |
14465 | ||
261014a1 | 14466 | __extension__ extern __inline int16x8_t |
e3678b44 | 14467 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14468 | __arm_vaddq_x_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p) |
e3678b44 | 14469 | { |
261014a1 | 14470 | return __builtin_mve_vaddq_m_n_sv8hi (vuninitializedq_s16 (), __a, __b, __p); |
0dad5b33 SP |
14471 | } |
14472 | ||
261014a1 | 14473 | __extension__ extern __inline int32x4_t |
0dad5b33 | 14474 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14475 | __arm_vaddq_x_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p) |
0dad5b33 | 14476 | { |
261014a1 | 14477 | return __builtin_mve_vaddq_m_n_sv4si (vuninitializedq_s32 (), __a, __b, __p); |
0dad5b33 SP |
14478 | } |
14479 | ||
261014a1 | 14480 | __extension__ extern __inline uint8x16_t |
e3678b44 | 14481 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14482 | __arm_vaddq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) |
e3678b44 | 14483 | { |
261014a1 | 14484 | return __builtin_mve_vaddq_m_uv16qi (vuninitializedq_u8 (), __a, __b, __p); |
e3678b44 | 14485 | } |
f9355dee | 14486 | |
261014a1 | 14487 | __extension__ extern __inline uint16x8_t |
e3678b44 | 14488 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14489 | __arm_vaddq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) |
e3678b44 | 14490 | { |
261014a1 | 14491 | return __builtin_mve_vaddq_m_uv8hi (vuninitializedq_u16 (), __a, __b, __p); |
e3678b44 SP |
14492 | } |
14493 | ||
261014a1 | 14494 | __extension__ extern __inline uint32x4_t |
e3678b44 | 14495 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14496 | __arm_vaddq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) |
e3678b44 | 14497 | { |
261014a1 | 14498 | return __builtin_mve_vaddq_m_uv4si (vuninitializedq_u32 (), __a, __b, __p); |
e3678b44 SP |
14499 | } |
14500 | ||
261014a1 | 14501 | __extension__ extern __inline uint8x16_t |
e3678b44 | 14502 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14503 | __arm_vaddq_x_n_u8 (uint8x16_t __a, uint8_t __b, mve_pred16_t __p) |
e3678b44 | 14504 | { |
261014a1 | 14505 | return __builtin_mve_vaddq_m_n_uv16qi (vuninitializedq_u8 (), __a, __b, __p); |
e3678b44 SP |
14506 | } |
14507 | ||
261014a1 | 14508 | __extension__ extern __inline uint16x8_t |
e3678b44 | 14509 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14510 | __arm_vaddq_x_n_u16 (uint16x8_t __a, uint16_t __b, mve_pred16_t __p) |
e3678b44 | 14511 | { |
261014a1 | 14512 | return __builtin_mve_vaddq_m_n_uv8hi (vuninitializedq_u16 (), __a, __b, __p); |
e3678b44 SP |
14513 | } |
14514 | ||
261014a1 | 14515 | __extension__ extern __inline uint32x4_t |
e3678b44 | 14516 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14517 | __arm_vaddq_x_n_u32 (uint32x4_t __a, uint32_t __b, mve_pred16_t __p) |
e3678b44 | 14518 | { |
261014a1 | 14519 | return __builtin_mve_vaddq_m_n_uv4si (vuninitializedq_u32 (), __a, __b, __p); |
e3678b44 SP |
14520 | } |
14521 | ||
261014a1 | 14522 | __extension__ extern __inline int8x16_t |
e3678b44 | 14523 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14524 | __arm_vclsq_x_s8 (int8x16_t __a, mve_pred16_t __p) |
e3678b44 | 14525 | { |
261014a1 | 14526 | return __builtin_mve_vclsq_m_sv16qi (vuninitializedq_s8 (), __a, __p); |
e3678b44 SP |
14527 | } |
14528 | ||
261014a1 | 14529 | __extension__ extern __inline int16x8_t |
e3678b44 | 14530 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14531 | __arm_vclsq_x_s16 (int16x8_t __a, mve_pred16_t __p) |
e3678b44 | 14532 | { |
261014a1 | 14533 | return __builtin_mve_vclsq_m_sv8hi (vuninitializedq_s16 (), __a, __p); |
e3678b44 SP |
14534 | } |
14535 | ||
261014a1 | 14536 | __extension__ extern __inline int32x4_t |
e3678b44 | 14537 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14538 | __arm_vclsq_x_s32 (int32x4_t __a, mve_pred16_t __p) |
e3678b44 | 14539 | { |
261014a1 | 14540 | return __builtin_mve_vclsq_m_sv4si (vuninitializedq_s32 (), __a, __p); |
e3678b44 SP |
14541 | } |
14542 | ||
261014a1 | 14543 | __extension__ extern __inline int8x16_t |
e3678b44 | 14544 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14545 | __arm_vclzq_x_s8 (int8x16_t __a, mve_pred16_t __p) |
e3678b44 | 14546 | { |
261014a1 | 14547 | return __builtin_mve_vclzq_m_sv16qi (vuninitializedq_s8 (), __a, __p); |
e3678b44 SP |
14548 | } |
14549 | ||
261014a1 | 14550 | __extension__ extern __inline int16x8_t |
e3678b44 | 14551 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14552 | __arm_vclzq_x_s16 (int16x8_t __a, mve_pred16_t __p) |
e3678b44 | 14553 | { |
261014a1 | 14554 | return __builtin_mve_vclzq_m_sv8hi (vuninitializedq_s16 (), __a, __p); |
e3678b44 SP |
14555 | } |
14556 | ||
261014a1 | 14557 | __extension__ extern __inline int32x4_t |
e3678b44 | 14558 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14559 | __arm_vclzq_x_s32 (int32x4_t __a, mve_pred16_t __p) |
e3678b44 | 14560 | { |
261014a1 | 14561 | return __builtin_mve_vclzq_m_sv4si (vuninitializedq_s32 (), __a, __p); |
e3678b44 SP |
14562 | } |
14563 | ||
261014a1 | 14564 | __extension__ extern __inline uint8x16_t |
e3678b44 | 14565 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14566 | __arm_vclzq_x_u8 (uint8x16_t __a, mve_pred16_t __p) |
e3678b44 | 14567 | { |
261014a1 | 14568 | return __builtin_mve_vclzq_m_uv16qi (vuninitializedq_u8 (), __a, __p); |
e3678b44 SP |
14569 | } |
14570 | ||
261014a1 | 14571 | __extension__ extern __inline uint16x8_t |
e3678b44 | 14572 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14573 | __arm_vclzq_x_u16 (uint16x8_t __a, mve_pred16_t __p) |
e3678b44 | 14574 | { |
261014a1 | 14575 | return __builtin_mve_vclzq_m_uv8hi (vuninitializedq_u16 (), __a, __p); |
e3678b44 SP |
14576 | } |
14577 | ||
261014a1 | 14578 | __extension__ extern __inline uint32x4_t |
e3678b44 | 14579 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14580 | __arm_vclzq_x_u32 (uint32x4_t __a, mve_pred16_t __p) |
e3678b44 | 14581 | { |
261014a1 | 14582 | return __builtin_mve_vclzq_m_uv4si (vuninitializedq_u32 (), __a, __p); |
e3678b44 SP |
14583 | } |
14584 | ||
261014a1 | 14585 | __extension__ extern __inline int8x16_t |
e3678b44 | 14586 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14587 | __arm_vnegq_x_s8 (int8x16_t __a, mve_pred16_t __p) |
e3678b44 | 14588 | { |
261014a1 | 14589 | return __builtin_mve_vnegq_m_sv16qi (vuninitializedq_s8 (), __a, __p); |
e3678b44 SP |
14590 | } |
14591 | ||
261014a1 | 14592 | __extension__ extern __inline int16x8_t |
e3678b44 | 14593 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14594 | __arm_vnegq_x_s16 (int16x8_t __a, mve_pred16_t __p) |
e3678b44 | 14595 | { |
261014a1 | 14596 | return __builtin_mve_vnegq_m_sv8hi (vuninitializedq_s16 (), __a, __p); |
e3678b44 SP |
14597 | } |
14598 | ||
261014a1 | 14599 | __extension__ extern __inline int32x4_t |
e3678b44 | 14600 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14601 | __arm_vnegq_x_s32 (int32x4_t __a, mve_pred16_t __p) |
e3678b44 | 14602 | { |
261014a1 | 14603 | return __builtin_mve_vnegq_m_sv4si (vuninitializedq_s32 (), __a, __p); |
e3678b44 SP |
14604 | } |
14605 | ||
261014a1 | 14606 | __extension__ extern __inline int8x16_t |
e3678b44 | 14607 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14608 | __arm_vmulhq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) |
e3678b44 | 14609 | { |
261014a1 | 14610 | return __builtin_mve_vmulhq_m_sv16qi (vuninitializedq_s8 (), __a, __b, __p); |
e3678b44 SP |
14611 | } |
14612 | ||
261014a1 | 14613 | __extension__ extern __inline int16x8_t |
e3678b44 | 14614 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14615 | __arm_vmulhq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) |
e3678b44 | 14616 | { |
261014a1 | 14617 | return __builtin_mve_vmulhq_m_sv8hi (vuninitializedq_s16 (), __a, __b, __p); |
e3678b44 SP |
14618 | } |
14619 | ||
261014a1 | 14620 | __extension__ extern __inline int32x4_t |
e3678b44 | 14621 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14622 | __arm_vmulhq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) |
e3678b44 | 14623 | { |
261014a1 | 14624 | return __builtin_mve_vmulhq_m_sv4si (vuninitializedq_s32 (), __a, __b, __p); |
e3678b44 SP |
14625 | } |
14626 | ||
261014a1 | 14627 | __extension__ extern __inline uint8x16_t |
e3678b44 | 14628 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14629 | __arm_vmulhq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) |
e3678b44 | 14630 | { |
261014a1 | 14631 | return __builtin_mve_vmulhq_m_uv16qi (vuninitializedq_u8 (), __a, __b, __p); |
e3678b44 SP |
14632 | } |
14633 | ||
261014a1 | 14634 | __extension__ extern __inline uint16x8_t |
e3678b44 | 14635 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14636 | __arm_vmulhq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) |
e3678b44 | 14637 | { |
261014a1 | 14638 | return __builtin_mve_vmulhq_m_uv8hi (vuninitializedq_u16 (), __a, __b, __p); |
e3678b44 SP |
14639 | } |
14640 | ||
261014a1 | 14641 | __extension__ extern __inline uint32x4_t |
e3678b44 | 14642 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14643 | __arm_vmulhq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) |
e3678b44 | 14644 | { |
261014a1 | 14645 | return __builtin_mve_vmulhq_m_uv4si (vuninitializedq_u32 (), __a, __b, __p); |
e3678b44 SP |
14646 | } |
14647 | ||
261014a1 | 14648 | __extension__ extern __inline uint16x8_t |
e3678b44 | 14649 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14650 | __arm_vmullbq_poly_x_p8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) |
e3678b44 | 14651 | { |
261014a1 | 14652 | return __builtin_mve_vmullbq_poly_m_pv16qi (vuninitializedq_u16 (), __a, __b, __p); |
e3678b44 SP |
14653 | } |
14654 | ||
261014a1 | 14655 | __extension__ extern __inline uint32x4_t |
e3678b44 | 14656 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14657 | __arm_vmullbq_poly_x_p16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) |
e3678b44 | 14658 | { |
261014a1 | 14659 | return __builtin_mve_vmullbq_poly_m_pv8hi (vuninitializedq_u32 (), __a, __b, __p); |
e3678b44 SP |
14660 | } |
14661 | ||
261014a1 | 14662 | __extension__ extern __inline int16x8_t |
e3678b44 | 14663 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14664 | __arm_vmullbq_int_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) |
e3678b44 | 14665 | { |
261014a1 | 14666 | return __builtin_mve_vmullbq_int_m_sv16qi (vuninitializedq_s16 (), __a, __b, __p); |
e3678b44 SP |
14667 | } |
14668 | ||
261014a1 | 14669 | __extension__ extern __inline int32x4_t |
e3678b44 | 14670 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14671 | __arm_vmullbq_int_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) |
e3678b44 | 14672 | { |
261014a1 | 14673 | return __builtin_mve_vmullbq_int_m_sv8hi (vuninitializedq_s32 (), __a, __b, __p); |
e3678b44 SP |
14674 | } |
14675 | ||
261014a1 | 14676 | __extension__ extern __inline int64x2_t |
e3678b44 | 14677 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14678 | __arm_vmullbq_int_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) |
e3678b44 | 14679 | { |
261014a1 | 14680 | return __builtin_mve_vmullbq_int_m_sv4si (vuninitializedq_s64 (), __a, __b, __p); |
e3678b44 SP |
14681 | } |
14682 | ||
261014a1 | 14683 | __extension__ extern __inline uint16x8_t |
e3678b44 | 14684 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14685 | __arm_vmullbq_int_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) |
e3678b44 | 14686 | { |
261014a1 | 14687 | return __builtin_mve_vmullbq_int_m_uv16qi (vuninitializedq_u16 (), __a, __b, __p); |
e3678b44 SP |
14688 | } |
14689 | ||
261014a1 | 14690 | __extension__ extern __inline uint32x4_t |
e3678b44 | 14691 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14692 | __arm_vmullbq_int_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) |
e3678b44 | 14693 | { |
261014a1 | 14694 | return __builtin_mve_vmullbq_int_m_uv8hi (vuninitializedq_u32 (), __a, __b, __p); |
e3678b44 SP |
14695 | } |
14696 | ||
261014a1 | 14697 | __extension__ extern __inline uint64x2_t |
e3678b44 | 14698 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14699 | __arm_vmullbq_int_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) |
e3678b44 | 14700 | { |
261014a1 | 14701 | return __builtin_mve_vmullbq_int_m_uv4si (vuninitializedq_u64 (), __a, __b, __p); |
e3678b44 SP |
14702 | } |
14703 | ||
261014a1 | 14704 | __extension__ extern __inline uint16x8_t |
e3678b44 | 14705 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14706 | __arm_vmulltq_poly_x_p8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) |
e3678b44 | 14707 | { |
261014a1 | 14708 | return __builtin_mve_vmulltq_poly_m_pv16qi (vuninitializedq_u16 (), __a, __b, __p); |
e3678b44 SP |
14709 | } |
14710 | ||
261014a1 | 14711 | __extension__ extern __inline uint32x4_t |
e3678b44 | 14712 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14713 | __arm_vmulltq_poly_x_p16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) |
e3678b44 | 14714 | { |
261014a1 | 14715 | return __builtin_mve_vmulltq_poly_m_pv8hi (vuninitializedq_u32 (), __a, __b, __p); |
e3678b44 SP |
14716 | } |
14717 | ||
261014a1 | 14718 | __extension__ extern __inline int16x8_t |
e3678b44 | 14719 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14720 | __arm_vmulltq_int_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) |
e3678b44 | 14721 | { |
261014a1 | 14722 | return __builtin_mve_vmulltq_int_m_sv16qi (vuninitializedq_s16 (), __a, __b, __p); |
e3678b44 SP |
14723 | } |
14724 | ||
261014a1 | 14725 | __extension__ extern __inline int32x4_t |
e3678b44 | 14726 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14727 | __arm_vmulltq_int_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) |
e3678b44 | 14728 | { |
261014a1 | 14729 | return __builtin_mve_vmulltq_int_m_sv8hi (vuninitializedq_s32 (), __a, __b, __p); |
e3678b44 SP |
14730 | } |
14731 | ||
261014a1 | 14732 | __extension__ extern __inline int64x2_t |
e3678b44 | 14733 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14734 | __arm_vmulltq_int_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) |
e3678b44 | 14735 | { |
261014a1 | 14736 | return __builtin_mve_vmulltq_int_m_sv4si (vuninitializedq_s64 (), __a, __b, __p); |
e3678b44 SP |
14737 | } |
14738 | ||
261014a1 | 14739 | __extension__ extern __inline uint16x8_t |
e3678b44 | 14740 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14741 | __arm_vmulltq_int_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) |
e3678b44 | 14742 | { |
261014a1 | 14743 | return __builtin_mve_vmulltq_int_m_uv16qi (vuninitializedq_u16 (), __a, __b, __p); |
e3678b44 SP |
14744 | } |
14745 | ||
261014a1 | 14746 | __extension__ extern __inline uint32x4_t |
e3678b44 | 14747 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14748 | __arm_vmulltq_int_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) |
e3678b44 | 14749 | { |
261014a1 | 14750 | return __builtin_mve_vmulltq_int_m_uv8hi (vuninitializedq_u32 (), __a, __b, __p); |
e3678b44 SP |
14751 | } |
14752 | ||
261014a1 | 14753 | __extension__ extern __inline uint64x2_t |
e3678b44 | 14754 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14755 | __arm_vmulltq_int_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) |
e3678b44 | 14756 | { |
261014a1 | 14757 | return __builtin_mve_vmulltq_int_m_uv4si (vuninitializedq_u64 (), __a, __b, __p); |
e3678b44 SP |
14758 | } |
14759 | ||
261014a1 | 14760 | __extension__ extern __inline int8x16_t |
e3678b44 | 14761 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14762 | __arm_vmulq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) |
e3678b44 | 14763 | { |
261014a1 | 14764 | return __builtin_mve_vmulq_m_sv16qi (vuninitializedq_s8 (), __a, __b, __p); |
e3678b44 SP |
14765 | } |
14766 | ||
261014a1 | 14767 | __extension__ extern __inline int16x8_t |
e3678b44 | 14768 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14769 | __arm_vmulq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) |
e3678b44 | 14770 | { |
261014a1 | 14771 | return __builtin_mve_vmulq_m_sv8hi (vuninitializedq_s16 (), __a, __b, __p); |
e3678b44 SP |
14772 | } |
14773 | ||
261014a1 | 14774 | __extension__ extern __inline int32x4_t |
e3678b44 | 14775 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14776 | __arm_vmulq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) |
e3678b44 | 14777 | { |
261014a1 | 14778 | return __builtin_mve_vmulq_m_sv4si (vuninitializedq_s32 (), __a, __b, __p); |
e3678b44 SP |
14779 | } |
14780 | ||
261014a1 | 14781 | __extension__ extern __inline int8x16_t |
e3678b44 | 14782 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14783 | __arm_vmulq_x_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p) |
e3678b44 | 14784 | { |
261014a1 | 14785 | return __builtin_mve_vmulq_m_n_sv16qi (vuninitializedq_s8 (), __a, __b, __p); |
e3678b44 SP |
14786 | } |
14787 | ||
261014a1 | 14788 | __extension__ extern __inline int16x8_t |
e3678b44 | 14789 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14790 | __arm_vmulq_x_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p) |
e3678b44 | 14791 | { |
261014a1 | 14792 | return __builtin_mve_vmulq_m_n_sv8hi (vuninitializedq_s16 (), __a, __b, __p); |
e3678b44 SP |
14793 | } |
14794 | ||
261014a1 | 14795 | __extension__ extern __inline int32x4_t |
e3678b44 | 14796 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14797 | __arm_vmulq_x_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p) |
e3678b44 | 14798 | { |
261014a1 | 14799 | return __builtin_mve_vmulq_m_n_sv4si (vuninitializedq_s32 (), __a, __b, __p); |
e3678b44 SP |
14800 | } |
14801 | ||
261014a1 | 14802 | __extension__ extern __inline uint8x16_t |
e3678b44 | 14803 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14804 | __arm_vmulq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) |
e3678b44 | 14805 | { |
261014a1 | 14806 | return __builtin_mve_vmulq_m_uv16qi (vuninitializedq_u8 (), __a, __b, __p); |
e3678b44 SP |
14807 | } |
14808 | ||
261014a1 | 14809 | __extension__ extern __inline uint16x8_t |
e3678b44 | 14810 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14811 | __arm_vmulq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) |
e3678b44 | 14812 | { |
261014a1 | 14813 | return __builtin_mve_vmulq_m_uv8hi (vuninitializedq_u16 (), __a, __b, __p); |
e3678b44 SP |
14814 | } |
14815 | ||
261014a1 | 14816 | __extension__ extern __inline uint32x4_t |
e3678b44 | 14817 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14818 | __arm_vmulq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) |
e3678b44 | 14819 | { |
261014a1 | 14820 | return __builtin_mve_vmulq_m_uv4si (vuninitializedq_u32 (), __a, __b, __p); |
e3678b44 SP |
14821 | } |
14822 | ||
261014a1 | 14823 | __extension__ extern __inline uint8x16_t |
e3678b44 | 14824 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14825 | __arm_vmulq_x_n_u8 (uint8x16_t __a, uint8_t __b, mve_pred16_t __p) |
e3678b44 | 14826 | { |
261014a1 | 14827 | return __builtin_mve_vmulq_m_n_uv16qi (vuninitializedq_u8 (), __a, __b, __p); |
e3678b44 SP |
14828 | } |
14829 | ||
261014a1 | 14830 | __extension__ extern __inline uint16x8_t |
e3678b44 | 14831 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14832 | __arm_vmulq_x_n_u16 (uint16x8_t __a, uint16_t __b, mve_pred16_t __p) |
e3678b44 | 14833 | { |
261014a1 | 14834 | return __builtin_mve_vmulq_m_n_uv8hi (vuninitializedq_u16 (), __a, __b, __p); |
e3678b44 SP |
14835 | } |
14836 | ||
261014a1 | 14837 | __extension__ extern __inline uint32x4_t |
e3678b44 | 14838 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14839 | __arm_vmulq_x_n_u32 (uint32x4_t __a, uint32_t __b, mve_pred16_t __p) |
e3678b44 | 14840 | { |
261014a1 | 14841 | return __builtin_mve_vmulq_m_n_uv4si (vuninitializedq_u32 (), __a, __b, __p); |
e3678b44 SP |
14842 | } |
14843 | ||
261014a1 | 14844 | __extension__ extern __inline int8x16_t |
e3678b44 | 14845 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14846 | __arm_vsubq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) |
e3678b44 | 14847 | { |
261014a1 | 14848 | return __builtin_mve_vsubq_m_sv16qi (vuninitializedq_s8 (), __a, __b, __p); |
e3678b44 SP |
14849 | } |
14850 | ||
261014a1 | 14851 | __extension__ extern __inline int16x8_t |
e3678b44 | 14852 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14853 | __arm_vsubq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) |
e3678b44 | 14854 | { |
261014a1 | 14855 | return __builtin_mve_vsubq_m_sv8hi (vuninitializedq_s16 (), __a, __b, __p); |
e3678b44 SP |
14856 | } |
14857 | ||
261014a1 | 14858 | __extension__ extern __inline int32x4_t |
e3678b44 | 14859 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14860 | __arm_vsubq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) |
e3678b44 | 14861 | { |
261014a1 | 14862 | return __builtin_mve_vsubq_m_sv4si (vuninitializedq_s32 (), __a, __b, __p); |
e3678b44 SP |
14863 | } |
14864 | ||
261014a1 | 14865 | __extension__ extern __inline int8x16_t |
e3678b44 | 14866 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14867 | __arm_vsubq_x_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p) |
e3678b44 | 14868 | { |
261014a1 | 14869 | return __builtin_mve_vsubq_m_n_sv16qi (vuninitializedq_s8 (), __a, __b, __p); |
e3678b44 SP |
14870 | } |
14871 | ||
261014a1 | 14872 | __extension__ extern __inline int16x8_t |
e3678b44 | 14873 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14874 | __arm_vsubq_x_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p) |
e3678b44 | 14875 | { |
261014a1 | 14876 | return __builtin_mve_vsubq_m_n_sv8hi (vuninitializedq_s16 (), __a, __b, __p); |
e3678b44 SP |
14877 | } |
14878 | ||
261014a1 | 14879 | __extension__ extern __inline int32x4_t |
e3678b44 | 14880 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14881 | __arm_vsubq_x_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p) |
e3678b44 | 14882 | { |
261014a1 | 14883 | return __builtin_mve_vsubq_m_n_sv4si (vuninitializedq_s32 (), __a, __b, __p); |
e3678b44 SP |
14884 | } |
14885 | ||
261014a1 | 14886 | __extension__ extern __inline uint8x16_t |
e3678b44 | 14887 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14888 | __arm_vsubq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) |
e3678b44 | 14889 | { |
261014a1 | 14890 | return __builtin_mve_vsubq_m_uv16qi (vuninitializedq_u8 (), __a, __b, __p); |
e3678b44 SP |
14891 | } |
14892 | ||
261014a1 | 14893 | __extension__ extern __inline uint16x8_t |
e3678b44 | 14894 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14895 | __arm_vsubq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) |
e3678b44 | 14896 | { |
261014a1 | 14897 | return __builtin_mve_vsubq_m_uv8hi (vuninitializedq_u16 (), __a, __b, __p); |
e3678b44 SP |
14898 | } |
14899 | ||
261014a1 | 14900 | __extension__ extern __inline uint32x4_t |
e3678b44 | 14901 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14902 | __arm_vsubq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) |
e3678b44 | 14903 | { |
261014a1 | 14904 | return __builtin_mve_vsubq_m_uv4si (vuninitializedq_u32 (), __a, __b, __p); |
e3678b44 SP |
14905 | } |
14906 | ||
261014a1 | 14907 | __extension__ extern __inline uint8x16_t |
e3678b44 | 14908 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14909 | __arm_vsubq_x_n_u8 (uint8x16_t __a, uint8_t __b, mve_pred16_t __p) |
e3678b44 | 14910 | { |
261014a1 | 14911 | return __builtin_mve_vsubq_m_n_uv16qi (vuninitializedq_u8 (), __a, __b, __p); |
e3678b44 SP |
14912 | } |
14913 | ||
261014a1 | 14914 | __extension__ extern __inline uint16x8_t |
e3678b44 | 14915 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14916 | __arm_vsubq_x_n_u16 (uint16x8_t __a, uint16_t __b, mve_pred16_t __p) |
e3678b44 | 14917 | { |
261014a1 | 14918 | return __builtin_mve_vsubq_m_n_uv8hi (vuninitializedq_u16 (), __a, __b, __p); |
e3678b44 SP |
14919 | } |
14920 | ||
261014a1 | 14921 | __extension__ extern __inline uint32x4_t |
e3678b44 | 14922 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14923 | __arm_vsubq_x_n_u32 (uint32x4_t __a, uint32_t __b, mve_pred16_t __p) |
e3678b44 | 14924 | { |
261014a1 | 14925 | return __builtin_mve_vsubq_m_n_uv4si (vuninitializedq_u32 (), __a, __b, __p); |
e3678b44 SP |
14926 | } |
14927 | ||
261014a1 | 14928 | __extension__ extern __inline int8x16_t |
e3678b44 | 14929 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14930 | __arm_vcaddq_rot90_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) |
e3678b44 | 14931 | { |
261014a1 | 14932 | return __builtin_mve_vcaddq_rot90_m_sv16qi (vuninitializedq_s8 (), __a, __b, __p); |
e3678b44 SP |
14933 | } |
14934 | ||
261014a1 | 14935 | __extension__ extern __inline int16x8_t |
e3678b44 | 14936 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14937 | __arm_vcaddq_rot90_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) |
e3678b44 | 14938 | { |
261014a1 | 14939 | return __builtin_mve_vcaddq_rot90_m_sv8hi (vuninitializedq_s16 (), __a, __b, __p); |
e3678b44 SP |
14940 | } |
14941 | ||
261014a1 | 14942 | __extension__ extern __inline int32x4_t |
e3678b44 | 14943 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14944 | __arm_vcaddq_rot90_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) |
e3678b44 | 14945 | { |
261014a1 | 14946 | return __builtin_mve_vcaddq_rot90_m_sv4si (vuninitializedq_s32 (), __a, __b, __p); |
e3678b44 SP |
14947 | } |
14948 | ||
261014a1 | 14949 | __extension__ extern __inline uint8x16_t |
e3678b44 | 14950 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14951 | __arm_vcaddq_rot90_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) |
e3678b44 | 14952 | { |
261014a1 | 14953 | return __builtin_mve_vcaddq_rot90_m_uv16qi (vuninitializedq_u8 (), __a, __b, __p); |
e3678b44 SP |
14954 | } |
14955 | ||
261014a1 | 14956 | __extension__ extern __inline uint16x8_t |
e3678b44 | 14957 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14958 | __arm_vcaddq_rot90_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) |
e3678b44 | 14959 | { |
261014a1 | 14960 | return __builtin_mve_vcaddq_rot90_m_uv8hi (vuninitializedq_u16 (), __a, __b, __p); |
e3678b44 SP |
14961 | } |
14962 | ||
261014a1 | 14963 | __extension__ extern __inline uint32x4_t |
e3678b44 | 14964 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14965 | __arm_vcaddq_rot90_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) |
e3678b44 | 14966 | { |
261014a1 | 14967 | return __builtin_mve_vcaddq_rot90_m_uv4si (vuninitializedq_u32 (), __a, __b, __p); |
e3678b44 SP |
14968 | } |
14969 | ||
/* vcaddq_rot270_x: predicated "don't-care" complex add with 270-degree
   rotation, for all signed/unsigned element widths.  Implemented via the
   merging (_m) builtin with an uninitialized merge source, so inactive
   lanes are unspecified.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcaddq_rot270_m_sv16qi (vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcaddq_rot270_m_sv8hi (vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcaddq_rot270_m_sv4si (vuninitializedq_s32 (), __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcaddq_rot270_m_uv16qi (vuninitializedq_u8 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcaddq_rot270_m_uv8hi (vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcaddq_rot270_m_uv4si (vuninitializedq_u32 (), __a, __b, __p);
}

/* vhaddq_x_n: predicated "don't-care" halving add of a vector and a
   scalar (_n form: __b is broadcast).  Inactive lanes are unspecified.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_x_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhaddq_m_n_sv16qi (vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_x_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhaddq_m_n_sv8hi (vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_x_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhaddq_m_n_sv4si (vuninitializedq_s32 (), __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_x_n_u8 (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhaddq_m_n_uv16qi (vuninitializedq_u8 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_x_n_u16 (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhaddq_m_n_uv8hi (vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_x_n_u32 (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhaddq_m_n_uv4si (vuninitializedq_u32 (), __a, __b, __p);
}

/* vhaddq_x: predicated "don't-care" halving add of two vectors.
   Inactive lanes are unspecified.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhaddq_m_sv16qi (vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhaddq_m_sv8hi (vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhaddq_m_sv4si (vuninitializedq_s32 (), __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhaddq_m_uv16qi (vuninitializedq_u8 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhaddq_m_uv8hi (vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhaddq_m_uv4si (vuninitializedq_u32 (), __a, __b, __p);
}

/* vhcaddq_rot90_x: predicated "don't-care" halving complex add with
   90-degree rotation (signed element types only).  Inactive lanes are
   unspecified.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhcaddq_rot90_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhcaddq_rot90_m_sv16qi (vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhcaddq_rot90_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhcaddq_rot90_m_sv8hi (vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhcaddq_rot90_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhcaddq_rot90_m_sv4si (vuninitializedq_s32 (), __a, __b, __p);
}

/* vhcaddq_rot270_x: predicated "don't-care" halving complex add with
   270-degree rotation (signed element types only).  Inactive lanes are
   unspecified.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhcaddq_rot270_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhcaddq_rot270_m_sv16qi (vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhcaddq_rot270_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhcaddq_rot270_m_sv8hi (vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhcaddq_rot270_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhcaddq_rot270_m_sv4si (vuninitializedq_s32 (), __a, __b, __p);
}

/* vhsubq_x_n: predicated "don't-care" halving subtract of a broadcast
   scalar __b from vector __a.  Inactive lanes are unspecified.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_x_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhsubq_m_n_sv16qi (vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_x_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhsubq_m_n_sv8hi (vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_x_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhsubq_m_n_sv4si (vuninitializedq_s32 (), __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_x_n_u8 (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhsubq_m_n_uv16qi (vuninitializedq_u8 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_x_n_u16 (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhsubq_m_n_uv8hi (vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_x_n_u32 (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhsubq_m_n_uv4si (vuninitializedq_u32 (), __a, __b, __p);
}

/* vhsubq_x: predicated "don't-care" halving subtract of two vectors.
   Inactive lanes are unspecified.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhsubq_m_sv16qi (vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhsubq_m_sv8hi (vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhsubq_m_sv4si (vuninitializedq_s32 (), __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhsubq_m_uv16qi (vuninitializedq_u8 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhsubq_m_uv8hi (vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhsubq_m_uv4si (vuninitializedq_u32 (), __a, __b, __p);
}

/* vrhaddq_x: predicated "don't-care" rounding halving add of two
   vectors.  Inactive lanes are unspecified.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrhaddq_m_sv16qi (vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrhaddq_m_sv8hi (vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrhaddq_m_sv4si (vuninitializedq_s32 (), __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrhaddq_m_uv16qi (vuninitializedq_u8 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrhaddq_m_uv8hi (vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrhaddq_m_uv4si (vuninitializedq_u32 (), __a, __b, __p);
}

/* vrmulhq_x: predicated "don't-care" rounding multiply returning the
   high half of each product.  Inactive lanes are unspecified.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrmulhq_m_sv16qi (vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrmulhq_m_sv8hi (vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrmulhq_m_sv4si (vuninitializedq_s32 (), __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrmulhq_m_uv16qi (vuninitializedq_u8 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrmulhq_m_uv8hi (vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrmulhq_m_uv4si (vuninitializedq_u32 (), __a, __b, __p);
}

/* vandq_x: predicated "don't-care" bitwise AND of two vectors.
   Inactive lanes are unspecified.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vandq_m_sv16qi (vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vandq_m_sv8hi (vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vandq_m_sv4si (vuninitializedq_s32 (), __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vandq_m_uv16qi (vuninitializedq_u8 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vandq_m_uv8hi (vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vandq_m_uv4si (vuninitializedq_u32 (), __a, __b, __p);
}

/* vbicq_x: predicated "don't-care" bitwise clear (__a AND NOT __b).
   Inactive lanes are unspecified.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vbicq_m_sv16qi (vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vbicq_m_sv8hi (vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vbicq_m_sv4si (vuninitializedq_s32 (), __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vbicq_m_uv16qi (vuninitializedq_u8 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vbicq_m_uv8hi (vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vbicq_m_uv4si (vuninitializedq_u32 (), __a, __b, __p);
}

/* vbrsrq_x_n: predicated "don't-care" bit-reverse shift right; __b is a
   scalar int32_t operand for all element widths.  Inactive lanes are
   unspecified.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq_x_n_s8 (int8x16_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vbrsrq_m_n_sv16qi (vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq_x_n_s16 (int16x8_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vbrsrq_m_n_sv8hi (vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq_x_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vbrsrq_m_n_sv4si (vuninitializedq_s32 (), __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq_x_n_u8 (uint8x16_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vbrsrq_m_n_uv16qi (vuninitializedq_u8 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq_x_n_u16 (uint16x8_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vbrsrq_m_n_uv8hi (vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq_x_n_u32 (uint32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vbrsrq_m_n_uv4si (vuninitializedq_u32 (), __a, __b, __p);
}

/* veorq_x: predicated "don't-care" bitwise exclusive OR of two vectors.
   Inactive lanes are unspecified.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_veorq_m_sv16qi (vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_veorq_m_sv8hi (vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_veorq_m_sv4si (vuninitializedq_s32 (), __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_veorq_m_uv16qi (vuninitializedq_u8 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_veorq_m_uv8hi (vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_veorq_m_uv4si (vuninitializedq_u32 (), __a, __b, __p);
}

/* vmovlbq_x: predicated "don't-care" widening move of the bottom
   (even-numbered) elements to double-width lanes.  Inactive lanes are
   unspecified.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovlbq_x_s8 (int8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vmovlbq_m_sv16qi (vuninitializedq_s16 (), __a, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovlbq_x_s16 (int16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vmovlbq_m_sv8hi (vuninitializedq_s32 (), __a, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovlbq_x_u8 (uint8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vmovlbq_m_uv16qi (vuninitializedq_u16 (), __a, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovlbq_x_u16 (uint16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vmovlbq_m_uv8hi (vuninitializedq_u32 (), __a, __p);
}

/* vmovltq_x: predicated "don't-care" widening move of the top
   (odd-numbered) elements to double-width lanes.  Inactive lanes are
   unspecified.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovltq_x_s8 (int8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vmovltq_m_sv16qi (vuninitializedq_s16 (), __a, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovltq_x_s16 (int16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vmovltq_m_sv8hi (vuninitializedq_s32 (), __a, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovltq_x_u8 (uint8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vmovltq_m_uv16qi (vuninitializedq_u16 (), __a, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovltq_x_u16 (uint16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vmovltq_m_uv8hi (vuninitializedq_u32 (), __a, __p);
}

/* vmvnq_x: predicated "don't-care" bitwise NOT of a vector.
   Inactive lanes are unspecified.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_x_s8 (int8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vmvnq_m_sv16qi (vuninitializedq_s8 (), __a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_x_s16 (int16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vmvnq_m_sv8hi (vuninitializedq_s16 (), __a, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_x_s32 (int32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vmvnq_m_sv4si (vuninitializedq_s32 (), __a, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_x_u8 (uint8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vmvnq_m_uv16qi (vuninitializedq_u8 (), __a, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_x_u16 (uint16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vmvnq_m_uv8hi (vuninitializedq_u16 (), __a, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_x_u32 (uint32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vmvnq_m_uv4si (vuninitializedq_u32 (), __a, __p);
}

261014a1 | 15572 | __extension__ extern __inline int16x8_t |
e3678b44 | 15573 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15574 | __arm_vmvnq_x_n_s16 (const int __imm, mve_pred16_t __p) |
e3678b44 | 15575 | { |
261014a1 | 15576 | return __builtin_mve_vmvnq_m_n_sv8hi (vuninitializedq_s16 (), __imm, __p); |
e3678b44 SP |
15577 | } |
15578 | ||
261014a1 | 15579 | __extension__ extern __inline int32x4_t |
e3678b44 | 15580 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15581 | __arm_vmvnq_x_n_s32 (const int __imm, mve_pred16_t __p) |
e3678b44 | 15582 | { |
261014a1 | 15583 | return __builtin_mve_vmvnq_m_n_sv4si (vuninitializedq_s32 (), __imm, __p); |
e3678b44 SP |
15584 | } |
15585 | ||
261014a1 | 15586 | __extension__ extern __inline uint16x8_t |
e3678b44 | 15587 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15588 | __arm_vmvnq_x_n_u16 (const int __imm, mve_pred16_t __p) |
e3678b44 | 15589 | { |
261014a1 | 15590 | return __builtin_mve_vmvnq_m_n_uv8hi (vuninitializedq_u16 (), __imm, __p); |
e3678b44 SP |
15591 | } |
15592 | ||
261014a1 | 15593 | __extension__ extern __inline uint32x4_t |
e3678b44 | 15594 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15595 | __arm_vmvnq_x_n_u32 (const int __imm, mve_pred16_t __p) |
e3678b44 | 15596 | { |
261014a1 | 15597 | return __builtin_mve_vmvnq_m_n_uv4si (vuninitializedq_u32 (), __imm, __p); |
e3678b44 SP |
15598 | } |
15599 | ||
261014a1 | 15600 | __extension__ extern __inline int8x16_t |
e3678b44 | 15601 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15602 | __arm_vornq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) |
e3678b44 | 15603 | { |
261014a1 | 15604 | return __builtin_mve_vornq_m_sv16qi (vuninitializedq_s8 (), __a, __b, __p); |
e3678b44 SP |
15605 | } |
15606 | ||
261014a1 | 15607 | __extension__ extern __inline int16x8_t |
e3678b44 | 15608 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15609 | __arm_vornq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) |
e3678b44 | 15610 | { |
261014a1 | 15611 | return __builtin_mve_vornq_m_sv8hi (vuninitializedq_s16 (), __a, __b, __p); |
e3678b44 SP |
15612 | } |
15613 | ||
261014a1 | 15614 | __extension__ extern __inline int32x4_t |
e3678b44 | 15615 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15616 | __arm_vornq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) |
e3678b44 | 15617 | { |
261014a1 | 15618 | return __builtin_mve_vornq_m_sv4si (vuninitializedq_s32 (), __a, __b, __p); |
e3678b44 SP |
15619 | } |
15620 | ||
261014a1 | 15621 | __extension__ extern __inline uint8x16_t |
e3678b44 | 15622 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15623 | __arm_vornq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) |
e3678b44 | 15624 | { |
261014a1 | 15625 | return __builtin_mve_vornq_m_uv16qi (vuninitializedq_u8 (), __a, __b, __p); |
e3678b44 SP |
15626 | } |
15627 | ||
261014a1 | 15628 | __extension__ extern __inline uint16x8_t |
e3678b44 | 15629 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15630 | __arm_vornq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) |
e3678b44 | 15631 | { |
261014a1 | 15632 | return __builtin_mve_vornq_m_uv8hi (vuninitializedq_u16 (), __a, __b, __p); |
e3678b44 SP |
15633 | } |
15634 | ||
261014a1 | 15635 | __extension__ extern __inline uint32x4_t |
e3678b44 | 15636 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15637 | __arm_vornq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) |
e3678b44 | 15638 | { |
261014a1 | 15639 | return __builtin_mve_vornq_m_uv4si (vuninitializedq_u32 (), __a, __b, __p); |
e3678b44 SP |
15640 | } |
15641 | ||
261014a1 | 15642 | __extension__ extern __inline int8x16_t |
e3678b44 | 15643 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15644 | __arm_vorrq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) |
e3678b44 | 15645 | { |
261014a1 | 15646 | return __builtin_mve_vorrq_m_sv16qi (vuninitializedq_s8 (), __a, __b, __p); |
e3678b44 SP |
15647 | } |
15648 | ||
261014a1 | 15649 | __extension__ extern __inline int16x8_t |
e3678b44 | 15650 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15651 | __arm_vorrq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) |
e3678b44 | 15652 | { |
261014a1 | 15653 | return __builtin_mve_vorrq_m_sv8hi (vuninitializedq_s16 (), __a, __b, __p); |
e3678b44 SP |
15654 | } |
15655 | ||
261014a1 | 15656 | __extension__ extern __inline int32x4_t |
e3678b44 | 15657 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15658 | __arm_vorrq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) |
e3678b44 | 15659 | { |
261014a1 | 15660 | return __builtin_mve_vorrq_m_sv4si (vuninitializedq_s32 (), __a, __b, __p); |
e3678b44 SP |
15661 | } |
15662 | ||
261014a1 | 15663 | __extension__ extern __inline uint8x16_t |
e3678b44 | 15664 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15665 | __arm_vorrq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) |
e3678b44 | 15666 | { |
261014a1 | 15667 | return __builtin_mve_vorrq_m_uv16qi (vuninitializedq_u8 (), __a, __b, __p); |
e3678b44 SP |
15668 | } |
15669 | ||
261014a1 | 15670 | __extension__ extern __inline uint16x8_t |
e3678b44 | 15671 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15672 | __arm_vorrq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) |
e3678b44 | 15673 | { |
261014a1 | 15674 | return __builtin_mve_vorrq_m_uv8hi (vuninitializedq_u16 (), __a, __b, __p); |
e3678b44 SP |
15675 | } |
15676 | ||
261014a1 | 15677 | __extension__ extern __inline uint32x4_t |
e3678b44 | 15678 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15679 | __arm_vorrq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) |
e3678b44 | 15680 | { |
261014a1 | 15681 | return __builtin_mve_vorrq_m_uv4si (vuninitializedq_u32 (), __a, __b, __p); |
e3678b44 SP |
15682 | } |
15683 | ||
261014a1 | 15684 | __extension__ extern __inline int8x16_t |
e3678b44 | 15685 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15686 | __arm_vrev16q_x_s8 (int8x16_t __a, mve_pred16_t __p) |
e3678b44 | 15687 | { |
261014a1 | 15688 | return __builtin_mve_vrev16q_m_sv16qi (vuninitializedq_s8 (), __a, __p); |
e3678b44 SP |
15689 | } |
15690 | ||
261014a1 | 15691 | __extension__ extern __inline uint8x16_t |
e3678b44 | 15692 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15693 | __arm_vrev16q_x_u8 (uint8x16_t __a, mve_pred16_t __p) |
e3678b44 | 15694 | { |
261014a1 | 15695 | return __builtin_mve_vrev16q_m_uv16qi (vuninitializedq_u8 (), __a, __p); |
e3678b44 SP |
15696 | } |
15697 | ||
261014a1 | 15698 | __extension__ extern __inline int8x16_t |
e3678b44 | 15699 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15700 | __arm_vrev32q_x_s8 (int8x16_t __a, mve_pred16_t __p) |
e3678b44 | 15701 | { |
261014a1 | 15702 | return __builtin_mve_vrev32q_m_sv16qi (vuninitializedq_s8 (), __a, __p); |
e3678b44 SP |
15703 | } |
15704 | ||
261014a1 | 15705 | __extension__ extern __inline int16x8_t |
e3678b44 | 15706 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15707 | __arm_vrev32q_x_s16 (int16x8_t __a, mve_pred16_t __p) |
e3678b44 | 15708 | { |
261014a1 | 15709 | return __builtin_mve_vrev32q_m_sv8hi (vuninitializedq_s16 (), __a, __p); |
e3678b44 SP |
15710 | } |
15711 | ||
261014a1 | 15712 | __extension__ extern __inline uint8x16_t |
e3678b44 | 15713 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15714 | __arm_vrev32q_x_u8 (uint8x16_t __a, mve_pred16_t __p) |
e3678b44 | 15715 | { |
261014a1 | 15716 | return __builtin_mve_vrev32q_m_uv16qi (vuninitializedq_u8 (), __a, __p); |
e3678b44 SP |
15717 | } |
15718 | ||
261014a1 | 15719 | __extension__ extern __inline uint16x8_t |
e3678b44 | 15720 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15721 | __arm_vrev32q_x_u16 (uint16x8_t __a, mve_pred16_t __p) |
e3678b44 | 15722 | { |
261014a1 | 15723 | return __builtin_mve_vrev32q_m_uv8hi (vuninitializedq_u16 (), __a, __p); |
e3678b44 SP |
15724 | } |
15725 | ||
261014a1 | 15726 | __extension__ extern __inline int8x16_t |
e3678b44 | 15727 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15728 | __arm_vrev64q_x_s8 (int8x16_t __a, mve_pred16_t __p) |
e3678b44 | 15729 | { |
261014a1 | 15730 | return __builtin_mve_vrev64q_m_sv16qi (vuninitializedq_s8 (), __a, __p); |
e3678b44 SP |
15731 | } |
15732 | ||
261014a1 | 15733 | __extension__ extern __inline int16x8_t |
e3678b44 | 15734 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15735 | __arm_vrev64q_x_s16 (int16x8_t __a, mve_pred16_t __p) |
e3678b44 | 15736 | { |
261014a1 | 15737 | return __builtin_mve_vrev64q_m_sv8hi (vuninitializedq_s16 (), __a, __p); |
e3678b44 SP |
15738 | } |
15739 | ||
261014a1 | 15740 | __extension__ extern __inline int32x4_t |
e3678b44 | 15741 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15742 | __arm_vrev64q_x_s32 (int32x4_t __a, mve_pred16_t __p) |
e3678b44 | 15743 | { |
261014a1 | 15744 | return __builtin_mve_vrev64q_m_sv4si (vuninitializedq_s32 (), __a, __p); |
e3678b44 SP |
15745 | } |
15746 | ||
261014a1 | 15747 | __extension__ extern __inline uint8x16_t |
e3678b44 | 15748 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15749 | __arm_vrev64q_x_u8 (uint8x16_t __a, mve_pred16_t __p) |
e3678b44 | 15750 | { |
261014a1 | 15751 | return __builtin_mve_vrev64q_m_uv16qi (vuninitializedq_u8 (), __a, __p); |
e3678b44 SP |
15752 | } |
15753 | ||
261014a1 | 15754 | __extension__ extern __inline uint16x8_t |
e3678b44 | 15755 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15756 | __arm_vrev64q_x_u16 (uint16x8_t __a, mve_pred16_t __p) |
e3678b44 | 15757 | { |
261014a1 | 15758 | return __builtin_mve_vrev64q_m_uv8hi (vuninitializedq_u16 (), __a, __p); |
e3678b44 SP |
15759 | } |
15760 | ||
15761 | __extension__ extern __inline uint32x4_t | |
15762 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 15763 | __arm_vrev64q_x_u32 (uint32x4_t __a, mve_pred16_t __p) |
e3678b44 | 15764 | { |
261014a1 | 15765 | return __builtin_mve_vrev64q_m_uv4si (vuninitializedq_u32 (), __a, __p); |
e3678b44 SP |
15766 | } |
15767 | ||
261014a1 | 15768 | __extension__ extern __inline int8x16_t |
e3678b44 | 15769 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15770 | __arm_vrshlq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) |
e3678b44 | 15771 | { |
261014a1 | 15772 | return __builtin_mve_vrshlq_m_sv16qi (vuninitializedq_s8 (), __a, __b, __p); |
e3678b44 SP |
15773 | } |
15774 | ||
261014a1 | 15775 | __extension__ extern __inline int16x8_t |
db5db9d2 | 15776 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15777 | __arm_vrshlq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) |
db5db9d2 | 15778 | { |
261014a1 | 15779 | return __builtin_mve_vrshlq_m_sv8hi (vuninitializedq_s16 (), __a, __b, __p); |
db5db9d2 SP |
15780 | } |
15781 | ||
261014a1 | 15782 | __extension__ extern __inline int32x4_t |
db5db9d2 | 15783 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15784 | __arm_vrshlq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) |
db5db9d2 | 15785 | { |
261014a1 | 15786 | return __builtin_mve_vrshlq_m_sv4si (vuninitializedq_s32 (), __a, __b, __p); |
db5db9d2 SP |
15787 | } |
15788 | ||
261014a1 | 15789 | __extension__ extern __inline uint8x16_t |
db5db9d2 | 15790 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15791 | __arm_vrshlq_x_u8 (uint8x16_t __a, int8x16_t __b, mve_pred16_t __p) |
db5db9d2 | 15792 | { |
261014a1 | 15793 | return __builtin_mve_vrshlq_m_uv16qi (vuninitializedq_u8 (), __a, __b, __p); |
db5db9d2 SP |
15794 | } |
15795 | ||
261014a1 | 15796 | __extension__ extern __inline uint16x8_t |
db5db9d2 | 15797 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15798 | __arm_vrshlq_x_u16 (uint16x8_t __a, int16x8_t __b, mve_pred16_t __p) |
db5db9d2 | 15799 | { |
261014a1 | 15800 | return __builtin_mve_vrshlq_m_uv8hi (vuninitializedq_u16 (), __a, __b, __p); |
db5db9d2 SP |
15801 | } |
15802 | ||
261014a1 | 15803 | __extension__ extern __inline uint32x4_t |
532e9e24 | 15804 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15805 | __arm_vrshlq_x_u32 (uint32x4_t __a, int32x4_t __b, mve_pred16_t __p) |
532e9e24 | 15806 | { |
261014a1 | 15807 | return __builtin_mve_vrshlq_m_uv4si (vuninitializedq_u32 (), __a, __b, __p); |
532e9e24 SP |
15808 | } |
15809 | ||
261014a1 | 15810 | __extension__ extern __inline int16x8_t |
532e9e24 | 15811 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15812 | __arm_vshllbq_x_n_s8 (int8x16_t __a, const int __imm, mve_pred16_t __p) |
532e9e24 | 15813 | { |
261014a1 | 15814 | return __builtin_mve_vshllbq_m_n_sv16qi (vuninitializedq_s16 (), __a, __imm, __p); |
532e9e24 SP |
15815 | } |
15816 | ||
261014a1 | 15817 | __extension__ extern __inline int32x4_t |
532e9e24 | 15818 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15819 | __arm_vshllbq_x_n_s16 (int16x8_t __a, const int __imm, mve_pred16_t __p) |
532e9e24 | 15820 | { |
261014a1 | 15821 | return __builtin_mve_vshllbq_m_n_sv8hi (vuninitializedq_s32 (), __a, __imm, __p); |
532e9e24 SP |
15822 | } |
15823 | ||
261014a1 | 15824 | __extension__ extern __inline uint16x8_t |
532e9e24 | 15825 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15826 | __arm_vshllbq_x_n_u8 (uint8x16_t __a, const int __imm, mve_pred16_t __p) |
532e9e24 | 15827 | { |
261014a1 | 15828 | return __builtin_mve_vshllbq_m_n_uv16qi (vuninitializedq_u16 (), __a, __imm, __p); |
532e9e24 SP |
15829 | } |
15830 | ||
261014a1 | 15831 | __extension__ extern __inline uint32x4_t |
532e9e24 | 15832 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15833 | __arm_vshllbq_x_n_u16 (uint16x8_t __a, const int __imm, mve_pred16_t __p) |
532e9e24 | 15834 | { |
261014a1 | 15835 | return __builtin_mve_vshllbq_m_n_uv8hi (vuninitializedq_u32 (), __a, __imm, __p); |
532e9e24 SP |
15836 | } |
15837 | ||
261014a1 | 15838 | __extension__ extern __inline int16x8_t |
532e9e24 | 15839 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15840 | __arm_vshlltq_x_n_s8 (int8x16_t __a, const int __imm, mve_pred16_t __p) |
532e9e24 | 15841 | { |
261014a1 | 15842 | return __builtin_mve_vshlltq_m_n_sv16qi (vuninitializedq_s16 (), __a, __imm, __p); |
532e9e24 SP |
15843 | } |
15844 | ||
261014a1 | 15845 | __extension__ extern __inline int32x4_t |
532e9e24 | 15846 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15847 | __arm_vshlltq_x_n_s16 (int16x8_t __a, const int __imm, mve_pred16_t __p) |
532e9e24 | 15848 | { |
261014a1 | 15849 | return __builtin_mve_vshlltq_m_n_sv8hi (vuninitializedq_s32 (), __a, __imm, __p); |
532e9e24 SP |
15850 | } |
15851 | ||
261014a1 | 15852 | __extension__ extern __inline uint16x8_t |
532e9e24 | 15853 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15854 | __arm_vshlltq_x_n_u8 (uint8x16_t __a, const int __imm, mve_pred16_t __p) |
532e9e24 | 15855 | { |
261014a1 | 15856 | return __builtin_mve_vshlltq_m_n_uv16qi (vuninitializedq_u16 (), __a, __imm, __p); |
532e9e24 SP |
15857 | } |
15858 | ||
261014a1 | 15859 | __extension__ extern __inline uint32x4_t |
532e9e24 | 15860 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15861 | __arm_vshlltq_x_n_u16 (uint16x8_t __a, const int __imm, mve_pred16_t __p) |
532e9e24 | 15862 | { |
261014a1 | 15863 | return __builtin_mve_vshlltq_m_n_uv8hi (vuninitializedq_u32 (), __a, __imm, __p); |
532e9e24 SP |
15864 | } |
15865 | ||
261014a1 | 15866 | __extension__ extern __inline int8x16_t |
532e9e24 | 15867 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15868 | __arm_vshlq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) |
532e9e24 | 15869 | { |
261014a1 SP |
15870 | return __builtin_mve_vshlq_m_sv16qi (vuninitializedq_s8 (), __a, __b, __p); |
15871 | } | |
15872 | ||
15873 | __extension__ extern __inline int16x8_t | |
15874 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
15875 | __arm_vshlq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
15876 | { | |
15877 | return __builtin_mve_vshlq_m_sv8hi (vuninitializedq_s16 (), __a, __b, __p); | |
15878 | } | |
15879 | ||
15880 | __extension__ extern __inline int32x4_t | |
15881 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
15882 | __arm_vshlq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
15883 | { | |
15884 | return __builtin_mve_vshlq_m_sv4si (vuninitializedq_s32 (), __a, __b, __p); | |
15885 | } | |
15886 | ||
15887 | __extension__ extern __inline uint8x16_t | |
15888 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
15889 | __arm_vshlq_x_u8 (uint8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
15890 | { | |
15891 | return __builtin_mve_vshlq_m_uv16qi (vuninitializedq_u8 (), __a, __b, __p); | |
15892 | } | |
15893 | ||
15894 | __extension__ extern __inline uint16x8_t | |
15895 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
15896 | __arm_vshlq_x_u16 (uint16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
15897 | { | |
15898 | return __builtin_mve_vshlq_m_uv8hi (vuninitializedq_u16 (), __a, __b, __p); | |
15899 | } | |
15900 | ||
15901 | __extension__ extern __inline uint32x4_t | |
15902 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
15903 | __arm_vshlq_x_u32 (uint32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
15904 | { | |
15905 | return __builtin_mve_vshlq_m_uv4si (vuninitializedq_u32 (), __a, __b, __p); | |
15906 | } | |
15907 | ||
15908 | __extension__ extern __inline int8x16_t | |
15909 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
15910 | __arm_vshlq_x_n_s8 (int8x16_t __a, const int __imm, mve_pred16_t __p) | |
15911 | { | |
15912 | return __builtin_mve_vshlq_m_n_sv16qi (vuninitializedq_s8 (), __a, __imm, __p); | |
15913 | } | |
15914 | ||
15915 | __extension__ extern __inline int16x8_t | |
15916 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
15917 | __arm_vshlq_x_n_s16 (int16x8_t __a, const int __imm, mve_pred16_t __p) | |
15918 | { | |
15919 | return __builtin_mve_vshlq_m_n_sv8hi (vuninitializedq_s16 (), __a, __imm, __p); | |
15920 | } | |
15921 | ||
15922 | __extension__ extern __inline int32x4_t | |
15923 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
15924 | __arm_vshlq_x_n_s32 (int32x4_t __a, const int __imm, mve_pred16_t __p) | |
15925 | { | |
15926 | return __builtin_mve_vshlq_m_n_sv4si (vuninitializedq_s32 (), __a, __imm, __p); | |
15927 | } | |
15928 | ||
15929 | __extension__ extern __inline uint8x16_t | |
15930 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
15931 | __arm_vshlq_x_n_u8 (uint8x16_t __a, const int __imm, mve_pred16_t __p) | |
15932 | { | |
15933 | return __builtin_mve_vshlq_m_n_uv16qi (vuninitializedq_u8 (), __a, __imm, __p); | |
15934 | } | |
15935 | ||
15936 | __extension__ extern __inline uint16x8_t | |
15937 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
15938 | __arm_vshlq_x_n_u16 (uint16x8_t __a, const int __imm, mve_pred16_t __p) | |
15939 | { | |
15940 | return __builtin_mve_vshlq_m_n_uv8hi (vuninitializedq_u16 (), __a, __imm, __p); | |
15941 | } | |
15942 | ||
15943 | __extension__ extern __inline uint32x4_t | |
15944 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
15945 | __arm_vshlq_x_n_u32 (uint32x4_t __a, const int __imm, mve_pred16_t __p) | |
15946 | { | |
15947 | return __builtin_mve_vshlq_m_n_uv4si (vuninitializedq_u32 (), __a, __imm, __p); | |
15948 | } | |
15949 | ||
15950 | __extension__ extern __inline int8x16_t | |
15951 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
15952 | __arm_vrshrq_x_n_s8 (int8x16_t __a, const int __imm, mve_pred16_t __p) | |
15953 | { | |
15954 | return __builtin_mve_vrshrq_m_n_sv16qi (vuninitializedq_s8 (), __a, __imm, __p); | |
15955 | } | |
15956 | ||
15957 | __extension__ extern __inline int16x8_t | |
15958 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
15959 | __arm_vrshrq_x_n_s16 (int16x8_t __a, const int __imm, mve_pred16_t __p) | |
15960 | { | |
15961 | return __builtin_mve_vrshrq_m_n_sv8hi (vuninitializedq_s16 (), __a, __imm, __p); | |
15962 | } | |
15963 | ||
15964 | __extension__ extern __inline int32x4_t | |
15965 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
15966 | __arm_vrshrq_x_n_s32 (int32x4_t __a, const int __imm, mve_pred16_t __p) | |
15967 | { | |
15968 | return __builtin_mve_vrshrq_m_n_sv4si (vuninitializedq_s32 (), __a, __imm, __p); | |
15969 | } | |
15970 | ||
15971 | __extension__ extern __inline uint8x16_t | |
15972 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
15973 | __arm_vrshrq_x_n_u8 (uint8x16_t __a, const int __imm, mve_pred16_t __p) | |
15974 | { | |
15975 | return __builtin_mve_vrshrq_m_n_uv16qi (vuninitializedq_u8 (), __a, __imm, __p); | |
15976 | } | |
15977 | ||
15978 | __extension__ extern __inline uint16x8_t | |
15979 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
15980 | __arm_vrshrq_x_n_u16 (uint16x8_t __a, const int __imm, mve_pred16_t __p) | |
15981 | { | |
15982 | return __builtin_mve_vrshrq_m_n_uv8hi (vuninitializedq_u16 (), __a, __imm, __p); | |
15983 | } | |
15984 | ||
15985 | __extension__ extern __inline uint32x4_t | |
15986 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
15987 | __arm_vrshrq_x_n_u32 (uint32x4_t __a, const int __imm, mve_pred16_t __p) | |
15988 | { | |
15989 | return __builtin_mve_vrshrq_m_n_uv4si (vuninitializedq_u32 (), __a, __imm, __p); | |
15990 | } | |
15991 | ||
15992 | __extension__ extern __inline int8x16_t | |
15993 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
15994 | __arm_vshrq_x_n_s8 (int8x16_t __a, const int __imm, mve_pred16_t __p) | |
15995 | { | |
15996 | return __builtin_mve_vshrq_m_n_sv16qi (vuninitializedq_s8 (), __a, __imm, __p); | |
15997 | } | |
15998 | ||
15999 | __extension__ extern __inline int16x8_t | |
16000 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16001 | __arm_vshrq_x_n_s16 (int16x8_t __a, const int __imm, mve_pred16_t __p) | |
16002 | { | |
16003 | return __builtin_mve_vshrq_m_n_sv8hi (vuninitializedq_s16 (), __a, __imm, __p); | |
16004 | } | |
16005 | ||
16006 | __extension__ extern __inline int32x4_t | |
16007 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16008 | __arm_vshrq_x_n_s32 (int32x4_t __a, const int __imm, mve_pred16_t __p) | |
16009 | { | |
16010 | return __builtin_mve_vshrq_m_n_sv4si (vuninitializedq_s32 (), __a, __imm, __p); | |
16011 | } | |
16012 | ||
16013 | __extension__ extern __inline uint8x16_t | |
16014 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16015 | __arm_vshrq_x_n_u8 (uint8x16_t __a, const int __imm, mve_pred16_t __p) | |
16016 | { | |
16017 | return __builtin_mve_vshrq_m_n_uv16qi (vuninitializedq_u8 (), __a, __imm, __p); | |
16018 | } | |
16019 | ||
16020 | __extension__ extern __inline uint16x8_t | |
16021 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16022 | __arm_vshrq_x_n_u16 (uint16x8_t __a, const int __imm, mve_pred16_t __p) | |
16023 | { | |
16024 | return __builtin_mve_vshrq_m_n_uv8hi (vuninitializedq_u16 (), __a, __imm, __p); | |
16025 | } | |
16026 | ||
16027 | __extension__ extern __inline uint32x4_t | |
16028 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16029 | __arm_vshrq_x_n_u32 (uint32x4_t __a, const int __imm, mve_pred16_t __p) | |
16030 | { | |
16031 | return __builtin_mve_vshrq_m_n_uv4si (vuninitializedq_u32 (), __a, __imm, __p); | |
16032 | } | |
16033 | ||
c3562f81 SP |
16034 | __extension__ extern __inline int32x4_t |
16035 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16036 | __arm_vadciq_s32 (int32x4_t __a, int32x4_t __b, unsigned * __carry_out) | |
16037 | { | |
16038 | int32x4_t __res = __builtin_mve_vadciq_sv4si (__a, __b); | |
16039 | *__carry_out = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u; | |
16040 | return __res; | |
16041 | } | |
16042 | ||
16043 | __extension__ extern __inline uint32x4_t | |
16044 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16045 | __arm_vadciq_u32 (uint32x4_t __a, uint32x4_t __b, unsigned * __carry_out) | |
16046 | { | |
16047 | uint32x4_t __res = __builtin_mve_vadciq_uv4si (__a, __b); | |
16048 | *__carry_out = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u; | |
16049 | return __res; | |
16050 | } | |
16051 | ||
16052 | __extension__ extern __inline int32x4_t | |
16053 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16054 | __arm_vadciq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, unsigned * __carry_out, mve_pred16_t __p) | |
16055 | { | |
16056 | int32x4_t __res = __builtin_mve_vadciq_m_sv4si (__inactive, __a, __b, __p); | |
16057 | *__carry_out = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u; | |
16058 | return __res; | |
16059 | } | |
16060 | ||
16061 | __extension__ extern __inline uint32x4_t | |
16062 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16063 | __arm_vadciq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, unsigned * __carry_out, mve_pred16_t __p) | |
16064 | { | |
16065 | uint32x4_t __res = __builtin_mve_vadciq_m_uv4si (__inactive, __a, __b, __p); | |
16066 | *__carry_out = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u; | |
16067 | return __res; | |
16068 | } | |
16069 | ||
16070 | __extension__ extern __inline int32x4_t | |
16071 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16072 | __arm_vadcq_s32 (int32x4_t __a, int32x4_t __b, unsigned * __carry) | |
16073 | { | |
16074 | __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () & ~0x20000000u) | (*__carry << 29)); | |
16075 | int32x4_t __res = __builtin_mve_vadcq_sv4si (__a, __b); | |
16076 | *__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u; | |
16077 | return __res; | |
16078 | } | |
16079 | ||
16080 | __extension__ extern __inline uint32x4_t | |
16081 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16082 | __arm_vadcq_u32 (uint32x4_t __a, uint32x4_t __b, unsigned * __carry) | |
16083 | { | |
16084 | __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () & ~0x20000000u) | (*__carry << 29)); | |
16085 | uint32x4_t __res = __builtin_mve_vadcq_uv4si (__a, __b); | |
16086 | *__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u; | |
16087 | return __res; | |
16088 | } | |
16089 | ||
16090 | __extension__ extern __inline int32x4_t | |
16091 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16092 | __arm_vadcq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, unsigned * __carry, mve_pred16_t __p) | |
16093 | { | |
16094 | __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () & ~0x20000000u) | (*__carry << 29)); | |
16095 | int32x4_t __res = __builtin_mve_vadcq_m_sv4si (__inactive, __a, __b, __p); | |
16096 | *__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u; | |
16097 | return __res; | |
16098 | } | |
16099 | ||
16100 | __extension__ extern __inline uint32x4_t | |
16101 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16102 | __arm_vadcq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, unsigned * __carry, mve_pred16_t __p) | |
16103 | { | |
16104 | __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () & ~0x20000000u) | (*__carry << 29)); | |
16105 | uint32x4_t __res = __builtin_mve_vadcq_m_uv4si (__inactive, __a, __b, __p); | |
16106 | *__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u; | |
16107 | return __res; | |
16108 | } | |
16109 | ||
16110 | __extension__ extern __inline int32x4_t | |
16111 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16112 | __arm_vsbciq_s32 (int32x4_t __a, int32x4_t __b, unsigned * __carry_out) | |
16113 | { | |
16114 | int32x4_t __res = __builtin_mve_vsbciq_sv4si (__a, __b); | |
16115 | *__carry_out = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u; | |
16116 | return __res; | |
16117 | } | |
16118 | ||
16119 | __extension__ extern __inline uint32x4_t | |
16120 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16121 | __arm_vsbciq_u32 (uint32x4_t __a, uint32x4_t __b, unsigned * __carry_out) | |
16122 | { | |
16123 | uint32x4_t __res = __builtin_mve_vsbciq_uv4si (__a, __b); | |
16124 | *__carry_out = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u; | |
16125 | return __res; | |
16126 | } | |
16127 | ||
16128 | __extension__ extern __inline int32x4_t | |
16129 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16130 | __arm_vsbciq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, unsigned * __carry_out, mve_pred16_t __p) | |
16131 | { | |
16132 | int32x4_t __res = __builtin_mve_vsbciq_m_sv4si (__inactive, __a, __b, __p); | |
16133 | *__carry_out = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u; | |
16134 | return __res; | |
16135 | } | |
16136 | ||
16137 | __extension__ extern __inline uint32x4_t | |
16138 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16139 | __arm_vsbciq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, unsigned * __carry_out, mve_pred16_t __p) | |
16140 | { | |
16141 | uint32x4_t __res = __builtin_mve_vsbciq_m_uv4si (__inactive, __a, __b, __p); | |
16142 | *__carry_out = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u; | |
16143 | return __res; | |
16144 | } | |
16145 | ||
16146 | __extension__ extern __inline int32x4_t | |
16147 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16148 | __arm_vsbcq_s32 (int32x4_t __a, int32x4_t __b, unsigned * __carry) | |
16149 | { | |
16150 | __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () & ~0x20000000u) | (*__carry << 29)); | |
16151 | int32x4_t __res = __builtin_mve_vsbcq_sv4si (__a, __b); | |
16152 | *__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u; | |
16153 | return __res; | |
16154 | } | |
16155 | ||
16156 | __extension__ extern __inline uint32x4_t | |
16157 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16158 | __arm_vsbcq_u32 (uint32x4_t __a, uint32x4_t __b, unsigned * __carry) | |
16159 | { | |
16160 | __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () & ~0x20000000u) | (*__carry << 29)); | |
16161 | uint32x4_t __res = __builtin_mve_vsbcq_uv4si (__a, __b); | |
16162 | *__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u; | |
16163 | return __res; | |
16164 | } | |
16165 | ||
16166 | __extension__ extern __inline int32x4_t | |
16167 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16168 | __arm_vsbcq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, unsigned * __carry, mve_pred16_t __p) | |
16169 | { | |
16170 | __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () & ~0x20000000u) | (*__carry << 29)); | |
16171 | int32x4_t __res = __builtin_mve_vsbcq_m_sv4si (__inactive, __a, __b, __p); | |
16172 | *__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u; | |
16173 | return __res; | |
16174 | } | |
16175 | ||
16176 | __extension__ extern __inline uint32x4_t | |
16177 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16178 | __arm_vsbcq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, unsigned * __carry, mve_pred16_t __p) | |
16179 | { | |
16180 | __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () & ~0x20000000u) | (*__carry << 29)); | |
16181 | uint32x4_t __res = __builtin_mve_vsbcq_m_uv4si (__inactive, __a, __b, __p); | |
16182 | *__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u; | |
16183 | return __res; | |
16184 | } | |
16185 | ||
1dfcc3b5 SP |
16186 | __extension__ extern __inline void |
16187 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16188 | __arm_vst1q_p_u8 (uint8_t * __addr, uint8x16_t __value, mve_pred16_t __p) | |
16189 | { | |
16190 | return vstrbq_p_u8 (__addr, __value, __p); | |
16191 | } | |
16192 | ||
16193 | __extension__ extern __inline void | |
16194 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16195 | __arm_vst1q_p_s8 (int8_t * __addr, int8x16_t __value, mve_pred16_t __p) | |
16196 | { | |
16197 | return vstrbq_p_s8 (__addr, __value, __p); | |
16198 | } | |
16199 | ||
16200 | __extension__ extern __inline void | |
16201 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16202 | __arm_vst2q_s8 (int8_t * __addr, int8x16x2_t __value) | |
16203 | { | |
16204 | union { int8x16x2_t __i; __builtin_neon_oi __o; } __rv; | |
16205 | __rv.__i = __value; | |
16206 | __builtin_mve_vst2qv16qi ((__builtin_neon_qi *) __addr, __rv.__o); | |
16207 | } | |
16208 | ||
16209 | __extension__ extern __inline void | |
16210 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16211 | __arm_vst2q_u8 (uint8_t * __addr, uint8x16x2_t __value) | |
16212 | { | |
16213 | union { uint8x16x2_t __i; __builtin_neon_oi __o; } __rv; | |
16214 | __rv.__i = __value; | |
16215 | __builtin_mve_vst2qv16qi ((__builtin_neon_qi *) __addr, __rv.__o); | |
16216 | } | |
16217 | ||
16218 | __extension__ extern __inline uint8x16_t | |
16219 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16220 | __arm_vld1q_z_u8 (uint8_t const *__base, mve_pred16_t __p) | |
16221 | { | |
16222 | return vldrbq_z_u8 ( __base, __p); | |
16223 | } | |
16224 | ||
16225 | __extension__ extern __inline int8x16_t | |
16226 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16227 | __arm_vld1q_z_s8 (int8_t const *__base, mve_pred16_t __p) | |
16228 | { | |
16229 | return vldrbq_z_s8 ( __base, __p); | |
16230 | } | |
16231 | ||
16232 | __extension__ extern __inline int8x16x2_t | |
16233 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16234 | __arm_vld2q_s8 (int8_t const * __addr) | |
16235 | { | |
16236 | union { int8x16x2_t __i; __builtin_neon_oi __o; } __rv; | |
16237 | __rv.__o = __builtin_mve_vld2qv16qi ((__builtin_neon_qi *) __addr); | |
16238 | return __rv.__i; | |
16239 | } | |
16240 | ||
16241 | __extension__ extern __inline uint8x16x2_t | |
16242 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16243 | __arm_vld2q_u8 (uint8_t const * __addr) | |
16244 | { | |
16245 | union { uint8x16x2_t __i; __builtin_neon_oi __o; } __rv; | |
16246 | __rv.__o = __builtin_mve_vld2qv16qi ((__builtin_neon_qi *) __addr); | |
16247 | return __rv.__i; | |
16248 | } | |
16249 | ||
16250 | __extension__ extern __inline int8x16x4_t | |
16251 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16252 | __arm_vld4q_s8 (int8_t const * __addr) | |
16253 | { | |
16254 | union { int8x16x4_t __i; __builtin_neon_xi __o; } __rv; | |
16255 | __rv.__o = __builtin_mve_vld4qv16qi ((__builtin_neon_qi *) __addr); | |
16256 | return __rv.__i; | |
16257 | } | |
16258 | ||
16259 | __extension__ extern __inline uint8x16x4_t | |
16260 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16261 | __arm_vld4q_u8 (uint8_t const * __addr) | |
16262 | { | |
16263 | union { uint8x16x4_t __i; __builtin_neon_xi __o; } __rv; | |
16264 | __rv.__o = __builtin_mve_vld4qv16qi ((__builtin_neon_qi *) __addr); | |
16265 | return __rv.__i; | |
16266 | } | |
16267 | ||
16268 | __extension__ extern __inline void | |
16269 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16270 | __arm_vst1q_p_u16 (uint16_t * __addr, uint16x8_t __value, mve_pred16_t __p) | |
16271 | { | |
16272 | return vstrhq_p_u16 (__addr, __value, __p); | |
16273 | } | |
16274 | ||
16275 | __extension__ extern __inline void | |
16276 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16277 | __arm_vst1q_p_s16 (int16_t * __addr, int16x8_t __value, mve_pred16_t __p) | |
16278 | { | |
16279 | return vstrhq_p_s16 (__addr, __value, __p); | |
16280 | } | |
16281 | ||
16282 | __extension__ extern __inline void | |
16283 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16284 | __arm_vst2q_s16 (int16_t * __addr, int16x8x2_t __value) | |
16285 | { | |
16286 | union { int16x8x2_t __i; __builtin_neon_oi __o; } __rv; | |
16287 | __rv.__i = __value; | |
16288 | __builtin_mve_vst2qv8hi ((__builtin_neon_hi *) __addr, __rv.__o); | |
16289 | } | |
16290 | ||
16291 | __extension__ extern __inline void | |
16292 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16293 | __arm_vst2q_u16 (uint16_t * __addr, uint16x8x2_t __value) | |
16294 | { | |
16295 | union { uint16x8x2_t __i; __builtin_neon_oi __o; } __rv; | |
16296 | __rv.__i = __value; | |
16297 | __builtin_mve_vst2qv8hi ((__builtin_neon_hi *) __addr, __rv.__o); | |
16298 | } | |
16299 | ||
16300 | __extension__ extern __inline uint16x8_t | |
16301 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16302 | __arm_vld1q_z_u16 (uint16_t const *__base, mve_pred16_t __p) | |
16303 | { | |
16304 | return vldrhq_z_u16 ( __base, __p); | |
16305 | } | |
16306 | ||
16307 | __extension__ extern __inline int16x8_t | |
16308 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16309 | __arm_vld1q_z_s16 (int16_t const *__base, mve_pred16_t __p) | |
16310 | { | |
16311 | return vldrhq_z_s16 ( __base, __p); | |
16312 | } | |
16313 | ||
16314 | __extension__ extern __inline int16x8x2_t | |
16315 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16316 | __arm_vld2q_s16 (int16_t const * __addr) | |
16317 | { | |
16318 | union { int16x8x2_t __i; __builtin_neon_oi __o; } __rv; | |
16319 | __rv.__o = __builtin_mve_vld2qv8hi ((__builtin_neon_hi *) __addr); | |
16320 | return __rv.__i; | |
16321 | } | |
16322 | ||
16323 | __extension__ extern __inline uint16x8x2_t | |
16324 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16325 | __arm_vld2q_u16 (uint16_t const * __addr) | |
16326 | { | |
16327 | union { uint16x8x2_t __i; __builtin_neon_oi __o; } __rv; | |
16328 | __rv.__o = __builtin_mve_vld2qv8hi ((__builtin_neon_hi *) __addr); | |
16329 | return __rv.__i; | |
16330 | } | |
16331 | ||
16332 | __extension__ extern __inline int16x8x4_t | |
16333 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16334 | __arm_vld4q_s16 (int16_t const * __addr) | |
16335 | { | |
16336 | union { int16x8x4_t __i; __builtin_neon_xi __o; } __rv; | |
16337 | __rv.__o = __builtin_mve_vld4qv8hi ((__builtin_neon_hi *) __addr); | |
16338 | return __rv.__i; | |
16339 | } | |
16340 | ||
16341 | __extension__ extern __inline uint16x8x4_t | |
16342 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16343 | __arm_vld4q_u16 (uint16_t const * __addr) | |
16344 | { | |
16345 | union { uint16x8x4_t __i; __builtin_neon_xi __o; } __rv; | |
16346 | __rv.__o = __builtin_mve_vld4qv8hi ((__builtin_neon_hi *) __addr); | |
16347 | return __rv.__i; | |
16348 | } | |
16349 | ||
16350 | __extension__ extern __inline void | |
16351 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16352 | __arm_vst1q_p_u32 (uint32_t * __addr, uint32x4_t __value, mve_pred16_t __p) | |
16353 | { | |
16354 | return vstrwq_p_u32 (__addr, __value, __p); | |
16355 | } | |
16356 | ||
16357 | __extension__ extern __inline void | |
16358 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16359 | __arm_vst1q_p_s32 (int32_t * __addr, int32x4_t __value, mve_pred16_t __p) | |
16360 | { | |
16361 | return vstrwq_p_s32 (__addr, __value, __p); | |
16362 | } | |
16363 | ||
16364 | __extension__ extern __inline void | |
16365 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16366 | __arm_vst2q_s32 (int32_t * __addr, int32x4x2_t __value) | |
16367 | { | |
16368 | union { int32x4x2_t __i; __builtin_neon_oi __o; } __rv; | |
16369 | __rv.__i = __value; | |
16370 | __builtin_mve_vst2qv4si ((__builtin_neon_si *) __addr, __rv.__o); | |
16371 | } | |
16372 | ||
16373 | __extension__ extern __inline void | |
16374 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16375 | __arm_vst2q_u32 (uint32_t * __addr, uint32x4x2_t __value) | |
16376 | { | |
16377 | union { uint32x4x2_t __i; __builtin_neon_oi __o; } __rv; | |
16378 | __rv.__i = __value; | |
16379 | __builtin_mve_vst2qv4si ((__builtin_neon_si *) __addr, __rv.__o); | |
16380 | } | |
16381 | ||
16382 | __extension__ extern __inline uint32x4_t | |
16383 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16384 | __arm_vld1q_z_u32 (uint32_t const *__base, mve_pred16_t __p) | |
16385 | { | |
16386 | return vldrwq_z_u32 ( __base, __p); | |
16387 | } | |
16388 | ||
16389 | __extension__ extern __inline int32x4_t | |
16390 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16391 | __arm_vld1q_z_s32 (int32_t const *__base, mve_pred16_t __p) | |
16392 | { | |
16393 | return vldrwq_z_s32 ( __base, __p); | |
16394 | } | |
16395 | ||
16396 | __extension__ extern __inline int32x4x2_t | |
16397 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16398 | __arm_vld2q_s32 (int32_t const * __addr) | |
16399 | { | |
16400 | union { int32x4x2_t __i; __builtin_neon_oi __o; } __rv; | |
16401 | __rv.__o = __builtin_mve_vld2qv4si ((__builtin_neon_si *) __addr); | |
16402 | return __rv.__i; | |
16403 | } | |
16404 | ||
16405 | __extension__ extern __inline uint32x4x2_t | |
16406 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16407 | __arm_vld2q_u32 (uint32_t const * __addr) | |
16408 | { | |
16409 | union { uint32x4x2_t __i; __builtin_neon_oi __o; } __rv; | |
16410 | __rv.__o = __builtin_mve_vld2qv4si ((__builtin_neon_si *) __addr); | |
16411 | return __rv.__i; | |
16412 | } | |
16413 | ||
16414 | __extension__ extern __inline int32x4x4_t | |
16415 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16416 | __arm_vld4q_s32 (int32_t const * __addr) | |
16417 | { | |
16418 | union { int32x4x4_t __i; __builtin_neon_xi __o; } __rv; | |
16419 | __rv.__o = __builtin_mve_vld4qv4si ((__builtin_neon_si *) __addr); | |
16420 | return __rv.__i; | |
16421 | } | |
16422 | ||
16423 | __extension__ extern __inline uint32x4x4_t | |
16424 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16425 | __arm_vld4q_u32 (uint32_t const * __addr) | |
16426 | { | |
16427 | union { uint32x4x4_t __i; __builtin_neon_xi __o; } __rv; | |
16428 | __rv.__o = __builtin_mve_vld4qv4si ((__builtin_neon_si *) __addr); | |
16429 | return __rv.__i; | |
16430 | } | |
16431 | ||
1a5c27b1 SP |
16432 | __extension__ extern __inline int16x8_t |
16433 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16434 | __arm_vsetq_lane_s16 (int16_t __a, int16x8_t __b, const int __idx) | |
16435 | { | |
16436 | __ARM_CHECK_LANEQ (__b, __idx); | |
16437 | __b[__ARM_LANEQ(__b,__idx)] = __a; | |
16438 | return __b; | |
16439 | } | |
16440 | ||
16441 | __extension__ extern __inline int32x4_t | |
16442 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16443 | __arm_vsetq_lane_s32 (int32_t __a, int32x4_t __b, const int __idx) | |
16444 | { | |
16445 | __ARM_CHECK_LANEQ (__b, __idx); | |
16446 | __b[__ARM_LANEQ(__b,__idx)] = __a; | |
16447 | return __b; | |
16448 | } | |
16449 | ||
16450 | __extension__ extern __inline int8x16_t | |
16451 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16452 | __arm_vsetq_lane_s8 (int8_t __a, int8x16_t __b, const int __idx) | |
16453 | { | |
16454 | __ARM_CHECK_LANEQ (__b, __idx); | |
16455 | __b[__ARM_LANEQ(__b,__idx)] = __a; | |
16456 | return __b; | |
16457 | } | |
16458 | ||
16459 | __extension__ extern __inline int64x2_t | |
16460 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16461 | __arm_vsetq_lane_s64 (int64_t __a, int64x2_t __b, const int __idx) | |
16462 | { | |
16463 | __ARM_CHECK_LANEQ (__b, __idx); | |
16464 | __b[__ARM_LANEQ(__b,__idx)] = __a; | |
16465 | return __b; | |
16466 | } | |
16467 | ||
16468 | __extension__ extern __inline uint8x16_t | |
16469 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16470 | __arm_vsetq_lane_u8 (uint8_t __a, uint8x16_t __b, const int __idx) | |
16471 | { | |
16472 | __ARM_CHECK_LANEQ (__b, __idx); | |
16473 | __b[__ARM_LANEQ(__b,__idx)] = __a; | |
16474 | return __b; | |
16475 | } | |
16476 | ||
16477 | __extension__ extern __inline uint16x8_t | |
16478 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16479 | __arm_vsetq_lane_u16 (uint16_t __a, uint16x8_t __b, const int __idx) | |
16480 | { | |
16481 | __ARM_CHECK_LANEQ (__b, __idx); | |
16482 | __b[__ARM_LANEQ(__b,__idx)] = __a; | |
16483 | return __b; | |
16484 | } | |
16485 | ||
16486 | __extension__ extern __inline uint32x4_t | |
16487 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16488 | __arm_vsetq_lane_u32 (uint32_t __a, uint32x4_t __b, const int __idx) | |
16489 | { | |
16490 | __ARM_CHECK_LANEQ (__b, __idx); | |
16491 | __b[__ARM_LANEQ(__b,__idx)] = __a; | |
16492 | return __b; | |
16493 | } | |
16494 | ||
16495 | __extension__ extern __inline uint64x2_t | |
16496 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16497 | __arm_vsetq_lane_u64 (uint64_t __a, uint64x2_t __b, const int __idx) | |
16498 | { | |
16499 | __ARM_CHECK_LANEQ (__b, __idx); | |
16500 | __b[__ARM_LANEQ(__b,__idx)] = __a; | |
16501 | return __b; | |
16502 | } | |
16503 | ||
16504 | __extension__ extern __inline int16_t | |
16505 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16506 | __arm_vgetq_lane_s16 (int16x8_t __a, const int __idx) | |
16507 | { | |
16508 | __ARM_CHECK_LANEQ (__a, __idx); | |
16509 | return __a[__ARM_LANEQ(__a,__idx)]; | |
16510 | } | |
16511 | ||
16512 | __extension__ extern __inline int32_t | |
16513 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16514 | __arm_vgetq_lane_s32 (int32x4_t __a, const int __idx) | |
16515 | { | |
16516 | __ARM_CHECK_LANEQ (__a, __idx); | |
16517 | return __a[__ARM_LANEQ(__a,__idx)]; | |
16518 | } | |
16519 | ||
16520 | __extension__ extern __inline int8_t | |
16521 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16522 | __arm_vgetq_lane_s8 (int8x16_t __a, const int __idx) | |
16523 | { | |
16524 | __ARM_CHECK_LANEQ (__a, __idx); | |
16525 | return __a[__ARM_LANEQ(__a,__idx)]; | |
16526 | } | |
16527 | ||
16528 | __extension__ extern __inline int64_t | |
16529 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16530 | __arm_vgetq_lane_s64 (int64x2_t __a, const int __idx) | |
16531 | { | |
16532 | __ARM_CHECK_LANEQ (__a, __idx); | |
16533 | return __a[__ARM_LANEQ(__a,__idx)]; | |
16534 | } | |
16535 | ||
16536 | __extension__ extern __inline uint8_t | |
16537 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16538 | __arm_vgetq_lane_u8 (uint8x16_t __a, const int __idx) | |
16539 | { | |
16540 | __ARM_CHECK_LANEQ (__a, __idx); | |
16541 | return __a[__ARM_LANEQ(__a,__idx)]; | |
16542 | } | |
16543 | ||
16544 | __extension__ extern __inline uint16_t | |
16545 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16546 | __arm_vgetq_lane_u16 (uint16x8_t __a, const int __idx) | |
16547 | { | |
16548 | __ARM_CHECK_LANEQ (__a, __idx); | |
16549 | return __a[__ARM_LANEQ(__a,__idx)]; | |
16550 | } | |
16551 | ||
16552 | __extension__ extern __inline uint32_t | |
16553 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16554 | __arm_vgetq_lane_u32 (uint32x4_t __a, const int __idx) | |
16555 | { | |
16556 | __ARM_CHECK_LANEQ (__a, __idx); | |
16557 | return __a[__ARM_LANEQ(__a,__idx)]; | |
16558 | } | |
16559 | ||
16560 | __extension__ extern __inline uint64_t | |
16561 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16562 | __arm_vgetq_lane_u64 (uint64x2_t __a, const int __idx) | |
16563 | { | |
16564 | __ARM_CHECK_LANEQ (__a, __idx); | |
16565 | return __a[__ARM_LANEQ(__a,__idx)]; | |
16566 | } | |
16567 | ||
/* 64-bit scalar shifts.  Parameters use reserved (__-prefixed) names,
   consistent with the rest of this header: a system header must not use
   user-namespace identifiers such as `value', which a user macro could
   redefine.  */

__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_lsll (uint64_t __value, int32_t __shift)
{
  /* NOTE(review): plain C shift, so __shift must be in [0, 63]; a negative
     or oversized amount is undefined behaviour in C even if the underlying
     instruction defines it -- confirm the intended caller contract.  */
  return (__value << __shift);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_asrl (int64_t __value, int32_t __shift)
{
  /* Arithmetic shift right; same [0, 63] range caveat as __arm_lsll.  */
  return (__value >> __shift);
}
16581 | ||
/* Saturating/rounding scalar shifts, thin wrappers around the MVE
   builtins.  Parameters are renamed to reserved (__-prefixed) identifiers:
   the original `value'/`shift' are in the user namespace and a user
   #define of either would break this system header.  */

__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_uqrshll (uint64_t __value, int32_t __shift)
{
  return __builtin_mve_uqrshll_sat64_di (__value, __shift);
}

__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_uqrshll_sat48 (uint64_t __value, int32_t __shift)
{
  return __builtin_mve_uqrshll_sat48_di (__value, __shift);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_sqrshrl (int64_t __value, int32_t __shift)
{
  return __builtin_mve_sqrshrl_sat64_di (__value, __shift);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_sqrshrl_sat48 (int64_t __value, int32_t __shift)
{
  return __builtin_mve_sqrshrl_sat48_di (__value, __shift);
}

__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_uqshll (uint64_t __value, const int __shift)
{
  return __builtin_mve_uqshll_di (__value, __shift);
}

__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_urshrl (uint64_t __value, const int __shift)
{
  return __builtin_mve_urshrl_di (__value, __shift);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_srshrl (int64_t __value, const int __shift)
{
  return __builtin_mve_srshrl_di (__value, __shift);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_sqshll (int64_t __value, const int __shift)
{
  return __builtin_mve_sqshll_di (__value, __shift);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_uqrshl (uint32_t __value, int32_t __shift)
{
  return __builtin_mve_uqrshl_si (__value, __shift);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_sqrshr (int32_t __value, int32_t __shift)
{
  return __builtin_mve_sqrshr_si (__value, __shift);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_uqshl (uint32_t __value, const int __shift)
{
  return __builtin_mve_uqshl_si (__value, __shift);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_urshr (uint32_t __value, const int __shift)
{
  return __builtin_mve_urshr_si (__value, __shift);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_sqshl (int32_t __value, const int __shift)
{
  return __builtin_mve_sqshl_si (__value, __shift);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_srshr (int32_t __value, const int __shift)
{
  return __builtin_mve_srshr_si (__value, __shift);
}
16679 | ||
88c9a831 SP |
16680 | __extension__ extern __inline int8x16_t |
16681 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16682 | __arm_vshlcq_m_s8 (int8x16_t __a, uint32_t * __b, const int __imm, mve_pred16_t __p) | |
16683 | { | |
16684 | int8x16_t __res = __builtin_mve_vshlcq_m_vec_sv16qi (__a, *__b, __imm, __p); | |
16685 | *__b = __builtin_mve_vshlcq_m_carry_sv16qi (__a, *__b, __imm, __p); | |
16686 | return __res; | |
16687 | } | |
16688 | ||
16689 | __extension__ extern __inline uint8x16_t | |
16690 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16691 | __arm_vshlcq_m_u8 (uint8x16_t __a, uint32_t * __b, const int __imm, mve_pred16_t __p) | |
16692 | { | |
16693 | uint8x16_t __res = __builtin_mve_vshlcq_m_vec_uv16qi (__a, *__b, __imm, __p); | |
16694 | *__b = __builtin_mve_vshlcq_m_carry_uv16qi (__a, *__b, __imm, __p); | |
16695 | return __res; | |
16696 | } | |
16697 | ||
16698 | __extension__ extern __inline int16x8_t | |
16699 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16700 | __arm_vshlcq_m_s16 (int16x8_t __a, uint32_t * __b, const int __imm, mve_pred16_t __p) | |
16701 | { | |
16702 | int16x8_t __res = __builtin_mve_vshlcq_m_vec_sv8hi (__a, *__b, __imm, __p); | |
16703 | *__b = __builtin_mve_vshlcq_m_carry_sv8hi (__a, *__b, __imm, __p); | |
16704 | return __res; | |
16705 | } | |
16706 | ||
16707 | __extension__ extern __inline uint16x8_t | |
16708 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16709 | __arm_vshlcq_m_u16 (uint16x8_t __a, uint32_t * __b, const int __imm, mve_pred16_t __p) | |
16710 | { | |
16711 | uint16x8_t __res = __builtin_mve_vshlcq_m_vec_uv8hi (__a, *__b, __imm, __p); | |
16712 | *__b = __builtin_mve_vshlcq_m_carry_uv8hi (__a, *__b, __imm, __p); | |
16713 | return __res; | |
16714 | } | |
16715 | ||
16716 | __extension__ extern __inline int32x4_t | |
16717 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16718 | __arm_vshlcq_m_s32 (int32x4_t __a, uint32_t * __b, const int __imm, mve_pred16_t __p) | |
16719 | { | |
16720 | int32x4_t __res = __builtin_mve_vshlcq_m_vec_sv4si (__a, *__b, __imm, __p); | |
16721 | *__b = __builtin_mve_vshlcq_m_carry_sv4si (__a, *__b, __imm, __p); | |
16722 | return __res; | |
16723 | } | |
16724 | ||
16725 | __extension__ extern __inline uint32x4_t | |
16726 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16727 | __arm_vshlcq_m_u32 (uint32x4_t __a, uint32_t * __b, const int __imm, mve_pred16_t __p) | |
16728 | { | |
16729 | uint32x4_t __res = __builtin_mve_vshlcq_m_vec_uv4si (__a, *__b, __imm, __p); | |
16730 | *__b = __builtin_mve_vshlcq_m_carry_uv4si (__a, *__b, __imm, __p); | |
16731 | return __res; | |
16732 | } | |
16733 | ||
261014a1 SP |
16734 | #if (__ARM_FEATURE_MVE & 2) /* MVE Floating point. */ |
16735 | ||
16736 | __extension__ extern __inline void | |
16737 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16738 | __arm_vst4q_f16 (float16_t * __addr, float16x8x4_t __value) | |
16739 | { | |
16740 | union { float16x8x4_t __i; __builtin_neon_xi __o; } __rv; | |
16741 | __rv.__i = __value; | |
16742 | __builtin_mve_vst4qv8hf (__addr, __rv.__o); | |
16743 | } | |
16744 | ||
16745 | __extension__ extern __inline void | |
16746 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16747 | __arm_vst4q_f32 (float32_t * __addr, float32x4x4_t __value) | |
16748 | { | |
16749 | union { float32x4x4_t __i; __builtin_neon_xi __o; } __rv; | |
16750 | __rv.__i = __value; | |
16751 | __builtin_mve_vst4qv4sf (__addr, __rv.__o); | |
16752 | } | |
16753 | ||
16754 | __extension__ extern __inline float16x8_t | |
16755 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16756 | __arm_vrndxq_f16 (float16x8_t __a) | |
16757 | { | |
16758 | return __builtin_mve_vrndxq_fv8hf (__a); | |
532e9e24 SP |
16759 | } |
16760 | ||
16761 | __extension__ extern __inline float32x4_t | |
16762 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 16763 | __arm_vrndxq_f32 (float32x4_t __a) |
532e9e24 | 16764 | { |
261014a1 | 16765 | return __builtin_mve_vrndxq_fv4sf (__a); |
532e9e24 SP |
16766 | } |
16767 | ||
16768 | __extension__ extern __inline float16x8_t | |
16769 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 16770 | __arm_vrndq_f16 (float16x8_t __a) |
532e9e24 | 16771 | { |
261014a1 | 16772 | return __builtin_mve_vrndq_fv8hf (__a); |
532e9e24 SP |
16773 | } |
16774 | ||
16775 | __extension__ extern __inline float32x4_t | |
16776 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 16777 | __arm_vrndq_f32 (float32x4_t __a) |
532e9e24 | 16778 | { |
261014a1 | 16779 | return __builtin_mve_vrndq_fv4sf (__a); |
532e9e24 SP |
16780 | } |
16781 | ||
16782 | __extension__ extern __inline float16x8_t | |
16783 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 16784 | __arm_vrndpq_f16 (float16x8_t __a) |
532e9e24 | 16785 | { |
261014a1 | 16786 | return __builtin_mve_vrndpq_fv8hf (__a); |
532e9e24 SP |
16787 | } |
16788 | ||
16789 | __extension__ extern __inline float32x4_t | |
16790 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 16791 | __arm_vrndpq_f32 (float32x4_t __a) |
532e9e24 | 16792 | { |
261014a1 | 16793 | return __builtin_mve_vrndpq_fv4sf (__a); |
532e9e24 SP |
16794 | } |
16795 | ||
16796 | __extension__ extern __inline float16x8_t | |
16797 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 16798 | __arm_vrndnq_f16 (float16x8_t __a) |
532e9e24 | 16799 | { |
261014a1 | 16800 | return __builtin_mve_vrndnq_fv8hf (__a); |
532e9e24 SP |
16801 | } |
16802 | ||
16803 | __extension__ extern __inline float32x4_t | |
16804 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 16805 | __arm_vrndnq_f32 (float32x4_t __a) |
532e9e24 | 16806 | { |
261014a1 | 16807 | return __builtin_mve_vrndnq_fv4sf (__a); |
532e9e24 SP |
16808 | } |
16809 | ||
16810 | __extension__ extern __inline float16x8_t | |
16811 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 16812 | __arm_vrndmq_f16 (float16x8_t __a) |
532e9e24 | 16813 | { |
261014a1 | 16814 | return __builtin_mve_vrndmq_fv8hf (__a); |
532e9e24 SP |
16815 | } |
16816 | ||
16817 | __extension__ extern __inline float32x4_t | |
16818 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 16819 | __arm_vrndmq_f32 (float32x4_t __a) |
532e9e24 | 16820 | { |
261014a1 | 16821 | return __builtin_mve_vrndmq_fv4sf (__a); |
532e9e24 SP |
16822 | } |
16823 | ||
16824 | __extension__ extern __inline float16x8_t | |
16825 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 16826 | __arm_vrndaq_f16 (float16x8_t __a) |
532e9e24 | 16827 | { |
261014a1 | 16828 | return __builtin_mve_vrndaq_fv8hf (__a); |
532e9e24 SP |
16829 | } |
16830 | ||
16831 | __extension__ extern __inline float32x4_t | |
16832 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 16833 | __arm_vrndaq_f32 (float32x4_t __a) |
532e9e24 | 16834 | { |
261014a1 | 16835 | return __builtin_mve_vrndaq_fv4sf (__a); |
532e9e24 SP |
16836 | } |
16837 | ||
16838 | __extension__ extern __inline float16x8_t | |
16839 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 16840 | __arm_vrev64q_f16 (float16x8_t __a) |
532e9e24 | 16841 | { |
261014a1 | 16842 | return __builtin_mve_vrev64q_fv8hf (__a); |
532e9e24 SP |
16843 | } |
16844 | ||
16845 | __extension__ extern __inline float32x4_t | |
16846 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 16847 | __arm_vrev64q_f32 (float32x4_t __a) |
532e9e24 | 16848 | { |
261014a1 | 16849 | return __builtin_mve_vrev64q_fv4sf (__a); |
532e9e24 SP |
16850 | } |
16851 | ||
16852 | __extension__ extern __inline float16x8_t | |
16853 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 16854 | __arm_vnegq_f16 (float16x8_t __a) |
532e9e24 | 16855 | { |
261014a1 SP |
16856 | return __builtin_mve_vnegq_fv8hf (__a); |
16857 | } | |
16858 | ||
16859 | __extension__ extern __inline float32x4_t | |
16860 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16861 | __arm_vnegq_f32 (float32x4_t __a) | |
16862 | { | |
16863 | return __builtin_mve_vnegq_fv4sf (__a); | |
16864 | } | |
16865 | ||
16866 | __extension__ extern __inline float16x8_t | |
16867 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16868 | __arm_vdupq_n_f16 (float16_t __a) | |
16869 | { | |
16870 | return __builtin_mve_vdupq_n_fv8hf (__a); | |
16871 | } | |
16872 | ||
16873 | __extension__ extern __inline float32x4_t | |
16874 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16875 | __arm_vdupq_n_f32 (float32_t __a) | |
16876 | { | |
16877 | return __builtin_mve_vdupq_n_fv4sf (__a); | |
16878 | } | |
16879 | ||
/* vabsq: lane-wise floating-point absolute value (ACLE vabsq).  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabsq_f16 (float16x8_t __a)
{
  return __builtin_mve_vabsq_fv8hf (__a);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabsq_f32 (float32x4_t __a)
{
  return __builtin_mve_vabsq_fv4sf (__a);
}
16893 | ||
/* vrev32q_f16: reverse the order of 16-bit elements inside each 32-bit
   word of the vector (ACLE vrev32q).  Only the f16 form exists at this
   element size.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev32q_f16 (float16x8_t __a)
{
  return __builtin_mve_vrev32q_fv8hf (__a);
}
16900 | ||
/* Widening f16 -> f32 conversions (ACLE vcvttq/vcvtbq): the "t" form
   converts the top (odd-numbered) half-precision lanes, the "b" form
   the bottom (even-numbered) lanes, of the f16 input into four f32
   lanes.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvttq_f32_f16 (float16x8_t __a)
{
  return __builtin_mve_vcvttq_f32_f16v4sf (__a);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtbq_f32_f16 (float16x8_t __a)
{
  return __builtin_mve_vcvtbq_f32_f16v4sf (__a);
}
16914 | ||
/* vcvtq, integer -> float: lane-wise conversion from signed (sv) or
   unsigned (uv) integers to the same-width floating-point type (ACLE
   vcvtq).  The builtin suffix encodes source signedness and the
   destination vector mode.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_f16_s16 (int16x8_t __a)
{
  return __builtin_mve_vcvtq_to_f_sv8hf (__a);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_f32_s32 (int32x4_t __a)
{
  return __builtin_mve_vcvtq_to_f_sv4sf (__a);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_f16_u16 (uint16x8_t __a)
{
  return __builtin_mve_vcvtq_to_f_uv8hf (__a);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_f32_u32 (uint32x4_t __a)
{
  return __builtin_mve_vcvtq_to_f_uv4sf (__a);
}
16942 | ||
/* vcvtq, float -> integer: lane-wise conversion from floating point to
   signed or unsigned integers of the same width (ACLE vcvtq; rounding
   behavior is defined by the builtin/instruction, not here).  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_s16_f16 (float16x8_t __a)
{
  return __builtin_mve_vcvtq_from_f_sv8hi (__a);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_s32_f32 (float32x4_t __a)
{
  return __builtin_mve_vcvtq_from_f_sv4si (__a);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_u16_f16 (float16x8_t __a)
{
  return __builtin_mve_vcvtq_from_f_uv8hi (__a);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_u32_f32 (float32x4_t __a)
{
  return __builtin_mve_vcvtq_from_f_uv4si (__a);
}
16970 | ||
/* vcvtpq, float -> unsigned: conversion with a fixed rounding mode --
   per ACLE the "p" suffix selects round toward plus infinity.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtpq_u16_f16 (float16x8_t __a)
{
  return __builtin_mve_vcvtpq_uv8hi (__a);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtpq_u32_f32 (float32x4_t __a)
{
  return __builtin_mve_vcvtpq_uv4si (__a);
}
16984 | ||
/* vcvtnq / vcvtmq, f16 -> u16: fixed-rounding-mode conversions -- per
   ACLE "n" rounds to nearest (ties to even) and "m" rounds toward
   minus infinity.  (NOTE(review): the u32/f32 vcvtnq counterpart does
   not appear in this section -- verify against the full header.)  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtnq_u16_f16 (float16x8_t __a)
{
  return __builtin_mve_vcvtnq_uv8hi (__a);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtmq_u16_f16 (float16x8_t __a)
{
  return __builtin_mve_vcvtmq_uv8hi (__a);
}
16998 | ||
/* vcvtmq (round toward minus infinity) and vcvtaq (round to nearest,
   ties away from zero) conversions to unsigned integers, per the ACLE
   rounding-suffix convention.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtmq_u32_f32 (float32x4_t __a)
{
  return __builtin_mve_vcvtmq_uv4si (__a);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtaq_u16_f16 (float16x8_t __a)
{
  return __builtin_mve_vcvtaq_uv8hi (__a);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtaq_u32_f32 (float32x4_t __a)
{
  return __builtin_mve_vcvtaq_uv4si (__a);
}
17019 | ||
/* Float -> signed integer conversions with explicit rounding modes.
   Per the ACLE suffix convention: "a" = to nearest, ties away from
   zero; "n" = to nearest, ties to even; "p" = toward plus infinity;
   "m" = toward minus infinity.  All are thin wrappers; the builtin
   (sv8hi for f16->s16, sv4si for f32->s32) carries the semantics.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtaq_s16_f16 (float16x8_t __a)
{
  return __builtin_mve_vcvtaq_sv8hi (__a);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtaq_s32_f32 (float32x4_t __a)
{
  return __builtin_mve_vcvtaq_sv4si (__a);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtnq_s16_f16 (float16x8_t __a)
{
  return __builtin_mve_vcvtnq_sv8hi (__a);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtnq_s32_f32 (float32x4_t __a)
{
  return __builtin_mve_vcvtnq_sv4si (__a);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtpq_s16_f16 (float16x8_t __a)
{
  return __builtin_mve_vcvtpq_sv8hi (__a);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtpq_s32_f32 (float32x4_t __a)
{
  return __builtin_mve_vcvtpq_sv4si (__a);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtmq_s16_f16 (float16x8_t __a)
{
  return __builtin_mve_vcvtmq_sv8hi (__a);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtmq_s32_f32 (float32x4_t __a)
{
  return __builtin_mve_vcvtmq_sv4si (__a);
}
17075 | ||
/* vsubq_n: vector minus scalar -- the "_n" suffix marks the scalar
   second operand, which is applied against every lane (ACLE vsubq_n).  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_n_f16 (float16x8_t __a, float16_t __b)
{
  return __builtin_mve_vsubq_n_fv8hf (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_n_f32 (float32x4_t __a, float32_t __b)
{
  return __builtin_mve_vsubq_n_fv4sf (__a, __b);
}
17089 | ||
/* vbrsrq_n: bit-reverse-and-shift-right intrinsic (VBRSR); takes a
   vector and a 32-bit scalar operand.  Exact lane semantics are
   defined by the builtin/instruction -- see the ACLE vbrsrq entry.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq_n_f16 (float16x8_t __a, int32_t __b)
{
  return __builtin_mve_vbrsrq_n_fv8hf (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq_n_f32 (float32x4_t __a, int32_t __b)
{
  return __builtin_mve_vbrsrq_n_fv4sf (__a, __b);
}
17103 | ||
/* vcvtq_n, integer -> float: fixed-point conversion.  __imm6 is the
   number of fractional bits and must be a compile-time constant (the
   "const int" parameter is checked by the builtin expander, hence the
   __imm6 name -- a 1..16/1..32 range per ACLE; confirm against the
   expander).  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_n_f16_s16 (int16x8_t __a, const int __imm6)
{
  return __builtin_mve_vcvtq_n_to_f_sv8hf (__a, __imm6);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_n_f32_s32 (int32x4_t __a, const int __imm6)
{
  return __builtin_mve_vcvtq_n_to_f_sv4sf (__a, __imm6);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_n_f16_u16 (uint16x8_t __a, const int __imm6)
{
  return __builtin_mve_vcvtq_n_to_f_uv8hf (__a, __imm6);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_n_f32_u32 (uint32x4_t __a, const int __imm6)
{
  return __builtin_mve_vcvtq_n_to_f_uv4sf (__a, __imm6);
}
17131 | ||
/* vcreateq: build a 128-bit vector from two 64-bit scalar halves
   (ACLE vcreateq; __a supplies the low doubleword, __b the high --
   lane placement is defined by the builtin).  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcreateq_f16 (uint64_t __a, uint64_t __b)
{
  return __builtin_mve_vcreateq_fv8hf (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcreateq_f32 (uint64_t __a, uint64_t __b)
{
  return __builtin_mve_vcreateq_fv4sf (__a, __b);
}
17145 | ||
/* vcvtq_n, float -> integer: fixed-point conversion; __imm6 gives the
   number of fractional bits and must be a compile-time constant
   (validated by the builtin expander).  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_n_s16_f16 (float16x8_t __a, const int __imm6)
{
  return __builtin_mve_vcvtq_n_from_f_sv8hi (__a, __imm6);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_n_s32_f32 (float32x4_t __a, const int __imm6)
{
  return __builtin_mve_vcvtq_n_from_f_sv4si (__a, __imm6);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_n_u16_f16 (float16x8_t __a, const int __imm6)
{
  return __builtin_mve_vcvtq_n_from_f_uv8hi (__a, __imm6);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_n_u32_f32 (float32x4_t __a, const int __imm6)
{
  return __builtin_mve_vcvtq_n_from_f_uv4si (__a, __imm6);
}
17173 | ||
/* f16 vector comparisons.  Each returns an mve_pred16_t predicate mask
   with one bit per byte lane set according to the lane-wise result.
   The "_n" forms compare every lane of __a against the scalar __b; the
   plain forms compare lane-against-lane.  ne/lt/le/gt/ge/eq name the
   relation, as in the ACLE vcmp*q intrinsics.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_n_f16 (float16x8_t __a, float16_t __b)
{
  return __builtin_mve_vcmpneq_n_fv8hf (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_f16 (float16x8_t __a, float16x8_t __b)
{
  return __builtin_mve_vcmpneq_fv8hf (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq_n_f16 (float16x8_t __a, float16_t __b)
{
  return __builtin_mve_vcmpltq_n_fv8hf (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq_f16 (float16x8_t __a, float16x8_t __b)
{
  return __builtin_mve_vcmpltq_fv8hf (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq_n_f16 (float16x8_t __a, float16_t __b)
{
  return __builtin_mve_vcmpleq_n_fv8hf (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq_f16 (float16x8_t __a, float16x8_t __b)
{
  return __builtin_mve_vcmpleq_fv8hf (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq_n_f16 (float16x8_t __a, float16_t __b)
{
  return __builtin_mve_vcmpgtq_n_fv8hf (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq_f16 (float16x8_t __a, float16x8_t __b)
{
  return __builtin_mve_vcmpgtq_fv8hf (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq_n_f16 (float16x8_t __a, float16_t __b)
{
  return __builtin_mve_vcmpgeq_n_fv8hf (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq_f16 (float16x8_t __a, float16x8_t __b)
{
  return __builtin_mve_vcmpgeq_fv8hf (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_n_f16 (float16x8_t __a, float16_t __b)
{
  return __builtin_mve_vcmpeqq_n_fv8hf (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_f16 (float16x8_t __a, float16x8_t __b)
{
  return __builtin_mve_vcmpeqq_fv8hf (__a, __b);
}
17257 | ||
/* f16 two-operand arithmetic and min/max.  Plain forms are
   lane-by-lane; "_n" forms take a scalar second operand.  The
   minnm/maxnm family follows IEEE minNum/maxNum NaN handling (per the
   ACLE vminnmq/vmaxnmq spec); the "v" suffix denotes an
   across-vector reduction seeded with the scalar __a, and the "a"
   suffix the absolute-value variant -- semantics live in the
   builtins.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_f16 (float16x8_t __a, float16x8_t __b)
{
  return __builtin_mve_vsubq_fv8hf (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_f16 (float16x8_t __a, float16x8_t __b)
{
  return __builtin_mve_vorrq_fv8hf (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_f16 (float16x8_t __a, float16x8_t __b)
{
  return __builtin_mve_vornq_fv8hf (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_n_f16 (float16x8_t __a, float16_t __b)
{
  return __builtin_mve_vmulq_n_fv8hf (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_f16 (float16x8_t __a, float16x8_t __b)
{
  return __builtin_mve_vmulq_fv8hf (__a, __b);
}

__extension__ extern __inline float16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmvq_f16 (float16_t __a, float16x8_t __b)
{
  return __builtin_mve_vminnmvq_fv8hf (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmq_f16 (float16x8_t __a, float16x8_t __b)
{
  return __builtin_mve_vminnmq_fv8hf (__a, __b);
}

__extension__ extern __inline float16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmavq_f16 (float16_t __a, float16x8_t __b)
{
  return __builtin_mve_vminnmavq_fv8hf (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmaq_f16 (float16x8_t __a, float16x8_t __b)
{
  return __builtin_mve_vminnmaq_fv8hf (__a, __b);
}

__extension__ extern __inline float16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmvq_f16 (float16_t __a, float16x8_t __b)
{
  return __builtin_mve_vmaxnmvq_fv8hf (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmq_f16 (float16x8_t __a, float16x8_t __b)
{
  return __builtin_mve_vmaxnmq_fv8hf (__a, __b);
}

__extension__ extern __inline float16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmavq_f16 (float16_t __a, float16x8_t __b)
{
  return __builtin_mve_vmaxnmavq_fv8hf (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmaq_f16 (float16x8_t __a, float16x8_t __b)
{
  return __builtin_mve_vmaxnmaq_fv8hf (__a, __b);
}
17348 | ||
/* f16 bitwise and complex-arithmetic operations.  vcmulq/vcaddq are
   the complex multiply/add intrinsics; the rot90/rot180/rot270
   suffix gives the rotation applied to the second operand, as in the
   ACLE vcmulq_rot*/vcaddq_rot* spec.  The bitwise ops (eor, bic, and,
   orr, orn) operate on the raw vector bits.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq_f16 (float16x8_t __a, float16x8_t __b)
{
  return __builtin_mve_veorq_fv8hf (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_rot90_f16 (float16x8_t __a, float16x8_t __b)
{
  return __builtin_mve_vcmulq_rot90_fv8hf (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_rot270_f16 (float16x8_t __a, float16x8_t __b)
{
  return __builtin_mve_vcmulq_rot270_fv8hf (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_rot180_f16 (float16x8_t __a, float16x8_t __b)
{
  return __builtin_mve_vcmulq_rot180_fv8hf (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_f16 (float16x8_t __a, float16x8_t __b)
{
  return __builtin_mve_vcmulq_fv8hf (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_f16 (float16x8_t __a, float16x8_t __b)
{
  return __builtin_mve_vcaddq_rot90_fv8hf (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_f16 (float16x8_t __a, float16x8_t __b)
{
  return __builtin_mve_vcaddq_rot270_fv8hf (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_f16 (float16x8_t __a, float16x8_t __b)
{
  return __builtin_mve_vbicq_fv8hf (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_f16 (float16x8_t __a, float16x8_t __b)
{
  return __builtin_mve_vandq_fv8hf (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_n_f16 (float16x8_t __a, float16_t __b)
{
  return __builtin_mve_vaddq_n_fv8hf (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq_f16 (float16x8_t __a, float16x8_t __b)
{
  return __builtin_mve_vabdq_fv8hf (__a, __b);
}
17425 | ||
/* f32 vector comparisons -- same contract as the f16 group: each
   returns an mve_pred16_t predicate mask; "_n" forms compare against a
   broadcast scalar, plain forms lane-against-lane.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_n_f32 (float32x4_t __a, float32_t __b)
{
  return __builtin_mve_vcmpneq_n_fv4sf (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_mve_vcmpneq_fv4sf (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq_n_f32 (float32x4_t __a, float32_t __b)
{
  return __builtin_mve_vcmpltq_n_fv4sf (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_mve_vcmpltq_fv4sf (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq_n_f32 (float32x4_t __a, float32_t __b)
{
  return __builtin_mve_vcmpleq_n_fv4sf (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_mve_vcmpleq_fv4sf (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq_n_f32 (float32x4_t __a, float32_t __b)
{
  return __builtin_mve_vcmpgtq_n_fv4sf (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_mve_vcmpgtq_fv4sf (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq_n_f32 (float32x4_t __a, float32_t __b)
{
  return __builtin_mve_vcmpgeq_n_fv4sf (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_mve_vcmpgeq_fv4sf (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_n_f32 (float32x4_t __a, float32_t __b)
{
  return __builtin_mve_vcmpeqq_n_fv4sf (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_mve_vcmpeqq_fv4sf (__a, __b);
}
17509 | ||
/* f32 two-operand arithmetic and min/max -- mirror of the f16 group
   above, using the v4sf builtins.  "v"-suffixed forms are
   across-vector reductions seeded with the scalar __a; "a"-suffixed
   forms are the absolute-value variants.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_mve_vsubq_fv4sf (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_mve_vorrq_fv4sf (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_mve_vornq_fv4sf (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_n_f32 (float32x4_t __a, float32_t __b)
{
  return __builtin_mve_vmulq_n_fv4sf (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_mve_vmulq_fv4sf (__a, __b);
}

__extension__ extern __inline float32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmvq_f32 (float32_t __a, float32x4_t __b)
{
  return __builtin_mve_vminnmvq_fv4sf (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmq_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_mve_vminnmq_fv4sf (__a, __b);
}

__extension__ extern __inline float32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmavq_f32 (float32_t __a, float32x4_t __b)
{
  return __builtin_mve_vminnmavq_fv4sf (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmaq_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_mve_vminnmaq_fv4sf (__a, __b);
}

__extension__ extern __inline float32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmvq_f32 (float32_t __a, float32x4_t __b)
{
  return __builtin_mve_vmaxnmvq_fv4sf (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmq_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_mve_vmaxnmq_fv4sf (__a, __b);
}

__extension__ extern __inline float32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmavq_f32 (float32_t __a, float32x4_t __b)
{
  return __builtin_mve_vmaxnmavq_fv4sf (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmaq_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_mve_vmaxnmaq_fv4sf (__a, __b);
}
17600 | ||
/* f32 bitwise and complex-arithmetic operations -- mirror of the f16
   group above, using the v4sf builtins.  rot90/rot180/rot270 give the
   rotation applied to the second operand of the complex ops.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_mve_veorq_fv4sf (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_rot90_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_mve_vcmulq_rot90_fv4sf (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_rot270_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_mve_vcmulq_rot270_fv4sf (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_rot180_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_mve_vcmulq_rot180_fv4sf (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_mve_vcmulq_fv4sf (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_mve_vcaddq_rot90_fv4sf (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_mve_vcaddq_rot270_fv4sf (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_mve_vbicq_fv4sf (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_mve_vandq_fv4sf (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_n_f32 (float32x4_t __a, float32_t __b)
{
  return __builtin_mve_vaddq_n_fv4sf (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_mve_vabdq_fv4sf (__a, __b);
}
17677 | ||
/* f32 -> f16 narrowing conversions.  Both take the existing f16 vector
   __a and the f32 source __b and return a float16x8_t; presumably the
   "t"/"b" forms write the top/bottom half-lanes of __a respectively —
   confirm against the Arm MVE intrinsics reference (VCVTT/VCVTB).  */

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvttq_f16_f32 (float16x8_t __a, float32x4_t __b)
{
  return __builtin_mve_vcvttq_f16_f32v8hf (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtbq_f16_f32 (float16x8_t __a, float32x4_t __b)
{
  return __builtin_mve_vcvtbq_f16_f32v8hf (__a, __b);
}
17691 | ||
/* Predicated equality comparisons: compare __a with __b under the
   predicate __p and return an mve_pred16_t result mask.  */

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_m_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpeqq_m_fv8hf (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_m_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpeqq_m_fv4sf (__a, __b, __p);
}
17705 | ||
/* Predicated VCVTA float -> integer conversions (signed/unsigned,
   16- and 32-bit).  __inactive presumably supplies the result lanes
   where the predicate __p is false — confirm against the Arm MVE
   intrinsics reference.  */

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtaq_m_s16_f16 (int16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtaq_m_sv8hi (__inactive, __a, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtaq_m_u16_f16 (uint16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtaq_m_uv8hi (__inactive, __a, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtaq_m_s32_f32 (int32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtaq_m_sv4si (__inactive, __a, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtaq_m_u32_f32 (uint32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtaq_m_uv4si (__inactive, __a, __p);
}
17733 | ||
/* Predicated integer -> float conversions (the builtins carry a
   "to_f" tag).  __inactive presumably supplies the result lanes where
   the predicate __p is false — confirm against the Arm MVE intrinsics
   reference.  */

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_m_f16_s16 (float16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_to_f_sv8hf (__inactive, __a, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_m_f16_u16 (float16x8_t __inactive, uint16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_to_f_uv8hf (__inactive, __a, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_m_f32_s32 (float32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_to_f_sv4sf (__inactive, __a, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_m_f32_u32 (float32x4_t __inactive, uint32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_to_f_uv4sf (__inactive, __a, __p);
}
17761 | ||
17762 | ||
/* Predicated f16 <-> f32 half-lane conversions (VCVTB/VCVTT forms).
   Note the f16_f32 (narrowing) forms pass the destination vector __a
   itself, while the f32_f16 (widening) forms take a separate
   __inactive vector — matching the differing builtin signatures.  */

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtbq_m_f16_f32 (float16x8_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcvtbq_m_f16_f32v8hf (__a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtbq_m_f32_f16 (float32x4_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtbq_m_f32_f16v4sf (__inactive, __a, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvttq_m_f16_f32 (float16x8_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcvttq_m_f16_f32v8hf (__a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvttq_m_f32_f16 (float32x4_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvttq_m_f32_f16v4sf (__inactive, __a, __p);
}
17790 | ||
/* Predicated VREV32 on f16 lanes; __inactive presumably supplies the
   lanes where __p is false — confirm against the MVE reference.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev32q_m_f16 (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrev32q_m_fv8hf (__inactive, __a, __p);
}
17797 | ||
/* f16 ternary (accumulating) operations: VCMLA with its rotated
   variants and the fused multiply-add/subtract family.  All take the
   accumulator __a plus operands __b/__c and forward to the builtin;
   the "_n" forms take a scalar final operand.  */

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmlaq_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c)
{
  return __builtin_mve_vcmlaq_fv8hf (__a, __b, __c);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmlaq_rot180_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c)
{
  return __builtin_mve_vcmlaq_rot180_fv8hf (__a, __b, __c);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmlaq_rot270_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c)
{
  return __builtin_mve_vcmlaq_rot270_fv8hf (__a, __b, __c);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmlaq_rot90_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c)
{
  return __builtin_mve_vcmlaq_rot90_fv8hf (__a, __b, __c);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vfmaq_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c)
{
  return __builtin_mve_vfmaq_fv8hf (__a, __b, __c);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vfmaq_n_f16 (float16x8_t __a, float16x8_t __b, float16_t __c)
{
  return __builtin_mve_vfmaq_n_fv8hf (__a, __b, __c);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vfmasq_n_f16 (float16x8_t __a, float16x8_t __b, float16_t __c)
{
  return __builtin_mve_vfmasq_n_fv8hf (__a, __b, __c);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vfmsq_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c)
{
  return __builtin_mve_vfmsq_fv8hf (__a, __b, __c);
}
17853 | ||
/* Predicated absolute value on f16 lanes; __inactive presumably
   supplies the lanes where __p is false — confirm against the MVE
   reference.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabsq_m_f16 (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vabsq_m_fv8hf (__inactive, __a, __p);
}
17860 | ||
/* Predicated f16 -> signed-int16 conversions with the four rounding
   forms (VCVTM/VCVTN/VCVTP and plain VCVT; the last builtin carries a
   "from_f" tag).  */

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtmq_m_s16_f16 (int16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtmq_m_sv8hi (__inactive, __a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtnq_m_s16_f16 (int16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtnq_m_sv8hi (__inactive, __a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtpq_m_s16_f16 (int16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtpq_m_sv8hi (__inactive, __a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_m_s16_f16 (int16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_from_f_sv8hi (__inactive, __a, __p);
}
17888 | ||
/* Predicated scalar broadcast (VDUP) of __a into f16 lanes selected
   by __p; __inactive presumably supplies the remaining lanes.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdupq_m_n_f16 (float16x8_t __inactive, float16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vdupq_m_n_fv8hf (__inactive, __a, __p);
}
17895 | ||
/* Predicated f16 NaN-propagation-aware max/min family.  The "_m"
   forms return a vector; the "_p" (vq/avq) forms fold into and return
   a scalar float16_t accumulator __a.  */

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmaq_m_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxnmaq_m_fv8hf (__a, __b, __p);
}

__extension__ extern __inline float16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmavq_p_f16 (float16_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxnmavq_p_fv8hf (__a, __b, __p);
}

__extension__ extern __inline float16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmvq_p_f16 (float16_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxnmvq_p_fv8hf (__a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmaq_m_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminnmaq_m_fv8hf (__a, __b, __p);
}

__extension__ extern __inline float16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmavq_p_f16 (float16_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminnmavq_p_fv8hf (__a, __b, __p);
}

__extension__ extern __inline float16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmvq_p_f16 (float16_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminnmvq_p_fv8hf (__a, __b, __p);
}
17937 | ||
/* Predicated f16 negate, predicate-driven lane select (VPSEL), and
   64-bit-group lane reversal (VREV64).  */

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vnegq_m_f16 (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vnegq_m_fv8hf (__inactive, __a, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vpselq_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vpselq_fv8hf (__a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_m_f16 (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrev64q_m_fv8hf (__inactive, __a, __p);
}
17958 | ||
/* Predicated f16 round-to-integral family (VRINTA/M/N/P/Z/X forms as
   named by the intrinsic suffixes a/m/n/p/(none)/x).  */

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndaq_m_f16 (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrndaq_m_fv8hf (__inactive, __a, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndmq_m_f16 (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrndmq_m_fv8hf (__inactive, __a, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndnq_m_f16 (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrndnq_m_fv8hf (__inactive, __a, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndpq_m_f16 (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrndpq_m_fv8hf (__inactive, __a, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndq_m_f16 (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrndq_m_fv8hf (__inactive, __a, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndxq_m_f16 (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrndxq_m_fv8hf (__inactive, __a, __p);
}
18000 | ||
/* Predicated f16 comparisons returning an mve_pred16_t mask.
   Vector-vector forms compare __a with __b; "_n" forms compare
   against the scalar __b.  */

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_m_n_f16 (float16x8_t __a, float16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpeqq_m_n_fv8hf (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq_m_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpgeq_m_fv8hf (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq_m_n_f16 (float16x8_t __a, float16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpgeq_m_n_fv8hf (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq_m_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpgtq_m_fv8hf (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq_m_n_f16 (float16x8_t __a, float16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpgtq_m_n_fv8hf (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq_m_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpleq_m_fv8hf (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq_m_n_f16 (float16x8_t __a, float16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpleq_m_n_fv8hf (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq_m_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpltq_m_fv8hf (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq_m_n_f16 (float16x8_t __a, float16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpltq_m_n_fv8hf (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_m_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpneq_m_fv8hf (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_m_n_f16 (float16x8_t __a, float16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpneq_m_n_fv8hf (__a, __b, __p);
}
18077 | ||
/* Predicated f16 -> unsigned-int16 conversions with the four rounding
   forms (VCVTM/VCVTN/VCVTP and plain VCVT with a "from_f" builtin).  */

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtmq_m_u16_f16 (uint16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtmq_m_uv8hi (__inactive, __a, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtnq_m_u16_f16 (uint16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtnq_m_uv8hi (__inactive, __a, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtpq_m_u16_f16 (uint16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtpq_m_uv8hi (__inactive, __a, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_m_u16_f16 (uint16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_from_f_uv8hi (__inactive, __a, __p);
}
18105 | ||
/* f32 ternary (accumulating) operations: VCMLA with its rotated
   variants and the fused multiply-add/subtract family; mirrors the
   f16 group above with "v4sf" builtins.  "_n" forms take a scalar
   final operand.  */

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmlaq_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
{
  return __builtin_mve_vcmlaq_fv4sf (__a, __b, __c);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmlaq_rot180_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
{
  return __builtin_mve_vcmlaq_rot180_fv4sf (__a, __b, __c);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmlaq_rot270_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
{
  return __builtin_mve_vcmlaq_rot270_fv4sf (__a, __b, __c);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmlaq_rot90_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
{
  return __builtin_mve_vcmlaq_rot90_fv4sf (__a, __b, __c);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vfmaq_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
{
  return __builtin_mve_vfmaq_fv4sf (__a, __b, __c);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vfmaq_n_f32 (float32x4_t __a, float32x4_t __b, float32_t __c)
{
  return __builtin_mve_vfmaq_n_fv4sf (__a, __b, __c);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vfmasq_n_f32 (float32x4_t __a, float32x4_t __b, float32_t __c)
{
  return __builtin_mve_vfmasq_n_fv4sf (__a, __b, __c);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vfmsq_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
{
  return __builtin_mve_vfmsq_fv4sf (__a, __b, __c);
}
18161 | ||
/* Predicated absolute value on f32 lanes; __inactive presumably
   supplies the lanes where __p is false — confirm against the MVE
   reference.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabsq_m_f32 (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vabsq_m_fv4sf (__inactive, __a, __p);
}
18168 | ||
/* Predicated f32 -> signed-int32 conversions with the four rounding
   forms (VCVTM/VCVTN/VCVTP and plain VCVT with a "from_f" builtin).  */

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtmq_m_s32_f32 (int32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtmq_m_sv4si (__inactive, __a, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtnq_m_s32_f32 (int32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtnq_m_sv4si (__inactive, __a, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtpq_m_s32_f32 (int32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtpq_m_sv4si (__inactive, __a, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_m_s32_f32 (int32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_from_f_sv4si (__inactive, __a, __p);
}
18196 | ||
/* Predicated scalar broadcast (VDUP) of __a into f32 lanes selected
   by __p; __inactive presumably supplies the remaining lanes.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdupq_m_n_f32 (float32x4_t __inactive, float32_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vdupq_m_n_fv4sf (__inactive, __a, __p);
}
18203 | ||
/* Predicated f32 NaN-propagation-aware max/min family; mirrors the
   f16 group.  "_m" forms return a vector; "_p" forms fold into and
   return a scalar float32_t accumulator __a.  */

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmaq_m_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxnmaq_m_fv4sf (__a, __b, __p);
}

__extension__ extern __inline float32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmavq_p_f32 (float32_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxnmavq_p_fv4sf (__a, __b, __p);
}

__extension__ extern __inline float32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmvq_p_f32 (float32_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxnmvq_p_fv4sf (__a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmaq_m_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminnmaq_m_fv4sf (__a, __b, __p);
}

__extension__ extern __inline float32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmavq_p_f32 (float32_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminnmavq_p_fv4sf (__a, __b, __p);
}

__extension__ extern __inline float32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmvq_p_f32 (float32_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminnmvq_p_fv4sf (__a, __b, __p);
}
18245 | ||
/* Predicated f32 negate, predicate-driven lane select (VPSEL), and
   64-bit-group lane reversal (VREV64).  */

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vnegq_m_f32 (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vnegq_m_fv4sf (__inactive, __a, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vpselq_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vpselq_fv4sf (__a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_m_f32 (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrev64q_m_fv4sf (__inactive, __a, __p);
}
18266 | ||
/* Predicated f32 round-to-integral family (VRINTA/M/N/P/Z/X forms as
   named by the intrinsic suffixes a/m/n/p/(none)/x).  */

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndaq_m_f32 (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrndaq_m_fv4sf (__inactive, __a, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndmq_m_f32 (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrndmq_m_fv4sf (__inactive, __a, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndnq_m_f32 (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrndnq_m_fv4sf (__inactive, __a, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndpq_m_f32 (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrndpq_m_fv4sf (__inactive, __a, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndq_m_f32 (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrndq_m_fv4sf (__inactive, __a, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndxq_m_f32 (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrndxq_m_fv4sf (__inactive, __a, __p);
}
18308 | ||
/* Predicated f32 comparisons returning an mve_pred16_t mask.
   Vector-vector forms compare __a with __b; "_n" forms compare
   against the scalar __b.  */

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_m_n_f32 (float32x4_t __a, float32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpeqq_m_n_fv4sf (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq_m_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpgeq_m_fv4sf (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq_m_n_f32 (float32x4_t __a, float32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpgeq_m_n_fv4sf (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq_m_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpgtq_m_fv4sf (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq_m_n_f32 (float32x4_t __a, float32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpgtq_m_n_fv4sf (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq_m_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpleq_m_fv4sf (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq_m_n_f32 (float32x4_t __a, float32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpleq_m_n_fv4sf (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq_m_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpltq_m_fv4sf (__a, __b, __p);
}
18364 | ||
18365 | __extension__ extern __inline mve_pred16_t | |
18366 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18367 | __arm_vcmpltq_m_n_f32 (float32x4_t __a, float32_t __b, mve_pred16_t __p) | |
18368 | { | |
18369 | return __builtin_mve_vcmpltq_m_n_fv4sf (__a, __b, __p); | |
18370 | } | |
18371 | ||
18372 | __extension__ extern __inline mve_pred16_t | |
18373 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18374 | __arm_vcmpneq_m_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p) | |
18375 | { | |
18376 | return __builtin_mve_vcmpneq_m_fv4sf (__a, __b, __p); | |
18377 | } | |
18378 | ||
18379 | __extension__ extern __inline mve_pred16_t | |
18380 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18381 | __arm_vcmpneq_m_n_f32 (float32x4_t __a, float32_t __b, mve_pred16_t __p) | |
18382 | { | |
18383 | return __builtin_mve_vcmpneq_m_n_fv4sf (__a, __b, __p); | |
18384 | } | |
18385 | ||
18386 | __extension__ extern __inline uint32x4_t | |
18387 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18388 | __arm_vcvtmq_m_u32_f32 (uint32x4_t __inactive, float32x4_t __a, mve_pred16_t __p) | |
18389 | { | |
18390 | return __builtin_mve_vcvtmq_m_uv4si (__inactive, __a, __p); | |
18391 | } | |
18392 | ||
18393 | __extension__ extern __inline uint32x4_t | |
18394 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18395 | __arm_vcvtnq_m_u32_f32 (uint32x4_t __inactive, float32x4_t __a, mve_pred16_t __p) | |
18396 | { | |
18397 | return __builtin_mve_vcvtnq_m_uv4si (__inactive, __a, __p); | |
18398 | } | |
18399 | ||
18400 | __extension__ extern __inline uint32x4_t | |
18401 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18402 | __arm_vcvtpq_m_u32_f32 (uint32x4_t __inactive, float32x4_t __a, mve_pred16_t __p) | |
18403 | { | |
18404 | return __builtin_mve_vcvtpq_m_uv4si (__inactive, __a, __p); | |
18405 | } | |
18406 | ||
18407 | __extension__ extern __inline uint32x4_t | |
18408 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18409 | __arm_vcvtq_m_u32_f32 (uint32x4_t __inactive, float32x4_t __a, mve_pred16_t __p) | |
18410 | { | |
18411 | return __builtin_mve_vcvtq_m_from_f_uv4si (__inactive, __a, __p); | |
18412 | } | |
18413 | ||
18414 | __extension__ extern __inline float16x8_t | |
18415 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18416 | __arm_vcvtq_m_n_f16_u16 (float16x8_t __inactive, uint16x8_t __a, const int __imm6, mve_pred16_t __p) | |
18417 | { | |
18418 | return __builtin_mve_vcvtq_m_n_to_f_uv8hf (__inactive, __a, __imm6, __p); | |
18419 | } | |
18420 | ||
18421 | __extension__ extern __inline float16x8_t | |
18422 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18423 | __arm_vcvtq_m_n_f16_s16 (float16x8_t __inactive, int16x8_t __a, const int __imm6, mve_pred16_t __p) | |
18424 | { | |
18425 | return __builtin_mve_vcvtq_m_n_to_f_sv8hf (__inactive, __a, __imm6, __p); | |
18426 | } | |
18427 | ||
18428 | __extension__ extern __inline float32x4_t | |
18429 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18430 | __arm_vcvtq_m_n_f32_u32 (float32x4_t __inactive, uint32x4_t __a, const int __imm6, mve_pred16_t __p) | |
18431 | { | |
18432 | return __builtin_mve_vcvtq_m_n_to_f_uv4sf (__inactive, __a, __imm6, __p); | |
18433 | } | |
18434 | ||
18435 | __extension__ extern __inline float32x4_t | |
18436 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18437 | __arm_vcvtq_m_n_f32_s32 (float32x4_t __inactive, int32x4_t __a, const int __imm6, mve_pred16_t __p) | |
18438 | { | |
18439 | return __builtin_mve_vcvtq_m_n_to_f_sv4sf (__inactive, __a, __imm6, __p); | |
18440 | } | |
18441 | ||
18442 | __extension__ extern __inline float32x4_t | |
18443 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18444 | __arm_vabdq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p) | |
18445 | { | |
18446 | return __builtin_mve_vabdq_m_fv4sf (__inactive, __a, __b, __p); | |
18447 | } | |
18448 | ||
18449 | __extension__ extern __inline float16x8_t | |
18450 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18451 | __arm_vabdq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p) | |
18452 | { | |
18453 | return __builtin_mve_vabdq_m_fv8hf (__inactive, __a, __b, __p); | |
18454 | } | |
18455 | ||
18456 | __extension__ extern __inline float32x4_t | |
18457 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18458 | __arm_vaddq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p) | |
18459 | { | |
18460 | return __builtin_mve_vaddq_m_fv4sf (__inactive, __a, __b, __p); | |
18461 | } | |
18462 | ||
18463 | __extension__ extern __inline float16x8_t | |
18464 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18465 | __arm_vaddq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p) | |
18466 | { | |
18467 | return __builtin_mve_vaddq_m_fv8hf (__inactive, __a, __b, __p); | |
18468 | } | |
18469 | ||
18470 | __extension__ extern __inline float32x4_t | |
18471 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18472 | __arm_vaddq_m_n_f32 (float32x4_t __inactive, float32x4_t __a, float32_t __b, mve_pred16_t __p) | |
18473 | { | |
18474 | return __builtin_mve_vaddq_m_n_fv4sf (__inactive, __a, __b, __p); | |
18475 | } | |
18476 | ||
18477 | __extension__ extern __inline float16x8_t | |
18478 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18479 | __arm_vaddq_m_n_f16 (float16x8_t __inactive, float16x8_t __a, float16_t __b, mve_pred16_t __p) | |
18480 | { | |
18481 | return __builtin_mve_vaddq_m_n_fv8hf (__inactive, __a, __b, __p); | |
18482 | } | |
18483 | ||
18484 | __extension__ extern __inline float32x4_t | |
18485 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18486 | __arm_vandq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p) | |
18487 | { | |
18488 | return __builtin_mve_vandq_m_fv4sf (__inactive, __a, __b, __p); | |
18489 | } | |
18490 | ||
18491 | __extension__ extern __inline float16x8_t | |
18492 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18493 | __arm_vandq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p) | |
18494 | { | |
18495 | return __builtin_mve_vandq_m_fv8hf (__inactive, __a, __b, __p); | |
18496 | } | |
18497 | ||
18498 | __extension__ extern __inline float32x4_t | |
18499 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18500 | __arm_vbicq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p) | |
18501 | { | |
18502 | return __builtin_mve_vbicq_m_fv4sf (__inactive, __a, __b, __p); | |
18503 | } | |
18504 | ||
18505 | __extension__ extern __inline float16x8_t | |
18506 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18507 | __arm_vbicq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p) | |
18508 | { | |
18509 | return __builtin_mve_vbicq_m_fv8hf (__inactive, __a, __b, __p); | |
18510 | } | |
18511 | ||
18512 | __extension__ extern __inline float32x4_t | |
18513 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18514 | __arm_vbrsrq_m_n_f32 (float32x4_t __inactive, float32x4_t __a, int32_t __b, mve_pred16_t __p) | |
18515 | { | |
18516 | return __builtin_mve_vbrsrq_m_n_fv4sf (__inactive, __a, __b, __p); | |
18517 | } | |
18518 | ||
18519 | __extension__ extern __inline float16x8_t | |
18520 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18521 | __arm_vbrsrq_m_n_f16 (float16x8_t __inactive, float16x8_t __a, int32_t __b, mve_pred16_t __p) | |
18522 | { | |
18523 | return __builtin_mve_vbrsrq_m_n_fv8hf (__inactive, __a, __b, __p); | |
18524 | } | |
18525 | ||
18526 | __extension__ extern __inline float32x4_t | |
18527 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18528 | __arm_vcaddq_rot270_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p) | |
18529 | { | |
18530 | return __builtin_mve_vcaddq_rot270_m_fv4sf (__inactive, __a, __b, __p); | |
18531 | } | |
18532 | ||
18533 | __extension__ extern __inline float16x8_t | |
18534 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18535 | __arm_vcaddq_rot270_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p) | |
18536 | { | |
18537 | return __builtin_mve_vcaddq_rot270_m_fv8hf (__inactive, __a, __b, __p); | |
18538 | } | |
18539 | ||
18540 | __extension__ extern __inline float32x4_t | |
18541 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18542 | __arm_vcaddq_rot90_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p) | |
18543 | { | |
18544 | return __builtin_mve_vcaddq_rot90_m_fv4sf (__inactive, __a, __b, __p); | |
18545 | } | |
18546 | ||
18547 | __extension__ extern __inline float16x8_t | |
18548 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18549 | __arm_vcaddq_rot90_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p) | |
18550 | { | |
18551 | return __builtin_mve_vcaddq_rot90_m_fv8hf (__inactive, __a, __b, __p); | |
18552 | } | |
18553 | ||
18554 | __extension__ extern __inline float32x4_t | |
18555 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18556 | __arm_vcmlaq_m_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c, mve_pred16_t __p) | |
18557 | { | |
18558 | return __builtin_mve_vcmlaq_m_fv4sf (__a, __b, __c, __p); | |
18559 | } | |
18560 | ||
18561 | __extension__ extern __inline float16x8_t | |
18562 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18563 | __arm_vcmlaq_m_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c, mve_pred16_t __p) | |
18564 | { | |
18565 | return __builtin_mve_vcmlaq_m_fv8hf (__a, __b, __c, __p); | |
18566 | } | |
18567 | ||
18568 | __extension__ extern __inline float32x4_t | |
18569 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18570 | __arm_vcmlaq_rot180_m_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c, mve_pred16_t __p) | |
18571 | { | |
18572 | return __builtin_mve_vcmlaq_rot180_m_fv4sf (__a, __b, __c, __p); | |
18573 | } | |
18574 | ||
18575 | __extension__ extern __inline float16x8_t | |
18576 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18577 | __arm_vcmlaq_rot180_m_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c, mve_pred16_t __p) | |
18578 | { | |
18579 | return __builtin_mve_vcmlaq_rot180_m_fv8hf (__a, __b, __c, __p); | |
18580 | } | |
18581 | ||
18582 | __extension__ extern __inline float32x4_t | |
18583 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18584 | __arm_vcmlaq_rot270_m_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c, mve_pred16_t __p) | |
18585 | { | |
18586 | return __builtin_mve_vcmlaq_rot270_m_fv4sf (__a, __b, __c, __p); | |
18587 | } | |
18588 | ||
18589 | __extension__ extern __inline float16x8_t | |
18590 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18591 | __arm_vcmlaq_rot270_m_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c, mve_pred16_t __p) | |
18592 | { | |
18593 | return __builtin_mve_vcmlaq_rot270_m_fv8hf (__a, __b, __c, __p); | |
18594 | } | |
18595 | ||
18596 | __extension__ extern __inline float32x4_t | |
18597 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18598 | __arm_vcmlaq_rot90_m_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c, mve_pred16_t __p) | |
18599 | { | |
18600 | return __builtin_mve_vcmlaq_rot90_m_fv4sf (__a, __b, __c, __p); | |
18601 | } | |
18602 | ||
18603 | __extension__ extern __inline float16x8_t | |
18604 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18605 | __arm_vcmlaq_rot90_m_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c, mve_pred16_t __p) | |
18606 | { | |
18607 | return __builtin_mve_vcmlaq_rot90_m_fv8hf (__a, __b, __c, __p); | |
18608 | } | |
18609 | ||
18610 | __extension__ extern __inline float32x4_t | |
18611 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18612 | __arm_vcmulq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p) | |
18613 | { | |
18614 | return __builtin_mve_vcmulq_m_fv4sf (__inactive, __a, __b, __p); | |
18615 | } | |
18616 | ||
18617 | __extension__ extern __inline float16x8_t | |
18618 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18619 | __arm_vcmulq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p) | |
18620 | { | |
18621 | return __builtin_mve_vcmulq_m_fv8hf (__inactive, __a, __b, __p); | |
18622 | } | |
18623 | ||
18624 | __extension__ extern __inline float32x4_t | |
18625 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18626 | __arm_vcmulq_rot180_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p) | |
18627 | { | |
18628 | return __builtin_mve_vcmulq_rot180_m_fv4sf (__inactive, __a, __b, __p); | |
18629 | } | |
18630 | ||
18631 | __extension__ extern __inline float16x8_t | |
18632 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18633 | __arm_vcmulq_rot180_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p) | |
18634 | { | |
18635 | return __builtin_mve_vcmulq_rot180_m_fv8hf (__inactive, __a, __b, __p); | |
18636 | } | |
18637 | ||
18638 | __extension__ extern __inline float32x4_t | |
18639 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18640 | __arm_vcmulq_rot270_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p) | |
18641 | { | |
18642 | return __builtin_mve_vcmulq_rot270_m_fv4sf (__inactive, __a, __b, __p); | |
18643 | } | |
18644 | ||
18645 | __extension__ extern __inline float16x8_t | |
18646 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18647 | __arm_vcmulq_rot270_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p) | |
18648 | { | |
18649 | return __builtin_mve_vcmulq_rot270_m_fv8hf (__inactive, __a, __b, __p); | |
18650 | } | |
18651 | ||
18652 | __extension__ extern __inline float32x4_t | |
18653 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18654 | __arm_vcmulq_rot90_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p) | |
18655 | { | |
18656 | return __builtin_mve_vcmulq_rot90_m_fv4sf (__inactive, __a, __b, __p); | |
18657 | } | |
18658 | ||
18659 | __extension__ extern __inline float16x8_t | |
18660 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18661 | __arm_vcmulq_rot90_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p) | |
18662 | { | |
18663 | return __builtin_mve_vcmulq_rot90_m_fv8hf (__inactive, __a, __b, __p); | |
18664 | } | |
18665 | ||
18666 | __extension__ extern __inline int32x4_t | |
18667 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18668 | __arm_vcvtq_m_n_s32_f32 (int32x4_t __inactive, float32x4_t __a, const int __imm6, mve_pred16_t __p) | |
18669 | { | |
18670 | return __builtin_mve_vcvtq_m_n_from_f_sv4si (__inactive, __a, __imm6, __p); | |
18671 | } | |
18672 | ||
18673 | __extension__ extern __inline int16x8_t | |
18674 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18675 | __arm_vcvtq_m_n_s16_f16 (int16x8_t __inactive, float16x8_t __a, const int __imm6, mve_pred16_t __p) | |
18676 | { | |
18677 | return __builtin_mve_vcvtq_m_n_from_f_sv8hi (__inactive, __a, __imm6, __p); | |
18678 | } | |
18679 | ||
18680 | __extension__ extern __inline uint32x4_t | |
18681 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18682 | __arm_vcvtq_m_n_u32_f32 (uint32x4_t __inactive, float32x4_t __a, const int __imm6, mve_pred16_t __p) | |
18683 | { | |
18684 | return __builtin_mve_vcvtq_m_n_from_f_uv4si (__inactive, __a, __imm6, __p); | |
18685 | } | |
18686 | ||
18687 | __extension__ extern __inline uint16x8_t | |
18688 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18689 | __arm_vcvtq_m_n_u16_f16 (uint16x8_t __inactive, float16x8_t __a, const int __imm6, mve_pred16_t __p) | |
18690 | { | |
18691 | return __builtin_mve_vcvtq_m_n_from_f_uv8hi (__inactive, __a, __imm6, __p); | |
18692 | } | |
18693 | ||
18694 | __extension__ extern __inline float32x4_t | |
18695 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18696 | __arm_veorq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p) | |
18697 | { | |
18698 | return __builtin_mve_veorq_m_fv4sf (__inactive, __a, __b, __p); | |
18699 | } | |
18700 | ||
18701 | __extension__ extern __inline float16x8_t | |
18702 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18703 | __arm_veorq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p) | |
18704 | { | |
18705 | return __builtin_mve_veorq_m_fv8hf (__inactive, __a, __b, __p); | |
18706 | } | |
18707 | ||
18708 | __extension__ extern __inline float32x4_t | |
18709 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18710 | __arm_vfmaq_m_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c, mve_pred16_t __p) | |
18711 | { | |
18712 | return __builtin_mve_vfmaq_m_fv4sf (__a, __b, __c, __p); | |
18713 | } | |
18714 | ||
18715 | __extension__ extern __inline float16x8_t | |
18716 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18717 | __arm_vfmaq_m_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c, mve_pred16_t __p) | |
18718 | { | |
18719 | return __builtin_mve_vfmaq_m_fv8hf (__a, __b, __c, __p); | |
18720 | } | |
18721 | ||
18722 | __extension__ extern __inline float32x4_t | |
18723 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18724 | __arm_vfmaq_m_n_f32 (float32x4_t __a, float32x4_t __b, float32_t __c, mve_pred16_t __p) | |
18725 | { | |
18726 | return __builtin_mve_vfmaq_m_n_fv4sf (__a, __b, __c, __p); | |
18727 | } | |
18728 | ||
18729 | __extension__ extern __inline float16x8_t | |
18730 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18731 | __arm_vfmaq_m_n_f16 (float16x8_t __a, float16x8_t __b, float16_t __c, mve_pred16_t __p) | |
18732 | { | |
18733 | return __builtin_mve_vfmaq_m_n_fv8hf (__a, __b, __c, __p); | |
18734 | } | |
18735 | ||
18736 | __extension__ extern __inline float32x4_t | |
18737 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18738 | __arm_vfmasq_m_n_f32 (float32x4_t __a, float32x4_t __b, float32_t __c, mve_pred16_t __p) | |
18739 | { | |
18740 | return __builtin_mve_vfmasq_m_n_fv4sf (__a, __b, __c, __p); | |
18741 | } | |
18742 | ||
18743 | __extension__ extern __inline float16x8_t | |
18744 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18745 | __arm_vfmasq_m_n_f16 (float16x8_t __a, float16x8_t __b, float16_t __c, mve_pred16_t __p) | |
18746 | { | |
18747 | return __builtin_mve_vfmasq_m_n_fv8hf (__a, __b, __c, __p); | |
18748 | } | |
18749 | ||
18750 | __extension__ extern __inline float32x4_t | |
18751 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18752 | __arm_vfmsq_m_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c, mve_pred16_t __p) | |
18753 | { | |
18754 | return __builtin_mve_vfmsq_m_fv4sf (__a, __b, __c, __p); | |
18755 | } | |
18756 | ||
18757 | __extension__ extern __inline float16x8_t | |
18758 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18759 | __arm_vfmsq_m_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c, mve_pred16_t __p) | |
18760 | { | |
18761 | return __builtin_mve_vfmsq_m_fv8hf (__a, __b, __c, __p); | |
18762 | } | |
18763 | ||
18764 | __extension__ extern __inline float32x4_t | |
18765 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18766 | __arm_vmaxnmq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p) | |
18767 | { | |
18768 | return __builtin_mve_vmaxnmq_m_fv4sf (__inactive, __a, __b, __p); | |
18769 | } | |
18770 | ||
18771 | __extension__ extern __inline float16x8_t | |
18772 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18773 | __arm_vmaxnmq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p) | |
18774 | { | |
18775 | return __builtin_mve_vmaxnmq_m_fv8hf (__inactive, __a, __b, __p); | |
18776 | } | |
18777 | ||
18778 | __extension__ extern __inline float32x4_t | |
18779 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18780 | __arm_vminnmq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p) | |
18781 | { | |
18782 | return __builtin_mve_vminnmq_m_fv4sf (__inactive, __a, __b, __p); | |
18783 | } | |
18784 | ||
18785 | __extension__ extern __inline float16x8_t | |
18786 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18787 | __arm_vminnmq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p) | |
18788 | { | |
18789 | return __builtin_mve_vminnmq_m_fv8hf (__inactive, __a, __b, __p); | |
18790 | } | |
18791 | ||
18792 | __extension__ extern __inline float32x4_t | |
18793 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18794 | __arm_vmulq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p) | |
18795 | { | |
18796 | return __builtin_mve_vmulq_m_fv4sf (__inactive, __a, __b, __p); | |
18797 | } | |
18798 | ||
18799 | __extension__ extern __inline float16x8_t | |
18800 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18801 | __arm_vmulq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p) | |
18802 | { | |
18803 | return __builtin_mve_vmulq_m_fv8hf (__inactive, __a, __b, __p); | |
18804 | } | |
18805 | ||
18806 | __extension__ extern __inline float32x4_t | |
18807 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18808 | __arm_vmulq_m_n_f32 (float32x4_t __inactive, float32x4_t __a, float32_t __b, mve_pred16_t __p) | |
18809 | { | |
18810 | return __builtin_mve_vmulq_m_n_fv4sf (__inactive, __a, __b, __p); | |
18811 | } | |
18812 | ||
18813 | __extension__ extern __inline float16x8_t | |
18814 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18815 | __arm_vmulq_m_n_f16 (float16x8_t __inactive, float16x8_t __a, float16_t __b, mve_pred16_t __p) | |
18816 | { | |
18817 | return __builtin_mve_vmulq_m_n_fv8hf (__inactive, __a, __b, __p); | |
18818 | } | |
18819 | ||
18820 | __extension__ extern __inline float32x4_t | |
18821 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18822 | __arm_vornq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p) | |
18823 | { | |
18824 | return __builtin_mve_vornq_m_fv4sf (__inactive, __a, __b, __p); | |
18825 | } | |
18826 | ||
18827 | __extension__ extern __inline float16x8_t | |
18828 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18829 | __arm_vornq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p) | |
18830 | { | |
18831 | return __builtin_mve_vornq_m_fv8hf (__inactive, __a, __b, __p); | |
18832 | } | |
18833 | ||
18834 | __extension__ extern __inline float32x4_t | |
18835 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18836 | __arm_vorrq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p) | |
18837 | { | |
18838 | return __builtin_mve_vorrq_m_fv4sf (__inactive, __a, __b, __p); | |
18839 | } | |
18840 | ||
18841 | __extension__ extern __inline float16x8_t | |
18842 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18843 | __arm_vorrq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p) | |
18844 | { | |
18845 | return __builtin_mve_vorrq_m_fv8hf (__inactive, __a, __b, __p); | |
18846 | } | |
18847 | ||
18848 | __extension__ extern __inline float32x4_t | |
18849 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18850 | __arm_vsubq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p) | |
18851 | { | |
18852 | return __builtin_mve_vsubq_m_fv4sf (__inactive, __a, __b, __p); | |
18853 | } | |
18854 | ||
18855 | __extension__ extern __inline float16x8_t | |
18856 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18857 | __arm_vsubq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p) | |
18858 | { | |
18859 | return __builtin_mve_vsubq_m_fv8hf (__inactive, __a, __b, __p); | |
18860 | } | |
18861 | ||
18862 | __extension__ extern __inline float32x4_t | |
18863 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18864 | __arm_vsubq_m_n_f32 (float32x4_t __inactive, float32x4_t __a, float32_t __b, mve_pred16_t __p) | |
18865 | { | |
18866 | return __builtin_mve_vsubq_m_n_fv4sf (__inactive, __a, __b, __p); | |
18867 | } | |
18868 | ||
18869 | __extension__ extern __inline float16x8_t | |
18870 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18871 | __arm_vsubq_m_n_f16 (float16x8_t __inactive, float16x8_t __a, float16_t __b, mve_pred16_t __p) | |
18872 | { | |
18873 | return __builtin_mve_vsubq_m_n_fv8hf (__inactive, __a, __b, __p); | |
18874 | } | |
18875 | ||
18876 | __extension__ extern __inline float32x4_t | |
18877 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18878 | __arm_vld1q_f32 (float32_t const * __base) | |
18879 | { | |
18880 | return __builtin_mve_vld1q_fv4sf((__builtin_neon_si *) __base); | |
18881 | } | |
18882 | ||
18883 | __extension__ extern __inline float16x8_t | |
18884 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18885 | __arm_vld1q_f16 (float16_t const * __base) | |
18886 | { | |
18887 | return __builtin_mve_vld1q_fv8hf((__builtin_neon_hi *) __base); | |
18888 | } | |
18889 | ||
18890 | __extension__ extern __inline float32x4_t | |
18891 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18892 | __arm_vldrwq_f32 (float32_t const * __base) | |
18893 | { | |
18894 | return __builtin_mve_vldrwq_fv4sf((__builtin_neon_si *) __base); | |
18895 | } | |
18896 | ||
18897 | __extension__ extern __inline float32x4_t | |
18898 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18899 | __arm_vldrwq_z_f32 (float32_t const * __base, mve_pred16_t __p) | |
18900 | { | |
18901 | return __builtin_mve_vldrwq_z_fv4sf((__builtin_neon_si *) __base, __p); | |
18902 | } | |
18903 | ||
18904 | __extension__ extern __inline float16x8_t | |
18905 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18906 | __arm_vldrhq_z_f16 (float16_t const * __base, mve_pred16_t __p) | |
18907 | { | |
18908 | return __builtin_mve_vldrhq_z_fv8hf((__builtin_neon_hi *) __base, __p); | |
18909 | } | |
18910 | ||
18911 | __extension__ extern __inline float16x8_t | |
18912 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18913 | __arm_vldrhq_f16 (float16_t const * __base) | |
18914 | { | |
18915 | return __builtin_mve_vldrhq_fv8hf((__builtin_neon_hi *) __base); | |
18916 | } | |
18917 | ||
18918 | __extension__ extern __inline float16x8_t | |
18919 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18920 | __arm_vldrhq_gather_offset_f16 (float16_t const * __base, uint16x8_t __offset) | |
18921 | { | |
18922 | return __builtin_mve_vldrhq_gather_offset_fv8hf((__builtin_neon_hi *) __base, __offset); | |
18923 | } | |
18924 | ||
18925 | __extension__ extern __inline float16x8_t | |
18926 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18927 | __arm_vldrhq_gather_offset_z_f16 (float16_t const * __base, uint16x8_t __offset, mve_pred16_t __p) | |
18928 | { | |
18929 | return __builtin_mve_vldrhq_gather_offset_z_fv8hf((__builtin_neon_hi *) __base, __offset, __p); | |
18930 | } | |
18931 | ||
18932 | __extension__ extern __inline float16x8_t | |
18933 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18934 | __arm_vldrhq_gather_shifted_offset_f16 (float16_t const * __base, uint16x8_t __offset) | |
18935 | { | |
18936 | return __builtin_mve_vldrhq_gather_shifted_offset_fv8hf (__base, __offset); | |
18937 | } | |
18938 | ||
18939 | __extension__ extern __inline float16x8_t | |
18940 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18941 | __arm_vldrhq_gather_shifted_offset_z_f16 (float16_t const * __base, uint16x8_t __offset, mve_pred16_t __p) | |
18942 | { | |
18943 | return __builtin_mve_vldrhq_gather_shifted_offset_z_fv8hf (__base, __offset, __p); | |
18944 | } | |
18945 | ||
18946 | __extension__ extern __inline float32x4_t | |
18947 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18948 | __arm_vldrwq_gather_base_f32 (uint32x4_t __addr, const int __offset) | |
18949 | { | |
18950 | return __builtin_mve_vldrwq_gather_base_fv4sf (__addr, __offset); | |
18951 | } | |
18952 | ||
18953 | __extension__ extern __inline float32x4_t | |
18954 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18955 | __arm_vldrwq_gather_base_z_f32 (uint32x4_t __addr, const int __offset, mve_pred16_t __p) | |
18956 | { | |
18957 | return __builtin_mve_vldrwq_gather_base_z_fv4sf (__addr, __offset, __p); | |
18958 | } | |
18959 | ||
18960 | __extension__ extern __inline float32x4_t | |
18961 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18962 | __arm_vldrwq_gather_offset_f32 (float32_t const * __base, uint32x4_t __offset) | |
18963 | { | |
18964 | return __builtin_mve_vldrwq_gather_offset_fv4sf((__builtin_neon_si *) __base, __offset); | |
18965 | } | |
18966 | ||
18967 | __extension__ extern __inline float32x4_t | |
18968 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18969 | __arm_vldrwq_gather_offset_z_f32 (float32_t const * __base, uint32x4_t __offset, mve_pred16_t __p) | |
18970 | { | |
18971 | return __builtin_mve_vldrwq_gather_offset_z_fv4sf((__builtin_neon_si *) __base, __offset, __p); | |
18972 | } | |
18973 | ||
18974 | __extension__ extern __inline float32x4_t | |
18975 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18976 | __arm_vldrwq_gather_shifted_offset_f32 (float32_t const * __base, uint32x4_t __offset) | |
18977 | { | |
18978 | return __builtin_mve_vldrwq_gather_shifted_offset_fv4sf (__base, __offset); | |
18979 | } | |
18980 | ||
18981 | __extension__ extern __inline float32x4_t | |
18982 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18983 | __arm_vldrwq_gather_shifted_offset_z_f32 (float32_t const * __base, uint32x4_t __offset, mve_pred16_t __p) | |
18984 | { | |
18985 | return __builtin_mve_vldrwq_gather_shifted_offset_z_fv4sf (__base, __offset, __p); | |
18986 | } | |
18987 | ||
18988 | __extension__ extern __inline void | |
18989 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18990 | __arm_vstrwq_p_f32 (float32_t * __addr, float32x4_t __value, mve_pred16_t __p) | |
18991 | { | |
18992 | __builtin_mve_vstrwq_p_fv4sf (__addr, __value, __p); | |
18993 | } | |
18994 | ||
18995 | __extension__ extern __inline void | |
18996 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18997 | __arm_vstrwq_f32 (float32_t * __addr, float32x4_t __value) | |
18998 | { | |
18999 | __builtin_mve_vstrwq_fv4sf (__addr, __value); | |
19000 | } | |
19001 | ||
19002 | __extension__ extern __inline void | |
19003 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
19004 | __arm_vst1q_f32 (float32_t * __addr, float32x4_t __value) | |
19005 | { | |
19006 | __builtin_mve_vst1q_fv4sf (__addr, __value); | |
19007 | } | |
19008 | ||
19009 | __extension__ extern __inline void | |
19010 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
19011 | __arm_vst1q_f16 (float16_t * __addr, float16x8_t __value) | |
19012 | { | |
19013 | __builtin_mve_vst1q_fv8hf (__addr, __value); | |
19014 | } | |
19015 | ||
19016 | __extension__ extern __inline void | |
19017 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
19018 | __arm_vstrhq_f16 (float16_t * __addr, float16x8_t __value) | |
19019 | { | |
19020 | __builtin_mve_vstrhq_fv8hf (__addr, __value); | |
19021 | } | |
19022 | ||
19023 | __extension__ extern __inline void | |
19024 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
19025 | __arm_vstrhq_p_f16 (float16_t * __addr, float16x8_t __value, mve_pred16_t __p) | |
19026 | { | |
19027 | __builtin_mve_vstrhq_p_fv8hf (__addr, __value, __p); | |
19028 | } | |
19029 | ||
19030 | __extension__ extern __inline void | |
19031 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
19032 | __arm_vstrhq_scatter_offset_f16 (float16_t * __base, uint16x8_t __offset, float16x8_t __value) | |
19033 | { | |
19034 | __builtin_mve_vstrhq_scatter_offset_fv8hf (__base, __offset, __value); | |
19035 | } | |
19036 | ||
19037 | __extension__ extern __inline void | |
19038 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
19039 | __arm_vstrhq_scatter_offset_p_f16 (float16_t * __base, uint16x8_t __offset, float16x8_t __value, mve_pred16_t __p) | |
19040 | { | |
19041 | __builtin_mve_vstrhq_scatter_offset_p_fv8hf (__base, __offset, __value, __p); | |
19042 | } | |
19043 | ||
19044 | __extension__ extern __inline void | |
19045 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
19046 | __arm_vstrhq_scatter_shifted_offset_f16 (float16_t * __base, uint16x8_t __offset, float16x8_t __value) | |
19047 | { | |
19048 | __builtin_mve_vstrhq_scatter_shifted_offset_fv8hf (__base, __offset, __value); | |
19049 | } | |
19050 | ||
19051 | __extension__ extern __inline void | |
19052 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
19053 | __arm_vstrhq_scatter_shifted_offset_p_f16 (float16_t * __base, uint16x8_t __offset, float16x8_t __value, mve_pred16_t __p) | |
19054 | { | |
19055 | __builtin_mve_vstrhq_scatter_shifted_offset_p_fv8hf (__base, __offset, __value, __p); | |
19056 | } | |
19057 | ||
19058 | __extension__ extern __inline void | |
19059 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
19060 | __arm_vstrwq_scatter_base_f32 (uint32x4_t __addr, const int __offset, float32x4_t __value) | |
19061 | { | |
19062 | __builtin_mve_vstrwq_scatter_base_fv4sf (__addr, __offset, __value); | |
19063 | } | |
19064 | ||
19065 | __extension__ extern __inline void | |
19066 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
19067 | __arm_vstrwq_scatter_base_p_f32 (uint32x4_t __addr, const int __offset, float32x4_t __value, mve_pred16_t __p) | |
19068 | { | |
19069 | __builtin_mve_vstrwq_scatter_base_p_fv4sf (__addr, __offset, __value, __p); | |
19070 | } | |
19071 | ||
19072 | __extension__ extern __inline void | |
19073 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
19074 | __arm_vstrwq_scatter_offset_f32 (float32_t * __base, uint32x4_t __offset, float32x4_t __value) | |
19075 | { | |
19076 | __builtin_mve_vstrwq_scatter_offset_fv4sf (__base, __offset, __value); | |
19077 | } | |
19078 | ||
19079 | __extension__ extern __inline void | |
19080 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
19081 | __arm_vstrwq_scatter_offset_p_f32 (float32_t * __base, uint32x4_t __offset, float32x4_t __value, mve_pred16_t __p) | |
19082 | { | |
19083 | __builtin_mve_vstrwq_scatter_offset_p_fv4sf (__base, __offset, __value, __p); | |
19084 | } | |
19085 | ||
19086 | __extension__ extern __inline void | |
19087 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
19088 | __arm_vstrwq_scatter_shifted_offset_f32 (float32_t * __base, uint32x4_t __offset, float32x4_t __value) | |
19089 | { | |
19090 | __builtin_mve_vstrwq_scatter_shifted_offset_fv4sf (__base, __offset, __value); | |
19091 | } | |
19092 | ||
19093 | __extension__ extern __inline void | |
19094 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
19095 | __arm_vstrwq_scatter_shifted_offset_p_f32 (float32_t * __base, uint32x4_t __offset, float32x4_t __value, mve_pred16_t __p) | |
19096 | { | |
19097 | __builtin_mve_vstrwq_scatter_shifted_offset_p_fv4sf (__base, __offset, __value, __p); | |
19098 | } | |
19099 | ||
19100 | __extension__ extern __inline float16x8_t | |
19101 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
19102 | __arm_vaddq_f16 (float16x8_t __a, float16x8_t __b) | |
19103 | { | |
19104 | return __a + __b; | |
19105 | } | |
19106 | ||
19107 | __extension__ extern __inline float32x4_t | |
19108 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
19109 | __arm_vaddq_f32 (float32x4_t __a, float32x4_t __b) | |
19110 | { | |
19111 | return __a + __b; | |
19112 | } | |
19113 | ||
19114 | __extension__ extern __inline float16x8_t | |
19115 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
19116 | __arm_vuninitializedq_f16 (void) | |
19117 | { | |
19118 | float16x8_t __uninit; | |
19119 | __asm__ ("": "=w" (__uninit)); | |
19120 | return __uninit; | |
19121 | } | |
19122 | ||
19123 | __extension__ extern __inline float32x4_t | |
19124 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
19125 | __arm_vuninitializedq_f32 (void) | |
19126 | { | |
19127 | float32x4_t __uninit; | |
19128 | __asm__ ("": "=w" (__uninit)); | |
19129 | return __uninit; | |
19130 | } | |
19131 | ||
19132 | __extension__ extern __inline int32x4_t | |
19133 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
19134 | __arm_vreinterpretq_s32_f16 (float16x8_t __a) | |
19135 | { | |
19136 | return (int32x4_t) __a; | |
19137 | } | |
19138 | ||
19139 | __extension__ extern __inline int32x4_t | |
19140 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
19141 | __arm_vreinterpretq_s32_f32 (float32x4_t __a) | |
19142 | { | |
19143 | return (int32x4_t) __a; | |
19144 | } | |
19145 | ||
19146 | __extension__ extern __inline int16x8_t | |
19147 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
19148 | __arm_vreinterpretq_s16_f16 (float16x8_t __a) | |
19149 | { | |
19150 | return (int16x8_t) __a; | |
19151 | } | |
19152 | ||
19153 | __extension__ extern __inline int16x8_t | |
19154 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
19155 | __arm_vreinterpretq_s16_f32 (float32x4_t __a) | |
19156 | { | |
19157 | return (int16x8_t) __a; | |
19158 | } | |
19159 | ||
19160 | __extension__ extern __inline int64x2_t | |
19161 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
19162 | __arm_vreinterpretq_s64_f16 (float16x8_t __a) | |
19163 | { | |
19164 | return (int64x2_t) __a; | |
19165 | } | |
19166 | ||
19167 | __extension__ extern __inline int64x2_t | |
19168 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
19169 | __arm_vreinterpretq_s64_f32 (float32x4_t __a) | |
19170 | { | |
19171 | return (int64x2_t) __a; | |
19172 | } | |
19173 | ||
19174 | __extension__ extern __inline int8x16_t | |
19175 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
19176 | __arm_vreinterpretq_s8_f16 (float16x8_t __a) | |
19177 | { | |
19178 | return (int8x16_t) __a; | |
19179 | } | |
19180 | ||
19181 | __extension__ extern __inline int8x16_t | |
19182 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
19183 | __arm_vreinterpretq_s8_f32 (float32x4_t __a) | |
19184 | { | |
19185 | return (int8x16_t) __a; | |
19186 | } | |
19187 | ||
19188 | __extension__ extern __inline uint16x8_t | |
19189 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
19190 | __arm_vreinterpretq_u16_f16 (float16x8_t __a) | |
19191 | { | |
19192 | return (uint16x8_t) __a; | |
19193 | } | |
19194 | ||
19195 | __extension__ extern __inline uint16x8_t | |
19196 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
19197 | __arm_vreinterpretq_u16_f32 (float32x4_t __a) | |
19198 | { | |
19199 | return (uint16x8_t) __a; | |
19200 | } | |
19201 | ||
19202 | __extension__ extern __inline uint32x4_t | |
19203 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
19204 | __arm_vreinterpretq_u32_f16 (float16x8_t __a) | |
19205 | { | |
19206 | return (uint32x4_t) __a; | |
19207 | } | |
19208 | ||
19209 | __extension__ extern __inline uint32x4_t | |
19210 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
19211 | __arm_vreinterpretq_u32_f32 (float32x4_t __a) | |
19212 | { | |
19213 | return (uint32x4_t) __a; | |
19214 | } | |
19215 | ||
19216 | __extension__ extern __inline uint64x2_t | |
19217 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
19218 | __arm_vreinterpretq_u64_f16 (float16x8_t __a) | |
19219 | { | |
19220 | return (uint64x2_t) __a; | |
19221 | } | |
19222 | ||
19223 | __extension__ extern __inline uint64x2_t | |
19224 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
19225 | __arm_vreinterpretq_u64_f32 (float32x4_t __a) | |
19226 | { | |
19227 | return (uint64x2_t) __a; | |
19228 | } | |
19229 | ||
19230 | __extension__ extern __inline uint8x16_t | |
19231 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
19232 | __arm_vreinterpretq_u8_f16 (float16x8_t __a) | |
19233 | { | |
19234 | return (uint8x16_t) __a; | |
19235 | } | |
19236 | ||
19237 | __extension__ extern __inline uint8x16_t | |
19238 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
19239 | __arm_vreinterpretq_u8_f32 (float32x4_t __a) | |
19240 | { | |
19241 | return (uint8x16_t) __a; | |
19242 | } | |
19243 | ||
19244 | __extension__ extern __inline float16x8_t | |
19245 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
19246 | __arm_vreinterpretq_f16_f32 (float32x4_t __a) | |
19247 | { | |
19248 | return (float16x8_t) __a; | |
19249 | } | |
19250 | ||
19251 | __extension__ extern __inline float16x8_t | |
19252 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
19253 | __arm_vreinterpretq_f16_s16 (int16x8_t __a) | |
19254 | { | |
19255 | return (float16x8_t) __a; | |
19256 | } | |
19257 | ||
19258 | __extension__ extern __inline float16x8_t | |
19259 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
19260 | __arm_vreinterpretq_f16_s32 (int32x4_t __a) | |
19261 | { | |
19262 | return (float16x8_t) __a; | |
19263 | } | |
19264 | ||
19265 | __extension__ extern __inline float16x8_t | |
19266 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
19267 | __arm_vreinterpretq_f16_s64 (int64x2_t __a) | |
19268 | { | |
19269 | return (float16x8_t) __a; | |
19270 | } | |
19271 | ||
19272 | __extension__ extern __inline float16x8_t | |
19273 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
19274 | __arm_vreinterpretq_f16_s8 (int8x16_t __a) | |
19275 | { | |
19276 | return (float16x8_t) __a; | |
19277 | } | |
19278 | ||
19279 | __extension__ extern __inline float16x8_t | |
19280 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
19281 | __arm_vreinterpretq_f16_u16 (uint16x8_t __a) | |
19282 | { | |
19283 | return (float16x8_t) __a; | |
19284 | } | |
19285 | ||
19286 | __extension__ extern __inline float16x8_t | |
19287 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
19288 | __arm_vreinterpretq_f16_u32 (uint32x4_t __a) | |
19289 | { | |
19290 | return (float16x8_t) __a; | |
19291 | } | |
19292 | ||
19293 | __extension__ extern __inline float16x8_t | |
19294 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
19295 | __arm_vreinterpretq_f16_u64 (uint64x2_t __a) | |
532e9e24 | 19296 | { |
261014a1 | 19297 | return (float16x8_t) __a; |
532e9e24 SP |
19298 | } |
19299 | ||
19300 | __extension__ extern __inline float16x8_t | |
19301 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 19302 | __arm_vreinterpretq_f16_u8 (uint8x16_t __a) |
532e9e24 | 19303 | { |
261014a1 | 19304 | return (float16x8_t) __a; |
532e9e24 SP |
19305 | } |
19306 | ||
19307 | __extension__ extern __inline float32x4_t | |
19308 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 19309 | __arm_vreinterpretq_f32_f16 (float16x8_t __a) |
532e9e24 | 19310 | { |
261014a1 | 19311 | return (float32x4_t) __a; |
532e9e24 SP |
19312 | } |
19313 | ||
261014a1 | 19314 | __extension__ extern __inline float32x4_t |
532e9e24 | 19315 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 19316 | __arm_vreinterpretq_f32_s16 (int16x8_t __a) |
532e9e24 | 19317 | { |
261014a1 | 19318 | return (float32x4_t) __a; |
532e9e24 SP |
19319 | } |
19320 | ||
19321 | __extension__ extern __inline float32x4_t | |
19322 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 19323 | __arm_vreinterpretq_f32_s32 (int32x4_t __a) |
532e9e24 | 19324 | { |
261014a1 | 19325 | return (float32x4_t) __a; |
532e9e24 SP |
19326 | } |
19327 | ||
261014a1 | 19328 | __extension__ extern __inline float32x4_t |
532e9e24 | 19329 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 19330 | __arm_vreinterpretq_f32_s64 (int64x2_t __a) |
532e9e24 | 19331 | { |
261014a1 | 19332 | return (float32x4_t) __a; |
532e9e24 SP |
19333 | } |
19334 | ||
19335 | __extension__ extern __inline float32x4_t | |
19336 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 19337 | __arm_vreinterpretq_f32_s8 (int8x16_t __a) |
532e9e24 | 19338 | { |
261014a1 | 19339 | return (float32x4_t) __a; |
532e9e24 SP |
19340 | } |
19341 | ||
261014a1 | 19342 | __extension__ extern __inline float32x4_t |
532e9e24 | 19343 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 19344 | __arm_vreinterpretq_f32_u16 (uint16x8_t __a) |
532e9e24 | 19345 | { |
261014a1 | 19346 | return (float32x4_t) __a; |
532e9e24 SP |
19347 | } |
19348 | ||
261014a1 | 19349 | __extension__ extern __inline float32x4_t |
532e9e24 | 19350 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 19351 | __arm_vreinterpretq_f32_u32 (uint32x4_t __a) |
532e9e24 | 19352 | { |
261014a1 | 19353 | return (float32x4_t) __a; |
532e9e24 SP |
19354 | } |
19355 | ||
261014a1 | 19356 | __extension__ extern __inline float32x4_t |
532e9e24 | 19357 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 19358 | __arm_vreinterpretq_f32_u64 (uint64x2_t __a) |
532e9e24 | 19359 | { |
261014a1 | 19360 | return (float32x4_t) __a; |
532e9e24 SP |
19361 | } |
19362 | ||
261014a1 | 19363 | __extension__ extern __inline float32x4_t |
532e9e24 | 19364 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 19365 | __arm_vreinterpretq_f32_u8 (uint8x16_t __a) |
532e9e24 | 19366 | { |
261014a1 | 19367 | return (float32x4_t) __a; |
532e9e24 SP |
19368 | } |
19369 | ||
261014a1 | 19370 | __extension__ extern __inline float32x4_t |
532e9e24 | 19371 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 19372 | __arm_vldrwq_gather_base_wb_f32 (uint32x4_t * __addr, const int __offset) |
532e9e24 | 19373 | { |
261014a1 | 19374 | float32x4_t |
ff825b81 SP |
19375 | result = __builtin_mve_vldrwq_gather_base_nowb_fv4sf (*__addr, __offset); |
19376 | *__addr = __builtin_mve_vldrwq_gather_base_wb_fv4sf (*__addr, __offset); | |
261014a1 | 19377 | return result; |
532e9e24 SP |
19378 | } |
19379 | ||
19380 | __extension__ extern __inline float32x4_t | |
19381 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 19382 | __arm_vldrwq_gather_base_wb_z_f32 (uint32x4_t * __addr, const int __offset, mve_pred16_t __p) |
532e9e24 | 19383 | { |
261014a1 | 19384 | float32x4_t |
ff825b81 SP |
19385 | result = __builtin_mve_vldrwq_gather_base_nowb_z_fv4sf (*__addr, __offset, __p); |
19386 | *__addr = __builtin_mve_vldrwq_gather_base_wb_z_fv4sf (*__addr, __offset, __p); | |
261014a1 | 19387 | return result; |
532e9e24 SP |
19388 | } |
19389 | ||
261014a1 | 19390 | __extension__ extern __inline void |
532e9e24 | 19391 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 19392 | __arm_vstrwq_scatter_base_wb_f32 (uint32x4_t * __addr, const int __offset, float32x4_t __value) |
532e9e24 | 19393 | { |
261014a1 SP |
19394 | __builtin_mve_vstrwq_scatter_base_wb_fv4sf (*__addr, __offset, __value); |
19395 | __builtin_mve_vstrwq_scatter_base_wb_add_fv4sf (*__addr, __offset, *__addr); | |
532e9e24 SP |
19396 | } |
19397 | ||
261014a1 | 19398 | __extension__ extern __inline void |
532e9e24 | 19399 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 19400 | __arm_vstrwq_scatter_base_wb_p_f32 (uint32x4_t * __addr, const int __offset, float32x4_t __value, mve_pred16_t __p) |
532e9e24 | 19401 | { |
261014a1 SP |
19402 | __builtin_mve_vstrwq_scatter_base_wb_p_fv4sf (*__addr, __offset, __value, __p); |
19403 | __builtin_mve_vstrwq_scatter_base_wb_p_add_fv4sf (*__addr, __offset, *__addr, __p); | |
532e9e24 SP |
19404 | } |
19405 | ||
19406 | __extension__ extern __inline float16x8_t | |
19407 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 19408 | __arm_vdupq_x_n_f16 (float16_t __a, mve_pred16_t __p) |
532e9e24 | 19409 | { |
261014a1 | 19410 | return __builtin_mve_vdupq_m_n_fv8hf (vuninitializedq_f16 (), __a, __p); |
532e9e24 SP |
19411 | } |
19412 | ||
19413 | __extension__ extern __inline float32x4_t | |
19414 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 19415 | __arm_vdupq_x_n_f32 (float32_t __a, mve_pred16_t __p) |
532e9e24 | 19416 | { |
261014a1 | 19417 | return __builtin_mve_vdupq_m_n_fv4sf (vuninitializedq_f32 (), __a, __p); |
532e9e24 SP |
19418 | } |
19419 | ||
19420 | __extension__ extern __inline float16x8_t | |
19421 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 19422 | __arm_vminnmq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p) |
532e9e24 | 19423 | { |
261014a1 | 19424 | return __builtin_mve_vminnmq_m_fv8hf (vuninitializedq_f16 (), __a, __b, __p); |
532e9e24 SP |
19425 | } |
19426 | ||
19427 | __extension__ extern __inline float32x4_t | |
19428 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 19429 | __arm_vminnmq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p) |
532e9e24 | 19430 | { |
261014a1 | 19431 | return __builtin_mve_vminnmq_m_fv4sf (vuninitializedq_f32 (), __a, __b, __p); |
532e9e24 SP |
19432 | } |
19433 | ||
19434 | __extension__ extern __inline float16x8_t | |
19435 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 19436 | __arm_vmaxnmq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p) |
532e9e24 | 19437 | { |
261014a1 | 19438 | return __builtin_mve_vmaxnmq_m_fv8hf (vuninitializedq_f16 (), __a, __b, __p); |
532e9e24 SP |
19439 | } |
19440 | ||
19441 | __extension__ extern __inline float32x4_t | |
19442 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 19443 | __arm_vmaxnmq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p) |
532e9e24 | 19444 | { |
261014a1 | 19445 | return __builtin_mve_vmaxnmq_m_fv4sf (vuninitializedq_f32 (), __a, __b, __p); |
532e9e24 SP |
19446 | } |
19447 | ||
19448 | __extension__ extern __inline float16x8_t | |
19449 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 19450 | __arm_vabdq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p) |
532e9e24 | 19451 | { |
261014a1 | 19452 | return __builtin_mve_vabdq_m_fv8hf (vuninitializedq_f16 (), __a, __b, __p); |
532e9e24 SP |
19453 | } |
19454 | ||
19455 | __extension__ extern __inline float32x4_t | |
19456 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 19457 | __arm_vabdq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p) |
532e9e24 | 19458 | { |
261014a1 | 19459 | return __builtin_mve_vabdq_m_fv4sf (vuninitializedq_f32 (), __a, __b, __p); |
532e9e24 SP |
19460 | } |
19461 | ||
19462 | __extension__ extern __inline float16x8_t | |
19463 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 19464 | __arm_vabsq_x_f16 (float16x8_t __a, mve_pred16_t __p) |
532e9e24 | 19465 | { |
261014a1 | 19466 | return __builtin_mve_vabsq_m_fv8hf (vuninitializedq_f16 (), __a, __p); |
532e9e24 SP |
19467 | } |
19468 | ||
19469 | __extension__ extern __inline float32x4_t | |
19470 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 19471 | __arm_vabsq_x_f32 (float32x4_t __a, mve_pred16_t __p) |
532e9e24 | 19472 | { |
261014a1 | 19473 | return __builtin_mve_vabsq_m_fv4sf (vuninitializedq_f32 (), __a, __p); |
532e9e24 SP |
19474 | } |
19475 | ||
19476 | __extension__ extern __inline float16x8_t | |
19477 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 19478 | __arm_vaddq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p) |
532e9e24 | 19479 | { |
261014a1 | 19480 | return __builtin_mve_vaddq_m_fv8hf (vuninitializedq_f16 (), __a, __b, __p); |
532e9e24 SP |
19481 | } |
19482 | ||
19483 | __extension__ extern __inline float32x4_t | |
19484 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 19485 | __arm_vaddq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p) |
532e9e24 | 19486 | { |
261014a1 | 19487 | return __builtin_mve_vaddq_m_fv4sf (vuninitializedq_f32 (), __a, __b, __p); |
532e9e24 SP |
19488 | } |
19489 | ||
19490 | __extension__ extern __inline float16x8_t | |
19491 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 19492 | __arm_vaddq_x_n_f16 (float16x8_t __a, float16_t __b, mve_pred16_t __p) |
532e9e24 | 19493 | { |
261014a1 | 19494 | return __builtin_mve_vaddq_m_n_fv8hf (vuninitializedq_f16 (), __a, __b, __p); |
532e9e24 SP |
19495 | } |
19496 | ||
19497 | __extension__ extern __inline float32x4_t | |
19498 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 19499 | __arm_vaddq_x_n_f32 (float32x4_t __a, float32_t __b, mve_pred16_t __p) |
532e9e24 | 19500 | { |
261014a1 | 19501 | return __builtin_mve_vaddq_m_n_fv4sf (vuninitializedq_f32 (), __a, __b, __p); |
532e9e24 SP |
19502 | } |
19503 | ||
19504 | __extension__ extern __inline float16x8_t | |
19505 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 19506 | __arm_vnegq_x_f16 (float16x8_t __a, mve_pred16_t __p) |
532e9e24 | 19507 | { |
261014a1 | 19508 | return __builtin_mve_vnegq_m_fv8hf (vuninitializedq_f16 (), __a, __p); |
532e9e24 SP |
19509 | } |
19510 | ||
19511 | __extension__ extern __inline float32x4_t | |
19512 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 19513 | __arm_vnegq_x_f32 (float32x4_t __a, mve_pred16_t __p) |
532e9e24 | 19514 | { |
261014a1 | 19515 | return __builtin_mve_vnegq_m_fv4sf (vuninitializedq_f32 (), __a, __p); |
532e9e24 SP |
19516 | } |
19517 | ||
19518 | __extension__ extern __inline float16x8_t | |
19519 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 19520 | __arm_vmulq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p) |
532e9e24 | 19521 | { |
261014a1 | 19522 | return __builtin_mve_vmulq_m_fv8hf (vuninitializedq_f16 (), __a, __b, __p); |
532e9e24 SP |
19523 | } |
19524 | ||
19525 | __extension__ extern __inline float32x4_t | |
19526 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 19527 | __arm_vmulq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p) |
532e9e24 | 19528 | { |
261014a1 | 19529 | return __builtin_mve_vmulq_m_fv4sf (vuninitializedq_f32 (), __a, __b, __p); |
532e9e24 SP |
19530 | } |
19531 | ||
19532 | __extension__ extern __inline float16x8_t | |
19533 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 19534 | __arm_vmulq_x_n_f16 (float16x8_t __a, float16_t __b, mve_pred16_t __p) |
532e9e24 | 19535 | { |
261014a1 | 19536 | return __builtin_mve_vmulq_m_n_fv8hf (vuninitializedq_f16 (), __a, __b, __p); |
532e9e24 SP |
19537 | } |
19538 | ||
19539 | __extension__ extern __inline float32x4_t | |
19540 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 19541 | __arm_vmulq_x_n_f32 (float32x4_t __a, float32_t __b, mve_pred16_t __p) |
532e9e24 | 19542 | { |
261014a1 | 19543 | return __builtin_mve_vmulq_m_n_fv4sf (vuninitializedq_f32 (), __a, __b, __p); |
532e9e24 SP |
19544 | } |
19545 | ||
19546 | __extension__ extern __inline float16x8_t | |
19547 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 19548 | __arm_vsubq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p) |
532e9e24 | 19549 | { |
261014a1 | 19550 | return __builtin_mve_vsubq_m_fv8hf (vuninitializedq_f16 (), __a, __b, __p); |
532e9e24 SP |
19551 | } |
19552 | ||
19553 | __extension__ extern __inline float32x4_t | |
19554 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 19555 | __arm_vsubq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p) |
532e9e24 | 19556 | { |
261014a1 | 19557 | return __builtin_mve_vsubq_m_fv4sf (vuninitializedq_f32 (), __a, __b, __p); |
532e9e24 SP |
19558 | } |
19559 | ||
19560 | __extension__ extern __inline float16x8_t | |
19561 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 19562 | __arm_vsubq_x_n_f16 (float16x8_t __a, float16_t __b, mve_pred16_t __p) |
532e9e24 | 19563 | { |
261014a1 | 19564 | return __builtin_mve_vsubq_m_n_fv8hf (vuninitializedq_f16 (), __a, __b, __p); |
532e9e24 | 19565 | } |
429d607b | 19566 | |
bf1e3d5a SP |
19567 | __extension__ extern __inline float32x4_t |
19568 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 19569 | __arm_vsubq_x_n_f32 (float32x4_t __a, float32_t __b, mve_pred16_t __p) |
bf1e3d5a | 19570 | { |
261014a1 | 19571 | return __builtin_mve_vsubq_m_n_fv4sf (vuninitializedq_f32 (), __a, __b, __p); |
bf1e3d5a SP |
19572 | } |
19573 | ||
19574 | __extension__ extern __inline float16x8_t | |
19575 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 19576 | __arm_vcaddq_rot90_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p) |
bf1e3d5a | 19577 | { |
261014a1 | 19578 | return __builtin_mve_vcaddq_rot90_m_fv8hf (vuninitializedq_f16 (), __a, __b, __p); |
bf1e3d5a SP |
19579 | } |
19580 | ||
19581 | __extension__ extern __inline float32x4_t | |
19582 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 19583 | __arm_vcaddq_rot90_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p) |
bf1e3d5a | 19584 | { |
261014a1 | 19585 | return __builtin_mve_vcaddq_rot90_m_fv4sf (vuninitializedq_f32 (), __a, __b, __p); |
bf1e3d5a SP |
19586 | } |
19587 | ||
261014a1 | 19588 | __extension__ extern __inline float16x8_t |
bf1e3d5a | 19589 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 19590 | __arm_vcaddq_rot270_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p) |
bf1e3d5a | 19591 | { |
261014a1 | 19592 | return __builtin_mve_vcaddq_rot270_m_fv8hf (vuninitializedq_f16 (), __a, __b, __p); |
bf1e3d5a SP |
19593 | } |
19594 | ||
261014a1 | 19595 | __extension__ extern __inline float32x4_t |
bf1e3d5a | 19596 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 19597 | __arm_vcaddq_rot270_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p) |
bf1e3d5a | 19598 | { |
261014a1 | 19599 | return __builtin_mve_vcaddq_rot270_m_fv4sf (vuninitializedq_f32 (), __a, __b, __p); |
bf1e3d5a SP |
19600 | } |
19601 | ||
/* vcmulq "_x" (don't-care predicated) variants: complex multiply with the
   second operand rotated by 0/90/180/270 degrees, for f16 and f32.
   Each simply forwards to the merging (_m) builtin with an uninitialized
   inactive-lane vector, so predicated-off lanes are unspecified.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmulq_m_fv8hf (vuninitializedq_f16 (), __a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmulq_m_fv4sf (vuninitializedq_f32 (), __a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_rot90_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmulq_rot90_m_fv8hf (vuninitializedq_f16 (), __a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_rot90_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmulq_rot90_m_fv4sf (vuninitializedq_f32 (), __a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_rot180_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmulq_rot180_m_fv8hf (vuninitializedq_f16 (), __a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_rot180_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmulq_rot180_m_fv4sf (vuninitializedq_f32 (), __a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_rot270_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmulq_rot270_m_fv8hf (vuninitializedq_f16 (), __a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_rot270_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmulq_rot270_m_fv4sf (vuninitializedq_f32 (), __a, __b, __p);
}
19657 | ||
/* Float-to-integer conversions with explicit rounding mode, "_x" predicated
   forms (predicated-off result lanes unspecified):
     vcvtaq - round to nearest, ties away from zero
     vcvtnq - round to nearest, ties to even
     vcvtpq - round towards plus infinity
     vcvtmq - round towards minus infinity
   (Rounding-mode meanings per the Arm MVE intrinsics naming; the code here
   only forwards to the corresponding _m builtins.)  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtaq_x_s16_f16 (float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtaq_m_sv8hi (vuninitializedq_s16 (), __a, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtaq_x_s32_f32 (float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtaq_m_sv4si (vuninitializedq_s32 (), __a, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtaq_x_u16_f16 (float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtaq_m_uv8hi (vuninitializedq_u16 (), __a, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtaq_x_u32_f32 (float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtaq_m_uv4si (vuninitializedq_u32 (), __a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtnq_x_s16_f16 (float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtnq_m_sv8hi (vuninitializedq_s16 (), __a, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtnq_x_s32_f32 (float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtnq_m_sv4si (vuninitializedq_s32 (), __a, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtnq_x_u16_f16 (float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtnq_m_uv8hi (vuninitializedq_u16 (), __a, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtnq_x_u32_f32 (float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtnq_m_uv4si (vuninitializedq_u32 (), __a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtpq_x_s16_f16 (float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtpq_m_sv8hi (vuninitializedq_s16 (), __a, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtpq_x_s32_f32 (float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtpq_m_sv4si (vuninitializedq_s32 (), __a, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtpq_x_u16_f16 (float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtpq_m_uv8hi (vuninitializedq_u16 (), __a, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtpq_x_u32_f32 (float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtpq_m_uv4si (vuninitializedq_u32 (), __a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtmq_x_s16_f16 (float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtmq_m_sv8hi (vuninitializedq_s16 (), __a, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtmq_x_s32_f32 (float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtmq_m_sv4si (vuninitializedq_s32 (), __a, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtmq_x_u16_f16 (float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtmq_m_uv8hi (vuninitializedq_u16 (), __a, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtmq_x_u32_f32 (float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtmq_m_uv4si (vuninitializedq_u32 (), __a, __p);
}
19769 | ||
/* Widen f16 to f32, "_x" predicated forms: vcvtbq converts the bottom
   (even-numbered) f16 lanes, vcvttq the top (odd-numbered) lanes, per the
   MVE vcvtb/vcvtt naming.  Inactive result lanes are unspecified.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtbq_x_f32_f16 (float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtbq_m_f32_f16v4sf (vuninitializedq_f32 (), __a, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvttq_x_f32_f16 (float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvttq_m_f32_f16v4sf (vuninitializedq_f32 (), __a, __p);
}
19783 | ||
/* vcvtq "_x" predicated conversions between integer and floating point,
   in both directions, plus the "_n" fixed-point forms.  __imm6 is a
   compile-time constant scaling immediate (the name suggests a 6-bit
   fraction-bit count; range checking happens in the builtin expansion —
   confirm against the ACLE spec).  Inactive result lanes unspecified.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_x_f16_u16 (uint16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_to_f_uv8hf (vuninitializedq_f16 (), __a, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_x_f16_s16 (int16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_to_f_sv8hf (vuninitializedq_f16 (), __a, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_x_f32_s32 (int32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_to_f_sv4sf (vuninitializedq_f32 (), __a, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_x_f32_u32 (uint32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_to_f_uv4sf (vuninitializedq_f32 (), __a, __p);
}

/* Fixed-point to float, scaled by __imm6 fraction bits.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_x_n_f16_s16 (int16x8_t __a, const int __imm6, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_n_to_f_sv8hf (vuninitializedq_f16 (), __a, __imm6, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_x_n_f16_u16 (uint16x8_t __a, const int __imm6, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_n_to_f_uv8hf (vuninitializedq_f16 (), __a, __imm6, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_x_n_f32_s32 (int32x4_t __a, const int __imm6, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_n_to_f_sv4sf (vuninitializedq_f32 (), __a, __imm6, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_x_n_f32_u32 (uint32x4_t __a, const int __imm6, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_n_to_f_uv4sf (vuninitializedq_f32 (), __a, __imm6, __p);
}

/* Float to integer.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_x_s16_f16 (float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_from_f_sv8hi (vuninitializedq_s16 (), __a, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_x_s32_f32 (float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_from_f_sv4si (vuninitializedq_s32 (), __a, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_x_u16_f16 (float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_from_f_uv8hi (vuninitializedq_u16 (), __a, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_x_u32_f32 (float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_from_f_uv4si (vuninitializedq_u32 (), __a, __p);
}

/* Float to fixed-point, scaled by __imm6 fraction bits.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_x_n_s16_f16 (float16x8_t __a, const int __imm6, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_n_from_f_sv8hi (vuninitializedq_s16 (), __a, __imm6, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_x_n_s32_f32 (float32x4_t __a, const int __imm6, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_n_from_f_sv4si (vuninitializedq_s32 (), __a, __imm6, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_x_n_u16_f16 (float16x8_t __a, const int __imm6, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_n_from_f_uv8hi (vuninitializedq_u16 (), __a, __imm6, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_x_n_u32_f32 (float32x4_t __a, const int __imm6, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_n_from_f_uv4si (vuninitializedq_u32 (), __a, __imm6, __p);
}
19895 | ||
/* Round-to-integral family, "_x" predicated forms, f16/f32:
     vrndq  - towards zero          vrndnq - to nearest, ties even
     vrndmq - towards -infinity     vrndpq - towards +infinity
     vrndaq - nearest, ties away    vrndxq - uses current rounding mode,
                                             may raise inexact
   (Mode meanings per the MVE VRINT* instruction naming; the wrappers just
   forward to the _m builtins with an uninitialized inactive-lane seed.)  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndq_x_f16 (float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrndq_m_fv8hf (vuninitializedq_f16 (), __a, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndq_x_f32 (float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrndq_m_fv4sf (vuninitializedq_f32 (), __a, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndnq_x_f16 (float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrndnq_m_fv8hf (vuninitializedq_f16 (), __a, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndnq_x_f32 (float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrndnq_m_fv4sf (vuninitializedq_f32 (), __a, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndmq_x_f16 (float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrndmq_m_fv8hf (vuninitializedq_f16 (), __a, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndmq_x_f32 (float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrndmq_m_fv4sf (vuninitializedq_f32 (), __a, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndpq_x_f16 (float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrndpq_m_fv8hf (vuninitializedq_f16 (), __a, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndpq_x_f32 (float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrndpq_m_fv4sf (vuninitializedq_f32 (), __a, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndaq_x_f16 (float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrndaq_m_fv8hf (vuninitializedq_f16 (), __a, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndaq_x_f32 (float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrndaq_m_fv4sf (vuninitializedq_f32 (), __a, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndxq_x_f16 (float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrndxq_m_fv8hf (vuninitializedq_f16 (), __a, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndxq_x_f32 (float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrndxq_m_fv4sf (vuninitializedq_f32 (), __a, __p);
}
19979 | ||
/* Bitwise operations on float vectors (operating on the raw bit patterns)
   plus vbrsrq, all in "_x" predicated form:
     vandq - AND            vbicq - AND-NOT (a & ~b)
     veorq - XOR            vornq - OR-NOT (a | ~b)
     vorrq - OR             vbrsrq - bit-reverse __a's lanes by the shift
                                     amount in scalar __b (per MVE VBRSR)
   Inactive result lanes are unspecified (seeded with vuninitializedq).  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vandq_m_fv8hf (vuninitializedq_f16 (), __a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vandq_m_fv4sf (vuninitializedq_f32 (), __a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vbicq_m_fv8hf (vuninitializedq_f16 (), __a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vbicq_m_fv4sf (vuninitializedq_f32 (), __a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq_x_n_f16 (float16x8_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vbrsrq_m_n_fv8hf (vuninitializedq_f16 (), __a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq_x_n_f32 (float32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vbrsrq_m_n_fv4sf (vuninitializedq_f32 (), __a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_veorq_m_fv8hf (vuninitializedq_f16 (), __a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_veorq_m_fv4sf (vuninitializedq_f32 (), __a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vornq_m_fv8hf (vuninitializedq_f16 (), __a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vornq_m_fv4sf (vuninitializedq_f32 (), __a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vorrq_m_fv8hf (vuninitializedq_f16 (), __a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vorrq_m_fv4sf (vuninitializedq_f32 (), __a, __b, __p);
}
20063 | ||
/* Element reversal within containers, "_x" predicated forms:
   vrev32q swaps the two f16 elements inside each 32-bit container;
   vrev64q reverses elements inside each 64-bit container.  There is no
   vrev32q for f32 (a 32-bit element fills its container).  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev32q_x_f16 (float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrev32q_m_fv8hf (vuninitializedq_f16 (), __a, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_x_f16 (float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrev64q_m_fv8hf (vuninitializedq_f16 (), __a, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_x_f32 (float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrev64q_m_fv4sf (vuninitializedq_f32 (), __a, __p);
}
20084 | ||
/* Structured/predicated loads and stores for float vectors.  The vld2/vld4
   and vst2 forms bounce through a union to reinterpret the builtins'
   opaque multi-register types (__builtin_neon_oi/__builtin_neon_xi) as the
   corresponding MVE tuple structs.  vld1q_z / vst1q_p are simple aliases
   of the predicated vldr/vstr intrinsics.  */
__extension__ extern __inline float16x8x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vld4q_f16 (float16_t const * __addr)
{
  /* Reinterpret the 4-register (XI mode) builtin result as a struct.  */
  union { float16x8x4_t __i; __builtin_neon_xi __o; } __rv;
  __rv.__o = __builtin_mve_vld4qv8hf (__addr);
  return __rv.__i;
}

__extension__ extern __inline float16x8x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vld2q_f16 (float16_t const * __addr)
{
  /* Reinterpret the 2-register (OI mode) builtin result as a struct.  */
  union { float16x8x2_t __i; __builtin_neon_oi __o; } __rv;
  __rv.__o = __builtin_mve_vld2qv8hf (__addr);
  return __rv.__i;
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vld1q_z_f16 (float16_t const *__base, mve_pred16_t __p)
{
  return vldrhq_z_f16 ( __base, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst2q_f16 (float16_t * __addr, float16x8x2_t __value)
{
  union { float16x8x2_t __i; __builtin_neon_oi __o; } __rv;
  __rv.__i = __value;
  __builtin_mve_vst2qv8hf (__addr, __rv.__o);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst1q_p_f16 (float16_t * __addr, float16x8_t __value, mve_pred16_t __p)
{
  return vstrhq_p_f16 (__addr, __value, __p);
}

__extension__ extern __inline float32x4x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vld4q_f32 (float32_t const * __addr)
{
  union { float32x4x4_t __i; __builtin_neon_xi __o; } __rv;
  __rv.__o = __builtin_mve_vld4qv4sf (__addr);
  return __rv.__i;
}

__extension__ extern __inline float32x4x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vld2q_f32 (float32_t const * __addr)
{
  union { float32x4x2_t __i; __builtin_neon_oi __o; } __rv;
  __rv.__o = __builtin_mve_vld2qv4sf (__addr);
  return __rv.__i;
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vld1q_z_f32 (float32_t const *__base, mve_pred16_t __p)
{
  return vldrwq_z_f32 ( __base, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst2q_f32 (float32_t * __addr, float32x4x2_t __value)
{
  union { float32x4x2_t __i; __builtin_neon_oi __o; } __rv;
  __rv.__i = __value;
  __builtin_mve_vst2qv4sf (__addr, __rv.__o);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst1q_p_f32 (float32_t * __addr, float32x4_t __value, mve_pred16_t __p)
{
  return vstrwq_p_f32 (__addr, __value, __p);
}
20166 | ||
/* Scalar lane insert/extract for float vectors.  __ARM_CHECK_LANEQ (defined
   earlier in this header) enforces a compile-time in-range lane index;
   __ARM_LANEQ maps the index to the endian-correct element position.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsetq_lane_f16 (float16_t __a, float16x8_t __b, const int __idx)
{
  __ARM_CHECK_LANEQ (__b, __idx);
  __b[__ARM_LANEQ(__b,__idx)] = __a;
  return __b;
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsetq_lane_f32 (float32_t __a, float32x4_t __b, const int __idx)
{
  __ARM_CHECK_LANEQ (__b, __idx);
  __b[__ARM_LANEQ(__b,__idx)] = __a;
  return __b;
}

__extension__ extern __inline float16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vgetq_lane_f16 (float16x8_t __a, const int __idx)
{
  __ARM_CHECK_LANEQ (__a, __idx);
  return __a[__ARM_LANEQ(__a,__idx)];
}

__extension__ extern __inline float32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vgetq_lane_f32 (float32x4_t __a, const int __idx)
{
  __ARM_CHECK_LANEQ (__a, __idx);
  return __a[__ARM_LANEQ(__a,__idx)];
}
e3678b44 SP |
20200 | #endif |
20201 | ||
/* Type-identifier constants used by the __ARM_mve_typeid _Generic macro
   (defined below) to dispatch the polymorphic intrinsic #defines.  The
   values are sequential starting at 1; order must match the _Generic
   association lists, so do not reorder.  */
enum {
  __ARM_mve_type_float16_t = 1,
  __ARM_mve_type_float16_t_ptr,
  __ARM_mve_type_float16_t_const_ptr,
  __ARM_mve_type_float16x8_t,
  __ARM_mve_type_float16x8x2_t,
  __ARM_mve_type_float16x8x4_t,
  __ARM_mve_type_float32_t,
  __ARM_mve_type_float32_t_ptr,
  __ARM_mve_type_float32_t_const_ptr,
  __ARM_mve_type_float32x4_t,
  __ARM_mve_type_float32x4x2_t,
  __ARM_mve_type_float32x4x4_t,
  __ARM_mve_type_int16_t,
  __ARM_mve_type_int16_t_ptr,
  __ARM_mve_type_int16_t_const_ptr,
  __ARM_mve_type_int16x8_t,
  __ARM_mve_type_int16x8x2_t,
  __ARM_mve_type_int16x8x4_t,
  __ARM_mve_type_int32_t,
  __ARM_mve_type_int32_t_ptr,
  __ARM_mve_type_int32_t_const_ptr,
  __ARM_mve_type_int32x4_t,
  __ARM_mve_type_int32x4x2_t,
  __ARM_mve_type_int32x4x4_t,
  __ARM_mve_type_int64_t,
  __ARM_mve_type_int64_t_ptr,
  __ARM_mve_type_int64_t_const_ptr,
  __ARM_mve_type_int64x2_t,
  __ARM_mve_type_int8_t,
  __ARM_mve_type_int8_t_ptr,
  __ARM_mve_type_int8_t_const_ptr,
  __ARM_mve_type_int8x16_t,
  __ARM_mve_type_int8x16x2_t,
  __ARM_mve_type_int8x16x4_t,
  __ARM_mve_type_uint16_t,
  __ARM_mve_type_uint16_t_ptr,
  __ARM_mve_type_uint16_t_const_ptr,
  __ARM_mve_type_uint16x8_t,
  __ARM_mve_type_uint16x8x2_t,
  __ARM_mve_type_uint16x8x4_t,
  __ARM_mve_type_uint32_t,
  __ARM_mve_type_uint32_t_ptr,
  __ARM_mve_type_uint32_t_const_ptr,
  __ARM_mve_type_uint32x4_t,
  __ARM_mve_type_uint32x4x2_t,
  __ARM_mve_type_uint32x4x4_t,
  __ARM_mve_type_uint64_t,
  __ARM_mve_type_uint64_t_ptr,
  __ARM_mve_type_uint64_t_const_ptr,
  __ARM_mve_type_uint64x2_t,
  __ARM_mve_type_uint8_t,
  __ARM_mve_type_uint8_t_ptr,
  __ARM_mve_type_uint8_t_const_ptr,
  __ARM_mve_type_uint8x16_t,
  __ARM_mve_type_uint8x16x2_t,
  __ARM_mve_type_uint8x16x4_t,
  __ARM_mve_unsupported_type
};
20261 | ||
/* __ARM_mve_typeid(x): yield the __ARM_mve_type_* identifier for the
   type of expression X, or __ARM_mve_unsupported_type.  The nested
   default _Generic catches plain C arithmetic types (e.g. the type of
   an integer literal), which are not compatible with the fixed-width
   typedefs on every target and so cannot sit in the outer list.
   Two variants: with and without the floating-point associations,
   selected on whether MVE-FP is available.  */
#if (__ARM_FEATURE_MVE & 2) /* MVE Floating point.  */
#define __ARM_mve_typeid(x) _Generic(x, \
  float16_t: __ARM_mve_type_float16_t, \
  float16_t *: __ARM_mve_type_float16_t_ptr, \
  float16_t const *: __ARM_mve_type_float16_t_const_ptr, \
  float16x8_t: __ARM_mve_type_float16x8_t, \
  float16x8x2_t: __ARM_mve_type_float16x8x2_t, \
  float16x8x4_t: __ARM_mve_type_float16x8x4_t, \
  float32_t: __ARM_mve_type_float32_t, \
  float32_t *: __ARM_mve_type_float32_t_ptr, \
  float32_t const *: __ARM_mve_type_float32_t_const_ptr, \
  float32x4_t: __ARM_mve_type_float32x4_t, \
  float32x4x2_t: __ARM_mve_type_float32x4x2_t, \
  float32x4x4_t: __ARM_mve_type_float32x4x4_t, \
  int16_t: __ARM_mve_type_int16_t, \
  int16_t *: __ARM_mve_type_int16_t_ptr, \
  int16_t const *: __ARM_mve_type_int16_t_const_ptr, \
  int16x8_t: __ARM_mve_type_int16x8_t, \
  int16x8x2_t: __ARM_mve_type_int16x8x2_t, \
  int16x8x4_t: __ARM_mve_type_int16x8x4_t, \
  int32_t: __ARM_mve_type_int32_t, \
  int32_t *: __ARM_mve_type_int32_t_ptr, \
  int32_t const *: __ARM_mve_type_int32_t_const_ptr, \
  int32x4_t: __ARM_mve_type_int32x4_t, \
  int32x4x2_t: __ARM_mve_type_int32x4x2_t, \
  int32x4x4_t: __ARM_mve_type_int32x4x4_t, \
  int64_t: __ARM_mve_type_int64_t, \
  int64_t *: __ARM_mve_type_int64_t_ptr, \
  int64_t const *: __ARM_mve_type_int64_t_const_ptr, \
  int64x2_t: __ARM_mve_type_int64x2_t, \
  int8_t: __ARM_mve_type_int8_t, \
  int8_t *: __ARM_mve_type_int8_t_ptr, \
  int8_t const *: __ARM_mve_type_int8_t_const_ptr, \
  int8x16_t: __ARM_mve_type_int8x16_t, \
  int8x16x2_t: __ARM_mve_type_int8x16x2_t, \
  int8x16x4_t: __ARM_mve_type_int8x16x4_t, \
  uint16_t: __ARM_mve_type_uint16_t, \
  uint16_t *: __ARM_mve_type_uint16_t_ptr, \
  uint16_t const *: __ARM_mve_type_uint16_t_const_ptr, \
  uint16x8_t: __ARM_mve_type_uint16x8_t, \
  uint16x8x2_t: __ARM_mve_type_uint16x8x2_t, \
  uint16x8x4_t: __ARM_mve_type_uint16x8x4_t, \
  uint32_t: __ARM_mve_type_uint32_t, \
  uint32_t *: __ARM_mve_type_uint32_t_ptr, \
  uint32_t const *: __ARM_mve_type_uint32_t_const_ptr, \
  uint32x4_t: __ARM_mve_type_uint32x4_t, \
  uint32x4x2_t: __ARM_mve_type_uint32x4x2_t, \
  uint32x4x4_t: __ARM_mve_type_uint32x4x4_t, \
  uint64_t: __ARM_mve_type_uint64_t, \
  uint64_t *: __ARM_mve_type_uint64_t_ptr, \
  uint64_t const *: __ARM_mve_type_uint64_t_const_ptr, \
  uint64x2_t: __ARM_mve_type_uint64x2_t, \
  uint8_t: __ARM_mve_type_uint8_t, \
  uint8_t *: __ARM_mve_type_uint8_t_ptr, \
  uint8_t const *: __ARM_mve_type_uint8_t_const_ptr, \
  uint8x16_t: __ARM_mve_type_uint8x16_t, \
  uint8x16x2_t: __ARM_mve_type_uint8x16x2_t, \
  uint8x16x4_t: __ARM_mve_type_uint8x16x4_t, \
  default: _Generic(x, \
	signed char: __ARM_mve_type_int8_t, \
	short: __ARM_mve_type_int16_t, \
	int: __ARM_mve_type_int32_t, \
	long: __ARM_mve_type_int32_t, \
	long long: __ARM_mve_type_int64_t, \
	unsigned char: __ARM_mve_type_uint8_t, \
	unsigned short: __ARM_mve_type_uint16_t, \
	unsigned int: __ARM_mve_type_uint32_t, \
	unsigned long: __ARM_mve_type_uint32_t, \
	unsigned long long: __ARM_mve_type_uint64_t, \
	default: __ARM_mve_unsupported_type))
#else
#define __ARM_mve_typeid(x) _Generic(x, \
  int16_t: __ARM_mve_type_int16_t, \
  int16_t *: __ARM_mve_type_int16_t_ptr, \
  int16_t const *: __ARM_mve_type_int16_t_const_ptr, \
  int16x8_t: __ARM_mve_type_int16x8_t, \
  int16x8x2_t: __ARM_mve_type_int16x8x2_t, \
  int16x8x4_t: __ARM_mve_type_int16x8x4_t, \
  int32_t: __ARM_mve_type_int32_t, \
  int32_t *: __ARM_mve_type_int32_t_ptr, \
  int32_t const *: __ARM_mve_type_int32_t_const_ptr, \
  int32x4_t: __ARM_mve_type_int32x4_t, \
  int32x4x2_t: __ARM_mve_type_int32x4x2_t, \
  int32x4x4_t: __ARM_mve_type_int32x4x4_t, \
  int64_t: __ARM_mve_type_int64_t, \
  int64_t *: __ARM_mve_type_int64_t_ptr, \
  int64_t const *: __ARM_mve_type_int64_t_const_ptr, \
  int64x2_t: __ARM_mve_type_int64x2_t, \
  int8_t: __ARM_mve_type_int8_t, \
  int8_t *: __ARM_mve_type_int8_t_ptr, \
  int8_t const *: __ARM_mve_type_int8_t_const_ptr, \
  int8x16_t: __ARM_mve_type_int8x16_t, \
  int8x16x2_t: __ARM_mve_type_int8x16x2_t, \
  int8x16x4_t: __ARM_mve_type_int8x16x4_t, \
  uint16_t: __ARM_mve_type_uint16_t, \
  uint16_t *: __ARM_mve_type_uint16_t_ptr, \
  uint16_t const *: __ARM_mve_type_uint16_t_const_ptr, \
  uint16x8_t: __ARM_mve_type_uint16x8_t, \
  uint16x8x2_t: __ARM_mve_type_uint16x8x2_t, \
  uint16x8x4_t: __ARM_mve_type_uint16x8x4_t, \
  uint32_t: __ARM_mve_type_uint32_t, \
  uint32_t *: __ARM_mve_type_uint32_t_ptr, \
  uint32_t const *: __ARM_mve_type_uint32_t_const_ptr, \
  uint32x4_t: __ARM_mve_type_uint32x4_t, \
  uint32x4x2_t: __ARM_mve_type_uint32x4x2_t, \
  uint32x4x4_t: __ARM_mve_type_uint32x4x4_t, \
  uint64_t: __ARM_mve_type_uint64_t, \
  uint64_t *: __ARM_mve_type_uint64_t_ptr, \
  uint64_t const *: __ARM_mve_type_uint64_t_const_ptr, \
  uint64x2_t: __ARM_mve_type_uint64x2_t, \
  uint8_t: __ARM_mve_type_uint8_t, \
  uint8_t *: __ARM_mve_type_uint8_t_ptr, \
  uint8_t const *: __ARM_mve_type_uint8_t_const_ptr, \
  uint8x16_t: __ARM_mve_type_uint8x16_t, \
  uint8x16x2_t: __ARM_mve_type_uint8x16x2_t, \
  uint8x16x4_t: __ARM_mve_type_uint8x16x4_t, \
  default: _Generic(x, \
	signed char: __ARM_mve_type_int8_t, \
	short: __ARM_mve_type_int16_t, \
	int: __ARM_mve_type_int32_t, \
	long: __ARM_mve_type_int32_t, \
	long long: __ARM_mve_type_int64_t, \
	unsigned char: __ARM_mve_type_uint8_t, \
	unsigned short: __ARM_mve_type_uint16_t, \
	unsigned int: __ARM_mve_type_uint32_t, \
	unsigned long: __ARM_mve_type_uint32_t, \
	unsigned long long: __ARM_mve_type_uint64_t, \
	default: __ARM_mve_unsupported_type))
#endif /* MVE Floating point.  */
20391 | ||
/* __ARM_mve_coerce(param, type): yield PARAM when its type is TYPE;
   otherwise produce a (never-evaluated) dereference of __ARM_undef so
   the mismatching _Generic association still type-checks.  __ARM_undef
   is only declared, never defined: selecting it by accident at runtime
   would be a link/program error, but _Generic discards unselected
   associations, so well-typed calls never reference it.
   __ARM_mve_coerce1 additionally accepts a const-qualified TYPE.  */
extern void *__ARM_undef;
#define __ARM_mve_coerce(param, type) \
  _Generic(param, type: param, default: *(type *)__ARM_undef)
#define __ARM_mve_coerce1(param, type) \
  _Generic(param, type: param, const type: param, default: *(type *)__ARM_undef)
20397 | ||
20398 | #if (__ARM_FEATURE_MVE & 2) /* MVE Floating point. */ | |
20399 | ||
20400 | #define vst4q(p0,p1) __arm_vst4q(p0,p1) | |
20401 | #define __arm_vst4q(p0,p1) ({ __typeof(p0) __p0 = (p0); \ | |
20402 | __typeof(p1) __p1 = (p1); \ | |
20403 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
20404 | int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16x4_t]: __arm_vst4q_s8 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, int8x16x4_t)), \ | |
20405 | int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8x4_t]: __arm_vst4q_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, int16x8x4_t)), \ | |
20406 | int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4x4_t]: __arm_vst4q_s32 (__ARM_mve_coerce(__p0, int32_t *), __ARM_mve_coerce(__p1, int32x4x4_t)), \ | |
20407 | int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16x4_t]: __arm_vst4q_u8 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16x4_t)), \ | |
20408 | int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8x4_t]: __arm_vst4q_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8x4_t)), \ | |
20409 | int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4x4_t]: __arm_vst4q_u32 (__ARM_mve_coerce(__p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4x4_t)), \ | |
20410 | int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_float16x8x4_t]: __arm_vst4q_f16 (__ARM_mve_coerce(__p0, float16_t *), __ARM_mve_coerce(__p1, float16x8x4_t)), \ | |
20411 | int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4x4_t]: __arm_vst4q_f32 (__ARM_mve_coerce(__p0, float32_t *), __ARM_mve_coerce(__p1, float32x4x4_t)));}) | |
20412 | ||
/* Polymorphic floating-point rounding overloads (f16/f32 only):
   vrndxq (to nearest, raising inexact), vrndq (toward zero),
   vrndpq (toward +inf), vrndnq (to nearest even), vrndmq (toward -inf),
   vrndaq (to nearest, ties away from zero).  */
#define vrndxq(p0) __arm_vrndxq(p0)
#define __arm_vrndxq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndxq_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndxq_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})

#define vrndq(p0) __arm_vrndq(p0)
#define __arm_vrndq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndq_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndq_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})

#define vrndpq(p0) __arm_vrndpq(p0)
#define __arm_vrndpq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndpq_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndpq_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})

#define vrndnq(p0) __arm_vrndnq(p0)
#define __arm_vrndnq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndnq_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndnq_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})

#define vrndmq(p0) __arm_vrndmq(p0)
#define __arm_vrndmq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndmq_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndmq_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})

#define vrndaq(p0) __arm_vrndaq(p0)
#define __arm_vrndaq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndaq_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndaq_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
20448 | ||
/* Polymorphic vrev64q: reverse elements within each 64-bit chunk.
   Accepts all integer element widths plus f16/f32.  */
#define vrev64q(p0) __arm_vrev64q(p0)
#define __arm_vrev64q(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev64q_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vrev64q_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vrev64q_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev64q_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrev64q_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrev64q_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vrev64q_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vrev64q_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})

/* Polymorphic vnegq: elementwise negation (signed and float only).  */
#define vnegq(p0) __arm_vnegq(p0)
#define __arm_vnegq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vnegq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vnegq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vnegq_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vnegq_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vnegq_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
20469 | ||
/* Polymorphic vdupq_n.
   NOTE(review): this overload dispatches on a *vector*-typed argument
   and coerces it to float16x8_t/float32x4_t before forwarding to
   __arm_vdupq_n_f16/_f32, which conventionally take a scalar to
   broadcast.  That looks suspect — confirm against the __arm_vdupq_n_*
   prototypes earlier in this header before relying on it.  */
#define vdupq_n(p0) __arm_vdupq_n(p0)
#define __arm_vdupq_n(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vdupq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vdupq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})

/* Polymorphic vabsq: elementwise absolute value (signed and float).  */
#define vabsq(p0) __arm_vabsq(p0)
#define __arm_vabsq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vabsq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vabsq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vabsq_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vabsq_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vabsq_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
20484 | ||
/* Polymorphic vrev32q: reverse elements within each 32-bit chunk
   (8- and 16-bit element types only).  */
#define vrev32q(p0) __arm_vrev32q(p0)
#define __arm_vrev32q(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev32q_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vrev32q_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev32q_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrev32q_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vrev32q_f16 (__ARM_mve_coerce(__p0, float16x8_t)));})

/* vcvtbq_f32 / vcvttq_f32: widen the bottom/top f16 halves of a
   float16x8_t to float32x4_t.  Single association, so the macro is
   mostly a uniform-spelling wrapper with type checking.  */
#define vcvtbq_f32(p0) __arm_vcvtbq_f32(p0)
#define __arm_vcvtbq_f32(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vcvtbq_f32_f16 (__ARM_mve_coerce(__p0, float16x8_t)));})

#define vcvttq_f32(p0) __arm_vcvttq_f32(p0)
#define __arm_vcvttq_f32(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vcvttq_f32_f16 (__ARM_mve_coerce(__p0, float16x8_t)));})

/* Polymorphic vrev16q: reverse bytes within each 16-bit chunk
   (8-bit element types only).  */
#define vrev16q(p0) __arm_vrev16q(p0)
#define __arm_vrev16q(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev16q_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev16q_u8 (__ARM_mve_coerce(__p0, uint8x16_t)));})
20509 | ||
/* Polymorphic vqabsq / vqnegq: saturating absolute value and negation
   (signed integer element types only).  */
#define vqabsq(p0) __arm_vqabsq(p0)
#define __arm_vqabsq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vqabsq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vqabsq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vqabsq_s32 (__ARM_mve_coerce(__p0, int32x4_t)));})

#define vqnegq(p0) __arm_vqnegq(p0)
#define __arm_vqnegq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vqnegq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vqnegq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vqnegq_s32 (__ARM_mve_coerce(__p0, int32x4_t)));})

/* Polymorphic vmvnq: bitwise NOT for all integer element types.  */
#define vmvnq(p0) __arm_vmvnq(p0)
#define __arm_vmvnq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vmvnq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vmvnq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vmvnq_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmvnq_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmvnq_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vmvnq_u32 (__ARM_mve_coerce(__p0, uint32x4_t)));})
20533 | ||
/* Polymorphic vmovlbq / vmovltq: sign/zero-extend the bottom/top half
   of the input to the next wider element type (8->16, 16->32).  */
#define vmovlbq(p0) __arm_vmovlbq(p0)
#define __arm_vmovlbq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vmovlbq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vmovlbq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmovlbq_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmovlbq_u16 (__ARM_mve_coerce(__p0, uint16x8_t)));})

#define vmovltq(p0) __arm_vmovltq(p0)
#define __arm_vmovltq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vmovltq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vmovltq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmovltq_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmovltq_u16 (__ARM_mve_coerce(__p0, uint16x8_t)));})

/* Polymorphic vclzq: count leading zeros per element.  */
#define vclzq(p0) __arm_vclzq(p0)
#define __arm_vclzq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vclzq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vclzq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vclzq_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vclzq_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vclzq_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vclzq_u32 (__ARM_mve_coerce(__p0, uint32x4_t)));})

/* Polymorphic vclsq: count leading sign bits (signed only).  */
#define vclsq(p0) __arm_vclsq(p0)
#define __arm_vclsq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vclsq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vclsq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vclsq_s32 (__ARM_mve_coerce(__p0, int32x4_t)));})
20566 | ||
/* Polymorphic vcvtq: integer-to-float conversion; the destination
   float element width follows the source integer element width.  */
#define vcvtq(p0) __arm_vcvtq(p0)
#define __arm_vcvtq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vcvtq_f16_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vcvtq_f32_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vcvtq_f16_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vcvtq_f32_u32 (__ARM_mve_coerce(__p0, uint32x4_t)));})
20574 | ||
/* A duplicate definition of the polymorphic vsubq_n/__arm_vsubq_n
   macros used to live here.  The same macros are defined again later
   in this floating-point section with a different association order,
   which makes the second #define an invalid macro redefinition
   (C11 6.10.3p2 — the replacement lists are not identical) and draws
   a redefinition diagnostic.  The later definition is the one that is
   in effect at expansion time, so this earlier copy has been removed;
   see the canonical vsubq_n definition below.  */
20587 | ||
/* Polymorphic vshlq: elementwise shift by a *signed* vector of shift
   counts — note the unsigned variants still take a signed shift
   vector, matching the VSHL instruction.  */
#define vshlq(p0,p1) __arm_vshlq(p0,p1)
#define __arm_vshlq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vshlq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vshlq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vshlq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vshlq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vshlq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vshlq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})

/* Polymorphic vshrq: shift right by immediate.  P1 is passed through
   un-copied on purpose: it must remain an integer constant expression
   for the _n builtin, so no __typeof temporary is taken.  */
#define vshrq(p0,p1) __arm_vshrq(p0,p1)
#define __arm_vshrq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vshrq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vshrq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vshrq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshrq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshrq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshrq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})

/* Polymorphic vcvtq_n: fixed-point integer to float conversion with
   an immediate number of fractional bits (P1, constant expression).  */
#define vcvtq_n(p0,p1) __arm_vcvtq_n(p0,p1)
#define __arm_vcvtq_n(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vcvtq_n_f16_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vcvtq_n_f32_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vcvtq_n_f16_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vcvtq_n_f32_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
20616 | ||
/* Polymorphic vsubq_n: vector minus broadcast scalar, dispatching on
   the (vector, scalar) type pair.  */
#define vsubq_n(p0,p1) __arm_vsubq_n(p0,p1)
#define __arm_vsubq_n(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vsubq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vsubq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vsubq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vsubq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vsubq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vsubq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vsubq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vsubq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32_t)));})
20629 | ||
/* Polymorphic vsubq: elementwise vector - vector.  */
#define vsubq(p0,p1) __arm_vsubq(p0,p1)
#define __arm_vsubq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsubq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsubq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsubq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsubq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsubq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsubq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vsubq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vsubq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})

/* Polymorphic vorrq: elementwise bitwise OR (float variants operate
   on the raw bit patterns).  */
#define vorrq(p0,p1) __arm_vorrq(p0,p1)
#define __arm_vorrq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vorrq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vorrq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vorrq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vorrq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vorrq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vorrq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vorrq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vorrq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})

/* Polymorphic vabdq: elementwise absolute difference.  */
#define vabdq(p0,p1) __arm_vabdq(p0,p1)
#define __arm_vabdq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vabdq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vabdq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vabdq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vabdq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vabdq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vabdq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vabdq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vabdq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
20668 | ||
/* vaddq: polymorphic vector add.  Dispatches vector+vector forms to
   __arm_vaddq_<t> and vector+scalar forms to __arm_vaddq_n_<t>.
   Arguments are captured once into __p0/__p1 so that side-effecting
   expressions are evaluated exactly once.
   Fix: the f16/f32 vector+vector selections previously coerced the raw
   macro parameters p0/p1 instead of the copies __p0/__p1, which both
   re-evaluated the arguments and defeated the single-evaluation
   guarantee; they now use __p0/__p1 like every other selection.  */
#define vaddq(p0,p1) __arm_vaddq(p0,p1)
#define __arm_vaddq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vaddq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vaddq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vaddq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vaddq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vaddq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vaddq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vaddq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vaddq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vaddq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vaddq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vaddq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vaddq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vaddq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vaddq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vaddq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vaddq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32_t)));})
20689 | ||
/* vandq: polymorphic bitwise AND.  Single-evaluation copies __p0/__p1,
   then _Generic dispatch on the (p0, p1) type pair.  */
#define vandq(p0,p1) __arm_vandq(p0,p1)
#define __arm_vandq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vandq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vandq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vandq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vandq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vandq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vandq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vandq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vandq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
20702 | ||
/* vbicq: polymorphic bit-clear (p0 & ~p1).  Vector-with-immediate forms
   (the _n variants, second operand an int) are listed before the
   vector-vector forms; __ARM_mve_coerce1 accepts the literal immediate.  */
#define vbicq(p0,p1) __arm_vbicq(p0,p1)
#define __arm_vbicq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32_t]: __arm_vbicq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce1(__p1, int)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vbicq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce1(__p1, int)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32_t]: __arm_vbicq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce1(__p1, int)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32_t]: __arm_vbicq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce1(__p1, int)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vbicq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vbicq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vbicq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vbicq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vbicq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vbicq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vbicq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vbicq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
20719 | ||
/* vornq: polymorphic OR-NOT (p0 | ~p1), _Generic dispatch by type pair.  */
#define vornq(p0,p1) __arm_vornq(p0,p1)
#define __arm_vornq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vornq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vornq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vornq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vornq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vornq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vornq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vornq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vornq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
20732 | ||
/* vmulq_n: polymorphic vector-by-scalar multiply; second operand is a
   scalar of the vector's element type.  */
#define vmulq_n(p0,p1) __arm_vmulq_n(p0,p1)
#define __arm_vmulq_n(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vmulq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vmulq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vmulq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vmulq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vmulq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vmulq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vmulq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vmulq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32_t)));})
20745 | ||
/* vmulq: polymorphic element-wise vector multiply.  */
#define vmulq(p0,p1) __arm_vmulq(p0,p1)
#define __arm_vmulq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmulq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmulq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
20758 | ||
/* vcaddq_rot270: polymorphic complex add with 270-degree rotation.  */
#define vcaddq_rot270(p0,p1) __arm_vcaddq_rot270(p0,p1)
#define __arm_vcaddq_rot270(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot270_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot270_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot270_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot270_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot270_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot270_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcaddq_rot270_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcaddq_rot270_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
20771 | ||
/* vcmpeqq: polymorphic compare-equal.  Scalar second operand selects the
   _n variants; vector second operand selects the vector-vector variants.  */
#define vcmpeqq(p0,p1) __arm_vcmpeqq(p0,p1)
#define __arm_vcmpeqq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpeqq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpeqq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpeqq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vcmpeqq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vcmpeqq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vcmpeqq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vcmpeqq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vcmpeqq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpeqq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpeqq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpeqq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpeqq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpeqq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpeqq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpeqq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpeqq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
20792 | ||
/* vcaddq_rot90: polymorphic complex add with 90-degree rotation.  */
#define vcaddq_rot90(p0,p1) __arm_vcaddq_rot90(p0,p1)
#define __arm_vcaddq_rot90(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot90_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot90_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot90_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot90_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot90_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot90_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcaddq_rot90_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcaddq_rot90_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
f9355dee | 20805 | |
/* vcmpgeq_n: polymorphic compare-greater-or-equal against a scalar.
   Signed and floating types only (unsigned >= is handled elsewhere).  */
#define vcmpgeq_n(p0,p1) __arm_vcmpgeq_n(p0,p1)
#define __arm_vcmpgeq_n(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpgeq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpgeq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpgeq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vcmpgeq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vcmpgeq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32_t)));})
f9355dee | 20815 | |
/* vcmpeqq_m: predicated polymorphic compare-equal.  p2 is the
   predicate mask, passed straight through to the selected intrinsic.  */
#define vcmpeqq_m(p0,p1,p2) __arm_vcmpeqq_m(p0,p1,p2)
#define __arm_vcmpeqq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpeqq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpeqq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpeqq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpeqq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpeqq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpeqq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpeqq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpeqq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpeqq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vcmpeqq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vcmpeqq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vcmpeqq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpeqq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpeqq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vcmpeqq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vcmpeqq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32_t), p2));})
f9355dee | 20836 | |
/* vcmpgtq: polymorphic compare-greater-than (signed/float vector forms).  */
#define vcmpgtq(p0,p1) __arm_vcmpgtq(p0,p1)
#define __arm_vcmpgtq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpgtq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpgtq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpgtq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpgtq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpgtq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
f9355dee | 20846 | |
/* vcmpleq: polymorphic compare-less-or-equal; vector-vector forms first,
   then vector-scalar (_n) forms.  Signed/float only.  */
#define vcmpleq(p0,p1) __arm_vcmpleq(p0,p1)
#define __arm_vcmpleq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpleq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpleq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpleq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpleq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpleq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpleq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpleq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpleq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vcmpleq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vcmpleq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32_t)));})
20861 | ||
/* vcmpltq: polymorphic compare-less-than; signed/float, vector or
   scalar (_n) second operand.  */
#define vcmpltq(p0,p1) __arm_vcmpltq(p0,p1)
#define __arm_vcmpltq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpltq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpltq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpltq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpltq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpltq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpltq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpltq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpltq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vcmpltq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vcmpltq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32_t)));})
20876 | ||
/* vcmpneq: polymorphic compare-not-equal; scalar (_n) selections first,
   then vector-vector selections, for all integer and float element types.  */
#define vcmpneq(p0,p1) __arm_vcmpneq(p0,p1)
#define __arm_vcmpneq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpneq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpneq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpneq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vcmpneq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vcmpneq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vcmpneq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vcmpneq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vcmpneq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpneq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpneq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpneq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpneq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpneq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpneq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpneq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpneq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
20897 | ||
/* vcmulq: polymorphic complex multiply (float element types only).  */
#define vcmulq(p0,p1) __arm_vcmulq(p0,p1)
#define __arm_vcmulq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
20904 | ||
/* vcmulq_rot180: polymorphic complex multiply with 180-degree rotation.  */
#define vcmulq_rot180(p0,p1) __arm_vcmulq_rot180(p0,p1)
#define __arm_vcmulq_rot180(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_rot180_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_rot180_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
f9355dee | 20911 | |
/* vcmulq_rot270: polymorphic complex multiply with 270-degree rotation.  */
#define vcmulq_rot270(p0,p1) __arm_vcmulq_rot270(p0,p1)
#define __arm_vcmulq_rot270(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_rot270_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_rot270_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
f9355dee | 20918 | |
e3678b44 SP |
20919 | #define vcmulq_rot90(p0,p1) __arm_vcmulq_rot90(p0,p1) |
20920 | #define __arm_vcmulq_rot90(p0,p1) ({ __typeof(p0) __p0 = (p0); \ | |
20921 | __typeof(p1) __p1 = (p1); \ | |
20922 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
20923 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_rot90_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \ | |
20924 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_rot90_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));}) | |
f9355dee | 20925 | |
e3678b44 SP |
/* veorq: bitwise exclusive-OR, polymorphic over all six integer vector
   types plus f16/f32 (the float forms operate on the bit patterns).  */
#define veorq(p0,p1) __arm_veorq(p0,p1)
#define __arm_veorq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_veorq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_veorq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_veorq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_veorq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_veorq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_veorq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_veorq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_veorq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})

/* Floating-point NaN-propagating maximum family.
   vmaxnmaq: vector maximum-absolute; vmaxnmq: vector maximum;
   vmaxnmavq / vmaxnmvq: reductions taking a scalar accumulator in p0.
   NOTE(review): the original text defined vmaxnmvq twice with identical
   replacement lists; the redundant second definition has been dropped.  */
#define vmaxnmaq(p0,p1) __arm_vmaxnmaq(p0,p1)
#define __arm_vmaxnmaq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmaxnmaq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmaxnmaq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})

#define vmaxnmavq(p0,p1) __arm_vmaxnmavq(p0,p1)
#define __arm_vmaxnmavq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16_t][__ARM_mve_type_float16x8_t]: __arm_vmaxnmavq_f16 (__ARM_mve_coerce(__p0, float16_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32_t][__ARM_mve_type_float32x4_t]: __arm_vmaxnmavq_f32 (__ARM_mve_coerce(__p0, float32_t), __ARM_mve_coerce(__p1, float32x4_t)));})

#define vmaxnmq(p0,p1) __arm_vmaxnmq(p0,p1)
#define __arm_vmaxnmq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmaxnmq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmaxnmq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})

#define vmaxnmvq(p0,p1) __arm_vmaxnmvq(p0,p1)
#define __arm_vmaxnmvq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16_t][__ARM_mve_type_float16x8_t]: __arm_vmaxnmvq_f16 (__ARM_mve_coerce(__p0, float16_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32_t][__ARM_mve_type_float32x4_t]: __arm_vmaxnmvq_f32 (__ARM_mve_coerce(__p0, float32_t), __ARM_mve_coerce(__p1, float32x4_t)));})

/* Floating-point NaN-propagating minimum family (part 1).
   vminnmaq: vector minimum-absolute; vminnmavq: reduction whose scalar
   accumulator is passed in p0.  */
#define vminnmaq(p0,p1) __arm_vminnmaq(p0,p1)
#define __arm_vminnmaq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vminnmaq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vminnmaq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})

#define vminnmavq(p0,p1) __arm_vminnmavq(p0,p1)
#define __arm_vminnmavq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16_t][__ARM_mve_type_float16x8_t]: __arm_vminnmavq_f16 (__ARM_mve_coerce(__p0, float16_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32_t][__ARM_mve_type_float32x4_t]: __arm_vminnmavq_f32 (__ARM_mve_coerce(__p0, float32_t), __ARM_mve_coerce(__p1, float32x4_t)));})

/* vbrsrq: bit-reverse-and-shift-right, polymorphic on the vector operand
   only; the scalar shift amount p1 is passed through unconverted, so it
   is not captured in a temporary here.  */
#define vbrsrq(p0,p1) __arm_vbrsrq(p0,p1)
#define __arm_vbrsrq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vbrsrq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vbrsrq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vbrsrq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vbrsrq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vbrsrq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vbrsrq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), p1), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vbrsrq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vbrsrq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), p1));})

/* vminnmq: element-wise floating-point minimum (NaN-propagating),
   dispatching on both operands being f16 or f32 vectors.  */
#define vminnmq(p0,p1) __arm_vminnmq(p0,p1)
#define __arm_vminnmq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vminnmq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vminnmq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})

/* vsubq: element-wise subtraction.  Vector-scalar pairs select the _n_
   broadcast forms; vector-vector pairs select the plain forms.  Covers
   all six integer element types plus f16/f32.  */
#define vsubq(p0,p1) __arm_vsubq(p0,p1)
#define __arm_vsubq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vsubq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vsubq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vsubq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vsubq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vsubq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vsubq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vsubq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vsubq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsubq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsubq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsubq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsubq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsubq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsubq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vsubq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vsubq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})

/* vminnmvq: floating-point minimum across a vector, folded into the
   scalar accumulator passed in p0 (f16 or f32).  */
#define vminnmvq(p0,p1) __arm_vminnmvq(p0,p1)
#define __arm_vminnmvq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16_t][__ARM_mve_type_float16x8_t]: __arm_vminnmvq_f16 (__ARM_mve_coerce(__p0, float16_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32_t][__ARM_mve_type_float32x4_t]: __arm_vminnmvq_f32 (__ARM_mve_coerce(__p0, float32_t), __ARM_mve_coerce(__p1, float32x4_t)));})

/* vcmpgeq: element-wise greater-or-equal compare producing a predicate.
   Signed-integer and floating vectors only (unsigned uses vcmpcsq);
   scalar second operands select the broadcast _n_ forms.  */
#define vcmpgeq(p0,p1) __arm_vcmpgeq(p0,p1)
#define __arm_vcmpgeq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpgeq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpgeq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpgeq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpgeq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpgeq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpgeq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpgeq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpgeq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vcmpgeq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vcmpgeq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32_t)));})

/* vshlq_r / vshlq_n: left shift of every element by a scalar amount.
   _r takes a runtime register amount, _n an immediate; both dispatch on
   the vector operand only, so p1 is forwarded without coercion.  */
#define vshlq_r(p0,p1) __arm_vshlq_r(p0,p1)
#define __arm_vshlq_r(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlq_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlq_r_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlq_r_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlq_r_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlq_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlq_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})

#define vshlq_n(p0,p1) __arm_vshlq_n(p0,p1)
#define __arm_vshlq_n(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})

/* vshlltq / vshllbq: long left shift by immediate of the top/bottom
   halves of a narrow vector, widening 8-bit to 16-bit and 16-bit to
   32-bit elements (hence only the four narrow input types).  */
#define vshlltq(p0,p1) __arm_vshlltq(p0,p1)
#define __arm_vshlltq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlltq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlltq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlltq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlltq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1));})

#define vshllbq(p0,p1) __arm_vshllbq(p0,p1)
#define __arm_vshllbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vshllbq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vshllbq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshllbq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshllbq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1));})

/* vrshrq: rounding right shift by immediate, dispatching on the vector
   operand only.
   NOTE(review): the original text contained a second, byte-identical
   definition of vrshrq/__arm_vrshrq; the redundant copy was removed.  */
#define vrshrq(p0,p1) __arm_vrshrq(p0,p1)
#define __arm_vrshrq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vrshrq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vrshrq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vrshrq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrshrq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrshrq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrshrq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})

/* vrshlq: rounding left shift.  A scalar int32_t second operand selects
   the _n_ broadcast forms; a signed shift-count vector selects the
   per-element forms (unsigned data still shifts by a signed vector).  */
#define vrshlq(p0,p1) __arm_vrshlq(p0,p1)
#define __arm_vrshlq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int32_t]: __arm_vrshlq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32_t]: __arm_vrshlq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vrshlq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int32_t]: __arm_vrshlq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32_t]: __arm_vrshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32_t]: __arm_vrshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshlq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshlq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})

/* vrmulhq: rounding multiply returning the high half of each product;
   both operands must share the same integer vector type.  */
#define vrmulhq(p0,p1) __arm_vrmulhq(p0,p1)
#define __arm_vrmulhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrmulhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrmulhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrmulhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrmulhq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrmulhq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrmulhq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})

/* vrhaddq: rounding halving add, (a + b + 1) >> 1 per element, for all
   six integer vector types.  */
#define vrhaddq(p0,p1) __arm_vrhaddq(p0,p1)
#define __arm_vrhaddq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrhaddq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrhaddq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrhaddq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrhaddq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrhaddq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrhaddq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})

/* vqsubq: saturating subtraction.  Scalar second operands select the
   broadcast _n_ forms; vector pairs select the element-wise forms.  */
#define vqsubq(p0,p1) __arm_vqsubq(p0,p1)
#define __arm_vqsubq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vqsubq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vqsubq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vqsubq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vqsubq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vqsubq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vqsubq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqsubq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqsubq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqsubq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vqsubq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vqsubq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vqsubq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})

/* vqshluq: saturating left shift by immediate of a signed vector with an
   unsigned result, hence only the three signed input types.  */
#define vqshluq(p0,p1) __arm_vqshluq(p0,p1)
#define __arm_vqshluq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vqshluq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vqshluq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vqshluq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1));})

/* vqshlq: saturating left shift by a per-element signed shift count;
   unsigned data vectors still take a signed count vector.  */
#define vqshlq(p0,p1) __arm_vqshlq(p0,p1)
#define __arm_vqshlq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqshlq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqshlq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqshlq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqshlq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqshlq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqshlq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})

/* vqshlq_r: saturating left shift of every element by a runtime scalar
   amount held in a register; dispatch is on the vector operand only.  */
#define vqshlq_r(p0,p1) __arm_vqshlq_r(p0,p1)
#define __arm_vqshlq_r(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vqshlq_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vqshlq_r_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vqshlq_r_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vqshlq_r_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vqshlq_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vqshlq_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})

/* vqshlq_n: saturating left shift of every element by an immediate;
   dispatch is on the vector operand only, p1 passes through raw.  */
#define vqshlq_n(p0,p1) __arm_vqshlq_n(p0,p1)
#define __arm_vqshlq_n(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vqshlq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vqshlq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vqshlq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vqshlq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vqshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vqshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})

/* vqrshlq: saturating rounding left shift.  Vector second operands give
   per-element shifts; a scalar int32_t second operand selects the _n_
   whole-vector forms.  */
#define vqrshlq(p0,p1) __arm_vqrshlq(p0,p1)
#define __arm_vqrshlq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrshlq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrshlq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrshlq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrshlq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrshlq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrshlq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int32_t]: __arm_vqrshlq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32_t]: __arm_vqrshlq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vqrshlq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int32_t]: __arm_vqrshlq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32_t]: __arm_vqrshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32_t]: __arm_vqrshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32_t)));})

/* Polymorphic vqrdmulhq: saturating rounding doubling multiply-high.
   _Generic dispatches on the argument types to the vector (s8/s16/s32)
   or vector-by-scalar (_n_) signed variants.  */
#define vqrdmulhq(p0,p1) __arm_vqrdmulhq(p0,p1)
#define __arm_vqrdmulhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmulhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmulhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmulhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vqrdmulhq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vqrdmulhq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vqrdmulhq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)));})
f9355dee | 21227 | |
e3678b44 SP |
/* Polymorphic vmlaldavxq: multiply-accumulate long (exchanged) across
   vector; signed 16/32-bit element variants only.  */
#define vmlaldavxq(p0,p1) __arm_vmlaldavxq(p0,p1)
#define __arm_vmlaldavxq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlaldavxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlaldavxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
f9355dee | 21234 | |
e3678b44 SP |
/* Polymorphic vqmovuntq: saturating narrow of signed input to the top
   half of an unsigned result vector.  */
#define vqmovuntq(p0,p1) __arm_vqmovuntq(p0,p1)
#define __arm_vqmovuntq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovuntq_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovuntq_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t)));})
21241 | ||
/* Polymorphic vqmovntq: saturating narrow into the top half of the
   destination vector; signed and unsigned variants.  */
#define vqmovntq(p0,p1) __arm_vqmovntq(p0,p1)
#define __arm_vqmovntq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovntq_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovntq_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqmovntq_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqmovntq_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
21250 | ||
/* Polymorphic vqmovnbq: saturating narrow into the bottom half of the
   destination vector; signed and unsigned variants.  */
#define vqmovnbq(p0,p1) __arm_vqmovnbq(p0,p1)
#define __arm_vqmovnbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovnbq_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovnbq_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqmovnbq_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqmovnbq_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
f9355dee | 21259 | |
e3678b44 SP |
/* Polymorphic vqdmulltq: saturating doubling multiply-long (top);
   scalar (_n_) and vector signed variants.  */
#define vqdmulltq(p0,p1) __arm_vqdmulltq(p0,p1)
#define __arm_vqdmulltq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vqdmulltq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vqdmulltq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmulltq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmulltq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
f9355dee | 21268 | |
e3678b44 SP |
/* Polymorphic vqmovunbq: saturating narrow of signed input to the
   bottom half of an unsigned result vector.  */
#define vqmovunbq(p0,p1) __arm_vqmovunbq(p0,p1)
#define __arm_vqmovunbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovunbq_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovunbq_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t)));})
f9355dee | 21275 | |
e3678b44 SP |
/* Polymorphic vqdmullbq: saturating doubling multiply-long (bottom);
   scalar (_n_) and vector signed variants.  */
#define vqdmullbq(p0,p1) __arm_vqdmullbq(p0,p1)
#define __arm_vqdmullbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vqdmullbq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vqdmullbq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmullbq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmullbq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
f9355dee | 21284 | |
e3678b44 SP |
/* Polymorphic vqdmulhq: saturating doubling multiply-high; scalar
   (_n_) and vector signed variants.  */
#define vqdmulhq(p0,p1) __arm_vqdmulhq(p0,p1)
#define __arm_vqdmulhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vqdmulhq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vqdmulhq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vqdmulhq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmulhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmulhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmulhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
f9355dee | 21295 | |
e3678b44 SP |
/* Polymorphic vqaddq: saturating add; scalar (_n_) and vector variants
   over all signed and unsigned element widths.  */
#define vqaddq(p0,p1) __arm_vqaddq(p0,p1)
#define __arm_vqaddq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vqaddq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vqaddq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vqaddq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vqaddq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vqaddq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vqaddq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqaddq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqaddq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqaddq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vqaddq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vqaddq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vqaddq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
f9355dee | 21312 | |
e3678b44 SP |
/* Polymorphic vmulltq_poly: polynomial multiply-long (top); p8/p16
   operands are carried in unsigned vector types.  */
#define vmulltq_poly(p0,p1) __arm_vmulltq_poly(p0,p1)
#define __arm_vmulltq_poly(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulltq_poly_p8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulltq_poly_p16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)));})
f9355dee | 21319 | |
e3678b44 SP |
/* Polymorphic vmullbq_poly: polynomial multiply-long (bottom); p8/p16
   operands are carried in unsigned vector types.  */
#define vmullbq_poly(p0,p1) __arm_vmullbq_poly(p0,p1)
#define __arm_vmullbq_poly(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmullbq_poly_p8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmullbq_poly_p16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)));})
f9355dee | 21326 | |
e3678b44 SP |
/* Polymorphic vmulltq_int: integer multiply-long (top) over signed and
   unsigned 8/16/32-bit element vectors.  */
#define vmulltq_int(p0,p1) __arm_vmulltq_int(p0,p1)
#define __arm_vmulltq_int(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulltq_int_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulltq_int_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulltq_int_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulltq_int_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulltq_int_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulltq_int_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
f9355dee | 21337 | |
e3678b44 SP |
/* Polymorphic vhaddq: halving add; scalar (_n_) and vector variants
   over all signed and unsigned element widths.  */
#define vhaddq(p0,p1) __arm_vhaddq(p0,p1)
#define __arm_vhaddq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vhaddq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vhaddq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vhaddq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vhaddq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vhaddq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vhaddq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhaddq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhaddq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhaddq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vhaddq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vhaddq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vhaddq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
f9355dee | 21354 | |
e3678b44 SP |
/* Polymorphic vhcaddq_rot270: halving complex add with 270-degree
   rotation; signed vector variants only.  */
#define vhcaddq_rot270(p0,p1) __arm_vhcaddq_rot270(p0,p1)
#define __arm_vhcaddq_rot270(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhcaddq_rot270_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhcaddq_rot270_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhcaddq_rot270_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
f9355dee | 21362 | |
e3678b44 SP |
/* Polymorphic vhcaddq_rot90: halving complex add with 90-degree
   rotation; signed vector variants only.  */
#define vhcaddq_rot90(p0,p1) __arm_vhcaddq_rot90(p0,p1)
#define __arm_vhcaddq_rot90(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhcaddq_rot90_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhcaddq_rot90_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhcaddq_rot90_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
f9355dee | 21370 | |
e3678b44 SP |
/* Polymorphic vhsubq: halving subtract; scalar (_n_) and vector
   variants over all signed and unsigned element widths.  */
#define vhsubq(p0,p1) __arm_vhsubq(p0,p1)
#define __arm_vhsubq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vhsubq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vhsubq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vhsubq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vhsubq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vhsubq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vhsubq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhsubq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhsubq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhsubq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vhsubq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vhsubq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vhsubq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
f9355dee | 21387 | |
e3678b44 SP |
/* Polymorphic vminq: element-wise minimum over signed and unsigned
   8/16/32-bit vectors.  */
#define vminq(p0,p1) __arm_vminq(p0,p1)
#define __arm_vminq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vminq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vminq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vminq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vminq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vminq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vminq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
f9355dee | 21398 | |
e3678b44 SP |
/* Polymorphic vminaq: minimum of an unsigned accumulator vector and
   the absolute value of a signed vector.  */
#define vminaq(p0,p1) __arm_vminaq(p0,p1)
#define __arm_vminaq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vminaq_s8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vminaq_s16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vminaq_s32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
f9355dee | 21406 | |
e3678b44 SP |
/* Polymorphic vmaxq: element-wise maximum over signed and unsigned
   8/16/32-bit vectors.  */
#define vmaxq(p0,p1) __arm_vmaxq(p0,p1)
#define __arm_vmaxq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmaxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmaxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmaxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmaxq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmaxq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmaxq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
f9355dee | 21417 | |
e3678b44 SP |
/* Polymorphic vmaxaq: maximum of an unsigned accumulator vector and
   the absolute value of a signed vector.  */
#define vmaxaq(p0,p1) __arm_vmaxaq(p0,p1)
#define __arm_vmaxaq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmaxaq_s8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmaxaq_s16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmaxaq_s32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
f9355dee | 21425 | |
e3678b44 SP |
/* Polymorphic vmovntq: narrow into the top half of the destination
   vector; signed and unsigned variants.  */
#define vmovntq(p0,p1) __arm_vmovntq(p0,p1)
#define __arm_vmovntq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vmovntq_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vmovntq_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vmovntq_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vmovntq_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
f9355dee | 21434 | |
e3678b44 SP |
/* Polymorphic vmovnbq: narrow into the bottom half of the destination
   vector; signed and unsigned variants.  */
#define vmovnbq(p0,p1) __arm_vmovnbq(p0,p1)
#define __arm_vmovnbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vmovnbq_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vmovnbq_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vmovnbq_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vmovnbq_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
f9355dee | 21443 | |
e3678b44 SP |
/* Polymorphic vmulhq: multiply returning the high half of each product;
   signed and unsigned 8/16/32-bit vector variants.  */
#define vmulhq(p0,p1) __arm_vmulhq(p0,p1)
#define __arm_vmulhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulhq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulhq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulhq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
f9355dee | 21454 | |
e3678b44 SP |
/* Polymorphic vmullbq_int: integer multiply-long (bottom) over signed
   and unsigned 8/16/32-bit element vectors.  */
#define vmullbq_int(p0,p1) __arm_vmullbq_int(p0,p1)
#define __arm_vmullbq_int(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmullbq_int_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmullbq_int_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmullbq_int_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmullbq_int_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmullbq_int_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmullbq_int_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
f9355dee | 21465 | |
e3678b44 SP |
/* Polymorphic vcmpgtq: greater-than compare producing a predicate;
   signed integer and (when MVE-FP is available) float variants, each
   in vector-vector and vector-scalar (_n_) forms.  */
#define vcmpgtq(p0,p1) __arm_vcmpgtq(p0,p1)
#define __arm_vcmpgtq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpgtq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpgtq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpgtq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpgtq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpgtq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpgtq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpgtq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpgtq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vcmpgtq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vcmpgtq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32_t)));})
f9355dee | 21480 | |
e3678b44 SP |
/* Polymorphic vbicq_m_n: predicated bit-clear with immediate; dispatch
   on the first operand only, p1 (immediate) and p2 (predicate) are
   passed through unchanged.  */
#define vbicq_m_n(p0,p1,p2) __arm_vbicq_m_n(p0,p1,p2)
#define __arm_vbicq_m_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vbicq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vbicq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vbicq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vbicq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})
21488 | ||
/* Polymorphic vqrshrnbq: saturating rounding shift-right narrow into
   the bottom half; p2 is the immediate shift amount, passed through.  */
#define vqrshrnbq(p0,p1,p2) __arm_vqrshrnbq(p0,p1,p2)
#define __arm_vqrshrnbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshrnbq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshrnbq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqrshrnbq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqrshrnbq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
f9355dee | 21497 | |
e3678b44 SP |
/* Polymorphic vqrshrunbq: _Generic dispatch on p0/p1 vector types; signed
   input narrows to an unsigned result (bottom half).  */
#define vqrshrunbq(p0,p1,p2) __arm_vqrshrunbq(p0,p1,p2)
#define __arm_vqrshrunbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshrunbq_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshrunbq_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
f9355dee | 21504 | |
e3678b44 SP |
/* Polymorphic vshlcq: _Generic dispatch on the vector type of p0 to the
   type-suffixed shift-left-with-carry intrinsic.  */
#define vshlcq(p0,p1,p2) __arm_vshlcq(p0,p1,p2)
#define __arm_vshlcq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlcq_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlcq_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlcq_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlcq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlcq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlcq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})
21514 | ||
/* Polymorphic vclsq_m: _Generic dispatch on p0/p1 vector types; signed-only
   predicated count-leading-sign-bits.  */
#define vclsq_m(p0,p1,p2) __arm_vclsq_m(p0,p1,p2)
#define __arm_vclsq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vclsq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vclsq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vclsq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
f9355dee | 21522 | |
e3678b44 SP |
/* Polymorphic vclzq_m: _Generic dispatch on p0/p1 vector types to the
   predicated count-leading-zeros intrinsic.  */
#define vclzq_m(p0,p1,p2) __arm_vclzq_m(p0,p1,p2)
#define __arm_vclzq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vclzq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vclzq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vclzq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vclzq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vclzq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vclzq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
f9355dee | 21533 | |
e3678b44 SP |
/* Polymorphic vmaxaq_m: _Generic dispatch; unsigned accumulator p0 with
   signed operand p1 (predicated max of absolute values).  */
#define vmaxaq_m(p0,p1,p2) __arm_vmaxaq_m(p0,p1,p2)
#define __arm_vmaxaq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmaxaq_m_s8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmaxaq_m_s16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmaxaq_m_s32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
f9355dee | 21541 | |
e3678b44 SP |
/* Polymorphic vminaq_m: _Generic dispatch; unsigned accumulator p0 with
   signed operand p1 (predicated min of absolute values).  */
#define vminaq_m(p0,p1,p2) __arm_vminaq_m(p0,p1,p2)
#define __arm_vminaq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vminaq_m_s8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vminaq_m_s16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vminaq_m_s32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
f9355dee | 21549 | |
e3678b44 SP |
/* Polymorphic vmlaq: _Generic dispatch on p0/p1 vector types and scalar p2 to
   the _n multiply-accumulate-by-scalar intrinsic.  */
#define vmlaq(p0,p1,p2) __arm_vmlaq(p0,p1,p2)
#define __arm_vmlaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vmlaq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vmlaq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vmlaq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vmlaq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vmlaq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vmlaq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t)));})
f9355dee | 21561 | |
e3678b44 SP |
/* Polymorphic vsriq: _Generic dispatch on p0/p1 vector types to the _n
   shift-right-and-insert intrinsic (p2 is the immediate shift count).  */
#define vsriq(p0,p1,p2) __arm_vsriq(p0,p1,p2)
#define __arm_vsriq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsriq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsriq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsriq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsriq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsriq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsriq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
f9355dee | 21572 | |
e3678b44 SP |
/* Polymorphic vsliq: _Generic dispatch on p0/p1 vector types to the _n
   shift-left-and-insert intrinsic (p2 is the immediate shift count).  */
#define vsliq(p0,p1,p2) __arm_vsliq(p0,p1,p2)
#define __arm_vsliq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsliq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsliq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsliq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsliq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsliq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsliq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
f9355dee | 21583 | |
e3678b44 SP |
/* Polymorphic vshlq_m_r: _Generic dispatch on the vector type of p0 to the
   predicated shift-left-by-register intrinsic.  */
#define vshlq_m_r(p0,p1,p2) __arm_vshlq_m_r(p0,p1,p2)
#define __arm_vshlq_m_r(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlq_m_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlq_m_r_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlq_m_r_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlq_m_r_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlq_m_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlq_m_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})
14782c81 | 21593 | |
e3678b44 SP |
/* Polymorphic vrshlq_m_n: _Generic dispatch on the vector type of p0 to the
   predicated rounding shift-left intrinsic.
   Fix: the original declared __p1 but passed raw p1 in five of the six
   branches and __p1 only in the u32 branch; use the captured __p1 uniformly
   so p1 is evaluated exactly once, matching the macro-hygiene convention of
   the sibling macros.  */
#define vrshlq_m_n(p0,p1,p2) __arm_vrshlq_m_n(p0,p1,p2)
#define __arm_vrshlq_m_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vrshlq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __p1, p2), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vrshlq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __p1, p2), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vrshlq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __p1, p2), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrshlq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __p1, p2), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrshlq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __p1, p2), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrshlq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __p1, p2));})
14782c81 | 21604 | |
e3678b44 SP |
/* Polymorphic vqshlq_m_r: _Generic dispatch on the vector type of p0 to the
   predicated saturating shift-left-by-register intrinsic.  */
#define vqshlq_m_r(p0,p1,p2) __arm_vqshlq_m_r(p0,p1,p2)
#define __arm_vqshlq_m_r(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vqshlq_m_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vqshlq_m_r_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vqshlq_m_r_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vqshlq_m_r_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vqshlq_m_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vqshlq_m_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})
14782c81 | 21614 | |
e3678b44 SP |
/* Polymorphic vqrshlq_m_n: _Generic dispatch on the vector type of p0 to the
   predicated saturating rounding shift-left intrinsic.  */
#define vqrshlq_m_n(p0,p1,p2) __arm_vqrshlq_m_n(p0,p1,p2)
#define __arm_vqrshlq_m_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vqrshlq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vqrshlq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vqrshlq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vqrshlq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vqrshlq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vqrshlq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})
f9355dee | 21624 | |
e3678b44 SP |
/* Polymorphic vqrdmlsdhxq: _Generic dispatch on p0/p1/p2 vector types;
   signed-only rounding doubling multiply-subtract dual (exchange).  */
#define vqrdmlsdhxq(p0,p1,p2) __arm_vqrdmlsdhxq(p0,p1,p2)
#define __arm_vqrdmlsdhxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmlsdhxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmlsdhxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmlsdhxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
14782c81 | 21633 | |
e3678b44 SP |
/* Polymorphic vqrdmlsdhq: _Generic dispatch on p0/p1/p2 vector types;
   signed-only rounding doubling multiply-subtract dual.  */
#define vqrdmlsdhq(p0,p1,p2) __arm_vqrdmlsdhq(p0,p1,p2)
#define __arm_vqrdmlsdhq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmlsdhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmlsdhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmlsdhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
f9355dee | 21642 | |
e3678b44 SP |
/* Polymorphic vqrdmlashq: _Generic dispatch on p0/p1 vector types and scalar
   p2 to the _n rounding doubling multiply-accumulate intrinsic.  */
#define vqrdmlashq(p0,p1,p2) __arm_vqrdmlashq(p0,p1,p2)
#define __arm_vqrdmlashq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vqrdmlashq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vqrdmlashq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vqrdmlashq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vqrdmlashq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vqrdmlashq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vqrdmlashq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t)));})
f9355dee | 21654 | |
e3678b44 SP |
/* Polymorphic vqrdmlahq: _Generic dispatch on p0/p1 vector types and scalar
   p2 to the _n rounding doubling multiply-accumulate intrinsic.  */
#define vqrdmlahq(p0,p1,p2) __arm_vqrdmlahq(p0,p1,p2)
#define __arm_vqrdmlahq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vqrdmlahq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vqrdmlahq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vqrdmlahq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vqrdmlahq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vqrdmlahq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vqrdmlahq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t)));})
f9355dee | 21666 | |
e3678b44 SP |
/* Polymorphic vmlasq: _Generic dispatch on p0/p1 vector types and scalar p2
   to the _n multiply-accumulate-scalar intrinsic.  */
#define vmlasq(p0,p1,p2) __arm_vmlasq(p0,p1,p2)
#define __arm_vmlasq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vmlasq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vmlasq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vmlasq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vmlasq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vmlasq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vmlasq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t)));})
a50f6abf | 21678 | |
e3678b44 SP |
/* Polymorphic vqdmlahq: _Generic dispatch on p0/p1 vector types and scalar
   p2 to the _n saturating doubling multiply-accumulate intrinsic.  */
#define vqdmlahq(p0,p1,p2) __arm_vqdmlahq(p0,p1,p2)
#define __arm_vqdmlahq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vqdmlahq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vqdmlahq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vqdmlahq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vqdmlahq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vqdmlahq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vqdmlahq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t)));})
f9355dee | 21690 | |
e3678b44 SP |
/* Polymorphic vqrdmladhxq: _Generic dispatch on p0/p1/p2 vector types;
   signed-only rounding doubling multiply-add dual (exchange).  */
#define vqrdmladhxq(p0,p1,p2) __arm_vqrdmladhxq(p0,p1,p2)
#define __arm_vqrdmladhxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmladhxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmladhxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmladhxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
f9355dee | 21699 | |
e3678b44 SP |
/* Polymorphic vqrdmladhq: _Generic dispatch on p0/p1/p2 vector types;
   signed-only rounding doubling multiply-add dual.  */
#define vqrdmladhq(p0,p1,p2) __arm_vqrdmladhq(p0,p1,p2)
#define __arm_vqrdmladhq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmladhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmladhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmladhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
a50f6abf | 21708 | |
e3678b44 SP |
/* Polymorphic vqnegq_m: _Generic dispatch on p0/p1 vector types; signed-only
   predicated saturating negate.  */
#define vqnegq_m(p0,p1,p2) __arm_vqnegq_m(p0,p1,p2)
#define __arm_vqnegq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqnegq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqnegq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqnegq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
21716 | ||
/* Polymorphic vqdmlsdhxq: _Generic dispatch on p0/p1/p2 vector types;
   signed-only saturating doubling multiply-subtract dual (exchange).  */
#define vqdmlsdhxq(p0,p1,p2) __arm_vqdmlsdhxq(p0,p1,p2)
#define __arm_vqdmlsdhxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmlsdhxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmlsdhxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmlsdhxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
21725 | ||
/* Polymorphic vqdmlsdhq: _Generic dispatch on p0/p1/p2 vector types;
   signed-only saturating doubling multiply-subtract dual.  */
#define vqdmlsdhq(p0,p1,p2) __arm_vqdmlsdhq(p0,p1,p2)
#define __arm_vqdmlsdhq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmlsdhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmlsdhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmlsdhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
21734 | ||
/* Polymorphic vqdmladhxq: _Generic dispatch on p0/p1/p2 vector types;
   signed-only saturating doubling multiply-add dual (exchange).  */
#define vqdmladhxq(p0,p1,p2) __arm_vqdmladhxq(p0,p1,p2)
#define __arm_vqdmladhxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmladhxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmladhxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmladhxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
21743 | ||
/* Polymorphic vqdmladhq: _Generic dispatch on p0/p1/p2 vector types;
   signed-only saturating doubling multiply-add dual.  */
#define vqdmladhq(p0,p1,p2) __arm_vqdmladhq(p0,p1,p2)
#define __arm_vqdmladhq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmladhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmladhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmladhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
a50f6abf | 21752 | |
e3678b44 SP |
/* Polymorphic vmovlbq_m: _Generic dispatch on p0/p1 vector types; predicated
   widening move of the bottom half (narrow p1 into wide p0 lanes).  */
#define vmovlbq_m(p0,p1,p2) __arm_vmovlbq_m(p0,p1,p2)
#define __arm_vmovlbq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int8x16_t]: __arm_vmovlbq_m_s8 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t]: __arm_vmovlbq_m_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint8x16_t]: __arm_vmovlbq_m_u8 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t]: __arm_vmovlbq_m_u16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), p2));})
a50f6abf | 21761 | |
e3678b44 SP |
/* Polymorphic vmovnbq_m: _Generic dispatch on p0/p1 vector types; predicated
   narrowing move into the bottom half (wide p1 into narrow p0 lanes).  */
#define vmovnbq_m(p0,p1,p2) __arm_vmovnbq_m(p0,p1,p2)
#define __arm_vmovnbq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vmovnbq_m_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vmovnbq_m_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vmovnbq_m_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vmovnbq_m_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
a50f6abf | 21770 | |
e3678b44 SP |
/* Polymorphic vmovntq_m: _Generic dispatch on p0/p1 vector types; predicated
   narrowing move into the top half (wide p1 into narrow p0 lanes).  */
#define vmovntq_m(p0,p1,p2) __arm_vmovntq_m(p0,p1,p2)
#define __arm_vmovntq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vmovntq_m_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vmovntq_m_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vmovntq_m_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vmovntq_m_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
a50f6abf | 21779 | |
e3678b44 SP |
21780 | #define vmovltq_m(p0,p1,p2) __arm_vmovltq_m(p0,p1,p2) |
21781 | #define __arm_vmovltq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
f9355dee SP |
21782 | __typeof(p1) __p1 = (p1); \ |
21783 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
e3678b44 SP |
21784 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int8x16_t]: __arm_vmovltq_m_s8 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \ |
21785 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t]: __arm_vmovltq_m_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
21786 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint8x16_t]: __arm_vmovltq_m_u8 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \ | |
21787 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t]: __arm_vmovltq_m_u16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), p2));}) | |
a50f6abf | 21788 | |
e3678b44 SP |
21789 | #define vshrntq(p0,p1,p2) __arm_vshrntq(p0,p1,p2) |
21790 | #define __arm_vshrntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
f9355dee SP |
21791 | __typeof(p1) __p1 = (p1); \ |
21792 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
e3678b44 SP |
21793 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vshrntq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ |
21794 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vshrntq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ | |
21795 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vshrntq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ | |
21796 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vshrntq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) | |
a50f6abf | 21797 | |
e3678b44 SP |
21798 | #define vshrnbq(p0,p1,p2) __arm_vshrnbq(p0,p1,p2) |
21799 | #define __arm_vshrnbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
f9355dee SP |
21800 | __typeof(p1) __p1 = (p1); \ |
21801 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
e3678b44 SP |
21802 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vshrnbq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ |
21803 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vshrnbq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ | |
21804 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vshrnbq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ | |
21805 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vshrnbq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) | |
a50f6abf | 21806 | |
e3678b44 SP |
21807 | #define vrshrntq(p0,p1,p2) __arm_vrshrntq(p0,p1,p2) |
21808 | #define __arm_vrshrntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
f9355dee SP |
21809 | __typeof(p1) __p1 = (p1); \ |
21810 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
e3678b44 SP |
21811 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vrshrntq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ |
21812 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vrshrntq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ | |
21813 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vrshrntq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ | |
21814 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vrshrntq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) | |
a50f6abf | 21815 | |
e3678b44 SP |
21816 | #define vcvtaq_m(p0,p1,p2) __arm_vcvtaq_m(p0,p1,p2) |
21817 | #define __arm_vcvtaq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
f9355dee SP |
21818 | __typeof(p1) __p1 = (p1); \ |
21819 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
e3678b44 SP |
21820 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtaq_m_s16_f16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \ |
21821 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtaq_m_s32_f32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2), \ | |
21822 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtaq_m_u16_f16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \ | |
21823 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtaq_m_u32_f32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));}) | |
a50f6abf | 21824 | |
e3678b44 SP |
21825 | #define vcvtq_m(p0,p1,p2) __arm_vcvtq_m(p0,p1,p2) |
21826 | #define __arm_vcvtq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
f9355dee SP |
21827 | __typeof(p1) __p1 = (p1); \ |
21828 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
e3678b44 SP |
21829 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcvtq_m_f16_s16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ |
21830 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcvtq_m_f32_s32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ | |
21831 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcvtq_m_f16_u16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ | |
21832 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcvtq_m_f32_u32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \ | |
21833 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtq_m_s16_f16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \ | |
21834 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtq_m_s32_f32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2), \ | |
21835 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtq_m_u16_f16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \ | |
21836 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtq_m_u32_f32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));}) | |
a50f6abf | 21837 | |
db5db9d2 SP |
21838 | #define vcvtq_m_n(p0,p1,p2,p3) __arm_vcvtq_m_n(p0,p1,p2,p3) |
21839 | #define __arm_vcvtq_m_n(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ | |
21840 | __typeof(p1) __p1 = (p1); \ | |
21841 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
532e9e24 SP |
21842 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtq_m_n_s16_f16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2, p3), \ |
21843 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtq_m_n_s32_f32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2, p3), \ | |
21844 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtq_m_n_u16_f16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2, p3), \ | |
21845 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtq_m_n_u32_f32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2, p3), \ | |
db5db9d2 SP |
21846 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcvtq_m_n_f16_s16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \ |
21847 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcvtq_m_n_f32_s32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \ | |
21848 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcvtq_m_n_f16_u16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \ | |
21849 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcvtq_m_n_f32_u32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));}) | |
21850 | ||
e3678b44 SP |
21851 | #define vabsq_m(p0,p1,p2) __arm_vabsq_m(p0,p1,p2) |
21852 | #define __arm_vabsq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
f9355dee SP |
21853 | __typeof(p1) __p1 = (p1); \ |
21854 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
e3678b44 SP |
21855 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vabsq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \ |
21856 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vabsq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
21857 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vabsq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ | |
21858 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vabsq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \ | |
21859 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vabsq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));}) | |
6df4618c | 21860 | |
e3678b44 SP |
21861 | #define vcmlaq(p0,p1,p2) __arm_vcmlaq(p0,p1,p2) |
21862 | #define __arm_vcmlaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
21863 | __typeof(p1) __p1 = (p1); \ | |
21864 | __typeof(p2) __p2 = (p2); \ | |
21865 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
21866 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmlaq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t)), \ | |
21867 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmlaq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t)));}) | |
21868 | ||
21869 | #define vcmlaq_rot180(p0,p1,p2) __arm_vcmlaq_rot180(p0,p1,p2) | |
21870 | #define __arm_vcmlaq_rot180(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
21871 | __typeof(p1) __p1 = (p1); \ | |
21872 | __typeof(p2) __p2 = (p2); \ | |
21873 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
21874 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmlaq_rot180_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t)), \ | |
21875 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmlaq_rot180_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t)));}) | |
21876 | ||
21877 | #define vcmlaq_rot270(p0,p1,p2) __arm_vcmlaq_rot270(p0,p1,p2) | |
21878 | #define __arm_vcmlaq_rot270(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
21879 | __typeof(p1) __p1 = (p1); \ | |
21880 | __typeof(p2) __p2 = (p2); \ | |
21881 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
21882 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmlaq_rot270_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t)), \ | |
21883 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmlaq_rot270_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t)));}) | |
21884 | ||
21885 | #define vcmlaq_rot90(p0,p1,p2) __arm_vcmlaq_rot90(p0,p1,p2) | |
21886 | #define __arm_vcmlaq_rot90(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
21887 | __typeof(p1) __p1 = (p1); \ | |
21888 | __typeof(p2) __p2 = (p2); \ | |
21889 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
21890 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmlaq_rot90_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t)), \ | |
21891 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmlaq_rot90_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t)));}) | |
21892 | ||
e3678b44 SP |
21893 | #define vrndxq_m(p0,p1,p2) __arm_vrndxq_m(p0,p1,p2) |
21894 | #define __arm_vrndxq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
f9355dee SP |
21895 | __typeof(p1) __p1 = (p1); \ |
21896 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
e3678b44 SP |
21897 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vrndxq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \ |
21898 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vrndxq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));}) | |
6df4618c | 21899 | |
e3678b44 SP |
21900 | #define vrndq_m(p0,p1,p2) __arm_vrndq_m(p0,p1,p2) |
21901 | #define __arm_vrndq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
f9355dee SP |
21902 | __typeof(p1) __p1 = (p1); \ |
21903 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
e3678b44 SP |
21904 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vrndq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \ |
21905 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vrndq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));}) | |
6df4618c | 21906 | |
e3678b44 SP |
21907 | #define vrndpq_m(p0,p1,p2) __arm_vrndpq_m(p0,p1,p2) |
21908 | #define __arm_vrndpq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
f9355dee SP |
21909 | __typeof(p1) __p1 = (p1); \ |
21910 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
e3678b44 SP |
21911 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vrndpq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \ |
21912 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vrndpq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));}) | |
6df4618c | 21913 | |
e3678b44 SP |
21914 | #define vcmpgtq_m(p0,p1,p2) __arm_vcmpgtq_m(p0,p1,p2) |
21915 | #define __arm_vcmpgtq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
f9355dee SP |
21916 | __typeof(p1) __p1 = (p1); \ |
21917 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
e3678b44 SP |
21918 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpgtq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \ |
21919 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpgtq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
21920 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpgtq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ | |
21921 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpgtq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t), p2), \ | |
21922 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpgtq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t), p2), \ | |
21923 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpgtq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t), p2), \ | |
21924 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vcmpgtq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16_t), p2), \ | |
21925 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vcmpgtq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32_t), p2), \ | |
21926 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpgtq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \ | |
21927 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpgtq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));}) | |
6df4618c | 21928 | |
e3678b44 SP |
21929 | #define vcmpleq_m(p0,p1,p2) __arm_vcmpleq_m(p0,p1,p2) |
21930 | #define __arm_vcmpleq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
f9355dee SP |
21931 | __typeof(p1) __p1 = (p1); \ |
21932 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
e3678b44 SP |
21933 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpleq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \ |
21934 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpleq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
21935 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpleq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ | |
21936 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpleq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \ | |
21937 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpleq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2), \ | |
21938 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpleq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t), p2), \ | |
21939 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpleq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t), p2), \ | |
21940 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpleq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t), p2), \ | |
21941 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vcmpleq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16_t), p2), \ | |
21942 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vcmpleq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32_t), p2));}) | |
6df4618c | 21943 | |
e3678b44 SP |
21944 | #define vcmpltq_m(p0,p1,p2) __arm_vcmpltq_m(p0,p1,p2) |
21945 | #define __arm_vcmpltq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
f9355dee SP |
21946 | __typeof(p1) __p1 = (p1); \ |
21947 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
e3678b44 SP |
21948 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpltq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \ |
21949 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpltq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
21950 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpltq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ | |
21951 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpltq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \ | |
21952 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpltq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2), \ | |
21953 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpltq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t), p2), \ | |
21954 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpltq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t), p2), \ | |
21955 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpltq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t), p2), \ | |
21956 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vcmpltq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16_t), p2), \ | |
21957 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vcmpltq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32_t), p2));}) | |
6df4618c | 21958 | |
e3678b44 SP |
21959 | #define vcmpneq_m(p0,p1,p2) __arm_vcmpneq_m(p0,p1,p2) |
21960 | #define __arm_vcmpneq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
f9355dee SP |
21961 | __typeof(p1) __p1 = (p1); \ |
21962 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
e3678b44 SP |
21963 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpneq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \ |
21964 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpneq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
21965 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpneq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ | |
21966 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpneq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \ | |
21967 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpneq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ | |
21968 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpneq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \ | |
21969 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpneq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \ | |
21970 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpneq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2), \ | |
21971 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpneq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t), p2), \ | |
21972 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpneq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t), p2), \ | |
21973 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpneq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t), p2), \ | |
21974 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vcmpneq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t), p2), \ | |
21975 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vcmpneq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t), p2), \ | |
21976 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vcmpneq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t), p2), \ | |
21977 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vcmpneq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16_t), p2), \ | |
21978 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vcmpneq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32_t), p2));}) | |
a50f6abf | 21979 | |
e3678b44 SP |
21980 | #define vcvtbq_m(p0,p1,p2) __arm_vcvtbq_m(p0,p1,p2) |
21981 | #define __arm_vcvtbq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
4be8cf77 SP |
21982 | __typeof(p1) __p1 = (p1); \ |
21983 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
e3678b44 SP |
21984 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float16x8_t]: __arm_vcvtbq_m_f32_f16 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \ |
21985 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float32x4_t]: __arm_vcvtbq_m_f16_f32 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float32x4_t), p2));}) | |
4be8cf77 | 21986 | |
e3678b44 SP |
21987 | #define vcvttq_m(p0,p1,p2) __arm_vcvttq_m(p0,p1,p2) |
21988 | #define __arm_vcvttq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
f9355dee SP |
21989 | __typeof(p1) __p1 = (p1); \ |
21990 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
e3678b44 SP |
21991 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float16x8_t]: __arm_vcvttq_m_f32_f16 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \ |
21992 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float32x4_t]: __arm_vcvttq_m_f16_f32 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float32x4_t), p2));}) | |
4be8cf77 | 21993 | |
e3678b44 SP |
21994 | #define vcvtmq_m(p0,p1,p2) __arm_vcvtmq_m(p0,p1,p2) |
21995 | #define __arm_vcvtmq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
d71dba7b SP |
21996 | __typeof(p1) __p1 = (p1); \ |
21997 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
e3678b44 SP |
21998 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtmq_m_s16_f16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \ |
21999 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtmq_m_s32_f32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2), \ | |
22000 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtmq_m_u16_f16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \ | |
22001 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtmq_m_u32_f32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));}) | |
d71dba7b | 22002 | |
e3678b44 SP |
22003 | #define vcvtnq_m(p0,p1,p2) __arm_vcvtnq_m(p0,p1,p2) |
22004 | #define __arm_vcvtnq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
f9355dee SP |
22005 | __typeof(p1) __p1 = (p1); \ |
22006 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
e3678b44 SP |
22007 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtnq_m_s16_f16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \ |
22008 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtnq_m_s32_f32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2), \ | |
22009 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtnq_m_u16_f16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \ | |
22010 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtnq_m_u32_f32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));}) | |
22011 | ||
22012 | #define vcvtpq_m(p0,p1,p2) __arm_vcvtpq_m(p0,p1,p2) | |
22013 | #define __arm_vcvtpq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
22014 | __typeof(p1) __p1 = (p1); \ | |
22015 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
22016 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtpq_m_s16_f16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \ | |
22017 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtpq_m_s32_f32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2), \ | |
22018 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtpq_m_u16_f16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \ | |
22019 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtpq_m_u32_f32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));}) | |
22020 | ||
22021 | #define vcmpltq(p0,p1) __arm_vcmpltq(p0,p1) | |
22022 | #define __arm_vcmpltq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ | |
22023 | __typeof(p1) __p1 = (p1); \ | |
22024 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
22025 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpltq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \ | |
22026 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpltq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \ | |
22027 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpltq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \ | |
22028 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vcmpltq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16_t)), \ | |
22029 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vcmpltq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32_t)), \ | |
22030 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpltq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \ | |
22031 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpltq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ | |
22032 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpltq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \ | |
22033 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpltq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \ | |
22034 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpltq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));}) | |
22035 | ||
/* vdupq_m: polymorphic predicated duplicate.  _Generic dispatches on the
   __ARM_mve_typeid of (p0, p1) — vector destination and matching scalar —
   to the type-specific __arm_vdupq_m_n_<type> intrinsic; the predicate p2
   is passed through unchanged.  */
#define vdupq_m(p0,p1,p2) __arm_vdupq_m(p0,p1,p2)
#define __arm_vdupq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vdupq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vdupq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vdupq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vdupq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vdupq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vdupq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vdupq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vdupq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32_t), p2));})
22048 | ||
e3678b44 SP |
/* vfmaq: polymorphic fused multiply-accumulate.  Dispatches on the type ids
   of all three operands: (vector, vector, scalar) selects the _n_ variant,
   (vector, vector, vector) the plain vector variant.  */
#define vfmaq(p0,p1,p2) __arm_vfmaq(p0,p1,p2)
#define __arm_vfmaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vfmaq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vfmaq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vfmaq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vfmaq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t)));})
d71dba7b | 22058 | |
e3678b44 SP |
/* vfmsq: polymorphic fused multiply-subtract; vector-only overloads for
   f16/f32, selected by the type ids of the three operands.  */
#define vfmsq(p0,p1,p2) __arm_vfmsq(p0,p1,p2)
#define __arm_vfmsq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vfmsq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vfmsq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t)));})
f9355dee | 22066 | |
261014a1 SP |
/* vfmasq: polymorphic fused multiply-accumulate with scalar addend;
   only the (vector, vector, scalar) _n_ forms exist.  */
#define vfmasq(p0,p1,p2) __arm_vfmasq(p0,p1,p2)
#define __arm_vfmasq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vfmasq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vfmasq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32_t)));})
22074 | ||
e3678b44 SP |
/* vmaxnmaq_m: predicated overload; dispatches on the two vector operand
   type ids (f16/f32); predicate p2 passes through unchanged.  */
#define vmaxnmaq_m(p0,p1,p2) __arm_vmaxnmaq_m(p0,p1,p2)
#define __arm_vmaxnmaq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmaxnmaq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmaxnmaq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
8165795c | 22081 | |
e3678b44 SP |
/* vmaxnmavq_m: predicated overload dispatching on operand type ids
   (f16/f32).  NOTE(review): the association keys here use vector type ids
   for both operands while the _p variant below keys the first operand as a
   scalar — verify against the intrinsic prototypes.  */
#define vmaxnmavq_m(p0,p1,p2) __arm_vmaxnmavq_m(p0,p1,p2)
#define __arm_vmaxnmavq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmaxnmavq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmaxnmavq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
8165795c | 22088 | |
e3678b44 SP |
/* vmaxnmvq_m: predicated overload dispatching on operand type ids
   (f16/f32); predicate p2 passes through unchanged.  */
#define vmaxnmvq_m(p0,p1,p2) __arm_vmaxnmvq_m(p0,p1,p2)
#define __arm_vmaxnmvq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmaxnmvq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmaxnmvq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
8165795c | 22095 | |
e3678b44 SP |
/* vmaxnmavq_p: predicated overload; keys on (scalar, vector) type ids —
   p0 is coerced to the element scalar type, p1 to the vector type.  */
#define vmaxnmavq_p(p0,p1,p2) __arm_vmaxnmavq_p(p0,p1,p2)
#define __arm_vmaxnmavq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16_t][__ARM_mve_type_float16x8_t]: __arm_vmaxnmavq_p_f16 (__ARM_mve_coerce(__p0, float16_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32_t][__ARM_mve_type_float32x4_t]: __arm_vmaxnmavq_p_f32 (__ARM_mve_coerce(__p0, float32_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
8165795c | 22102 | |
e3678b44 SP |
/* vmaxnmvq_p: predicated overload; keys on (scalar, vector) type ids,
   same shape as vmaxnmavq_p above.  */
#define vmaxnmvq_p(p0,p1,p2) __arm_vmaxnmvq_p(p0,p1,p2)
#define __arm_vmaxnmvq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16_t][__ARM_mve_type_float16x8_t]: __arm_vmaxnmvq_p_f16 (__ARM_mve_coerce(__p0, float16_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32_t][__ARM_mve_type_float32x4_t]: __arm_vmaxnmvq_p_f32 (__ARM_mve_coerce(__p0, float32_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
8165795c | 22109 | |
e3678b44 SP |
/* vminnmaq_m: predicated overload; dispatches on the two vector operand
   type ids (f16/f32); predicate p2 passes through unchanged.  */
#define vminnmaq_m(p0,p1,p2) __arm_vminnmaq_m(p0,p1,p2)
#define __arm_vminnmaq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vminnmaq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vminnmaq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
8165795c | 22116 | |
e3678b44 SP |
/* vminnmavq_p: predicated overload; keys on (scalar, vector) type ids.  */
#define vminnmavq_p(p0,p1,p2) __arm_vminnmavq_p(p0,p1,p2)
#define __arm_vminnmavq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16_t][__ARM_mve_type_float16x8_t]: __arm_vminnmavq_p_f16 (__ARM_mve_coerce(__p0, float16_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32_t][__ARM_mve_type_float32x4_t]: __arm_vminnmavq_p_f32 (__ARM_mve_coerce(__p0, float32_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
8165795c | 22123 | |
e3678b44 SP |
/* vminnmvq_p: predicated overload; keys on (scalar, vector) type ids.  */
#define vminnmvq_p(p0,p1,p2) __arm_vminnmvq_p(p0,p1,p2)
#define __arm_vminnmvq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16_t][__ARM_mve_type_float16x8_t]: __arm_vminnmvq_p_f16 (__ARM_mve_coerce(__p0, float16_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32_t][__ARM_mve_type_float32x4_t]: __arm_vminnmvq_p_f32 (__ARM_mve_coerce(__p0, float32_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
8165795c | 22130 | |
e3678b44 SP |
/* vrndnq_m: predicated overload; dispatches on the two vector operand
   type ids (f16/f32) and passes the predicate p2 through unchanged.

   Fix: the original declared a local copy `__typeof(p2) __p2 = (p2);`
   but used it only in the f32 arm, while the f16 arm passed the raw
   macro argument `p2`.  Because the `__p2` initializer always ran, a
   side-effecting p2 was evaluated TWICE when the f16 arm was selected
   (once in the initializer, once in the call).  Dropping the copy and
   passing p2 directly in both arms evaluates it exactly once and
   matches the sibling macros vrndaq_m / vrndmq_m.  */
#define vrndnq_m(p0,p1,p2) __arm_vrndnq_m(p0,p1,p2)
#define __arm_vrndnq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vrndnq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vrndnq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
8165795c | 22138 | |
e3678b44 SP |
/* vrndaq_m: predicated overload; dispatches on the two vector operand
   type ids (f16/f32); predicate p2 passes through unchanged.  */
#define vrndaq_m(p0,p1,p2) __arm_vrndaq_m(p0,p1,p2)
#define __arm_vrndaq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vrndaq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vrndaq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
8165795c | 22145 | |
e3678b44 SP |
/* vrndmq_m: predicated overload; dispatches on the two vector operand
   type ids (f16/f32); predicate p2 passes through unchanged.  */
#define vrndmq_m(p0,p1,p2) __arm_vrndmq_m(p0,p1,p2)
#define __arm_vrndmq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vrndmq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vrndmq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
8165795c | 22152 | |
e3678b44 SP |
/* vrev64q_m: predicated overload covering all eight element types that
   fit a 64-bit reversal group (s8/s16/s32, u8/u16/u32, f16/f32);
   predicate p2 passes through unchanged.  */
#define vrev64q_m(p0,p1,p2) __arm_vrev64q_m(p0,p1,p2)
#define __arm_vrev64q_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrev64q_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrev64q_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrev64q_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrev64q_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrev64q_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrev64q_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vrev64q_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vrev64q_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
8165795c | 22165 | |
e3678b44 SP |
/* vrev32q_m: predicated overload; only element types narrower than 32
   bits appear (s8/s16, u8/u16, f16) — 32-bit elements cannot be
   reversed within a 32-bit group.  */
#define vrev32q_m(p0,p1,p2) __arm_vrev32q_m(p0,p1,p2)
#define __arm_vrev32q_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrev32q_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrev32q_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrev32q_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrev32q_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vrev32q_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2));})
8165795c | 22175 | |
e3678b44 SP |
/* vpselq: polymorphic predicated select; the widest overload set in this
   group — covers s8..s64, u8..u64 and f16/f32; p2 is the selecting
   predicate, passed through unchanged.  */
#define vpselq(p0,p1,p2) __arm_vpselq(p0,p1,p2)
#define __arm_vpselq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vpselq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vpselq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vpselq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_int64x2_t][__ARM_mve_type_int64x2_t]: __arm_vpselq_s64 (__ARM_mve_coerce(__p0, int64x2_t), __ARM_mve_coerce(__p1, int64x2_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vpselq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vpselq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vpselq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
  int (*)[__ARM_mve_type_uint64x2_t][__ARM_mve_type_uint64x2_t]: __arm_vpselq_u64 (__ARM_mve_coerce(__p0, uint64x2_t), __ARM_mve_coerce(__p1, uint64x2_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vpselq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vpselq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
8165795c | 22190 | |
e3678b44 SP |
/* vcmpgeq: polymorphic compare-ge; (vector, vector) pairs select the
   plain variants, (vector, scalar) pairs select the _n_ variants; signed
   integer and float element types only.  */
#define vcmpgeq(p0,p1) __arm_vcmpgeq(p0,p1)
#define __arm_vcmpgeq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpgeq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpgeq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpgeq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpgeq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpgeq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpgeq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpgeq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpgeq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vcmpgeq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vcmpgeq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32_t)));})
8165795c | 22205 | |
e3678b44 SP |
/* vcmpeqq_m: predicated polymorphic compare-eq; covers all signed,
   unsigned and float element types, each in both (vector, vector) and
   (vector, scalar) _n_ forms; predicate p2 passes through unchanged.  */
#define vcmpeqq_m(p0,p1,p2) __arm_vcmpeqq_m(p0,p1,p2)
#define __arm_vcmpeqq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpeqq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpeqq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpeqq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpeqq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpeqq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpeqq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpeqq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpeqq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpeqq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vcmpeqq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vcmpeqq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vcmpeqq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpeqq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpeqq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vcmpeqq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vcmpeqq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32_t), p2));})
8165795c | 22226 | |
e3678b44 SP |
/* vcmpgtq: polymorphic compare-gt; mirrors vcmpgeq — signed integer and
   float element types in (vector, vector) and (vector, scalar) _n_
   forms.  */
#define vcmpgtq(p0,p1) __arm_vcmpgtq(p0,p1)
#define __arm_vcmpgtq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpgtq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpgtq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpgtq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpgtq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpgtq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpgtq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpgtq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpgtq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vcmpgtq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vcmpgtq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32_t)));})
8165795c | 22241 | |
e3678b44 SP |
/* vrshrnbq: narrowing overload; keys are (narrow vector, wide vector)
   pairs of the same signedness; the shift amount p2 is forwarded as-is
   (immediate operand).  */
#define vrshrnbq(p0,p1,p2) __arm_vrshrnbq(p0,p1,p2)
#define __arm_vrshrnbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vrshrnbq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vrshrnbq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vrshrnbq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vrshrnbq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
22250 | ||
/* vrev16q_m: predicated overload; only 8-bit element types are valid for
   a 16-bit reversal group (s8/u8).  */
#define vrev16q_m(p0,p1,p2) __arm_vrev16q_m(p0,p1,p2)
#define __arm_vrev16q_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrev16q_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrev16q_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2));})
22257 | ||
/* vqshruntq: signed-to-unsigned narrowing overload; keys are
   (unsigned narrow vector, signed wide vector); shift amount p2 is
   forwarded as-is.  */
#define vqshruntq(p0,p1,p2) __arm_vqshruntq(p0,p1,p2)
#define __arm_vqshruntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshruntq_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshruntq_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
22264 | ||
/* vqshrunbq_n: signed-to-unsigned narrowing overload (bottom half);
   keys are (unsigned narrow vector, signed wide vector).  */
#define vqshrunbq_n(p0,p1,p2) __arm_vqshrunbq_n(p0,p1,p2)
#define __arm_vqshrunbq_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshrunbq_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshrunbq_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
22271 | ||
/* vqshrnbq: saturating narrowing overload; keys are (narrow vector,
   wide vector) pairs of the same signedness.  */
#define vqshrnbq(p0,p1,p2) __arm_vqshrnbq(p0,p1,p2)
#define __arm_vqshrnbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshrnbq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshrnbq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqshrnbq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqshrnbq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
22280 | ||
/* vqshrntq: saturating narrowing overload (top half); same key shape as
   vqshrnbq above.  */
#define vqshrntq(p0,p1,p2) __arm_vqshrntq(p0,p1,p2)
#define __arm_vqshrntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshrntq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshrntq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqshrntq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqshrntq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
22289 | ||
/* vqrshruntq: rounding signed-to-unsigned narrowing overload; keys are
   (unsigned narrow vector, signed wide vector).  */
#define vqrshruntq(p0,p1,p2) __arm_vqrshruntq(p0,p1,p2)
#define __arm_vqrshruntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshruntq_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshruntq_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
22296 | ||
/* vqmovnbq_m: predicated saturating narrowing move overload; keys are
   (narrow vector, wide vector) pairs of the same signedness.  */
#define vqmovnbq_m(p0,p1,p2) __arm_vqmovnbq_m(p0,p1,p2)
#define __arm_vqmovnbq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovnbq_m_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovnbq_m_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqmovnbq_m_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqmovnbq_m_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
22305 | ||
/* vqmovntq_m: predicated saturating narrowing move overload (top half);
   same key shape as vqmovnbq_m above.  */
#define vqmovntq_m(p0,p1,p2) __arm_vqmovntq_m(p0,p1,p2)
#define __arm_vqmovntq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovntq_m_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovntq_m_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqmovntq_m_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqmovntq_m_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
22314 | ||
/* vqmovunbq_m: predicated signed-to-unsigned saturating narrowing move;
   keys are (unsigned narrow vector, signed wide vector).  */
#define vqmovunbq_m(p0,p1,p2) __arm_vqmovunbq_m(p0,p1,p2)
#define __arm_vqmovunbq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovunbq_m_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovunbq_m_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
22321 | ||
/* vqmovuntq_m: predicated signed-to-unsigned saturating narrowing move
   (top half); same key shape as vqmovunbq_m above.  */
#define vqmovuntq_m(p0,p1,p2) __arm_vqmovuntq_m(p0,p1,p2)
#define __arm_vqmovuntq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovuntq_m_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovuntq_m_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
22328 | ||
/* Polymorphic vqrshrntq: dispatch to the _n_ (immediate-shift) intrinsics;
   p2 is the shift amount, forwarded unchanged.  */
#define vqrshrntq(p0,p1,p2) __arm_vqrshrntq(p0,p1,p2)
#define __arm_vqrshrntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshrntq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshrntq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqrshrntq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqrshrntq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
22337 | ||
/* Polymorphic vqrshruntq: signed-input variants only; p2 is the shift
   amount, forwarded unchanged.  */
#define vqrshruntq(p0,p1,p2) __arm_vqrshruntq(p0,p1,p2)
#define __arm_vqrshruntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshruntq_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshruntq_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
22344 | ||
/* Polymorphic vnegq_m: signed-integer and floating-point vector variants;
   dispatch on the __ARM_mve_typeid of both vector arguments.  */
#define vnegq_m(p0,p1,p2) __arm_vnegq_m(p0,p1,p2)
#define __arm_vnegq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vnegq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vnegq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vnegq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vnegq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vnegq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
22354 | ||
/* Polymorphic vcmpgeq_m: vector/vector forms dispatch to the plain
   intrinsics, vector/scalar forms to the _n_ intrinsics; signed-integer
   and floating-point element types.  */
#define vcmpgeq_m(p0,p1,p2) __arm_vcmpgeq_m(p0,p1,p2)
#define __arm_vcmpgeq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpgeq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpgeq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpgeq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpgeq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpgeq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpgeq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vcmpgeq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vcmpgeq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpgeq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpgeq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
22369 | ||
/* Polymorphic vabdq_m: four-argument predicated form (inactive-value,
   two operands, p3 forwarded unchanged); all integer and float vector
   element types.  */
#define vabdq_m(p0,p1,p2,p3) __arm_vabdq_m(p0,p1,p2,p3)
#define __arm_vabdq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vabdq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vabdq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vabdq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vabdq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vabdq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vabdq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vabdq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vabdq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
22383 | ||
/* Polymorphic vaddq_m: vector/vector forms dispatch to the plain
   intrinsics, vector/scalar forms to the _n_ intrinsics; all integer and
   float element types; p3 forwarded unchanged.  */
#define vaddq_m(p0,p1,p2,p3) __arm_vaddq_m(p0,p1,p2,p3)
#define __arm_vaddq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vaddq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vaddq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vaddq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vaddq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vaddq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vaddq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vaddq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vaddq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vaddq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vaddq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vaddq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vaddq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vaddq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vaddq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vaddq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vaddq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32_t), p3));})
22405 | ||
/* Polymorphic vandq_m: all integer and float vector element types;
   p3 forwarded unchanged.  */
#define vandq_m(p0,p1,p2,p3) __arm_vandq_m(p0,p1,p2,p3)
#define __arm_vandq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vandq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vandq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vandq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vandq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vandq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vandq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vandq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vandq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
22419 | ||
/* Polymorphic vbicq_m: all integer and float vector element types;
   p3 forwarded unchanged.  */
#define vbicq_m(p0,p1,p2,p3) __arm_vbicq_m(p0,p1,p2,p3)
#define __arm_vbicq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vbicq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vbicq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vbicq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vbicq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vbicq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vbicq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vbicq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vbicq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
22433 | ||
/* Polymorphic vbrsrq_m: dispatches to the _n_ intrinsics on the two vector
   arguments only; p2 (scalar) and p3 are forwarded unchanged.  */
#define vbrsrq_m(p0,p1,p2,p3) __arm_vbrsrq_m(p0,p1,p2,p3)
#define __arm_vbrsrq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vbrsrq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vbrsrq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vbrsrq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vbrsrq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vbrsrq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vbrsrq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vbrsrq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vbrsrq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2, p3));})
22446 | ||
/* Polymorphic vcaddq_rot270_m: all integer and float vector element types;
   p3 forwarded unchanged.  */
#define vcaddq_rot270_m(p0,p1,p2,p3) __arm_vcaddq_rot270_m(p0,p1,p2,p3)
#define __arm_vcaddq_rot270_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot270_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot270_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot270_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot270_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot270_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot270_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcaddq_rot270_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcaddq_rot270_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
22460 | ||
/* Polymorphic vcaddq_rot90_m: all integer and float vector element types;
   p3 forwarded unchanged.  */
#define vcaddq_rot90_m(p0,p1,p2,p3) __arm_vcaddq_rot90_m(p0,p1,p2,p3)
#define __arm_vcaddq_rot90_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot90_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot90_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot90_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot90_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot90_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot90_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcaddq_rot90_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcaddq_rot90_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
22474 | ||
/* Polymorphic vcmlaq_m: floating-point only; p3 forwarded unchanged.  */
#define vcmlaq_m(p0,p1,p2,p3) __arm_vcmlaq_m(p0,p1,p2,p3)
#define __arm_vcmlaq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmlaq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmlaq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
22482 | ||
/* Polymorphic vcmlaq_rot180_m: floating-point only; p3 forwarded
   unchanged.  */
#define vcmlaq_rot180_m(p0,p1,p2,p3) __arm_vcmlaq_rot180_m(p0,p1,p2,p3)
#define __arm_vcmlaq_rot180_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmlaq_rot180_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmlaq_rot180_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
22490 | ||
/* Polymorphic vcmlaq_rot270_m: floating-point only; p3 forwarded
   unchanged.  */
#define vcmlaq_rot270_m(p0,p1,p2,p3) __arm_vcmlaq_rot270_m(p0,p1,p2,p3)
#define __arm_vcmlaq_rot270_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmlaq_rot270_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmlaq_rot270_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
22498 | ||
/* Polymorphic vcmlaq_rot90_m: floating-point only; p3 forwarded
   unchanged.  */
#define vcmlaq_rot90_m(p0,p1,p2,p3) __arm_vcmlaq_rot90_m(p0,p1,p2,p3)
#define __arm_vcmlaq_rot90_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmlaq_rot90_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmlaq_rot90_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
8165795c | 22506 | |
532e9e24 SP |
/* Polymorphic vcmulq_m: floating-point only; p3 forwarded unchanged.  */
#define vcmulq_m(p0,p1,p2,p3) __arm_vcmulq_m(p0,p1,p2,p3)
#define __arm_vcmulq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
8165795c | 22514 | |
532e9e24 SP |
/* Polymorphic vcmulq_rot180_m: floating-point only; p3 forwarded
   unchanged.  */
#define vcmulq_rot180_m(p0,p1,p2,p3) __arm_vcmulq_rot180_m(p0,p1,p2,p3)
#define __arm_vcmulq_rot180_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_rot180_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_rot180_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
8165795c | 22522 | |
532e9e24 SP |
/* Polymorphic vcmulq_rot270_m: floating-point only; p3 forwarded
   unchanged.  */
#define vcmulq_rot270_m(p0,p1,p2,p3) __arm_vcmulq_rot270_m(p0,p1,p2,p3)
#define __arm_vcmulq_rot270_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_rot270_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_rot270_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
8165795c | 22530 | |
532e9e24 SP |
/* Polymorphic vcmulq_rot90_m: floating-point only; p3 forwarded
   unchanged.  Formatting normalized to match the sibling vcmulq_* macros:
   removed the stray space in the _Generic controlling expression
   ("] [" -> "][") and added the conventional space before the call
   parentheses of the type-specific intrinsics.  */
#define vcmulq_rot90_m(p0,p1,p2,p3) __arm_vcmulq_rot90_m(p0,p1,p2,p3)
#define __arm_vcmulq_rot90_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_rot90_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_rot90_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
33203b4c | 22538 | |
532e9e24 SP |
/* Polymorphic veorq_m: all integer and float vector element types;
   p3 forwarded unchanged.  */
#define veorq_m(p0,p1,p2,p3) __arm_veorq_m(p0,p1,p2,p3)
#define __arm_veorq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_veorq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_veorq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_veorq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_veorq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_veorq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_veorq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_veorq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_veorq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
33203b4c | 22552 | |
532e9e24 SP |
/* Polymorphic vfmaq_m: floating-point only; vector/vector/vector forms
   dispatch to the plain intrinsics, vector/vector/scalar forms to the _n_
   intrinsics; p3 forwarded unchanged.  */
#define vfmaq_m(p0,p1,p2,p3) __arm_vfmaq_m(p0,p1,p2,p3)
#define __arm_vfmaq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vfmaq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vfmaq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vfmaq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vfmaq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32_t), p3));})
0dad5b33 | 22562 | |
532e9e24 SP |
/* vfmasq_m: predicated fused multiply-accumulate with scalar addend
   (VFMAS).  Only vector * vector + scalar forms exist (_n_ suffix),
   for f16 and f32.  p3 is the predicate mask.  */
#define vfmasq_m(p0,p1,p2,p3) __arm_vfmasq_m(p0,p1,p2,p3)
#define __arm_vfmasq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vfmasq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vfmasq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32_t), p3));})
8165795c | 22570 | |
532e9e24 SP |
/* vfmsq_m: predicated fused multiply-subtract (VFMS).  Vector forms only,
   f16 and f32.  p3 is the predicate mask.  */
#define vfmsq_m(p0,p1,p2,p3) __arm_vfmsq_m(p0,p1,p2,p3)
#define __arm_vfmsq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vfmsq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vfmsq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
8165795c | 22578 | |
532e9e24 SP |
/* vmaxnmq_m: predicated floating-point maximum following IEEE 754
   maxNum semantics (VMAXNM).  f16 and f32 vector forms.  */
#define vmaxnmq_m(p0,p1,p2,p3) __arm_vmaxnmq_m(p0,p1,p2,p3)
#define __arm_vmaxnmq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmaxnmq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmaxnmq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
0dad5b33 | 22586 | |
532e9e24 SP |
/* vminnmq_m: predicated floating-point minimum following IEEE 754
   minNum semantics (VMINNM).  f16 and f32 vector forms.  */
#define vminnmq_m(p0,p1,p2,p3) __arm_vminnmq_m(p0,p1,p2,p3)
#define __arm_vminnmq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vminnmq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vminnmq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
0dad5b33 | 22594 | |
532e9e24 SP |
/* vmulq_m: predicated multiply (VMUL).  Full set of element types:
   vector * vector for s8/s16/s32, u8/u16/u32, f16/f32, plus the
   vector * scalar (_n_) forms of each.  p0 is the inactive-lane source,
   p3 the predicate mask.  */
#define vmulq_m(p0,p1,p2,p3) __arm_vmulq_m(p0,p1,p2,p3)
#define __arm_vmulq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmulq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmulq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vmulq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vmulq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vmulq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vmulq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vmulq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vmulq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vmulq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vmulq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32_t), p3));})
0dad5b33 | 22616 | |
532e9e24 SP |
/* vornq_m: predicated bitwise OR-NOT (VORN: p1 | ~p2).  Vector forms for
   all integer element types plus f16/f32 (bitwise on the float bits).  */
#define vornq_m(p0,p1,p2,p3) __arm_vornq_m(p0,p1,p2,p3)
#define __arm_vornq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vornq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vornq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vornq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vornq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vornq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vornq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vornq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vornq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
0dad5b33 | 22630 | |
532e9e24 SP |
/* vsubq_m: predicated subtract (VSUB).  Vector - vector for
   s8/s16/s32, u8/u16/u32, f16/f32, plus vector - scalar (_n_) forms.
   p0 supplies the inactive lanes, p3 the predicate mask.  */
#define vsubq_m(p0,p1,p2,p3) __arm_vsubq_m(p0,p1,p2,p3)
#define __arm_vsubq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsubq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsubq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsubq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsubq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsubq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsubq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vsubq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vsubq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vsubq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vsubq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vsubq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vsubq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vsubq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vsubq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vsubq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vsubq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32_t), p3));})
e3678b44 | 22652 | |
532e9e24 SP |
/* vorrq_m: predicated bitwise OR (VORR).  Vector forms for all integer
   element types plus f16/f32 (bitwise on the float bits).  */
#define vorrq_m(p0,p1,p2,p3) __arm_vorrq_m(p0,p1,p2,p3)
#define __arm_vorrq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vorrq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vorrq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vorrq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vorrq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vorrq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vorrq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vorrq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vorrq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
0dad5b33 | 22666 | |
bf1e3d5a SP |
/* vld1q: contiguous vector load (VLDR).  Dispatches on the pointee type of
   p0; the result vector type follows the element type (e.g. int16_t const *
   loads an int16x8_t).  */
#define vld1q(p0) __arm_vld1q(p0)
#define __arm_vld1q(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8_t_const_ptr]: __arm_vld1q_s8 (__ARM_mve_coerce(__p0, int8_t const *)), \
  int (*)[__ARM_mve_type_int16_t_const_ptr]: __arm_vld1q_s16 (__ARM_mve_coerce(__p0, int16_t const *)), \
  int (*)[__ARM_mve_type_int32_t_const_ptr]: __arm_vld1q_s32 (__ARM_mve_coerce(__p0, int32_t const *)), \
  int (*)[__ARM_mve_type_uint8_t_const_ptr]: __arm_vld1q_u8 (__ARM_mve_coerce(__p0, uint8_t const *)), \
  int (*)[__ARM_mve_type_uint16_t_const_ptr]: __arm_vld1q_u16 (__ARM_mve_coerce(__p0, uint16_t const *)), \
  int (*)[__ARM_mve_type_uint32_t_const_ptr]: __arm_vld1q_u32 (__ARM_mve_coerce(__p0, uint32_t const *)), \
  int (*)[__ARM_mve_type_float16_t_const_ptr]: __arm_vld1q_f16 (__ARM_mve_coerce(__p0, float16_t const *)), \
  int (*)[__ARM_mve_type_float32_t_const_ptr]: __arm_vld1q_f32 (__ARM_mve_coerce(__p0, float32_t const *)));})
22678 | ||
1dfcc3b5 SP |
/* vld1q_z: predicated contiguous vector load.  p1 is the predicate mask
   (mve_pred16_t); inactive lanes are zeroed (_z suffix).  Dispatches on
   the pointee type of p0.  */
#define vld1q_z(p0,p1) __arm_vld1q_z(p0, p1)
#define __arm_vld1q_z(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8_t_const_ptr]: __arm_vld1q_z_s8 (__ARM_mve_coerce(__p0, int8_t const *), p1), \
  int (*)[__ARM_mve_type_int16_t_const_ptr]: __arm_vld1q_z_s16 (__ARM_mve_coerce(__p0, int16_t const *), p1), \
  int (*)[__ARM_mve_type_int32_t_const_ptr]: __arm_vld1q_z_s32 (__ARM_mve_coerce(__p0, int32_t const *), p1), \
  int (*)[__ARM_mve_type_uint8_t_const_ptr]: __arm_vld1q_z_u8 (__ARM_mve_coerce(__p0, uint8_t const *), p1), \
  int (*)[__ARM_mve_type_uint16_t_const_ptr]: __arm_vld1q_z_u16 (__ARM_mve_coerce(__p0, uint16_t const *), p1), \
  int (*)[__ARM_mve_type_uint32_t_const_ptr]: __arm_vld1q_z_u32 (__ARM_mve_coerce(__p0, uint32_t const *), p1), \
  int (*)[__ARM_mve_type_float16_t_const_ptr]: __arm_vld1q_z_f16 (__ARM_mve_coerce(__p0, float16_t const *), p1), \
  int (*)[__ARM_mve_type_float32_t_const_ptr]: __arm_vld1q_z_f32 (__ARM_mve_coerce(__p0, float32_t const *), p1));})
22690 | ||
/* vld2q: de-interleaving load of two vectors (VLD2).  Returns the
   corresponding <type>x2_t structure for the pointee type of p0.  */
#define vld2q(p0) __arm_vld2q(p0)
#define __arm_vld2q(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8_t_const_ptr]: __arm_vld2q_s8 (__ARM_mve_coerce(__p0, int8_t const *)), \
  int (*)[__ARM_mve_type_int16_t_const_ptr]: __arm_vld2q_s16 (__ARM_mve_coerce(__p0, int16_t const *)), \
  int (*)[__ARM_mve_type_int32_t_const_ptr]: __arm_vld2q_s32 (__ARM_mve_coerce(__p0, int32_t const *)), \
  int (*)[__ARM_mve_type_uint8_t_const_ptr]: __arm_vld2q_u8 (__ARM_mve_coerce(__p0, uint8_t const *)), \
  int (*)[__ARM_mve_type_uint16_t_const_ptr]: __arm_vld2q_u16 (__ARM_mve_coerce(__p0, uint16_t const *)), \
  int (*)[__ARM_mve_type_uint32_t_const_ptr]: __arm_vld2q_u32 (__ARM_mve_coerce(__p0, uint32_t const *)), \
  int (*)[__ARM_mve_type_float16_t_const_ptr]: __arm_vld2q_f16 (__ARM_mve_coerce(__p0, float16_t const *)), \
  int (*)[__ARM_mve_type_float32_t_const_ptr]: __arm_vld2q_f32 (__ARM_mve_coerce(__p0, float32_t const *)));})
22702 | ||
/* vld4q: de-interleaving load of four vectors (VLD4).  Returns the
   corresponding <type>x4_t structure for the pointee type of p0.  */
#define vld4q(p0) __arm_vld4q(p0)
#define __arm_vld4q(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8_t_const_ptr]: __arm_vld4q_s8 (__ARM_mve_coerce(__p0, int8_t const *)), \
  int (*)[__ARM_mve_type_int16_t_const_ptr]: __arm_vld4q_s16 (__ARM_mve_coerce(__p0, int16_t const *)), \
  int (*)[__ARM_mve_type_int32_t_const_ptr]: __arm_vld4q_s32 (__ARM_mve_coerce(__p0, int32_t const *)), \
  int (*)[__ARM_mve_type_uint8_t_const_ptr]: __arm_vld4q_u8 (__ARM_mve_coerce(__p0, uint8_t const *)), \
  int (*)[__ARM_mve_type_uint16_t_const_ptr]: __arm_vld4q_u16 (__ARM_mve_coerce(__p0, uint16_t const *)), \
  int (*)[__ARM_mve_type_uint32_t_const_ptr]: __arm_vld4q_u32 (__ARM_mve_coerce(__p0, uint32_t const *)), \
  int (*)[__ARM_mve_type_float16_t_const_ptr]: __arm_vld4q_f16 (__ARM_mve_coerce(__p0, float16_t const *)), \
  int (*)[__ARM_mve_type_float32_t_const_ptr]: __arm_vld4q_f32 (__ARM_mve_coerce(__p0, float32_t const *)));})
22714 | ||
4cc23303 SP |
/* vldrhq_gather_offset: gather load of 16-bit elements (VLDRH) from
   p0 + byte offsets in vector p1.  Dispatches on both the base pointer
   type and the offset vector type (uint16x8_t -> 8 lanes, uint32x4_t ->
   4 widened lanes).  */
#define vldrhq_gather_offset(p0,p1) __arm_vldrhq_gather_offset(p0,p1)
#define __arm_vldrhq_gather_offset(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16_t_const_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_s16 (__ARM_mve_coerce(__p0, int16_t const *), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_int16_t_const_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_offset_s32 (__ARM_mve_coerce(__p0, int16_t const *), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_uint16_t_const_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_u16 (__ARM_mve_coerce(__p0, uint16_t const *), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint16_t_const_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_offset_u32 (__ARM_mve_coerce(__p0, uint16_t const *), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16_t_const_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_f16 (__ARM_mve_coerce(__p0, float16_t const *), __ARM_mve_coerce(__p1, uint16x8_t)));})
22724 | ||
/* vldrhq_gather_offset_z: predicated variant of vldrhq_gather_offset.
   p2 is the predicate mask; inactive lanes are zeroed.  p2 is referenced
   once, so no local copy is taken.  */
#define vldrhq_gather_offset_z(p0,p1,p2) __arm_vldrhq_gather_offset_z(p0,p1,p2)
#define __arm_vldrhq_gather_offset_z(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16_t_const_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_z_s16 (__ARM_mve_coerce(__p0, int16_t const *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_int16_t_const_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_offset_z_s32 (__ARM_mve_coerce(__p0, int16_t const *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
  int (*)[__ARM_mve_type_uint16_t_const_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_z_u16 (__ARM_mve_coerce(__p0, uint16_t const *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16_t_const_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_offset_z_u32 (__ARM_mve_coerce(__p0, uint16_t const *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
  int (*)[__ARM_mve_type_float16_t_const_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_z_f16 (__ARM_mve_coerce(__p0, float16_t const *), __ARM_mve_coerce(__p1, uint16x8_t), p2));})
22734 | ||
/* vldrhq_gather_shifted_offset: gather load of 16-bit elements where the
   offsets in p1 are element indices, scaled (<<1) by the hardware
   (VLDRH with UXTW #1).  Same dispatch scheme as vldrhq_gather_offset.  */
#define vldrhq_gather_shifted_offset(p0,p1) __arm_vldrhq_gather_shifted_offset(p0,p1)
#define __arm_vldrhq_gather_shifted_offset(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16_t_const_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_s16 (__ARM_mve_coerce(__p0, int16_t const *), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_int16_t_const_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_s32 (__ARM_mve_coerce(__p0, int16_t const *), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_uint16_t_const_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_u16 (__ARM_mve_coerce(__p0, uint16_t const *), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint16_t_const_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_u32 (__ARM_mve_coerce(__p0, uint16_t const *), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16_t_const_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_f16 (__ARM_mve_coerce(__p0, float16_t const *), __ARM_mve_coerce(__p1, uint16x8_t)));})
22744 | ||
/* vldrhq_gather_shifted_offset_z: predicated variant of
   vldrhq_gather_shifted_offset.  p2 is the predicate mask; inactive
   lanes are zeroed.  */
#define vldrhq_gather_shifted_offset_z(p0,p1,p2) __arm_vldrhq_gather_shifted_offset_z(p0,p1,p2)
#define __arm_vldrhq_gather_shifted_offset_z(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16_t_const_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_z_s16 (__ARM_mve_coerce(__p0, int16_t const *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_int16_t_const_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_z_s32 (__ARM_mve_coerce(__p0, int16_t const *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
  int (*)[__ARM_mve_type_uint16_t_const_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_z_u16 (__ARM_mve_coerce(__p0, uint16_t const *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16_t_const_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_z_u32 (__ARM_mve_coerce(__p0, uint16_t const *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
  int (*)[__ARM_mve_type_float16_t_const_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_z_f16 (__ARM_mve_coerce(__p0, float16_t const *), __ARM_mve_coerce(__p1, uint16x8_t), p2));})
22754 | ||
/* vldrwq_gather_offset: gather load of 32-bit elements (VLDRW) from
   p0 + byte offsets in p1.  Only the base pointer type is dispatched on;
   p1 is always a uint32x4_t offset vector and is passed through.  */
#define vldrwq_gather_offset(p0,p1) __arm_vldrwq_gather_offset(p0,p1)
#define __arm_vldrwq_gather_offset(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int32_t_const_ptr]: __arm_vldrwq_gather_offset_s32 (__ARM_mve_coerce(__p0, int32_t const *), p1), \
  int (*)[__ARM_mve_type_uint32_t_const_ptr]: __arm_vldrwq_gather_offset_u32 (__ARM_mve_coerce(__p0, uint32_t const *), p1), \
  int (*)[__ARM_mve_type_float32_t_const_ptr]: __arm_vldrwq_gather_offset_f32 (__ARM_mve_coerce(__p0, float32_t const *), p1));})
22761 | ||
/* vldrwq_gather_offset_z: predicated variant of vldrwq_gather_offset.
   p2 is the predicate mask; inactive lanes are zeroed.  */
#define vldrwq_gather_offset_z(p0,p1,p2) __arm_vldrwq_gather_offset_z(p0,p1,p2)
#define __arm_vldrwq_gather_offset_z(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int32_t_const_ptr]: __arm_vldrwq_gather_offset_z_s32 (__ARM_mve_coerce(__p0, int32_t const *), p1, p2), \
  int (*)[__ARM_mve_type_uint32_t_const_ptr]: __arm_vldrwq_gather_offset_z_u32 (__ARM_mve_coerce(__p0, uint32_t const *), p1, p2), \
  int (*)[__ARM_mve_type_float32_t_const_ptr]: __arm_vldrwq_gather_offset_z_f32 (__ARM_mve_coerce(__p0, float32_t const *), p1, p2));})
22768 | ||
/* vldrwq_gather_shifted_offset: gather load of 32-bit elements where the
   offsets in p1 are element indices, scaled (<<2) by the hardware
   (VLDRW with UXTW #2).  */
#define vldrwq_gather_shifted_offset(p0,p1) __arm_vldrwq_gather_shifted_offset(p0,p1)
#define __arm_vldrwq_gather_shifted_offset(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int32_t_const_ptr]: __arm_vldrwq_gather_shifted_offset_s32 (__ARM_mve_coerce(__p0, int32_t const *), p1), \
  int (*)[__ARM_mve_type_uint32_t_const_ptr]: __arm_vldrwq_gather_shifted_offset_u32 (__ARM_mve_coerce(__p0, uint32_t const *), p1), \
  int (*)[__ARM_mve_type_float32_t_const_ptr]: __arm_vldrwq_gather_shifted_offset_f32 (__ARM_mve_coerce(__p0, float32_t const *), p1));})
22775 | ||
/* vldrwq_gather_shifted_offset_z: predicated variant of
   vldrwq_gather_shifted_offset.  p2 is the predicate mask; inactive
   lanes are zeroed.  */
#define vldrwq_gather_shifted_offset_z(p0,p1,p2) __arm_vldrwq_gather_shifted_offset_z(p0,p1,p2)
#define __arm_vldrwq_gather_shifted_offset_z(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int32_t_const_ptr]: __arm_vldrwq_gather_shifted_offset_z_s32 (__ARM_mve_coerce(__p0, int32_t const *), p1, p2), \
  int (*)[__ARM_mve_type_uint32_t_const_ptr]: __arm_vldrwq_gather_shifted_offset_z_u32 (__ARM_mve_coerce(__p0, uint32_t const *), p1, p2), \
  int (*)[__ARM_mve_type_float32_t_const_ptr]: __arm_vldrwq_gather_shifted_offset_z_f32 (__ARM_mve_coerce(__p0, float32_t const *), p1, p2));})
22782 | ||
1dfcc3b5 SP |
/* vst1q_p: predicated contiguous vector store (VSTR).  Dispatches on both
   the destination pointer type and the source vector type; only active
   lanes (per predicate p2) are written.  */
#define vst1q_p(p0,p1,p2) __arm_vst1q_p(p0,p1,p2)
#define __arm_vst1q_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16_t]: __arm_vst1q_p_s8 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vst1q_p_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vst1q_p_s32 (__ARM_mve_coerce(__p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vst1q_p_u8 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vst1q_p_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vst1q_p_u32 (__ARM_mve_coerce(__p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
  int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_float16x8_t]: __arm_vst1q_p_f16 (__ARM_mve_coerce(__p0, float16_t *), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vst1q_p_f32 (__ARM_mve_coerce(__p0, float32_t *), __ARM_mve_coerce(__p1, float32x4_t), p2));})
22795 | ||
/* vst2q: interleaving store of two vectors (VST2).  p1 is the
   corresponding <type>x2_t structure; dispatches on both the pointer and
   the structure type.  */
#define vst2q(p0,p1) __arm_vst2q(p0,p1)
#define __arm_vst2q(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16x2_t]: __arm_vst2q_s8 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, int8x16x2_t)), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8x2_t]: __arm_vst2q_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, int16x8x2_t)), \
  int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4x2_t]: __arm_vst2q_s32 (__ARM_mve_coerce(__p0, int32_t *), __ARM_mve_coerce(__p1, int32x4x2_t)), \
  int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16x2_t]: __arm_vst2q_u8 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16x2_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8x2_t]: __arm_vst2q_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8x2_t)), \
  int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4x2_t]: __arm_vst2q_u32 (__ARM_mve_coerce(__p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4x2_t)), \
  int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_float16x8x2_t]: __arm_vst2q_f16 (__ARM_mve_coerce(__p0, float16_t *), __ARM_mve_coerce(__p1, float16x8x2_t)), \
  int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4x2_t]: __arm_vst2q_f32 (__ARM_mve_coerce(__p0, float32_t *), __ARM_mve_coerce(__p1, float32x4x2_t)));})
22808 | ||
5cad47e0 SP |
/* vst1q: contiguous store of one vector.  Polymorphic dispatch on the
   destination pointer type and source vector type.  */
#define vst1q(p0,p1) __arm_vst1q(p0,p1)
#define __arm_vst1q(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16_t]: __arm_vst1q_s8 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vst1q_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vst1q_s32 (__ARM_mve_coerce(__p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vst1q_u8 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vst1q_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vst1q_u32 (__ARM_mve_coerce(__p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_float16x8_t]: __arm_vst1q_f16 (__ARM_mve_coerce(__p0, float16_t *), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vst1q_f32 (__ARM_mve_coerce(__p0, float32_t *), __ARM_mve_coerce(__p1, float32x4_t)));})
22821 | ||
/* vstrhq: store vector elements as halfwords (with narrowing for the
   32-bit-element variants).  Polymorphic dispatch on pointer/vector types.  */
#define vstrhq(p0,p1) __arm_vstrhq(p0,p1)
#define __arm_vstrhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vstrhq_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrhq_s32 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_u32 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_float16x8_t]: __arm_vstrhq_f16 (__ARM_mve_coerce(__p0, float16_t *), __ARM_mve_coerce(__p1, float16x8_t)));})
22831 | ||
/* vstrhq_p: predicated halfword store; p2 is the mve_pred16_t predicate,
   passed through unchanged to the selected intrinsic.  */
#define vstrhq_p(p0,p1,p2) __arm_vstrhq_p(p0,p1,p2)
#define __arm_vstrhq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vstrhq_p_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrhq_p_s32 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_p_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_p_u32 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
  int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_float16x8_t]: __arm_vstrhq_p_f16 (__ARM_mve_coerce(__p0, float16_t *), __ARM_mve_coerce(__p1, float16x8_t), p2));})
22841 | ||
/* vstrhq_scatter_offset_p: predicated halfword scatter store; p1 is the
   unsigned offset vector, p2 the data vector, p3 the predicate.  */
#define vstrhq_scatter_offset_p(p0,p1,p2,p3) __arm_vstrhq_scatter_offset_p(p0,p1,p2,p3)
#define __arm_vstrhq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_offset_p_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_offset_p_s32 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_offset_p_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_offset_p_u32 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vstrhq_scatter_offset_p_f16 (__ARM_mve_coerce(__p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3));})
22852 | ||
/* vstrhq_scatter_offset: halfword scatter store at byte offsets given by
   the unsigned offset vector p1.  */
#define vstrhq_scatter_offset(p0,p1,p2) __arm_vstrhq_scatter_offset(p0,p1,p2)
#define __arm_vstrhq_scatter_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_offset_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_offset_s32 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_offset_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_offset_u32 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vstrhq_scatter_offset_f16 (__ARM_mve_coerce(__p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, float16x8_t)));})
22863 | ||
/* vstrhq_scatter_shifted_offset_p: predicated halfword scatter store with
   element-size-scaled offsets (offsets shifted left by 1).  */
#define vstrhq_scatter_shifted_offset_p(p0,p1,p2,p3) __arm_vstrhq_scatter_shifted_offset_p(p0,p1,p2,p3)
#define __arm_vstrhq_scatter_shifted_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_shifted_offset_p_s32 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_shifted_offset_p_u32 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_f16 (__ARM_mve_coerce(__p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3));})
22874 | ||
/* vstrhq_scatter_shifted_offset: halfword scatter store with
   element-size-scaled offsets.  */
#define vstrhq_scatter_shifted_offset(p0,p1,p2) __arm_vstrhq_scatter_shifted_offset(p0,p1,p2)
#define __arm_vstrhq_scatter_shifted_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_shifted_offset_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_shifted_offset_s32 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_shifted_offset_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_shifted_offset_u32 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vstrhq_scatter_shifted_offset_f16 (__ARM_mve_coerce(__p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, float16x8_t)));})
22885 | ||
/* vstrwq_p: predicated contiguous word store; p2 is the predicate.  */
#define vstrwq_p(p0,p1,p2) __arm_vstrwq_p(p0,p1,p2)
#define __arm_vstrwq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_p_s32 (__ARM_mve_coerce(__p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_p_u32 (__ARM_mve_coerce(__p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
  int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_p_f32 (__ARM_mve_coerce(__p0, float32_t *), __ARM_mve_coerce(__p1, float32x4_t), p2));})
22893 | ||
/* vstrwq: contiguous word store, dispatched on pointer/vector types.  */
#define vstrwq(p0,p1) __arm_vstrwq(p0,p1)
#define __arm_vstrwq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_s32 (__ARM_mve_coerce(__p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_u32 (__ARM_mve_coerce(__p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_f32 (__ARM_mve_coerce(__p0, float32_t *), __ARM_mve_coerce(__p1, float32x4_t)));})
22901 | ||
7a5fffa5 SP |
/* NOTE(review): the four vstrhq_scatter_* macros below are IDENTICAL
   redefinitions of macros already defined earlier in this header.  An
   identical replacement list makes the redefinition well-formed per
   C11 6.10.3p2, so this is benign, but the duplicates are redundant and
   should be removed upstream.  Kept here to avoid changing the file's
   observable contents in this edit.  */
#define vstrhq_scatter_offset(p0,p1,p2) __arm_vstrhq_scatter_offset(p0,p1,p2)
#define __arm_vstrhq_scatter_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_offset_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_offset_s32 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_offset_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_offset_u32 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vstrhq_scatter_offset_f16 (__ARM_mve_coerce(__p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, float16x8_t)));})

/* Duplicate of vstrhq_scatter_offset_p defined earlier — see note above.  */
#define vstrhq_scatter_offset_p(p0,p1,p2,p3) __arm_vstrhq_scatter_offset_p(p0,p1,p2,p3)
#define __arm_vstrhq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_offset_p_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_offset_p_s32 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_offset_p_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_offset_p_u32 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vstrhq_scatter_offset_p_f16 (__ARM_mve_coerce(__p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3));})

/* Duplicate of vstrhq_scatter_shifted_offset defined earlier — see note above.  */
#define vstrhq_scatter_shifted_offset(p0,p1,p2) __arm_vstrhq_scatter_shifted_offset(p0,p1,p2)
#define __arm_vstrhq_scatter_shifted_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_shifted_offset_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_shifted_offset_s32 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_shifted_offset_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_shifted_offset_u32 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vstrhq_scatter_shifted_offset_f16 (__ARM_mve_coerce(__p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, float16x8_t)));})

/* Duplicate of vstrhq_scatter_shifted_offset_p defined earlier — see note above.  */
#define vstrhq_scatter_shifted_offset_p(p0,p1,p2,p3) __arm_vstrhq_scatter_shifted_offset_p(p0,p1,p2,p3)
#define __arm_vstrhq_scatter_shifted_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_shifted_offset_p_s32 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_shifted_offset_p_u32 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_f16 (__ARM_mve_coerce(__p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3));})
22945 | ||
/* vstrwq_scatter_base: word scatter store relative to a vector of base
   addresses (p0) plus immediate offset p1; dispatch only on the data
   vector type p2 (p0/p1 are passed through unchanged).  */
#define vstrwq_scatter_base(p0,p1,p2) __arm_vstrwq_scatter_base(p0,p1,p2)
#define __arm_vstrwq_scatter_base(p0,p1,p2) ({ __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_base_s32 (p0, p1, __ARM_mve_coerce(__p2, int32x4_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_base_u32 (p0, p1, __ARM_mve_coerce(__p2, uint32x4_t)), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_base_f32 (p0, p1, __ARM_mve_coerce(__p2, float32x4_t)));})
22952 | ||
/* vstrwq_scatter_base_p: predicated form of vstrwq_scatter_base; p3 is
   the predicate.  */
#define vstrwq_scatter_base_p(p0,p1,p2,p3) __arm_vstrwq_scatter_base_p(p0,p1,p2,p3)
#define __arm_vstrwq_scatter_base_p(p0,p1,p2,p3) ({ __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_base_p_s32(p0, p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_base_p_u32(p0, p1, __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_base_p_f32(p0, p1, __ARM_mve_coerce(__p2, float32x4_t), p3));})
22959 | ||
/* vstrwq_scatter_offset: word scatter store at byte offsets p1; dispatch
   on the base-pointer and data-vector types (offset vector p1 is always
   uint32x4_t and is passed through unchanged).  */
#define vstrwq_scatter_offset(p0,p1,p2) __arm_vstrwq_scatter_offset(p0,p1,p2)
#define __arm_vstrwq_scatter_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_offset_s32 (__ARM_mve_coerce(__p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t)), \
  int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_offset_u32 (__ARM_mve_coerce(__p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t)), \
  int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_offset_f32 (__ARM_mve_coerce(__p0, float32_t *), __p1, __ARM_mve_coerce(__p2, float32x4_t)));})
22968 | ||
/* vstrwq_scatter_offset_p: predicated word scatter store at byte
   offsets; p3 is the predicate.  */
#define vstrwq_scatter_offset_p(p0,p1,p2,p3) __arm_vstrwq_scatter_offset_p(p0,p1,p2,p3)
#define __arm_vstrwq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_offset_p_s32 (__ARM_mve_coerce(__p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_offset_p_u32 (__ARM_mve_coerce(__p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_offset_p_f32 (__ARM_mve_coerce(__p0, float32_t *), __p1, __ARM_mve_coerce(__p2, float32x4_t), p3));})
22977 | ||
/* NOTE(review): identical redefinitions of vstrwq_scatter_offset_p and
   vstrwq_scatter_offset defined earlier in this header.  Well-formed per
   C11 6.10.3p2 (identical replacement lists) but redundant; should be
   removed upstream.  */
#define vstrwq_scatter_offset_p(p0,p1,p2,p3) __arm_vstrwq_scatter_offset_p(p0,p1,p2,p3)
#define __arm_vstrwq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_offset_p_s32 (__ARM_mve_coerce(__p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_offset_p_u32 (__ARM_mve_coerce(__p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_offset_p_f32 (__ARM_mve_coerce(__p0, float32_t *), __p1, __ARM_mve_coerce(__p2, float32x4_t), p3));})

/* Duplicate of vstrwq_scatter_offset defined earlier — see note above.  */
#define vstrwq_scatter_offset(p0,p1,p2) __arm_vstrwq_scatter_offset(p0,p1,p2)
#define __arm_vstrwq_scatter_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_offset_s32 (__ARM_mve_coerce(__p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t)), \
  int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_offset_u32 (__ARM_mve_coerce(__p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t)), \
  int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_offset_f32 (__ARM_mve_coerce(__p0, float32_t *), __p1, __ARM_mve_coerce(__p2, float32x4_t)));})
22995 | ||
/* vstrwq_scatter_shifted_offset: word scatter store with
   element-size-scaled offsets (offsets shifted left by 2).  */
#define vstrwq_scatter_shifted_offset(p0,p1,p2) __arm_vstrwq_scatter_shifted_offset(p0,p1,p2)
#define __arm_vstrwq_scatter_shifted_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_shifted_offset_s32 (__ARM_mve_coerce(__p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t)), \
  int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_shifted_offset_u32 (__ARM_mve_coerce(__p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t)), \
  int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_shifted_offset_f32 (__ARM_mve_coerce(__p0, float32_t *), __p1, __ARM_mve_coerce(__p2, float32x4_t)));})
23004 | ||
/* vstrwq_scatter_shifted_offset_p: predicated word scatter store with
   element-size-scaled offsets; p3 is the predicate.  */
#define vstrwq_scatter_shifted_offset_p(p0,p1,p2,p3) __arm_vstrwq_scatter_shifted_offset_p(p0,p1,p2,p3)
#define __arm_vstrwq_scatter_shifted_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_shifted_offset_p_s32 (__ARM_mve_coerce(__p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_shifted_offset_p_u32 (__ARM_mve_coerce(__p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_shifted_offset_p_f32 (__ARM_mve_coerce(__p0, float32_t *), __p1, __ARM_mve_coerce(__p2, float32x4_t), p3));})
23013 | ||
/* NOTE(review): identical redefinitions of vstrwq_scatter_shifted_offset_p
   and vstrwq_scatter_shifted_offset defined earlier in this header.
   Well-formed per C11 6.10.3p2 (identical replacement lists) but
   redundant; should be removed upstream.  */
#define vstrwq_scatter_shifted_offset_p(p0,p1,p2,p3) __arm_vstrwq_scatter_shifted_offset_p(p0,p1,p2,p3)
#define __arm_vstrwq_scatter_shifted_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_shifted_offset_p_s32 (__ARM_mve_coerce(__p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_shifted_offset_p_u32 (__ARM_mve_coerce(__p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_shifted_offset_p_f32 (__ARM_mve_coerce(__p0, float32_t *), __p1, __ARM_mve_coerce(__p2, float32x4_t), p3));})

/* Duplicate of vstrwq_scatter_shifted_offset defined earlier — see note above.  */
#define vstrwq_scatter_shifted_offset(p0,p1,p2) __arm_vstrwq_scatter_shifted_offset(p0,p1,p2)
#define __arm_vstrwq_scatter_shifted_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_shifted_offset_s32 (__ARM_mve_coerce(__p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t)), \
  int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_shifted_offset_u32 (__ARM_mve_coerce(__p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t)), \
  int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_shifted_offset_f32 (__ARM_mve_coerce(__p0, float32_t *), __p1, __ARM_mve_coerce(__p2, float32x4_t)));})
23031 | ||
85a94e87 SP |
/* vuninitializedq: return an uninitialized vector of the same type as p0.
   p0 is evaluated only for its type (via __ARM_mve_typeid); its value is
   never used — each selected intrinsic takes no arguments.  */
#define vuninitializedq(p0) __arm_vuninitializedq(p0)
#define __arm_vuninitializedq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vuninitializedq_s8 (), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vuninitializedq_s16 (), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vuninitializedq_s32 (), \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vuninitializedq_s64 (), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vuninitializedq_u8 (), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vuninitializedq_u16 (), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vuninitializedq_u32 (), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vuninitializedq_u64 (), \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vuninitializedq_f16 (), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vuninitializedq_f32 ());})
23045 | ||
/* vreinterpretq_f16: bit-cast any MVE vector to float16x8_t, dispatched
   on the source vector type (no f16->f16 case, by design).  */
#define vreinterpretq_f16(p0) __arm_vreinterpretq_f16(p0)
#define __arm_vreinterpretq_f16(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_f16_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_f16_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_f16_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_f16_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_f16_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_f16_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_f16_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_f16_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_f16_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
23058 | ||
/* vreinterpretq_f32: bit-cast any MVE vector to float32x4_t, dispatched
   on the source vector type (no f32->f32 case, by design).  */
#define vreinterpretq_f32(p0) __arm_vreinterpretq_f32(p0)
#define __arm_vreinterpretq_f32(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_f32_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_f32_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_f32_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_f32_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_f32_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_f32_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_f32_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_f32_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_f32_f16 (__ARM_mve_coerce(__p0, float16x8_t)));})
23071 | ||
23072 | #define vreinterpretq_s16(p0) __arm_vreinterpretq_s16(p0) | |
23073 | #define __arm_vreinterpretq_s16(p0) ({ __typeof(p0) __p0 = (p0); \ | |
23074 | _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ | |
23075 | int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_s16_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \ | |
23076 | int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_s16_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \ | |
23077 | int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_s16_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \ | |
23078 | int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_s16_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \ | |
23079 | int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_s16_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \ | |
23080 | int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_s16_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \ | |
23081 | int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_s16_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \ | |
23082 | int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_s16_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \ | |
23083 | int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_s16_f32 (__ARM_mve_coerce(__p0, float32x4_t)));}) | |
23084 | ||
23085 | #define vreinterpretq_s32(p0) __arm_vreinterpretq_s32(p0) | |
23086 | #define __arm_vreinterpretq_s32(p0) ({ __typeof(p0) __p0 = (p0); \ | |
23087 | _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ | |
23088 | int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_s32_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \ | |
23089 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_s32_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \ | |
23090 | int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_s32_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \ | |
23091 | int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_s32_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \ | |
23092 | int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_s32_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \ | |
23093 | int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_s32_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \ | |
23094 | int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_s32_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \ | |
23095 | int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_s32_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \ | |
23096 | int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_s32_f32 (__ARM_mve_coerce(__p0, float32x4_t)));}) | |
23097 | ||
23098 | #define vreinterpretq_s64(p0) __arm_vreinterpretq_s64(p0) | |
23099 | #define __arm_vreinterpretq_s64(p0) ({ __typeof(p0) __p0 = (p0); \ | |
23100 | _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ | |
23101 | int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_s64_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \ | |
23102 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_s64_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \ | |
23103 | int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_s64_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \ | |
23104 | int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_s64_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \ | |
23105 | int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_s64_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \ | |
23106 | int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_s64_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \ | |
23107 | int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_s64_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \ | |
23108 | int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_s64_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \ | |
23109 | int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_s64_f32 (__ARM_mve_coerce(__p0, float32x4_t)));}) | |
23110 | ||
23111 | #define vreinterpretq_s8(p0) __arm_vreinterpretq_s8(p0) | |
23112 | #define __arm_vreinterpretq_s8(p0) ({ __typeof(p0) __p0 = (p0); \ | |
23113 | _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ | |
23114 | int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_s8_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \ | |
23115 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_s8_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \ | |
23116 | int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_s8_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \ | |
23117 | int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_s8_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \ | |
23118 | int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_s8_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \ | |
23119 | int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_s8_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \ | |
23120 | int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_s8_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \ | |
23121 | int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_s8_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \ | |
23122 | int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_s8_f32 (__ARM_mve_coerce(__p0, float32x4_t)));}) | |
23123 | ||
23124 | #define vreinterpretq_u16(p0) __arm_vreinterpretq_u16(p0) | |
23125 | #define __arm_vreinterpretq_u16(p0) ({ __typeof(p0) __p0 = (p0); \ | |
23126 | _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ | |
23127 | int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_u16_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \ | |
23128 | int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_u16_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \ | |
23129 | int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_u16_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \ | |
23130 | int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_u16_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \ | |
23131 | int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_u16_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \ | |
23132 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_u16_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \ | |
23133 | int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_u16_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \ | |
23134 | int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_u16_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \ | |
23135 | int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_u16_f32 (__ARM_mve_coerce(__p0, float32x4_t)));}) | |
23136 | ||
23137 | #define vreinterpretq_u32(p0) __arm_vreinterpretq_u32(p0) | |
23138 | #define __arm_vreinterpretq_u32(p0) ({ __typeof(p0) __p0 = (p0); \ | |
23139 | _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ | |
23140 | int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_u32_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \ | |
23141 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_u32_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \ | |
23142 | int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_u32_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \ | |
23143 | int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_u32_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \ | |
23144 | int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_u32_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \ | |
23145 | int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_u32_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \ | |
23146 | int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_u32_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \ | |
23147 | int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_u32_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \ | |
23148 | int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_u32_f32 (__ARM_mve_coerce(__p0, float32x4_t)));}) | |
23149 | ||
23150 | #define vreinterpretq_u64(p0) __arm_vreinterpretq_u64(p0) | |
23151 | #define __arm_vreinterpretq_u64(p0) ({ __typeof(p0) __p0 = (p0); \ | |
23152 | _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ | |
23153 | int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_u64_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \ | |
23154 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_u64_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \ | |
23155 | int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_u64_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \ | |
23156 | int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_u64_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \ | |
23157 | int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_u64_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \ | |
23158 | int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_u64_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \ | |
23159 | int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_u64_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \ | |
23160 | int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_u64_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \ | |
23161 | int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_u64_f32 (__ARM_mve_coerce(__p0, float32x4_t)));}) | |
23162 | ||
23163 | #define vreinterpretq_u8(p0) __arm_vreinterpretq_u8(p0) | |
23164 | #define __arm_vreinterpretq_u8(p0) ({ __typeof(p0) __p0 = (p0); \ | |
23165 | _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ | |
23166 | int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_u8_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \ | |
23167 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_u8_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \ | |
23168 | int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_u8_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \ | |
23169 | int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_u8_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \ | |
23170 | int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_u8_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \ | |
23171 | int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_u8_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \ | |
23172 | int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_u8_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \ | |
23173 | int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_u8_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \ | |
23174 | int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_u8_f32 (__ARM_mve_coerce(__p0, float32x4_t)));}) | |
23175 | ||
41e1a7ff SP |
/* vstrwq_scatter_base_wb[_p]: type-generic wrappers dispatching on the
   type of the data argument p2 (int32x4_t, uint32x4_t or float32x4_t);
   p0/p1 (and the predicate-position p3 in the _p form) are forwarded
   unchanged to the type-specific intrinsic.  */
#define vstrwq_scatter_base_wb(p0,p1,p2) __arm_vstrwq_scatter_base_wb(p0,p1,p2)
#define __arm_vstrwq_scatter_base_wb(p0,p1,p2) ({ __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_base_wb_s32 (p0, p1, __ARM_mve_coerce(__p2, int32x4_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_base_wb_u32 (p0, p1, __ARM_mve_coerce(__p2, uint32x4_t)), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_base_wb_f32 (p0, p1, __ARM_mve_coerce(__p2, float32x4_t)));})

#define vstrwq_scatter_base_wb_p(p0,p1,p2,p3) __arm_vstrwq_scatter_base_wb_p(p0,p1,p2,p3)
#define __arm_vstrwq_scatter_base_wb_p(p0,p1,p2,p3) ({ __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_base_wb_p_s32 (p0, p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_base_wb_p_u32 (p0, p1, __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_base_wb_p_f32 (p0, p1, __ARM_mve_coerce(__p2, float32x4_t), p3));})
23189 | ||
261014a1 SP |
/* vabdq_x: type-generic dispatcher over both vector operands (p1, p2);
   p3 is forwarded unchanged to the selected type-specific intrinsic.  */
#define vabdq_x(p1,p2,p3) __arm_vabdq_x(p1,p2,p3)
#define __arm_vabdq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vabdq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vabdq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vabdq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vabdq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vabdq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vabdq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vabdq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vabdq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
23202 | ||
/* vabsq_x: type-generic dispatcher over the single vector operand p1
   (signed integer and float element types only); p2 is forwarded.  */
#define vabsq_x(p1,p2) __arm_vabsq_x(p1,p2)
#define __arm_vabsq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vabsq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vabsq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vabsq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vabsq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vabsq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));})
23211 | ||
/* vaddq_x: type-generic dispatcher over (p1, p2).  Vector/vector pairs
   select the plain variants; vector/scalar pairs select the _n_
   (broadcast-scalar) variants.  p3 is forwarded unchanged.  */
#define vaddq_x(p1,p2,p3) __arm_vaddq_x(p1,p2,p3)
#define __arm_vaddq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vaddq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vaddq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vaddq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vaddq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vaddq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vaddq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vaddq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vaddq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vaddq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vaddq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vaddq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vaddq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vaddq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vaddq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vaddq_x_n_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vaddq_x_n_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32_t), p3));})
23232 | ||
/* vandq_x: type-generic dispatcher over both vector operands (p1, p2);
   p3 is forwarded unchanged to the selected type-specific intrinsic.  */
#define vandq_x(p1,p2,p3) __arm_vandq_x(p1,p2,p3)
#define __arm_vandq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vandq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vandq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vandq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vandq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vandq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vandq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vandq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vandq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
23245 | ||
/* vbicq_x: type-generic dispatcher over both vector operands (p1, p2);
   p3 is forwarded unchanged to the selected type-specific intrinsic.  */
#define vbicq_x(p1,p2,p3) __arm_vbicq_x(p1,p2,p3)
#define __arm_vbicq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vbicq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vbicq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vbicq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vbicq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vbicq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vbicq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vbicq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vbicq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
23258 | ||
/* vbrsrq_x: type-generic dispatcher over the vector operand p1 only;
   the scalar p2 and p3 are forwarded unchanged to the _n_ intrinsic.  */
#define vbrsrq_x(p1,p2,p3) __arm_vbrsrq_x(p1,p2,p3)
#define __arm_vbrsrq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vbrsrq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vbrsrq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vbrsrq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vbrsrq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vbrsrq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vbrsrq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vbrsrq_x_n_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vbrsrq_x_n_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2, p3));})
23270 | ||
/* vcaddq_rot270_x / vcaddq_rot90_x: type-generic dispatchers over both
   vector operands (p1, p2); p3 is forwarded unchanged.  */
#define vcaddq_rot270_x(p1,p2,p3) __arm_vcaddq_rot270_x(p1,p2,p3)
#define __arm_vcaddq_rot270_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot270_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot270_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot270_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot270_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot270_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot270_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcaddq_rot270_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcaddq_rot270_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})

#define vcaddq_rot90_x(p1,p2,p3) __arm_vcaddq_rot90_x(p1,p2,p3)
#define __arm_vcaddq_rot90_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot90_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot90_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot90_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot90_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot90_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot90_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcaddq_rot90_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcaddq_rot90_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
23296 | ||
/* vcmulq_rot180_x / vcmulq_rot270_x / vcmulq_x: type-generic
   dispatchers over both vector operands (float element types only,
   per the visible maps); p3 is forwarded unchanged.  */
#define vcmulq_rot180_x(p1,p2,p3) __arm_vcmulq_rot180_x(p1,p2,p3)
#define __arm_vcmulq_rot180_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_rot180_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_rot180_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})

#define vcmulq_rot270_x(p1,p2,p3) __arm_vcmulq_rot270_x(p1,p2,p3)
#define __arm_vcmulq_rot270_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_rot270_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_rot270_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})

#define vcmulq_x(p1,p2,p3) __arm_vcmulq_x(p1,p2,p3)
#define __arm_vcmulq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
23317 | ||
/* vcvtq_x / vcvtq_x_n: type-generic integer-to-float conversion
   dispatchers.  The element width of the integer source selects the
   f16 or f32 destination; remaining arguments are forwarded.  */
#define vcvtq_x(p1,p2) __arm_vcvtq_x(p1,p2)
#define __arm_vcvtq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vcvtq_x_f16_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vcvtq_x_f32_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vcvtq_x_f16_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vcvtq_x_f32_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2));})

#define vcvtq_x_n(p1,p2,p3) __arm_vcvtq_x_n(p1,p2,p3)
#define __arm_vcvtq_x_n(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vcvtq_x_n_f16_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vcvtq_x_n_f32_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vcvtq_x_n_f16_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vcvtq_x_n_f32_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
23333 | ||
/* veorq_x: type-generic dispatcher over both vector operands (p1, p2);
   p3 is forwarded unchanged to the selected type-specific intrinsic.  */
#define veorq_x(p1,p2,p3) __arm_veorq_x(p1,p2,p3)
#define __arm_veorq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_veorq_x_s8(__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_veorq_x_s16(__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_veorq_x_s32(__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_veorq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_veorq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_veorq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_veorq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_veorq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
23346 | ||
/* vmaxnmq_x / vminnmq_x: type-generic dispatchers over both vector
   operands (float element types only, per the visible maps); p3 is
   forwarded unchanged.  */
#define vmaxnmq_x(p1,p2,p3) __arm_vmaxnmq_x(p1,p2,p3)
#define __arm_vmaxnmq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmaxnmq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmaxnmq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})

#define vminnmq_x(p1,p2,p3) __arm_vminnmq_x(p1,p2,p3)
#define __arm_vminnmq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vminnmq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vminnmq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
23360 | ||
/* vmulq_x: polymorphic entry point.  Dispatches on both argument types:
   vector/vector pairs go to the _sN/_uN/_fN variants, vector/scalar pairs
   to the _n_ variants; the last parameter is forwarded unchanged.  */
#define vmulq_x(p1,p2,p3) __arm_vmulq_x(p1,p2,p3)
#define __arm_vmulq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vmulq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vmulq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vmulq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vmulq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vmulq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vmulq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmulq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmulq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vmulq_x_n_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vmulq_x_n_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32_t), p3));})
23381 | ||
/* vnegq_x: polymorphic entry point.  Single-operand dispatch over the
   signed integer and float vector types; p2 is forwarded unchanged.  */
#define vnegq_x(p1,p2) __arm_vnegq_x(p1,p2)
#define __arm_vnegq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vnegq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vnegq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vnegq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vnegq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vnegq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));})
23390 | ||
/* vornq_x: polymorphic entry point.  Vector/vector dispatch over the
   signed, unsigned and float vector types; p3 is forwarded unchanged.  */
#define vornq_x(p1,p2,p3) __arm_vornq_x(p1,p2,p3)
#define __arm_vornq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vornq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vornq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vornq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vornq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vornq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vornq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vornq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vornq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
23403 | ||
/* vorrq_x: polymorphic entry point.  Vector/vector dispatch over the
   signed, unsigned and float vector types; p3 is forwarded unchanged.  */
#define vorrq_x(p1,p2,p3) __arm_vorrq_x(p1,p2,p3)
#define __arm_vorrq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vorrq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vorrq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vorrq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vorrq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vorrq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vorrq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vorrq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vorrq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
23416 | ||
/* vrev32q_x: polymorphic entry point.  Only 8- and 16-bit element types
   are dispatched (note: no 32-bit cases, consistent with the rev-within-
   32-bit-word operation); p2 is forwarded unchanged.  */
#define vrev32q_x(p1,p2) __arm_vrev32q_x(p1,p2)
#define __arm_vrev32q_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev32q_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vrev32q_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev32q_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrev32q_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vrev32q_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2));})
23425 | ||
/* vrev64q_x: polymorphic entry point.  Dispatches over all 8/16/32-bit
   integer and float vector types; p2 is forwarded unchanged.  */
#define vrev64q_x(p1,p2) __arm_vrev64q_x(p1,p2)
#define __arm_vrev64q_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev64q_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vrev64q_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vrev64q_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev64q_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrev64q_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrev64q_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vrev64q_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vrev64q_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));})
23437 | ||
/* vrndaq_x: polymorphic entry point; float-only (f16/f32) dispatch,
   p2 forwarded unchanged.  */
#define vrndaq_x(p1,p2) __arm_vrndaq_x(p1,p2)
#define __arm_vrndaq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndaq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndaq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));})
23443 | ||
/* vrndmq_x: polymorphic entry point; float-only (f16/f32) dispatch,
   p2 forwarded unchanged.  */
#define vrndmq_x(p1,p2) __arm_vrndmq_x(p1,p2)
#define __arm_vrndmq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndmq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndmq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));})
23449 | ||
/* vrndnq_x: polymorphic entry point; float-only (f16/f32) dispatch,
   p2 forwarded unchanged.  */
#define vrndnq_x(p1,p2) __arm_vrndnq_x(p1,p2)
#define __arm_vrndnq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndnq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndnq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));})
23455 | ||
/* vrndpq_x: polymorphic entry point; float-only (f16/f32) dispatch,
   p2 forwarded unchanged.  */
#define vrndpq_x(p1,p2) __arm_vrndpq_x(p1,p2)
#define __arm_vrndpq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndpq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndpq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));})
23461 | ||
/* vrndq_x: polymorphic entry point; float-only (f16/f32) dispatch,
   p2 forwarded unchanged.  */
#define vrndq_x(p1,p2) __arm_vrndq_x(p1,p2)
#define __arm_vrndq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));})
23467 | ||
/* vrndxq_x: polymorphic entry point; float-only (f16/f32) dispatch,
   p2 forwarded unchanged.  */
#define vrndxq_x(p1,p2) __arm_vrndxq_x(p1,p2)
#define __arm_vrndxq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndxq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndxq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));})
23473 | ||
/* vsubq_x: polymorphic entry point (float overloads only in this
   branch of the header).  Vector/vector pairs go to _f16/_f32,
   vector/scalar pairs to the _n_ variants; p3 forwarded unchanged.  */
#define vsubq_x(p1,p2,p3) __arm_vsubq_x(p1,p2,p3)
#define __arm_vsubq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vsubq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vsubq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vsubq_x_n_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vsubq_x_n_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32_t), p3));})
23482 | ||
/* vcmulq_rot90_x: polymorphic entry point; float-only (f16/f32)
   vector/vector dispatch, p3 forwarded unchanged.  */
#define vcmulq_rot90_x(p1,p2,p3) __arm_vcmulq_rot90_x(p1,p2,p3)
#define __arm_vcmulq_rot90_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_rot90_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_rot90_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
23489 | ||
1a5c27b1 SP |
/* vgetq_lane: polymorphic entry point.  Dispatches on the vector type
   (including the 64-bit s64/u64 vectors); p1 is the lane index,
   forwarded unchanged.  */
#define vgetq_lane(p0,p1) __arm_vgetq_lane(p0,p1)
#define __arm_vgetq_lane(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vgetq_lane_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vgetq_lane_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vgetq_lane_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vgetq_lane_s64 (__ARM_mve_coerce(__p0, int64x2_t), p1), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vgetq_lane_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vgetq_lane_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vgetq_lane_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vgetq_lane_u64 (__ARM_mve_coerce(__p0, uint64x2_t), p1), \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vgetq_lane_f16 (__ARM_mve_coerce(__p0, float16x8_t), p1), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vgetq_lane_f32 (__ARM_mve_coerce(__p0, float32x4_t), p1));})
23503 | ||
/* vsetq_lane: polymorphic entry point.  Dispatches on the (scalar,
   vector) type pair — note the scalar comes first here, unlike most
   two-operand macros in this file; p2 is the lane index.  */
#define vsetq_lane(p0,p1,p2) __arm_vsetq_lane(p0,p1,p2)
#define __arm_vsetq_lane(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8_t][__ARM_mve_type_int8x16_t]: __arm_vsetq_lane_s8 (__ARM_mve_coerce(__p0, int8_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16_t][__ARM_mve_type_int16x8_t]: __arm_vsetq_lane_s16 (__ARM_mve_coerce(__p0, int16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32_t][__ARM_mve_type_int32x4_t]: __arm_vsetq_lane_s32 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_int64_t][__ARM_mve_type_int64x2_t]: __arm_vsetq_lane_s64 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int64x2_t), p2), \
  int (*)[__ARM_mve_type_uint8_t][__ARM_mve_type_uint8x16_t]: __arm_vsetq_lane_u8 (__ARM_mve_coerce(__p0, uint8_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16_t][__ARM_mve_type_uint16x8_t]: __arm_vsetq_lane_u16 (__ARM_mve_coerce(__p0, uint16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32_t][__ARM_mve_type_uint32x4_t]: __arm_vsetq_lane_u32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
  int (*)[__ARM_mve_type_uint64_t][__ARM_mve_type_uint64x2_t]: __arm_vsetq_lane_u64 (__ARM_mve_coerce(__p0, uint64_t), __ARM_mve_coerce(__p1, uint64x2_t), p2), \
  int (*)[__ARM_mve_type_float16_t][__ARM_mve_type_float16x8_t]: __arm_vsetq_lane_f16 (__ARM_mve_coerce(__p0, float16_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32_t][__ARM_mve_type_float32x4_t]: __arm_vsetq_lane_f32 (__ARM_mve_coerce(__p0, float32_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
23518 | ||
e3678b44 | 23519 | #else /* MVE Integer. */ |
14782c81 | 23520 | |
41e1a7ff SP |
/* vstrwq_scatter_base_wb: polymorphic entry point.  Dispatches only on
   the type of the value vector p2 (s32/u32); p0 and p1 are passed
   through unchanged.  */
#define vstrwq_scatter_base_wb(p0,p1,p2) __arm_vstrwq_scatter_base_wb(p0,p1,p2)
#define __arm_vstrwq_scatter_base_wb(p0,p1,p2) ({ __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_base_wb_s32 (p0, p1, __ARM_mve_coerce(__p2, int32x4_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_base_wb_u32 (p0, p1, __ARM_mve_coerce(__p2, uint32x4_t)));})
23526 | ||
/* vstrwq_scatter_base_wb_p: predicated form of the macro above; the
   extra p3 argument is forwarded unchanged after the coerced value
   vector.  */
#define vstrwq_scatter_base_wb_p(p0,p1,p2,p3) __arm_vstrwq_scatter_base_wb_p(p0,p1,p2,p3)
#define __arm_vstrwq_scatter_base_wb_p(p0,p1,p2,p3) ({ __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_base_wb_p_s32 (p0, p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_base_wb_p_u32 (p0, p1, __ARM_mve_coerce(__p2, uint32x4_t), p3));})
23532 | ||
14782c81 SP |
/* vst4q: polymorphic entry point.  Dispatches on (element pointer,
   4-vector structure) type pairs; both arguments are coerced to the
   matching pointer and xN-struct types.  */
#define vst4q(p0,p1) __arm_vst4q(p0,p1)
#define __arm_vst4q(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16x4_t]: __arm_vst4q_s8 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, int8x16x4_t)), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8x4_t]: __arm_vst4q_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, int16x8x4_t)), \
  int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4x4_t]: __arm_vst4q_s32 (__ARM_mve_coerce(__p0, int32_t *), __ARM_mve_coerce(__p1, int32x4x4_t)), \
  int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16x4_t]: __arm_vst4q_u8 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16x4_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8x4_t]: __arm_vst4q_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8x4_t)), \
  int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4x4_t]: __arm_vst4q_u32 (__ARM_mve_coerce(__p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4x4_t)));})
23543 | ||
6df4618c SP |
/* vabsq: polymorphic entry point (integer-only branch of the header);
   dispatches over the signed vector types.  */
#define vabsq(p0) __arm_vabsq(p0)
#define __arm_vabsq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vabsq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vabsq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vabsq_s32 (__ARM_mve_coerce(__p0, int32x4_t)));})
23550 | ||
/* vclsq: polymorphic entry point; signed vector types only.  */
#define vclsq(p0) __arm_vclsq(p0)
#define __arm_vclsq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vclsq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vclsq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vclsq_s32 (__ARM_mve_coerce(__p0, int32x4_t)));})
23557 | ||
/* vclzq: polymorphic entry point; dispatches over all signed and
   unsigned vector types.  */
#define vclzq(p0) __arm_vclzq(p0)
#define __arm_vclzq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vclzq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vclzq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vclzq_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vclzq_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vclzq_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vclzq_u32 (__ARM_mve_coerce(__p0, uint32x4_t)));})
23567 | ||
/* vnegq: polymorphic entry point (integer-only branch of the header);
   signed vector types only.  */
#define vnegq(p0) __arm_vnegq(p0)
#define __arm_vnegq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vnegq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vnegq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vnegq_s32 (__ARM_mve_coerce(__p0, int32x4_t)));})
23574 | ||
6df4618c SP |
/* vmovlbq: polymorphic entry point; only 8- and 16-bit element vectors
   are valid sources for the widening move.  */
#define vmovlbq(p0) __arm_vmovlbq(p0)
#define __arm_vmovlbq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vmovlbq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vmovlbq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmovlbq_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmovlbq_u16 (__ARM_mve_coerce(__p0, uint16x8_t)));})
23582 | ||
/* vmovltq: polymorphic entry point; same type set as vmovlbq (8- and
   16-bit element vectors only).  */
#define vmovltq(p0) __arm_vmovltq(p0)
#define __arm_vmovltq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vmovltq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vmovltq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmovltq_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmovltq_u16 (__ARM_mve_coerce(__p0, uint16x8_t)));})
23590 | ||
/* vmvnq: polymorphic entry point; dispatches over all signed and
   unsigned vector types.  */
#define vmvnq(p0) __arm_vmvnq(p0)
#define __arm_vmvnq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vmvnq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vmvnq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vmvnq_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmvnq_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmvnq_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vmvnq_u32 (__ARM_mve_coerce(__p0, uint32x4_t)));})
23600 | ||
/* vrev16q: polymorphic entry point; only 8-bit element vectors are
   dispatched (rev within 16-bit halfwords).  */
#define vrev16q(p0) __arm_vrev16q(p0)
#define __arm_vrev16q(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev16q_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev16q_u8 (__ARM_mve_coerce(__p0, uint8x16_t)));})
23606 | ||
/* vrev32q: polymorphic entry point (integer-only branch); 8- and 16-bit
   element vectors only.  */
#define vrev32q(p0) __arm_vrev32q(p0)
#define __arm_vrev32q(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev32q_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vrev32q_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev32q_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrev32q_u16 (__ARM_mve_coerce(__p0, uint16x8_t)));})
23614 | ||
5db0eb95 SP |
/* vrev64q: polymorphic entry point (integer-only branch); dispatches
   over all signed and unsigned vector types.  */
#define vrev64q(p0) __arm_vrev64q(p0)
#define __arm_vrev64q(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev64q_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vrev64q_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vrev64q_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev64q_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrev64q_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrev64q_u32 (__ARM_mve_coerce(__p0, uint32x4_t)));})
23624 | ||
6df4618c SP |
/* vqabsq: polymorphic entry point; signed vector types only.  */
#define vqabsq(p0) __arm_vqabsq(p0)
#define __arm_vqabsq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vqabsq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vqabsq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vqabsq_s32 (__ARM_mve_coerce(__p0, int32x4_t)));})
23631 | ||
/* vqnegq: polymorphic entry point; signed vector types only.  */
#define vqnegq(p0) __arm_vqnegq(p0)
#define __arm_vqnegq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vqnegq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vqnegq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vqnegq_s32 (__ARM_mve_coerce(__p0, int32x4_t)));})
23638 | ||
f166a8cd SP |
/* vshrq: polymorphic entry point.  Dispatches on the vector type only;
   the immediate shift amount p1 is forwarded unchanged to the _n_
   variants.  */
#define vshrq(p0,p1) __arm_vshrq(p0,p1)
#define __arm_vshrq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vshrq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vshrq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vshrq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshrq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshrq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshrq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
23648 | ||
d71dba7b SP |
/* vcmpneq: polymorphic entry point; vector/vector dispatch over the
   signed and unsigned vector types.  */
#define vcmpneq(p0,p1) __arm_vcmpneq(p0,p1)
#define __arm_vcmpneq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpneq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpneq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpneq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpneq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpneq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpneq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
23659 | ||
/* vshlq: polymorphic entry point.  Note the second operand is always a
   SIGNED vector of the same element width, even for unsigned first
   operands — this matches the per-lane shift count encoding.  */
#define vshlq(p0,p1) __arm_vshlq(p0,p1)
#define __arm_vshlq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vshlq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vshlq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vshlq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vshlq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vshlq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vshlq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
23670 | ||
33203b4c SP |
/* vsubq: polymorphic entry point (integer-only branch).  Vector/vector
   pairs go to _sN/_uN, vector/scalar pairs to the _n_ variants.  */
#define vsubq(p0,p1) __arm_vsubq(p0,p1)
#define __arm_vsubq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsubq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsubq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsubq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsubq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsubq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsubq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vsubq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vsubq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vsubq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vsubq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vsubq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vsubq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)));})
23687 | ||
/* vshlq_r: polymorphic entry point.  Dispatches on the vector type
   only; the scalar shift register p1 is forwarded unchanged.  */
#define vshlq_r(p0,p1) __arm_vshlq_r(p0,p1)
#define __arm_vshlq_r(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlq_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlq_r_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlq_r_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlq_r_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlq_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlq_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
23697 | ||
33203b4c SP |
23698 | #define vrshlq(p0,p1) __arm_vrshlq(p0,p1) |
23699 | #define __arm_vrshlq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ | |
33203b4c SP |
23700 | __typeof(p1) __p1 = (p1); \ |
23701 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
23702 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int32_t]: __arm_vrshlq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int32_t)), \ | |
23703 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32_t]: __arm_vrshlq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32_t)), \ | |
23704 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vrshlq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \ | |
23705 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int32_t]: __arm_vrshlq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int32_t)), \ | |
23706 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32_t]: __arm_vrshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32_t)), \ | |
f9355dee | 23707 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32_t]: __arm_vrshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32_t)), \ |
33203b4c SP |
23708 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshlq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \ |
23709 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ | |
23710 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \ | |
23711 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshlq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \ | |
23712 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ | |
23713 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));}) | |
23714 | ||
/* Polymorphic vrmulhq: vector/vector only, signed and unsigned lanes.  */
#define vrmulhq(p0,p1) __arm_vrmulhq(p0,p1)
#define __arm_vrmulhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrmulhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrmulhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrmulhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrmulhq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrmulhq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrmulhq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})

/* Polymorphic vrhaddq: vector/vector only, signed and unsigned lanes.  */
#define vrhaddq(p0,p1) __arm_vrhaddq(p0,p1)
#define __arm_vrhaddq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrhaddq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrhaddq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrhaddq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrhaddq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrhaddq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrhaddq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
23736 | ||
f9355dee SP |
/* Polymorphic vqsubq: vector/scalar pairs select the _n form,
   vector/vector pairs the element-wise form.  */
#define vqsubq(p0,p1) __arm_vqsubq(p0,p1)
#define __arm_vqsubq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vqsubq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vqsubq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vqsubq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vqsubq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vqsubq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vqsubq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqsubq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqsubq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqsubq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vqsubq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vqsubq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vqsubq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
23753 | ||
/* Polymorphic vqshlq: per-lane shift, shift vector is always signed.  */
#define vqshlq(p0,p1) __arm_vqshlq(p0,p1)
#define __arm_vqshlq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqshlq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqshlq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqshlq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqshlq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqshlq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqshlq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})

/* Polymorphic vqshlq_r: dispatch on vector operand only; p1 is the
   scalar shift count passed through unchanged.  */
#define vqshlq_r(p0,p1) __arm_vqshlq_r(p0,p1)
#define __arm_vqshlq_r(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vqshlq_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vqshlq_r_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vqshlq_r_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vqshlq_r_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vqshlq_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vqshlq_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})

/* Polymorphic vqshluq: signed input vectors only (unsigned result).  */
#define vqshluq(p0,p1) __arm_vqshluq(p0,p1)
#define __arm_vqshluq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vqshluq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vqshluq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vqshluq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1));})
23781 | ||
f9355dee SP |
/* Polymorphic vrshrq: dispatch on vector operand only; p1 is the
   immediate shift amount passed through unchanged.  */
#define vrshrq(p0,p1) __arm_vrshrq(p0,p1)
#define __arm_vrshrq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vrshrq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vrshrq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vrshrq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrshrq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrshrq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrshrq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
23791 | ||
/* Polymorphic vshlq_n: dispatch on vector operand only; p1 is the
   immediate shift amount passed through unchanged.  */
#define vshlq_n(p0,p1) __arm_vshlq_n(p0,p1)
#define __arm_vshlq_n(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})

/* Polymorphic vqshlq_n: same dispatch pattern as vshlq_n.  */
#define vqshlq_n(p0,p1) __arm_vqshlq_n(p0,p1)
#define __arm_vqshlq_n(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vqshlq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vqshlq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vqshlq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vqshlq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vqshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vqshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
23811 | ||
33203b4c SP |
/* Polymorphic vqrshlq: vector/vector pairs select the per-lane shift
   form; vector/int32_t pairs select the _n scalar-shift form.  */
#define vqrshlq(p0,p1) __arm_vqrshlq(p0,p1)
#define __arm_vqrshlq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrshlq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrshlq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrshlq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrshlq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrshlq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrshlq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int32_t]: __arm_vqrshlq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32_t]: __arm_vqrshlq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vqrshlq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int32_t]: __arm_vqrshlq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32_t]: __arm_vqrshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32_t]: __arm_vqrshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32_t)));})
33203b4c SP |
23828 | |
/* Polymorphic vqrdmulhq: signed only; vector/vector and vector/scalar
   (_n) forms.  */
#define vqrdmulhq(p0,p1) __arm_vqrdmulhq(p0,p1)
#define __arm_vqrdmulhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmulhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmulhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmulhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vqrdmulhq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vqrdmulhq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vqrdmulhq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)));})

/* Polymorphic vqdmulhq: signed only; vector/scalar (_n) and
   vector/vector forms.  */
#define vqdmulhq(p0,p1) __arm_vqdmulhq(p0,p1)
#define __arm_vqdmulhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vqdmulhq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vqdmulhq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vqdmulhq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmulhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmulhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmulhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
23850 | ||
f9355dee SP |
/* Polymorphic vqaddq: vector/scalar pairs select the _n form,
   vector/vector pairs the element-wise form.  */
#define vqaddq(p0,p1) __arm_vqaddq(p0,p1)
#define __arm_vqaddq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vqaddq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vqaddq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vqaddq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vqaddq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vqaddq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vqaddq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqaddq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqaddq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqaddq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vqaddq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vqaddq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vqaddq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
23867 | ||
33203b4c SP |
/* Polymorphic vorrq: vector/vector only, signed and unsigned lanes.  */
#define vorrq(p0,p1) __arm_vorrq(p0,p1)
#define __arm_vorrq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vorrq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vorrq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vorrq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vorrq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vorrq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vorrq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})

/* Polymorphic vornq: vector/vector only, signed and unsigned lanes.  */
#define vornq(p0,p1) __arm_vornq(p0,p1)
#define __arm_vornq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vornq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vornq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vornq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vornq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vornq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vornq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
23889 | ||
/* Polymorphic vmulq_n: vector/scalar, scalar type matches the lane.  */
#define vmulq_n(p0,p1) __arm_vmulq_n(p0,p1)
#define __arm_vmulq_n(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vmulq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vmulq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vmulq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vmulq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vmulq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vmulq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)));})

/* Polymorphic vmulq: vector/vector only, signed and unsigned lanes.  */
#define vmulq(p0,p1) __arm_vmulq(p0,p1)
#define __arm_vmulq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
23911 | ||
/* Polymorphic vmulltq_int: vector/vector only.  */
#define vmulltq_int(p0,p1) __arm_vmulltq_int(p0,p1)
#define __arm_vmulltq_int(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulltq_int_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulltq_int_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulltq_int_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulltq_int_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulltq_int_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulltq_int_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})

/* Polymorphic vmullbq_int: vector/vector only.  */
#define vmullbq_int(p0,p1) __arm_vmullbq_int(p0,p1)
#define __arm_vmullbq_int(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmullbq_int_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmullbq_int_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmullbq_int_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmullbq_int_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmullbq_int_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmullbq_int_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
23933 | ||
/* Polymorphic vmulhq: vector/vector only, signed and unsigned lanes.  */
#define vmulhq(p0,p1) __arm_vmulhq(p0,p1)
#define __arm_vmulhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulhq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulhq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulhq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
23944 | ||
/* Polymorphic vminq: vector/vector only, signed and unsigned lanes.  */
#define vminq(p0,p1) __arm_vminq(p0,p1)
#define __arm_vminq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vminq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vminq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vminq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vminq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vminq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vminq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})

/* Polymorphic vminaq: unsigned accumulator paired with a signed
   vector of the same lane width.  */
#define vminaq(p0,p1) __arm_vminaq(p0,p1)
#define __arm_vminaq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vminaq_s8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vminaq_s16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vminaq_s32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
23963 | ||
/* Polymorphic vmaxq: vector/vector only, signed and unsigned lanes.  */
#define vmaxq(p0,p1) __arm_vmaxq(p0,p1)
#define __arm_vmaxq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmaxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmaxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmaxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmaxq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmaxq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmaxq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})

/* Polymorphic vmaxaq: unsigned accumulator paired with a signed
   vector of the same lane width.  */
#define vmaxaq(p0,p1) __arm_vmaxaq(p0,p1)
#define __arm_vmaxaq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmaxaq_s8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmaxaq_s16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmaxaq_s32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
23982 | ||
f9355dee SP |
/* Type-generic vhsubq: _Generic dispatch on the MVE type IDs of both
   arguments.  A (vector, scalar) pair selects an __arm_vhsubq_n_* variant;
   a (vector, vector) pair of matching type selects __arm_vhsubq_[su]*.  */
#define vhsubq(p0,p1) __arm_vhsubq(p0,p1)
#define __arm_vhsubq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vhsubq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vhsubq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vhsubq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vhsubq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vhsubq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vhsubq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhsubq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhsubq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhsubq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vhsubq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vhsubq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vhsubq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
23999 | ||
/* Type-generic vhcaddq_rot90: _Generic dispatch on the MVE type IDs of
   both arguments.  Defined for signed vector pairs only (s8/s16/s32).  */
#define vhcaddq_rot90(p0,p1) __arm_vhcaddq_rot90(p0,p1)
#define __arm_vhcaddq_rot90(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhcaddq_rot90_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhcaddq_rot90_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhcaddq_rot90_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
24007 | ||
/* Type-generic vhcaddq_rot270: _Generic dispatch on the MVE type IDs of
   both arguments.  Defined for signed vector pairs only (s8/s16/s32).  */
#define vhcaddq_rot270(p0,p1) __arm_vhcaddq_rot270(p0,p1)
#define __arm_vhcaddq_rot270(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhcaddq_rot270_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhcaddq_rot270_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhcaddq_rot270_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
24015 | ||
f9355dee SP |
/* Type-generic vhaddq: _Generic dispatch on the MVE type IDs of both
   arguments.  A (vector, scalar) pair selects an __arm_vhaddq_n_* variant;
   a (vector, vector) pair of matching type selects __arm_vhaddq_[su]*.  */
#define vhaddq(p0,p1) __arm_vhaddq(p0,p1)
#define __arm_vhaddq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vhaddq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vhaddq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vhaddq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vhaddq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vhaddq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vhaddq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhaddq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhaddq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhaddq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vhaddq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vhaddq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vhaddq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
24032 | ||
/* Type-generic veorq: _Generic dispatch on the MVE type IDs of both
   arguments; defined for matching (vector, vector) integer pairs.  */
#define veorq(p0,p1) __arm_veorq(p0,p1)
#define __arm_veorq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_veorq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_veorq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_veorq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_veorq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_veorq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_veorq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
24043 | ||
/* Type-generic vcaddq_rot90: _Generic dispatch on the MVE type IDs of
   both arguments; defined for matching (vector, vector) integer pairs.  */
#define vcaddq_rot90(p0,p1) __arm_vcaddq_rot90(p0,p1)
#define __arm_vcaddq_rot90(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot90_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot90_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot90_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot90_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot90_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot90_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
24054 | ||
/* Type-generic vcaddq_rot270: _Generic dispatch on the MVE type IDs of
   both arguments; defined for matching (vector, vector) integer pairs.  */
#define vcaddq_rot270(p0,p1) __arm_vcaddq_rot270(p0,p1)
#define __arm_vcaddq_rot270(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot270_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot270_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot270_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot270_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot270_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot270_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
24065 | ||
/* Type-generic vbrsrq: _Generic dispatch on the MVE type ID of the first
   argument only; p1 is passed through unevaluated-by-copy (no __typeof
   snapshot), presumably because the _n_ forms require it directly --
   NOTE(review): confirm whether p1 may be evaluated more than once here.  */
#define vbrsrq(p0,p1) __arm_vbrsrq(p0,p1)
#define __arm_vbrsrq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vbrsrq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vbrsrq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vbrsrq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vbrsrq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vbrsrq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vbrsrq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
24075 | ||
/* Type-generic vbicq: _Generic dispatch on the MVE type IDs of both
   arguments.  A (vector, int scalar) pair selects an immediate _n_ form
   (note the use of __ARM_mve_coerce1 with plain int for the scalar, unlike
   the vector cases); a matching (vector, vector) pair selects vbicq_[su]*.  */
#define vbicq(p0,p1) __arm_vbicq(p0,p1)
#define __arm_vbicq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32_t]: __arm_vbicq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce1(__p1, int)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vbicq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce1(__p1, int)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32_t]: __arm_vbicq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce1(__p1, int)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32_t]: __arm_vbicq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce1(__p1, int)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vbicq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vbicq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vbicq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vbicq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vbicq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vbicq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
24090 | ||
/* Type-generic vaddq: _Generic dispatch on the MVE type IDs of both
   arguments.  Matching (vector, vector) pairs select __arm_vaddq_[su]*;
   (vector, scalar) pairs select the __arm_vaddq_n_* broadcast forms.  */
#define vaddq(p0,p1) __arm_vaddq(p0,p1)
#define __arm_vaddq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vaddq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vaddq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vaddq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vaddq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vaddq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vaddq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vaddq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vaddq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vaddq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vaddq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vaddq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vaddq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)));})
24107 | ||
/* Type-generic vandq: _Generic dispatch on the MVE type IDs of both
   arguments; defined for matching (vector, vector) integer pairs.  */
#define vandq(p0,p1) __arm_vandq(p0,p1)
#define __arm_vandq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vandq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vandq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vandq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vandq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vandq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vandq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
24118 | ||
/* Type-generic vabdq: _Generic dispatch on the MVE type IDs of both
   arguments; defined for matching (vector, vector) integer pairs.  */
#define vabdq(p0,p1) __arm_vabdq(p0,p1)
#define __arm_vabdq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vabdq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vabdq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vabdq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vabdq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vabdq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vabdq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
24129 | ||
33203b4c SP |
/* Type-generic vcmpeqq: _Generic dispatch on the MVE type IDs of both
   arguments.  Matching (vector, vector) pairs select __arm_vcmpeqq_[su]*;
   (vector, scalar) pairs select the __arm_vcmpeqq_n_* forms.  */
#define vcmpeqq(p0,p1) __arm_vcmpeqq(p0,p1)
#define __arm_vcmpeqq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpeqq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpeqq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpeqq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpeqq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpeqq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpeqq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpeqq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpeqq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpeqq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vcmpeqq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vcmpeqq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vcmpeqq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)));})
24146 | ||
33203b4c SP |
/* Type-generic vcmpneq: _Generic dispatch on the MVE type IDs of both
   arguments.  Matching (vector, vector) pairs select __arm_vcmpneq_[su]*;
   (vector, scalar) pairs select the __arm_vcmpneq_n_* forms.  */
#define vcmpneq(p0,p1) __arm_vcmpneq(p0,p1)
#define __arm_vcmpneq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpneq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpneq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpneq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpneq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpneq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpneq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpneq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpneq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpneq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vcmpneq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vcmpneq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vcmpneq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)));})
24163 | ||
f9355dee SP |
24164 | |
/* Type-generic vqmovntq: _Generic dispatch on the MVE type IDs of both
   arguments; the first argument is the narrower (half element width)
   vector, the second the wider one.  */
#define vqmovntq(p0,p1) __arm_vqmovntq(p0,p1)
#define __arm_vqmovntq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovntq_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovntq_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqmovntq_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqmovntq_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
24173 | ||
/* Type-generic vqmovnbq: _Generic dispatch on the MVE type IDs of both
   arguments; the first argument is the narrower (half element width)
   vector, the second the wider one.  */
#define vqmovnbq(p0,p1) __arm_vqmovnbq(p0,p1)
#define __arm_vqmovnbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovnbq_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovnbq_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqmovnbq_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqmovnbq_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
24182 | ||
/* Type-generic vmulltq_poly: _Generic dispatch on the MVE type IDs; the
   polynomial types are carried in uint8x16_t/uint16x8_t vectors (p8/p16).  */
#define vmulltq_poly(p0,p1) __arm_vmulltq_poly(p0,p1)
#define __arm_vmulltq_poly(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulltq_poly_p8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulltq_poly_p16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)));})
24189 | ||
/* Type-generic vmullbq_poly: _Generic dispatch on the MVE type IDs; the
   polynomial types are carried in uint8x16_t/uint16x8_t vectors (p8/p16).  */
#define vmullbq_poly(p0,p1) __arm_vmullbq_poly(p0,p1)
#define __arm_vmullbq_poly(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmullbq_poly_p8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmullbq_poly_p16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)));})
24196 | ||
/* Type-generic vmovntq: _Generic dispatch on the MVE type IDs of both
   arguments; first argument is the narrower vector, second the wider one.  */
#define vmovntq(p0,p1) __arm_vmovntq(p0,p1)
#define __arm_vmovntq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vmovntq_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vmovntq_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vmovntq_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vmovntq_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
24205 | ||
/* Type-generic vmovnbq: _Generic dispatch on the MVE type IDs of both
   arguments; first argument is the narrower vector, second the wider one.  */
#define vmovnbq(p0,p1) __arm_vmovnbq(p0,p1)
#define __arm_vmovnbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vmovnbq_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vmovnbq_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vmovnbq_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vmovnbq_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
24214 | ||
/* Type-generic vmlaldavxq: _Generic dispatch on the MVE type IDs;
   defined for signed s16/s32 (vector, vector) pairs only.  */
#define vmlaldavxq(p0,p1) __arm_vmlaldavxq(p0,p1)
#define __arm_vmlaldavxq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlaldavxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlaldavxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
24221 | ||
/* Type-generic vqmovuntq: _Generic dispatch on the MVE type IDs; pairs an
   unsigned narrow destination vector with a signed wide source vector.  */
#define vqmovuntq(p0,p1) __arm_vqmovuntq(p0,p1)
#define __arm_vqmovuntq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovuntq_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovuntq_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t)));})
24228 | ||
/* Type-generic vshlltq: _Generic dispatch on the MVE type ID of the first
   argument only; p1 (the shift amount for the _n_ forms) is passed through
   directly without a __typeof snapshot.  Defined for 8- and 16-bit
   element vectors only.  */
#define vshlltq(p0,p1) __arm_vshlltq(p0,p1)
#define __arm_vshlltq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlltq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlltq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlltq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlltq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1));})
24236 | ||
/* Type-generic vshllbq: _Generic dispatch on the MVE type ID of the first
   argument only; p1 (the shift amount for the _n_ forms) is passed through
   directly without a __typeof snapshot.  Defined for 8- and 16-bit
   element vectors only.  */
#define vshllbq(p0,p1) __arm_vshllbq(p0,p1)
#define __arm_vshllbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vshllbq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vshllbq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshllbq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshllbq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1));})
24244 | ||
f9355dee SP |
/* Type-generic vqmovunbq: _Generic dispatch on the MVE type IDs; pairs an
   unsigned narrow destination vector with a signed wide source vector.  */
#define vqmovunbq(p0,p1) __arm_vqmovunbq(p0,p1)
#define __arm_vqmovunbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovunbq_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovunbq_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t)));})
24251 | ||
/* Type-generic vqdmulltq: _Generic dispatch on the MVE type IDs.  Signed
   s16/s32 only; a (vector, scalar) pair selects the _n_ form, a matching
   (vector, vector) pair the plain form.  */
#define vqdmulltq(p0,p1) __arm_vqdmulltq(p0,p1)
#define __arm_vqdmulltq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vqdmulltq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vqdmulltq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmulltq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmulltq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
24260 | ||
/* Type-generic vqdmullbq: _Generic dispatch on the MVE type IDs.  Signed
   s16/s32 only; a (vector, scalar) pair selects the _n_ form, a matching
   (vector, vector) pair the plain form.  */
#define vqdmullbq(p0,p1) __arm_vqdmullbq(p0,p1)
#define __arm_vqdmullbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vqdmullbq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vqdmullbq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmullbq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmullbq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
24269 | ||
/* Type-generic vcmpgeq_n: scalar-only variant of vcmpgeq.  _Generic
   dispatch on the MVE type IDs; accepts only signed (vector, scalar)
   pairs and forwards to the matching __arm_vcmpgeq_n_s* form.  */
#define vcmpgeq_n(p0,p1) __arm_vcmpgeq_n(p0,p1)
#define __arm_vcmpgeq_n(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpgeq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpgeq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpgeq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)));})
24277 | ||
33203b4c SP |
24278 | #define vcmpgeq(p0,p1) __arm_vcmpgeq(p0,p1) |
24279 | #define __arm_vcmpgeq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ | |
24280 | __typeof(p1) __p1 = (p1); \ | |
24281 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
24282 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpgeq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \ | |
24283 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpgeq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ | |
24284 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpgeq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \ | |
24285 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpgeq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \ | |
24286 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpgeq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \ | |
24287 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpgeq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)));}) | |
24288 | ||
/* Type-generic dispatch for vcmpgtq: signed vector/vector (plain form) or
   vector/scalar (_n_ form) compare-GT.  Single evaluation via __p0/__p1.  */
#define vcmpgtq(p0,p1) __arm_vcmpgtq(p0,p1)
#define __arm_vcmpgtq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpgtq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpgtq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpgtq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpgtq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpgtq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpgtq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)));})
24299 | ||
/* Type-generic dispatch for vcmpleq: signed vector/vector (plain form) or
   vector/scalar (_n_ form) compare-LE.  Single evaluation via __p0/__p1.  */
#define vcmpleq(p0,p1) __arm_vcmpleq(p0,p1)
#define __arm_vcmpleq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpleq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpleq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpleq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpleq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpleq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpleq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)));})
24310 | ||
/* Type-generic dispatch for vcmpltq: signed vector/vector (plain form) or
   vector/scalar (_n_ form) compare-LT.  Single evaluation via __p0/__p1.  */
#define vcmpltq(p0,p1) __arm_vcmpltq(p0,p1)
#define __arm_vcmpltq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpltq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpltq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpltq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpltq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpltq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpltq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)));})
24321 | ||
/* Type-generic dispatch for vcmpneq_m (predicated compare-NE): covers signed
   and unsigned, vector/vector (plain form) and vector/scalar (_n_ form).
   p2 is the predicate and is passed through unchanged to every variant.  */
#define vcmpneq_m(p0,p1,p2) __arm_vcmpneq_m(p0,p1,p2)
#define __arm_vcmpneq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpneq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpneq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpneq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpneq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vcmpneq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vcmpneq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vcmpneq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpneq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpneq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpneq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpneq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpneq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
24338 | ||
/* Type-generic dispatch for vcmpneq (compare-NE): covers signed and unsigned,
   vector/vector (plain form) and vector/scalar (_n_ form) operands.  */
#define vcmpneq(p0,p1) __arm_vcmpneq(p0,p1)
#define __arm_vcmpneq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpneq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpneq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpneq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpneq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpneq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpneq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpneq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpneq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpneq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vcmpneq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vcmpneq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vcmpneq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)));})
24355 | ||
/* Type-generic dispatch for vshlcq: selects on the vector type of p0 only;
   p1 and p2 are forwarded unchanged to the chosen variant.  */
#define vshlcq(p0,p1,p2) __arm_vshlcq(p0,p1,p2)
#define __arm_vshlcq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlcq_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlcq_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlcq_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlcq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlcq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlcq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})
24365 | ||
/* Type-generic dispatch for vcmpeqq_m (predicated compare-EQ): covers signed
   and unsigned, vector/vector (plain form) and vector/scalar (_n_ form).
   p2 is the predicate, forwarded unchanged.  */
#define vcmpeqq_m(p0,p1,p2) __arm_vcmpeqq_m(p0,p1,p2)
#define __arm_vcmpeqq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpeqq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpeqq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpeqq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpeqq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpeqq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpeqq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpeqq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpeqq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpeqq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vcmpeqq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vcmpeqq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vcmpeqq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t), p2));})
24382 | ||
/* Type-generic dispatch for vbicq_m_n: selects on p0's vector type; only the
   16- and 32-bit element variants exist.  p1 and p2 forwarded unchanged.  */
#define vbicq_m_n(p0,p1,p2) __arm_vbicq_m_n(p0,p1,p2)
#define __arm_vbicq_m_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vbicq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vbicq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vbicq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vbicq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})
24390 | ||
/* Type-generic dispatch for vqrshrnbq: the destination (p0) is the
   half-width vector, the source (p1) the full-width one; p2 (shift count)
   is forwarded unchanged.  Signed and unsigned narrowing pairs.  */
#define vqrshrnbq(p0,p1,p2) __arm_vqrshrnbq(p0,p1,p2)
#define __arm_vqrshrnbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshrnbq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshrnbq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqrshrnbq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqrshrnbq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})

/* Type-generic dispatch for vqrshrunbq: unsigned destination (p0) with
   signed full-width source (p1); p2 (shift count) forwarded unchanged.  */
#define vqrshrunbq(p0,p1,p2) __arm_vqrshrunbq(p0,p1,p2)
#define __arm_vqrshrunbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshrunbq_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshrunbq_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
24406 | ||
/* Type-generic dispatch for vqrdmlsdhq: all three operands must be the same
   signed vector type (s8/s16/s32).  Single evaluation via __p0/__p1/__p2.  */
#define vqrdmlsdhq(p0,p1,p2) __arm_vqrdmlsdhq(p0,p1,p2)
#define __arm_vqrdmlsdhq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmlsdhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmlsdhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmlsdhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})

/* Type-generic dispatch for vqrdmlsdhxq: same shape as vqrdmlsdhq — three
   matching signed vector operands, s8/s16/s32 variants.  */
#define vqrdmlsdhxq(p0,p1,p2) __arm_vqrdmlsdhxq(p0,p1,p2)
#define __arm_vqrdmlsdhxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmlsdhxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmlsdhxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmlsdhxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
24424 | ||
/* Type-generic dispatch for vqrshlq_m_n: selects on p0's vector type across
   all six signed/unsigned element widths; p1 and p2 forwarded unchanged.  */
#define vqrshlq_m_n(p0,p1,p2) __arm_vqrshlq_m_n(p0,p1,p2)
#define __arm_vqrshlq_m_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vqrshlq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vqrshlq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vqrshlq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vqrshlq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vqrshlq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vqrshlq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})

/* Type-generic dispatch for vqshlq_m_r: same selection scheme as
   vqrshlq_m_n — dispatch on p0's vector type, forward p1/p2 unchanged.  */
#define vqshlq_m_r(p0,p1,p2) __arm_vqshlq_m_r(p0,p1,p2)
#define __arm_vqshlq_m_r(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vqshlq_m_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vqshlq_m_r_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vqshlq_m_r_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vqshlq_m_r_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vqshlq_m_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vqshlq_m_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})
24444 | ||
/* Type-generic dispatch for vrev64q_m: p0 and p1 must be the same vector
   type; p2 (predicate) is forwarded unchanged to the selected variant.  */
#define vrev64q_m(p0,p1,p2) __arm_vrev64q_m(p0,p1,p2)
#define __arm_vrev64q_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrev64q_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrev64q_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrev64q_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrev64q_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrev64q_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrev64q_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
24455 | ||
/* Type-generic dispatch for vrshlq_m_n: selects on p0's vector type.
   Fix: the single-evaluation copy __p1 is now used in every branch — the
   original used raw p1 in all branches except the u32 one, defeating the
   purpose of declaring __p1 and leaving the branches inconsistent.  */
#define vrshlq_m_n(p0,p1,p2) __arm_vrshlq_m_n(p0,p1,p2)
#define __arm_vrshlq_m_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vrshlq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __p1, p2), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vrshlq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __p1, p2), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vrshlq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __p1, p2), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrshlq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __p1, p2), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrshlq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __p1, p2), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrshlq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __p1, p2));})
24466 | ||
/* Type-generic dispatch for vshlq_m_r: selects on p0's vector type across
   all six signed/unsigned element widths; p1 and p2 forwarded unchanged.  */
#define vshlq_m_r(p0,p1,p2) __arm_vshlq_m_r(p0,p1,p2)
#define __arm_vshlq_m_r(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlq_m_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlq_m_r_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlq_m_r_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlq_m_r_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlq_m_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlq_m_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})
24476 | ||
/* Type-generic dispatch for vsliq: p0 and p1 must be the same vector type;
   p2 (immediate) is forwarded unchanged to the selected _n_ variant.  */
#define vsliq(p0,p1,p2) __arm_vsliq(p0,p1,p2)
#define __arm_vsliq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsliq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsliq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsliq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsliq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsliq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsliq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})

/* Type-generic dispatch for vsriq: same selection scheme as vsliq —
   matching vector operands, immediate p2 forwarded to the _n_ variant.  */
#define vsriq(p0,p1,p2) __arm_vsriq(p0,p1,p2)
#define __arm_vsriq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsriq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsriq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsriq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsriq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsriq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsriq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
24498 | ||
/* Type-generic dispatch for vqrdmlashq: two matching vector operands plus a
   scalar of the matching element type; dispatches to the _n_ variant.
   NOTE(review): unsigned variants are listed here — confirm they still exist
   in the builtin set, as later header revisions dropped them.  */
#define vqrdmlashq(p0,p1,p2) __arm_vqrdmlashq(p0,p1,p2)
#define __arm_vqrdmlashq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vqrdmlashq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vqrdmlashq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vqrdmlashq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vqrdmlashq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vqrdmlashq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vqrdmlashq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t)));})

/* Type-generic dispatch for vqrdmlahq: same shape as vqrdmlashq — two
   matching vectors plus a matching scalar, dispatched to the _n_ variant.  */
#define vqrdmlahq(p0,p1,p2) __arm_vqrdmlahq(p0,p1,p2)
#define __arm_vqrdmlahq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vqrdmlahq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vqrdmlahq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vqrdmlahq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vqrdmlahq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vqrdmlahq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vqrdmlahq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t)));})
24522 | ||
/* Type-generic dispatch for vqrdmladhxq: three matching signed vector
   operands (s8/s16/s32).  Single evaluation via __p0/__p1/__p2.  */
#define vqrdmladhxq(p0,p1,p2) __arm_vqrdmladhxq(p0,p1,p2)
#define __arm_vqrdmladhxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmladhxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmladhxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmladhxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})

/* Type-generic dispatch for vqrdmladhq: same shape as vqrdmladhxq — three
   matching signed vector operands, s8/s16/s32 variants.  */
#define vqrdmladhq(p0,p1,p2) __arm_vqrdmladhq(p0,p1,p2)
#define __arm_vqrdmladhq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmladhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmladhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmladhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
24540 | ||
/* Type-generic dispatch for vqnegq_m: p0 and p1 must be the same signed
   vector type (s8/s16/s32); p2 (predicate) is forwarded unchanged.  */
#define vqnegq_m(p0,p1,p2) __arm_vqnegq_m(p0,p1,p2)
#define __arm_vqnegq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqnegq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqnegq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqnegq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
24548 | ||
/* Type-generic dispatch for vqdmlsdhxq: three matching signed vector
   operands (s8/s16/s32).  Single evaluation via __p0/__p1/__p2.  */
#define vqdmlsdhxq(p0,p1,p2) __arm_vqdmlsdhxq(p0,p1,p2)
#define __arm_vqdmlsdhxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmlsdhxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmlsdhxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmlsdhxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
24557 | ||
/* Overloaded vabsq_m: merging (predicated) absolute value.  Signed element
   types only; p2 is the predicate.  */
#define vabsq_m(p0,p1,p2) __arm_vabsq_m(p0,p1,p2)
#define __arm_vabsq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vabsq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vabsq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vabsq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
24565 | ||
/* Overloaded vclsq_m: merging (predicated) count leading sign bits.
   Signed element types only; p2 is the predicate.  */
#define vclsq_m(p0,p1,p2) __arm_vclsq_m(p0,p1,p2)
#define __arm_vclsq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vclsq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vclsq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vclsq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
24573 | ||
/* Overloaded vclzq_m: merging (predicated) count leading zeros.  Accepts
   both signed and unsigned element types; p2 is the predicate.  */
#define vclzq_m(p0,p1,p2) __arm_vclzq_m(p0,p1,p2)
#define __arm_vclzq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vclzq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vclzq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vclzq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vclzq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vclzq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vclzq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
24584 | ||
/* Overloaded vcmpgeq_m: predicated compare greater-than-or-equal.
   Selects vector/vector or vector/scalar (_n) forms from the type of p1;
   signed element types only here.  p2 is the predicate.  */
#define vcmpgeq_m(p0,p1,p2) __arm_vcmpgeq_m(p0,p1,p2)
#define __arm_vcmpgeq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpgeq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpgeq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpgeq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpgeq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpgeq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpgeq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t), p2));})
24595 | ||
/* Overloaded vdupq_m: merging (predicated) scalar duplicate.  p0 supplies
   the inactive lanes, p1 is the scalar to broadcast, p2 the predicate.  */
#define vdupq_m(p0,p1,p2) __arm_vdupq_m(p0,p1,p2)
#define __arm_vdupq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vdupq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vdupq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vdupq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vdupq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vdupq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vdupq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t), p2));})
24606 | ||
/* Overloaded vmaxaq_m: predicated maximum of absolute values.  Note the
   mixed signature: p0 is unsigned (accumulator), p1 is the signed input.  */
#define vmaxaq_m(p0,p1,p2) __arm_vmaxaq_m(p0,p1,p2)
#define __arm_vmaxaq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmaxaq_m_s8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmaxaq_m_s16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmaxaq_m_s32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
24614 | ||
/* Overloaded vmlaq: vector multiply-accumulate with scalar (p0 + p1 * p2).
   p2 is a scalar of the matching element type; signed and unsigned forms.  */
#define vmlaq(p0,p1,p2) __arm_vmlaq(p0,p1,p2)
#define __arm_vmlaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vmlaq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vmlaq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vmlaq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vmlaq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vmlaq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vmlaq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t)));})
24626 | ||
/* Overloaded vmlasq: vector multiply-accumulate-scalar (p0 * p1 + p2).
   p2 is a scalar of the matching element type; signed and unsigned forms.  */
#define vmlasq(p0,p1,p2) __arm_vmlasq(p0,p1,p2)
#define __arm_vmlasq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vmlasq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vmlasq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vmlasq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vmlasq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vmlasq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vmlasq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t)));})
24638 | ||
/* Overloaded vnegq_m: merging (predicated) negate.  Signed element types
   only; p2 is the predicate.  */
#define vnegq_m(p0,p1,p2) __arm_vnegq_m(p0,p1,p2)
#define __arm_vnegq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vnegq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vnegq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vnegq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
24646 | ||
/* Overloaded vpselq: predicated lane select between two vectors of the
   same type.  Covers all integer widths including the 64-bit variants;
   p2 is the predicate.  */
#define vpselq(p0,p1,p2) __arm_vpselq(p0,p1,p2)
#define __arm_vpselq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vpselq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vpselq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vpselq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_int64x2_t][__ARM_mve_type_int64x2_t]: __arm_vpselq_s64 (__ARM_mve_coerce(__p0, int64x2_t), __ARM_mve_coerce(__p1, int64x2_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vpselq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vpselq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vpselq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
  int (*)[__ARM_mve_type_uint64x2_t][__ARM_mve_type_uint64x2_t]: __arm_vpselq_u64 (__ARM_mve_coerce(__p0, uint64x2_t), __ARM_mve_coerce(__p1, uint64x2_t), p2));})
24659 | ||
/* Overloaded vqdmlahq: saturating doubling multiply-accumulate with a
   scalar.  Signed element types only: VQDMLAH has no unsigned encoding in
   the MVE architecture, so the former _n_u8/_n_u16/_n_u32 dispatch entries
   (which named intrinsics with no corresponding instruction) are removed;
   upstream GCC dropped those illegal unsigned variants for the same reason.
   Unsigned arguments now produce a compile-time _Generic selection error
   instead of referencing a non-existent builtin.  */
#define vqdmlahq(p0,p1,p2) __arm_vqdmlahq(p0,p1,p2)
#define __arm_vqdmlahq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vqdmlahq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vqdmlahq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vqdmlahq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t)));})
24671 | ||
/* Overloaded vqdmlsdhq: saturating doubling multiply-subtract dual.
   All three operands must share the same signed vector type.  */
#define vqdmlsdhq(p0,p1,p2) __arm_vqdmlsdhq(p0,p1,p2)
#define __arm_vqdmlsdhq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmlsdhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmlsdhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmlsdhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
24680 | ||
/* Overloaded vqdmladhxq: saturating doubling multiply-add dual, exchanged.
   All three operands must share the same signed vector type.  */
#define vqdmladhxq(p0,p1,p2) __arm_vqdmladhxq(p0,p1,p2)
#define __arm_vqdmladhxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmladhxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmladhxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmladhxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
24689 | ||
/* Overloaded vqdmladhq: saturating doubling multiply-add dual.
   All three operands must share the same signed vector type.  */
#define vqdmladhq(p0,p1,p2) __arm_vqdmladhq(p0,p1,p2)
#define __arm_vqdmladhq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmladhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmladhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmladhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
24698 | ||
/* Overloaded vminaq_m: predicated minimum of absolute values.  Note the
   mixed signature: p0 is unsigned (accumulator), p1 is the signed input.  */
#define vminaq_m(p0,p1,p2) __arm_vminaq_m(p0,p1,p2)
#define __arm_vminaq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vminaq_m_s8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vminaq_m_s16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vminaq_m_s32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
24706 | ||
/* Overloaded vcmpltq_m: predicated compare less-than.  Selects
   vector/vector or vector/scalar (_n) forms from the type of p1; signed
   element types only here.  p2 is the predicate.  */
#define vcmpltq_m(p0,p1,p2) __arm_vcmpltq_m(p0,p1,p2)
#define __arm_vcmpltq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpltq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpltq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpltq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpltq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpltq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpltq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t), p2));})
8165795c | 24717 | |
/* Overloaded vcmpleq_m: predicated compare less-than-or-equal.  Selects
   vector/vector or vector/scalar (_n) forms from the type of p1; signed
   element types only here.  p2 is the predicate.  */
#define vcmpleq_m(p0,p1,p2) __arm_vcmpleq_m(p0,p1,p2)
#define __arm_vcmpleq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpleq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpleq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpleq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpleq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpleq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpleq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t), p2));})
8165795c | 24728 | |
/* Overloaded vcmpgtq_m: predicated compare greater-than.  Selects
   vector/vector or vector/scalar (_n) forms from the type of p1; signed
   element types only here.  p2 is the predicate.  */
#define vcmpgtq_m(p0,p1,p2) __arm_vcmpgtq_m(p0,p1,p2)
#define __arm_vcmpgtq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpgtq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpgtq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpgtq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpgtq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpgtq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpgtq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t), p2));})
8165795c | 24739 | |
/* Overloaded vshrntq: shift right, narrow, write to top half of the
   destination.  p0 is the narrow destination, p1 the wide source, p2 the
   immediate shift amount (passed through unmodified).  */
#define vshrntq(p0,p1,p2) __arm_vshrntq(p0,p1,p2)
#define __arm_vshrntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vshrntq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vshrntq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vshrntq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vshrntq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
24748 | ||
/* Overloaded vrshrntq: rounding shift right, narrow, write to top half.
   p2 is the immediate shift amount.  */
#define vrshrntq(p0,p1,p2) __arm_vrshrntq(p0,p1,p2)
#define __arm_vrshrntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vrshrntq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vrshrntq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vrshrntq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vrshrntq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
24757 | ||
/* Overloaded vmovlbq_m: merging (predicated) widen of the bottom half.
   p0 is the wide inactive-value vector, p1 the narrow source, p2 the
   predicate.  */
#define vmovlbq_m(p0,p1,p2) __arm_vmovlbq_m(p0,p1,p2)
#define __arm_vmovlbq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int8x16_t]: __arm_vmovlbq_m_s8 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t]: __arm_vmovlbq_m_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint8x16_t]: __arm_vmovlbq_m_u8 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t]: __arm_vmovlbq_m_u16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), p2));})
24766 | ||
/* Overloaded vmovnbq_m: merging (predicated) narrow into the bottom half.
   p0 is the narrow destination, p1 the wide source, p2 the predicate.  */
#define vmovnbq_m(p0,p1,p2) __arm_vmovnbq_m(p0,p1,p2)
#define __arm_vmovnbq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vmovnbq_m_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vmovnbq_m_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vmovnbq_m_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vmovnbq_m_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
24775 | ||
/* Overloaded vmovntq_m: merging (predicated) narrow into the top half.
   p0 is the narrow destination, p1 the wide source, p2 the predicate.  */
#define vmovntq_m(p0,p1,p2) __arm_vmovntq_m(p0,p1,p2)
#define __arm_vmovntq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vmovntq_m_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vmovntq_m_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vmovntq_m_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vmovntq_m_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
24784 | ||
/* Overloaded vshrnbq: shift right, narrow, write to bottom half.
   p2 is the immediate shift amount.  */
#define vshrnbq(p0,p1,p2) __arm_vshrnbq(p0,p1,p2)
#define __arm_vshrnbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vshrnbq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vshrnbq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vshrnbq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vshrnbq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
24793 | ||
/* Overloaded vrshrnbq: rounding shift right, narrow, write to bottom half.
   p2 is the immediate shift amount.  */
#define vrshrnbq(p0,p1,p2) __arm_vrshrnbq(p0,p1,p2)
#define __arm_vrshrnbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vrshrnbq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vrshrnbq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vrshrnbq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vrshrnbq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
24802 | ||
/* Overloaded vrev32q_m: merging (predicated) element reversal within each
   32-bit container.  Defined for 8- and 16-bit element types only; p2 is
   the predicate.  */
#define vrev32q_m(p0,p1,p2) __arm_vrev32q_m(p0,p1,p2)
#define __arm_vrev32q_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrev32q_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrev32q_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrev32q_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrev32q_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2));})
24811 | ||
/* Overloaded vqshruntq: saturating shift right unsigned, narrow, write to
   top half.  Signed wide source, unsigned narrow destination; p2 is the
   immediate shift amount.  */
#define vqshruntq(p0,p1,p2) __arm_vqshruntq(p0,p1,p2)
#define __arm_vqshruntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshruntq_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshruntq_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
24818 | ||
/* Overloaded vrev16q_m: merging (predicated) byte reversal within each
   16-bit container.  Defined for 8-bit element types only; p2 is the
   predicate.  */
#define vrev16q_m(p0,p1,p2) __arm_vrev16q_m(p0,p1,p2)
#define __arm_vrev16q_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrev16q_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrev16q_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2));})
24825 | ||
/* Overloaded vqshrntq: saturating shift right, narrow, write to top half.
   p2 is the immediate shift amount.  */
#define vqshrntq(p0,p1,p2) __arm_vqshrntq(p0,p1,p2)
#define __arm_vqshrntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshrntq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshrntq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqshrntq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqshrntq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
24834 | ||
/* vqrshruntq: saturating rounding shift right by immediate, unsigned
   narrow into the top half.  Signed-input-only operation.  */
#define vqrshruntq(p0,p1,p2) __arm_vqrshruntq(p0,p1,p2)
#define __arm_vqrshruntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshruntq_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshruntq_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
24841 | ||
/* vqrshrntq: saturating rounding shift right by immediate, narrow into
   the top half.  Signed and unsigned 16->8 and 32->16 forms.  */
#define vqrshrntq(p0,p1,p2) __arm_vqrshrntq(p0,p1,p2)
#define __arm_vqrshrntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshrntq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshrntq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqrshrntq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqrshrntq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
24850 | ||
/* vqshrnbq: saturating shift right by immediate, narrow into the bottom
   half of the result.  Signed and unsigned 16->8 and 32->16 forms.  */
#define vqshrnbq(p0,p1,p2) __arm_vqshrnbq(p0,p1,p2)
#define __arm_vqshrnbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshrnbq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshrnbq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqshrnbq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqshrnbq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
24859 | ||
/* vqmovuntq_m: predicated saturating move, unsigned narrow into the top
   half.  Signed input only; P2 is the merge predicate.  */
#define vqmovuntq_m(p0,p1,p2) __arm_vqmovuntq_m(p0,p1,p2)
#define __arm_vqmovuntq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovuntq_m_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovuntq_m_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
24866 | ||
/* vqmovntq_m: predicated saturating move, narrow into the top half.
   Signed and unsigned 16->8 and 32->16 forms; P2 is the predicate.  */
#define vqmovntq_m(p0,p1,p2) __arm_vqmovntq_m(p0,p1,p2)
#define __arm_vqmovntq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovntq_m_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovntq_m_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqmovntq_m_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqmovntq_m_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
24875 | ||
/* vqmovnbq_m: predicated saturating move, narrow into the bottom half.
   Signed and unsigned 16->8 and 32->16 forms; P2 is the predicate.  */
#define vqmovnbq_m(p0,p1,p2) __arm_vqmovnbq_m(p0,p1,p2)
#define __arm_vqmovnbq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovnbq_m_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovnbq_m_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqmovnbq_m_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqmovnbq_m_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
24884 | ||
/* vmovltq_m: predicated widening move of the top half of P1 into the
   double-width result P0.  Note the inverted shape: result lanes are
   wider than the input lanes.  P2 is the predicate.  */
#define vmovltq_m(p0,p1,p2) __arm_vmovltq_m(p0,p1,p2)
#define __arm_vmovltq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int8x16_t]: __arm_vmovltq_m_s8 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t]: __arm_vmovltq_m_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint8x16_t]: __arm_vmovltq_m_u8 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t]: __arm_vmovltq_m_u16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), p2));})
24893 | ||
/* vqmovunbq_m: predicated saturating move, unsigned narrow into the
   bottom half.  Signed input only; P2 is the merge predicate.  */
#define vqmovunbq_m(p0,p1,p2) __arm_vqmovunbq_m(p0,p1,p2)
#define __arm_vqmovunbq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovunbq_m_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovunbq_m_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
24900 | ||
e81d0d9e SP |
/* vsubq_m: predicated subtraction with merging (inactive lanes take P0).
   Dispatches on (inactive, op1, op2): a scalar third operand selects the
   _n_ forms, a vector third operand the vector-vector forms.  P3 is the
   predicate.  */
#define vsubq_m(p0,p1,p2,p3) __arm_vsubq_m(p0,p1,p2,p3)
#define __arm_vsubq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vsubq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vsubq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vsubq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vsubq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vsubq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vsubq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsubq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsubq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsubq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsubq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsubq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsubq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
e3678b44 | 24918 | |
e81d0d9e SP |
/* vabdq_m: predicated absolute difference with merging.  Vector-vector
   forms only, all six integer element types; P3 is the predicate.  */
#define vabdq_m(p0,p1,p2,p3) __arm_vabdq_m(p0,p1,p2,p3)
#define __arm_vabdq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vabdq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vabdq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vabdq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vabdq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vabdq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vabdq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
e3678b44 | 24930 | |
e81d0d9e SP |
/* vandq_m: predicated bitwise AND with merging.  Vector-vector forms
   only, all six integer element types; P3 is the predicate.  */
#define vandq_m(p0,p1,p2,p3) __arm_vandq_m(p0,p1,p2,p3)
#define __arm_vandq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vandq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vandq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vandq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vandq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vandq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vandq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
e3678b44 | 24942 | |
e81d0d9e SP |
/* vbicq_m: predicated bitwise AND-NOT (bit clear) with merging.
   Vector-vector forms only; P3 is the predicate.  */
#define vbicq_m(p0,p1,p2,p3) __arm_vbicq_m(p0,p1,p2,p3)
#define __arm_vbicq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vbicq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vbicq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vbicq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vbicq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vbicq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vbicq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
e3678b44 | 24954 | |
e81d0d9e SP |
/* vbrsrq_m: predicated bit-reverse shift right.  P2 is a plain scalar
   shift count, so dispatch is on the two vector operands only and __p2
   is forwarded without coercion.  P3 is the predicate.  */
#define vbrsrq_m(p0,p1,p2,p3) __arm_vbrsrq_m(p0,p1,p2,p3)
#define __arm_vbrsrq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vbrsrq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __p2, p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vbrsrq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __p2, p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vbrsrq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __p2, p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vbrsrq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vbrsrq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __p2, p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vbrsrq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __p2, p3));})
e3678b44 | 24966 | |
e81d0d9e SP |
/* vcaddq_rot270_m: predicated complex add with 270-degree rotation,
   merging.  Vector-vector forms only; P3 is the predicate.  */
#define vcaddq_rot270_m(p0,p1,p2,p3) __arm_vcaddq_rot270_m(p0,p1,p2,p3)
#define __arm_vcaddq_rot270_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot270_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot270_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot270_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot270_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot270_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot270_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
8eb3b6b9 | 24978 | |
532e9e24 SP |
/* vcaddq_rot90_m: predicated complex add with 90-degree rotation,
   merging.  Vector-vector forms only; P3 is the predicate.  */
#define vcaddq_rot90_m(p0,p1,p2,p3) __arm_vcaddq_rot90_m(p0,p1,p2,p3)
#define __arm_vcaddq_rot90_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot90_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot90_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot90_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot90_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot90_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot90_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
8eb3b6b9 | 24990 | |
532e9e24 SP |
/* veorq_m: predicated bitwise exclusive OR with merging.  Vector-vector
   forms only; P3 is the predicate.  */
#define veorq_m(p0,p1,p2,p3) __arm_veorq_m(p0,p1,p2,p3)
#define __arm_veorq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_veorq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_veorq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_veorq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_veorq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_veorq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_veorq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
8eb3b6b9 | 25002 | |
8eb3b6b9 SP |
/* vornq_m: predicated bitwise OR-NOT (P1 | ~P2) with merging.
   Vector-vector forms only; P3 is the predicate.  */
#define vornq_m(p0,p1,p2,p3) __arm_vornq_m(p0,p1,p2,p3)
#define __arm_vornq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vornq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vornq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vornq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vornq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vornq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vornq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
25014 | ||
/* vorrq_m: predicated bitwise inclusive OR with merging.  Vector-vector
   forms only; P3 is the predicate.  */
#define vorrq_m(p0,p1,p2,p3) __arm_vorrq_m(p0,p1,p2,p3)
#define __arm_vorrq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vorrq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vorrq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vorrq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vorrq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vorrq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vorrq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
25026 | ||
532e9e24 SP |
/* vaddq_m: predicated addition with merging.  A scalar third operand
   selects the _n_ forms, a vector one the vector-vector forms; P3 is
   the predicate.  */
#define vaddq_m(p0,p1,p2,p3) __arm_vaddq_m(p0,p1,p2,p3)
#define __arm_vaddq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vaddq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vaddq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vaddq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vaddq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vaddq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vaddq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vaddq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vaddq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vaddq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vaddq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vaddq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vaddq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
25044 | ||
/* vmulq_m: predicated multiplication with merging.  A scalar third
   operand selects the _n_ forms, a vector one the vector-vector forms;
   P3 is the predicate.  */
#define vmulq_m(p0,p1,p2,p3) __arm_vmulq_m(p0,p1,p2,p3)
#define __arm_vmulq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vmulq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vmulq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vmulq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vmulq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vmulq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vmulq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
25062 | ||
4ff68575 SP |
/* vstrbq: overload dispatcher.  Selects __arm_vstrbq_{s8,s16,s32,u8,u16,u32}
   from the pointer type of P0 and the vector type of P1 via _Generic.  */
#define vstrbq(p0,p1) __arm_vstrbq(p0,p1)
#define __arm_vstrbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16_t]: __arm_vstrbq_s8 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vstrbq_s16 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrbq_s32 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vstrbq_u8 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vstrbq_u16 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrbq_u32 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint32x4_t)));})
25073 | ||
4ff68575 SP |
/* vstrwq_scatter_base: dispatch on the vector type of P2 only; P0 and P1
   are forwarded unchanged to the selected intrinsic.  */
#define vstrwq_scatter_base(p0,p1,p2) __arm_vstrwq_scatter_base(p0,p1,p2)
#define __arm_vstrwq_scatter_base(p0,p1,p2) ({ __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_base_s32(p0, p1, __ARM_mve_coerce(__p2, int32x4_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_base_u32(p0, p1, __ARM_mve_coerce(__p2, uint32x4_t)));})
25079 | ||
535a8645 SP |
/* vldrbq_gather_offset: overload dispatcher.  The base pointer P0 picks the
   element signedness; the (always unsigned) offset vector P1 picks the lane
   width of the result.  */
#define vldrbq_gather_offset(p0,p1) __arm_vldrbq_gather_offset(p0,p1)
#define __arm_vldrbq_gather_offset(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8_t_const_ptr][__ARM_mve_type_uint8x16_t]: __arm_vldrbq_gather_offset_s8 (__ARM_mve_coerce(__p0, int8_t const *), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_int8_t_const_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrbq_gather_offset_s16 (__ARM_mve_coerce(__p0, int8_t const *), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_int8_t_const_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrbq_gather_offset_s32 (__ARM_mve_coerce(__p0, int8_t const *), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_uint8_t_const_ptr][__ARM_mve_type_uint8x16_t]: __arm_vldrbq_gather_offset_u8 (__ARM_mve_coerce(__p0, uint8_t const *), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint8_t_const_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrbq_gather_offset_u16 (__ARM_mve_coerce(__p0, uint8_t const *), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint8_t_const_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrbq_gather_offset_u32 (__ARM_mve_coerce(__p0, uint8_t const *), __ARM_mve_coerce(__p1, uint32x4_t)));})
25090 | ||
535a8645 SP |
/* vstrwq_scatter_base_p: predicated form of vstrwq_scatter_base.  Dispatch
   on P2's vector type; P0, P1 and the trailing argument P3 pass through.  */
#define vstrwq_scatter_base_p(p0,p1,p2,p3) __arm_vstrwq_scatter_base_p(p0,p1,p2,p3)
#define __arm_vstrwq_scatter_base_p(p0,p1,p2,p3) ({ __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_base_p_s32 (p0, p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_base_p_u32 (p0, p1, __ARM_mve_coerce(__p2, uint32x4_t), p3));})
25096 | ||
bf1e3d5a SP |
/* vld1q: contiguous vector load.  Dispatches purely on the const-qualified
   element pointer type of P0.  */
#define vld1q(p0) __arm_vld1q(p0)
#define __arm_vld1q(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8_t_const_ptr]: __arm_vld1q_s8 (__ARM_mve_coerce(__p0, int8_t const *)), \
  int (*)[__ARM_mve_type_int16_t_const_ptr]: __arm_vld1q_s16 (__ARM_mve_coerce(__p0, int16_t const *)), \
  int (*)[__ARM_mve_type_int32_t_const_ptr]: __arm_vld1q_s32 (__ARM_mve_coerce(__p0, int32_t const *)), \
  int (*)[__ARM_mve_type_uint8_t_const_ptr]: __arm_vld1q_u8 (__ARM_mve_coerce(__p0, uint8_t const *)), \
  int (*)[__ARM_mve_type_uint16_t_const_ptr]: __arm_vld1q_u16 (__ARM_mve_coerce(__p0, uint16_t const *)), \
  int (*)[__ARM_mve_type_uint32_t_const_ptr]: __arm_vld1q_u32 (__ARM_mve_coerce(__p0, uint32_t const *)));})
25106 | ||
/* vldrhq_gather_offset: halfword gather load; dispatch on base pointer P0
   (signedness) and offset vector P1 (lane width).  */
#define vldrhq_gather_offset(p0,p1) __arm_vldrhq_gather_offset(p0,p1)
#define __arm_vldrhq_gather_offset(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16_t_const_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_s16 (__ARM_mve_coerce(__p0, int16_t const *), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_int16_t_const_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_offset_s32 (__ARM_mve_coerce(__p0, int16_t const *), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_uint16_t_const_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_u16 (__ARM_mve_coerce(__p0, uint16_t const *), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint16_t_const_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_offset_u32 (__ARM_mve_coerce(__p0, uint16_t const *), __ARM_mve_coerce(__p1, uint32x4_t)));})
25115 | ||
/* vldrhq_gather_offset_z: as vldrhq_gather_offset, with the trailing
   argument P2 forwarded unchanged to the selected intrinsic.  */
#define vldrhq_gather_offset_z(p0,p1,p2) __arm_vldrhq_gather_offset_z(p0,p1,p2)
#define __arm_vldrhq_gather_offset_z(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16_t_const_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_z_s16 (__ARM_mve_coerce(__p0, int16_t const *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_int16_t_const_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_offset_z_s32 (__ARM_mve_coerce(__p0, int16_t const *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
  int (*)[__ARM_mve_type_uint16_t_const_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_z_u16 (__ARM_mve_coerce(__p0, uint16_t const *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16_t_const_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_offset_z_u32 (__ARM_mve_coerce(__p0, uint16_t const *), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
25124 | ||
/* vldrhq_gather_shifted_offset: overload dispatcher; same type grid as
   vldrhq_gather_offset.  */
#define vldrhq_gather_shifted_offset(p0,p1) __arm_vldrhq_gather_shifted_offset(p0,p1)
#define __arm_vldrhq_gather_shifted_offset(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16_t_const_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_s16 (__ARM_mve_coerce(__p0, int16_t const *), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_int16_t_const_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_s32 (__ARM_mve_coerce(__p0, int16_t const *), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_uint16_t_const_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_u16 (__ARM_mve_coerce(__p0, uint16_t const *), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint16_t_const_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_u32 (__ARM_mve_coerce(__p0, uint16_t const *), __ARM_mve_coerce(__p1, uint32x4_t)));})
25133 | ||
/* vldrhq_gather_shifted_offset_z: as the non-_z form, with trailing
   argument P2 forwarded unchanged.  */
#define vldrhq_gather_shifted_offset_z(p0,p1,p2) __arm_vldrhq_gather_shifted_offset_z(p0,p1,p2)
#define __arm_vldrhq_gather_shifted_offset_z(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16_t_const_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_z_s16 (__ARM_mve_coerce(__p0, int16_t const *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_int16_t_const_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_z_s32 (__ARM_mve_coerce(__p0, int16_t const *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
  int (*)[__ARM_mve_type_uint16_t_const_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_z_u16 (__ARM_mve_coerce(__p0, uint16_t const *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16_t_const_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_z_u32 (__ARM_mve_coerce(__p0, uint16_t const *), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
25142 | ||
4cc23303 SP |
/* vldrwq_gather_offset: word gather load; dispatch on P0 only, P1 is
   forwarded unchanged.  */
#define vldrwq_gather_offset(p0,p1) __arm_vldrwq_gather_offset(p0,p1)
#define __arm_vldrwq_gather_offset(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int32_t_const_ptr]: __arm_vldrwq_gather_offset_s32 (__ARM_mve_coerce(__p0, int32_t const *), p1), \
  int (*)[__ARM_mve_type_uint32_t_const_ptr]: __arm_vldrwq_gather_offset_u32 (__ARM_mve_coerce(__p0, uint32_t const *), p1));})
25148 | ||
/* vldrwq_gather_offset_z: as vldrwq_gather_offset, forwarding P1 and P2.  */
#define vldrwq_gather_offset_z(p0,p1,p2) __arm_vldrwq_gather_offset_z(p0,p1,p2)
#define __arm_vldrwq_gather_offset_z(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int32_t_const_ptr]: __arm_vldrwq_gather_offset_z_s32 (__ARM_mve_coerce(__p0, int32_t const *), p1, p2), \
  int (*)[__ARM_mve_type_uint32_t_const_ptr]: __arm_vldrwq_gather_offset_z_u32 (__ARM_mve_coerce(__p0, uint32_t const *), p1, p2));})
25154 | ||
/* vldrwq_gather_shifted_offset: dispatch on P0 only, P1 forwarded.  */
#define vldrwq_gather_shifted_offset(p0,p1) __arm_vldrwq_gather_shifted_offset(p0,p1)
#define __arm_vldrwq_gather_shifted_offset(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int32_t_const_ptr]: __arm_vldrwq_gather_shifted_offset_s32 (__ARM_mve_coerce(__p0, int32_t const *), p1), \
  int (*)[__ARM_mve_type_uint32_t_const_ptr]: __arm_vldrwq_gather_shifted_offset_u32 (__ARM_mve_coerce(__p0, uint32_t const *), p1));})
25160 | ||
/* vldrwq_gather_shifted_offset_z: dispatch on P0 only; P1, P2 forwarded.  */
#define vldrwq_gather_shifted_offset_z(p0,p1,p2) __arm_vldrwq_gather_shifted_offset_z(p0,p1,p2)
#define __arm_vldrwq_gather_shifted_offset_z(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int32_t_const_ptr]: __arm_vldrwq_gather_shifted_offset_z_s32 (__ARM_mve_coerce(__p0, int32_t const *), p1, p2), \
  int (*)[__ARM_mve_type_uint32_t_const_ptr]: __arm_vldrwq_gather_shifted_offset_z_u32 (__ARM_mve_coerce(__p0, uint32_t const *), p1, p2));})
25166 | ||
5cad47e0 SP |
/* vst1q: contiguous vector store; dispatch on the element pointer type of
   P0 together with the vector type of P1.  */
#define vst1q(p0,p1) __arm_vst1q(p0,p1)
#define __arm_vst1q(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16_t]: __arm_vst1q_s8 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vst1q_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vst1q_s32 (__ARM_mve_coerce(__p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vst1q_u8 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vst1q_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vst1q_u32 (__ARM_mve_coerce(__p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t)));})
25177 | ||
1dfcc3b5 SP |
/* vst1q_p: as vst1q, with the trailing argument P2 forwarded unchanged to
   the selected intrinsic.  */
#define vst1q_p(p0,p1,p2) __arm_vst1q_p(p0,p1,p2)
#define __arm_vst1q_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16_t]: __arm_vst1q_p_s8 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vst1q_p_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vst1q_p_s32 (__ARM_mve_coerce(__p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vst1q_p_u8 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vst1q_p_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vst1q_p_u32 (__ARM_mve_coerce(__p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
25188 | ||
/* vst2q: two-vector interleaving store; P1 is one of the x2 tuple types.  */
#define vst2q(p0,p1) __arm_vst2q(p0,p1)
#define __arm_vst2q(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16x2_t]: __arm_vst2q_s8 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, int8x16x2_t)), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8x2_t]: __arm_vst2q_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, int16x8x2_t)), \
  int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4x2_t]: __arm_vst2q_s32 (__ARM_mve_coerce(__p0, int32_t *), __ARM_mve_coerce(__p1, int32x4x2_t)), \
  int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16x2_t]: __arm_vst2q_u8 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16x2_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8x2_t]: __arm_vst2q_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8x2_t)), \
  int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4x2_t]: __arm_vst2q_u32 (__ARM_mve_coerce(__p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4x2_t)));})
25199 | ||
5cad47e0 SP |
/* vstrhq: store halfwords through P0; dispatch on P0's pointer type and
   P1's vector type.  */
#define vstrhq(p0,p1) __arm_vstrhq(p0,p1)
#define __arm_vstrhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vstrhq_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrhq_s32 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_u32 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t)));})
25208 | ||
/* vstrhq_p: as vstrhq, with the trailing argument P2 forwarded unchanged.  */
#define vstrhq_p(p0,p1,p2) __arm_vstrhq_p(p0,p1,p2)
#define __arm_vstrhq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vstrhq_p_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrhq_p_s32 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_p_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_p_u32 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
25217 | ||
/* vstrhq_scatter_offset_p: predicated halfword scatter store.
   NOTE(review): this macro is defined a second time further down in this
   file with an identical body; the token sequence is kept unchanged here so
   the redefinition stays benign.  */
#define vstrhq_scatter_offset_p(p0,p1,p2,p3) __arm_vstrhq_scatter_offset_p(p0,p1,p2,p3)
#define __arm_vstrhq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_offset_p_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_offset_p_s32 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_offset_p_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_offset_p_u32 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
25227 | ||
/* vstrhq_scatter_offset: halfword scatter store; dispatch on the base
   pointer P0, offset vector P1 and data vector P2.
   NOTE(review): duplicated later in this file with an identical body; token
   sequence kept unchanged so the redefinition stays benign.  */
#define vstrhq_scatter_offset(p0,p1,p2) __arm_vstrhq_scatter_offset(p0,p1,p2)
#define __arm_vstrhq_scatter_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_offset_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_offset_s32 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_offset_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_offset_u32 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));})
25237 | ||
/* vstrhq_scatter_shifted_offset_p: predicated shifted-offset scatter store.
   NOTE(review): duplicated later in this file with an identical body.  */
#define vstrhq_scatter_shifted_offset_p(p0,p1,p2,p3) __arm_vstrhq_scatter_shifted_offset_p(p0,p1,p2,p3)
#define __arm_vstrhq_scatter_shifted_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_shifted_offset_p_s32 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_shifted_offset_p_u32 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
25247 | ||
/* vstrhq_scatter_shifted_offset: shifted-offset halfword scatter store.
   NOTE(review): duplicated later in this file with an identical body.  */
#define vstrhq_scatter_shifted_offset(p0,p1,p2) __arm_vstrhq_scatter_shifted_offset(p0,p1,p2)
#define __arm_vstrhq_scatter_shifted_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_shifted_offset_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_shifted_offset_s32 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_shifted_offset_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_shifted_offset_u32 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));})
25257 | ||
25258 | ||
/* vstrwq: store words through P0; dispatch on P0 and P1 types.  */
#define vstrwq(p0,p1) __arm_vstrwq(p0,p1)
#define __arm_vstrwq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_s32 (__ARM_mve_coerce(__p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_u32 (__ARM_mve_coerce(__p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t)));})
25265 | ||
/* vstrwq_p: as vstrwq, with the trailing argument P2 forwarded unchanged.  */
#define vstrwq_p(p0,p1,p2) __arm_vstrwq_p(p0,p1,p2)
#define __arm_vstrwq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_p_s32 (__ARM_mve_coerce(__p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_p_u32 (__ARM_mve_coerce(__p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
25272 | ||
7a5fffa5 SP |
/* NOTE(review): duplicate definition — vstrhq_scatter_offset is already
   defined identically earlier in this file; consider deleting one copy.
   Token sequence kept unchanged so the redefinition stays benign.  */
#define vstrhq_scatter_offset(p0,p1,p2) __arm_vstrhq_scatter_offset(p0,p1,p2)
#define __arm_vstrhq_scatter_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_offset_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_offset_s32 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_offset_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_offset_u32 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));})
25282 | ||
/* NOTE(review): duplicate definition — vstrhq_scatter_offset_p is already
   defined identically earlier in this file; consider deleting one copy.  */
#define vstrhq_scatter_offset_p(p0,p1,p2,p3) __arm_vstrhq_scatter_offset_p(p0,p1,p2,p3)
#define __arm_vstrhq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_offset_p_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_offset_p_s32 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_offset_p_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_offset_p_u32 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
25292 | ||
/* NOTE(review): duplicate definition — vstrhq_scatter_shifted_offset is
   already defined identically earlier in this file; consider deleting one.  */
#define vstrhq_scatter_shifted_offset(p0,p1,p2) __arm_vstrhq_scatter_shifted_offset(p0,p1,p2)
#define __arm_vstrhq_scatter_shifted_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_shifted_offset_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_shifted_offset_s32 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_shifted_offset_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_shifted_offset_u32 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));})
25302 | ||
/* NOTE(review): duplicate definition — vstrhq_scatter_shifted_offset_p is
   already defined identically earlier in this file; consider deleting one.  */
#define vstrhq_scatter_shifted_offset_p(p0,p1,p2,p3) __arm_vstrhq_scatter_shifted_offset_p(p0,p1,p2,p3)
#define __arm_vstrhq_scatter_shifted_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_shifted_offset_p_s32 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_shifted_offset_p_u32 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
25312 | ||
/* vstrwq_scatter_offset: word scatter store; dispatch on P0 and P2 (the
   offset vector __p1 is passed through without participating in dispatch).
   NOTE(review): duplicated later in this file with an identical body.  */
#define vstrwq_scatter_offset(p0,p1,p2) __arm_vstrwq_scatter_offset(p0,p1,p2)
#define __arm_vstrwq_scatter_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_offset_s32 (__ARM_mve_coerce(__p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t)), \
  int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_offset_u32 (__ARM_mve_coerce(__p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t)));})
25320 | ||
/* vstrwq_scatter_offset_p: predicated word scatter store; dispatch on P0
   and P2, forwarding the offset vector __p1 and trailing argument P3.
   Fix: the uint32x4_t branch previously called
   __arm_vstrwq_scatter_offset_p_u32 WITHOUT the trailing P3 argument,
   unlike the int32x4_t branch above it and the later (duplicate)
   definition of this same macro — that dropped the predicate and made the
   redefinition non-identical.  P3 is now passed in both branches, which
   also makes the later redefinition benign.  */
#define vstrwq_scatter_offset_p(p0,p1,p2,p3) __arm_vstrwq_scatter_offset_p(p0,p1,p2,p3)
#define __arm_vstrwq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_offset_p_s32 (__ARM_mve_coerce(__p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_offset_p_u32 (__ARM_mve_coerce(__p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t), p3));})
25328 | ||
/* NOTE(review): duplicate definition of vstrwq_scatter_offset_p.  The
   earlier copy omits the trailing p3 argument in its u32 branch, so the two
   bodies differ — reconcile and delete one copy.  This copy passes p3 in
   both branches.  */
#define vstrwq_scatter_offset_p(p0,p1,p2,p3) __arm_vstrwq_scatter_offset_p(p0,p1,p2,p3)
#define __arm_vstrwq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_offset_p_s32 (__ARM_mve_coerce(__p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_offset_p_u32 (__ARM_mve_coerce(__p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t), p3));})
25336 | ||
/* NOTE(review): duplicate definition — vstrwq_scatter_offset is already
   defined identically earlier in this file; consider deleting one copy.  */
#define vstrwq_scatter_offset(p0,p1,p2) __arm_vstrwq_scatter_offset(p0,p1,p2)
#define __arm_vstrwq_scatter_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_offset_s32 (__ARM_mve_coerce(__p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t)), \
  int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_offset_u32 (__ARM_mve_coerce(__p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t)));})
25344 | ||
/* Scatter-store of 32-bit words at (P1 << 2) offsets from base P0.
   Dispatches on the pointer type of P0 and the vector type of P2.  */
#define vstrwq_scatter_shifted_offset(p0,p1,p2) __arm_vstrwq_scatter_shifted_offset(p0,p1,p2)
#define __arm_vstrwq_scatter_shifted_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_shifted_offset_s32 (__ARM_mve_coerce(__p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t)), \
  int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_shifted_offset_u32 (__ARM_mve_coerce(__p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t)));})
25352 | ||
/* Predicated scatter-store of 32-bit words at (P1 << 2) offsets from base
   P0, under predicate P3.  Dispatches on P0's pointer type and P2's vector
   type.  */
#define vstrwq_scatter_shifted_offset_p(p0,p1,p2,p3) __arm_vstrwq_scatter_shifted_offset_p(p0,p1,p2,p3)
#define __arm_vstrwq_scatter_shifted_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_shifted_offset_p_s32 (__ARM_mve_coerce(__p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_shifted_offset_p_u32 (__ARM_mve_coerce(__p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t), p3));})
25360 | ||
85a94e87 SP |
/* Return an uninitialized vector of the same type as P0.  P0 is only used
   for type dispatch; its value is never read.  */
#define vuninitializedq(p0) __arm_vuninitializedq(p0)
#define __arm_vuninitializedq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vuninitializedq_s8 (), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vuninitializedq_s16 (), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vuninitializedq_s32 (), \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vuninitializedq_s64 (), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vuninitializedq_u8 (), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vuninitializedq_u16 (), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vuninitializedq_u32 (), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vuninitializedq_u64 ());})
25372 | ||
/* Reinterpret the bits of vector P0 as int16x8_t (no value conversion).
   One branch per supported source vector type.  */
#define vreinterpretq_s16(p0) __arm_vreinterpretq_s16(p0)
#define __arm_vreinterpretq_s16(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_s16_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_s16_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_s16_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_s16_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_s16_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_s16_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_s16_u64 (__ARM_mve_coerce(__p0, uint64x2_t)));})
25383 | ||
/* Reinterpret the bits of vector P0 as int32x4_t (no value conversion).
   Branches listed in canonical s8..u64 order; _Generic selection is
   order-independent.  */
#define vreinterpretq_s32(p0) __arm_vreinterpretq_s32(p0)
#define __arm_vreinterpretq_s32(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_s32_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_s32_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_s32_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_s32_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_s32_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_s32_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_s32_u64 (__ARM_mve_coerce(__p0, uint64x2_t)));})
25394 | ||
/* Reinterpret the bits of vector P0 as int64x2_t (no value conversion).
   Branches listed in canonical s8..u64 order; _Generic selection is
   order-independent.  */
#define vreinterpretq_s64(p0) __arm_vreinterpretq_s64(p0)
#define __arm_vreinterpretq_s64(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_s64_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_s64_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_s64_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_s64_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_s64_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_s64_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_s64_u64 (__ARM_mve_coerce(__p0, uint64x2_t)));})
25405 | ||
/* Reinterpret the bits of vector P0 as int8x16_t (no value conversion).
   One branch per supported source vector type.  */
#define vreinterpretq_s8(p0) __arm_vreinterpretq_s8(p0)
#define __arm_vreinterpretq_s8(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_s8_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_s8_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_s8_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_s8_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_s8_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_s8_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_s8_u64 (__ARM_mve_coerce(__p0, uint64x2_t)));})
25416 | ||
/* Reinterpret the bits of vector P0 as uint16x8_t (no value conversion).
   Branches listed in canonical s8..u64 order; _Generic selection is
   order-independent.  */
#define vreinterpretq_u16(p0) __arm_vreinterpretq_u16(p0)
#define __arm_vreinterpretq_u16(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_u16_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_u16_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_u16_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_u16_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_u16_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_u16_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_u16_u64 (__ARM_mve_coerce(__p0, uint64x2_t)));})
25427 | ||
/* Reinterpret the bits of vector P0 as uint32x4_t (no value conversion).
   Branches listed in canonical s8..u64 order; _Generic selection is
   order-independent.  */
#define vreinterpretq_u32(p0) __arm_vreinterpretq_u32(p0)
#define __arm_vreinterpretq_u32(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_u32_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_u32_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_u32_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_u32_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_u32_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_u32_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_u32_u64 (__ARM_mve_coerce(__p0, uint64x2_t)));})
25438 | ||
/* Reinterpret the bits of vector P0 as uint64x2_t (no value conversion).
   Branches listed in canonical s8..u32 order; _Generic selection is
   order-independent.  */
#define vreinterpretq_u64(p0) __arm_vreinterpretq_u64(p0)
#define __arm_vreinterpretq_u64(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_u64_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_u64_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_u64_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_u64_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_u64_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_u64_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_u64_u32 (__ARM_mve_coerce(__p0, uint32x4_t)));})
25449 | ||
/* Reinterpret the bits of vector P0 as uint8x16_t (no value conversion).
   Branches listed in canonical s8..u64 order; _Generic selection is
   order-independent.  */
#define vreinterpretq_u8(p0) __arm_vreinterpretq_u8(p0)
#define __arm_vreinterpretq_u8(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_u8_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_u8_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_u8_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_u8_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_u8_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_u8_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_u8_u64 (__ARM_mve_coerce(__p0, uint64x2_t)));})
25460 | ||
261014a1 SP |
/* Predicated (dont-care) absolute value of signed vector P1 under
   predicate P2.  Dispatches on the vector type of P1.  */
#define vabsq_x(p1,p2) __arm_vabsq_x(p1,p2)
#define __arm_vabsq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vabsq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vabsq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vabsq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2));})
25467 | ||
/* Predicated (dont-care) addition under predicate P3.  Dispatches on the
   types of P1 and P2: vector+vector maps to the plain overloads,
   vector+scalar to the _n_ (broadcast) overloads.  */
#define vaddq_x(p1,p2,p3) __arm_vaddq_x(p1,p2,p3)
#define __arm_vaddq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vaddq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vaddq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vaddq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vaddq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vaddq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vaddq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vaddq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vaddq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vaddq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vaddq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vaddq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vaddq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3));})
25484 | ||
/* Predicated (dont-care) complex add with 270-degree rotation, under
   predicate P3.  Dispatches on the vector types of P1 and P2.  */
#define vcaddq_rot270_x(p1,p2,p3) __arm_vcaddq_rot270_x(p1,p2,p3)
#define __arm_vcaddq_rot270_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot270_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot270_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot270_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot270_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot270_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot270_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
25495 | ||
/* Predicated (dont-care) complex add with 90-degree rotation, under
   predicate P3.  Dispatches on the vector types of P1 and P2.  */
#define vcaddq_rot90_x(p1,p2,p3) __arm_vcaddq_rot90_x(p1,p2,p3)
#define __arm_vcaddq_rot90_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot90_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot90_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot90_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot90_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot90_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot90_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
25506 | ||
/* Predicated (dont-care) bitwise exclusive-or under predicate P3.
   Dispatches on the vector types of P1 and P2.  Call spacing normalized
   (the original mixed "_s8(" with "_u8 (").  */
#define veorq_x(p1,p2,p3) __arm_veorq_x(p1,p2,p3)
#define __arm_veorq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_veorq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_veorq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_veorq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_veorq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_veorq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_veorq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
25517 | ||
e81d0d9e SP |
/* Predicated (dont-care) multiplication under predicate P3.  Dispatches on
   the types of P1 and P2: vector*vector maps to the plain overloads,
   vector*scalar to the _n_ (broadcast) overloads.  */
#define vmulq_x(p1,p2,p3) __arm_vmulq_x(p1,p2,p3)
#define __arm_vmulq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vmulq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vmulq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vmulq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vmulq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vmulq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vmulq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3));})
261014a1 | 25534 | |
e81d0d9e SP |
/* Predicated (dont-care) negation of signed vector P1 under predicate P2.
   Dispatches on the vector type of P1.  */
#define vnegq_x(p1,p2) __arm_vnegq_x(p1,p2)
#define __arm_vnegq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vnegq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vnegq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vnegq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2));})
25541 | ||
/* Predicated (dont-care) bitwise OR of P1 with the complement of P2, under
   predicate P3.  Dispatches on the vector types of P1 and P2.  */
#define vornq_x(p1,p2,p3) __arm_vornq_x(p1,p2,p3)
#define __arm_vornq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vornq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vornq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vornq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vornq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vornq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vornq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
25552 | ||
/* Predicated (dont-care) bitwise inclusive-or under predicate P3.
   Dispatches on the vector types of P1 and P2.  */
#define vorrq_x(p1,p2,p3) __arm_vorrq_x(p1,p2,p3)
#define __arm_vorrq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vorrq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vorrq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vorrq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vorrq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vorrq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vorrq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
25563 | ||
/* Predicated (dont-care) element reversal within 32-bit containers, under
   predicate P2.  Only 8- and 16-bit element types are meaningful here.  */
#define vrev32q_x(p1,p2) __arm_vrev32q_x(p1,p2)
#define __arm_vrev32q_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev32q_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vrev32q_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev32q_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrev32q_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2));})
25571 | ||
/* Predicated (dont-care) element reversal within 64-bit containers, under
   predicate P2.  Dispatches on the vector type of P1.  */
#define vrev64q_x(p1,p2) __arm_vrev64q_x(p1,p2)
#define __arm_vrev64q_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev64q_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vrev64q_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vrev64q_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev64q_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrev64q_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrev64q_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2));})
25581 | ||
/* Predicated (dont-care) absolute difference under predicate P3.
   Dispatches on the vector types of P1 and P2.  */
#define vabdq_x(p1,p2,p3) __arm_vabdq_x(p1,p2,p3)
#define __arm_vabdq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vabdq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vabdq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vabdq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vabdq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vabdq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vabdq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
25592 | ||
/* Predicated (dont-care) bitwise AND under predicate P3.  Dispatches on
   the vector types of P1 and P2.  */
#define vandq_x(p1,p2,p3) __arm_vandq_x(p1,p2,p3)
#define __arm_vandq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vandq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vandq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vandq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vandq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vandq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vandq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
25603 | ||
/* Predicated (dont-care) bit-clear (P1 AND NOT P2) under predicate P3.
   Dispatches on the vector types of P1 and P2.  */
#define vbicq_x(p1,p2,p3) __arm_vbicq_x(p1,p2,p3)
#define __arm_vbicq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vbicq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vbicq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vbicq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vbicq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vbicq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vbicq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
25614 | ||
/* Predicated (dont-care) bit-reverse-shift-right of P1 by scalar P2, under
   predicate P3.  Dispatches on the vector type of P1 (always the _n_
   scalar-shift overloads).  */
#define vbrsrq_x(p1,p2,p3) __arm_vbrsrq_x(p1,p2,p3)
#define __arm_vbrsrq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vbrsrq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vbrsrq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vbrsrq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vbrsrq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vbrsrq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vbrsrq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
25624 | ||
1dfcc3b5 SP |
/* Predicated contiguous load from P0 under predicate P1 (inactive lanes
   zeroed).  Dispatches on the const-pointer type of P0.  */
#define vld1q_z(p0,p1) __arm_vld1q_z(p0, p1)
#define __arm_vld1q_z(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8_t_const_ptr]: __arm_vld1q_z_s8 (__ARM_mve_coerce(__p0, int8_t const *), p1), \
  int (*)[__ARM_mve_type_int16_t_const_ptr]: __arm_vld1q_z_s16 (__ARM_mve_coerce(__p0, int16_t const *), p1), \
  int (*)[__ARM_mve_type_int32_t_const_ptr]: __arm_vld1q_z_s32 (__ARM_mve_coerce(__p0, int32_t const *), p1), \
  int (*)[__ARM_mve_type_uint8_t_const_ptr]: __arm_vld1q_z_u8 (__ARM_mve_coerce(__p0, uint8_t const *), p1), \
  int (*)[__ARM_mve_type_uint16_t_const_ptr]: __arm_vld1q_z_u16 (__ARM_mve_coerce(__p0, uint16_t const *), p1), \
  int (*)[__ARM_mve_type_uint32_t_const_ptr]: __arm_vld1q_z_u32 (__ARM_mve_coerce(__p0, uint32_t const *), p1));})
25634 | ||
/* De-interleaving load of two vectors from P0.  Dispatches on the
   const-pointer type of P0; returns the matching <type>x2_t struct.  */
#define vld2q(p0) __arm_vld2q(p0)
#define __arm_vld2q(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8_t_const_ptr]: __arm_vld2q_s8 (__ARM_mve_coerce(__p0, int8_t const *)), \
  int (*)[__ARM_mve_type_int16_t_const_ptr]: __arm_vld2q_s16 (__ARM_mve_coerce(__p0, int16_t const *)), \
  int (*)[__ARM_mve_type_int32_t_const_ptr]: __arm_vld2q_s32 (__ARM_mve_coerce(__p0, int32_t const *)), \
  int (*)[__ARM_mve_type_uint8_t_const_ptr]: __arm_vld2q_u8 (__ARM_mve_coerce(__p0, uint8_t const *)), \
  int (*)[__ARM_mve_type_uint16_t_const_ptr]: __arm_vld2q_u16 (__ARM_mve_coerce(__p0, uint16_t const *)), \
  int (*)[__ARM_mve_type_uint32_t_const_ptr]: __arm_vld2q_u32 (__ARM_mve_coerce(__p0, uint32_t const *)));})
25644 | ||
/* De-interleaving load of four vectors from P0.  Dispatches on the
   const-pointer type of P0; returns the matching <type>x4_t struct.  */
#define vld4q(p0) __arm_vld4q(p0)
#define __arm_vld4q(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8_t_const_ptr]: __arm_vld4q_s8 (__ARM_mve_coerce(__p0, int8_t const *)), \
  int (*)[__ARM_mve_type_int16_t_const_ptr]: __arm_vld4q_s16 (__ARM_mve_coerce(__p0, int16_t const *)), \
  int (*)[__ARM_mve_type_int32_t_const_ptr]: __arm_vld4q_s32 (__ARM_mve_coerce(__p0, int32_t const *)), \
  int (*)[__ARM_mve_type_uint8_t_const_ptr]: __arm_vld4q_u8 (__ARM_mve_coerce(__p0, uint8_t const *)), \
  int (*)[__ARM_mve_type_uint16_t_const_ptr]: __arm_vld4q_u16 (__ARM_mve_coerce(__p0, uint16_t const *)), \
  int (*)[__ARM_mve_type_uint32_t_const_ptr]: __arm_vld4q_u32 (__ARM_mve_coerce(__p0, uint32_t const *)));})
25654 | ||
1a5c27b1 SP |
/* vgetq_lane (vec, lane): extract scalar lane from a vector; dispatches
   on the vector type (8/16/32/64-bit signed and unsigned elements).  */
#define vgetq_lane(p0,p1) __arm_vgetq_lane(p0,p1)
#define __arm_vgetq_lane(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vgetq_lane_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vgetq_lane_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vgetq_lane_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vgetq_lane_s64 (__ARM_mve_coerce(__p0, int64x2_t), p1), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vgetq_lane_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vgetq_lane_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vgetq_lane_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vgetq_lane_u64 (__ARM_mve_coerce(__p0, uint64x2_t), p1));})

/* vsetq_lane (scalar, vec, lane): insert scalar into a vector lane;
   dispatches on the (scalar type, vector type) pair, so the scalar and
   vector element types must match.  */
#define vsetq_lane(p0,p1,p2) __arm_vsetq_lane(p0,p1,p2)
#define __arm_vsetq_lane(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8_t][__ARM_mve_type_int8x16_t]: __arm_vsetq_lane_s8 (__ARM_mve_coerce(__p0, int8_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16_t][__ARM_mve_type_int16x8_t]: __arm_vsetq_lane_s16 (__ARM_mve_coerce(__p0, int16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32_t][__ARM_mve_type_int32x4_t]: __arm_vsetq_lane_s32 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_int64_t][__ARM_mve_type_int64x2_t]: __arm_vsetq_lane_s64 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int64x2_t), p2), \
  int (*)[__ARM_mve_type_uint8_t][__ARM_mve_type_uint8x16_t]: __arm_vsetq_lane_u8 (__ARM_mve_coerce(__p0, uint8_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16_t][__ARM_mve_type_uint16x8_t]: __arm_vsetq_lane_u16 (__ARM_mve_coerce(__p0, uint16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32_t][__ARM_mve_type_uint32x4_t]: __arm_vsetq_lane_u32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
  int (*)[__ARM_mve_type_uint64_t][__ARM_mve_type_uint64x2_t]: __arm_vsetq_lane_u64 (__ARM_mve_coerce(__p0, uint64_t), __ARM_mve_coerce(__p1, uint64x2_t), p2));})
25679 | ||
429d607b SP |
25680 | #endif /* MVE Integer. */ |
25681 | ||
261014a1 SP |
/* vmvnq_x (vec, predicate): predicated ("_x" — don't-care for inactive
   lanes) polymorphic wrapper dispatching on the input vector type.  */
#define vmvnq_x(p1,p2) __arm_vmvnq_x(p1,p2)
#define __arm_vmvnq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vmvnq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vmvnq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vmvnq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmvnq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmvnq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vmvnq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2));})

/* vrev16q_x (vec, predicate): only 8-bit element vectors are valid, so
   the selection list has just the s8/u8 entries.  */
#define vrev16q_x(p1,p2) __arm_vrev16q_x(p1,p2)
#define __arm_vrev16q_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev16q_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev16q_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2));})
25697 | ||
/* vrhaddq_x (vec1, vec2, predicate): two-operand predicated wrapper;
   dispatches on the pair of vector argument types, which must match.  */
#define vrhaddq_x(p1,p2,p3) __arm_vrhaddq_x(p1,p2,p3)
#define __arm_vrhaddq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrhaddq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrhaddq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrhaddq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrhaddq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrhaddq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrhaddq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
25708 | ||
/* vshlq_x (vec, shift-vec, predicate): predicated vector shift left,
   where the (signed) per-lane shift counts come from the second vector
   operand; unsigned data vectors still take a signed shift vector.

   Bug fix: the public wrapper was declared with four parameters
   (vshlq_x(p0,p1,p2,p3)) while __arm_vshlq_x and the underlying
   intrinsics take only three (value, shift vector, predicate).  The
   intrinsic is invoked with three arguments, so the four-parameter
   wrapper could never expand — every use of the generic name was a
   hard preprocessor error.  The wrapper now matches the
   implementation's arity.  */
#define vshlq_x(p1,p2,p3) __arm_vshlq_x(p1,p2,p3)
#define __arm_vshlq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vshlq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vshlq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vshlq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vshlq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vshlq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vshlq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
25719 | ||
/* vrmulhq_x (vec1, vec2, predicate): both operands must be vectors of
   the same element type.  */
#define vrmulhq_x(p1,p2,p3) __arm_vrmulhq_x(p1,p2,p3)
#define __arm_vrmulhq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrmulhq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrmulhq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrmulhq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrmulhq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrmulhq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrmulhq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})

/* vrshlq_x (vec, shift-vec, predicate): like vshlq_x, the shift-count
   vector is always a signed vector, even for unsigned data.  */
#define vrshlq_x(p1,p2,p3) __arm_vrshlq_x(p1,p2,p3)
#define __arm_vrshlq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshlq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshlq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
25741 | ||
/* vrshrq_x (vec, imm, predicate): immediate-shift form; p2 is passed
   through unevaluated-by-typeid so it can remain an integer constant
   expression, as the _n_ intrinsics require.  */
#define vrshrq_x(p1,p2,p3) __arm_vrshrq_x(p1,p2,p3)
#define __arm_vrshrq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vrshrq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vrshrq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vrshrq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrshrq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrshrq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrshrq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
25751 | ||
/* vshllbq_x / vshlltq_x (vec, imm, predicate): widening shifts, so only
   8- and 16-bit element vectors are accepted (results widen to 16/32).  */
#define vshllbq_x(p1,p2,p3) __arm_vshllbq_x(p1,p2,p3)
#define __arm_vshllbq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vshllbq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vshllbq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshllbq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshllbq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2, p3));})

#define vshlltq_x(p1,p2,p3) __arm_vshlltq_x(p1,p2,p3)
#define __arm_vshlltq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlltq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlltq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlltq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlltq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2, p3));})
25767 | ||
/* vshlq_x_n (vec, imm, predicate): immediate-count variant of vshlq_x;
   dispatches on the data vector type only.  */
#define vshlq_x_n(p1,p2,p3) __arm_vshlq_x_n(p1,p2,p3)
#define __arm_vshlq_x_n(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
25777 | ||
/* vdwdupq_x_* / viwdupq_x_* (start, wrap, step-imm, predicate): the first
   argument may be either a uint32_t start value (_n_ variant) or a
   uint32_t * whose pointee supplies the start and receives the
   written-back value (_wb_ variant); _Generic picks between the two.  */
#define vdwdupq_x_u8(p1,p2,p3,p4) __arm_vdwdupq_x_u8(p1,p2,p3,p4)
#define __arm_vdwdupq_x_u8(p1,p2,p3,p4) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint32_t]: __arm_vdwdupq_x_n_u8 (__ARM_mve_coerce(__p1, uint32_t), p2, p3, p4), \
  int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_x_wb_u8 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4));})

#define vdwdupq_x_u16(p1,p2,p3,p4) __arm_vdwdupq_x_u16(p1,p2,p3,p4)
#define __arm_vdwdupq_x_u16(p1,p2,p3,p4) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint32_t]: __arm_vdwdupq_x_n_u16 (__ARM_mve_coerce(__p1, uint32_t), p2, p3, p4), \
  int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_x_wb_u16 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4));})

#define vdwdupq_x_u32(p1,p2,p3,p4) __arm_vdwdupq_x_u32(p1,p2,p3,p4)
#define __arm_vdwdupq_x_u32(p1,p2,p3,p4) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint32_t]: __arm_vdwdupq_x_n_u32 (__ARM_mve_coerce(__p1, uint32_t), p2, p3, p4), \
  int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_x_wb_u32 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4));})

#define viwdupq_x_u8(p1,p2,p3,p4) __arm_viwdupq_x_u8(p1,p2,p3,p4)
#define __arm_viwdupq_x_u8(p1,p2,p3,p4) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint32_t]: __arm_viwdupq_x_n_u8 (__ARM_mve_coerce(__p1, uint32_t), p2, p3, p4), \
  int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_x_wb_u8 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4));})

#define viwdupq_x_u16(p1,p2,p3,p4) __arm_viwdupq_x_u16(p1,p2,p3,p4)
#define __arm_viwdupq_x_u16(p1,p2,p3,p4) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint32_t]: __arm_viwdupq_x_n_u16 (__ARM_mve_coerce(__p1, uint32_t), p2, p3, p4), \
  int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_x_wb_u16 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4));})

#define viwdupq_x_u32(p1,p2,p3,p4) __arm_viwdupq_x_u32(p1,p2,p3,p4)
#define __arm_viwdupq_x_u32(p1,p2,p3,p4) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint32_t]: __arm_viwdupq_x_n_u32 (__ARM_mve_coerce(__p1, uint32_t), p2, p3, p4), \
  int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_x_wb_u32 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4));})
25813 | ||
/* vidupq_x_* / vddupq_x_* (start, step-imm, predicate): same scheme as
   the wrapping variants above — scalar uint32_t start selects the _n_
   intrinsic, a uint32_t * selects the write-back (_wb_) intrinsic.  */
#define vidupq_x_u8(p1,p2,p3) __arm_vidupq_x_u8(p1,p2,p3)
#define __arm_vidupq_x_u8(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint32_t]: __arm_vidupq_x_n_u8 (__ARM_mve_coerce(__p1, uint32_t), p2, p3), \
  int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_x_wb_u8 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3));})

#define vddupq_x_u8(p1,p2,p3) __arm_vddupq_x_u8(p1,p2,p3)
#define __arm_vddupq_x_u8(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint32_t]: __arm_vddupq_x_n_u8 (__ARM_mve_coerce(__p1, uint32_t), p2, p3), \
  int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_x_wb_u8 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3));})

#define vidupq_x_u16(p1,p2,p3) __arm_vidupq_x_u16(p1,p2,p3)
#define __arm_vidupq_x_u16(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint32_t]: __arm_vidupq_x_n_u16 (__ARM_mve_coerce(__p1, uint32_t), p2, p3), \
  int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_x_wb_u16 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3));})

#define vddupq_x_u16(p1,p2,p3) __arm_vddupq_x_u16(p1,p2,p3)
#define __arm_vddupq_x_u16(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint32_t]: __arm_vddupq_x_n_u16 (__ARM_mve_coerce(__p1, uint32_t), p2, p3), \
  int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_x_wb_u16 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3));})

#define vidupq_x_u32(p1,p2,p3) __arm_vidupq_x_u32(p1,p2,p3)
#define __arm_vidupq_x_u32(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint32_t]: __arm_vidupq_x_n_u32 (__ARM_mve_coerce(__p1, uint32_t), p2, p3), \
  int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_x_wb_u32 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3));})

#define vddupq_x_u32(p1,p2,p3) __arm_vddupq_x_u32(p1,p2,p3)
#define __arm_vddupq_x_u32(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint32_t]: __arm_vddupq_x_n_u32 (__ARM_mve_coerce(__p1, uint32_t), p2, p3), \
  int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_x_wb_u32 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3));})
25849 | ||
/* NOTE(review): the vshrq_x definition sits between vhaddq_x's public
   wrapper and its __arm_vhaddq_x implementation; harmless to the
   preprocessor, but the two vhaddq_x macros belong together.  */

/* vhaddq_x (vec, vec-or-scalar, predicate): second operand may be a
   matching vector or a matching-width scalar (_n_ variants).  */
#define vhaddq_x(p1,p2,p3) __arm_vhaddq_x(p1,p2,p3)
/* vshrq_x (vec, imm, predicate): immediate right shift, per element type.  */
#define vshrq_x(p1,p2,p3) __arm_vshrq_x(p1,p2,p3)
#define __arm_vshrq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vshrq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vshrq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vshrq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshrq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshrq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshrq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})

#define __arm_vhaddq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vhaddq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vhaddq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vhaddq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vhaddq_x_n_u8( __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vhaddq_x_n_u16( __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vhaddq_x_n_u32( __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhaddq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhaddq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhaddq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vhaddq_x_u8( __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vhaddq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vhaddq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
25876 | ||
/* vhcaddq_rot270_x / vhcaddq_rot90_x (vec1, vec2, predicate): these
   complex-arithmetic intrinsics exist only for signed element types, so
   the selection lists have no unsigned entries.  */
#define vhcaddq_rot270_x(p1,p2,p3) __arm_vhcaddq_rot270_x(p1,p2,p3)
#define __arm_vhcaddq_rot270_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhcaddq_rot270_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhcaddq_rot270_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhcaddq_rot270_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})

#define vhcaddq_rot90_x(p1,p2,p3) __arm_vhcaddq_rot90_x(p1,p2,p3)
#define __arm_vhcaddq_rot90_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhcaddq_rot90_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhcaddq_rot90_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhcaddq_rot90_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
25892 | ||
/* vhsubq_x (vec, vec-or-scalar, predicate): like vhaddq_x, the second
   operand may be a matching vector or a matching-width scalar; the
   scalar (_n_) cases are listed first.  */
#define vhsubq_x(p1,p2,p3) __arm_vhsubq_x(p1,p2,p3)
#define __arm_vhsubq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vhsubq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vhsubq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vhsubq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vhsubq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vhsubq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vhsubq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhsubq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhsubq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhsubq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vhsubq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vhsubq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vhsubq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
25909 | ||
/* vclsq_x (vec, predicate): signed element types only.  */
#define vclsq_x(p1,p2) __arm_vclsq_x(p1,p2)
#define __arm_vclsq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vclsq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vclsq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vclsq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2));})

/* vclzq_x (vec, predicate): signed and unsigned element types.  */
#define vclzq_x(p1,p2) __arm_vclzq_x(p1,p2)
#define __arm_vclzq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vclzq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vclzq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vclzq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vclzq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vclzq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vclzq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2));})
25926 | ||
/* vadciq (vec1, vec2, carry-out-ptr): 32-bit element types only.
   NOTE(review): an identical second definition of this macro appears
   later in the file; C permits identical redefinition, but only one
   copy should be kept.  */
#define vadciq(p0,p1,p2) __arm_vadciq(p0,p1,p2)
#define __arm_vadciq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vadciq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vadciq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
25933 | ||
41e1a7ff SP |
/* vstrdq_scatter_base_wb[_p] (addr-vec-ptr, offset, value-vec[, pred]):
   dispatch is on the 64-bit value vector (p2); the first two arguments
   are forwarded unexamined.  */
#define vstrdq_scatter_base_wb_p(p0,p1,p2,p3) __arm_vstrdq_scatter_base_wb_p(p0,p1,p2,p3)
#define __arm_vstrdq_scatter_base_wb_p(p0,p1,p2,p3) ({ __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_base_wb_p_s64 (p0, p1, __ARM_mve_coerce(__p2, int64x2_t), p3), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_base_wb_p_u64 (p0, p1, __ARM_mve_coerce(__p2, uint64x2_t), p3));})

#define vstrdq_scatter_base_wb(p0,p1,p2) __arm_vstrdq_scatter_base_wb(p0,p1,p2)
#define __arm_vstrdq_scatter_base_wb(p0,p1,p2) ({ __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_base_wb_s64 (p0, p1, __ARM_mve_coerce(__p2, int64x2_t)), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_base_wb_u64 (p0, p1, __ARM_mve_coerce(__p2, uint64x2_t)));})
25945 | ||
4cc23303 SP |
/* vldrdq_gather_[shifted_]offset[_z] (base-ptr, offset-vec[, pred]):
   64-bit gather loads; dispatch is on the base pointer's element type
   (int64_t const * / uint64_t const *).  The _z variants take a
   trailing predicate.  */
#define vldrdq_gather_offset(p0,p1) __arm_vldrdq_gather_offset(p0,p1)
#define __arm_vldrdq_gather_offset(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int64_t_const_ptr]: __arm_vldrdq_gather_offset_s64 (__ARM_mve_coerce(__p0, int64_t const *), p1), \
  int (*)[__ARM_mve_type_uint64_t_const_ptr]: __arm_vldrdq_gather_offset_u64 (__ARM_mve_coerce(__p0, uint64_t const *), p1));})

#define vldrdq_gather_offset_z(p0,p1,p2) __arm_vldrdq_gather_offset_z(p0,p1,p2)
#define __arm_vldrdq_gather_offset_z(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int64_t_const_ptr]: __arm_vldrdq_gather_offset_z_s64 (__ARM_mve_coerce(__p0, int64_t const *), p1, p2), \
  int (*)[__ARM_mve_type_uint64_t_const_ptr]: __arm_vldrdq_gather_offset_z_u64 (__ARM_mve_coerce(__p0, uint64_t const *), p1, p2));})

#define vldrdq_gather_shifted_offset(p0,p1) __arm_vldrdq_gather_shifted_offset(p0,p1)
#define __arm_vldrdq_gather_shifted_offset(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int64_t_const_ptr]: __arm_vldrdq_gather_shifted_offset_s64 (__ARM_mve_coerce(__p0, int64_t const *), p1), \
  int (*)[__ARM_mve_type_uint64_t_const_ptr]: __arm_vldrdq_gather_shifted_offset_u64 (__ARM_mve_coerce(__p0, uint64_t const *), p1));})

#define vldrdq_gather_shifted_offset_z(p0,p1,p2) __arm_vldrdq_gather_shifted_offset_z(p0,p1,p2)
#define __arm_vldrdq_gather_shifted_offset_z(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int64_t_const_ptr]: __arm_vldrdq_gather_shifted_offset_z_s64 (__ARM_mve_coerce(__p0, int64_t const *), p1, p2), \
  int (*)[__ARM_mve_type_uint64_t_const_ptr]: __arm_vldrdq_gather_shifted_offset_z_u64 (__ARM_mve_coerce(__p0, uint64_t const *), p1, p2));})
25969 | ||
c3562f81 SP |
/* vadciq_m (inactive, vec1, vec2, carry-out-ptr, predicate): merging
   predicated form; dispatches on the (inactive, vec1, vec2) type triple,
   32-bit element types only.  */
#define vadciq_m(p0,p1,p2,p3,p4) __arm_vadciq_m(p0,p1,p2,p3,p4)
#define __arm_vadciq_m(p0,p1,p2,p3,p4) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vadciq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3, p4), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vadciq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3, p4));})
25977 | ||
/* Duplicate definitions of vadciq / __arm_vadciq removed: token-identical
   definitions of this polymorphic wrapper already appear earlier in this
   file.  Identical macro redefinition is permitted by C (C11 6.10.3p2),
   but keeping two copies invites silent divergence during maintenance,
   so only the first definition is retained.  */
25984 | ||
25985 | #define vadcq_m(p0,p1,p2,p3,p4) __arm_vadcq_m(p0,p1,p2,p3,p4) | |
25986 | #define __arm_vadcq_m(p0,p1,p2,p3,p4) ({ __typeof(p0) __p0 = (p0); \ | |
25987 | __typeof(p1) __p1 = (p1); \ | |
25988 | __typeof(p2) __p2 = (p2); \ | |
25989 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
25990 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vadcq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3, p4), \ | |
25991 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vadcq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3, p4));}) | |
25992 | ||
25993 | #define vadcq(p0,p1,p2) __arm_vadcq(p0,p1,p2) | |
25994 | #define __arm_vadcq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
25995 | __typeof(p1) __p1 = (p1); \ | |
25996 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
25997 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vadcq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ | |
25998 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vadcq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) | |
25999 | ||
26000 | #define vsbciq_m(p0,p1,p2,p3,p4) __arm_vsbciq_m(p0,p1,p2,p3,p4) | |
26001 | #define __arm_vsbciq_m(p0,p1,p2,p3,p4) ({ __typeof(p0) __p0 = (p0); \ | |
26002 | __typeof(p1) __p1 = (p1); \ | |
26003 | __typeof(p2) __p2 = (p2); \ | |
26004 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
26005 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsbciq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3, p4), \ | |
26006 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsbciq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3, p4));}) | |
26007 | ||
26008 | #define vsbciq(p0,p1,p2) __arm_vsbciq(p0,p1,p2) | |
26009 | #define __arm_vsbciq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
26010 | __typeof(p1) __p1 = (p1); \ | |
26011 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
26012 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsbciq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ | |
26013 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsbciq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) | |
26014 | ||
26015 | #define vsbcq_m(p0,p1,p2,p3,p4) __arm_vsbcq_m(p0,p1,p2,p3,p4) | |
26016 | #define __arm_vsbcq_m(p0,p1,p2,p3,p4) ({ __typeof(p0) __p0 = (p0); \ | |
26017 | __typeof(p1) __p1 = (p1); \ | |
26018 | __typeof(p2) __p2 = (p2); \ | |
26019 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
26020 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsbcq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3, p4), \ | |
26021 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsbcq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3, p4));}) | |
26022 | ||
26023 | #define vsbcq(p0,p1,p2) __arm_vsbcq(p0,p1,p2) | |
26024 | #define __arm_vsbcq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
26025 | __typeof(p1) __p1 = (p1); \ | |
26026 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
26027 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsbcq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ | |
26028 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsbcq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) | |
4cc23303 | 26029 | |
535a8645 SP |
/* vldrbq_gather_offset_z: byte-gather load, predicated.  Dispatch is on
   BOTH the base-pointer type (int8_t/uint8_t const *) and the offset-vector
   type; the offset vector's lane count/width determines the destination
   element width (_s8/_s16/_s32 or _u8/_u16/_u32).  p2 is forwarded
   unchanged (predicate, per the _z convention — NOTE(review): confirm).  */
26030 | #define vldrbq_gather_offset_z(p0,p1,p2) __arm_vldrbq_gather_offset_z(p0,p1,p2)
26031 | #define __arm_vldrbq_gather_offset_z(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
26032 | __typeof(p1) __p1 = (p1); \
26033 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
26034 | int (*)[__ARM_mve_type_int8_t_const_ptr][__ARM_mve_type_uint8x16_t]: __arm_vldrbq_gather_offset_z_s8 (__ARM_mve_coerce(__p0, int8_t const *), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
26035 | int (*)[__ARM_mve_type_int8_t_const_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrbq_gather_offset_z_s16 (__ARM_mve_coerce(__p0, int8_t const *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
26036 | int (*)[__ARM_mve_type_int8_t_const_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrbq_gather_offset_z_s32 (__ARM_mve_coerce(__p0, int8_t const *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
26037 | int (*)[__ARM_mve_type_uint8_t_const_ptr][__ARM_mve_type_uint8x16_t]: __arm_vldrbq_gather_offset_z_u8 (__ARM_mve_coerce(__p0, uint8_t const *), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
26038 | int (*)[__ARM_mve_type_uint8_t_const_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrbq_gather_offset_z_u16 (__ARM_mve_coerce(__p0, uint8_t const *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
26039 | int (*)[__ARM_mve_type_uint8_t_const_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrbq_gather_offset_z_u32 (__ARM_mve_coerce(__p0, uint8_t const *), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) | |
26040 | ||
8eb3b6b9 SP |
/* vqrdmlahq_m / vqrdmlashq_m: predicated saturating rounding
   doubling-multiply-accumulate, scalar (_n) variants only.  Signed types
   only — there are no unsigned cases in these dispatch tables; p3 (the
   predicate — NOTE(review): confirm) is forwarded unchanged.  */
26041 | #define vqrdmlahq_m(p0,p1,p2,p3) __arm_vqrdmlahq_m(p0,p1,p2,p3)
26042 | #define __arm_vqrdmlahq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
26043 | __typeof(p1) __p1 = (p1); \
26044 | __typeof(p2) __p2 = (p2); \
26045 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
26046 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vqrdmlahq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
26047 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vqrdmlahq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
26048 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vqrdmlahq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3));}) | |
26049 | ||
/* vqrdmlashq_m: same dispatch shape as vqrdmlahq_m (scalar in the other
   accumulate position per the 'sh' naming — NOTE(review): confirm).  */
26050 | #define vqrdmlashq_m(p0,p1,p2,p3) __arm_vqrdmlashq_m(p0,p1,p2,p3) | |
26051 | #define __arm_vqrdmlashq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
26052 | __typeof(p1) __p1 = (p1); \
26053 | __typeof(p2) __p2 = (p2); \
26054 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
26055 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vqrdmlashq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
26056 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vqrdmlashq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
26057 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vqrdmlashq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3));}) | |
26058 | ||
/* Saturating shift family.  Note that for the unsigned value-vector cases
   the shift-amount vector (third operand) is still a SIGNED vector type —
   the u8/u16/u32 table rows pair uintNxM_t values with intNxM_t shifts.  */
/* vqrshlq_m: predicated saturating rounding shift left by vector.  */
26059 | #define vqrshlq_m(p0,p1,p2,p3) __arm_vqrshlq_m(p0,p1,p2,p3) | |
26060 | #define __arm_vqrshlq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
26061 | __typeof(p1) __p1 = (p1); \
26062 | __typeof(p2) __p2 = (p2); \
26063 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
26064 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrshlq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
26065 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrshlq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
26066 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrshlq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
26067 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrshlq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
26068 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrshlq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
26069 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrshlq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));}) | |
26070 | ||
/* vqshlq_m_n: immediate-shift form — p2 is the literal shift count and is
   passed through without coercion (must be a compile-time constant for the
   builtin — NOTE(review): confirm range check happens in the builtin).  */
26071 | #define vqshlq_m_n(p0,p1,p2,p3) __arm_vqshlq_m_n(p0,p1,p2,p3) | |
26072 | #define __arm_vqshlq_m_n(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
26073 | __typeof(p1) __p1 = (p1); \
26074 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
26075 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqshlq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
26076 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqshlq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
26077 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqshlq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
26078 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vqshlq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
26079 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vqshlq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
26080 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vqshlq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));}) | |
26081 | ||
/* vqshlq_m: vector-shift form; same unsigned-value/signed-shift pairing
   as vqrshlq_m above.  */
26082 | #define vqshlq_m(p0,p1,p2,p3) __arm_vqshlq_m(p0,p1,p2,p3) | |
26083 | #define __arm_vqshlq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
26084 | __typeof(p1) __p1 = (p1); \
26085 | __typeof(p2) __p2 = (p2); \
26086 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
26087 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqshlq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
26088 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqshlq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
26089 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqshlq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
26090 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqshlq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
26091 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqshlq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
26092 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqshlq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));}) | |
26093 | ||
/* vrhaddq_m / vrmulhq_m: predicated rounding halving-add and rounding
   high-half multiply.  Full six-way dispatch (s8/s16/s32/u8/u16/u32) with
   all three vector operands of the same element type; p3 (predicate —
   NOTE(review): confirm) is forwarded unchanged.  */
26094 | #define vrhaddq_m(p0,p1,p2,p3) __arm_vrhaddq_m(p0,p1,p2,p3) | |
26095 | #define __arm_vrhaddq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
26096 | __typeof(p1) __p1 = (p1); \
26097 | __typeof(p2) __p2 = (p2); \
26098 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
26099 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrhaddq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
26100 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrhaddq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
26101 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrhaddq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
26102 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrhaddq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
26103 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrhaddq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
26104 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrhaddq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) | |
26105 | ||
/* vrmulhq_m: identical dispatch shape to vrhaddq_m.  */
26106 | #define vrmulhq_m(p0,p1,p2,p3) __arm_vrmulhq_m(p0,p1,p2,p3) | |
26107 | #define __arm_vrmulhq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
26108 | __typeof(p1) __p1 = (p1); \
26109 | __typeof(p2) __p2 = (p2); \
26110 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
26111 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrmulhq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
26112 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrmulhq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
26113 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrmulhq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
26114 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrmulhq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
26115 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrmulhq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
26116 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrmulhq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) | |
26117 | ||
532e9e24 SP |
/* Rounding/plain shift family.  Blame-view hash rows are interleaved
   mid-macro in this region and are preserved verbatim.  */
/* vrshlq_m: predicated rounding shift left by vector; unsigned value
   vectors take a SIGNED shift vector (u8/u16/u32 rows pair uintNxM_t with
   intNxM_t).  */
26118 | #define vrshlq_m(p0,p1,p2,p3) __arm_vrshlq_m(p0,p1,p2,p3)
26119 | #define __arm_vrshlq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
8eb3b6b9 SP |
26120 | __typeof(p1) __p1 = (p1); \
26121 | __typeof(p2) __p2 = (p2); \
26122 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
532e9e24 SP |
26123 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshlq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
26124 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
26125 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
26126 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshlq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
26127 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
26128 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));}) | |
8eb3b6b9 | 26129 |
532e9e24 SP |
/* vrshrq_m: predicated rounding shift right by immediate; p2 is the
   literal shift count, forwarded without coercion.  */
26130 | #define vrshrq_m(p0,p1,p2,p3) __arm_vrshrq_m(p0,p1,p2,p3)
26131 | #define __arm_vrshrq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
8eb3b6b9 | 26132 | __typeof(p1) __p1 = (p1); \
532e9e24 SP |
26133 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
26134 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshrq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
26135 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshrq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
26136 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshrq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
26137 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrshrq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
26138 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrshrq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
26139 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrshrq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));}) | |
26140 | ||
/* vshrq_m: non-rounding counterpart of vrshrq_m; same dispatch shape.  */
26141 | #define vshrq_m(p0,p1,p2,p3) __arm_vshrq_m(p0,p1,p2,p3) | |
26142 | #define __arm_vshrq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
26143 | __typeof(p1) __p1 = (p1); \
26144 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
26145 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vshrq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
26146 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vshrq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
26147 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vshrq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
26148 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vshrq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
26149 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vshrq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
26150 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vshrq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));}) | |
26151 | ||
/* vsliq_m: predicated shift-left-and-insert by immediate; same dispatch
   shape as vshrq_m.  */
26152 | #define vsliq_m(p0,p1,p2,p3) __arm_vsliq_m(p0,p1,p2,p3) | |
26153 | #define __arm_vsliq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
26154 | __typeof(p1) __p1 = (p1); \
26155 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
26156 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsliq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
26157 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsliq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
26158 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsliq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
26159 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsliq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
26160 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsliq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
26161 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsliq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));}) | |
8eb3b6b9 SP |
26162 | |
/* vqsubq_m: predicated saturating subtract.  A single _Generic table
   handles both the scalar (_n, third operand is a scalar element type) and
   vector forms — scalar rows are listed first, vector rows after.  */
26163 | #define vqsubq_m(p0,p1,p2,p3) __arm_vqsubq_m(p0,p1,p2,p3)
26164 | #define __arm_vqsubq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
26165 | __typeof(p1) __p1 = (p1); \
26166 | __typeof(p2) __p2 = (p2); \
26167 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
26168 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vqsubq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
26169 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vqsubq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
26170 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vqsubq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
26171 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vqsubq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
26172 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vqsubq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
26173 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vqsubq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3), \
26174 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqsubq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
26175 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqsubq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
26176 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqsubq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
26177 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vqsubq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
26178 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vqsubq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
26179 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vqsubq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) | |
26180 | ||
/* vqrdmulhq_m: predicated saturating rounding doubling high multiply.
   Signed only; vector rows first, then scalar (_n) rows — opposite order
   from vqsubq_m above, but selection is by type so order is immaterial.  */
26181 | #define vqrdmulhq_m(p0,p1,p2,p3) __arm_vqrdmulhq_m(p0,p1,p2,p3) | |
26182 | #define __arm_vqrdmulhq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
26183 | __typeof(p1) __p1 = (p1); \
26184 | __typeof(p2) __p2 = (p2); \
26185 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
26186 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmulhq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
26187 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmulhq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
26188 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmulhq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
26189 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vqrdmulhq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
26190 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vqrdmulhq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
26191 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vqrdmulhq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3));}) | |
26192 | ||
8eb3b6b9 SP |
/* vqrdmlsdhxq_m / vqrdmlsdhq_m: predicated saturating rounding doubling
   multiply-subtract dual (exchanged / non-exchanged).  Signed vector
   operands only; p3 (predicate — NOTE(review): confirm) forwarded
   unchanged.  */
26193 | #define vqrdmlsdhxq_m(p0,p1,p2,p3) __arm_vqrdmlsdhxq_m(p0,p1,p2,p3)
26194 | #define __arm_vqrdmlsdhxq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
26195 | __typeof(p1) __p1 = (p1); \
26196 | __typeof(p2) __p2 = (p2); \
26197 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
26198 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmlsdhxq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
26199 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmlsdhxq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
26200 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmlsdhxq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));}) | |
26201 | ||
/* vqrdmlsdhq_m: non-exchanged variant; same dispatch shape.  */
26202 | #define vqrdmlsdhq_m(p0,p1,p2,p3) __arm_vqrdmlsdhq_m(p0,p1,p2,p3) | |
26203 | #define __arm_vqrdmlsdhq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
26204 | __typeof(p1) __p1 = (p1); \
26205 | __typeof(p2) __p2 = (p2); \
26206 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
26207 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmlsdhq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
26208 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmlsdhq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
26209 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmlsdhq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));}) | |
26210 | ||
532e9e24 SP |
26211 | #define vshllbq_m(p0,p1,p2,p3) __arm_vshllbq_m(p0,p1,p2,p3) |
26212 | #define __arm_vshllbq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ | |
26213 | __typeof(p1) __p1 = (p1); \ | |
26214 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
26215 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int8x16_t]: __arm_vshllbq_m_n_s8 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int8x16_t), p2, p3), \ | |
26216 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t]: __arm_vshllbq_m_n_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \ | |
26217 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint8x16_t]: __arm_vshllbq_m_n_u8 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \ | |
26218 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t]: __arm_vshllbq_m_n_u16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3));}) | |
26219 | ||
26220 | #define vshrntq_m(p0,p1,p2,p3) __arm_vshrntq_m(p0,p1,p2,p3) | |
26221 | #define __arm_vshrntq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ | |
26222 | __typeof(p1) __p1 = (p1); \ | |
26223 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
26224 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vshrntq_m_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \ | |
26225 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vshrntq_m_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \ | |
26226 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vshrntq_m_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \ | |
26227 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vshrntq_m_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));}) | |
26228 | ||
/* Type-generic dispatch for the predicated vshrnbq intrinsic: picks the
   _n_s16/_n_s32/_n_u16/_n_u32 variant from the vector types of p0 and p1.  */
#define vshrnbq_m(p0,p1,p2,p3) __arm_vshrnbq_m(p0,p1,p2,p3)
#define __arm_vshrnbq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vshrnbq_m_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vshrnbq_m_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vshrnbq_m_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vshrnbq_m_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
26237 | ||
/* Type-generic dispatch for the predicated vshlltq intrinsic: here p0 is the
   WIDE destination and p1 the narrow source (widening direction, opposite of
   the vshrn* tables above); selects _n_s8/_n_s16/_n_u8/_n_u16.  */
#define vshlltq_m(p0,p1,p2,p3) __arm_vshlltq_m(p0,p1,p2,p3)
#define __arm_vshlltq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int8x16_t]: __arm_vshlltq_m_n_s8 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t]: __arm_vshlltq_m_n_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint8x16_t]: __arm_vshlltq_m_n_u8 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t]: __arm_vshlltq_m_n_u16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3));})
26246 | ||
/* Type-generic dispatch for the predicated vrshrntq intrinsic: selects the
   _n_s16/_n_s32/_n_u16/_n_u32 variant from the vector types of p0 and p1.  */
#define vrshrntq_m(p0,p1,p2,p3) __arm_vrshrntq_m(p0,p1,p2,p3)
#define __arm_vrshrntq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vrshrntq_m_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vrshrntq_m_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vrshrntq_m_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vrshrntq_m_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
26255 | ||
/* Type-generic dispatch for the predicated vqshruntq intrinsic: signed wide
   source, unsigned narrow destination — only _n_s16 and _n_s32 exist.  */
#define vqshruntq_m(p0,p1,p2,p3) __arm_vqshruntq_m(p0,p1,p2,p3)
#define __arm_vqshruntq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshruntq_m_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshruntq_m_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3));})
26262 | ||
/* Type-generic dispatch for the predicated vqshrunbq intrinsic: signed wide
   source, unsigned narrow destination — only _n_s16 and _n_s32 exist.  */
#define vqshrunbq_m(p0,p1,p2,p3) __arm_vqshrunbq_m(p0,p1,p2,p3)
#define __arm_vqshrunbq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshrunbq_m_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshrunbq_m_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3));})
26269 | ||
/* Type-generic dispatch for the predicated vqrshrnbq intrinsic: selects the
   _n_s16/_n_s32/_n_u16/_n_u32 variant from the vector types of p0 and p1.  */
#define vqrshrnbq_m(p0,p1,p2,p3) __arm_vqrshrnbq_m(p0,p1,p2,p3)
#define __arm_vqrshrnbq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshrnbq_m_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshrnbq_m_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqrshrnbq_m_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqrshrnbq_m_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
26278 | ||
/* Type-generic dispatch for the predicated vqrshrntq intrinsic: selects the
   _n_s16/_n_s32/_n_u16/_n_u32 variant from the vector types of p0 and p1.  */
#define vqrshrntq_m(p0,p1,p2,p3) __arm_vqrshrntq_m(p0,p1,p2,p3)
#define __arm_vqrshrntq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshrntq_m_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshrntq_m_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqrshrntq_m_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqrshrntq_m_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
26287 | ||
/* Type-generic dispatch for the predicated vqrshrunbq intrinsic: signed wide
   source, unsigned narrow destination — only _n_s16 and _n_s32 exist.  */
#define vqrshrunbq_m(p0,p1,p2,p3) __arm_vqrshrunbq_m(p0,p1,p2,p3)
#define __arm_vqrshrunbq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshrunbq_m_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshrunbq_m_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3));})
26294 | ||
/* Type-generic dispatch for the predicated vqrshruntq intrinsic: signed wide
   source, unsigned narrow destination — only _n_s16 and _n_s32 exist.  */
#define vqrshruntq_m(p0,p1,p2,p3) __arm_vqrshruntq_m(p0,p1,p2,p3)
#define __arm_vqrshruntq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshruntq_m_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshruntq_m_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3));})
26301 | ||
/* Type-generic dispatch for the predicated vqshrnbq intrinsic: selects the
   _n_s16/_n_s32/_n_u16/_n_u32 variant from the vector types of p0 and p1.  */
#define vqshrnbq_m(p0,p1,p2,p3) __arm_vqshrnbq_m(p0,p1,p2,p3)
#define __arm_vqshrnbq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshrnbq_m_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshrnbq_m_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqshrnbq_m_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqshrnbq_m_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
26310 | ||
/* Type-generic dispatch for the predicated vqshrntq intrinsic: selects the
   _n_s16/_n_s32/_n_u16/_n_u32 variant from the vector types of p0 and p1.  */
#define vqshrntq_m(p0,p1,p2,p3) __arm_vqshrntq_m(p0,p1,p2,p3)
#define __arm_vqshrntq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshrntq_m_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshrntq_m_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqshrntq_m_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqshrntq_m_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
26319 | ||
/* Type-generic dispatch for the predicated vrshrnbq intrinsic: selects the
   _n_s16/_n_s32/_n_u16/_n_u32 variant from the vector types of p0 and p1.  */
#define vrshrnbq_m(p0,p1,p2,p3) __arm_vrshrnbq_m(p0,p1,p2,p3)
#define __arm_vrshrnbq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vrshrnbq_m_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vrshrnbq_m_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vrshrnbq_m_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vrshrnbq_m_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
26328 | ||
/* Type-generic dispatch for the predicated vmlaldavaq intrinsic: p0 is a
   64-bit scalar accumulator, p1/p2 are 16- or 32-bit vectors; dispatch keys
   on all three type ids so signed/unsigned accumulators select the matching
   signed/unsigned vector variant.  */
#define vmlaldavaq_p(p0,p1,p2,p3) __arm_vmlaldavaq_p(p0,p1,p2,p3)
#define __arm_vmlaldavaq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int64_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlaldavaq_p_s16 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int64_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlaldavaq_p_s32 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint64_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmlaldavaq_p_u16 (__ARM_mve_coerce(__p0, uint64_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint64_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmlaldavaq_p_u32 (__ARM_mve_coerce(__p0, uint64_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
8eb3b6b9 | 26338 | |
/* Type-generic dispatch for the predicated vmlaldavaxq intrinsic: same
   three-way type-id keying as vmlaldavaq_p (64-bit accumulator plus two
   vectors of matching signedness).  */
#define vmlaldavaxq_p(p0,p1,p2,p3) __arm_vmlaldavaxq_p(p0,p1,p2,p3)
#define __arm_vmlaldavaxq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int64_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlaldavaxq_p_s16 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int64_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlaldavaxq_p_s32 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint64_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmlaldavaxq_p_u16 (__ARM_mve_coerce(__p0, uint64_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint64_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmlaldavaxq_p_u32 (__ARM_mve_coerce(__p0, uint64_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
8eb3b6b9 | 26348 | |
/* Type-generic dispatch for the predicated vmlsldavaq intrinsic: signed-only,
   so the _Generic keys on the two vector operands and the accumulator __p0 is
   passed through uncoerced.  */
#define vmlsldavaq_p(p0,p1,p2,p3) __arm_vmlsldavaq_p(p0,p1,p2,p3)
#define __arm_vmlsldavaq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsldavaq_p_s16(__p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsldavaq_p_s32(__p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
8eb3b6b9 | 26356 | |
/* Type-generic dispatch for the predicated vmlsldavaxq intrinsic: signed-only
   (s16/s32), accumulator passed through uncoerced like vmlsldavaq_p.  */
#define vmlsldavaxq_p(p0,p1,p2,p3) __arm_vmlsldavaxq_p(p0,p1,p2,p3)
#define __arm_vmlsldavaxq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsldavaxq_p_s16(__p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsldavaxq_p_s32(__p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
8eb3b6b9 | 26364 | |
/* Type-generic dispatch for the predicated vrmlaldavhaq intrinsic: 32-bit
   vector operands only, signed or unsigned, with matching 64-bit accumulator.  */
#define vrmlaldavhaq_p(p0,p1,p2,p3) __arm_vrmlaldavhaq_p(p0,p1,p2,p3)
#define __arm_vrmlaldavhaq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int64_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrmlaldavhaq_p_s32 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint64_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrmlaldavhaq_p_u32 (__ARM_mve_coerce(__p0, uint64_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
26372 | ||
/* vrmlaldavhaxq_p exists only for s32, so it forwards directly with no
   _Generic dispatch.  */
#define vrmlaldavhaxq_p(p0,p1,p2,p3) __arm_vrmlaldavhaxq_p(p0,p1,p2,p3)
#define __arm_vrmlaldavhaxq_p(p0,p1,p2,p3) __arm_vrmlaldavhaxq_p_s32(p0,p1,p2,p3)
26375 | ||
/* vrmlsldavhaq_p exists only for s32, so it forwards directly.  */
#define vrmlsldavhaq_p(p0,p1,p2,p3) __arm_vrmlsldavhaq_p(p0,p1,p2,p3)
#define __arm_vrmlsldavhaq_p(p0,p1,p2,p3) __arm_vrmlsldavhaq_p_s32(p0,p1,p2,p3)
26378 | ||
/* vrmlsldavhaxq_p exists only for s32, so it forwards directly.  */
#define vrmlsldavhaxq_p(p0,p1,p2,p3) __arm_vrmlsldavhaxq_p(p0,p1,p2,p3)
#define __arm_vrmlsldavhaxq_p(p0,p1,p2,p3) __arm_vrmlsldavhaxq_p_s32(p0,p1,p2,p3)
26381 | ||
/* Type-generic dispatch for the predicated vqdmladhq intrinsic: signed-only,
   all three operands must be the same vector type (s8/s16/s32).  */
#define vqdmladhq_m(p0,p1,p2,p3) __arm_vqdmladhq_m(p0,p1,p2,p3)
#define __arm_vqdmladhq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmladhq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmladhq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmladhq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
26390 | ||
/* Type-generic dispatch for the predicated vqdmladhxq intrinsic: signed-only,
   all three operands must be the same vector type (s8/s16/s32).  */
#define vqdmladhxq_m(p0,p1,p2,p3) __arm_vqdmladhxq_m(p0,p1,p2,p3)
#define __arm_vqdmladhxq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmladhxq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmladhxq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmladhxq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
8eb3b6b9 | 26399 | |
/* Type-generic dispatch for the predicated vqdmlsdhq intrinsic: signed-only,
   all three operands must be the same vector type (s8/s16/s32).  */
#define vqdmlsdhq_m(p0,p1,p2,p3) __arm_vqdmlsdhq_m(p0,p1,p2,p3)
#define __arm_vqdmlsdhq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmlsdhq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmlsdhq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmlsdhq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
f2170a37 | 26408 | |
/* Type-generic dispatch for the predicated vqdmlsdhxq intrinsic: signed-only,
   all three operands must be the same vector type (s8/s16/s32).  */
#define vqdmlsdhxq_m(p0,p1,p2,p3) __arm_vqdmlsdhxq_m(p0,p1,p2,p3)
#define __arm_vqdmlsdhxq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmlsdhxq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmlsdhxq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmlsdhxq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
f2170a37 | 26417 | |
/* Type-generic dispatch for the predicated vqabsq intrinsic.  Signed-only:
   both vector operands must share one of the s8/s16/s32 element types, which
   selects the corresponding __arm_vqabsq_m_* overload.  */
#define vqabsq_m(p0,p1,p2) __arm_vqabsq_m(p0,p1,p2)
#define __arm_vqabsq_m(p0,p1,p2) ({ __typeof(p0) __a = (p0); \
  __typeof(p1) __b = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__a)][__ARM_mve_typeid(__b)])0, \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqabsq_m_s32 (__ARM_mve_coerce(__a, int32x4_t), __ARM_mve_coerce(__b, int32x4_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqabsq_m_s16 (__ARM_mve_coerce(__a, int16x8_t), __ARM_mve_coerce(__b, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqabsq_m_s8 (__ARM_mve_coerce(__a, int8x16_t), __ARM_mve_coerce(__b, int8x16_t), p2));})
f2170a37 | 26425 | |
/* Type-generic dispatch for the predicated vmvnq intrinsic.  Two families:
   vector/vector forms (s8..u32), and vector/scalar "_n" forms where the
   immediate is coerced through __ARM_mve_coerce1 as a plain int.  */
#define vmvnq_m(p0,p1,p2) __arm_vmvnq_m(p0,p1,p2)
#define __arm_vmvnq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmvnq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmvnq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmvnq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmvnq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmvnq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmvnq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32_t]: __arm_vmvnq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce1(__p1, int) , p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vmvnq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce1(__p1, int) , p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32_t]: __arm_vmvnq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce1(__p1, int) , p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32_t]: __arm_vmvnq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce1(__p1, int) , p2));})
f2170a37 | 26440 | |
/* Type-generic dispatch for the predicated vorrq_m_n intrinsic.  Only the
   vector operand p0 participates in dispatch (16- and 32-bit element types);
   the immediate p1 and predicate p2 are forwarded untouched.  */
#define vorrq_m_n(p0,p1,p2) __arm_vorrq_m_n(p0,p1,p2)
#define __arm_vorrq_m_n(p0,p1,p2) ({ __typeof(p0) __v = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__v)])0, \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vorrq_m_n_u32 (__ARM_mve_coerce(__v, uint32x4_t), p1, p2), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vorrq_m_n_u16 (__ARM_mve_coerce(__v, uint16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vorrq_m_n_s32 (__ARM_mve_coerce(__v, int32x4_t), p1, p2), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vorrq_m_n_s16 (__ARM_mve_coerce(__v, int16x8_t), p1, p2));})
26448 | ||
/* Type-generic dispatch for the (unpredicated) vqshrunbq intrinsic: signed
   wide source, unsigned narrow destination — _n_s16 and _n_s32 only.  */
#define vqshrunbq(p0,p1,p2) __arm_vqshrunbq(p0,p1,p2)
#define __arm_vqshrunbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshrunbq_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshrunbq_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
f2170a37 | 26455 | |
/* Type-generic dispatch for the predicated vqshluq intrinsic: unsigned
   destination paired with a signed source of the same width (s8/s16/s32).  */
#define vqshluq_m(p0,p1,p2,p3) __arm_vqshluq_m(p0,p1,p2,p3)
#define __arm_vqshluq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqshluq_m_n_s8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqshluq_m_n_s16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqshluq_m_n_s32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3));})
f2170a37 | 26463 | |
/* Type-generic dispatch for the predicated vshlq intrinsic: note the shift
   vector __p2 is always a SIGNED vector of matching width, even for the
   unsigned-value variants.  */
#define vshlq_m(p0,p1,p2,p3) __arm_vshlq_m(p0,p1,p2,p3)
#define __arm_vshlq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vshlq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vshlq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vshlq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vshlq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vshlq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vshlq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
f2170a37 | 26475 | |
/* Type-generic dispatch for the predicated vshlq_m_n intrinsic: immediate
   shift p2 and predicate p3 are forwarded untouched; dispatch is on the two
   vector operands (s8..u32).  */
#define vshlq_m_n(p0,p1,p2,p3) __arm_vshlq_m_n(p0,p1,p2,p3)
#define __arm_vshlq_m_n(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vshlq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vshlq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vshlq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vshlq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vshlq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vshlq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
f2170a37 | 26486 | |
/* Type-generic dispatch for the predicated vshlq_m_r intrinsic.  Only the
   vector operand p0 drives the selection (s8..u32); the scalar shift p1 and
   predicate p2 are forwarded untouched.  */
#define vshlq_m_r(p0,p1,p2) __arm_vshlq_m_r(p0,p1,p2)
#define __arm_vshlq_m_r(p0,p1,p2) ({ __typeof(p0) __v = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__v)])0, \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlq_m_r_u32 (__ARM_mve_coerce(__v, uint32x4_t), p1, p2), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlq_m_r_u16 (__ARM_mve_coerce(__v, uint16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlq_m_r_u8 (__ARM_mve_coerce(__v, uint8x16_t), p1, p2), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlq_m_r_s32 (__ARM_mve_coerce(__v, int32x4_t), p1, p2), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlq_m_r_s16 (__ARM_mve_coerce(__v, int16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlq_m_r_s8 (__ARM_mve_coerce(__v, int8x16_t), p1, p2));})
26496 | ||
/* Type-generic dispatch for the predicated vsriq intrinsic: both vector
   operands must share the same element type (s8..u32); p2 is the immediate,
   p3 the predicate.  */
#define vsriq_m(p0,p1,p2,p3) __arm_vsriq_m(p0,p1,p2,p3)
#define __arm_vsriq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsriq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsriq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsriq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsriq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsriq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsriq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})

/* Predicated halving add, polymorphic form.  _Generic dispatches on the
   __ARM_mve_typeid of the three data operands: vector,vector,scalar picks
   the _n_ variant, vector,vector,vector the full-vector variant.  The
   predicate p3 is passed through unchanged.  */
#define vhaddq_m(p0,p1,p2,p3) __arm_vhaddq_m(p0,p1,p2,p3)
#define __arm_vhaddq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vhaddq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vhaddq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vhaddq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vhaddq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vhaddq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vhaddq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhaddq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhaddq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhaddq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vhaddq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vhaddq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vhaddq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})

/* Predicated halving complex add with 270-degree rotation, polymorphic
   form.  Signed vector,vector,vector operands only; _Generic selects the
   lane-width-specific variant.  Predicate p3 is passed through.  */
#define vhcaddq_rot270_m(p0,p1,p2,p3) __arm_vhcaddq_rot270_m(p0,p1,p2,p3)
#define __arm_vhcaddq_rot270_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhcaddq_rot270_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhcaddq_rot270_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhcaddq_rot270_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})

/* Predicated halving complex add with 90-degree rotation, polymorphic
   form.  Signed vector,vector,vector operands only; _Generic selects the
   lane-width-specific variant.  Predicate p3 is passed through.  */
#define vhcaddq_rot90_m(p0,p1,p2,p3) __arm_vhcaddq_rot90_m(p0,p1,p2,p3)
#define __arm_vhcaddq_rot90_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhcaddq_rot90_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhcaddq_rot90_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhcaddq_rot90_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})

/* Predicated halving subtract, polymorphic form.  Vector,vector,vector
   operands pick the full-vector variant; vector,vector,scalar picks the
   _n_ variant.  Predicate p3 is passed through.  */
#define vhsubq_m(p0,p1,p2,p3) __arm_vhsubq_m(p0,p1,p2,p3)
#define __arm_vhsubq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhsubq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhsubq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhsubq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vhsubq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vhsubq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vhsubq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vhsubq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vhsubq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vhsubq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vhsubq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vhsubq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vhsubq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3));})

/* Predicated maximum, polymorphic form.  Vector,vector,vector operands
   only; _Generic selects the signed/unsigned, lane-width-specific
   variant.  Predicate p3 is passed through.  */
#define vmaxq_m(p0,p1,p2,p3) __arm_vmaxq_m(p0,p1,p2,p3)
#define __arm_vmaxq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmaxq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmaxq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmaxq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmaxq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmaxq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmaxq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})

/* Predicated minimum, polymorphic form.  Vector,vector,vector operands
   only; _Generic selects the signed/unsigned, lane-width-specific
   variant.  Predicate p3 is passed through.  */
#define vminq_m(p0,p1,p2,p3) __arm_vminq_m(p0,p1,p2,p3)
#define __arm_vminq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vminq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vminq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vminq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vminq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vminq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vminq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})

/* Predicated multiply-accumulate with scalar, polymorphic form.  Only
   vector,vector,scalar (_n_) variants exist; _Generic selects by element
   type.  Predicate p3 is passed through.  */
#define vmlaq_m(p0,p1,p2,p3) __arm_vmlaq_m(p0,p1,p2,p3)
#define __arm_vmlaq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vmlaq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vmlaq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vmlaq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vmlaq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vmlaq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vmlaq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3));})

/* Predicated multiply-accumulate-scalar (vmlas), polymorphic form.  Only
   vector,vector,scalar (_n_) variants exist; _Generic selects by element
   type.  Predicate p3 is passed through.  */
#define vmlasq_m(p0,p1,p2,p3) __arm_vmlasq_m(p0,p1,p2,p3)
#define __arm_vmlasq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vmlasq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vmlasq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vmlasq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vmlasq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vmlasq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vmlasq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3));})

/* Predicated multiply-returning-high-half, polymorphic form.  Vector,
   vector,vector operands only; _Generic selects the signed/unsigned,
   lane-width-specific variant.  Predicate p3 is passed through.  */
#define vmulhq_m(p0,p1,p2,p3) __arm_vmulhq_m(p0,p1,p2,p3)
#define __arm_vmulhq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulhq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulhq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulhq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulhq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulhq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulhq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})

/* Predicated widening multiply of bottom (even) lanes, polymorphic form.
   Note the accumulator __p0 is the double-width vector type of the
   __p1/__p2 operands (e.g. int16x8_t for s8 inputs).  Predicate p3 is
   passed through.  */
#define vmullbq_int_m(p0,p1,p2,p3) __arm_vmullbq_int_m(p0,p1,p2,p3)
#define __arm_vmullbq_int_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmullbq_int_m_s8 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmullbq_int_m_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int64x2_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmullbq_int_m_s32 (__ARM_mve_coerce(__p0, int64x2_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmullbq_int_m_u8 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmullbq_int_m_u16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint64x2_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmullbq_int_m_u32 (__ARM_mve_coerce(__p0, uint64x2_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})

/* Predicated widening multiply of top (odd) lanes, polymorphic form.
   As with vmullbq_int_m, the accumulator __p0 is the double-width vector
   type of the __p1/__p2 operands.  Predicate p3 is passed through.  */
#define vmulltq_int_m(p0,p1,p2,p3) __arm_vmulltq_int_m(p0,p1,p2,p3)
#define __arm_vmulltq_int_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulltq_int_m_s8 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulltq_int_m_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int64x2_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulltq_int_m_s32 (__ARM_mve_coerce(__p0, int64x2_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulltq_int_m_u8 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulltq_int_m_u16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint64x2_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulltq_int_m_u32 (__ARM_mve_coerce(__p0, uint64x2_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})

/* Predicated widening polynomial multiply of top (odd) lanes, polymorphic
   form.  Polynomial element types are carried in unsigned vectors; the
   accumulator __p0 is the double-width unsigned vector type.  Predicate
   p3 is passed through.  */
#define vmulltq_poly_m(p0,p1,p2,p3) __arm_vmulltq_poly_m(p0,p1,p2,p3)
#define __arm_vmulltq_poly_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulltq_poly_m_p8 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulltq_poly_m_p16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3));})

/* Predicated saturating add, polymorphic form.  Vector,vector,scalar
   operands pick the _n_ variant; vector,vector,vector the full-vector
   variant.  Predicate p3 is passed through.  */
#define vqaddq_m(p0,p1,p2,p3) __arm_vqaddq_m(p0,p1,p2,p3)
#define __arm_vqaddq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vqaddq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vqaddq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vqaddq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vqaddq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vqaddq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vqaddq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqaddq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqaddq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqaddq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vqaddq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vqaddq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vqaddq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})

/* Predicated saturating doubling multiply-accumulate with scalar,
   polymorphic form.  Signed _n_ variants only.  Predicate p3 is passed
   through.  */
#define vqdmlahq_m(p0,p1,p2,p3) __arm_vqdmlahq_m(p0,p1,p2,p3)
#define __arm_vqdmlahq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vqdmlahq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vqdmlahq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vqdmlahq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3));})

/* Predicated saturating doubling multiply-returning-high-half,
   polymorphic form.  Signed only; vector,vector,scalar picks the _n_
   variant, vector,vector,vector the full-vector variant.  Predicate p3
   is passed through.  */
#define vqdmulhq_m(p0,p1,p2,p3) __arm_vqdmulhq_m(p0,p1,p2,p3)
#define __arm_vqdmulhq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vqdmulhq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vqdmulhq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vqdmulhq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmulhq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmulhq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmulhq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})

/* Predicated saturating doubling widening multiply of bottom (even)
   lanes, polymorphic form.  Signed only; the accumulator __p0 is the
   double-width vector type, and a scalar third operand selects the _n_
   variant.  Predicate p3 is passed through.  */
#define vqdmullbq_m(p0,p1,p2,p3) __arm_vqdmullbq_m(p0,p1,p2,p3)
#define __arm_vqdmullbq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmullbq_m_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int64x2_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmullbq_m_s32 (__ARM_mve_coerce(__p0, int64x2_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vqdmullbq_m_n_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
  int (*)[__ARM_mve_type_int64x2_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vqdmullbq_m_n_s32 (__ARM_mve_coerce(__p0, int64x2_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3));})

/* Predicated saturating doubling widening multiply of top (odd) lanes,
   polymorphic form.  Signed only; the accumulator __p0 is the
   double-width vector type, and a scalar third operand selects the _n_
   variant.  Predicate p3 is passed through.  */
#define vqdmulltq_m(p0,p1,p2,p3) __arm_vqdmulltq_m(p0,p1,p2,p3)
#define __arm_vqdmulltq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vqdmulltq_m_n_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
  int (*)[__ARM_mve_type_int64x2_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vqdmulltq_m_n_s32 (__ARM_mve_coerce(__p0, int64x2_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmulltq_m_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int64x2_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmulltq_m_s32 (__ARM_mve_coerce(__p0, int64x2_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})

/* Predicated saturating rounding doubling multiply-add dual, polymorphic
   form.  Signed vector,vector,vector operands only.  Predicate p3 is
   passed through.  */
#define vqrdmladhq_m(p0,p1,p2,p3) __arm_vqrdmladhq_m(p0,p1,p2,p3)
#define __arm_vqrdmladhq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmladhq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmladhq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmladhq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})

/* Predicated saturating rounding doubling multiply-add dual (exchange),
   polymorphic form.  Signed vector,vector,vector operands only.
   Predicate p3 is passed through.  */
#define vqrdmladhxq_m(p0,p1,p2,p3) __arm_vqrdmladhxq_m(p0,p1,p2,p3)
#define __arm_vqrdmladhxq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmladhxq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmladhxq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmladhxq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
14782c81 | 26730 | |
532e9e24 SP |
/* Predicated vmlsdavaxq (multiply-subtract dual accumulate across, exchange):
   the scalar accumulator __p0 is forwarded as-is; dispatch keys only on the
   two vector operands.  Signed lane widths only.  */
#define vmlsdavaxq_p(p0,p1,p2,p3) __arm_vmlsdavaxq_p(p0,p1,p2,p3)
#define __arm_vmlsdavaxq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic ((int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
    int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmlsdavaxq_p_s8 (__p0, __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
    int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsdavaxq_p_s16 (__p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
    int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsdavaxq_p_s32 (__p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
db5db9d2 | 26739 | |
532e9e24 SP |
/* Predicated vmlsdavaq: as vmlsdavaxq_p but without operand exchange.
   Scalar accumulator __p0 passes through untouched; _Generic selects the
   signed variant from the vector operand types.  */
#define vmlsdavaq_p(p0,p1,p2,p3) __arm_vmlsdavaq_p(p0,p1,p2,p3)
#define __arm_vmlsdavaq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic ((int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
    int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmlsdavaq_p_s8(__p0, __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
    int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsdavaq_p_s16(__p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
    int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsdavaq_p_s32(__p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
db5db9d2 | 26748 | |
532e9e24 SP |
/* Predicated vmladavaxq: here the int32_t accumulator participates in the
   _Generic key (3D array type) and is coerced explicitly, unlike the
   vmlsdavaxq_p wrapper above which forwards it raw.  */
#define vmladavaxq_p(p0,p1,p2,p3) __arm_vmladavaxq_p(p0,p1,p2,p3)
#define __arm_vmladavaxq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic ((int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
    int (*)[__ARM_mve_type_int32_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmladavaxq_p_s8 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
    int (*)[__ARM_mve_type_int32_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmladavaxq_p_s16 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
    int (*)[__ARM_mve_type_int32_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmladavaxq_p_s32 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
8eb3b6b9 | 26757 | |
532e9e24 SP |
/* Predicated polynomial multiply-long (bottom halves).  Only p8 and p16
   element forms exist; the widened result vector __p0 doubles the lane
   width of the inputs.  */
#define vmullbq_poly_m(p0,p1,p2,p3) __arm_vmullbq_poly_m(p0,p1,p2,p3)
#define __arm_vmullbq_poly_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic ((int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
    int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmullbq_poly_m_p8 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
    int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmullbq_poly_m_p16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3));})
db5db9d2 | 26765 | |
535a8645 SP |
/* Byte gather-load with per-lane offsets: dispatch on (base-pointer
   signedness) x (offset-vector width).  The destination element width is
   implied by the offset vector type.  */
#define vldrbq_gather_offset(p0,p1) __arm_vldrbq_gather_offset(p0,p1)
#define __arm_vldrbq_gather_offset(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic ((int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
    int (*)[__ARM_mve_type_int8_t_const_ptr][__ARM_mve_type_uint8x16_t]: __arm_vldrbq_gather_offset_s8 (__ARM_mve_coerce(__p0, int8_t const *), __ARM_mve_coerce(__p1, uint8x16_t)), \
    int (*)[__ARM_mve_type_int8_t_const_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrbq_gather_offset_s16 (__ARM_mve_coerce(__p0, int8_t const *), __ARM_mve_coerce(__p1, uint16x8_t)), \
    int (*)[__ARM_mve_type_int8_t_const_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrbq_gather_offset_s32 (__ARM_mve_coerce(__p0, int8_t const *), __ARM_mve_coerce(__p1, uint32x4_t)), \
    int (*)[__ARM_mve_type_uint8_t_const_ptr][__ARM_mve_type_uint8x16_t]: __arm_vldrbq_gather_offset_u8 (__ARM_mve_coerce(__p0, uint8_t const *), __ARM_mve_coerce(__p1, uint8x16_t)), \
    int (*)[__ARM_mve_type_uint8_t_const_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrbq_gather_offset_u16 (__ARM_mve_coerce(__p0, uint8_t const *), __ARM_mve_coerce(__p1, uint16x8_t)), \
    int (*)[__ARM_mve_type_uint8_t_const_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrbq_gather_offset_u32 (__ARM_mve_coerce(__p0, uint8_t const *), __ARM_mve_coerce(__p1, uint32x4_t)));})
26776 | ||
92f80065 SP |
/* Predicated incrementing-dup: a uint32_t start value selects the _n
   (immediate-start) form; a uint32_t* selects the _wb (write-back) form
   that updates the start through the pointer.  */
#define vidupq_m(p0,p1,p2,p3) __arm_vidupq_m(p0,p1,p2,p3)
#define __arm_vidupq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic ((int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
    int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint32_t]: __arm_vidupq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint32_t), p2, p3), \
    int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32_t]: __arm_vidupq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32_t), p2, p3), \
    int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vidupq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t), p2, p3), \
    int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_m_wb_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3), \
    int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_m_wb_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3), \
    int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_m_wb_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3));})
26787 | ||
/* Predicated decrementing-dup; mirror of vidupq_m (scalar start -> _n form,
   pointer start -> write-back _wb form).  */
#define vddupq_m(p0,p1,p2,p3) __arm_vddupq_m(p0,p1,p2,p3)
#define __arm_vddupq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic ((int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
    int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint32_t]: __arm_vddupq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint32_t), p2, p3), \
    int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32_t]: __arm_vddupq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32_t), p2, p3), \
    int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vddupq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t), p2, p3), \
    int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_m_wb_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3), \
    int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_m_wb_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3), \
    int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_m_wb_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3));})
26798 | ||
/* vidupq_u16: scalar start -> _n form, pointer start -> write-back form.  */
#define vidupq_u16(p0,p1) __arm_vidupq_u16(p0,p1)
#define __arm_vidupq_u16(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic ((int (*)[__ARM_mve_typeid(__p0)])0, \
    int (*)[__ARM_mve_type_uint32_t]: __arm_vidupq_n_u16 (__ARM_mve_coerce(__p0, uint32_t), p1), \
    int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_wb_u16 (__ARM_mve_coerce(__p0, uint32_t *), p1));})
26804 | ||
/* vidupq_u32: scalar start -> _n form, pointer start -> write-back form.  */
#define vidupq_u32(p0,p1) __arm_vidupq_u32(p0,p1)
#define __arm_vidupq_u32(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic ((int (*)[__ARM_mve_typeid(__p0)])0, \
    int (*)[__ARM_mve_type_uint32_t]: __arm_vidupq_n_u32 (__ARM_mve_coerce(__p0, uint32_t), p1), \
    int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_wb_u32 (__ARM_mve_coerce(__p0, uint32_t *), p1));})
26810 | ||
/* vidupq_u8: scalar start -> _n form, pointer start -> write-back form.  */
#define vidupq_u8(p0,p1) __arm_vidupq_u8(p0,p1)
#define __arm_vidupq_u8(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic ((int (*)[__ARM_mve_typeid(__p0)])0, \
    int (*)[__ARM_mve_type_uint32_t]: __arm_vidupq_n_u8 (__ARM_mve_coerce(__p0, uint32_t), p1), \
    int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_wb_u8 (__ARM_mve_coerce(__p0, uint32_t *), p1));})
26816 | ||
/* vddupq_u16: decrementing-dup; same start-operand dispatch as vidupq.  */
#define vddupq_u16(p0,p1) __arm_vddupq_u16(p0,p1)
#define __arm_vddupq_u16(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic ((int (*)[__ARM_mve_typeid(__p0)])0, \
    int (*)[__ARM_mve_type_uint32_t]: __arm_vddupq_n_u16 (__ARM_mve_coerce(__p0, uint32_t), p1), \
    int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_wb_u16 (__ARM_mve_coerce(__p0, uint32_t *), p1));})
26822 | ||
/* vddupq_u32: decrementing-dup; same start-operand dispatch as vidupq.  */
#define vddupq_u32(p0,p1) __arm_vddupq_u32(p0,p1)
#define __arm_vddupq_u32(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic ((int (*)[__ARM_mve_typeid(__p0)])0, \
    int (*)[__ARM_mve_type_uint32_t]: __arm_vddupq_n_u32 (__ARM_mve_coerce(__p0, uint32_t), p1), \
    int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_wb_u32 (__ARM_mve_coerce(__p0, uint32_t *), p1));})
26828 | ||
/* vddupq_u8: decrementing-dup; same start-operand dispatch as vidupq.  */
#define vddupq_u8(p0,p1) __arm_vddupq_u8(p0,p1)
#define __arm_vddupq_u8(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic ((int (*)[__ARM_mve_typeid(__p0)])0, \
    int (*)[__ARM_mve_type_uint32_t]: __arm_vddupq_n_u8 (__ARM_mve_coerce(__p0, uint32_t), p1), \
    int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_wb_u8 (__ARM_mve_coerce(__p0, uint32_t *), p1));})
26834 | ||
/* Predicated incrementing-with-wrap dup: p2 is the wrap limit, p3 the step.
   Start operand type (uint32_t vs uint32_t *) chooses _n vs write-back.  */
#define viwdupq_m(p0,p1,p2,p3,p4) __arm_viwdupq_m(p0,p1,p2,p3,p4)
#define __arm_viwdupq_m(p0,p1,p2,p3,p4) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic ((int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
    int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint32_t]: __arm_viwdupq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint32_t), p2, p3, p4), \
    int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32_t]: __arm_viwdupq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32_t), p2, p3, p4), \
    int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_viwdupq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t), p2, p3, p4), \
    int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_m_wb_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4), \
    int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_m_wb_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4), \
    int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_m_wb_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4));})
26845 | ||
/* viwdupq_u16: incrementing-with-wrap; start operand picks _n vs _wb.  */
#define viwdupq_u16(p0,p1,p2) __arm_viwdupq_u16(p0,p1,p2)
#define __arm_viwdupq_u16(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  _Generic ((int (*)[__ARM_mve_typeid(__p0)])0, \
    int (*)[__ARM_mve_type_uint32_t]: __arm_viwdupq_n_u16 (__ARM_mve_coerce(__p0, uint32_t), p1, p2), \
    int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_wb_u16 (__ARM_mve_coerce(__p0, uint32_t *), p1, p2));})
26851 | ||
/* viwdupq_u32: incrementing-with-wrap; start operand picks _n vs _wb.  */
#define viwdupq_u32(p0,p1,p2) __arm_viwdupq_u32(p0,p1,p2)
#define __arm_viwdupq_u32(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  _Generic ((int (*)[__ARM_mve_typeid(__p0)])0, \
    int (*)[__ARM_mve_type_uint32_t]: __arm_viwdupq_n_u32 (__ARM_mve_coerce(__p0, uint32_t), p1, p2), \
    int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_wb_u32 (__ARM_mve_coerce(__p0, uint32_t *), p1, p2));})
26857 | ||
/* viwdupq_u8: incrementing-with-wrap; start operand picks _n vs _wb.  */
#define viwdupq_u8(p0,p1,p2) __arm_viwdupq_u8(p0,p1,p2)
#define __arm_viwdupq_u8(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  _Generic ((int (*)[__ARM_mve_typeid(__p0)])0, \
    int (*)[__ARM_mve_type_uint32_t]: __arm_viwdupq_n_u8 (__ARM_mve_coerce(__p0, uint32_t), p1, p2), \
    int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_wb_u8 (__ARM_mve_coerce(__p0, uint32_t *), p1, p2));})
26863 | ||
/* Predicated decrementing-with-wrap dup; mirror of viwdupq_m.  */
#define vdwdupq_m(p0,p1,p2,p3,p4) __arm_vdwdupq_m(p0,p1,p2,p3,p4)
#define __arm_vdwdupq_m(p0,p1,p2,p3,p4) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic ((int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
    int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint32_t]: __arm_vdwdupq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint32_t), p2, p3, p4), \
    int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32_t]: __arm_vdwdupq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32_t), p2, p3, p4), \
    int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vdwdupq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t), p2, p3, p4), \
    int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_m_wb_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4), \
    int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_m_wb_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4), \
    int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_m_wb_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4));})
26874 | ||
/* vdwdupq_u16: decrementing-with-wrap; start operand picks _n vs _wb.  */
#define vdwdupq_u16(p0,p1,p2) __arm_vdwdupq_u16(p0,p1,p2)
#define __arm_vdwdupq_u16(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  _Generic ((int (*)[__ARM_mve_typeid(__p0)])0, \
    int (*)[__ARM_mve_type_uint32_t]: __arm_vdwdupq_n_u16 (__ARM_mve_coerce(__p0, uint32_t), p1, p2), \
    int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_wb_u16 (__ARM_mve_coerce(__p0, uint32_t *), p1, p2));})
26880 | ||
/* vdwdupq_u32: decrementing-with-wrap; start operand picks _n vs _wb.  */
#define vdwdupq_u32(p0,p1,p2) __arm_vdwdupq_u32(p0,p1,p2)
#define __arm_vdwdupq_u32(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  _Generic ((int (*)[__ARM_mve_typeid(__p0)])0, \
    int (*)[__ARM_mve_type_uint32_t]: __arm_vdwdupq_n_u32 (__ARM_mve_coerce(__p0, uint32_t), p1, p2), \
    int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_wb_u32 (__ARM_mve_coerce(__p0, uint32_t *), p1, p2));})
26886 | ||
/* vdwdupq_u8: decrementing-with-wrap; start operand picks _n vs _wb.  */
#define vdwdupq_u8(p0,p1,p2) __arm_vdwdupq_u8(p0,p1,p2)
#define __arm_vdwdupq_u8(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  _Generic ((int (*)[__ARM_mve_typeid(__p0)])0, \
    int (*)[__ARM_mve_type_uint32_t]: __arm_vdwdupq_n_u8 (__ARM_mve_coerce(__p0, uint32_t), p1, p2), \
    int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_wb_u8 (__ARM_mve_coerce(__p0, uint32_t *), p1, p2));})
26892 | ||
88c9a831 SP |
/* Predicated shift-left-with-carry: dispatch on the single vector operand;
   remaining arguments (carry pointer, shift count, predicate) pass through.  */
#define vshlcq_m(p0,p1,p2,p3) __arm_vshlcq_m(p0,p1,p2,p3)
#define __arm_vshlcq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  _Generic ((int (*)[__ARM_mve_typeid(__p0)])0, \
    int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlcq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2, p3), \
    int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlcq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2, p3), \
    int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlcq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2, p3), \
    int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlcq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2, p3), \
    int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlcq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2, p3), \
    int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlcq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2, p3));})
26902 | ||
e81d0d9e SP |
/* Absolute-difference accumulate across vector: scalar accumulator __p0 is
   forwarded raw; the two vector operands drive the _Generic selection.  */
#define vabavq(p0,p1,p2) __arm_vabavq(p0,p1,p2)
#define __arm_vabavq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic ((int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
    int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vabavq_s8 (__p0, __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
    int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vabavq_s16 (__p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
    int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vabavq_s32 (__p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
    int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vabavq_u8 (__p0, __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t)), \
    int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vabavq_u16 (__p0, __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
    int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vabavq_u32 (__p0, __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));})
26914 | ||
/* Predicated vabavq: same dispatch as vabavq with a trailing predicate p3.  */
#define vabavq_p(p0,p1,p2,p3) __arm_vabavq_p(p0,p1,p2,p3)
#define __arm_vabavq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic ((int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
    int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vabavq_p_s8(__p0, __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
    int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vabavq_p_s16(__p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
    int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vabavq_p_s32(__p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
    int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vabavq_p_u8(__p0, __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
    int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vabavq_p_u16(__p0, __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
    int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vabavq_p_u32(__p0, __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
26926 | ||
/* Add-long accumulate across: 64-bit accumulator, 32-bit element vector;
   signedness of the (accumulator, vector) pair selects the variant.  */
#define vaddlvaq(p0,p1) __arm_vaddlvaq(p0,p1)
#define __arm_vaddlvaq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic ((int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
    int (*)[__ARM_mve_type_int64_t][__ARM_mve_type_int32x4_t]: __arm_vaddlvaq_s32 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int32x4_t)), \
    int (*)[__ARM_mve_type_uint64_t][__ARM_mve_type_uint32x4_t]: __arm_vaddlvaq_u32 (__ARM_mve_coerce(__p0, uint64_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
26933 | ||
/* Predicated vaddlvaq: as vaddlvaq with a trailing predicate argument.  */
#define vaddlvaq_p(p0,p1,p2) __arm_vaddlvaq_p(p0,p1,p2)
#define __arm_vaddlvaq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic ((int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
    int (*)[__ARM_mve_type_int64_t][__ARM_mve_type_int32x4_t]: __arm_vaddlvaq_p_s32 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
    int (*)[__ARM_mve_type_uint64_t][__ARM_mve_type_uint32x4_t]: __arm_vaddlvaq_p_u32 (__ARM_mve_coerce(__p0, uint64_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
26940 | ||
/* Add-long across vector (32-bit lanes -> 64-bit scalar), signed/unsigned.  */
#define vaddlvq(p0) __arm_vaddlvq(p0)
#define __arm_vaddlvq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic ((int (*)[__ARM_mve_typeid(__p0)])0, \
    int (*)[__ARM_mve_type_int32x4_t]: __arm_vaddlvq_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
    int (*)[__ARM_mve_type_uint32x4_t]: __arm_vaddlvq_u32 (__ARM_mve_coerce(__p0, uint32x4_t)));})
26946 | ||
/* Predicated add-long across vector.  */
#define vaddlvq_p(p0,p1) __arm_vaddlvq_p(p0,p1)
#define __arm_vaddlvq_p(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic ((int (*)[__ARM_mve_typeid(__p0)])0, \
    int (*)[__ARM_mve_type_int32x4_t]: __arm_vaddlvq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
    int (*)[__ARM_mve_type_uint32x4_t]: __arm_vaddlvq_p_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
26952 | ||
/* Add-accumulate across vector: 32-bit scalar accumulator (coerced), any
   lane width; (accumulator, vector) signedness pair drives dispatch.  */
#define vaddvaq(p0,p1) __arm_vaddvaq(p0,p1)
#define __arm_vaddvaq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic ((int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
    int (*)[__ARM_mve_type_int32_t][__ARM_mve_type_int8x16_t]: __arm_vaddvaq_s8 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int8x16_t)), \
    int (*)[__ARM_mve_type_int32_t][__ARM_mve_type_int16x8_t]: __arm_vaddvaq_s16 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int16x8_t)), \
    int (*)[__ARM_mve_type_int32_t][__ARM_mve_type_int32x4_t]: __arm_vaddvaq_s32 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int32x4_t)), \
    int (*)[__ARM_mve_type_uint32_t][__ARM_mve_type_uint8x16_t]: __arm_vaddvaq_u8 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
    int (*)[__ARM_mve_type_uint32_t][__ARM_mve_type_uint16x8_t]: __arm_vaddvaq_u16 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
    int (*)[__ARM_mve_type_uint32_t][__ARM_mve_type_uint32x4_t]: __arm_vaddvaq_u32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
26963 | ||
/* Predicated vaddvaq: same dispatch with a trailing predicate p2.  */
#define vaddvaq_p(p0,p1,p2) __arm_vaddvaq_p(p0,p1,p2)
#define __arm_vaddvaq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic ((int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
    int (*)[__ARM_mve_type_int32_t][__ARM_mve_type_int8x16_t]: __arm_vaddvaq_p_s8 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
    int (*)[__ARM_mve_type_int32_t][__ARM_mve_type_int16x8_t]: __arm_vaddvaq_p_s16 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
    int (*)[__ARM_mve_type_int32_t][__ARM_mve_type_int32x4_t]: __arm_vaddvaq_p_s32 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
    int (*)[__ARM_mve_type_uint32_t][__ARM_mve_type_uint8x16_t]: __arm_vaddvaq_p_u8 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
    int (*)[__ARM_mve_type_uint32_t][__ARM_mve_type_uint16x8_t]: __arm_vaddvaq_p_u16 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
    int (*)[__ARM_mve_type_uint32_t][__ARM_mve_type_uint32x4_t]: __arm_vaddvaq_p_u32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
26974 | ||
/* Add across vector -> 32-bit scalar; single-operand dispatch over all six
   integer vector types.  */
#define vaddvq(p0) __arm_vaddvq(p0)
#define __arm_vaddvq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic ((int (*)[__ARM_mve_typeid(__p0)])0, \
    int (*)[__ARM_mve_type_int8x16_t]: __arm_vaddvq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
    int (*)[__ARM_mve_type_int16x8_t]: __arm_vaddvq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
    int (*)[__ARM_mve_type_int32x4_t]: __arm_vaddvq_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
    int (*)[__ARM_mve_type_uint8x16_t]: __arm_vaddvq_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
    int (*)[__ARM_mve_type_uint16x8_t]: __arm_vaddvq_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
    int (*)[__ARM_mve_type_uint32x4_t]: __arm_vaddvq_u32 (__ARM_mve_coerce(__p0, uint32x4_t)));})
26984 | ||
/* Predicated add across vector.  */
#define vaddvq_p(p0,p1) __arm_vaddvq_p(p0,p1)
#define __arm_vaddvq_p(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic ((int (*)[__ARM_mve_typeid(__p0)])0, \
    int (*)[__ARM_mve_type_int8x16_t]: __arm_vaddvq_p_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
    int (*)[__ARM_mve_type_int16x8_t]: __arm_vaddvq_p_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
    int (*)[__ARM_mve_type_int32x4_t]: __arm_vaddvq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
    int (*)[__ARM_mve_type_uint8x16_t]: __arm_vaddvq_p_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
    int (*)[__ARM_mve_type_uint16x8_t]: __arm_vaddvq_p_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
    int (*)[__ARM_mve_type_uint32x4_t]: __arm_vaddvq_p_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
26994 | ||
/* Unsigned compare higher-or-same: vector/vector forms plus vector/scalar
   (_n) forms, selected by the type of the second operand.  */
#define vcmpcsq(p0,p1) __arm_vcmpcsq(p0,p1)
#define __arm_vcmpcsq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic ((int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
    int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpcsq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
    int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpcsq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
    int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpcsq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
    int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vcmpcsq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
    int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vcmpcsq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
    int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vcmpcsq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)));})
27005 | ||
/* Predicated unsigned compare higher-or-same; vector/vector and vector/
   scalar (_n) forms with predicate p2.  */
#define vcmpcsq_m(p0,p1,p2) __arm_vcmpcsq_m(p0,p1,p2)
#define __arm_vcmpcsq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic ((int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
    int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpcsq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
    int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpcsq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
    int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpcsq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
    int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vcmpcsq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t), p2), \
    int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vcmpcsq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t), p2), \
    int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vcmpcsq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t), p2));})
27016 | ||
/* Unsigned compare higher: vector/vector and vector/scalar (_n) forms.  */
#define vcmphiq(p0,p1) __arm_vcmphiq(p0,p1)
#define __arm_vcmphiq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic ((int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
    int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmphiq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
    int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmphiq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
    int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmphiq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
    int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vcmphiq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
    int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vcmphiq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
    int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vcmphiq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)));})
27027 | ||
/* Predicated unsigned compare higher.  (Note: the _n associations are listed
   before the vector/vector ones here; _Generic order is irrelevant.)  */
#define vcmphiq_m(p0,p1,p2) __arm_vcmphiq_m(p0,p1,p2)
#define __arm_vcmphiq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic ((int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
    int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vcmphiq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t), p2), \
    int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vcmphiq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t), p2), \
    int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vcmphiq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t), p2), \
    int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmphiq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
    int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmphiq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
    int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmphiq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
27038 | ||
/* Max-absolute across vector: unsigned scalar accumulator paired with a
   signed vector of matching lane width.  */
#define vmaxavq(p0,p1) __arm_vmaxavq(p0,p1)
#define __arm_vmaxavq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic ((int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
    int (*)[__ARM_mve_type_uint8_t][__ARM_mve_type_int8x16_t]: __arm_vmaxavq_s8 (__ARM_mve_coerce(__p0, uint8_t), __ARM_mve_coerce(__p1, int8x16_t)), \
    int (*)[__ARM_mve_type_uint16_t][__ARM_mve_type_int16x8_t]: __arm_vmaxavq_s16 (__ARM_mve_coerce(__p0, uint16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
    int (*)[__ARM_mve_type_uint32_t][__ARM_mve_type_int32x4_t]: __arm_vmaxavq_s32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, int32x4_t)));})
27046 | ||
/* Predicated max-absolute across vector.  */
#define vmaxavq_p(p0,p1,p2) __arm_vmaxavq_p(p0,p1,p2)
#define __arm_vmaxavq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic ((int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
    int (*)[__ARM_mve_type_uint8_t][__ARM_mve_type_int8x16_t]: __arm_vmaxavq_p_s8 (__ARM_mve_coerce(__p0, uint8_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
    int (*)[__ARM_mve_type_uint16_t][__ARM_mve_type_int16x8_t]: __arm_vmaxavq_p_s16 (__ARM_mve_coerce(__p0, uint16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
    int (*)[__ARM_mve_type_uint32_t][__ARM_mve_type_int32x4_t]: __arm_vmaxavq_p_s32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
27054 | ||
/* Polymorphic vmaxq_x: _Generic dispatch on the two vector operand types
   (signed and unsigned, all three element widths); p3 is forwarded
   unchanged.  */
#define vmaxq_x(p1,p2,p3) __arm_vmaxq_x(p1,p2,p3)
#define __arm_vmaxq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmaxq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmaxq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmaxq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmaxq_x_u8( __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmaxq_x_u16( __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmaxq_x_u32( __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
27065 | ||
/* Polymorphic vmaxvq: _Generic dispatch on the (scalar, vector) pair —
   scalar and vector element types must match in signedness and width.  */
#define vmaxvq(p0,p1) __arm_vmaxvq(p0,p1)
#define __arm_vmaxvq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8_t][__ARM_mve_type_int8x16_t]: __arm_vmaxvq_s8 (__ARM_mve_coerce(__p0, int8_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16_t][__ARM_mve_type_int16x8_t]: __arm_vmaxvq_s16 (__ARM_mve_coerce(__p0, int16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32_t][__ARM_mve_type_int32x4_t]: __arm_vmaxvq_s32 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8_t][__ARM_mve_type_uint8x16_t]: __arm_vmaxvq_u8 (__ARM_mve_coerce(__p0, uint8_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16_t][__ARM_mve_type_uint16x8_t]: __arm_vmaxvq_u16 (__ARM_mve_coerce(__p0, uint16_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32_t][__ARM_mve_type_uint32x4_t]: __arm_vmaxvq_u32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
27076 | ||
/* Polymorphic vmaxvq_p: predicated form of vmaxvq; same (scalar, vector)
   dispatch, with p2 forwarded unchanged.  */
#define vmaxvq_p(p0,p1,p2) __arm_vmaxvq_p(p0,p1,p2)
#define __arm_vmaxvq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8_t][__ARM_mve_type_int8x16_t]: __arm_vmaxvq_p_s8 (__ARM_mve_coerce(__p0, int8_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16_t][__ARM_mve_type_int16x8_t]: __arm_vmaxvq_p_s16 (__ARM_mve_coerce(__p0, int16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32_t][__ARM_mve_type_int32x4_t]: __arm_vmaxvq_p_s32 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8_t][__ARM_mve_type_uint8x16_t]: __arm_vmaxvq_p_u8 (__ARM_mve_coerce(__p0, uint8_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16_t][__ARM_mve_type_uint16x8_t]: __arm_vmaxvq_p_u16 (__ARM_mve_coerce(__p0, uint16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32_t][__ARM_mve_type_uint32x4_t]: __arm_vmaxvq_p_u32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
27087 | ||
/* Polymorphic vminavq: selects the type-suffixed implementation from the
   (unsigned scalar, signed vector) operand pair via _Generic.  */
#define vminavq(p0,p1) __arm_vminavq(p0,p1)
#define __arm_vminavq(p0,p1) ({ __typeof(p0) __a = (p0); \
  __typeof(p1) __b = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__a)][__ARM_mve_typeid(__b)])0, \
  int (*)[__ARM_mve_type_uint32_t][__ARM_mve_type_int32x4_t]: __arm_vminavq_s32 (__ARM_mve_coerce(__a, uint32_t), __ARM_mve_coerce(__b, int32x4_t)), \
  int (*)[__ARM_mve_type_uint16_t][__ARM_mve_type_int16x8_t]: __arm_vminavq_s16 (__ARM_mve_coerce(__a, uint16_t), __ARM_mve_coerce(__b, int16x8_t)), \
  int (*)[__ARM_mve_type_uint8_t][__ARM_mve_type_int8x16_t]: __arm_vminavq_s8 (__ARM_mve_coerce(__a, uint8_t), __ARM_mve_coerce(__b, int8x16_t)));})
27095 | ||
/* Polymorphic vminavq_p: predicated form of vminavq; p2 is forwarded
   unchanged.  */
#define vminavq_p(p0,p1,p2) __arm_vminavq_p(p0,p1,p2)
#define __arm_vminavq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8_t][__ARM_mve_type_int8x16_t]: __arm_vminavq_p_s8 (__ARM_mve_coerce(__p0, uint8_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16_t][__ARM_mve_type_int16x8_t]: __arm_vminavq_p_s16 (__ARM_mve_coerce(__p0, uint16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32_t][__ARM_mve_type_int32x4_t]: __arm_vminavq_p_s32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
27103 | ||
/* Polymorphic vminq_x: _Generic dispatch on the two vector operand types
   (signed and unsigned, all three element widths); p3 is forwarded
   unchanged.  */
#define vminq_x(p1,p2,p3) __arm_vminq_x(p1,p2,p3)
#define __arm_vminq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vminq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vminq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vminq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vminq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vminq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vminq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
27114 | ||
/* Polymorphic vminvq: _Generic dispatch on the (scalar, vector) pair —
   scalar and vector element types must match in signedness and width.  */
#define vminvq(p0,p1) __arm_vminvq(p0,p1)
#define __arm_vminvq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8_t][__ARM_mve_type_int8x16_t]: __arm_vminvq_s8 (__ARM_mve_coerce(__p0, int8_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16_t][__ARM_mve_type_int16x8_t]: __arm_vminvq_s16 (__ARM_mve_coerce(__p0, int16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32_t][__ARM_mve_type_int32x4_t]: __arm_vminvq_s32 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8_t][__ARM_mve_type_uint8x16_t]: __arm_vminvq_u8 (__ARM_mve_coerce(__p0, uint8_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16_t][__ARM_mve_type_uint16x8_t]: __arm_vminvq_u16 (__ARM_mve_coerce(__p0, uint16_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32_t][__ARM_mve_type_uint32x4_t]: __arm_vminvq_u32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
27125 | ||
/* Polymorphic vminvq_p: predicated form of vminvq; p2 is forwarded
   unchanged.  */
#define vminvq_p(p0,p1,p2) __arm_vminvq_p(p0,p1,p2)
#define __arm_vminvq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8_t][__ARM_mve_type_int8x16_t]: __arm_vminvq_p_s8 (__ARM_mve_coerce(__p0, int8_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16_t][__ARM_mve_type_int16x8_t]: __arm_vminvq_p_s16 (__ARM_mve_coerce(__p0, int16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32_t][__ARM_mve_type_int32x4_t]: __arm_vminvq_p_s32 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8_t][__ARM_mve_type_uint8x16_t]: __arm_vminvq_p_u8 (__ARM_mve_coerce(__p0, uint8_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16_t][__ARM_mve_type_uint16x8_t]: __arm_vminvq_p_u16 (__ARM_mve_coerce(__p0, uint16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32_t][__ARM_mve_type_uint32x4_t]: __arm_vminvq_p_u32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
27136 | ||
/* Polymorphic vmladavaq: _Generic dispatch on the (32-bit accumulator,
   vector, vector) triple; the accumulator is coerced to int32_t/uint32_t to
   match the vector signedness.  */
#define vmladavaq(p0,p1,p2) __arm_vmladavaq(p0,p1,p2)
#define __arm_vmladavaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmladavaq_s8 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
  int (*)[__ARM_mve_type_int32_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmladavaq_s16 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int32_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmladavaq_s32 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
  int (*)[__ARM_mve_type_uint32_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmladavaq_u8 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint32_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmladavaq_u16 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmladavaq_u32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));})
27148 | ||
/* Polymorphic vmladavaq_p: predicated form of vmladavaq; same dispatch,
   with p3 forwarded unchanged.  */
#define vmladavaq_p(p0,p1,p2,p3) __arm_vmladavaq_p(p0,p1,p2,p3)
#define __arm_vmladavaq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmladavaq_p_s8 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int32_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmladavaq_p_s16 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmladavaq_p_s32 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint32_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmladavaq_p_u8 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint32_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmladavaq_p_u16 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmladavaq_p_u32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
27160 | ||
/* Polymorphic vmladavaxq: _Generic dispatch on the (32-bit accumulator,
   vector, vector) triple.  The exchange variant (VMLADAVAX) is defined by
   ACLE for signed element types only, so only the _s8/_s16/_s32
   implementations are dispatched here — consistent with the signed-only
   dispatch of vmladavxq_p, vmlaldavaxq and vmlaldavxq_p in this header.
   The previously listed _u8/_u16/_u32 cases had no corresponding MVE
   instruction and have been removed.  */
#define vmladavaxq(p0,p1,p2) __arm_vmladavaxq(p0,p1,p2)
#define __arm_vmladavaxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmladavaxq_s8 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
  int (*)[__ARM_mve_type_int32_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmladavaxq_s16 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int32_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmladavaxq_s32 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
27172 | ||
/* Polymorphic vmladavq: _Generic dispatch on the two vector operand types
   (signed and unsigned, all three element widths).  */
#define vmladavq(p0,p1) __arm_vmladavq(p0,p1)
#define __arm_vmladavq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmladavq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmladavq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmladavq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmladavq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmladavq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmladavq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
27183 | ||
/* Polymorphic vmladavq_p: predicated form of vmladavq; p2 is forwarded
   unchanged.  */
#define vmladavq_p(p0,p1,p2) __arm_vmladavq_p(p0,p1,p2)
#define __arm_vmladavq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmladavq_p_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmladavq_p_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmladavq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmladavq_p_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmladavq_p_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmladavq_p_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
27194 | ||
/* Polymorphic vmladavxq: _Generic dispatch on the two vector operand
   types.  The exchange variant (VMLADAVX) is defined by ACLE for signed
   element types only, so only the _s8/_s16/_s32 implementations are
   dispatched here — consistent with the signed-only dispatch of the
   predicated vmladavxq_p in this header.  The previously listed
   _u8/_u16/_u32 cases had no corresponding MVE instruction and have been
   removed.  */
#define vmladavxq(p0,p1) __arm_vmladavxq(p0,p1)
#define __arm_vmladavxq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmladavxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmladavxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmladavxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
27205 | ||
/* Polymorphic vmladavxq_p: predicated exchange variant, signed element
   types only; p2 is forwarded unchanged.  */
#define vmladavxq_p(p0,p1,p2) __arm_vmladavxq_p(p0,p1,p2)
#define __arm_vmladavxq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmladavxq_p_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmladavxq_p_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmladavxq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
27213 | ||
/* Polymorphic vmlaldavaq: _Generic dispatch on the (64-bit accumulator,
   vector, vector) triple; only 16- and 32-bit element widths are
   provided.  */
#define vmlaldavaq(p0,p1,p2) __arm_vmlaldavaq(p0,p1,p2)
#define __arm_vmlaldavaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int64_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlaldavaq_s16 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int64_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlaldavaq_s32 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
  int (*)[__ARM_mve_type_uint64_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmlaldavaq_u16 (__ARM_mve_coerce(__p0, uint64_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint64_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmlaldavaq_u32 (__ARM_mve_coerce(__p0, uint64_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));})
27223 | ||
/* Polymorphic vmlaldavaxq: exchange variant, signed element types only;
   _Generic selects on the (int64_t accumulator, vector, vector) triple.  */
#define vmlaldavaxq(p0,p1,p2) __arm_vmlaldavaxq(p0,p1,p2)
#define __arm_vmlaldavaxq(p0,p1,p2) ({ __typeof(p0) __acc = (p0); \
  __typeof(p1) __x = (p1); \
  __typeof(p2) __y = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__acc)][__ARM_mve_typeid(__x)][__ARM_mve_typeid(__y)])0, \
  int (*)[__ARM_mve_type_int64_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlaldavaxq_s32 (__ARM_mve_coerce(__acc, int64_t), __ARM_mve_coerce(__x, int32x4_t), __ARM_mve_coerce(__y, int32x4_t)), \
  int (*)[__ARM_mve_type_int64_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlaldavaxq_s16 (__ARM_mve_coerce(__acc, int64_t), __ARM_mve_coerce(__x, int16x8_t), __ARM_mve_coerce(__y, int16x8_t)));})
27231 | ||
/* Polymorphic vmlaldavq: _Generic dispatch on the two vector operand
   types; 16- and 32-bit element widths, signed and unsigned.  */
#define vmlaldavq(p0,p1) __arm_vmlaldavq(p0,p1)
#define __arm_vmlaldavq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlaldavq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlaldavq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmlaldavq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmlaldavq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
27240 | ||
/* Polymorphic vmlaldavq_p: predicated form of vmlaldavq; p2 is forwarded
   unchanged.  */
#define vmlaldavq_p(p0,p1,p2) __arm_vmlaldavq_p(p0,p1,p2)
#define __arm_vmlaldavq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlaldavq_p_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlaldavq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmlaldavq_p_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmlaldavq_p_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
27249 | ||
/* Polymorphic vmlaldavxq_p: predicated exchange variant, signed element
   types only; the predicate p2 is passed straight through.  */
#define vmlaldavxq_p(p0,p1,p2) __arm_vmlaldavxq_p(p0,p1,p2)
#define __arm_vmlaldavxq_p(p0,p1,p2) ({ __typeof(p0) __x = (p0); \
  __typeof(p1) __y = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__x)][__ARM_mve_typeid(__y)])0, \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlaldavxq_p_s32 (__ARM_mve_coerce(__x, int32x4_t), __ARM_mve_coerce(__y, int32x4_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlaldavxq_p_s16 (__ARM_mve_coerce(__x, int16x8_t), __ARM_mve_coerce(__y, int16x8_t), p2));})
27256 | ||
/* Polymorphic vmlsdavaq: _Generic dispatch on the two vector operand
   types only; the accumulator __p0 is passed uncoerced.
   NOTE(review): unlike vmladavaq, __p0 is neither coerced nor part of the
   dispatch key, so it is implicitly converted to the int32_t parameter of
   the selected implementation — confirm this asymmetry is intentional.  */
#define vmlsdavaq(p0,p1,p2) __arm_vmlsdavaq(p0,p1,p2)
#define __arm_vmlsdavaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmlsdavaq_s8(__p0, __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsdavaq_s16(__p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsdavaq_s32(__p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
27265 | ||
/* Polymorphic vmlsdavaxq: exchange variant of vmlsdavaq, signed only.
   NOTE(review): as in vmlsdavaq, the accumulator __p0 is passed uncoerced
   and is not part of the _Generic dispatch key — confirm intentional.  */
#define vmlsdavaxq(p0,p1,p2) __arm_vmlsdavaxq(p0,p1,p2)
#define __arm_vmlsdavaxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmlsdavaxq_s8(__p0, __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsdavaxq_s16(__p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsdavaxq_s32(__p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
27274 | ||
/* Polymorphic vmlsdavq: signed element types only; _Generic selects the
   width-suffixed implementation from the two vector operands.  */
#define vmlsdavq(p0,p1) __arm_vmlsdavq(p0,p1)
#define __arm_vmlsdavq(p0,p1) ({ __typeof(p0) __x = (p0); \
  __typeof(p1) __y = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__x)][__ARM_mve_typeid(__y)])0, \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsdavq_s32 (__ARM_mve_coerce(__x, int32x4_t), __ARM_mve_coerce(__y, int32x4_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsdavq_s16 (__ARM_mve_coerce(__x, int16x8_t), __ARM_mve_coerce(__y, int16x8_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmlsdavq_s8 (__ARM_mve_coerce(__x, int8x16_t), __ARM_mve_coerce(__y, int8x16_t)));})
27282 | ||
/* Polymorphic vmlsdavq_p: predicated form of vmlsdavq, signed only; p2 is
   forwarded unchanged.  */
#define vmlsdavq_p(p0,p1,p2) __arm_vmlsdavq_p(p0,p1,p2)
#define __arm_vmlsdavq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmlsdavq_p_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsdavq_p_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsdavq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
27290 | ||
/* Polymorphic vmlsdavxq: exchange variant of vmlsdavq, signed only.  */
#define vmlsdavxq(p0,p1) __arm_vmlsdavxq(p0,p1)
#define __arm_vmlsdavxq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmlsdavxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsdavxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsdavxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
27298 | ||
/* Polymorphic vmlsdavxq_p: predicated exchange variant, signed only; p2 is
   forwarded unchanged.  */
#define vmlsdavxq_p(p0,p1,p2) __arm_vmlsdavxq_p(p0,p1,p2)
#define __arm_vmlsdavxq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmlsdavxq_p_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsdavxq_p_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsdavxq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
27306 | ||
/* Polymorphic vmlsldavaq: signed only, 16/32-bit elements.
   NOTE(review): the 64-bit accumulator __p0 is passed uncoerced and is not
   part of the _Generic dispatch key, unlike vmlaldavaq — confirm this
   asymmetry is intentional.  */
#define vmlsldavaq(p0,p1,p2) __arm_vmlsldavaq(p0,p1,p2)
#define __arm_vmlsldavaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsldavaq_s16(__p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsldavaq_s32(__p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
27314 | ||
/* Polymorphic vmlsldavaxq: exchange variant of vmlsldavaq, signed only.
   NOTE(review): accumulator __p0 passed uncoerced, not in the dispatch
   key — confirm intentional (same pattern as vmlsldavaq).  */
#define vmlsldavaxq(p0,p1,p2) __arm_vmlsldavaxq(p0,p1,p2)
#define __arm_vmlsldavaxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsldavaxq_s16(__p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsldavaxq_s32(__p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
27322 | ||
/* Polymorphic vmlsldavq: signed only, 16/32-bit elements; _Generic
   dispatch on the two vector operand types.  */
#define vmlsldavq(p0,p1) __arm_vmlsldavq(p0,p1)
#define __arm_vmlsldavq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsldavq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsldavq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
27329 | ||
/* Polymorphic vmlsldavq_p: predicated form of vmlsldavq; p2 is forwarded
   unchanged.  */
#define vmlsldavq_p(p0,p1,p2) __arm_vmlsldavq_p(p0,p1,p2)
#define __arm_vmlsldavq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsldavq_p_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsldavq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
27336 | ||
/* Polymorphic vmlsldavxq: exchange variant of vmlsldavq, signed only.  */
#define vmlsldavxq(p0,p1) __arm_vmlsldavxq(p0,p1)
#define __arm_vmlsldavxq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsldavxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsldavxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
27343 | ||
/* vmlsldavxq_p: predicated form of vmlsldavxq; p2 is the predicate.  */
#define vmlsldavxq_p(p0,p1,p2) __arm_vmlsldavxq_p(p0,p1,p2)
#define __arm_vmlsldavxq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsldavxq_p_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsldavxq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
27350 | ||
/* vmovlbq_x: predicated ("_x" = don't-care false lanes) widening move of
   bottom (even) elements; dispatches on the single vector operand type.  */
#define vmovlbq_x(p1,p2) __arm_vmovlbq_x(p1,p2)
#define __arm_vmovlbq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vmovlbq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vmovlbq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmovlbq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmovlbq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2));})
27358 | ||
/* vmovltq_x: predicated widening move of top (odd) elements; dispatches on
   the single vector operand type (8/16-bit, signed and unsigned).  */
#define vmovltq_x(p1,p2) __arm_vmovltq_x(p1,p2)
#define __arm_vmovltq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vmovltq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vmovltq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmovltq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmovltq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2));})
27366 | ||
/* vmulhq_x: predicated multiply returning high half; dispatches on both
   vector operand types, all six integer element variants.  */
#define vmulhq_x(p1,p2,p3) __arm_vmulhq_x(p1,p2,p3)
#define __arm_vmulhq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulhq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulhq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulhq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulhq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulhq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulhq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
27377 | ||
/* vmullbq_int_x: predicated integer multiply-long of bottom (even) elements;
   dispatches on both vector operand types, all six integer variants.  */
#define vmullbq_int_x(p1,p2,p3) __arm_vmullbq_int_x(p1,p2,p3)
#define __arm_vmullbq_int_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmullbq_int_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmullbq_int_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmullbq_int_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmullbq_int_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmullbq_int_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmullbq_int_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
27388 | ||
/* vmullbq_poly_x: predicated polynomial multiply-long of bottom elements;
   poly types are carried in uint8x16_t/uint16x8_t containers.  */
#define vmullbq_poly_x(p1,p2,p3) __arm_vmullbq_poly_x(p1,p2,p3)
#define __arm_vmullbq_poly_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmullbq_poly_x_p8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmullbq_poly_x_p16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3));})
27395 | ||
/* vmulltq_int_x: predicated integer multiply-long of top (odd) elements;
   dispatches on both vector operand types, all six integer variants.  */
#define vmulltq_int_x(p1,p2,p3) __arm_vmulltq_int_x(p1,p2,p3)
#define __arm_vmulltq_int_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulltq_int_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulltq_int_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulltq_int_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulltq_int_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulltq_int_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulltq_int_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
27406 | ||
/* vmulltq_poly_x: predicated polynomial multiply-long of top elements;
   poly types are carried in uint8x16_t/uint16x8_t containers.  */
#define vmulltq_poly_x(p1,p2,p3) __arm_vmulltq_poly_x(p1,p2,p3)
#define __arm_vmulltq_poly_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulltq_poly_x_p8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulltq_poly_x_p16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3));})
27413 | ||
/* vrmlaldavhaq: rounding multiply-accumulate long across vector with a
   64-bit scalar accumulator p0; dispatches on accumulator and both vector
   operand types (signed and unsigned 32-bit variants).  */
#define vrmlaldavhaq(p0,p1,p2) __arm_vrmlaldavhaq(p0,p1,p2)
#define __arm_vrmlaldavhaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int64_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrmlaldavhaq_s32 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
  int (*)[__ARM_mve_type_uint64_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrmlaldavhaq_u32 (__ARM_mve_coerce(__p0, uint64_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));})
27421 | ||
/* vrmlaldavhaxq: exchanged-operand rounding multiply-accumulate long.
   Only the s32 variant exists, so no _Generic dispatch is needed.  */
#define vrmlaldavhaxq(p0,p1,p2) __arm_vrmlaldavhaxq(p0,p1,p2)
#define __arm_vrmlaldavhaxq(p0,p1,p2) __arm_vrmlaldavhaxq_s32(p0,p1,p2)
27424 | ||
/* vrmlaldavhq: rounding multiply-accumulate long across vector; dispatches
   on the two 32-bit vector operand types (signed and unsigned).  */
#define vrmlaldavhq(p0,p1) __arm_vrmlaldavhq(p0,p1)
#define __arm_vrmlaldavhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrmlaldavhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrmlaldavhq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
27431 | ||
/* vrmlaldavhq_p: predicated form of vrmlaldavhq; p2 is the predicate.  */
#define vrmlaldavhq_p(p0,p1,p2) __arm_vrmlaldavhq_p(p0,p1,p2)
#define __arm_vrmlaldavhq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrmlaldavhq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrmlaldavhq_p_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
27438 | ||
/* The following polymorphic names have only a single (s32) type variant, so
   each maps straight to its type-specific intrinsic without _Generic.  */
#define vrmlaldavhxq(p0,p1) __arm_vrmlaldavhxq(p0,p1)
#define __arm_vrmlaldavhxq(p0,p1) __arm_vrmlaldavhxq_s32(p0,p1)

#define vrmlaldavhxq_p(p0,p1,p2) __arm_vrmlaldavhxq_p(p0,p1,p2)
#define __arm_vrmlaldavhxq_p(p0,p1,p2) __arm_vrmlaldavhxq_p_s32(p0,p1,p2)

#define vrmlsldavhaq(p0,p1,p2) __arm_vrmlsldavhaq(p0,p1,p2)
#define __arm_vrmlsldavhaq(p0,p1,p2) __arm_vrmlsldavhaq_s32(p0,p1,p2)

#define vrmlsldavhaxq(p0,p1,p2) __arm_vrmlsldavhaxq(p0,p1,p2)
#define __arm_vrmlsldavhaxq(p0,p1,p2) __arm_vrmlsldavhaxq_s32(p0,p1,p2)

#define vrmlsldavhq(p0,p1) __arm_vrmlsldavhq(p0,p1)
#define __arm_vrmlsldavhq(p0,p1) __arm_vrmlsldavhq_s32(p0,p1)

#define vrmlsldavhq_p(p0,p1,p2) __arm_vrmlsldavhq_p(p0,p1,p2)
#define __arm_vrmlsldavhq_p(p0,p1,p2) __arm_vrmlsldavhq_p_s32(p0,p1,p2)

#define vrmlsldavhxq(p0,p1) __arm_vrmlsldavhxq(p0,p1)
#define __arm_vrmlsldavhxq(p0,p1) __arm_vrmlsldavhxq_s32(p0,p1)

#define vrmlsldavhxq_p(p0,p1,p2) __arm_vrmlsldavhxq_p(p0,p1,p2)
#define __arm_vrmlsldavhxq_p(p0,p1,p2) __arm_vrmlsldavhxq_p_s32(p0,p1,p2)
27462 | ||
/* vstrbq: store vector elements as bytes through base pointer p0; dispatches
   on the (pointer type, vector type) pair for all six integer variants.  */
#define vstrbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16_t]: __arm_vstrbq_s8 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vstrbq_s16 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrbq_s32 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vstrbq_u8 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vstrbq_u16 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrbq_u32 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint32x4_t)));})
#define __arm_vstrbq(p0,p1) vstrbq(p0,p1)
27473 | ||
/* vstrbq_p: predicated byte store; p2 is the predicate, forwarded to the
   type-specific intrinsic selected from the (pointer, vector) pair.  */
#define vstrbq_p(p0,p1,p2) __arm_vstrbq_p(p0,p1,p2)
#define __arm_vstrbq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16_t]: __arm_vstrbq_p_s8 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vstrbq_p_s16 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrbq_p_s32 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vstrbq_p_u8 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vstrbq_p_u16 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrbq_p_u32 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
27484 | ||
/* vstrbq_scatter_offset: scatter-store bytes at base p0 plus per-lane
   unsigned offsets p1; dispatches on (pointer, offset vector, data vector).  */
#define vstrbq_scatter_offset(p0,p1,p2) __arm_vstrbq_scatter_offset(p0,p1,p2)
#define __arm_vstrbq_scatter_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vstrbq_scatter_offset_s8 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
  int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrbq_scatter_offset_s16 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrbq_scatter_offset_s32 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vstrbq_scatter_offset_u8 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrbq_scatter_offset_u16 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrbq_scatter_offset_u32 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));})
27496 | ||
/* vstrbq_scatter_offset_p: predicated form of vstrbq_scatter_offset; p3 is
   the predicate, passed through unevaluated by the dispatcher.  */
#define vstrbq_scatter_offset_p(p0,p1,p2,p3) __arm_vstrbq_scatter_offset_p(p0,p1,p2,p3)
#define __arm_vstrbq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vstrbq_scatter_offset_p_s8 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrbq_scatter_offset_p_s16 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrbq_scatter_offset_p_s32 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vstrbq_scatter_offset_p_u8 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrbq_scatter_offset_p_u16 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrbq_scatter_offset_p_u32 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
27508 | ||
/* vstrdq_scatter_base: scatter-store 64-bit lanes at addresses in vector
   base p0 plus immediate offset p1; dispatch depends only on the data
   vector type p2.  */
#define vstrdq_scatter_base(p0,p1,p2) __arm_vstrdq_scatter_base(p0,p1,p2)
#define __arm_vstrdq_scatter_base(p0,p1,p2) ({ __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_base_s64 (p0, p1, __ARM_mve_coerce(__p2, int64x2_t)), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_base_u64 (p0, p1, __ARM_mve_coerce(__p2, uint64x2_t)));})
27514 | ||
/* vstrdq_scatter_base_p: predicated form of vstrdq_scatter_base; p3 is the
   predicate.  */
#define vstrdq_scatter_base_p(p0,p1,p2,p3) __arm_vstrdq_scatter_base_p(p0,p1,p2,p3)
#define __arm_vstrdq_scatter_base_p(p0,p1,p2,p3) ({ __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_base_p_s64 (p0, p1, __ARM_mve_coerce(__p2, int64x2_t), p3), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_base_p_u64 (p0, p1, __ARM_mve_coerce(__p2, uint64x2_t), p3));})
27520 | ||
/* vstrdq_scatter_offset: scatter-store 64-bit lanes at base pointer p0 plus
   per-lane offsets p1; dispatches on (pointer type, data vector type).  */
#define vstrdq_scatter_offset(p0,p1,p2) __arm_vstrdq_scatter_offset(p0,p1,p2)
#define __arm_vstrdq_scatter_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int64_t_ptr][__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_offset_s64 (__ARM_mve_coerce(__p0, int64_t *), __p1, __ARM_mve_coerce(__p2, int64x2_t)), \
  int (*)[__ARM_mve_type_uint64_t_ptr][__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_offset_u64 (__ARM_mve_coerce(__p0, uint64_t *), __p1, __ARM_mve_coerce(__p2, uint64x2_t)));})
27528 | ||
/* vstrdq_scatter_offset_p: predicated form of vstrdq_scatter_offset; p3 is
   the predicate.  */
#define vstrdq_scatter_offset_p(p0,p1,p2,p3) __arm_vstrdq_scatter_offset_p(p0,p1,p2,p3)
#define __arm_vstrdq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int64_t_ptr][__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_offset_p_s64 (__ARM_mve_coerce(__p0, int64_t *), __p1, __ARM_mve_coerce(__p2, int64x2_t), p3), \
  int (*)[__ARM_mve_type_uint64_t_ptr][__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_offset_p_u64 (__ARM_mve_coerce(__p0, uint64_t *), __p1, __ARM_mve_coerce(__p2, uint64x2_t), p3));})
27536 | ||
/* vstrdq_scatter_shifted_offset: like vstrdq_scatter_offset but each lane's
   offset is shifted (scaled by the element size) before the store.  */
#define vstrdq_scatter_shifted_offset(p0,p1,p2) __arm_vstrdq_scatter_shifted_offset(p0,p1,p2)
#define __arm_vstrdq_scatter_shifted_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int64_t_ptr][__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_shifted_offset_s64 (__ARM_mve_coerce(__p0, int64_t *), __p1, __ARM_mve_coerce(__p2, int64x2_t)), \
  int (*)[__ARM_mve_type_uint64_t_ptr][__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_shifted_offset_u64 (__ARM_mve_coerce(__p0, uint64_t *), __p1, __ARM_mve_coerce(__p2, uint64x2_t)));})
27544 | ||
/* vstrdq_scatter_shifted_offset_p: predicated form of
   vstrdq_scatter_shifted_offset; p3 is the predicate.  */
#define vstrdq_scatter_shifted_offset_p(p0,p1,p2,p3) __arm_vstrdq_scatter_shifted_offset_p(p0,p1,p2,p3)
#define __arm_vstrdq_scatter_shifted_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int64_t_ptr][__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_shifted_offset_p_s64 (__ARM_mve_coerce(__p0, int64_t *), __p1, __ARM_mve_coerce(__p2, int64x2_t), p3), \
  int (*)[__ARM_mve_type_uint64_t_ptr][__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_shifted_offset_p_u64 (__ARM_mve_coerce(__p0, uint64_t *), __p1, __ARM_mve_coerce(__p2, uint64x2_t), p3));})
27552 | ||
63c8f7d6 SP |
27553 | #ifdef __cplusplus |
27554 | } | |
27555 | #endif | |
27556 | ||
27557 | #endif /* _GCC_ARM_MVE_H. */ |