]>
Commit | Line | Data |
---|---|---|
63c8f7d6 SP |
1 | /* Arm MVE intrinsics include file. |
2 | ||
3 | Copyright (C) 2019-2020 Free Software Foundation, Inc. | |
4 | Contributed by Arm. | |
5 | ||
6 | This file is part of GCC. | |
7 | ||
8 | GCC is free software; you can redistribute it and/or modify it | |
9 | under the terms of the GNU General Public License as published | |
10 | by the Free Software Foundation; either version 3, or (at your | |
11 | option) any later version. | |
12 | ||
13 | GCC is distributed in the hope that it will be useful, but WITHOUT | |
14 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY | |
15 | or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public | |
16 | License for more details. | |
17 | ||
18 | You should have received a copy of the GNU General Public License | |
19 | along with GCC; see the file COPYING3. If not see | |
20 | <http://www.gnu.org/licenses/>. */ | |
21 | ||
22 | #ifndef _GCC_ARM_MVE_H | |
23 | #define _GCC_ARM_MVE_H | |
24 | ||
25 | #if !__ARM_FEATURE_MVE | |
26 | #error "MVE feature not supported" | |
27 | #endif | |
28 | ||
29 | #include <stdint.h> | |
30 | #ifndef __cplusplus | |
31 | #include <stdbool.h> | |
32 | #endif | |
33 | ||
34 | #ifdef __cplusplus | |
35 | extern "C" { | |
36 | #endif | |
37 | ||
38 | #if (__ARM_FEATURE_MVE & 2) /* MVE Floating point. */ | |
39 | typedef __fp16 float16_t; | |
40 | typedef float float32_t; | |
41 | typedef __simd128_float16_t float16x8_t; | |
42 | typedef __simd128_float32_t float32x4_t; | |
43 | #endif | |
44 | ||
14782c81 SP |
45 | #if (__ARM_FEATURE_MVE & 2) /* MVE Floating point. */ |
46 | typedef struct { float16x8_t val[2]; } float16x8x2_t; | |
47 | typedef struct { float16x8_t val[4]; } float16x8x4_t; | |
48 | typedef struct { float32x4_t val[2]; } float32x4x2_t; | |
49 | typedef struct { float32x4_t val[4]; } float32x4x4_t; | |
50 | #endif | |
51 | ||
63c8f7d6 SP |
52 | typedef uint16_t mve_pred16_t; |
53 | typedef __simd128_uint8_t uint8x16_t; | |
54 | typedef __simd128_uint16_t uint16x8_t; | |
55 | typedef __simd128_uint32_t uint32x4_t; | |
56 | typedef __simd128_uint64_t uint64x2_t; | |
57 | typedef __simd128_int8_t int8x16_t; | |
58 | typedef __simd128_int16_t int16x8_t; | |
59 | typedef __simd128_int32_t int32x4_t; | |
60 | typedef __simd128_int64_t int64x2_t; | |
61 | ||
14782c81 SP |
62 | typedef struct { int16x8_t val[2]; } int16x8x2_t; |
63 | typedef struct { int16x8_t val[4]; } int16x8x4_t; | |
64 | typedef struct { int32x4_t val[2]; } int32x4x2_t; | |
65 | typedef struct { int32x4_t val[4]; } int32x4x4_t; | |
66 | typedef struct { int8x16_t val[2]; } int8x16x2_t; | |
67 | typedef struct { int8x16_t val[4]; } int8x16x4_t; | |
68 | typedef struct { uint16x8_t val[2]; } uint16x8x2_t; | |
69 | typedef struct { uint16x8_t val[4]; } uint16x8x4_t; | |
70 | typedef struct { uint32x4_t val[2]; } uint32x4x2_t; | |
71 | typedef struct { uint32x4_t val[4]; } uint32x4x4_t; | |
72 | typedef struct { uint8x16_t val[2]; } uint8x16x2_t; | |
73 | typedef struct { uint8x16_t val[4]; } uint8x16x4_t; | |
74 | ||
75 | #ifndef __ARM_MVE_PRESERVE_USER_NAMESPACE | |
76 | #define vst4q_s8( __addr, __value) __arm_vst4q_s8( __addr, __value) | |
77 | #define vst4q_s16( __addr, __value) __arm_vst4q_s16( __addr, __value) | |
78 | #define vst4q_s32( __addr, __value) __arm_vst4q_s32( __addr, __value) | |
79 | #define vst4q_u8( __addr, __value) __arm_vst4q_u8( __addr, __value) | |
80 | #define vst4q_u16( __addr, __value) __arm_vst4q_u16( __addr, __value) | |
81 | #define vst4q_u32( __addr, __value) __arm_vst4q_u32( __addr, __value) | |
82 | #define vst4q_f16( __addr, __value) __arm_vst4q_f16( __addr, __value) | |
83 | #define vst4q_f32( __addr, __value) __arm_vst4q_f32( __addr, __value) | |
a50f6abf SP |
84 | #define vrndxq_f16(__a) __arm_vrndxq_f16(__a) |
85 | #define vrndxq_f32(__a) __arm_vrndxq_f32(__a) | |
86 | #define vrndq_f16(__a) __arm_vrndq_f16(__a) | |
87 | #define vrndq_f32(__a) __arm_vrndq_f32(__a) | |
88 | #define vrndpq_f16(__a) __arm_vrndpq_f16(__a) | |
89 | #define vrndpq_f32(__a) __arm_vrndpq_f32(__a) | |
90 | #define vrndnq_f16(__a) __arm_vrndnq_f16(__a) | |
91 | #define vrndnq_f32(__a) __arm_vrndnq_f32(__a) | |
92 | #define vrndmq_f16(__a) __arm_vrndmq_f16(__a) | |
93 | #define vrndmq_f32(__a) __arm_vrndmq_f32(__a) | |
94 | #define vrndaq_f16(__a) __arm_vrndaq_f16(__a) | |
95 | #define vrndaq_f32(__a) __arm_vrndaq_f32(__a) | |
96 | #define vrev64q_f16(__a) __arm_vrev64q_f16(__a) | |
97 | #define vrev64q_f32(__a) __arm_vrev64q_f32(__a) | |
98 | #define vnegq_f16(__a) __arm_vnegq_f16(__a) | |
99 | #define vnegq_f32(__a) __arm_vnegq_f32(__a) | |
100 | #define vdupq_n_f16(__a) __arm_vdupq_n_f16(__a) | |
101 | #define vdupq_n_f32(__a) __arm_vdupq_n_f32(__a) | |
102 | #define vabsq_f16(__a) __arm_vabsq_f16(__a) | |
103 | #define vabsq_f32(__a) __arm_vabsq_f32(__a) | |
104 | #define vrev32q_f16(__a) __arm_vrev32q_f16(__a) | |
105 | #define vcvttq_f32_f16(__a) __arm_vcvttq_f32_f16(__a) | |
106 | #define vcvtbq_f32_f16(__a) __arm_vcvtbq_f32_f16(__a) | |
107 | #define vcvtq_f16_s16(__a) __arm_vcvtq_f16_s16(__a) | |
108 | #define vcvtq_f32_s32(__a) __arm_vcvtq_f32_s32(__a) | |
109 | #define vcvtq_f16_u16(__a) __arm_vcvtq_f16_u16(__a) | |
110 | #define vcvtq_f32_u32(__a) __arm_vcvtq_f32_u32(__a) | |
6df4618c SP |
111 | #define vdupq_n_s8(__a) __arm_vdupq_n_s8(__a) |
112 | #define vdupq_n_s16(__a) __arm_vdupq_n_s16(__a) | |
113 | #define vdupq_n_s32(__a) __arm_vdupq_n_s32(__a) | |
114 | #define vabsq_s8(__a) __arm_vabsq_s8(__a) | |
115 | #define vabsq_s16(__a) __arm_vabsq_s16(__a) | |
116 | #define vabsq_s32(__a) __arm_vabsq_s32(__a) | |
117 | #define vclsq_s8(__a) __arm_vclsq_s8(__a) | |
118 | #define vclsq_s16(__a) __arm_vclsq_s16(__a) | |
119 | #define vclsq_s32(__a) __arm_vclsq_s32(__a) | |
120 | #define vclzq_s8(__a) __arm_vclzq_s8(__a) | |
121 | #define vclzq_s16(__a) __arm_vclzq_s16(__a) | |
122 | #define vclzq_s32(__a) __arm_vclzq_s32(__a) | |
123 | #define vnegq_s8(__a) __arm_vnegq_s8(__a) | |
124 | #define vnegq_s16(__a) __arm_vnegq_s16(__a) | |
125 | #define vnegq_s32(__a) __arm_vnegq_s32(__a) | |
126 | #define vaddlvq_s32(__a) __arm_vaddlvq_s32(__a) | |
127 | #define vaddvq_s8(__a) __arm_vaddvq_s8(__a) | |
128 | #define vaddvq_s16(__a) __arm_vaddvq_s16(__a) | |
129 | #define vaddvq_s32(__a) __arm_vaddvq_s32(__a) | |
130 | #define vmovlbq_s8(__a) __arm_vmovlbq_s8(__a) | |
131 | #define vmovlbq_s16(__a) __arm_vmovlbq_s16(__a) | |
132 | #define vmovltq_s8(__a) __arm_vmovltq_s8(__a) | |
133 | #define vmovltq_s16(__a) __arm_vmovltq_s16(__a) | |
134 | #define vmvnq_s8(__a) __arm_vmvnq_s8(__a) | |
135 | #define vmvnq_s16(__a) __arm_vmvnq_s16(__a) | |
136 | #define vmvnq_s32(__a) __arm_vmvnq_s32(__a) | |
5db0eb95 SP |
137 | #define vmvnq_n_s16( __imm) __arm_vmvnq_n_s16( __imm) |
138 | #define vmvnq_n_s32( __imm) __arm_vmvnq_n_s32( __imm) | |
6df4618c SP |
139 | #define vrev16q_s8(__a) __arm_vrev16q_s8(__a) |
140 | #define vrev32q_s8(__a) __arm_vrev32q_s8(__a) | |
141 | #define vrev32q_s16(__a) __arm_vrev32q_s16(__a) | |
5db0eb95 SP |
142 | #define vrev64q_s8(__a) __arm_vrev64q_s8(__a) |
143 | #define vrev64q_s16(__a) __arm_vrev64q_s16(__a) | |
144 | #define vrev64q_s32(__a) __arm_vrev64q_s32(__a) | |
6df4618c SP |
145 | #define vqabsq_s8(__a) __arm_vqabsq_s8(__a) |
146 | #define vqabsq_s16(__a) __arm_vqabsq_s16(__a) | |
147 | #define vqabsq_s32(__a) __arm_vqabsq_s32(__a) | |
148 | #define vqnegq_s8(__a) __arm_vqnegq_s8(__a) | |
149 | #define vqnegq_s16(__a) __arm_vqnegq_s16(__a) | |
150 | #define vqnegq_s32(__a) __arm_vqnegq_s32(__a) | |
151 | #define vcvtaq_s16_f16(__a) __arm_vcvtaq_s16_f16(__a) | |
152 | #define vcvtaq_s32_f32(__a) __arm_vcvtaq_s32_f32(__a) | |
153 | #define vcvtnq_s16_f16(__a) __arm_vcvtnq_s16_f16(__a) | |
154 | #define vcvtnq_s32_f32(__a) __arm_vcvtnq_s32_f32(__a) | |
155 | #define vcvtpq_s16_f16(__a) __arm_vcvtpq_s16_f16(__a) | |
156 | #define vcvtpq_s32_f32(__a) __arm_vcvtpq_s32_f32(__a) | |
157 | #define vcvtmq_s16_f16(__a) __arm_vcvtmq_s16_f16(__a) | |
158 | #define vcvtmq_s32_f32(__a) __arm_vcvtmq_s32_f32(__a) | |
5db0eb95 SP |
159 | #define vcvtq_s16_f16(__a) __arm_vcvtq_s16_f16(__a) |
160 | #define vcvtq_s32_f32(__a) __arm_vcvtq_s32_f32(__a) | |
161 | #define vrev64q_u8(__a) __arm_vrev64q_u8(__a) | |
162 | #define vrev64q_u16(__a) __arm_vrev64q_u16(__a) | |
163 | #define vrev64q_u32(__a) __arm_vrev64q_u32(__a) | |
6df4618c SP |
164 | #define vmvnq_u8(__a) __arm_vmvnq_u8(__a) |
165 | #define vmvnq_u16(__a) __arm_vmvnq_u16(__a) | |
166 | #define vmvnq_u32(__a) __arm_vmvnq_u32(__a) | |
167 | #define vdupq_n_u8(__a) __arm_vdupq_n_u8(__a) | |
168 | #define vdupq_n_u16(__a) __arm_vdupq_n_u16(__a) | |
169 | #define vdupq_n_u32(__a) __arm_vdupq_n_u32(__a) | |
170 | #define vclzq_u8(__a) __arm_vclzq_u8(__a) | |
171 | #define vclzq_u16(__a) __arm_vclzq_u16(__a) | |
172 | #define vclzq_u32(__a) __arm_vclzq_u32(__a) | |
173 | #define vaddvq_u8(__a) __arm_vaddvq_u8(__a) | |
174 | #define vaddvq_u16(__a) __arm_vaddvq_u16(__a) | |
175 | #define vaddvq_u32(__a) __arm_vaddvq_u32(__a) | |
176 | #define vrev32q_u8(__a) __arm_vrev32q_u8(__a) | |
177 | #define vrev32q_u16(__a) __arm_vrev32q_u16(__a) | |
178 | #define vmovltq_u8(__a) __arm_vmovltq_u8(__a) | |
179 | #define vmovltq_u16(__a) __arm_vmovltq_u16(__a) | |
180 | #define vmovlbq_u8(__a) __arm_vmovlbq_u8(__a) | |
181 | #define vmovlbq_u16(__a) __arm_vmovlbq_u16(__a) | |
5db0eb95 SP |
182 | #define vmvnq_n_u16( __imm) __arm_vmvnq_n_u16( __imm) |
183 | #define vmvnq_n_u32( __imm) __arm_vmvnq_n_u32( __imm) | |
6df4618c SP |
184 | #define vrev16q_u8(__a) __arm_vrev16q_u8(__a) |
185 | #define vaddlvq_u32(__a) __arm_vaddlvq_u32(__a) | |
5db0eb95 SP |
186 | #define vcvtq_u16_f16(__a) __arm_vcvtq_u16_f16(__a) |
187 | #define vcvtq_u32_f32(__a) __arm_vcvtq_u32_f32(__a) | |
6df4618c SP |
188 | #define vcvtpq_u16_f16(__a) __arm_vcvtpq_u16_f16(__a) |
189 | #define vcvtpq_u32_f32(__a) __arm_vcvtpq_u32_f32(__a) | |
190 | #define vcvtnq_u16_f16(__a) __arm_vcvtnq_u16_f16(__a) | |
191 | #define vcvtmq_u16_f16(__a) __arm_vcvtmq_u16_f16(__a) | |
192 | #define vcvtmq_u32_f32(__a) __arm_vcvtmq_u32_f32(__a) | |
193 | #define vcvtaq_u16_f16(__a) __arm_vcvtaq_u16_f16(__a) | |
194 | #define vcvtaq_u32_f32(__a) __arm_vcvtaq_u32_f32(__a) | |
a475f153 SP |
195 | #define vctp16q(__a) __arm_vctp16q(__a) |
196 | #define vctp32q(__a) __arm_vctp32q(__a) | |
197 | #define vctp64q(__a) __arm_vctp64q(__a) | |
198 | #define vctp8q(__a) __arm_vctp8q(__a) | |
199 | #define vpnot(__a) __arm_vpnot(__a) | |
4be8cf77 SP |
200 | #define vsubq_n_f16(__a, __b) __arm_vsubq_n_f16(__a, __b) |
201 | #define vsubq_n_f32(__a, __b) __arm_vsubq_n_f32(__a, __b) | |
202 | #define vbrsrq_n_f16(__a, __b) __arm_vbrsrq_n_f16(__a, __b) | |
203 | #define vbrsrq_n_f32(__a, __b) __arm_vbrsrq_n_f32(__a, __b) | |
204 | #define vcvtq_n_f16_s16(__a, __imm6) __arm_vcvtq_n_f16_s16(__a, __imm6) | |
205 | #define vcvtq_n_f32_s32(__a, __imm6) __arm_vcvtq_n_f32_s32(__a, __imm6) | |
206 | #define vcvtq_n_f16_u16(__a, __imm6) __arm_vcvtq_n_f16_u16(__a, __imm6) | |
207 | #define vcvtq_n_f32_u32(__a, __imm6) __arm_vcvtq_n_f32_u32(__a, __imm6) | |
208 | #define vcreateq_f16(__a, __b) __arm_vcreateq_f16(__a, __b) | |
209 | #define vcreateq_f32(__a, __b) __arm_vcreateq_f32(__a, __b) | |
f166a8cd SP |
210 | #define vcvtq_n_s16_f16(__a, __imm6) __arm_vcvtq_n_s16_f16(__a, __imm6) |
211 | #define vcvtq_n_s32_f32(__a, __imm6) __arm_vcvtq_n_s32_f32(__a, __imm6) | |
212 | #define vcvtq_n_u16_f16(__a, __imm6) __arm_vcvtq_n_u16_f16(__a, __imm6) | |
213 | #define vcvtq_n_u32_f32(__a, __imm6) __arm_vcvtq_n_u32_f32(__a, __imm6) | |
214 | #define vcreateq_u8(__a, __b) __arm_vcreateq_u8(__a, __b) | |
215 | #define vcreateq_u16(__a, __b) __arm_vcreateq_u16(__a, __b) | |
216 | #define vcreateq_u32(__a, __b) __arm_vcreateq_u32(__a, __b) | |
217 | #define vcreateq_u64(__a, __b) __arm_vcreateq_u64(__a, __b) | |
218 | #define vcreateq_s8(__a, __b) __arm_vcreateq_s8(__a, __b) | |
219 | #define vcreateq_s16(__a, __b) __arm_vcreateq_s16(__a, __b) | |
220 | #define vcreateq_s32(__a, __b) __arm_vcreateq_s32(__a, __b) | |
221 | #define vcreateq_s64(__a, __b) __arm_vcreateq_s64(__a, __b) | |
222 | #define vshrq_n_s8(__a, __imm) __arm_vshrq_n_s8(__a, __imm) | |
223 | #define vshrq_n_s16(__a, __imm) __arm_vshrq_n_s16(__a, __imm) | |
224 | #define vshrq_n_s32(__a, __imm) __arm_vshrq_n_s32(__a, __imm) | |
225 | #define vshrq_n_u8(__a, __imm) __arm_vshrq_n_u8(__a, __imm) | |
226 | #define vshrq_n_u16(__a, __imm) __arm_vshrq_n_u16(__a, __imm) | |
227 | #define vshrq_n_u32(__a, __imm) __arm_vshrq_n_u32(__a, __imm) | |
d71dba7b SP |
228 | #define vaddlvq_p_s32(__a, __p) __arm_vaddlvq_p_s32(__a, __p) |
229 | #define vaddlvq_p_u32(__a, __p) __arm_vaddlvq_p_u32(__a, __p) | |
230 | #define vcmpneq_s8(__a, __b) __arm_vcmpneq_s8(__a, __b) | |
231 | #define vcmpneq_s16(__a, __b) __arm_vcmpneq_s16(__a, __b) | |
232 | #define vcmpneq_s32(__a, __b) __arm_vcmpneq_s32(__a, __b) | |
233 | #define vcmpneq_u8(__a, __b) __arm_vcmpneq_u8(__a, __b) | |
234 | #define vcmpneq_u16(__a, __b) __arm_vcmpneq_u16(__a, __b) | |
235 | #define vcmpneq_u32(__a, __b) __arm_vcmpneq_u32(__a, __b) | |
236 | #define vshlq_s8(__a, __b) __arm_vshlq_s8(__a, __b) | |
237 | #define vshlq_s16(__a, __b) __arm_vshlq_s16(__a, __b) | |
238 | #define vshlq_s32(__a, __b) __arm_vshlq_s32(__a, __b) | |
239 | #define vshlq_u8(__a, __b) __arm_vshlq_u8(__a, __b) | |
240 | #define vshlq_u16(__a, __b) __arm_vshlq_u16(__a, __b) | |
241 | #define vshlq_u32(__a, __b) __arm_vshlq_u32(__a, __b) | |
33203b4c SP |
242 | #define vsubq_u8(__a, __b) __arm_vsubq_u8(__a, __b) |
243 | #define vsubq_n_u8(__a, __b) __arm_vsubq_n_u8(__a, __b) | |
244 | #define vrmulhq_u8(__a, __b) __arm_vrmulhq_u8(__a, __b) | |
245 | #define vrhaddq_u8(__a, __b) __arm_vrhaddq_u8(__a, __b) | |
246 | #define vqsubq_u8(__a, __b) __arm_vqsubq_u8(__a, __b) | |
247 | #define vqsubq_n_u8(__a, __b) __arm_vqsubq_n_u8(__a, __b) | |
248 | #define vqaddq_u8(__a, __b) __arm_vqaddq_u8(__a, __b) | |
249 | #define vqaddq_n_u8(__a, __b) __arm_vqaddq_n_u8(__a, __b) | |
250 | #define vorrq_u8(__a, __b) __arm_vorrq_u8(__a, __b) | |
251 | #define vornq_u8(__a, __b) __arm_vornq_u8(__a, __b) | |
252 | #define vmulq_u8(__a, __b) __arm_vmulq_u8(__a, __b) | |
253 | #define vmulq_n_u8(__a, __b) __arm_vmulq_n_u8(__a, __b) | |
254 | #define vmulltq_int_u8(__a, __b) __arm_vmulltq_int_u8(__a, __b) | |
255 | #define vmullbq_int_u8(__a, __b) __arm_vmullbq_int_u8(__a, __b) | |
256 | #define vmulhq_u8(__a, __b) __arm_vmulhq_u8(__a, __b) | |
257 | #define vmladavq_u8(__a, __b) __arm_vmladavq_u8(__a, __b) | |
258 | #define vminvq_u8(__a, __b) __arm_vminvq_u8(__a, __b) | |
259 | #define vminq_u8(__a, __b) __arm_vminq_u8(__a, __b) | |
260 | #define vmaxvq_u8(__a, __b) __arm_vmaxvq_u8(__a, __b) | |
261 | #define vmaxq_u8(__a, __b) __arm_vmaxq_u8(__a, __b) | |
262 | #define vhsubq_u8(__a, __b) __arm_vhsubq_u8(__a, __b) | |
263 | #define vhsubq_n_u8(__a, __b) __arm_vhsubq_n_u8(__a, __b) | |
264 | #define vhaddq_u8(__a, __b) __arm_vhaddq_u8(__a, __b) | |
265 | #define vhaddq_n_u8(__a, __b) __arm_vhaddq_n_u8(__a, __b) | |
266 | #define veorq_u8(__a, __b) __arm_veorq_u8(__a, __b) | |
267 | #define vcmpneq_n_u8(__a, __b) __arm_vcmpneq_n_u8(__a, __b) | |
268 | #define vcmphiq_u8(__a, __b) __arm_vcmphiq_u8(__a, __b) | |
269 | #define vcmphiq_n_u8(__a, __b) __arm_vcmphiq_n_u8(__a, __b) | |
270 | #define vcmpeqq_u8(__a, __b) __arm_vcmpeqq_u8(__a, __b) | |
271 | #define vcmpeqq_n_u8(__a, __b) __arm_vcmpeqq_n_u8(__a, __b) | |
272 | #define vcmpcsq_u8(__a, __b) __arm_vcmpcsq_u8(__a, __b) | |
273 | #define vcmpcsq_n_u8(__a, __b) __arm_vcmpcsq_n_u8(__a, __b) | |
274 | #define vcaddq_rot90_u8(__a, __b) __arm_vcaddq_rot90_u8(__a, __b) | |
275 | #define vcaddq_rot270_u8(__a, __b) __arm_vcaddq_rot270_u8(__a, __b) | |
276 | #define vbicq_u8(__a, __b) __arm_vbicq_u8(__a, __b) | |
277 | #define vandq_u8(__a, __b) __arm_vandq_u8(__a, __b) | |
278 | #define vaddvq_p_u8(__a, __p) __arm_vaddvq_p_u8(__a, __p) | |
279 | #define vaddvaq_u8(__a, __b) __arm_vaddvaq_u8(__a, __b) | |
280 | #define vaddq_n_u8(__a, __b) __arm_vaddq_n_u8(__a, __b) | |
281 | #define vabdq_u8(__a, __b) __arm_vabdq_u8(__a, __b) | |
282 | #define vshlq_r_u8(__a, __b) __arm_vshlq_r_u8(__a, __b) | |
283 | #define vrshlq_u8(__a, __b) __arm_vrshlq_u8(__a, __b) | |
284 | #define vrshlq_n_u8(__a, __b) __arm_vrshlq_n_u8(__a, __b) | |
285 | #define vqshlq_u8(__a, __b) __arm_vqshlq_u8(__a, __b) | |
286 | #define vqshlq_r_u8(__a, __b) __arm_vqshlq_r_u8(__a, __b) | |
287 | #define vqrshlq_u8(__a, __b) __arm_vqrshlq_u8(__a, __b) | |
288 | #define vqrshlq_n_u8(__a, __b) __arm_vqrshlq_n_u8(__a, __b) | |
289 | #define vminavq_s8(__a, __b) __arm_vminavq_s8(__a, __b) | |
290 | #define vminaq_s8(__a, __b) __arm_vminaq_s8(__a, __b) | |
291 | #define vmaxavq_s8(__a, __b) __arm_vmaxavq_s8(__a, __b) | |
292 | #define vmaxaq_s8(__a, __b) __arm_vmaxaq_s8(__a, __b) | |
293 | #define vbrsrq_n_u8(__a, __b) __arm_vbrsrq_n_u8(__a, __b) | |
294 | #define vshlq_n_u8(__a, __imm) __arm_vshlq_n_u8(__a, __imm) | |
295 | #define vrshrq_n_u8(__a, __imm) __arm_vrshrq_n_u8(__a, __imm) | |
296 | #define vqshlq_n_u8(__a, __imm) __arm_vqshlq_n_u8(__a, __imm) | |
297 | #define vcmpneq_n_s8(__a, __b) __arm_vcmpneq_n_s8(__a, __b) | |
298 | #define vcmpltq_s8(__a, __b) __arm_vcmpltq_s8(__a, __b) | |
299 | #define vcmpltq_n_s8(__a, __b) __arm_vcmpltq_n_s8(__a, __b) | |
300 | #define vcmpleq_s8(__a, __b) __arm_vcmpleq_s8(__a, __b) | |
301 | #define vcmpleq_n_s8(__a, __b) __arm_vcmpleq_n_s8(__a, __b) | |
302 | #define vcmpgtq_s8(__a, __b) __arm_vcmpgtq_s8(__a, __b) | |
303 | #define vcmpgtq_n_s8(__a, __b) __arm_vcmpgtq_n_s8(__a, __b) | |
304 | #define vcmpgeq_s8(__a, __b) __arm_vcmpgeq_s8(__a, __b) | |
305 | #define vcmpgeq_n_s8(__a, __b) __arm_vcmpgeq_n_s8(__a, __b) | |
306 | #define vcmpeqq_s8(__a, __b) __arm_vcmpeqq_s8(__a, __b) | |
307 | #define vcmpeqq_n_s8(__a, __b) __arm_vcmpeqq_n_s8(__a, __b) | |
308 | #define vqshluq_n_s8(__a, __imm) __arm_vqshluq_n_s8(__a, __imm) | |
309 | #define vaddvq_p_s8(__a, __p) __arm_vaddvq_p_s8(__a, __p) | |
310 | #define vsubq_s8(__a, __b) __arm_vsubq_s8(__a, __b) | |
311 | #define vsubq_n_s8(__a, __b) __arm_vsubq_n_s8(__a, __b) | |
312 | #define vshlq_r_s8(__a, __b) __arm_vshlq_r_s8(__a, __b) | |
313 | #define vrshlq_s8(__a, __b) __arm_vrshlq_s8(__a, __b) | |
314 | #define vrshlq_n_s8(__a, __b) __arm_vrshlq_n_s8(__a, __b) | |
315 | #define vrmulhq_s8(__a, __b) __arm_vrmulhq_s8(__a, __b) | |
316 | #define vrhaddq_s8(__a, __b) __arm_vrhaddq_s8(__a, __b) | |
317 | #define vqsubq_s8(__a, __b) __arm_vqsubq_s8(__a, __b) | |
318 | #define vqsubq_n_s8(__a, __b) __arm_vqsubq_n_s8(__a, __b) | |
319 | #define vqshlq_s8(__a, __b) __arm_vqshlq_s8(__a, __b) | |
320 | #define vqshlq_r_s8(__a, __b) __arm_vqshlq_r_s8(__a, __b) | |
321 | #define vqrshlq_s8(__a, __b) __arm_vqrshlq_s8(__a, __b) | |
322 | #define vqrshlq_n_s8(__a, __b) __arm_vqrshlq_n_s8(__a, __b) | |
323 | #define vqrdmulhq_s8(__a, __b) __arm_vqrdmulhq_s8(__a, __b) | |
324 | #define vqrdmulhq_n_s8(__a, __b) __arm_vqrdmulhq_n_s8(__a, __b) | |
325 | #define vqdmulhq_s8(__a, __b) __arm_vqdmulhq_s8(__a, __b) | |
326 | #define vqdmulhq_n_s8(__a, __b) __arm_vqdmulhq_n_s8(__a, __b) | |
327 | #define vqaddq_s8(__a, __b) __arm_vqaddq_s8(__a, __b) | |
328 | #define vqaddq_n_s8(__a, __b) __arm_vqaddq_n_s8(__a, __b) | |
329 | #define vorrq_s8(__a, __b) __arm_vorrq_s8(__a, __b) | |
330 | #define vornq_s8(__a, __b) __arm_vornq_s8(__a, __b) | |
331 | #define vmulq_s8(__a, __b) __arm_vmulq_s8(__a, __b) | |
332 | #define vmulq_n_s8(__a, __b) __arm_vmulq_n_s8(__a, __b) | |
333 | #define vmulltq_int_s8(__a, __b) __arm_vmulltq_int_s8(__a, __b) | |
334 | #define vmullbq_int_s8(__a, __b) __arm_vmullbq_int_s8(__a, __b) | |
335 | #define vmulhq_s8(__a, __b) __arm_vmulhq_s8(__a, __b) | |
336 | #define vmlsdavxq_s8(__a, __b) __arm_vmlsdavxq_s8(__a, __b) | |
337 | #define vmlsdavq_s8(__a, __b) __arm_vmlsdavq_s8(__a, __b) | |
338 | #define vmladavxq_s8(__a, __b) __arm_vmladavxq_s8(__a, __b) | |
339 | #define vmladavq_s8(__a, __b) __arm_vmladavq_s8(__a, __b) | |
340 | #define vminvq_s8(__a, __b) __arm_vminvq_s8(__a, __b) | |
341 | #define vminq_s8(__a, __b) __arm_vminq_s8(__a, __b) | |
342 | #define vmaxvq_s8(__a, __b) __arm_vmaxvq_s8(__a, __b) | |
343 | #define vmaxq_s8(__a, __b) __arm_vmaxq_s8(__a, __b) | |
344 | #define vhsubq_s8(__a, __b) __arm_vhsubq_s8(__a, __b) | |
345 | #define vhsubq_n_s8(__a, __b) __arm_vhsubq_n_s8(__a, __b) | |
346 | #define vhcaddq_rot90_s8(__a, __b) __arm_vhcaddq_rot90_s8(__a, __b) | |
347 | #define vhcaddq_rot270_s8(__a, __b) __arm_vhcaddq_rot270_s8(__a, __b) | |
348 | #define vhaddq_s8(__a, __b) __arm_vhaddq_s8(__a, __b) | |
349 | #define vhaddq_n_s8(__a, __b) __arm_vhaddq_n_s8(__a, __b) | |
350 | #define veorq_s8(__a, __b) __arm_veorq_s8(__a, __b) | |
351 | #define vcaddq_rot90_s8(__a, __b) __arm_vcaddq_rot90_s8(__a, __b) | |
352 | #define vcaddq_rot270_s8(__a, __b) __arm_vcaddq_rot270_s8(__a, __b) | |
353 | #define vbrsrq_n_s8(__a, __b) __arm_vbrsrq_n_s8(__a, __b) | |
354 | #define vbicq_s8(__a, __b) __arm_vbicq_s8(__a, __b) | |
355 | #define vandq_s8(__a, __b) __arm_vandq_s8(__a, __b) | |
356 | #define vaddvaq_s8(__a, __b) __arm_vaddvaq_s8(__a, __b) | |
357 | #define vaddq_n_s8(__a, __b) __arm_vaddq_n_s8(__a, __b) | |
358 | #define vabdq_s8(__a, __b) __arm_vabdq_s8(__a, __b) | |
359 | #define vshlq_n_s8(__a, __imm) __arm_vshlq_n_s8(__a, __imm) | |
360 | #define vrshrq_n_s8(__a, __imm) __arm_vrshrq_n_s8(__a, __imm) | |
361 | #define vqshlq_n_s8(__a, __imm) __arm_vqshlq_n_s8(__a, __imm) | |
362 | #define vsubq_u16(__a, __b) __arm_vsubq_u16(__a, __b) | |
363 | #define vsubq_n_u16(__a, __b) __arm_vsubq_n_u16(__a, __b) | |
364 | #define vrmulhq_u16(__a, __b) __arm_vrmulhq_u16(__a, __b) | |
365 | #define vrhaddq_u16(__a, __b) __arm_vrhaddq_u16(__a, __b) | |
366 | #define vqsubq_u16(__a, __b) __arm_vqsubq_u16(__a, __b) | |
367 | #define vqsubq_n_u16(__a, __b) __arm_vqsubq_n_u16(__a, __b) | |
368 | #define vqaddq_u16(__a, __b) __arm_vqaddq_u16(__a, __b) | |
369 | #define vqaddq_n_u16(__a, __b) __arm_vqaddq_n_u16(__a, __b) | |
370 | #define vorrq_u16(__a, __b) __arm_vorrq_u16(__a, __b) | |
371 | #define vornq_u16(__a, __b) __arm_vornq_u16(__a, __b) | |
372 | #define vmulq_u16(__a, __b) __arm_vmulq_u16(__a, __b) | |
373 | #define vmulq_n_u16(__a, __b) __arm_vmulq_n_u16(__a, __b) | |
374 | #define vmulltq_int_u16(__a, __b) __arm_vmulltq_int_u16(__a, __b) | |
375 | #define vmullbq_int_u16(__a, __b) __arm_vmullbq_int_u16(__a, __b) | |
376 | #define vmulhq_u16(__a, __b) __arm_vmulhq_u16(__a, __b) | |
377 | #define vmladavq_u16(__a, __b) __arm_vmladavq_u16(__a, __b) | |
378 | #define vminvq_u16(__a, __b) __arm_vminvq_u16(__a, __b) | |
379 | #define vminq_u16(__a, __b) __arm_vminq_u16(__a, __b) | |
380 | #define vmaxvq_u16(__a, __b) __arm_vmaxvq_u16(__a, __b) | |
381 | #define vmaxq_u16(__a, __b) __arm_vmaxq_u16(__a, __b) | |
382 | #define vhsubq_u16(__a, __b) __arm_vhsubq_u16(__a, __b) | |
383 | #define vhsubq_n_u16(__a, __b) __arm_vhsubq_n_u16(__a, __b) | |
384 | #define vhaddq_u16(__a, __b) __arm_vhaddq_u16(__a, __b) | |
385 | #define vhaddq_n_u16(__a, __b) __arm_vhaddq_n_u16(__a, __b) | |
386 | #define veorq_u16(__a, __b) __arm_veorq_u16(__a, __b) | |
387 | #define vcmpneq_n_u16(__a, __b) __arm_vcmpneq_n_u16(__a, __b) | |
388 | #define vcmphiq_u16(__a, __b) __arm_vcmphiq_u16(__a, __b) | |
389 | #define vcmphiq_n_u16(__a, __b) __arm_vcmphiq_n_u16(__a, __b) | |
390 | #define vcmpeqq_u16(__a, __b) __arm_vcmpeqq_u16(__a, __b) | |
391 | #define vcmpeqq_n_u16(__a, __b) __arm_vcmpeqq_n_u16(__a, __b) | |
392 | #define vcmpcsq_u16(__a, __b) __arm_vcmpcsq_u16(__a, __b) | |
393 | #define vcmpcsq_n_u16(__a, __b) __arm_vcmpcsq_n_u16(__a, __b) | |
394 | #define vcaddq_rot90_u16(__a, __b) __arm_vcaddq_rot90_u16(__a, __b) | |
395 | #define vcaddq_rot270_u16(__a, __b) __arm_vcaddq_rot270_u16(__a, __b) | |
396 | #define vbicq_u16(__a, __b) __arm_vbicq_u16(__a, __b) | |
397 | #define vandq_u16(__a, __b) __arm_vandq_u16(__a, __b) | |
398 | #define vaddvq_p_u16(__a, __p) __arm_vaddvq_p_u16(__a, __p) | |
399 | #define vaddvaq_u16(__a, __b) __arm_vaddvaq_u16(__a, __b) | |
400 | #define vaddq_n_u16(__a, __b) __arm_vaddq_n_u16(__a, __b) | |
401 | #define vabdq_u16(__a, __b) __arm_vabdq_u16(__a, __b) | |
402 | #define vshlq_r_u16(__a, __b) __arm_vshlq_r_u16(__a, __b) | |
403 | #define vrshlq_u16(__a, __b) __arm_vrshlq_u16(__a, __b) | |
404 | #define vrshlq_n_u16(__a, __b) __arm_vrshlq_n_u16(__a, __b) | |
405 | #define vqshlq_u16(__a, __b) __arm_vqshlq_u16(__a, __b) | |
406 | #define vqshlq_r_u16(__a, __b) __arm_vqshlq_r_u16(__a, __b) | |
407 | #define vqrshlq_u16(__a, __b) __arm_vqrshlq_u16(__a, __b) | |
408 | #define vqrshlq_n_u16(__a, __b) __arm_vqrshlq_n_u16(__a, __b) | |
409 | #define vminavq_s16(__a, __b) __arm_vminavq_s16(__a, __b) | |
410 | #define vminaq_s16(__a, __b) __arm_vminaq_s16(__a, __b) | |
411 | #define vmaxavq_s16(__a, __b) __arm_vmaxavq_s16(__a, __b) | |
412 | #define vmaxaq_s16(__a, __b) __arm_vmaxaq_s16(__a, __b) | |
413 | #define vbrsrq_n_u16(__a, __b) __arm_vbrsrq_n_u16(__a, __b) | |
414 | #define vshlq_n_u16(__a, __imm) __arm_vshlq_n_u16(__a, __imm) | |
415 | #define vrshrq_n_u16(__a, __imm) __arm_vrshrq_n_u16(__a, __imm) | |
416 | #define vqshlq_n_u16(__a, __imm) __arm_vqshlq_n_u16(__a, __imm) | |
417 | #define vcmpneq_n_s16(__a, __b) __arm_vcmpneq_n_s16(__a, __b) | |
418 | #define vcmpltq_s16(__a, __b) __arm_vcmpltq_s16(__a, __b) | |
419 | #define vcmpltq_n_s16(__a, __b) __arm_vcmpltq_n_s16(__a, __b) | |
420 | #define vcmpleq_s16(__a, __b) __arm_vcmpleq_s16(__a, __b) | |
421 | #define vcmpleq_n_s16(__a, __b) __arm_vcmpleq_n_s16(__a, __b) | |
422 | #define vcmpgtq_s16(__a, __b) __arm_vcmpgtq_s16(__a, __b) | |
423 | #define vcmpgtq_n_s16(__a, __b) __arm_vcmpgtq_n_s16(__a, __b) | |
424 | #define vcmpgeq_s16(__a, __b) __arm_vcmpgeq_s16(__a, __b) | |
425 | #define vcmpgeq_n_s16(__a, __b) __arm_vcmpgeq_n_s16(__a, __b) | |
426 | #define vcmpeqq_s16(__a, __b) __arm_vcmpeqq_s16(__a, __b) | |
427 | #define vcmpeqq_n_s16(__a, __b) __arm_vcmpeqq_n_s16(__a, __b) | |
428 | #define vqshluq_n_s16(__a, __imm) __arm_vqshluq_n_s16(__a, __imm) | |
429 | #define vaddvq_p_s16(__a, __p) __arm_vaddvq_p_s16(__a, __p) | |
430 | #define vsubq_s16(__a, __b) __arm_vsubq_s16(__a, __b) | |
431 | #define vsubq_n_s16(__a, __b) __arm_vsubq_n_s16(__a, __b) | |
432 | #define vshlq_r_s16(__a, __b) __arm_vshlq_r_s16(__a, __b) | |
433 | #define vrshlq_s16(__a, __b) __arm_vrshlq_s16(__a, __b) | |
434 | #define vrshlq_n_s16(__a, __b) __arm_vrshlq_n_s16(__a, __b) | |
435 | #define vrmulhq_s16(__a, __b) __arm_vrmulhq_s16(__a, __b) | |
436 | #define vrhaddq_s16(__a, __b) __arm_vrhaddq_s16(__a, __b) | |
437 | #define vqsubq_s16(__a, __b) __arm_vqsubq_s16(__a, __b) | |
438 | #define vqsubq_n_s16(__a, __b) __arm_vqsubq_n_s16(__a, __b) | |
439 | #define vqshlq_s16(__a, __b) __arm_vqshlq_s16(__a, __b) | |
440 | #define vqshlq_r_s16(__a, __b) __arm_vqshlq_r_s16(__a, __b) | |
441 | #define vqrshlq_s16(__a, __b) __arm_vqrshlq_s16(__a, __b) | |
442 | #define vqrshlq_n_s16(__a, __b) __arm_vqrshlq_n_s16(__a, __b) | |
443 | #define vqrdmulhq_s16(__a, __b) __arm_vqrdmulhq_s16(__a, __b) | |
444 | #define vqrdmulhq_n_s16(__a, __b) __arm_vqrdmulhq_n_s16(__a, __b) | |
445 | #define vqdmulhq_s16(__a, __b) __arm_vqdmulhq_s16(__a, __b) | |
446 | #define vqdmulhq_n_s16(__a, __b) __arm_vqdmulhq_n_s16(__a, __b) | |
447 | #define vqaddq_s16(__a, __b) __arm_vqaddq_s16(__a, __b) | |
448 | #define vqaddq_n_s16(__a, __b) __arm_vqaddq_n_s16(__a, __b) | |
449 | #define vorrq_s16(__a, __b) __arm_vorrq_s16(__a, __b) | |
450 | #define vornq_s16(__a, __b) __arm_vornq_s16(__a, __b) | |
451 | #define vmulq_s16(__a, __b) __arm_vmulq_s16(__a, __b) | |
452 | #define vmulq_n_s16(__a, __b) __arm_vmulq_n_s16(__a, __b) | |
453 | #define vmulltq_int_s16(__a, __b) __arm_vmulltq_int_s16(__a, __b) | |
454 | #define vmullbq_int_s16(__a, __b) __arm_vmullbq_int_s16(__a, __b) | |
455 | #define vmulhq_s16(__a, __b) __arm_vmulhq_s16(__a, __b) | |
456 | #define vmlsdavxq_s16(__a, __b) __arm_vmlsdavxq_s16(__a, __b) | |
457 | #define vmlsdavq_s16(__a, __b) __arm_vmlsdavq_s16(__a, __b) | |
458 | #define vmladavxq_s16(__a, __b) __arm_vmladavxq_s16(__a, __b) | |
459 | #define vmladavq_s16(__a, __b) __arm_vmladavq_s16(__a, __b) | |
460 | #define vminvq_s16(__a, __b) __arm_vminvq_s16(__a, __b) | |
461 | #define vminq_s16(__a, __b) __arm_vminq_s16(__a, __b) | |
462 | #define vmaxvq_s16(__a, __b) __arm_vmaxvq_s16(__a, __b) | |
463 | #define vmaxq_s16(__a, __b) __arm_vmaxq_s16(__a, __b) | |
464 | #define vhsubq_s16(__a, __b) __arm_vhsubq_s16(__a, __b) | |
465 | #define vhsubq_n_s16(__a, __b) __arm_vhsubq_n_s16(__a, __b) | |
466 | #define vhcaddq_rot90_s16(__a, __b) __arm_vhcaddq_rot90_s16(__a, __b) | |
467 | #define vhcaddq_rot270_s16(__a, __b) __arm_vhcaddq_rot270_s16(__a, __b) | |
468 | #define vhaddq_s16(__a, __b) __arm_vhaddq_s16(__a, __b) | |
469 | #define vhaddq_n_s16(__a, __b) __arm_vhaddq_n_s16(__a, __b) | |
470 | #define veorq_s16(__a, __b) __arm_veorq_s16(__a, __b) | |
471 | #define vcaddq_rot90_s16(__a, __b) __arm_vcaddq_rot90_s16(__a, __b) | |
472 | #define vcaddq_rot270_s16(__a, __b) __arm_vcaddq_rot270_s16(__a, __b) | |
473 | #define vbrsrq_n_s16(__a, __b) __arm_vbrsrq_n_s16(__a, __b) | |
474 | #define vbicq_s16(__a, __b) __arm_vbicq_s16(__a, __b) | |
475 | #define vandq_s16(__a, __b) __arm_vandq_s16(__a, __b) | |
476 | #define vaddvaq_s16(__a, __b) __arm_vaddvaq_s16(__a, __b) | |
477 | #define vaddq_n_s16(__a, __b) __arm_vaddq_n_s16(__a, __b) | |
478 | #define vabdq_s16(__a, __b) __arm_vabdq_s16(__a, __b) | |
479 | #define vshlq_n_s16(__a, __imm) __arm_vshlq_n_s16(__a, __imm) | |
480 | #define vrshrq_n_s16(__a, __imm) __arm_vrshrq_n_s16(__a, __imm) | |
481 | #define vqshlq_n_s16(__a, __imm) __arm_vqshlq_n_s16(__a, __imm) | |
482 | #define vsubq_u32(__a, __b) __arm_vsubq_u32(__a, __b) | |
/* Non-predicated 32-bit integer intrinsics (unsigned then signed):
   arithmetic, saturating/halving/rounding variants, logicals, shifts,
   compares, complex-add rotations and across-vector reductions.  Each
   user-facing name forwards to its __arm_-prefixed implementation so
   that the public identifier can also be re-used by the _Generic
   polymorphic dispatch macros later in this header.  */
#define vsubq_n_u32(__a, __b) __arm_vsubq_n_u32(__a, __b)
#define vrmulhq_u32(__a, __b) __arm_vrmulhq_u32(__a, __b)
#define vrhaddq_u32(__a, __b) __arm_vrhaddq_u32(__a, __b)
#define vqsubq_u32(__a, __b) __arm_vqsubq_u32(__a, __b)
#define vqsubq_n_u32(__a, __b) __arm_vqsubq_n_u32(__a, __b)
#define vqaddq_u32(__a, __b) __arm_vqaddq_u32(__a, __b)
#define vqaddq_n_u32(__a, __b) __arm_vqaddq_n_u32(__a, __b)
#define vorrq_u32(__a, __b) __arm_vorrq_u32(__a, __b)
#define vornq_u32(__a, __b) __arm_vornq_u32(__a, __b)
#define vmulq_u32(__a, __b) __arm_vmulq_u32(__a, __b)
#define vmulq_n_u32(__a, __b) __arm_vmulq_n_u32(__a, __b)
#define vmulltq_int_u32(__a, __b) __arm_vmulltq_int_u32(__a, __b)
#define vmullbq_int_u32(__a, __b) __arm_vmullbq_int_u32(__a, __b)
#define vmulhq_u32(__a, __b) __arm_vmulhq_u32(__a, __b)
#define vmladavq_u32(__a, __b) __arm_vmladavq_u32(__a, __b)
#define vminvq_u32(__a, __b) __arm_vminvq_u32(__a, __b)
#define vminq_u32(__a, __b) __arm_vminq_u32(__a, __b)
#define vmaxvq_u32(__a, __b) __arm_vmaxvq_u32(__a, __b)
#define vmaxq_u32(__a, __b) __arm_vmaxq_u32(__a, __b)
#define vhsubq_u32(__a, __b) __arm_vhsubq_u32(__a, __b)
#define vhsubq_n_u32(__a, __b) __arm_vhsubq_n_u32(__a, __b)
#define vhaddq_u32(__a, __b) __arm_vhaddq_u32(__a, __b)
#define vhaddq_n_u32(__a, __b) __arm_vhaddq_n_u32(__a, __b)
#define veorq_u32(__a, __b) __arm_veorq_u32(__a, __b)
#define vcmpneq_n_u32(__a, __b) __arm_vcmpneq_n_u32(__a, __b)
#define vcmphiq_u32(__a, __b) __arm_vcmphiq_u32(__a, __b)
#define vcmphiq_n_u32(__a, __b) __arm_vcmphiq_n_u32(__a, __b)
#define vcmpeqq_u32(__a, __b) __arm_vcmpeqq_u32(__a, __b)
#define vcmpeqq_n_u32(__a, __b) __arm_vcmpeqq_n_u32(__a, __b)
#define vcmpcsq_u32(__a, __b) __arm_vcmpcsq_u32(__a, __b)
#define vcmpcsq_n_u32(__a, __b) __arm_vcmpcsq_n_u32(__a, __b)
#define vcaddq_rot90_u32(__a, __b) __arm_vcaddq_rot90_u32(__a, __b)
#define vcaddq_rot270_u32(__a, __b) __arm_vcaddq_rot270_u32(__a, __b)
#define vbicq_u32(__a, __b) __arm_vbicq_u32(__a, __b)
#define vandq_u32(__a, __b) __arm_vandq_u32(__a, __b)
#define vaddvq_p_u32(__a, __p) __arm_vaddvq_p_u32(__a, __p)
#define vaddvaq_u32(__a, __b) __arm_vaddvaq_u32(__a, __b)
#define vaddq_n_u32(__a, __b) __arm_vaddq_n_u32(__a, __b)
#define vabdq_u32(__a, __b) __arm_vabdq_u32(__a, __b)
#define vshlq_r_u32(__a, __b) __arm_vshlq_r_u32(__a, __b)
#define vrshlq_u32(__a, __b) __arm_vrshlq_u32(__a, __b)
#define vrshlq_n_u32(__a, __b) __arm_vrshlq_n_u32(__a, __b)
#define vqshlq_u32(__a, __b) __arm_vqshlq_u32(__a, __b)
#define vqshlq_r_u32(__a, __b) __arm_vqshlq_r_u32(__a, __b)
#define vqrshlq_u32(__a, __b) __arm_vqrshlq_u32(__a, __b)
#define vqrshlq_n_u32(__a, __b) __arm_vqrshlq_n_u32(__a, __b)
#define vminavq_s32(__a, __b) __arm_vminavq_s32(__a, __b)
#define vminaq_s32(__a, __b) __arm_vminaq_s32(__a, __b)
#define vmaxavq_s32(__a, __b) __arm_vmaxavq_s32(__a, __b)
#define vmaxaq_s32(__a, __b) __arm_vmaxaq_s32(__a, __b)
#define vbrsrq_n_u32(__a, __b) __arm_vbrsrq_n_u32(__a, __b)
#define vshlq_n_u32(__a, __imm) __arm_vshlq_n_u32(__a, __imm)
#define vrshrq_n_u32(__a, __imm) __arm_vrshrq_n_u32(__a, __imm)
#define vqshlq_n_u32(__a, __imm) __arm_vqshlq_n_u32(__a, __imm)
#define vcmpneq_n_s32(__a, __b) __arm_vcmpneq_n_s32(__a, __b)
#define vcmpltq_s32(__a, __b) __arm_vcmpltq_s32(__a, __b)
#define vcmpltq_n_s32(__a, __b) __arm_vcmpltq_n_s32(__a, __b)
#define vcmpleq_s32(__a, __b) __arm_vcmpleq_s32(__a, __b)
#define vcmpleq_n_s32(__a, __b) __arm_vcmpleq_n_s32(__a, __b)
#define vcmpgtq_s32(__a, __b) __arm_vcmpgtq_s32(__a, __b)
#define vcmpgtq_n_s32(__a, __b) __arm_vcmpgtq_n_s32(__a, __b)
#define vcmpgeq_s32(__a, __b) __arm_vcmpgeq_s32(__a, __b)
#define vcmpgeq_n_s32(__a, __b) __arm_vcmpgeq_n_s32(__a, __b)
#define vcmpeqq_s32(__a, __b) __arm_vcmpeqq_s32(__a, __b)
#define vcmpeqq_n_s32(__a, __b) __arm_vcmpeqq_n_s32(__a, __b)
#define vqshluq_n_s32(__a, __imm) __arm_vqshluq_n_s32(__a, __imm)
#define vaddvq_p_s32(__a, __p) __arm_vaddvq_p_s32(__a, __p)
#define vsubq_s32(__a, __b) __arm_vsubq_s32(__a, __b)
#define vsubq_n_s32(__a, __b) __arm_vsubq_n_s32(__a, __b)
#define vshlq_r_s32(__a, __b) __arm_vshlq_r_s32(__a, __b)
#define vrshlq_s32(__a, __b) __arm_vrshlq_s32(__a, __b)
#define vrshlq_n_s32(__a, __b) __arm_vrshlq_n_s32(__a, __b)
#define vrmulhq_s32(__a, __b) __arm_vrmulhq_s32(__a, __b)
#define vrhaddq_s32(__a, __b) __arm_vrhaddq_s32(__a, __b)
#define vqsubq_s32(__a, __b) __arm_vqsubq_s32(__a, __b)
#define vqsubq_n_s32(__a, __b) __arm_vqsubq_n_s32(__a, __b)
#define vqshlq_s32(__a, __b) __arm_vqshlq_s32(__a, __b)
#define vqshlq_r_s32(__a, __b) __arm_vqshlq_r_s32(__a, __b)
#define vqrshlq_s32(__a, __b) __arm_vqrshlq_s32(__a, __b)
#define vqrshlq_n_s32(__a, __b) __arm_vqrshlq_n_s32(__a, __b)
#define vqrdmulhq_s32(__a, __b) __arm_vqrdmulhq_s32(__a, __b)
#define vqrdmulhq_n_s32(__a, __b) __arm_vqrdmulhq_n_s32(__a, __b)
#define vqdmulhq_s32(__a, __b) __arm_vqdmulhq_s32(__a, __b)
#define vqdmulhq_n_s32(__a, __b) __arm_vqdmulhq_n_s32(__a, __b)
#define vqaddq_s32(__a, __b) __arm_vqaddq_s32(__a, __b)
#define vqaddq_n_s32(__a, __b) __arm_vqaddq_n_s32(__a, __b)
#define vorrq_s32(__a, __b) __arm_vorrq_s32(__a, __b)
#define vornq_s32(__a, __b) __arm_vornq_s32(__a, __b)
#define vmulq_s32(__a, __b) __arm_vmulq_s32(__a, __b)
#define vmulq_n_s32(__a, __b) __arm_vmulq_n_s32(__a, __b)
#define vmulltq_int_s32(__a, __b) __arm_vmulltq_int_s32(__a, __b)
#define vmullbq_int_s32(__a, __b) __arm_vmullbq_int_s32(__a, __b)
#define vmulhq_s32(__a, __b) __arm_vmulhq_s32(__a, __b)
#define vmlsdavxq_s32(__a, __b) __arm_vmlsdavxq_s32(__a, __b)
#define vmlsdavq_s32(__a, __b) __arm_vmlsdavq_s32(__a, __b)
#define vmladavxq_s32(__a, __b) __arm_vmladavxq_s32(__a, __b)
#define vmladavq_s32(__a, __b) __arm_vmladavq_s32(__a, __b)
#define vminvq_s32(__a, __b) __arm_vminvq_s32(__a, __b)
#define vminq_s32(__a, __b) __arm_vminq_s32(__a, __b)
#define vmaxvq_s32(__a, __b) __arm_vmaxvq_s32(__a, __b)
#define vmaxq_s32(__a, __b) __arm_vmaxq_s32(__a, __b)
#define vhsubq_s32(__a, __b) __arm_vhsubq_s32(__a, __b)
#define vhsubq_n_s32(__a, __b) __arm_vhsubq_n_s32(__a, __b)
#define vhcaddq_rot90_s32(__a, __b) __arm_vhcaddq_rot90_s32(__a, __b)
#define vhcaddq_rot270_s32(__a, __b) __arm_vhcaddq_rot270_s32(__a, __b)
#define vhaddq_s32(__a, __b) __arm_vhaddq_s32(__a, __b)
#define vhaddq_n_s32(__a, __b) __arm_vhaddq_n_s32(__a, __b)
#define veorq_s32(__a, __b) __arm_veorq_s32(__a, __b)
#define vcaddq_rot90_s32(__a, __b) __arm_vcaddq_rot90_s32(__a, __b)
#define vcaddq_rot270_s32(__a, __b) __arm_vcaddq_rot270_s32(__a, __b)
#define vbrsrq_n_s32(__a, __b) __arm_vbrsrq_n_s32(__a, __b)
#define vbicq_s32(__a, __b) __arm_vbicq_s32(__a, __b)
#define vandq_s32(__a, __b) __arm_vandq_s32(__a, __b)
#define vaddvaq_s32(__a, __b) __arm_vaddvaq_s32(__a, __b)
#define vaddq_n_s32(__a, __b) __arm_vaddq_n_s32(__a, __b)
#define vabdq_s32(__a, __b) __arm_vabdq_s32(__a, __b)
#define vshlq_n_s32(__a, __imm) __arm_vshlq_n_s32(__a, __imm)
#define vrshrq_n_s32(__a, __imm) __arm_vrshrq_n_s32(__a, __imm)
#define vqshlq_n_s32(__a, __imm) __arm_vqshlq_n_s32(__a, __imm)
/* Move/saturating narrowing (vqmovn/vqmovun/vmovn), polynomial and
   doubling widening multiplies, shift-long-immediate, and the f16/f32
   floating-point intrinsics (compares, min/max NM family, complex
   multiply/add rotations), plus the vctp*_m predicate and 64-bit
   accumulating reductions.  Each name forwards to its __arm_-prefixed
   implementation defined later in this header.  */
#define vqmovntq_u16(__a, __b) __arm_vqmovntq_u16(__a, __b)
#define vqmovnbq_u16(__a, __b) __arm_vqmovnbq_u16(__a, __b)
#define vmulltq_poly_p8(__a, __b) __arm_vmulltq_poly_p8(__a, __b)
#define vmullbq_poly_p8(__a, __b) __arm_vmullbq_poly_p8(__a, __b)
#define vmovntq_u16(__a, __b) __arm_vmovntq_u16(__a, __b)
#define vmovnbq_u16(__a, __b) __arm_vmovnbq_u16(__a, __b)
#define vmlaldavq_u16(__a, __b) __arm_vmlaldavq_u16(__a, __b)
#define vqmovuntq_s16(__a, __b) __arm_vqmovuntq_s16(__a, __b)
#define vqmovunbq_s16(__a, __b) __arm_vqmovunbq_s16(__a, __b)
#define vshlltq_n_u8(__a, __imm) __arm_vshlltq_n_u8(__a, __imm)
#define vshllbq_n_u8(__a, __imm) __arm_vshllbq_n_u8(__a, __imm)
#define vorrq_n_u16(__a, __imm) __arm_vorrq_n_u16(__a, __imm)
#define vbicq_n_u16(__a, __imm) __arm_vbicq_n_u16(__a, __imm)
#define vcmpneq_n_f16(__a, __b) __arm_vcmpneq_n_f16(__a, __b)
#define vcmpneq_f16(__a, __b) __arm_vcmpneq_f16(__a, __b)
#define vcmpltq_n_f16(__a, __b) __arm_vcmpltq_n_f16(__a, __b)
#define vcmpltq_f16(__a, __b) __arm_vcmpltq_f16(__a, __b)
#define vcmpleq_n_f16(__a, __b) __arm_vcmpleq_n_f16(__a, __b)
#define vcmpleq_f16(__a, __b) __arm_vcmpleq_f16(__a, __b)
#define vcmpgtq_n_f16(__a, __b) __arm_vcmpgtq_n_f16(__a, __b)
#define vcmpgtq_f16(__a, __b) __arm_vcmpgtq_f16(__a, __b)
#define vcmpgeq_n_f16(__a, __b) __arm_vcmpgeq_n_f16(__a, __b)
#define vcmpgeq_f16(__a, __b) __arm_vcmpgeq_f16(__a, __b)
#define vcmpeqq_n_f16(__a, __b) __arm_vcmpeqq_n_f16(__a, __b)
#define vcmpeqq_f16(__a, __b) __arm_vcmpeqq_f16(__a, __b)
#define vsubq_f16(__a, __b) __arm_vsubq_f16(__a, __b)
#define vqmovntq_s16(__a, __b) __arm_vqmovntq_s16(__a, __b)
#define vqmovnbq_s16(__a, __b) __arm_vqmovnbq_s16(__a, __b)
#define vqdmulltq_s16(__a, __b) __arm_vqdmulltq_s16(__a, __b)
#define vqdmulltq_n_s16(__a, __b) __arm_vqdmulltq_n_s16(__a, __b)
#define vqdmullbq_s16(__a, __b) __arm_vqdmullbq_s16(__a, __b)
#define vqdmullbq_n_s16(__a, __b) __arm_vqdmullbq_n_s16(__a, __b)
#define vorrq_f16(__a, __b) __arm_vorrq_f16(__a, __b)
#define vornq_f16(__a, __b) __arm_vornq_f16(__a, __b)
#define vmulq_n_f16(__a, __b) __arm_vmulq_n_f16(__a, __b)
#define vmulq_f16(__a, __b) __arm_vmulq_f16(__a, __b)
#define vmovntq_s16(__a, __b) __arm_vmovntq_s16(__a, __b)
#define vmovnbq_s16(__a, __b) __arm_vmovnbq_s16(__a, __b)
#define vmlsldavxq_s16(__a, __b) __arm_vmlsldavxq_s16(__a, __b)
#define vmlsldavq_s16(__a, __b) __arm_vmlsldavq_s16(__a, __b)
#define vmlaldavxq_s16(__a, __b) __arm_vmlaldavxq_s16(__a, __b)
#define vmlaldavq_s16(__a, __b) __arm_vmlaldavq_s16(__a, __b)
#define vminnmvq_f16(__a, __b) __arm_vminnmvq_f16(__a, __b)
#define vminnmq_f16(__a, __b) __arm_vminnmq_f16(__a, __b)
#define vminnmavq_f16(__a, __b) __arm_vminnmavq_f16(__a, __b)
#define vminnmaq_f16(__a, __b) __arm_vminnmaq_f16(__a, __b)
#define vmaxnmvq_f16(__a, __b) __arm_vmaxnmvq_f16(__a, __b)
#define vmaxnmq_f16(__a, __b) __arm_vmaxnmq_f16(__a, __b)
#define vmaxnmavq_f16(__a, __b) __arm_vmaxnmavq_f16(__a, __b)
#define vmaxnmaq_f16(__a, __b) __arm_vmaxnmaq_f16(__a, __b)
#define veorq_f16(__a, __b) __arm_veorq_f16(__a, __b)
#define vcmulq_rot90_f16(__a, __b) __arm_vcmulq_rot90_f16(__a, __b)
#define vcmulq_rot270_f16(__a, __b) __arm_vcmulq_rot270_f16(__a, __b)
#define vcmulq_rot180_f16(__a, __b) __arm_vcmulq_rot180_f16(__a, __b)
#define vcmulq_f16(__a, __b) __arm_vcmulq_f16(__a, __b)
#define vcaddq_rot90_f16(__a, __b) __arm_vcaddq_rot90_f16(__a, __b)
#define vcaddq_rot270_f16(__a, __b) __arm_vcaddq_rot270_f16(__a, __b)
#define vbicq_f16(__a, __b) __arm_vbicq_f16(__a, __b)
#define vandq_f16(__a, __b) __arm_vandq_f16(__a, __b)
#define vaddq_n_f16(__a, __b) __arm_vaddq_n_f16(__a, __b)
#define vabdq_f16(__a, __b) __arm_vabdq_f16(__a, __b)
#define vshlltq_n_s8(__a, __imm) __arm_vshlltq_n_s8(__a, __imm)
#define vshllbq_n_s8(__a, __imm) __arm_vshllbq_n_s8(__a, __imm)
#define vorrq_n_s16(__a, __imm) __arm_vorrq_n_s16(__a, __imm)
#define vbicq_n_s16(__a, __imm) __arm_vbicq_n_s16(__a, __imm)
#define vqmovntq_u32(__a, __b) __arm_vqmovntq_u32(__a, __b)
#define vqmovnbq_u32(__a, __b) __arm_vqmovnbq_u32(__a, __b)
#define vmulltq_poly_p16(__a, __b) __arm_vmulltq_poly_p16(__a, __b)
#define vmullbq_poly_p16(__a, __b) __arm_vmullbq_poly_p16(__a, __b)
#define vmovntq_u32(__a, __b) __arm_vmovntq_u32(__a, __b)
#define vmovnbq_u32(__a, __b) __arm_vmovnbq_u32(__a, __b)
#define vmlaldavq_u32(__a, __b) __arm_vmlaldavq_u32(__a, __b)
#define vqmovuntq_s32(__a, __b) __arm_vqmovuntq_s32(__a, __b)
#define vqmovunbq_s32(__a, __b) __arm_vqmovunbq_s32(__a, __b)
#define vshlltq_n_u16(__a, __imm) __arm_vshlltq_n_u16(__a, __imm)
#define vshllbq_n_u16(__a, __imm) __arm_vshllbq_n_u16(__a, __imm)
#define vorrq_n_u32(__a, __imm) __arm_vorrq_n_u32(__a, __imm)
#define vbicq_n_u32(__a, __imm) __arm_vbicq_n_u32(__a, __imm)
#define vcmpneq_n_f32(__a, __b) __arm_vcmpneq_n_f32(__a, __b)
#define vcmpneq_f32(__a, __b) __arm_vcmpneq_f32(__a, __b)
#define vcmpltq_n_f32(__a, __b) __arm_vcmpltq_n_f32(__a, __b)
#define vcmpltq_f32(__a, __b) __arm_vcmpltq_f32(__a, __b)
#define vcmpleq_n_f32(__a, __b) __arm_vcmpleq_n_f32(__a, __b)
#define vcmpleq_f32(__a, __b) __arm_vcmpleq_f32(__a, __b)
#define vcmpgtq_n_f32(__a, __b) __arm_vcmpgtq_n_f32(__a, __b)
#define vcmpgtq_f32(__a, __b) __arm_vcmpgtq_f32(__a, __b)
#define vcmpgeq_n_f32(__a, __b) __arm_vcmpgeq_n_f32(__a, __b)
#define vcmpgeq_f32(__a, __b) __arm_vcmpgeq_f32(__a, __b)
#define vcmpeqq_n_f32(__a, __b) __arm_vcmpeqq_n_f32(__a, __b)
#define vcmpeqq_f32(__a, __b) __arm_vcmpeqq_f32(__a, __b)
#define vsubq_f32(__a, __b) __arm_vsubq_f32(__a, __b)
#define vqmovntq_s32(__a, __b) __arm_vqmovntq_s32(__a, __b)
#define vqmovnbq_s32(__a, __b) __arm_vqmovnbq_s32(__a, __b)
#define vqdmulltq_s32(__a, __b) __arm_vqdmulltq_s32(__a, __b)
#define vqdmulltq_n_s32(__a, __b) __arm_vqdmulltq_n_s32(__a, __b)
#define vqdmullbq_s32(__a, __b) __arm_vqdmullbq_s32(__a, __b)
#define vqdmullbq_n_s32(__a, __b) __arm_vqdmullbq_n_s32(__a, __b)
#define vorrq_f32(__a, __b) __arm_vorrq_f32(__a, __b)
#define vornq_f32(__a, __b) __arm_vornq_f32(__a, __b)
#define vmulq_n_f32(__a, __b) __arm_vmulq_n_f32(__a, __b)
#define vmulq_f32(__a, __b) __arm_vmulq_f32(__a, __b)
#define vmovntq_s32(__a, __b) __arm_vmovntq_s32(__a, __b)
#define vmovnbq_s32(__a, __b) __arm_vmovnbq_s32(__a, __b)
#define vmlsldavxq_s32(__a, __b) __arm_vmlsldavxq_s32(__a, __b)
#define vmlsldavq_s32(__a, __b) __arm_vmlsldavq_s32(__a, __b)
#define vmlaldavxq_s32(__a, __b) __arm_vmlaldavxq_s32(__a, __b)
#define vmlaldavq_s32(__a, __b) __arm_vmlaldavq_s32(__a, __b)
#define vminnmvq_f32(__a, __b) __arm_vminnmvq_f32(__a, __b)
#define vminnmq_f32(__a, __b) __arm_vminnmq_f32(__a, __b)
#define vminnmavq_f32(__a, __b) __arm_vminnmavq_f32(__a, __b)
#define vminnmaq_f32(__a, __b) __arm_vminnmaq_f32(__a, __b)
#define vmaxnmvq_f32(__a, __b) __arm_vmaxnmvq_f32(__a, __b)
#define vmaxnmq_f32(__a, __b) __arm_vmaxnmq_f32(__a, __b)
#define vmaxnmavq_f32(__a, __b) __arm_vmaxnmavq_f32(__a, __b)
#define vmaxnmaq_f32(__a, __b) __arm_vmaxnmaq_f32(__a, __b)
#define veorq_f32(__a, __b) __arm_veorq_f32(__a, __b)
#define vcmulq_rot90_f32(__a, __b) __arm_vcmulq_rot90_f32(__a, __b)
#define vcmulq_rot270_f32(__a, __b) __arm_vcmulq_rot270_f32(__a, __b)
#define vcmulq_rot180_f32(__a, __b) __arm_vcmulq_rot180_f32(__a, __b)
#define vcmulq_f32(__a, __b) __arm_vcmulq_f32(__a, __b)
#define vcaddq_rot90_f32(__a, __b) __arm_vcaddq_rot90_f32(__a, __b)
#define vcaddq_rot270_f32(__a, __b) __arm_vcaddq_rot270_f32(__a, __b)
#define vbicq_f32(__a, __b) __arm_vbicq_f32(__a, __b)
#define vandq_f32(__a, __b) __arm_vandq_f32(__a, __b)
#define vaddq_n_f32(__a, __b) __arm_vaddq_n_f32(__a, __b)
#define vabdq_f32(__a, __b) __arm_vabdq_f32(__a, __b)
#define vshlltq_n_s16(__a, __imm) __arm_vshlltq_n_s16(__a, __imm)
#define vshllbq_n_s16(__a, __imm) __arm_vshllbq_n_s16(__a, __imm)
#define vorrq_n_s32(__a, __imm) __arm_vorrq_n_s32(__a, __imm)
#define vbicq_n_s32(__a, __imm) __arm_vbicq_n_s32(__a, __imm)
#define vrmlaldavhq_u32(__a, __b) __arm_vrmlaldavhq_u32(__a, __b)
#define vctp8q_m(__a, __p) __arm_vctp8q_m(__a, __p)
#define vctp64q_m(__a, __p) __arm_vctp64q_m(__a, __p)
#define vctp32q_m(__a, __p) __arm_vctp32q_m(__a, __p)
#define vctp16q_m(__a, __p) __arm_vctp16q_m(__a, __p)
#define vaddlvaq_u32(__a, __b) __arm_vaddlvaq_u32(__a, __b)
#define vrmlsldavhxq_s32(__a, __b) __arm_vrmlsldavhxq_s32(__a, __b)
#define vrmlsldavhq_s32(__a, __b) __arm_vrmlsldavhq_s32(__a, __b)
#define vrmlaldavhxq_s32(__a, __b) __arm_vrmlaldavhxq_s32(__a, __b)
#define vrmlaldavhq_s32(__a, __b) __arm_vrmlaldavhq_s32(__a, __b)
#define vcvttq_f16_f32(__a, __b) __arm_vcvttq_f16_f32(__a, __b)
#define vcvtbq_f16_f32(__a, __b) __arm_vcvtbq_f16_f32(__a, __b)
#define vaddlvaq_s32(__a, __b) __arm_vaddlvaq_s32(__a, __b)
/* Three-operand intrinsics: absolute-difference-accumulate (vabav),
   predicated bit-clear-immediate, predicated conversions (the first
   operand supplies the inactive lanes), saturating rounding narrowing
   shifts, 64-bit rounding multiply-accumulate reductions, and the
   shift-left-with-carry (vshlc) family.  Each name forwards to its
   __arm_-prefixed implementation defined later in this header.  */
#define vabavq_s8(__a, __b, __c) __arm_vabavq_s8(__a, __b, __c)
#define vabavq_s16(__a, __b, __c) __arm_vabavq_s16(__a, __b, __c)
#define vabavq_s32(__a, __b, __c) __arm_vabavq_s32(__a, __b, __c)
#define vbicq_m_n_s16(__a, __imm, __p) __arm_vbicq_m_n_s16(__a, __imm, __p)
#define vbicq_m_n_s32(__a, __imm, __p) __arm_vbicq_m_n_s32(__a, __imm, __p)
#define vbicq_m_n_u16(__a, __imm, __p) __arm_vbicq_m_n_u16(__a, __imm, __p)
#define vbicq_m_n_u32(__a, __imm, __p) __arm_vbicq_m_n_u32(__a, __imm, __p)
#define vcmpeqq_m_f16(__a, __b, __p) __arm_vcmpeqq_m_f16(__a, __b, __p)
#define vcmpeqq_m_f32(__a, __b, __p) __arm_vcmpeqq_m_f32(__a, __b, __p)
#define vcvtaq_m_s16_f16(__inactive, __a, __p) __arm_vcvtaq_m_s16_f16(__inactive, __a, __p)
#define vcvtaq_m_u16_f16(__inactive, __a, __p) __arm_vcvtaq_m_u16_f16(__inactive, __a, __p)
#define vcvtaq_m_s32_f32(__inactive, __a, __p) __arm_vcvtaq_m_s32_f32(__inactive, __a, __p)
#define vcvtaq_m_u32_f32(__inactive, __a, __p) __arm_vcvtaq_m_u32_f32(__inactive, __a, __p)
#define vcvtq_m_f16_s16(__inactive, __a, __p) __arm_vcvtq_m_f16_s16(__inactive, __a, __p)
#define vcvtq_m_f16_u16(__inactive, __a, __p) __arm_vcvtq_m_f16_u16(__inactive, __a, __p)
#define vcvtq_m_f32_s32(__inactive, __a, __p) __arm_vcvtq_m_f32_s32(__inactive, __a, __p)
#define vcvtq_m_f32_u32(__inactive, __a, __p) __arm_vcvtq_m_f32_u32(__inactive, __a, __p)
#define vqrshrnbq_n_s16(__a, __b, __imm) __arm_vqrshrnbq_n_s16(__a, __b, __imm)
#define vqrshrnbq_n_u16(__a, __b, __imm) __arm_vqrshrnbq_n_u16(__a, __b, __imm)
#define vqrshrnbq_n_s32(__a, __b, __imm) __arm_vqrshrnbq_n_s32(__a, __b, __imm)
#define vqrshrnbq_n_u32(__a, __b, __imm) __arm_vqrshrnbq_n_u32(__a, __b, __imm)
#define vqrshrunbq_n_s16(__a, __b, __imm) __arm_vqrshrunbq_n_s16(__a, __b, __imm)
#define vqrshrunbq_n_s32(__a, __b, __imm) __arm_vqrshrunbq_n_s32(__a, __b, __imm)
#define vrmlaldavhaq_s32(__a, __b, __c) __arm_vrmlaldavhaq_s32(__a, __b, __c)
#define vrmlaldavhaq_u32(__a, __b, __c) __arm_vrmlaldavhaq_u32(__a, __b, __c)
#define vshlcq_s8(__a, __b, __imm) __arm_vshlcq_s8(__a, __b, __imm)
#define vshlcq_u8(__a, __b, __imm) __arm_vshlcq_u8(__a, __b, __imm)
#define vshlcq_s16(__a, __b, __imm) __arm_vshlcq_s16(__a, __b, __imm)
#define vshlcq_u16(__a, __b, __imm) __arm_vshlcq_u16(__a, __b, __imm)
#define vshlcq_s32(__a, __b, __imm) __arm_vshlcq_s32(__a, __b, __imm)
#define vshlcq_u32(__a, __b, __imm) __arm_vshlcq_u32(__a, __b, __imm)
#define vabavq_u8(__a, __b, __c) __arm_vabavq_u8(__a, __b, __c)
#define vabavq_u16(__a, __b, __c) __arm_vabavq_u16(__a, __b, __c)
#define vabavq_u32(__a, __b, __c) __arm_vabavq_u32(__a, __b, __c)
/* Predicated (_m / _p suffixed) and multiply-accumulate intrinsics for
   the 8-, 16- and 32-bit element sizes: predicate select (vpsel),
   merging unary ops taking an __inactive vector, predicated compares,
   reductions with predicate, shift-and-insert (vsri/vsli), and the
   (rounding/saturating) multiply-accumulate families.  Each name
   forwards to its __arm_-prefixed implementation defined later in this
   header.  */
#define vpselq_u8(__a, __b, __p) __arm_vpselq_u8(__a, __b, __p)
#define vpselq_s8(__a, __b, __p) __arm_vpselq_s8(__a, __b, __p)
#define vrev64q_m_u8(__inactive, __a, __p) __arm_vrev64q_m_u8(__inactive, __a, __p)
#define vqrdmlashq_n_u8(__a, __b, __c) __arm_vqrdmlashq_n_u8(__a, __b, __c)
#define vqrdmlahq_n_u8(__a, __b, __c) __arm_vqrdmlahq_n_u8(__a, __b, __c)
#define vqdmlahq_n_u8(__a, __b, __c) __arm_vqdmlahq_n_u8(__a, __b, __c)
#define vmvnq_m_u8(__inactive, __a, __p) __arm_vmvnq_m_u8(__inactive, __a, __p)
#define vmlasq_n_u8(__a, __b, __c) __arm_vmlasq_n_u8(__a, __b, __c)
#define vmlaq_n_u8(__a, __b, __c) __arm_vmlaq_n_u8(__a, __b, __c)
#define vmladavq_p_u8(__a, __b, __p) __arm_vmladavq_p_u8(__a, __b, __p)
#define vmladavaq_u8(__a, __b, __c) __arm_vmladavaq_u8(__a, __b, __c)
#define vminvq_p_u8(__a, __b, __p) __arm_vminvq_p_u8(__a, __b, __p)
#define vmaxvq_p_u8(__a, __b, __p) __arm_vmaxvq_p_u8(__a, __b, __p)
#define vdupq_m_n_u8(__inactive, __a, __p) __arm_vdupq_m_n_u8(__inactive, __a, __p)
#define vcmpneq_m_u8(__a, __b, __p) __arm_vcmpneq_m_u8(__a, __b, __p)
#define vcmpneq_m_n_u8(__a, __b, __p) __arm_vcmpneq_m_n_u8(__a, __b, __p)
#define vcmphiq_m_u8(__a, __b, __p) __arm_vcmphiq_m_u8(__a, __b, __p)
#define vcmphiq_m_n_u8(__a, __b, __p) __arm_vcmphiq_m_n_u8(__a, __b, __p)
#define vcmpeqq_m_u8(__a, __b, __p) __arm_vcmpeqq_m_u8(__a, __b, __p)
#define vcmpeqq_m_n_u8(__a, __b, __p) __arm_vcmpeqq_m_n_u8(__a, __b, __p)
#define vcmpcsq_m_u8(__a, __b, __p) __arm_vcmpcsq_m_u8(__a, __b, __p)
#define vcmpcsq_m_n_u8(__a, __b, __p) __arm_vcmpcsq_m_n_u8(__a, __b, __p)
#define vclzq_m_u8(__inactive, __a, __p) __arm_vclzq_m_u8(__inactive, __a, __p)
#define vaddvaq_p_u8(__a, __b, __p) __arm_vaddvaq_p_u8(__a, __b, __p)
#define vsriq_n_u8(__a, __b, __imm) __arm_vsriq_n_u8(__a, __b, __imm)
#define vsliq_n_u8(__a, __b, __imm) __arm_vsliq_n_u8(__a, __b, __imm)
#define vshlq_m_r_u8(__a, __b, __p) __arm_vshlq_m_r_u8(__a, __b, __p)
#define vrshlq_m_n_u8(__a, __b, __p) __arm_vrshlq_m_n_u8(__a, __b, __p)
#define vqshlq_m_r_u8(__a, __b, __p) __arm_vqshlq_m_r_u8(__a, __b, __p)
#define vqrshlq_m_n_u8(__a, __b, __p) __arm_vqrshlq_m_n_u8(__a, __b, __p)
#define vminavq_p_s8(__a, __b, __p) __arm_vminavq_p_s8(__a, __b, __p)
#define vminaq_m_s8(__a, __b, __p) __arm_vminaq_m_s8(__a, __b, __p)
#define vmaxavq_p_s8(__a, __b, __p) __arm_vmaxavq_p_s8(__a, __b, __p)
#define vmaxaq_m_s8(__a, __b, __p) __arm_vmaxaq_m_s8(__a, __b, __p)
#define vcmpneq_m_s8(__a, __b, __p) __arm_vcmpneq_m_s8(__a, __b, __p)
#define vcmpneq_m_n_s8(__a, __b, __p) __arm_vcmpneq_m_n_s8(__a, __b, __p)
#define vcmpltq_m_s8(__a, __b, __p) __arm_vcmpltq_m_s8(__a, __b, __p)
#define vcmpltq_m_n_s8(__a, __b, __p) __arm_vcmpltq_m_n_s8(__a, __b, __p)
#define vcmpleq_m_s8(__a, __b, __p) __arm_vcmpleq_m_s8(__a, __b, __p)
#define vcmpleq_m_n_s8(__a, __b, __p) __arm_vcmpleq_m_n_s8(__a, __b, __p)
#define vcmpgtq_m_s8(__a, __b, __p) __arm_vcmpgtq_m_s8(__a, __b, __p)
#define vcmpgtq_m_n_s8(__a, __b, __p) __arm_vcmpgtq_m_n_s8(__a, __b, __p)
#define vcmpgeq_m_s8(__a, __b, __p) __arm_vcmpgeq_m_s8(__a, __b, __p)
#define vcmpgeq_m_n_s8(__a, __b, __p) __arm_vcmpgeq_m_n_s8(__a, __b, __p)
#define vcmpeqq_m_s8(__a, __b, __p) __arm_vcmpeqq_m_s8(__a, __b, __p)
#define vcmpeqq_m_n_s8(__a, __b, __p) __arm_vcmpeqq_m_n_s8(__a, __b, __p)
#define vshlq_m_r_s8(__a, __b, __p) __arm_vshlq_m_r_s8(__a, __b, __p)
#define vrshlq_m_n_s8(__a, __b, __p) __arm_vrshlq_m_n_s8(__a, __b, __p)
#define vrev64q_m_s8(__inactive, __a, __p) __arm_vrev64q_m_s8(__inactive, __a, __p)
#define vqshlq_m_r_s8(__a, __b, __p) __arm_vqshlq_m_r_s8(__a, __b, __p)
#define vqrshlq_m_n_s8(__a, __b, __p) __arm_vqrshlq_m_n_s8(__a, __b, __p)
#define vqnegq_m_s8(__inactive, __a, __p) __arm_vqnegq_m_s8(__inactive, __a, __p)
#define vqabsq_m_s8(__inactive, __a, __p) __arm_vqabsq_m_s8(__inactive, __a, __p)
#define vnegq_m_s8(__inactive, __a, __p) __arm_vnegq_m_s8(__inactive, __a, __p)
#define vmvnq_m_s8(__inactive, __a, __p) __arm_vmvnq_m_s8(__inactive, __a, __p)
#define vmlsdavxq_p_s8(__a, __b, __p) __arm_vmlsdavxq_p_s8(__a, __b, __p)
#define vmlsdavq_p_s8(__a, __b, __p) __arm_vmlsdavq_p_s8(__a, __b, __p)
#define vmladavxq_p_s8(__a, __b, __p) __arm_vmladavxq_p_s8(__a, __b, __p)
#define vmladavq_p_s8(__a, __b, __p) __arm_vmladavq_p_s8(__a, __b, __p)
#define vminvq_p_s8(__a, __b, __p) __arm_vminvq_p_s8(__a, __b, __p)
#define vmaxvq_p_s8(__a, __b, __p) __arm_vmaxvq_p_s8(__a, __b, __p)
#define vdupq_m_n_s8(__inactive, __a, __p) __arm_vdupq_m_n_s8(__inactive, __a, __p)
#define vclzq_m_s8(__inactive, __a, __p) __arm_vclzq_m_s8(__inactive, __a, __p)
#define vclsq_m_s8(__inactive, __a, __p) __arm_vclsq_m_s8(__inactive, __a, __p)
#define vaddvaq_p_s8(__a, __b, __p) __arm_vaddvaq_p_s8(__a, __b, __p)
#define vabsq_m_s8(__inactive, __a, __p) __arm_vabsq_m_s8(__inactive, __a, __p)
#define vqrdmlsdhxq_s8(__inactive, __a, __b) __arm_vqrdmlsdhxq_s8(__inactive, __a, __b)
#define vqrdmlsdhq_s8(__inactive, __a, __b) __arm_vqrdmlsdhq_s8(__inactive, __a, __b)
#define vqrdmlashq_n_s8(__a, __b, __c) __arm_vqrdmlashq_n_s8(__a, __b, __c)
#define vqrdmlahq_n_s8(__a, __b, __c) __arm_vqrdmlahq_n_s8(__a, __b, __c)
#define vqrdmladhxq_s8(__inactive, __a, __b) __arm_vqrdmladhxq_s8(__inactive, __a, __b)
#define vqrdmladhq_s8(__inactive, __a, __b) __arm_vqrdmladhq_s8(__inactive, __a, __b)
#define vqdmlsdhxq_s8(__inactive, __a, __b) __arm_vqdmlsdhxq_s8(__inactive, __a, __b)
#define vqdmlsdhq_s8(__inactive, __a, __b) __arm_vqdmlsdhq_s8(__inactive, __a, __b)
#define vqdmlahq_n_s8(__a, __b, __c) __arm_vqdmlahq_n_s8(__a, __b, __c)
#define vqdmladhxq_s8(__inactive, __a, __b) __arm_vqdmladhxq_s8(__inactive, __a, __b)
#define vqdmladhq_s8(__inactive, __a, __b) __arm_vqdmladhq_s8(__inactive, __a, __b)
#define vmlsdavaxq_s8(__a, __b, __c) __arm_vmlsdavaxq_s8(__a, __b, __c)
#define vmlsdavaq_s8(__a, __b, __c) __arm_vmlsdavaq_s8(__a, __b, __c)
#define vmlasq_n_s8(__a, __b, __c) __arm_vmlasq_n_s8(__a, __b, __c)
#define vmlaq_n_s8(__a, __b, __c) __arm_vmlaq_n_s8(__a, __b, __c)
#define vmladavaxq_s8(__a, __b, __c) __arm_vmladavaxq_s8(__a, __b, __c)
#define vmladavaq_s8(__a, __b, __c) __arm_vmladavaq_s8(__a, __b, __c)
#define vsriq_n_s8(__a, __b, __imm) __arm_vsriq_n_s8(__a, __b, __imm)
#define vsliq_n_s8(__a, __b, __imm) __arm_vsliq_n_s8(__a, __b, __imm)
#define vpselq_u16(__a, __b, __p) __arm_vpselq_u16(__a, __b, __p)
#define vpselq_s16(__a, __b, __p) __arm_vpselq_s16(__a, __b, __p)
#define vrev64q_m_u16(__inactive, __a, __p) __arm_vrev64q_m_u16(__inactive, __a, __p)
#define vqrdmlashq_n_u16(__a, __b, __c) __arm_vqrdmlashq_n_u16(__a, __b, __c)
#define vqrdmlahq_n_u16(__a, __b, __c) __arm_vqrdmlahq_n_u16(__a, __b, __c)
#define vqdmlahq_n_u16(__a, __b, __c) __arm_vqdmlahq_n_u16(__a, __b, __c)
#define vmvnq_m_u16(__inactive, __a, __p) __arm_vmvnq_m_u16(__inactive, __a, __p)
#define vmlasq_n_u16(__a, __b, __c) __arm_vmlasq_n_u16(__a, __b, __c)
#define vmlaq_n_u16(__a, __b, __c) __arm_vmlaq_n_u16(__a, __b, __c)
#define vmladavq_p_u16(__a, __b, __p) __arm_vmladavq_p_u16(__a, __b, __p)
#define vmladavaq_u16(__a, __b, __c) __arm_vmladavaq_u16(__a, __b, __c)
#define vminvq_p_u16(__a, __b, __p) __arm_vminvq_p_u16(__a, __b, __p)
#define vmaxvq_p_u16(__a, __b, __p) __arm_vmaxvq_p_u16(__a, __b, __p)
#define vdupq_m_n_u16(__inactive, __a, __p) __arm_vdupq_m_n_u16(__inactive, __a, __p)
#define vcmpneq_m_u16(__a, __b, __p) __arm_vcmpneq_m_u16(__a, __b, __p)
#define vcmpneq_m_n_u16(__a, __b, __p) __arm_vcmpneq_m_n_u16(__a, __b, __p)
#define vcmphiq_m_u16(__a, __b, __p) __arm_vcmphiq_m_u16(__a, __b, __p)
#define vcmphiq_m_n_u16(__a, __b, __p) __arm_vcmphiq_m_n_u16(__a, __b, __p)
#define vcmpeqq_m_u16(__a, __b, __p) __arm_vcmpeqq_m_u16(__a, __b, __p)
#define vcmpeqq_m_n_u16(__a, __b, __p) __arm_vcmpeqq_m_n_u16(__a, __b, __p)
#define vcmpcsq_m_u16(__a, __b, __p) __arm_vcmpcsq_m_u16(__a, __b, __p)
#define vcmpcsq_m_n_u16(__a, __b, __p) __arm_vcmpcsq_m_n_u16(__a, __b, __p)
#define vclzq_m_u16(__inactive, __a, __p) __arm_vclzq_m_u16(__inactive, __a, __p)
#define vaddvaq_p_u16(__a, __b, __p) __arm_vaddvaq_p_u16(__a, __b, __p)
#define vsriq_n_u16(__a, __b, __imm) __arm_vsriq_n_u16(__a, __b, __imm)
#define vsliq_n_u16(__a, __b, __imm) __arm_vsliq_n_u16(__a, __b, __imm)
#define vshlq_m_r_u16(__a, __b, __p) __arm_vshlq_m_r_u16(__a, __b, __p)
#define vrshlq_m_n_u16(__a, __b, __p) __arm_vrshlq_m_n_u16(__a, __b, __p)
#define vqshlq_m_r_u16(__a, __b, __p) __arm_vqshlq_m_r_u16(__a, __b, __p)
#define vqrshlq_m_n_u16(__a, __b, __p) __arm_vqrshlq_m_n_u16(__a, __b, __p)
#define vminavq_p_s16(__a, __b, __p) __arm_vminavq_p_s16(__a, __b, __p)
#define vminaq_m_s16(__a, __b, __p) __arm_vminaq_m_s16(__a, __b, __p)
#define vmaxavq_p_s16(__a, __b, __p) __arm_vmaxavq_p_s16(__a, __b, __p)
#define vmaxaq_m_s16(__a, __b, __p) __arm_vmaxaq_m_s16(__a, __b, __p)
#define vcmpneq_m_s16(__a, __b, __p) __arm_vcmpneq_m_s16(__a, __b, __p)
#define vcmpneq_m_n_s16(__a, __b, __p) __arm_vcmpneq_m_n_s16(__a, __b, __p)
#define vcmpltq_m_s16(__a, __b, __p) __arm_vcmpltq_m_s16(__a, __b, __p)
#define vcmpltq_m_n_s16(__a, __b, __p) __arm_vcmpltq_m_n_s16(__a, __b, __p)
#define vcmpleq_m_s16(__a, __b, __p) __arm_vcmpleq_m_s16(__a, __b, __p)
#define vcmpleq_m_n_s16(__a, __b, __p) __arm_vcmpleq_m_n_s16(__a, __b, __p)
#define vcmpgtq_m_s16(__a, __b, __p) __arm_vcmpgtq_m_s16(__a, __b, __p)
#define vcmpgtq_m_n_s16(__a, __b, __p) __arm_vcmpgtq_m_n_s16(__a, __b, __p)
#define vcmpgeq_m_s16(__a, __b, __p) __arm_vcmpgeq_m_s16(__a, __b, __p)
#define vcmpgeq_m_n_s16(__a, __b, __p) __arm_vcmpgeq_m_n_s16(__a, __b, __p)
#define vcmpeqq_m_s16(__a, __b, __p) __arm_vcmpeqq_m_s16(__a, __b, __p)
#define vcmpeqq_m_n_s16(__a, __b, __p) __arm_vcmpeqq_m_n_s16(__a, __b, __p)
#define vshlq_m_r_s16(__a, __b, __p) __arm_vshlq_m_r_s16(__a, __b, __p)
#define vrshlq_m_n_s16(__a, __b, __p) __arm_vrshlq_m_n_s16(__a, __b, __p)
#define vrev64q_m_s16(__inactive, __a, __p) __arm_vrev64q_m_s16(__inactive, __a, __p)
#define vqshlq_m_r_s16(__a, __b, __p) __arm_vqshlq_m_r_s16(__a, __b, __p)
#define vqrshlq_m_n_s16(__a, __b, __p) __arm_vqrshlq_m_n_s16(__a, __b, __p)
#define vqnegq_m_s16(__inactive, __a, __p) __arm_vqnegq_m_s16(__inactive, __a, __p)
#define vqabsq_m_s16(__inactive, __a, __p) __arm_vqabsq_m_s16(__inactive, __a, __p)
#define vnegq_m_s16(__inactive, __a, __p) __arm_vnegq_m_s16(__inactive, __a, __p)
#define vmvnq_m_s16(__inactive, __a, __p) __arm_vmvnq_m_s16(__inactive, __a, __p)
#define vmlsdavxq_p_s16(__a, __b, __p) __arm_vmlsdavxq_p_s16(__a, __b, __p)
#define vmlsdavq_p_s16(__a, __b, __p) __arm_vmlsdavq_p_s16(__a, __b, __p)
#define vmladavxq_p_s16(__a, __b, __p) __arm_vmladavxq_p_s16(__a, __b, __p)
#define vmladavq_p_s16(__a, __b, __p) __arm_vmladavq_p_s16(__a, __b, __p)
#define vminvq_p_s16(__a, __b, __p) __arm_vminvq_p_s16(__a, __b, __p)
#define vmaxvq_p_s16(__a, __b, __p) __arm_vmaxvq_p_s16(__a, __b, __p)
#define vdupq_m_n_s16(__inactive, __a, __p) __arm_vdupq_m_n_s16(__inactive, __a, __p)
#define vclzq_m_s16(__inactive, __a, __p) __arm_vclzq_m_s16(__inactive, __a, __p)
#define vclsq_m_s16(__inactive, __a, __p) __arm_vclsq_m_s16(__inactive, __a, __p)
#define vaddvaq_p_s16(__a, __b, __p) __arm_vaddvaq_p_s16(__a, __b, __p)
#define vabsq_m_s16(__inactive, __a, __p) __arm_vabsq_m_s16(__inactive, __a, __p)
#define vqrdmlsdhxq_s16(__inactive, __a, __b) __arm_vqrdmlsdhxq_s16(__inactive, __a, __b)
#define vqrdmlsdhq_s16(__inactive, __a, __b) __arm_vqrdmlsdhq_s16(__inactive, __a, __b)
#define vqrdmlashq_n_s16(__a, __b, __c) __arm_vqrdmlashq_n_s16(__a, __b, __c)
#define vqrdmlahq_n_s16(__a, __b, __c) __arm_vqrdmlahq_n_s16(__a, __b, __c)
#define vqrdmladhxq_s16(__inactive, __a, __b) __arm_vqrdmladhxq_s16(__inactive, __a, __b)
#define vqrdmladhq_s16(__inactive, __a, __b) __arm_vqrdmladhq_s16(__inactive, __a, __b)
#define vqdmlsdhxq_s16(__inactive, __a, __b) __arm_vqdmlsdhxq_s16(__inactive, __a, __b)
#define vqdmlsdhq_s16(__inactive, __a, __b) __arm_vqdmlsdhq_s16(__inactive, __a, __b)
#define vqdmlahq_n_s16(__a, __b, __c) __arm_vqdmlahq_n_s16(__a, __b, __c)
#define vqdmladhxq_s16(__inactive, __a, __b) __arm_vqdmladhxq_s16(__inactive, __a, __b)
#define vqdmladhq_s16(__inactive, __a, __b) __arm_vqdmladhq_s16(__inactive, __a, __b)
#define vmlsdavaxq_s16(__a, __b, __c) __arm_vmlsdavaxq_s16(__a, __b, __c)
#define vmlsdavaq_s16(__a, __b, __c) __arm_vmlsdavaq_s16(__a, __b, __c)
#define vmlasq_n_s16(__a, __b, __c) __arm_vmlasq_n_s16(__a, __b, __c)
#define vmlaq_n_s16(__a, __b, __c) __arm_vmlaq_n_s16(__a, __b, __c)
#define vmladavaxq_s16(__a, __b, __c) __arm_vmladavaxq_s16(__a, __b, __c)
#define vmladavaq_s16(__a, __b, __c) __arm_vmladavaq_s16(__a, __b, __c)
#define vsriq_n_s16(__a, __b, __imm) __arm_vsriq_n_s16(__a, __b, __imm)
#define vsliq_n_s16(__a, __b, __imm) __arm_vsliq_n_s16(__a, __b, __imm)
#define vpselq_u32(__a, __b, __p) __arm_vpselq_u32(__a, __b, __p)
#define vpselq_s32(__a, __b, __p) __arm_vpselq_s32(__a, __b, __p)
#define vrev64q_m_u32(__inactive, __a, __p) __arm_vrev64q_m_u32(__inactive, __a, __p)
#define vqrdmlashq_n_u32(__a, __b, __c) __arm_vqrdmlashq_n_u32(__a, __b, __c)
953 | #define vqrdmlahq_n_u32(__a, __b, __c) __arm_vqrdmlahq_n_u32(__a, __b, __c) | |
954 | #define vqdmlahq_n_u32(__a, __b, __c) __arm_vqdmlahq_n_u32(__a, __b, __c) | |
955 | #define vmvnq_m_u32(__inactive, __a, __p) __arm_vmvnq_m_u32(__inactive, __a, __p) | |
956 | #define vmlasq_n_u32(__a, __b, __c) __arm_vmlasq_n_u32(__a, __b, __c) | |
957 | #define vmlaq_n_u32(__a, __b, __c) __arm_vmlaq_n_u32(__a, __b, __c) | |
958 | #define vmladavq_p_u32(__a, __b, __p) __arm_vmladavq_p_u32(__a, __b, __p) | |
959 | #define vmladavaq_u32(__a, __b, __c) __arm_vmladavaq_u32(__a, __b, __c) | |
960 | #define vminvq_p_u32(__a, __b, __p) __arm_vminvq_p_u32(__a, __b, __p) | |
961 | #define vmaxvq_p_u32(__a, __b, __p) __arm_vmaxvq_p_u32(__a, __b, __p) | |
962 | #define vdupq_m_n_u32(__inactive, __a, __p) __arm_vdupq_m_n_u32(__inactive, __a, __p) | |
963 | #define vcmpneq_m_u32(__a, __b, __p) __arm_vcmpneq_m_u32(__a, __b, __p) | |
964 | #define vcmpneq_m_n_u32(__a, __b, __p) __arm_vcmpneq_m_n_u32(__a, __b, __p) | |
965 | #define vcmphiq_m_u32(__a, __b, __p) __arm_vcmphiq_m_u32(__a, __b, __p) | |
966 | #define vcmphiq_m_n_u32(__a, __b, __p) __arm_vcmphiq_m_n_u32(__a, __b, __p) | |
967 | #define vcmpeqq_m_u32(__a, __b, __p) __arm_vcmpeqq_m_u32(__a, __b, __p) | |
968 | #define vcmpeqq_m_n_u32(__a, __b, __p) __arm_vcmpeqq_m_n_u32(__a, __b, __p) | |
969 | #define vcmpcsq_m_u32(__a, __b, __p) __arm_vcmpcsq_m_u32(__a, __b, __p) | |
970 | #define vcmpcsq_m_n_u32(__a, __b, __p) __arm_vcmpcsq_m_n_u32(__a, __b, __p) | |
971 | #define vclzq_m_u32(__inactive, __a, __p) __arm_vclzq_m_u32(__inactive, __a, __p) | |
972 | #define vaddvaq_p_u32(__a, __b, __p) __arm_vaddvaq_p_u32(__a, __b, __p) | |
973 | #define vsriq_n_u32(__a, __b, __imm) __arm_vsriq_n_u32(__a, __b, __imm) | |
974 | #define vsliq_n_u32(__a, __b, __imm) __arm_vsliq_n_u32(__a, __b, __imm) | |
975 | #define vshlq_m_r_u32(__a, __b, __p) __arm_vshlq_m_r_u32(__a, __b, __p) | |
976 | #define vrshlq_m_n_u32(__a, __b, __p) __arm_vrshlq_m_n_u32(__a, __b, __p) | |
977 | #define vqshlq_m_r_u32(__a, __b, __p) __arm_vqshlq_m_r_u32(__a, __b, __p) | |
978 | #define vqrshlq_m_n_u32(__a, __b, __p) __arm_vqrshlq_m_n_u32(__a, __b, __p) | |
979 | #define vminavq_p_s32(__a, __b, __p) __arm_vminavq_p_s32(__a, __b, __p) | |
980 | #define vminaq_m_s32(__a, __b, __p) __arm_vminaq_m_s32(__a, __b, __p) | |
981 | #define vmaxavq_p_s32(__a, __b, __p) __arm_vmaxavq_p_s32(__a, __b, __p) | |
982 | #define vmaxaq_m_s32(__a, __b, __p) __arm_vmaxaq_m_s32(__a, __b, __p) | |
983 | #define vcmpneq_m_s32(__a, __b, __p) __arm_vcmpneq_m_s32(__a, __b, __p) | |
984 | #define vcmpneq_m_n_s32(__a, __b, __p) __arm_vcmpneq_m_n_s32(__a, __b, __p) | |
985 | #define vcmpltq_m_s32(__a, __b, __p) __arm_vcmpltq_m_s32(__a, __b, __p) | |
986 | #define vcmpltq_m_n_s32(__a, __b, __p) __arm_vcmpltq_m_n_s32(__a, __b, __p) | |
987 | #define vcmpleq_m_s32(__a, __b, __p) __arm_vcmpleq_m_s32(__a, __b, __p) | |
988 | #define vcmpleq_m_n_s32(__a, __b, __p) __arm_vcmpleq_m_n_s32(__a, __b, __p) | |
989 | #define vcmpgtq_m_s32(__a, __b, __p) __arm_vcmpgtq_m_s32(__a, __b, __p) | |
990 | #define vcmpgtq_m_n_s32(__a, __b, __p) __arm_vcmpgtq_m_n_s32(__a, __b, __p) | |
991 | #define vcmpgeq_m_s32(__a, __b, __p) __arm_vcmpgeq_m_s32(__a, __b, __p) | |
992 | #define vcmpgeq_m_n_s32(__a, __b, __p) __arm_vcmpgeq_m_n_s32(__a, __b, __p) | |
993 | #define vcmpeqq_m_s32(__a, __b, __p) __arm_vcmpeqq_m_s32(__a, __b, __p) | |
994 | #define vcmpeqq_m_n_s32(__a, __b, __p) __arm_vcmpeqq_m_n_s32(__a, __b, __p) | |
995 | #define vshlq_m_r_s32(__a, __b, __p) __arm_vshlq_m_r_s32(__a, __b, __p) | |
996 | #define vrshlq_m_n_s32(__a, __b, __p) __arm_vrshlq_m_n_s32(__a, __b, __p) | |
997 | #define vrev64q_m_s32(__inactive, __a, __p) __arm_vrev64q_m_s32(__inactive, __a, __p) | |
998 | #define vqshlq_m_r_s32(__a, __b, __p) __arm_vqshlq_m_r_s32(__a, __b, __p) | |
999 | #define vqrshlq_m_n_s32(__a, __b, __p) __arm_vqrshlq_m_n_s32(__a, __b, __p) | |
1000 | #define vqnegq_m_s32(__inactive, __a, __p) __arm_vqnegq_m_s32(__inactive, __a, __p) | |
1001 | #define vqabsq_m_s32(__inactive, __a, __p) __arm_vqabsq_m_s32(__inactive, __a, __p) | |
1002 | #define vnegq_m_s32(__inactive, __a, __p) __arm_vnegq_m_s32(__inactive, __a, __p) | |
1003 | #define vmvnq_m_s32(__inactive, __a, __p) __arm_vmvnq_m_s32(__inactive, __a, __p) | |
1004 | #define vmlsdavxq_p_s32(__a, __b, __p) __arm_vmlsdavxq_p_s32(__a, __b, __p) | |
1005 | #define vmlsdavq_p_s32(__a, __b, __p) __arm_vmlsdavq_p_s32(__a, __b, __p) | |
1006 | #define vmladavxq_p_s32(__a, __b, __p) __arm_vmladavxq_p_s32(__a, __b, __p) | |
1007 | #define vmladavq_p_s32(__a, __b, __p) __arm_vmladavq_p_s32(__a, __b, __p) | |
1008 | #define vminvq_p_s32(__a, __b, __p) __arm_vminvq_p_s32(__a, __b, __p) | |
1009 | #define vmaxvq_p_s32(__a, __b, __p) __arm_vmaxvq_p_s32(__a, __b, __p) | |
1010 | #define vdupq_m_n_s32(__inactive, __a, __p) __arm_vdupq_m_n_s32(__inactive, __a, __p) | |
1011 | #define vclzq_m_s32(__inactive, __a, __p) __arm_vclzq_m_s32(__inactive, __a, __p) | |
1012 | #define vclsq_m_s32(__inactive, __a, __p) __arm_vclsq_m_s32(__inactive, __a, __p) | |
1013 | #define vaddvaq_p_s32(__a, __b, __p) __arm_vaddvaq_p_s32(__a, __b, __p) | |
1014 | #define vabsq_m_s32(__inactive, __a, __p) __arm_vabsq_m_s32(__inactive, __a, __p) | |
1015 | #define vqrdmlsdhxq_s32(__inactive, __a, __b) __arm_vqrdmlsdhxq_s32(__inactive, __a, __b) | |
1016 | #define vqrdmlsdhq_s32(__inactive, __a, __b) __arm_vqrdmlsdhq_s32(__inactive, __a, __b) | |
1017 | #define vqrdmlashq_n_s32(__a, __b, __c) __arm_vqrdmlashq_n_s32(__a, __b, __c) | |
1018 | #define vqrdmlahq_n_s32(__a, __b, __c) __arm_vqrdmlahq_n_s32(__a, __b, __c) | |
1019 | #define vqrdmladhxq_s32(__inactive, __a, __b) __arm_vqrdmladhxq_s32(__inactive, __a, __b) | |
1020 | #define vqrdmladhq_s32(__inactive, __a, __b) __arm_vqrdmladhq_s32(__inactive, __a, __b) | |
1021 | #define vqdmlsdhxq_s32(__inactive, __a, __b) __arm_vqdmlsdhxq_s32(__inactive, __a, __b) | |
1022 | #define vqdmlsdhq_s32(__inactive, __a, __b) __arm_vqdmlsdhq_s32(__inactive, __a, __b) | |
1023 | #define vqdmlahq_n_s32(__a, __b, __c) __arm_vqdmlahq_n_s32(__a, __b, __c) | |
1024 | #define vqdmladhxq_s32(__inactive, __a, __b) __arm_vqdmladhxq_s32(__inactive, __a, __b) | |
1025 | #define vqdmladhq_s32(__inactive, __a, __b) __arm_vqdmladhq_s32(__inactive, __a, __b) | |
1026 | #define vmlsdavaxq_s32(__a, __b, __c) __arm_vmlsdavaxq_s32(__a, __b, __c) | |
1027 | #define vmlsdavaq_s32(__a, __b, __c) __arm_vmlsdavaq_s32(__a, __b, __c) | |
1028 | #define vmlasq_n_s32(__a, __b, __c) __arm_vmlasq_n_s32(__a, __b, __c) | |
1029 | #define vmlaq_n_s32(__a, __b, __c) __arm_vmlaq_n_s32(__a, __b, __c) | |
1030 | #define vmladavaxq_s32(__a, __b, __c) __arm_vmladavaxq_s32(__a, __b, __c) | |
1031 | #define vmladavaq_s32(__a, __b, __c) __arm_vmladavaq_s32(__a, __b, __c) | |
1032 | #define vsriq_n_s32(__a, __b, __imm) __arm_vsriq_n_s32(__a, __b, __imm) | |
1033 | #define vsliq_n_s32(__a, __b, __imm) __arm_vsliq_n_s32(__a, __b, __imm) | |
1034 | #define vpselq_u64(__a, __b, __p) __arm_vpselq_u64(__a, __b, __p) | |
1035 | #define vpselq_s64(__a, __b, __p) __arm_vpselq_s64(__a, __b, __p) | |
e3678b44 SP |
1036 | #define vrmlaldavhaxq_s32(__a, __b, __c) __arm_vrmlaldavhaxq_s32(__a, __b, __c) |
1037 | #define vrmlsldavhaq_s32(__a, __b, __c) __arm_vrmlsldavhaq_s32(__a, __b, __c) | |
1038 | #define vrmlsldavhaxq_s32(__a, __b, __c) __arm_vrmlsldavhaxq_s32(__a, __b, __c) | |
1039 | #define vaddlvaq_p_s32(__a, __b, __p) __arm_vaddlvaq_p_s32(__a, __b, __p) | |
1040 | #define vcvtbq_m_f16_f32(__a, __b, __p) __arm_vcvtbq_m_f16_f32(__a, __b, __p) | |
1041 | #define vcvtbq_m_f32_f16(__inactive, __a, __p) __arm_vcvtbq_m_f32_f16(__inactive, __a, __p) | |
1042 | #define vcvttq_m_f16_f32(__a, __b, __p) __arm_vcvttq_m_f16_f32(__a, __b, __p) | |
1043 | #define vcvttq_m_f32_f16(__inactive, __a, __p) __arm_vcvttq_m_f32_f16(__inactive, __a, __p) | |
1044 | #define vrev16q_m_s8(__inactive, __a, __p) __arm_vrev16q_m_s8(__inactive, __a, __p) | |
1045 | #define vrev32q_m_f16(__inactive, __a, __p) __arm_vrev32q_m_f16(__inactive, __a, __p) | |
1046 | #define vrmlaldavhq_p_s32(__a, __b, __p) __arm_vrmlaldavhq_p_s32(__a, __b, __p) | |
1047 | #define vrmlaldavhxq_p_s32(__a, __b, __p) __arm_vrmlaldavhxq_p_s32(__a, __b, __p) | |
1048 | #define vrmlsldavhq_p_s32(__a, __b, __p) __arm_vrmlsldavhq_p_s32(__a, __b, __p) | |
1049 | #define vrmlsldavhxq_p_s32(__a, __b, __p) __arm_vrmlsldavhxq_p_s32(__a, __b, __p) | |
1050 | #define vaddlvaq_p_u32(__a, __b, __p) __arm_vaddlvaq_p_u32(__a, __b, __p) | |
1051 | #define vrev16q_m_u8(__inactive, __a, __p) __arm_vrev16q_m_u8(__inactive, __a, __p) | |
1052 | #define vrmlaldavhq_p_u32(__a, __b, __p) __arm_vrmlaldavhq_p_u32(__a, __b, __p) | |
1053 | #define vmvnq_m_n_s16(__inactive, __imm, __p) __arm_vmvnq_m_n_s16(__inactive, __imm, __p) | |
1054 | #define vorrq_m_n_s16(__a, __imm, __p) __arm_vorrq_m_n_s16(__a, __imm, __p) | |
1055 | #define vqrshrntq_n_s16(__a, __b, __imm) __arm_vqrshrntq_n_s16(__a, __b, __imm) | |
1056 | #define vqshrnbq_n_s16(__a, __b, __imm) __arm_vqshrnbq_n_s16(__a, __b, __imm) | |
1057 | #define vqshrntq_n_s16(__a, __b, __imm) __arm_vqshrntq_n_s16(__a, __b, __imm) | |
1058 | #define vrshrnbq_n_s16(__a, __b, __imm) __arm_vrshrnbq_n_s16(__a, __b, __imm) | |
1059 | #define vrshrntq_n_s16(__a, __b, __imm) __arm_vrshrntq_n_s16(__a, __b, __imm) | |
1060 | #define vshrnbq_n_s16(__a, __b, __imm) __arm_vshrnbq_n_s16(__a, __b, __imm) | |
1061 | #define vshrntq_n_s16(__a, __b, __imm) __arm_vshrntq_n_s16(__a, __b, __imm) | |
1062 | #define vcmlaq_f16(__a, __b, __c) __arm_vcmlaq_f16(__a, __b, __c) | |
1063 | #define vcmlaq_rot180_f16(__a, __b, __c) __arm_vcmlaq_rot180_f16(__a, __b, __c) | |
1064 | #define vcmlaq_rot270_f16(__a, __b, __c) __arm_vcmlaq_rot270_f16(__a, __b, __c) | |
1065 | #define vcmlaq_rot90_f16(__a, __b, __c) __arm_vcmlaq_rot90_f16(__a, __b, __c) | |
1066 | #define vfmaq_f16(__a, __b, __c) __arm_vfmaq_f16(__a, __b, __c) | |
1067 | #define vfmaq_n_f16(__a, __b, __c) __arm_vfmaq_n_f16(__a, __b, __c) | |
1068 | #define vfmasq_n_f16(__a, __b, __c) __arm_vfmasq_n_f16(__a, __b, __c) | |
1069 | #define vfmsq_f16(__a, __b, __c) __arm_vfmsq_f16(__a, __b, __c) | |
1070 | #define vmlaldavaq_s16(__a, __b, __c) __arm_vmlaldavaq_s16(__a, __b, __c) | |
1071 | #define vmlaldavaxq_s16(__a, __b, __c) __arm_vmlaldavaxq_s16(__a, __b, __c) | |
1072 | #define vmlsldavaq_s16(__a, __b, __c) __arm_vmlsldavaq_s16(__a, __b, __c) | |
1073 | #define vmlsldavaxq_s16(__a, __b, __c) __arm_vmlsldavaxq_s16(__a, __b, __c) | |
1074 | #define vabsq_m_f16(__inactive, __a, __p) __arm_vabsq_m_f16(__inactive, __a, __p) | |
1075 | #define vcvtmq_m_s16_f16(__inactive, __a, __p) __arm_vcvtmq_m_s16_f16(__inactive, __a, __p) | |
1076 | #define vcvtnq_m_s16_f16(__inactive, __a, __p) __arm_vcvtnq_m_s16_f16(__inactive, __a, __p) | |
1077 | #define vcvtpq_m_s16_f16(__inactive, __a, __p) __arm_vcvtpq_m_s16_f16(__inactive, __a, __p) | |
1078 | #define vcvtq_m_s16_f16(__inactive, __a, __p) __arm_vcvtq_m_s16_f16(__inactive, __a, __p) | |
1079 | #define vdupq_m_n_f16(__inactive, __a, __p) __arm_vdupq_m_n_f16(__inactive, __a, __p) | |
1080 | #define vmaxnmaq_m_f16(__a, __b, __p) __arm_vmaxnmaq_m_f16(__a, __b, __p) | |
1081 | #define vmaxnmavq_p_f16(__a, __b, __p) __arm_vmaxnmavq_p_f16(__a, __b, __p) | |
1082 | #define vmaxnmvq_p_f16(__a, __b, __p) __arm_vmaxnmvq_p_f16(__a, __b, __p) | |
1083 | #define vminnmaq_m_f16(__a, __b, __p) __arm_vminnmaq_m_f16(__a, __b, __p) | |
1084 | #define vminnmavq_p_f16(__a, __b, __p) __arm_vminnmavq_p_f16(__a, __b, __p) | |
1085 | #define vminnmvq_p_f16(__a, __b, __p) __arm_vminnmvq_p_f16(__a, __b, __p) | |
1086 | #define vmlaldavq_p_s16(__a, __b, __p) __arm_vmlaldavq_p_s16(__a, __b, __p) | |
1087 | #define vmlaldavxq_p_s16(__a, __b, __p) __arm_vmlaldavxq_p_s16(__a, __b, __p) | |
1088 | #define vmlsldavq_p_s16(__a, __b, __p) __arm_vmlsldavq_p_s16(__a, __b, __p) | |
1089 | #define vmlsldavxq_p_s16(__a, __b, __p) __arm_vmlsldavxq_p_s16(__a, __b, __p) | |
1090 | #define vmovlbq_m_s8(__inactive, __a, __p) __arm_vmovlbq_m_s8(__inactive, __a, __p) | |
1091 | #define vmovltq_m_s8(__inactive, __a, __p) __arm_vmovltq_m_s8(__inactive, __a, __p) | |
1092 | #define vmovnbq_m_s16(__a, __b, __p) __arm_vmovnbq_m_s16(__a, __b, __p) | |
1093 | #define vmovntq_m_s16(__a, __b, __p) __arm_vmovntq_m_s16(__a, __b, __p) | |
1094 | #define vnegq_m_f16(__inactive, __a, __p) __arm_vnegq_m_f16(__inactive, __a, __p) | |
1095 | #define vpselq_f16(__a, __b, __p) __arm_vpselq_f16(__a, __b, __p) | |
1096 | #define vqmovnbq_m_s16(__a, __b, __p) __arm_vqmovnbq_m_s16(__a, __b, __p) | |
1097 | #define vqmovntq_m_s16(__a, __b, __p) __arm_vqmovntq_m_s16(__a, __b, __p) | |
1098 | #define vrev32q_m_s8(__inactive, __a, __p) __arm_vrev32q_m_s8(__inactive, __a, __p) | |
1099 | #define vrev64q_m_f16(__inactive, __a, __p) __arm_vrev64q_m_f16(__inactive, __a, __p) | |
1100 | #define vrndaq_m_f16(__inactive, __a, __p) __arm_vrndaq_m_f16(__inactive, __a, __p) | |
1101 | #define vrndmq_m_f16(__inactive, __a, __p) __arm_vrndmq_m_f16(__inactive, __a, __p) | |
1102 | #define vrndnq_m_f16(__inactive, __a, __p) __arm_vrndnq_m_f16(__inactive, __a, __p) | |
1103 | #define vrndpq_m_f16(__inactive, __a, __p) __arm_vrndpq_m_f16(__inactive, __a, __p) | |
1104 | #define vrndq_m_f16(__inactive, __a, __p) __arm_vrndq_m_f16(__inactive, __a, __p) | |
1105 | #define vrndxq_m_f16(__inactive, __a, __p) __arm_vrndxq_m_f16(__inactive, __a, __p) | |
1106 | #define vcmpeqq_m_n_f16(__a, __b, __p) __arm_vcmpeqq_m_n_f16(__a, __b, __p) | |
1107 | #define vcmpgeq_m_f16(__a, __b, __p) __arm_vcmpgeq_m_f16(__a, __b, __p) | |
1108 | #define vcmpgeq_m_n_f16(__a, __b, __p) __arm_vcmpgeq_m_n_f16(__a, __b, __p) | |
1109 | #define vcmpgtq_m_f16(__a, __b, __p) __arm_vcmpgtq_m_f16(__a, __b, __p) | |
1110 | #define vcmpgtq_m_n_f16(__a, __b, __p) __arm_vcmpgtq_m_n_f16(__a, __b, __p) | |
1111 | #define vcmpleq_m_f16(__a, __b, __p) __arm_vcmpleq_m_f16(__a, __b, __p) | |
1112 | #define vcmpleq_m_n_f16(__a, __b, __p) __arm_vcmpleq_m_n_f16(__a, __b, __p) | |
1113 | #define vcmpltq_m_f16(__a, __b, __p) __arm_vcmpltq_m_f16(__a, __b, __p) | |
1114 | #define vcmpltq_m_n_f16(__a, __b, __p) __arm_vcmpltq_m_n_f16(__a, __b, __p) | |
1115 | #define vcmpneq_m_f16(__a, __b, __p) __arm_vcmpneq_m_f16(__a, __b, __p) | |
1116 | #define vcmpneq_m_n_f16(__a, __b, __p) __arm_vcmpneq_m_n_f16(__a, __b, __p) | |
1117 | #define vmvnq_m_n_u16(__inactive, __imm, __p) __arm_vmvnq_m_n_u16(__inactive, __imm, __p) | |
1118 | #define vorrq_m_n_u16(__a, __imm, __p) __arm_vorrq_m_n_u16(__a, __imm, __p) | |
1119 | #define vqrshruntq_n_s16(__a, __b, __imm) __arm_vqrshruntq_n_s16(__a, __b, __imm) | |
1120 | #define vqshrunbq_n_s16(__a, __b, __imm) __arm_vqshrunbq_n_s16(__a, __b, __imm) | |
1121 | #define vqshruntq_n_s16(__a, __b, __imm) __arm_vqshruntq_n_s16(__a, __b, __imm) | |
1122 | #define vcvtmq_m_u16_f16(__inactive, __a, __p) __arm_vcvtmq_m_u16_f16(__inactive, __a, __p) | |
1123 | #define vcvtnq_m_u16_f16(__inactive, __a, __p) __arm_vcvtnq_m_u16_f16(__inactive, __a, __p) | |
1124 | #define vcvtpq_m_u16_f16(__inactive, __a, __p) __arm_vcvtpq_m_u16_f16(__inactive, __a, __p) | |
1125 | #define vcvtq_m_u16_f16(__inactive, __a, __p) __arm_vcvtq_m_u16_f16(__inactive, __a, __p) | |
1126 | #define vqmovunbq_m_s16(__a, __b, __p) __arm_vqmovunbq_m_s16(__a, __b, __p) | |
1127 | #define vqmovuntq_m_s16(__a, __b, __p) __arm_vqmovuntq_m_s16(__a, __b, __p) | |
1128 | #define vqrshrntq_n_u16(__a, __b, __imm) __arm_vqrshrntq_n_u16(__a, __b, __imm) | |
1129 | #define vqshrnbq_n_u16(__a, __b, __imm) __arm_vqshrnbq_n_u16(__a, __b, __imm) | |
1130 | #define vqshrntq_n_u16(__a, __b, __imm) __arm_vqshrntq_n_u16(__a, __b, __imm) | |
1131 | #define vrshrnbq_n_u16(__a, __b, __imm) __arm_vrshrnbq_n_u16(__a, __b, __imm) | |
1132 | #define vrshrntq_n_u16(__a, __b, __imm) __arm_vrshrntq_n_u16(__a, __b, __imm) | |
1133 | #define vshrnbq_n_u16(__a, __b, __imm) __arm_vshrnbq_n_u16(__a, __b, __imm) | |
1134 | #define vshrntq_n_u16(__a, __b, __imm) __arm_vshrntq_n_u16(__a, __b, __imm) | |
1135 | #define vmlaldavaq_u16(__a, __b, __c) __arm_vmlaldavaq_u16(__a, __b, __c) | |
1136 | #define vmlaldavq_p_u16(__a, __b, __p) __arm_vmlaldavq_p_u16(__a, __b, __p) | |
1137 | #define vmovlbq_m_u8(__inactive, __a, __p) __arm_vmovlbq_m_u8(__inactive, __a, __p) | |
1138 | #define vmovltq_m_u8(__inactive, __a, __p) __arm_vmovltq_m_u8(__inactive, __a, __p) | |
1139 | #define vmovnbq_m_u16(__a, __b, __p) __arm_vmovnbq_m_u16(__a, __b, __p) | |
1140 | #define vmovntq_m_u16(__a, __b, __p) __arm_vmovntq_m_u16(__a, __b, __p) | |
1141 | #define vqmovnbq_m_u16(__a, __b, __p) __arm_vqmovnbq_m_u16(__a, __b, __p) | |
1142 | #define vqmovntq_m_u16(__a, __b, __p) __arm_vqmovntq_m_u16(__a, __b, __p) | |
1143 | #define vrev32q_m_u8(__inactive, __a, __p) __arm_vrev32q_m_u8(__inactive, __a, __p) | |
1144 | #define vmvnq_m_n_s32(__inactive, __imm, __p) __arm_vmvnq_m_n_s32(__inactive, __imm, __p) | |
1145 | #define vorrq_m_n_s32(__a, __imm, __p) __arm_vorrq_m_n_s32(__a, __imm, __p) | |
1146 | #define vqrshrntq_n_s32(__a, __b, __imm) __arm_vqrshrntq_n_s32(__a, __b, __imm) | |
1147 | #define vqshrnbq_n_s32(__a, __b, __imm) __arm_vqshrnbq_n_s32(__a, __b, __imm) | |
1148 | #define vqshrntq_n_s32(__a, __b, __imm) __arm_vqshrntq_n_s32(__a, __b, __imm) | |
1149 | #define vrshrnbq_n_s32(__a, __b, __imm) __arm_vrshrnbq_n_s32(__a, __b, __imm) | |
1150 | #define vrshrntq_n_s32(__a, __b, __imm) __arm_vrshrntq_n_s32(__a, __b, __imm) | |
1151 | #define vshrnbq_n_s32(__a, __b, __imm) __arm_vshrnbq_n_s32(__a, __b, __imm) | |
1152 | #define vshrntq_n_s32(__a, __b, __imm) __arm_vshrntq_n_s32(__a, __b, __imm) | |
1153 | #define vcmlaq_f32(__a, __b, __c) __arm_vcmlaq_f32(__a, __b, __c) | |
1154 | #define vcmlaq_rot180_f32(__a, __b, __c) __arm_vcmlaq_rot180_f32(__a, __b, __c) | |
1155 | #define vcmlaq_rot270_f32(__a, __b, __c) __arm_vcmlaq_rot270_f32(__a, __b, __c) | |
1156 | #define vcmlaq_rot90_f32(__a, __b, __c) __arm_vcmlaq_rot90_f32(__a, __b, __c) | |
1157 | #define vfmaq_f32(__a, __b, __c) __arm_vfmaq_f32(__a, __b, __c) | |
1158 | #define vfmaq_n_f32(__a, __b, __c) __arm_vfmaq_n_f32(__a, __b, __c) | |
1159 | #define vfmasq_n_f32(__a, __b, __c) __arm_vfmasq_n_f32(__a, __b, __c) | |
1160 | #define vfmsq_f32(__a, __b, __c) __arm_vfmsq_f32(__a, __b, __c) | |
1161 | #define vmlaldavaq_s32(__a, __b, __c) __arm_vmlaldavaq_s32(__a, __b, __c) | |
1162 | #define vmlaldavaxq_s32(__a, __b, __c) __arm_vmlaldavaxq_s32(__a, __b, __c) | |
1163 | #define vmlsldavaq_s32(__a, __b, __c) __arm_vmlsldavaq_s32(__a, __b, __c) | |
1164 | #define vmlsldavaxq_s32(__a, __b, __c) __arm_vmlsldavaxq_s32(__a, __b, __c) | |
1165 | #define vabsq_m_f32(__inactive, __a, __p) __arm_vabsq_m_f32(__inactive, __a, __p) | |
1166 | #define vcvtmq_m_s32_f32(__inactive, __a, __p) __arm_vcvtmq_m_s32_f32(__inactive, __a, __p) | |
1167 | #define vcvtnq_m_s32_f32(__inactive, __a, __p) __arm_vcvtnq_m_s32_f32(__inactive, __a, __p) | |
1168 | #define vcvtpq_m_s32_f32(__inactive, __a, __p) __arm_vcvtpq_m_s32_f32(__inactive, __a, __p) | |
1169 | #define vcvtq_m_s32_f32(__inactive, __a, __p) __arm_vcvtq_m_s32_f32(__inactive, __a, __p) | |
1170 | #define vdupq_m_n_f32(__inactive, __a, __p) __arm_vdupq_m_n_f32(__inactive, __a, __p) | |
1171 | #define vmaxnmaq_m_f32(__a, __b, __p) __arm_vmaxnmaq_m_f32(__a, __b, __p) | |
1172 | #define vmaxnmavq_p_f32(__a, __b, __p) __arm_vmaxnmavq_p_f32(__a, __b, __p) | |
1173 | #define vmaxnmvq_p_f32(__a, __b, __p) __arm_vmaxnmvq_p_f32(__a, __b, __p) | |
1174 | #define vminnmaq_m_f32(__a, __b, __p) __arm_vminnmaq_m_f32(__a, __b, __p) | |
1175 | #define vminnmavq_p_f32(__a, __b, __p) __arm_vminnmavq_p_f32(__a, __b, __p) | |
1176 | #define vminnmvq_p_f32(__a, __b, __p) __arm_vminnmvq_p_f32(__a, __b, __p) | |
1177 | #define vmlaldavq_p_s32(__a, __b, __p) __arm_vmlaldavq_p_s32(__a, __b, __p) | |
1178 | #define vmlaldavxq_p_s32(__a, __b, __p) __arm_vmlaldavxq_p_s32(__a, __b, __p) | |
1179 | #define vmlsldavq_p_s32(__a, __b, __p) __arm_vmlsldavq_p_s32(__a, __b, __p) | |
1180 | #define vmlsldavxq_p_s32(__a, __b, __p) __arm_vmlsldavxq_p_s32(__a, __b, __p) | |
1181 | #define vmovlbq_m_s16(__inactive, __a, __p) __arm_vmovlbq_m_s16(__inactive, __a, __p) | |
1182 | #define vmovltq_m_s16(__inactive, __a, __p) __arm_vmovltq_m_s16(__inactive, __a, __p) | |
1183 | #define vmovnbq_m_s32(__a, __b, __p) __arm_vmovnbq_m_s32(__a, __b, __p) | |
1184 | #define vmovntq_m_s32(__a, __b, __p) __arm_vmovntq_m_s32(__a, __b, __p) | |
1185 | #define vnegq_m_f32(__inactive, __a, __p) __arm_vnegq_m_f32(__inactive, __a, __p) | |
1186 | #define vpselq_f32(__a, __b, __p) __arm_vpselq_f32(__a, __b, __p) | |
1187 | #define vqmovnbq_m_s32(__a, __b, __p) __arm_vqmovnbq_m_s32(__a, __b, __p) | |
1188 | #define vqmovntq_m_s32(__a, __b, __p) __arm_vqmovntq_m_s32(__a, __b, __p) | |
1189 | #define vrev32q_m_s16(__inactive, __a, __p) __arm_vrev32q_m_s16(__inactive, __a, __p) | |
1190 | #define vrev64q_m_f32(__inactive, __a, __p) __arm_vrev64q_m_f32(__inactive, __a, __p) | |
1191 | #define vrndaq_m_f32(__inactive, __a, __p) __arm_vrndaq_m_f32(__inactive, __a, __p) | |
1192 | #define vrndmq_m_f32(__inactive, __a, __p) __arm_vrndmq_m_f32(__inactive, __a, __p) | |
1193 | #define vrndnq_m_f32(__inactive, __a, __p) __arm_vrndnq_m_f32(__inactive, __a, __p) | |
1194 | #define vrndpq_m_f32(__inactive, __a, __p) __arm_vrndpq_m_f32(__inactive, __a, __p) | |
1195 | #define vrndq_m_f32(__inactive, __a, __p) __arm_vrndq_m_f32(__inactive, __a, __p) | |
1196 | #define vrndxq_m_f32(__inactive, __a, __p) __arm_vrndxq_m_f32(__inactive, __a, __p) | |
1197 | #define vcmpeqq_m_n_f32(__a, __b, __p) __arm_vcmpeqq_m_n_f32(__a, __b, __p) | |
1198 | #define vcmpgeq_m_f32(__a, __b, __p) __arm_vcmpgeq_m_f32(__a, __b, __p) | |
1199 | #define vcmpgeq_m_n_f32(__a, __b, __p) __arm_vcmpgeq_m_n_f32(__a, __b, __p) | |
1200 | #define vcmpgtq_m_f32(__a, __b, __p) __arm_vcmpgtq_m_f32(__a, __b, __p) | |
1201 | #define vcmpgtq_m_n_f32(__a, __b, __p) __arm_vcmpgtq_m_n_f32(__a, __b, __p) | |
1202 | #define vcmpleq_m_f32(__a, __b, __p) __arm_vcmpleq_m_f32(__a, __b, __p) | |
1203 | #define vcmpleq_m_n_f32(__a, __b, __p) __arm_vcmpleq_m_n_f32(__a, __b, __p) | |
1204 | #define vcmpltq_m_f32(__a, __b, __p) __arm_vcmpltq_m_f32(__a, __b, __p) | |
1205 | #define vcmpltq_m_n_f32(__a, __b, __p) __arm_vcmpltq_m_n_f32(__a, __b, __p) | |
1206 | #define vcmpneq_m_f32(__a, __b, __p) __arm_vcmpneq_m_f32(__a, __b, __p) | |
1207 | #define vcmpneq_m_n_f32(__a, __b, __p) __arm_vcmpneq_m_n_f32(__a, __b, __p) | |
1208 | #define vmvnq_m_n_u32(__inactive, __imm, __p) __arm_vmvnq_m_n_u32(__inactive, __imm, __p) | |
1209 | #define vorrq_m_n_u32(__a, __imm, __p) __arm_vorrq_m_n_u32(__a, __imm, __p) | |
1210 | #define vqrshruntq_n_s32(__a, __b, __imm) __arm_vqrshruntq_n_s32(__a, __b, __imm) | |
1211 | #define vqshrunbq_n_s32(__a, __b, __imm) __arm_vqshrunbq_n_s32(__a, __b, __imm) | |
1212 | #define vqshruntq_n_s32(__a, __b, __imm) __arm_vqshruntq_n_s32(__a, __b, __imm) | |
1213 | #define vcvtmq_m_u32_f32(__inactive, __a, __p) __arm_vcvtmq_m_u32_f32(__inactive, __a, __p) | |
1214 | #define vcvtnq_m_u32_f32(__inactive, __a, __p) __arm_vcvtnq_m_u32_f32(__inactive, __a, __p) | |
1215 | #define vcvtpq_m_u32_f32(__inactive, __a, __p) __arm_vcvtpq_m_u32_f32(__inactive, __a, __p) | |
1216 | #define vcvtq_m_u32_f32(__inactive, __a, __p) __arm_vcvtq_m_u32_f32(__inactive, __a, __p) | |
1217 | #define vqmovunbq_m_s32(__a, __b, __p) __arm_vqmovunbq_m_s32(__a, __b, __p) | |
1218 | #define vqmovuntq_m_s32(__a, __b, __p) __arm_vqmovuntq_m_s32(__a, __b, __p) | |
1219 | #define vqrshrntq_n_u32(__a, __b, __imm) __arm_vqrshrntq_n_u32(__a, __b, __imm) | |
1220 | #define vqshrnbq_n_u32(__a, __b, __imm) __arm_vqshrnbq_n_u32(__a, __b, __imm) | |
1221 | #define vqshrntq_n_u32(__a, __b, __imm) __arm_vqshrntq_n_u32(__a, __b, __imm) | |
1222 | #define vrshrnbq_n_u32(__a, __b, __imm) __arm_vrshrnbq_n_u32(__a, __b, __imm) | |
1223 | #define vrshrntq_n_u32(__a, __b, __imm) __arm_vrshrntq_n_u32(__a, __b, __imm) | |
1224 | #define vshrnbq_n_u32(__a, __b, __imm) __arm_vshrnbq_n_u32(__a, __b, __imm) | |
1225 | #define vshrntq_n_u32(__a, __b, __imm) __arm_vshrntq_n_u32(__a, __b, __imm) | |
1226 | #define vmlaldavaq_u32(__a, __b, __c) __arm_vmlaldavaq_u32(__a, __b, __c) | |
1227 | #define vmlaldavq_p_u32(__a, __b, __p) __arm_vmlaldavq_p_u32(__a, __b, __p) | |
1228 | #define vmovlbq_m_u16(__inactive, __a, __p) __arm_vmovlbq_m_u16(__inactive, __a, __p) | |
1229 | #define vmovltq_m_u16(__inactive, __a, __p) __arm_vmovltq_m_u16(__inactive, __a, __p) | |
1230 | #define vmovnbq_m_u32(__a, __b, __p) __arm_vmovnbq_m_u32(__a, __b, __p) | |
1231 | #define vmovntq_m_u32(__a, __b, __p) __arm_vmovntq_m_u32(__a, __b, __p) | |
1232 | #define vqmovnbq_m_u32(__a, __b, __p) __arm_vqmovnbq_m_u32(__a, __b, __p) | |
1233 | #define vqmovntq_m_u32(__a, __b, __p) __arm_vqmovntq_m_u32(__a, __b, __p) | |
1234 | #define vrev32q_m_u16(__inactive, __a, __p) __arm_vrev32q_m_u16(__inactive, __a, __p) | |
db5db9d2 SP |
1235 | #define vsriq_m_n_s8(__a, __b, __imm, __p) __arm_vsriq_m_n_s8(__a, __b, __imm, __p) |
1236 | #define vsubq_m_s8(__inactive, __a, __b, __p) __arm_vsubq_m_s8(__inactive, __a, __b, __p) | |
1237 | #define vcvtq_m_n_f16_u16(__inactive, __a, __imm6, __p) __arm_vcvtq_m_n_f16_u16(__inactive, __a, __imm6, __p) | |
1238 | #define vqshluq_m_n_s8(__inactive, __a, __imm, __p) __arm_vqshluq_m_n_s8(__inactive, __a, __imm, __p) | |
1239 | #define vabavq_p_s8(__a, __b, __c, __p) __arm_vabavq_p_s8(__a, __b, __c, __p) | |
1240 | #define vsriq_m_n_u8(__a, __b, __imm, __p) __arm_vsriq_m_n_u8(__a, __b, __imm, __p) | |
1241 | #define vshlq_m_u8(__inactive, __a, __b, __p) __arm_vshlq_m_u8(__inactive, __a, __b, __p) | |
1242 | #define vsubq_m_u8(__inactive, __a, __b, __p) __arm_vsubq_m_u8(__inactive, __a, __b, __p) | |
1243 | #define vabavq_p_u8(__a, __b, __c, __p) __arm_vabavq_p_u8(__a, __b, __c, __p) | |
1244 | #define vshlq_m_s8(__inactive, __a, __b, __p) __arm_vshlq_m_s8(__inactive, __a, __b, __p) | |
1245 | #define vcvtq_m_n_f16_s16(__inactive, __a, __imm6, __p) __arm_vcvtq_m_n_f16_s16(__inactive, __a, __imm6, __p) | |
1246 | #define vsriq_m_n_s16(__a, __b, __imm, __p) __arm_vsriq_m_n_s16(__a, __b, __imm, __p) | |
1247 | #define vsubq_m_s16(__inactive, __a, __b, __p) __arm_vsubq_m_s16(__inactive, __a, __b, __p) | |
1248 | #define vcvtq_m_n_f32_u32(__inactive, __a, __imm6, __p) __arm_vcvtq_m_n_f32_u32(__inactive, __a, __imm6, __p) | |
1249 | #define vqshluq_m_n_s16(__inactive, __a, __imm, __p) __arm_vqshluq_m_n_s16(__inactive, __a, __imm, __p) | |
1250 | #define vabavq_p_s16(__a, __b, __c, __p) __arm_vabavq_p_s16(__a, __b, __c, __p) | |
1251 | #define vsriq_m_n_u16(__a, __b, __imm, __p) __arm_vsriq_m_n_u16(__a, __b, __imm, __p) | |
1252 | #define vshlq_m_u16(__inactive, __a, __b, __p) __arm_vshlq_m_u16(__inactive, __a, __b, __p) | |
1253 | #define vsubq_m_u16(__inactive, __a, __b, __p) __arm_vsubq_m_u16(__inactive, __a, __b, __p) | |
1254 | #define vabavq_p_u16(__a, __b, __c, __p) __arm_vabavq_p_u16(__a, __b, __c, __p) | |
1255 | #define vshlq_m_s16(__inactive, __a, __b, __p) __arm_vshlq_m_s16(__inactive, __a, __b, __p) | |
1256 | #define vcvtq_m_n_f32_s32(__inactive, __a, __imm6, __p) __arm_vcvtq_m_n_f32_s32(__inactive, __a, __imm6, __p) | |
1257 | #define vsriq_m_n_s32(__a, __b, __imm, __p) __arm_vsriq_m_n_s32(__a, __b, __imm, __p) | |
1258 | #define vsubq_m_s32(__inactive, __a, __b, __p) __arm_vsubq_m_s32(__inactive, __a, __b, __p) | |
1259 | #define vqshluq_m_n_s32(__inactive, __a, __imm, __p) __arm_vqshluq_m_n_s32(__inactive, __a, __imm, __p) | |
1260 | #define vabavq_p_s32(__a, __b, __c, __p) __arm_vabavq_p_s32(__a, __b, __c, __p) | |
1261 | #define vsriq_m_n_u32(__a, __b, __imm, __p) __arm_vsriq_m_n_u32(__a, __b, __imm, __p) | |
1262 | #define vshlq_m_u32(__inactive, __a, __b, __p) __arm_vshlq_m_u32(__inactive, __a, __b, __p) | |
1263 | #define vsubq_m_u32(__inactive, __a, __b, __p) __arm_vsubq_m_u32(__inactive, __a, __b, __p) | |
1264 | #define vabavq_p_u32(__a, __b, __c, __p) __arm_vabavq_p_u32(__a, __b, __c, __p) | |
1265 | #define vshlq_m_s32(__inactive, __a, __b, __p) __arm_vshlq_m_s32(__inactive, __a, __b, __p) | |
8eb3b6b9 SP |
/* Predicated (merging) integer MVE intrinsics.  Forms taking __inactive
   merge unselected lanes from it; _p accumulating forms take (__a, __b,
   __c, __p); _n forms take a scalar operand; shift-immediate forms take
   __imm.  Each name expands to its __arm_-prefixed implementation.  */
#define vabdq_m_s8(__inactive, __a, __b, __p) __arm_vabdq_m_s8(__inactive, __a, __b, __p)
#define vabdq_m_s32(__inactive, __a, __b, __p) __arm_vabdq_m_s32(__inactive, __a, __b, __p)
#define vabdq_m_s16(__inactive, __a, __b, __p) __arm_vabdq_m_s16(__inactive, __a, __b, __p)
#define vabdq_m_u8(__inactive, __a, __b, __p) __arm_vabdq_m_u8(__inactive, __a, __b, __p)
#define vabdq_m_u32(__inactive, __a, __b, __p) __arm_vabdq_m_u32(__inactive, __a, __b, __p)
#define vabdq_m_u16(__inactive, __a, __b, __p) __arm_vabdq_m_u16(__inactive, __a, __b, __p)
#define vaddq_m_n_s8(__inactive, __a, __b, __p) __arm_vaddq_m_n_s8(__inactive, __a, __b, __p)
#define vaddq_m_n_s32(__inactive, __a, __b, __p) __arm_vaddq_m_n_s32(__inactive, __a, __b, __p)
#define vaddq_m_n_s16(__inactive, __a, __b, __p) __arm_vaddq_m_n_s16(__inactive, __a, __b, __p)
#define vaddq_m_n_u8(__inactive, __a, __b, __p) __arm_vaddq_m_n_u8(__inactive, __a, __b, __p)
#define vaddq_m_n_u32(__inactive, __a, __b, __p) __arm_vaddq_m_n_u32(__inactive, __a, __b, __p)
#define vaddq_m_n_u16(__inactive, __a, __b, __p) __arm_vaddq_m_n_u16(__inactive, __a, __b, __p)
#define vaddq_m_s8(__inactive, __a, __b, __p) __arm_vaddq_m_s8(__inactive, __a, __b, __p)
#define vaddq_m_s32(__inactive, __a, __b, __p) __arm_vaddq_m_s32(__inactive, __a, __b, __p)
#define vaddq_m_s16(__inactive, __a, __b, __p) __arm_vaddq_m_s16(__inactive, __a, __b, __p)
#define vaddq_m_u8(__inactive, __a, __b, __p) __arm_vaddq_m_u8(__inactive, __a, __b, __p)
#define vaddq_m_u32(__inactive, __a, __b, __p) __arm_vaddq_m_u32(__inactive, __a, __b, __p)
#define vaddq_m_u16(__inactive, __a, __b, __p) __arm_vaddq_m_u16(__inactive, __a, __b, __p)
#define vandq_m_s8(__inactive, __a, __b, __p) __arm_vandq_m_s8(__inactive, __a, __b, __p)
#define vandq_m_s32(__inactive, __a, __b, __p) __arm_vandq_m_s32(__inactive, __a, __b, __p)
#define vandq_m_s16(__inactive, __a, __b, __p) __arm_vandq_m_s16(__inactive, __a, __b, __p)
#define vandq_m_u8(__inactive, __a, __b, __p) __arm_vandq_m_u8(__inactive, __a, __b, __p)
#define vandq_m_u32(__inactive, __a, __b, __p) __arm_vandq_m_u32(__inactive, __a, __b, __p)
#define vandq_m_u16(__inactive, __a, __b, __p) __arm_vandq_m_u16(__inactive, __a, __b, __p)
#define vbicq_m_s8(__inactive, __a, __b, __p) __arm_vbicq_m_s8(__inactive, __a, __b, __p)
#define vbicq_m_s32(__inactive, __a, __b, __p) __arm_vbicq_m_s32(__inactive, __a, __b, __p)
#define vbicq_m_s16(__inactive, __a, __b, __p) __arm_vbicq_m_s16(__inactive, __a, __b, __p)
#define vbicq_m_u8(__inactive, __a, __b, __p) __arm_vbicq_m_u8(__inactive, __a, __b, __p)
#define vbicq_m_u32(__inactive, __a, __b, __p) __arm_vbicq_m_u32(__inactive, __a, __b, __p)
#define vbicq_m_u16(__inactive, __a, __b, __p) __arm_vbicq_m_u16(__inactive, __a, __b, __p)
#define vbrsrq_m_n_s8(__inactive, __a, __b, __p) __arm_vbrsrq_m_n_s8(__inactive, __a, __b, __p)
#define vbrsrq_m_n_s32(__inactive, __a, __b, __p) __arm_vbrsrq_m_n_s32(__inactive, __a, __b, __p)
#define vbrsrq_m_n_s16(__inactive, __a, __b, __p) __arm_vbrsrq_m_n_s16(__inactive, __a, __b, __p)
#define vbrsrq_m_n_u8(__inactive, __a, __b, __p) __arm_vbrsrq_m_n_u8(__inactive, __a, __b, __p)
#define vbrsrq_m_n_u32(__inactive, __a, __b, __p) __arm_vbrsrq_m_n_u32(__inactive, __a, __b, __p)
#define vbrsrq_m_n_u16(__inactive, __a, __b, __p) __arm_vbrsrq_m_n_u16(__inactive, __a, __b, __p)
#define vcaddq_rot270_m_s8(__inactive, __a, __b, __p) __arm_vcaddq_rot270_m_s8(__inactive, __a, __b, __p)
#define vcaddq_rot270_m_s32(__inactive, __a, __b, __p) __arm_vcaddq_rot270_m_s32(__inactive, __a, __b, __p)
#define vcaddq_rot270_m_s16(__inactive, __a, __b, __p) __arm_vcaddq_rot270_m_s16(__inactive, __a, __b, __p)
#define vcaddq_rot270_m_u8(__inactive, __a, __b, __p) __arm_vcaddq_rot270_m_u8(__inactive, __a, __b, __p)
#define vcaddq_rot270_m_u32(__inactive, __a, __b, __p) __arm_vcaddq_rot270_m_u32(__inactive, __a, __b, __p)
#define vcaddq_rot270_m_u16(__inactive, __a, __b, __p) __arm_vcaddq_rot270_m_u16(__inactive, __a, __b, __p)
#define vcaddq_rot90_m_s8(__inactive, __a, __b, __p) __arm_vcaddq_rot90_m_s8(__inactive, __a, __b, __p)
#define vcaddq_rot90_m_s32(__inactive, __a, __b, __p) __arm_vcaddq_rot90_m_s32(__inactive, __a, __b, __p)
#define vcaddq_rot90_m_s16(__inactive, __a, __b, __p) __arm_vcaddq_rot90_m_s16(__inactive, __a, __b, __p)
#define vcaddq_rot90_m_u8(__inactive, __a, __b, __p) __arm_vcaddq_rot90_m_u8(__inactive, __a, __b, __p)
#define vcaddq_rot90_m_u32(__inactive, __a, __b, __p) __arm_vcaddq_rot90_m_u32(__inactive, __a, __b, __p)
#define vcaddq_rot90_m_u16(__inactive, __a, __b, __p) __arm_vcaddq_rot90_m_u16(__inactive, __a, __b, __p)
#define veorq_m_s8(__inactive, __a, __b, __p) __arm_veorq_m_s8(__inactive, __a, __b, __p)
#define veorq_m_s32(__inactive, __a, __b, __p) __arm_veorq_m_s32(__inactive, __a, __b, __p)
#define veorq_m_s16(__inactive, __a, __b, __p) __arm_veorq_m_s16(__inactive, __a, __b, __p)
#define veorq_m_u8(__inactive, __a, __b, __p) __arm_veorq_m_u8(__inactive, __a, __b, __p)
#define veorq_m_u32(__inactive, __a, __b, __p) __arm_veorq_m_u32(__inactive, __a, __b, __p)
#define veorq_m_u16(__inactive, __a, __b, __p) __arm_veorq_m_u16(__inactive, __a, __b, __p)
#define vhaddq_m_n_s8(__inactive, __a, __b, __p) __arm_vhaddq_m_n_s8(__inactive, __a, __b, __p)
#define vhaddq_m_n_s32(__inactive, __a, __b, __p) __arm_vhaddq_m_n_s32(__inactive, __a, __b, __p)
#define vhaddq_m_n_s16(__inactive, __a, __b, __p) __arm_vhaddq_m_n_s16(__inactive, __a, __b, __p)
#define vhaddq_m_n_u8(__inactive, __a, __b, __p) __arm_vhaddq_m_n_u8(__inactive, __a, __b, __p)
#define vhaddq_m_n_u32(__inactive, __a, __b, __p) __arm_vhaddq_m_n_u32(__inactive, __a, __b, __p)
#define vhaddq_m_n_u16(__inactive, __a, __b, __p) __arm_vhaddq_m_n_u16(__inactive, __a, __b, __p)
#define vhaddq_m_s8(__inactive, __a, __b, __p) __arm_vhaddq_m_s8(__inactive, __a, __b, __p)
#define vhaddq_m_s32(__inactive, __a, __b, __p) __arm_vhaddq_m_s32(__inactive, __a, __b, __p)
#define vhaddq_m_s16(__inactive, __a, __b, __p) __arm_vhaddq_m_s16(__inactive, __a, __b, __p)
#define vhaddq_m_u8(__inactive, __a, __b, __p) __arm_vhaddq_m_u8(__inactive, __a, __b, __p)
#define vhaddq_m_u32(__inactive, __a, __b, __p) __arm_vhaddq_m_u32(__inactive, __a, __b, __p)
#define vhaddq_m_u16(__inactive, __a, __b, __p) __arm_vhaddq_m_u16(__inactive, __a, __b, __p)
#define vhcaddq_rot270_m_s8(__inactive, __a, __b, __p) __arm_vhcaddq_rot270_m_s8(__inactive, __a, __b, __p)
#define vhcaddq_rot270_m_s32(__inactive, __a, __b, __p) __arm_vhcaddq_rot270_m_s32(__inactive, __a, __b, __p)
#define vhcaddq_rot270_m_s16(__inactive, __a, __b, __p) __arm_vhcaddq_rot270_m_s16(__inactive, __a, __b, __p)
#define vhcaddq_rot90_m_s8(__inactive, __a, __b, __p) __arm_vhcaddq_rot90_m_s8(__inactive, __a, __b, __p)
#define vhcaddq_rot90_m_s32(__inactive, __a, __b, __p) __arm_vhcaddq_rot90_m_s32(__inactive, __a, __b, __p)
#define vhcaddq_rot90_m_s16(__inactive, __a, __b, __p) __arm_vhcaddq_rot90_m_s16(__inactive, __a, __b, __p)
#define vhsubq_m_n_s8(__inactive, __a, __b, __p) __arm_vhsubq_m_n_s8(__inactive, __a, __b, __p)
#define vhsubq_m_n_s32(__inactive, __a, __b, __p) __arm_vhsubq_m_n_s32(__inactive, __a, __b, __p)
#define vhsubq_m_n_s16(__inactive, __a, __b, __p) __arm_vhsubq_m_n_s16(__inactive, __a, __b, __p)
#define vhsubq_m_n_u8(__inactive, __a, __b, __p) __arm_vhsubq_m_n_u8(__inactive, __a, __b, __p)
#define vhsubq_m_n_u32(__inactive, __a, __b, __p) __arm_vhsubq_m_n_u32(__inactive, __a, __b, __p)
#define vhsubq_m_n_u16(__inactive, __a, __b, __p) __arm_vhsubq_m_n_u16(__inactive, __a, __b, __p)
#define vhsubq_m_s8(__inactive, __a, __b, __p) __arm_vhsubq_m_s8(__inactive, __a, __b, __p)
#define vhsubq_m_s32(__inactive, __a, __b, __p) __arm_vhsubq_m_s32(__inactive, __a, __b, __p)
#define vhsubq_m_s16(__inactive, __a, __b, __p) __arm_vhsubq_m_s16(__inactive, __a, __b, __p)
#define vhsubq_m_u8(__inactive, __a, __b, __p) __arm_vhsubq_m_u8(__inactive, __a, __b, __p)
#define vhsubq_m_u32(__inactive, __a, __b, __p) __arm_vhsubq_m_u32(__inactive, __a, __b, __p)
#define vhsubq_m_u16(__inactive, __a, __b, __p) __arm_vhsubq_m_u16(__inactive, __a, __b, __p)
#define vmaxq_m_s8(__inactive, __a, __b, __p) __arm_vmaxq_m_s8(__inactive, __a, __b, __p)
#define vmaxq_m_s32(__inactive, __a, __b, __p) __arm_vmaxq_m_s32(__inactive, __a, __b, __p)
#define vmaxq_m_s16(__inactive, __a, __b, __p) __arm_vmaxq_m_s16(__inactive, __a, __b, __p)
#define vmaxq_m_u8(__inactive, __a, __b, __p) __arm_vmaxq_m_u8(__inactive, __a, __b, __p)
#define vmaxq_m_u32(__inactive, __a, __b, __p) __arm_vmaxq_m_u32(__inactive, __a, __b, __p)
#define vmaxq_m_u16(__inactive, __a, __b, __p) __arm_vmaxq_m_u16(__inactive, __a, __b, __p)
#define vminq_m_s8(__inactive, __a, __b, __p) __arm_vminq_m_s8(__inactive, __a, __b, __p)
#define vminq_m_s32(__inactive, __a, __b, __p) __arm_vminq_m_s32(__inactive, __a, __b, __p)
#define vminq_m_s16(__inactive, __a, __b, __p) __arm_vminq_m_s16(__inactive, __a, __b, __p)
#define vminq_m_u8(__inactive, __a, __b, __p) __arm_vminq_m_u8(__inactive, __a, __b, __p)
#define vminq_m_u32(__inactive, __a, __b, __p) __arm_vminq_m_u32(__inactive, __a, __b, __p)
#define vminq_m_u16(__inactive, __a, __b, __p) __arm_vminq_m_u16(__inactive, __a, __b, __p)
#define vmladavaq_p_s8(__a, __b, __c, __p) __arm_vmladavaq_p_s8(__a, __b, __c, __p)
#define vmladavaq_p_s32(__a, __b, __c, __p) __arm_vmladavaq_p_s32(__a, __b, __c, __p)
#define vmladavaq_p_s16(__a, __b, __c, __p) __arm_vmladavaq_p_s16(__a, __b, __c, __p)
#define vmladavaq_p_u8(__a, __b, __c, __p) __arm_vmladavaq_p_u8(__a, __b, __c, __p)
#define vmladavaq_p_u32(__a, __b, __c, __p) __arm_vmladavaq_p_u32(__a, __b, __c, __p)
#define vmladavaq_p_u16(__a, __b, __c, __p) __arm_vmladavaq_p_u16(__a, __b, __c, __p)
#define vmladavaxq_p_s8(__a, __b, __c, __p) __arm_vmladavaxq_p_s8(__a, __b, __c, __p)
#define vmladavaxq_p_s32(__a, __b, __c, __p) __arm_vmladavaxq_p_s32(__a, __b, __c, __p)
#define vmladavaxq_p_s16(__a, __b, __c, __p) __arm_vmladavaxq_p_s16(__a, __b, __c, __p)
#define vmlaq_m_n_s8(__a, __b, __c, __p) __arm_vmlaq_m_n_s8(__a, __b, __c, __p)
#define vmlaq_m_n_s32(__a, __b, __c, __p) __arm_vmlaq_m_n_s32(__a, __b, __c, __p)
#define vmlaq_m_n_s16(__a, __b, __c, __p) __arm_vmlaq_m_n_s16(__a, __b, __c, __p)
#define vmlaq_m_n_u8(__a, __b, __c, __p) __arm_vmlaq_m_n_u8(__a, __b, __c, __p)
#define vmlaq_m_n_u32(__a, __b, __c, __p) __arm_vmlaq_m_n_u32(__a, __b, __c, __p)
#define vmlaq_m_n_u16(__a, __b, __c, __p) __arm_vmlaq_m_n_u16(__a, __b, __c, __p)
#define vmlasq_m_n_s8(__a, __b, __c, __p) __arm_vmlasq_m_n_s8(__a, __b, __c, __p)
#define vmlasq_m_n_s32(__a, __b, __c, __p) __arm_vmlasq_m_n_s32(__a, __b, __c, __p)
#define vmlasq_m_n_s16(__a, __b, __c, __p) __arm_vmlasq_m_n_s16(__a, __b, __c, __p)
#define vmlasq_m_n_u8(__a, __b, __c, __p) __arm_vmlasq_m_n_u8(__a, __b, __c, __p)
#define vmlasq_m_n_u32(__a, __b, __c, __p) __arm_vmlasq_m_n_u32(__a, __b, __c, __p)
#define vmlasq_m_n_u16(__a, __b, __c, __p) __arm_vmlasq_m_n_u16(__a, __b, __c, __p)
#define vmlsdavaq_p_s8(__a, __b, __c, __p) __arm_vmlsdavaq_p_s8(__a, __b, __c, __p)
#define vmlsdavaq_p_s32(__a, __b, __c, __p) __arm_vmlsdavaq_p_s32(__a, __b, __c, __p)
#define vmlsdavaq_p_s16(__a, __b, __c, __p) __arm_vmlsdavaq_p_s16(__a, __b, __c, __p)
#define vmlsdavaxq_p_s8(__a, __b, __c, __p) __arm_vmlsdavaxq_p_s8(__a, __b, __c, __p)
#define vmlsdavaxq_p_s32(__a, __b, __c, __p) __arm_vmlsdavaxq_p_s32(__a, __b, __c, __p)
#define vmlsdavaxq_p_s16(__a, __b, __c, __p) __arm_vmlsdavaxq_p_s16(__a, __b, __c, __p)
#define vmulhq_m_s8(__inactive, __a, __b, __p) __arm_vmulhq_m_s8(__inactive, __a, __b, __p)
#define vmulhq_m_s32(__inactive, __a, __b, __p) __arm_vmulhq_m_s32(__inactive, __a, __b, __p)
#define vmulhq_m_s16(__inactive, __a, __b, __p) __arm_vmulhq_m_s16(__inactive, __a, __b, __p)
#define vmulhq_m_u8(__inactive, __a, __b, __p) __arm_vmulhq_m_u8(__inactive, __a, __b, __p)
#define vmulhq_m_u32(__inactive, __a, __b, __p) __arm_vmulhq_m_u32(__inactive, __a, __b, __p)
#define vmulhq_m_u16(__inactive, __a, __b, __p) __arm_vmulhq_m_u16(__inactive, __a, __b, __p)
#define vmullbq_int_m_s8(__inactive, __a, __b, __p) __arm_vmullbq_int_m_s8(__inactive, __a, __b, __p)
#define vmullbq_int_m_s32(__inactive, __a, __b, __p) __arm_vmullbq_int_m_s32(__inactive, __a, __b, __p)
#define vmullbq_int_m_s16(__inactive, __a, __b, __p) __arm_vmullbq_int_m_s16(__inactive, __a, __b, __p)
#define vmullbq_int_m_u8(__inactive, __a, __b, __p) __arm_vmullbq_int_m_u8(__inactive, __a, __b, __p)
#define vmullbq_int_m_u32(__inactive, __a, __b, __p) __arm_vmullbq_int_m_u32(__inactive, __a, __b, __p)
#define vmullbq_int_m_u16(__inactive, __a, __b, __p) __arm_vmullbq_int_m_u16(__inactive, __a, __b, __p)
#define vmulltq_int_m_s8(__inactive, __a, __b, __p) __arm_vmulltq_int_m_s8(__inactive, __a, __b, __p)
#define vmulltq_int_m_s32(__inactive, __a, __b, __p) __arm_vmulltq_int_m_s32(__inactive, __a, __b, __p)
#define vmulltq_int_m_s16(__inactive, __a, __b, __p) __arm_vmulltq_int_m_s16(__inactive, __a, __b, __p)
#define vmulltq_int_m_u8(__inactive, __a, __b, __p) __arm_vmulltq_int_m_u8(__inactive, __a, __b, __p)
#define vmulltq_int_m_u32(__inactive, __a, __b, __p) __arm_vmulltq_int_m_u32(__inactive, __a, __b, __p)
#define vmulltq_int_m_u16(__inactive, __a, __b, __p) __arm_vmulltq_int_m_u16(__inactive, __a, __b, __p)
#define vmulq_m_n_s8(__inactive, __a, __b, __p) __arm_vmulq_m_n_s8(__inactive, __a, __b, __p)
#define vmulq_m_n_s32(__inactive, __a, __b, __p) __arm_vmulq_m_n_s32(__inactive, __a, __b, __p)
#define vmulq_m_n_s16(__inactive, __a, __b, __p) __arm_vmulq_m_n_s16(__inactive, __a, __b, __p)
#define vmulq_m_n_u8(__inactive, __a, __b, __p) __arm_vmulq_m_n_u8(__inactive, __a, __b, __p)
#define vmulq_m_n_u32(__inactive, __a, __b, __p) __arm_vmulq_m_n_u32(__inactive, __a, __b, __p)
#define vmulq_m_n_u16(__inactive, __a, __b, __p) __arm_vmulq_m_n_u16(__inactive, __a, __b, __p)
#define vmulq_m_s8(__inactive, __a, __b, __p) __arm_vmulq_m_s8(__inactive, __a, __b, __p)
#define vmulq_m_s32(__inactive, __a, __b, __p) __arm_vmulq_m_s32(__inactive, __a, __b, __p)
#define vmulq_m_s16(__inactive, __a, __b, __p) __arm_vmulq_m_s16(__inactive, __a, __b, __p)
#define vmulq_m_u8(__inactive, __a, __b, __p) __arm_vmulq_m_u8(__inactive, __a, __b, __p)
#define vmulq_m_u32(__inactive, __a, __b, __p) __arm_vmulq_m_u32(__inactive, __a, __b, __p)
#define vmulq_m_u16(__inactive, __a, __b, __p) __arm_vmulq_m_u16(__inactive, __a, __b, __p)
#define vornq_m_s8(__inactive, __a, __b, __p) __arm_vornq_m_s8(__inactive, __a, __b, __p)
#define vornq_m_s32(__inactive, __a, __b, __p) __arm_vornq_m_s32(__inactive, __a, __b, __p)
#define vornq_m_s16(__inactive, __a, __b, __p) __arm_vornq_m_s16(__inactive, __a, __b, __p)
#define vornq_m_u8(__inactive, __a, __b, __p) __arm_vornq_m_u8(__inactive, __a, __b, __p)
#define vornq_m_u32(__inactive, __a, __b, __p) __arm_vornq_m_u32(__inactive, __a, __b, __p)
#define vornq_m_u16(__inactive, __a, __b, __p) __arm_vornq_m_u16(__inactive, __a, __b, __p)
#define vorrq_m_s8(__inactive, __a, __b, __p) __arm_vorrq_m_s8(__inactive, __a, __b, __p)
#define vorrq_m_s32(__inactive, __a, __b, __p) __arm_vorrq_m_s32(__inactive, __a, __b, __p)
#define vorrq_m_s16(__inactive, __a, __b, __p) __arm_vorrq_m_s16(__inactive, __a, __b, __p)
#define vorrq_m_u8(__inactive, __a, __b, __p) __arm_vorrq_m_u8(__inactive, __a, __b, __p)
#define vorrq_m_u32(__inactive, __a, __b, __p) __arm_vorrq_m_u32(__inactive, __a, __b, __p)
#define vorrq_m_u16(__inactive, __a, __b, __p) __arm_vorrq_m_u16(__inactive, __a, __b, __p)
#define vqaddq_m_n_s8(__inactive, __a, __b, __p) __arm_vqaddq_m_n_s8(__inactive, __a, __b, __p)
#define vqaddq_m_n_s32(__inactive, __a, __b, __p) __arm_vqaddq_m_n_s32(__inactive, __a, __b, __p)
#define vqaddq_m_n_s16(__inactive, __a, __b, __p) __arm_vqaddq_m_n_s16(__inactive, __a, __b, __p)
#define vqaddq_m_n_u8(__inactive, __a, __b, __p) __arm_vqaddq_m_n_u8(__inactive, __a, __b, __p)
#define vqaddq_m_n_u32(__inactive, __a, __b, __p) __arm_vqaddq_m_n_u32(__inactive, __a, __b, __p)
#define vqaddq_m_n_u16(__inactive, __a, __b, __p) __arm_vqaddq_m_n_u16(__inactive, __a, __b, __p)
#define vqaddq_m_s8(__inactive, __a, __b, __p) __arm_vqaddq_m_s8(__inactive, __a, __b, __p)
#define vqaddq_m_s32(__inactive, __a, __b, __p) __arm_vqaddq_m_s32(__inactive, __a, __b, __p)
#define vqaddq_m_s16(__inactive, __a, __b, __p) __arm_vqaddq_m_s16(__inactive, __a, __b, __p)
#define vqaddq_m_u8(__inactive, __a, __b, __p) __arm_vqaddq_m_u8(__inactive, __a, __b, __p)
#define vqaddq_m_u32(__inactive, __a, __b, __p) __arm_vqaddq_m_u32(__inactive, __a, __b, __p)
#define vqaddq_m_u16(__inactive, __a, __b, __p) __arm_vqaddq_m_u16(__inactive, __a, __b, __p)
#define vqdmladhq_m_s8(__inactive, __a, __b, __p) __arm_vqdmladhq_m_s8(__inactive, __a, __b, __p)
#define vqdmladhq_m_s32(__inactive, __a, __b, __p) __arm_vqdmladhq_m_s32(__inactive, __a, __b, __p)
#define vqdmladhq_m_s16(__inactive, __a, __b, __p) __arm_vqdmladhq_m_s16(__inactive, __a, __b, __p)
#define vqdmladhxq_m_s8(__inactive, __a, __b, __p) __arm_vqdmladhxq_m_s8(__inactive, __a, __b, __p)
#define vqdmladhxq_m_s32(__inactive, __a, __b, __p) __arm_vqdmladhxq_m_s32(__inactive, __a, __b, __p)
#define vqdmladhxq_m_s16(__inactive, __a, __b, __p) __arm_vqdmladhxq_m_s16(__inactive, __a, __b, __p)
#define vqdmlahq_m_n_s8(__a, __b, __c, __p) __arm_vqdmlahq_m_n_s8(__a, __b, __c, __p)
#define vqdmlahq_m_n_s32(__a, __b, __c, __p) __arm_vqdmlahq_m_n_s32(__a, __b, __c, __p)
#define vqdmlahq_m_n_s16(__a, __b, __c, __p) __arm_vqdmlahq_m_n_s16(__a, __b, __c, __p)
#define vqdmlsdhq_m_s8(__inactive, __a, __b, __p) __arm_vqdmlsdhq_m_s8(__inactive, __a, __b, __p)
#define vqdmlsdhq_m_s32(__inactive, __a, __b, __p) __arm_vqdmlsdhq_m_s32(__inactive, __a, __b, __p)
#define vqdmlsdhq_m_s16(__inactive, __a, __b, __p) __arm_vqdmlsdhq_m_s16(__inactive, __a, __b, __p)
#define vqdmlsdhxq_m_s8(__inactive, __a, __b, __p) __arm_vqdmlsdhxq_m_s8(__inactive, __a, __b, __p)
#define vqdmlsdhxq_m_s32(__inactive, __a, __b, __p) __arm_vqdmlsdhxq_m_s32(__inactive, __a, __b, __p)
#define vqdmlsdhxq_m_s16(__inactive, __a, __b, __p) __arm_vqdmlsdhxq_m_s16(__inactive, __a, __b, __p)
#define vqdmulhq_m_n_s8(__inactive, __a, __b, __p) __arm_vqdmulhq_m_n_s8(__inactive, __a, __b, __p)
#define vqdmulhq_m_n_s32(__inactive, __a, __b, __p) __arm_vqdmulhq_m_n_s32(__inactive, __a, __b, __p)
#define vqdmulhq_m_n_s16(__inactive, __a, __b, __p) __arm_vqdmulhq_m_n_s16(__inactive, __a, __b, __p)
#define vqdmulhq_m_s8(__inactive, __a, __b, __p) __arm_vqdmulhq_m_s8(__inactive, __a, __b, __p)
#define vqdmulhq_m_s32(__inactive, __a, __b, __p) __arm_vqdmulhq_m_s32(__inactive, __a, __b, __p)
#define vqdmulhq_m_s16(__inactive, __a, __b, __p) __arm_vqdmulhq_m_s16(__inactive, __a, __b, __p)
#define vqrdmladhq_m_s8(__inactive, __a, __b, __p) __arm_vqrdmladhq_m_s8(__inactive, __a, __b, __p)
#define vqrdmladhq_m_s32(__inactive, __a, __b, __p) __arm_vqrdmladhq_m_s32(__inactive, __a, __b, __p)
#define vqrdmladhq_m_s16(__inactive, __a, __b, __p) __arm_vqrdmladhq_m_s16(__inactive, __a, __b, __p)
#define vqrdmladhxq_m_s8(__inactive, __a, __b, __p) __arm_vqrdmladhxq_m_s8(__inactive, __a, __b, __p)
#define vqrdmladhxq_m_s32(__inactive, __a, __b, __p) __arm_vqrdmladhxq_m_s32(__inactive, __a, __b, __p)
#define vqrdmladhxq_m_s16(__inactive, __a, __b, __p) __arm_vqrdmladhxq_m_s16(__inactive, __a, __b, __p)
#define vqrdmlahq_m_n_s8(__a, __b, __c, __p) __arm_vqrdmlahq_m_n_s8(__a, __b, __c, __p)
#define vqrdmlahq_m_n_s32(__a, __b, __c, __p) __arm_vqrdmlahq_m_n_s32(__a, __b, __c, __p)
#define vqrdmlahq_m_n_s16(__a, __b, __c, __p) __arm_vqrdmlahq_m_n_s16(__a, __b, __c, __p)
#define vqrdmlashq_m_n_s8(__a, __b, __c, __p) __arm_vqrdmlashq_m_n_s8(__a, __b, __c, __p)
#define vqrdmlashq_m_n_s32(__a, __b, __c, __p) __arm_vqrdmlashq_m_n_s32(__a, __b, __c, __p)
#define vqrdmlashq_m_n_s16(__a, __b, __c, __p) __arm_vqrdmlashq_m_n_s16(__a, __b, __c, __p)
#define vqrdmlsdhq_m_s8(__inactive, __a, __b, __p) __arm_vqrdmlsdhq_m_s8(__inactive, __a, __b, __p)
#define vqrdmlsdhq_m_s32(__inactive, __a, __b, __p) __arm_vqrdmlsdhq_m_s32(__inactive, __a, __b, __p)
#define vqrdmlsdhq_m_s16(__inactive, __a, __b, __p) __arm_vqrdmlsdhq_m_s16(__inactive, __a, __b, __p)
#define vqrdmlsdhxq_m_s8(__inactive, __a, __b, __p) __arm_vqrdmlsdhxq_m_s8(__inactive, __a, __b, __p)
#define vqrdmlsdhxq_m_s32(__inactive, __a, __b, __p) __arm_vqrdmlsdhxq_m_s32(__inactive, __a, __b, __p)
#define vqrdmlsdhxq_m_s16(__inactive, __a, __b, __p) __arm_vqrdmlsdhxq_m_s16(__inactive, __a, __b, __p)
#define vqrdmulhq_m_n_s8(__inactive, __a, __b, __p) __arm_vqrdmulhq_m_n_s8(__inactive, __a, __b, __p)
#define vqrdmulhq_m_n_s32(__inactive, __a, __b, __p) __arm_vqrdmulhq_m_n_s32(__inactive, __a, __b, __p)
#define vqrdmulhq_m_n_s16(__inactive, __a, __b, __p) __arm_vqrdmulhq_m_n_s16(__inactive, __a, __b, __p)
#define vqrdmulhq_m_s8(__inactive, __a, __b, __p) __arm_vqrdmulhq_m_s8(__inactive, __a, __b, __p)
#define vqrdmulhq_m_s32(__inactive, __a, __b, __p) __arm_vqrdmulhq_m_s32(__inactive, __a, __b, __p)
#define vqrdmulhq_m_s16(__inactive, __a, __b, __p) __arm_vqrdmulhq_m_s16(__inactive, __a, __b, __p)
#define vqrshlq_m_s8(__inactive, __a, __b, __p) __arm_vqrshlq_m_s8(__inactive, __a, __b, __p)
#define vqrshlq_m_s32(__inactive, __a, __b, __p) __arm_vqrshlq_m_s32(__inactive, __a, __b, __p)
#define vqrshlq_m_s16(__inactive, __a, __b, __p) __arm_vqrshlq_m_s16(__inactive, __a, __b, __p)
#define vqrshlq_m_u8(__inactive, __a, __b, __p) __arm_vqrshlq_m_u8(__inactive, __a, __b, __p)
#define vqrshlq_m_u32(__inactive, __a, __b, __p) __arm_vqrshlq_m_u32(__inactive, __a, __b, __p)
#define vqrshlq_m_u16(__inactive, __a, __b, __p) __arm_vqrshlq_m_u16(__inactive, __a, __b, __p)
#define vqshlq_m_n_s8(__inactive, __a, __imm, __p) __arm_vqshlq_m_n_s8(__inactive, __a, __imm, __p)
#define vqshlq_m_n_s32(__inactive, __a, __imm, __p) __arm_vqshlq_m_n_s32(__inactive, __a, __imm, __p)
#define vqshlq_m_n_s16(__inactive, __a, __imm, __p) __arm_vqshlq_m_n_s16(__inactive, __a, __imm, __p)
#define vqshlq_m_n_u8(__inactive, __a, __imm, __p) __arm_vqshlq_m_n_u8(__inactive, __a, __imm, __p)
#define vqshlq_m_n_u32(__inactive, __a, __imm, __p) __arm_vqshlq_m_n_u32(__inactive, __a, __imm, __p)
#define vqshlq_m_n_u16(__inactive, __a, __imm, __p) __arm_vqshlq_m_n_u16(__inactive, __a, __imm, __p)
#define vqshlq_m_s8(__inactive, __a, __b, __p) __arm_vqshlq_m_s8(__inactive, __a, __b, __p)
#define vqshlq_m_s32(__inactive, __a, __b, __p) __arm_vqshlq_m_s32(__inactive, __a, __b, __p)
#define vqshlq_m_s16(__inactive, __a, __b, __p) __arm_vqshlq_m_s16(__inactive, __a, __b, __p)
#define vqshlq_m_u8(__inactive, __a, __b, __p) __arm_vqshlq_m_u8(__inactive, __a, __b, __p)
#define vqshlq_m_u32(__inactive, __a, __b, __p) __arm_vqshlq_m_u32(__inactive, __a, __b, __p)
#define vqshlq_m_u16(__inactive, __a, __b, __p) __arm_vqshlq_m_u16(__inactive, __a, __b, __p)
#define vqsubq_m_n_s8(__inactive, __a, __b, __p) __arm_vqsubq_m_n_s8(__inactive, __a, __b, __p)
#define vqsubq_m_n_s32(__inactive, __a, __b, __p) __arm_vqsubq_m_n_s32(__inactive, __a, __b, __p)
#define vqsubq_m_n_s16(__inactive, __a, __b, __p) __arm_vqsubq_m_n_s16(__inactive, __a, __b, __p)
#define vqsubq_m_n_u8(__inactive, __a, __b, __p) __arm_vqsubq_m_n_u8(__inactive, __a, __b, __p)
#define vqsubq_m_n_u32(__inactive, __a, __b, __p) __arm_vqsubq_m_n_u32(__inactive, __a, __b, __p)
#define vqsubq_m_n_u16(__inactive, __a, __b, __p) __arm_vqsubq_m_n_u16(__inactive, __a, __b, __p)
#define vqsubq_m_s8(__inactive, __a, __b, __p) __arm_vqsubq_m_s8(__inactive, __a, __b, __p)
#define vqsubq_m_s32(__inactive, __a, __b, __p) __arm_vqsubq_m_s32(__inactive, __a, __b, __p)
#define vqsubq_m_s16(__inactive, __a, __b, __p) __arm_vqsubq_m_s16(__inactive, __a, __b, __p)
#define vqsubq_m_u8(__inactive, __a, __b, __p) __arm_vqsubq_m_u8(__inactive, __a, __b, __p)
#define vqsubq_m_u32(__inactive, __a, __b, __p) __arm_vqsubq_m_u32(__inactive, __a, __b, __p)
#define vqsubq_m_u16(__inactive, __a, __b, __p) __arm_vqsubq_m_u16(__inactive, __a, __b, __p)
#define vrhaddq_m_s8(__inactive, __a, __b, __p) __arm_vrhaddq_m_s8(__inactive, __a, __b, __p)
#define vrhaddq_m_s32(__inactive, __a, __b, __p) __arm_vrhaddq_m_s32(__inactive, __a, __b, __p)
#define vrhaddq_m_s16(__inactive, __a, __b, __p) __arm_vrhaddq_m_s16(__inactive, __a, __b, __p)
#define vrhaddq_m_u8(__inactive, __a, __b, __p) __arm_vrhaddq_m_u8(__inactive, __a, __b, __p)
#define vrhaddq_m_u32(__inactive, __a, __b, __p) __arm_vrhaddq_m_u32(__inactive, __a, __b, __p)
#define vrhaddq_m_u16(__inactive, __a, __b, __p) __arm_vrhaddq_m_u16(__inactive, __a, __b, __p)
#define vrmulhq_m_s8(__inactive, __a, __b, __p) __arm_vrmulhq_m_s8(__inactive, __a, __b, __p)
#define vrmulhq_m_s32(__inactive, __a, __b, __p) __arm_vrmulhq_m_s32(__inactive, __a, __b, __p)
#define vrmulhq_m_s16(__inactive, __a, __b, __p) __arm_vrmulhq_m_s16(__inactive, __a, __b, __p)
#define vrmulhq_m_u8(__inactive, __a, __b, __p) __arm_vrmulhq_m_u8(__inactive, __a, __b, __p)
#define vrmulhq_m_u32(__inactive, __a, __b, __p) __arm_vrmulhq_m_u32(__inactive, __a, __b, __p)
#define vrmulhq_m_u16(__inactive, __a, __b, __p) __arm_vrmulhq_m_u16(__inactive, __a, __b, __p)
#define vrshlq_m_s8(__inactive, __a, __b, __p) __arm_vrshlq_m_s8(__inactive, __a, __b, __p)
#define vrshlq_m_s32(__inactive, __a, __b, __p) __arm_vrshlq_m_s32(__inactive, __a, __b, __p)
#define vrshlq_m_s16(__inactive, __a, __b, __p) __arm_vrshlq_m_s16(__inactive, __a, __b, __p)
#define vrshlq_m_u8(__inactive, __a, __b, __p) __arm_vrshlq_m_u8(__inactive, __a, __b, __p)
#define vrshlq_m_u32(__inactive, __a, __b, __p) __arm_vrshlq_m_u32(__inactive, __a, __b, __p)
#define vrshlq_m_u16(__inactive, __a, __b, __p) __arm_vrshlq_m_u16(__inactive, __a, __b, __p)
#define vrshrq_m_n_s8(__inactive, __a, __imm, __p) __arm_vrshrq_m_n_s8(__inactive, __a, __imm, __p)
#define vrshrq_m_n_s32(__inactive, __a, __imm, __p) __arm_vrshrq_m_n_s32(__inactive, __a, __imm, __p)
#define vrshrq_m_n_s16(__inactive, __a, __imm, __p) __arm_vrshrq_m_n_s16(__inactive, __a, __imm, __p)
#define vrshrq_m_n_u8(__inactive, __a, __imm, __p) __arm_vrshrq_m_n_u8(__inactive, __a, __imm, __p)
#define vrshrq_m_n_u32(__inactive, __a, __imm, __p) __arm_vrshrq_m_n_u32(__inactive, __a, __imm, __p)
#define vrshrq_m_n_u16(__inactive, __a, __imm, __p) __arm_vrshrq_m_n_u16(__inactive, __a, __imm, __p)
#define vshlq_m_n_s8(__inactive, __a, __imm, __p) __arm_vshlq_m_n_s8(__inactive, __a, __imm, __p)
#define vshlq_m_n_s32(__inactive, __a, __imm, __p) __arm_vshlq_m_n_s32(__inactive, __a, __imm, __p)
#define vshlq_m_n_s16(__inactive, __a, __imm, __p) __arm_vshlq_m_n_s16(__inactive, __a, __imm, __p)
#define vshlq_m_n_u8(__inactive, __a, __imm, __p) __arm_vshlq_m_n_u8(__inactive, __a, __imm, __p)
#define vshlq_m_n_u32(__inactive, __a, __imm, __p) __arm_vshlq_m_n_u32(__inactive, __a, __imm, __p)
#define vshlq_m_n_u16(__inactive, __a, __imm, __p) __arm_vshlq_m_n_u16(__inactive, __a, __imm, __p)
#define vshrq_m_n_s8(__inactive, __a, __imm, __p) __arm_vshrq_m_n_s8(__inactive, __a, __imm, __p)
#define vshrq_m_n_s32(__inactive, __a, __imm, __p) __arm_vshrq_m_n_s32(__inactive, __a, __imm, __p)
#define vshrq_m_n_s16(__inactive, __a, __imm, __p) __arm_vshrq_m_n_s16(__inactive, __a, __imm, __p)
#define vshrq_m_n_u8(__inactive, __a, __imm, __p) __arm_vshrq_m_n_u8(__inactive, __a, __imm, __p)
#define vshrq_m_n_u32(__inactive, __a, __imm, __p) __arm_vshrq_m_n_u32(__inactive, __a, __imm, __p)
#define vshrq_m_n_u16(__inactive, __a, __imm, __p) __arm_vshrq_m_n_u16(__inactive, __a, __imm, __p)
#define vsliq_m_n_s8(__a, __b, __imm, __p) __arm_vsliq_m_n_s8(__a, __b, __imm, __p)
#define vsliq_m_n_s32(__a, __b, __imm, __p) __arm_vsliq_m_n_s32(__a, __b, __imm, __p)
#define vsliq_m_n_s16(__a, __b, __imm, __p) __arm_vsliq_m_n_s16(__a, __b, __imm, __p)
#define vsliq_m_n_u8(__a, __b, __imm, __p) __arm_vsliq_m_n_u8(__a, __b, __imm, __p)
#define vsliq_m_n_u32(__a, __b, __imm, __p) __arm_vsliq_m_n_u32(__a, __b, __imm, __p)
#define vsliq_m_n_u16(__a, __b, __imm, __p) __arm_vsliq_m_n_u16(__a, __b, __imm, __p)
#define vsubq_m_n_s8(__inactive, __a, __b, __p) __arm_vsubq_m_n_s8(__inactive, __a, __b, __p)
#define vsubq_m_n_s32(__inactive, __a, __b, __p) __arm_vsubq_m_n_s32(__inactive, __a, __b, __p)
#define vsubq_m_n_s16(__inactive, __a, __b, __p) __arm_vsubq_m_n_s16(__inactive, __a, __b, __p)
#define vsubq_m_n_u8(__inactive, __a, __b, __p) __arm_vsubq_m_n_u8(__inactive, __a, __b, __p)
#define vsubq_m_n_u32(__inactive, __a, __b, __p) __arm_vsubq_m_n_u32(__inactive, __a, __b, __p)
#define vsubq_m_n_u16(__inactive, __a, __b, __p) __arm_vsubq_m_n_u16(__inactive, __a, __b, __p)
/* Predicated (_p/_m) multiply-accumulate-long, polynomial multiply,
   saturating doubling multiply-long, and shift-narrow/shift-long-widen
   intrinsics.  Each macro forwards to its __arm_-prefixed implementation;
   __p is the mve_pred16_t predicate, __imm an immediate shift count.  */
#define vmlaldavaq_p_s32(__a, __b, __c, __p) __arm_vmlaldavaq_p_s32(__a, __b, __c, __p)
#define vmlaldavaq_p_s16(__a, __b, __c, __p) __arm_vmlaldavaq_p_s16(__a, __b, __c, __p)
#define vmlaldavaq_p_u32(__a, __b, __c, __p) __arm_vmlaldavaq_p_u32(__a, __b, __c, __p)
#define vmlaldavaq_p_u16(__a, __b, __c, __p) __arm_vmlaldavaq_p_u16(__a, __b, __c, __p)
#define vmlaldavaxq_p_s32(__a, __b, __c, __p) __arm_vmlaldavaxq_p_s32(__a, __b, __c, __p)
#define vmlaldavaxq_p_s16(__a, __b, __c, __p) __arm_vmlaldavaxq_p_s16(__a, __b, __c, __p)
#define vmlaldavaxq_p_u32(__a, __b, __c, __p) __arm_vmlaldavaxq_p_u32(__a, __b, __c, __p)
#define vmlaldavaxq_p_u16(__a, __b, __c, __p) __arm_vmlaldavaxq_p_u16(__a, __b, __c, __p)
#define vmlsldavaq_p_s32(__a, __b, __c, __p) __arm_vmlsldavaq_p_s32(__a, __b, __c, __p)
#define vmlsldavaq_p_s16(__a, __b, __c, __p) __arm_vmlsldavaq_p_s16(__a, __b, __c, __p)
#define vmlsldavaxq_p_s32(__a, __b, __c, __p) __arm_vmlsldavaxq_p_s32(__a, __b, __c, __p)
#define vmlsldavaxq_p_s16(__a, __b, __c, __p) __arm_vmlsldavaxq_p_s16(__a, __b, __c, __p)
#define vmullbq_poly_m_p8(__inactive, __a, __b, __p) __arm_vmullbq_poly_m_p8(__inactive, __a, __b, __p)
#define vmullbq_poly_m_p16(__inactive, __a, __b, __p) __arm_vmullbq_poly_m_p16(__inactive, __a, __b, __p)
#define vmulltq_poly_m_p8(__inactive, __a, __b, __p) __arm_vmulltq_poly_m_p8(__inactive, __a, __b, __p)
#define vmulltq_poly_m_p16(__inactive, __a, __b, __p) __arm_vmulltq_poly_m_p16(__inactive, __a, __b, __p)
#define vqdmullbq_m_n_s32(__inactive, __a, __b, __p) __arm_vqdmullbq_m_n_s32(__inactive, __a, __b, __p)
#define vqdmullbq_m_n_s16(__inactive, __a, __b, __p) __arm_vqdmullbq_m_n_s16(__inactive, __a, __b, __p)
#define vqdmullbq_m_s32(__inactive, __a, __b, __p) __arm_vqdmullbq_m_s32(__inactive, __a, __b, __p)
#define vqdmullbq_m_s16(__inactive, __a, __b, __p) __arm_vqdmullbq_m_s16(__inactive, __a, __b, __p)
#define vqdmulltq_m_n_s32(__inactive, __a, __b, __p) __arm_vqdmulltq_m_n_s32(__inactive, __a, __b, __p)
#define vqdmulltq_m_n_s16(__inactive, __a, __b, __p) __arm_vqdmulltq_m_n_s16(__inactive, __a, __b, __p)
#define vqdmulltq_m_s32(__inactive, __a, __b, __p) __arm_vqdmulltq_m_s32(__inactive, __a, __b, __p)
#define vqdmulltq_m_s16(__inactive, __a, __b, __p) __arm_vqdmulltq_m_s16(__inactive, __a, __b, __p)
#define vqrshrnbq_m_n_s32(__a, __b, __imm, __p) __arm_vqrshrnbq_m_n_s32(__a, __b, __imm, __p)
#define vqrshrnbq_m_n_s16(__a, __b, __imm, __p) __arm_vqrshrnbq_m_n_s16(__a, __b, __imm, __p)
#define vqrshrnbq_m_n_u32(__a, __b, __imm, __p) __arm_vqrshrnbq_m_n_u32(__a, __b, __imm, __p)
#define vqrshrnbq_m_n_u16(__a, __b, __imm, __p) __arm_vqrshrnbq_m_n_u16(__a, __b, __imm, __p)
#define vqrshrntq_m_n_s32(__a, __b, __imm, __p) __arm_vqrshrntq_m_n_s32(__a, __b, __imm, __p)
#define vqrshrntq_m_n_s16(__a, __b, __imm, __p) __arm_vqrshrntq_m_n_s16(__a, __b, __imm, __p)
#define vqrshrntq_m_n_u32(__a, __b, __imm, __p) __arm_vqrshrntq_m_n_u32(__a, __b, __imm, __p)
#define vqrshrntq_m_n_u16(__a, __b, __imm, __p) __arm_vqrshrntq_m_n_u16(__a, __b, __imm, __p)
#define vqrshrunbq_m_n_s32(__a, __b, __imm, __p) __arm_vqrshrunbq_m_n_s32(__a, __b, __imm, __p)
#define vqrshrunbq_m_n_s16(__a, __b, __imm, __p) __arm_vqrshrunbq_m_n_s16(__a, __b, __imm, __p)
#define vqrshruntq_m_n_s32(__a, __b, __imm, __p) __arm_vqrshruntq_m_n_s32(__a, __b, __imm, __p)
#define vqrshruntq_m_n_s16(__a, __b, __imm, __p) __arm_vqrshruntq_m_n_s16(__a, __b, __imm, __p)
#define vqshrnbq_m_n_s32(__a, __b, __imm, __p) __arm_vqshrnbq_m_n_s32(__a, __b, __imm, __p)
#define vqshrnbq_m_n_s16(__a, __b, __imm, __p) __arm_vqshrnbq_m_n_s16(__a, __b, __imm, __p)
#define vqshrnbq_m_n_u32(__a, __b, __imm, __p) __arm_vqshrnbq_m_n_u32(__a, __b, __imm, __p)
#define vqshrnbq_m_n_u16(__a, __b, __imm, __p) __arm_vqshrnbq_m_n_u16(__a, __b, __imm, __p)
#define vqshrntq_m_n_s32(__a, __b, __imm, __p) __arm_vqshrntq_m_n_s32(__a, __b, __imm, __p)
#define vqshrntq_m_n_s16(__a, __b, __imm, __p) __arm_vqshrntq_m_n_s16(__a, __b, __imm, __p)
#define vqshrntq_m_n_u32(__a, __b, __imm, __p) __arm_vqshrntq_m_n_u32(__a, __b, __imm, __p)
#define vqshrntq_m_n_u16(__a, __b, __imm, __p) __arm_vqshrntq_m_n_u16(__a, __b, __imm, __p)
#define vqshrunbq_m_n_s32(__a, __b, __imm, __p) __arm_vqshrunbq_m_n_s32(__a, __b, __imm, __p)
#define vqshrunbq_m_n_s16(__a, __b, __imm, __p) __arm_vqshrunbq_m_n_s16(__a, __b, __imm, __p)
#define vqshruntq_m_n_s32(__a, __b, __imm, __p) __arm_vqshruntq_m_n_s32(__a, __b, __imm, __p)
#define vqshruntq_m_n_s16(__a, __b, __imm, __p) __arm_vqshruntq_m_n_s16(__a, __b, __imm, __p)
#define vrmlaldavhaq_p_s32(__a, __b, __c, __p) __arm_vrmlaldavhaq_p_s32(__a, __b, __c, __p)
#define vrmlaldavhaq_p_u32(__a, __b, __c, __p) __arm_vrmlaldavhaq_p_u32(__a, __b, __c, __p)
#define vrmlaldavhaxq_p_s32(__a, __b, __c, __p) __arm_vrmlaldavhaxq_p_s32(__a, __b, __c, __p)
#define vrmlsldavhaq_p_s32(__a, __b, __c, __p) __arm_vrmlsldavhaq_p_s32(__a, __b, __c, __p)
#define vrmlsldavhaxq_p_s32(__a, __b, __c, __p) __arm_vrmlsldavhaxq_p_s32(__a, __b, __c, __p)
#define vrshrnbq_m_n_s32(__a, __b, __imm, __p) __arm_vrshrnbq_m_n_s32(__a, __b, __imm, __p)
#define vrshrnbq_m_n_s16(__a, __b, __imm, __p) __arm_vrshrnbq_m_n_s16(__a, __b, __imm, __p)
#define vrshrnbq_m_n_u32(__a, __b, __imm, __p) __arm_vrshrnbq_m_n_u32(__a, __b, __imm, __p)
#define vrshrnbq_m_n_u16(__a, __b, __imm, __p) __arm_vrshrnbq_m_n_u16(__a, __b, __imm, __p)
#define vrshrntq_m_n_s32(__a, __b, __imm, __p) __arm_vrshrntq_m_n_s32(__a, __b, __imm, __p)
#define vrshrntq_m_n_s16(__a, __b, __imm, __p) __arm_vrshrntq_m_n_s16(__a, __b, __imm, __p)
#define vrshrntq_m_n_u32(__a, __b, __imm, __p) __arm_vrshrntq_m_n_u32(__a, __b, __imm, __p)
#define vrshrntq_m_n_u16(__a, __b, __imm, __p) __arm_vrshrntq_m_n_u16(__a, __b, __imm, __p)
#define vshllbq_m_n_s8(__inactive, __a, __imm, __p) __arm_vshllbq_m_n_s8(__inactive, __a, __imm, __p)
#define vshllbq_m_n_s16(__inactive, __a, __imm, __p) __arm_vshllbq_m_n_s16(__inactive, __a, __imm, __p)
#define vshllbq_m_n_u8(__inactive, __a, __imm, __p) __arm_vshllbq_m_n_u8(__inactive, __a, __imm, __p)
#define vshllbq_m_n_u16(__inactive, __a, __imm, __p) __arm_vshllbq_m_n_u16(__inactive, __a, __imm, __p)
#define vshlltq_m_n_s8(__inactive, __a, __imm, __p) __arm_vshlltq_m_n_s8(__inactive, __a, __imm, __p)
#define vshlltq_m_n_s16(__inactive, __a, __imm, __p) __arm_vshlltq_m_n_s16(__inactive, __a, __imm, __p)
#define vshlltq_m_n_u8(__inactive, __a, __imm, __p) __arm_vshlltq_m_n_u8(__inactive, __a, __imm, __p)
#define vshlltq_m_n_u16(__inactive, __a, __imm, __p) __arm_vshlltq_m_n_u16(__inactive, __a, __imm, __p)
#define vshrnbq_m_n_s32(__a, __b, __imm, __p) __arm_vshrnbq_m_n_s32(__a, __b, __imm, __p)
#define vshrnbq_m_n_s16(__a, __b, __imm, __p) __arm_vshrnbq_m_n_s16(__a, __b, __imm, __p)
#define vshrnbq_m_n_u32(__a, __b, __imm, __p) __arm_vshrnbq_m_n_u32(__a, __b, __imm, __p)
#define vshrnbq_m_n_u16(__a, __b, __imm, __p) __arm_vshrnbq_m_n_u16(__a, __b, __imm, __p)
#define vshrntq_m_n_s32(__a, __b, __imm, __p) __arm_vshrntq_m_n_s32(__a, __b, __imm, __p)
#define vshrntq_m_n_s16(__a, __b, __imm, __p) __arm_vshrntq_m_n_s16(__a, __b, __imm, __p)
#define vshrntq_m_n_u32(__a, __b, __imm, __p) __arm_vshrntq_m_n_u32(__a, __b, __imm, __p)
#define vshrntq_m_n_u16(__a, __b, __imm, __p) __arm_vshrntq_m_n_u16(__a, __b, __imm, __p)
/* Predicated (_m) floating-point intrinsics (f16/f32): absolute difference,
   add/sub/mul (vector and scalar _n forms), bitwise ops, complex
   add/multiply-accumulate rotations, fixed-point conversions, and fused
   multiply-add/subtract.  Each macro forwards to its __arm_-prefixed
   implementation.  */
#define vabdq_m_f32(__inactive, __a, __b, __p) __arm_vabdq_m_f32(__inactive, __a, __b, __p)
#define vabdq_m_f16(__inactive, __a, __b, __p) __arm_vabdq_m_f16(__inactive, __a, __b, __p)
#define vaddq_m_f32(__inactive, __a, __b, __p) __arm_vaddq_m_f32(__inactive, __a, __b, __p)
#define vaddq_m_f16(__inactive, __a, __b, __p) __arm_vaddq_m_f16(__inactive, __a, __b, __p)
#define vaddq_m_n_f32(__inactive, __a, __b, __p) __arm_vaddq_m_n_f32(__inactive, __a, __b, __p)
#define vaddq_m_n_f16(__inactive, __a, __b, __p) __arm_vaddq_m_n_f16(__inactive, __a, __b, __p)
#define vandq_m_f32(__inactive, __a, __b, __p) __arm_vandq_m_f32(__inactive, __a, __b, __p)
#define vandq_m_f16(__inactive, __a, __b, __p) __arm_vandq_m_f16(__inactive, __a, __b, __p)
#define vbicq_m_f32(__inactive, __a, __b, __p) __arm_vbicq_m_f32(__inactive, __a, __b, __p)
#define vbicq_m_f16(__inactive, __a, __b, __p) __arm_vbicq_m_f16(__inactive, __a, __b, __p)
#define vbrsrq_m_n_f32(__inactive, __a, __b, __p) __arm_vbrsrq_m_n_f32(__inactive, __a, __b, __p)
#define vbrsrq_m_n_f16(__inactive, __a, __b, __p) __arm_vbrsrq_m_n_f16(__inactive, __a, __b, __p)
#define vcaddq_rot270_m_f32(__inactive, __a, __b, __p) __arm_vcaddq_rot270_m_f32(__inactive, __a, __b, __p)
#define vcaddq_rot270_m_f16(__inactive, __a, __b, __p) __arm_vcaddq_rot270_m_f16(__inactive, __a, __b, __p)
#define vcaddq_rot90_m_f32(__inactive, __a, __b, __p) __arm_vcaddq_rot90_m_f32(__inactive, __a, __b, __p)
#define vcaddq_rot90_m_f16(__inactive, __a, __b, __p) __arm_vcaddq_rot90_m_f16(__inactive, __a, __b, __p)
#define vcmlaq_m_f32(__a, __b, __c, __p) __arm_vcmlaq_m_f32(__a, __b, __c, __p)
#define vcmlaq_m_f16(__a, __b, __c, __p) __arm_vcmlaq_m_f16(__a, __b, __c, __p)
#define vcmlaq_rot180_m_f32(__a, __b, __c, __p) __arm_vcmlaq_rot180_m_f32(__a, __b, __c, __p)
#define vcmlaq_rot180_m_f16(__a, __b, __c, __p) __arm_vcmlaq_rot180_m_f16(__a, __b, __c, __p)
#define vcmlaq_rot270_m_f32(__a, __b, __c, __p) __arm_vcmlaq_rot270_m_f32(__a, __b, __c, __p)
#define vcmlaq_rot270_m_f16(__a, __b, __c, __p) __arm_vcmlaq_rot270_m_f16(__a, __b, __c, __p)
#define vcmlaq_rot90_m_f32(__a, __b, __c, __p) __arm_vcmlaq_rot90_m_f32(__a, __b, __c, __p)
#define vcmlaq_rot90_m_f16(__a, __b, __c, __p) __arm_vcmlaq_rot90_m_f16(__a, __b, __c, __p)
#define vcmulq_m_f32(__inactive, __a, __b, __p) __arm_vcmulq_m_f32(__inactive, __a, __b, __p)
#define vcmulq_m_f16(__inactive, __a, __b, __p) __arm_vcmulq_m_f16(__inactive, __a, __b, __p)
#define vcmulq_rot180_m_f32(__inactive, __a, __b, __p) __arm_vcmulq_rot180_m_f32(__inactive, __a, __b, __p)
#define vcmulq_rot180_m_f16(__inactive, __a, __b, __p) __arm_vcmulq_rot180_m_f16(__inactive, __a, __b, __p)
#define vcmulq_rot270_m_f32(__inactive, __a, __b, __p) __arm_vcmulq_rot270_m_f32(__inactive, __a, __b, __p)
#define vcmulq_rot270_m_f16(__inactive, __a, __b, __p) __arm_vcmulq_rot270_m_f16(__inactive, __a, __b, __p)
#define vcmulq_rot90_m_f32(__inactive, __a, __b, __p) __arm_vcmulq_rot90_m_f32(__inactive, __a, __b, __p)
#define vcmulq_rot90_m_f16(__inactive, __a, __b, __p) __arm_vcmulq_rot90_m_f16(__inactive, __a, __b, __p)
#define vcvtq_m_n_s32_f32(__inactive, __a, __imm6, __p) __arm_vcvtq_m_n_s32_f32(__inactive, __a, __imm6, __p)
#define vcvtq_m_n_s16_f16(__inactive, __a, __imm6, __p) __arm_vcvtq_m_n_s16_f16(__inactive, __a, __imm6, __p)
#define vcvtq_m_n_u32_f32(__inactive, __a, __imm6, __p) __arm_vcvtq_m_n_u32_f32(__inactive, __a, __imm6, __p)
#define vcvtq_m_n_u16_f16(__inactive, __a, __imm6, __p) __arm_vcvtq_m_n_u16_f16(__inactive, __a, __imm6, __p)
#define veorq_m_f32(__inactive, __a, __b, __p) __arm_veorq_m_f32(__inactive, __a, __b, __p)
#define veorq_m_f16(__inactive, __a, __b, __p) __arm_veorq_m_f16(__inactive, __a, __b, __p)
#define vfmaq_m_f32(__a, __b, __c, __p) __arm_vfmaq_m_f32(__a, __b, __c, __p)
#define vfmaq_m_f16(__a, __b, __c, __p) __arm_vfmaq_m_f16(__a, __b, __c, __p)
#define vfmaq_m_n_f32(__a, __b, __c, __p) __arm_vfmaq_m_n_f32(__a, __b, __c, __p)
#define vfmaq_m_n_f16(__a, __b, __c, __p) __arm_vfmaq_m_n_f16(__a, __b, __c, __p)
#define vfmasq_m_n_f32(__a, __b, __c, __p) __arm_vfmasq_m_n_f32(__a, __b, __c, __p)
#define vfmasq_m_n_f16(__a, __b, __c, __p) __arm_vfmasq_m_n_f16(__a, __b, __c, __p)
#define vfmsq_m_f32(__a, __b, __c, __p) __arm_vfmsq_m_f32(__a, __b, __c, __p)
#define vfmsq_m_f16(__a, __b, __c, __p) __arm_vfmsq_m_f16(__a, __b, __c, __p)
#define vmaxnmq_m_f32(__inactive, __a, __b, __p) __arm_vmaxnmq_m_f32(__inactive, __a, __b, __p)
#define vmaxnmq_m_f16(__inactive, __a, __b, __p) __arm_vmaxnmq_m_f16(__inactive, __a, __b, __p)
#define vminnmq_m_f32(__inactive, __a, __b, __p) __arm_vminnmq_m_f32(__inactive, __a, __b, __p)
#define vminnmq_m_f16(__inactive, __a, __b, __p) __arm_vminnmq_m_f16(__inactive, __a, __b, __p)
#define vmulq_m_f32(__inactive, __a, __b, __p) __arm_vmulq_m_f32(__inactive, __a, __b, __p)
#define vmulq_m_f16(__inactive, __a, __b, __p) __arm_vmulq_m_f16(__inactive, __a, __b, __p)
#define vmulq_m_n_f32(__inactive, __a, __b, __p) __arm_vmulq_m_n_f32(__inactive, __a, __b, __p)
#define vmulq_m_n_f16(__inactive, __a, __b, __p) __arm_vmulq_m_n_f16(__inactive, __a, __b, __p)
#define vornq_m_f32(__inactive, __a, __b, __p) __arm_vornq_m_f32(__inactive, __a, __b, __p)
#define vornq_m_f16(__inactive, __a, __b, __p) __arm_vornq_m_f16(__inactive, __a, __b, __p)
#define vorrq_m_f32(__inactive, __a, __b, __p) __arm_vorrq_m_f32(__inactive, __a, __b, __p)
#define vorrq_m_f16(__inactive, __a, __b, __p) __arm_vorrq_m_f16(__inactive, __a, __b, __p)
#define vsubq_m_f32(__inactive, __a, __b, __p) __arm_vsubq_m_f32(__inactive, __a, __b, __p)
#define vsubq_m_f16(__inactive, __a, __b, __p) __arm_vsubq_m_f16(__inactive, __a, __b, __p)
#define vsubq_m_n_f32(__inactive, __a, __b, __p) __arm_vsubq_m_n_f32(__inactive, __a, __b, __p)
#define vsubq_m_n_f16(__inactive, __a, __b, __p) __arm_vsubq_m_n_f16(__inactive, __a, __b, __p)
/* Byte stores (vstrbq), byte scatter-offset stores, and word
   scatter-base stores.  Each macro forwards to its __arm_-prefixed
   implementation.  */
#define vstrbq_s8( __addr, __value) __arm_vstrbq_s8( __addr, __value)
#define vstrbq_u8( __addr, __value) __arm_vstrbq_u8( __addr, __value)
#define vstrbq_u16( __addr, __value) __arm_vstrbq_u16( __addr, __value)
#define vstrbq_scatter_offset_s8( __base, __offset, __value) __arm_vstrbq_scatter_offset_s8( __base, __offset, __value)
#define vstrbq_scatter_offset_u8( __base, __offset, __value) __arm_vstrbq_scatter_offset_u8( __base, __offset, __value)
#define vstrbq_scatter_offset_u16( __base, __offset, __value) __arm_vstrbq_scatter_offset_u16( __base, __offset, __value)
#define vstrbq_s16( __addr, __value) __arm_vstrbq_s16( __addr, __value)
#define vstrbq_u32( __addr, __value) __arm_vstrbq_u32( __addr, __value)
#define vstrbq_scatter_offset_s16( __base, __offset, __value) __arm_vstrbq_scatter_offset_s16( __base, __offset, __value)
#define vstrbq_scatter_offset_u32( __base, __offset, __value) __arm_vstrbq_scatter_offset_u32( __base, __offset, __value)
#define vstrbq_s32( __addr, __value) __arm_vstrbq_s32( __addr, __value)
#define vstrbq_scatter_offset_s32( __base, __offset, __value) __arm_vstrbq_scatter_offset_s32( __base, __offset, __value)
#define vstrwq_scatter_base_s32(__addr, __offset, __value) __arm_vstrwq_scatter_base_s32(__addr, __offset, __value)
#define vstrwq_scatter_base_u32(__addr, __offset, __value) __arm_vstrwq_scatter_base_u32(__addr, __offset, __value)
/* Byte loads (vldrbq), byte gather-offset loads, and word gather-base
   loads.  Each macro forwards to its __arm_-prefixed implementation.  */
#define vldrbq_gather_offset_u8(__base, __offset) __arm_vldrbq_gather_offset_u8(__base, __offset)
#define vldrbq_gather_offset_s8(__base, __offset) __arm_vldrbq_gather_offset_s8(__base, __offset)
#define vldrbq_s8(__base) __arm_vldrbq_s8(__base)
#define vldrbq_u8(__base) __arm_vldrbq_u8(__base)
#define vldrbq_gather_offset_u16(__base, __offset) __arm_vldrbq_gather_offset_u16(__base, __offset)
#define vldrbq_gather_offset_s16(__base, __offset) __arm_vldrbq_gather_offset_s16(__base, __offset)
#define vldrbq_s16(__base) __arm_vldrbq_s16(__base)
#define vldrbq_u16(__base) __arm_vldrbq_u16(__base)
#define vldrbq_gather_offset_u32(__base, __offset) __arm_vldrbq_gather_offset_u32(__base, __offset)
#define vldrbq_gather_offset_s32(__base, __offset) __arm_vldrbq_gather_offset_s32(__base, __offset)
#define vldrbq_s32(__base) __arm_vldrbq_s32(__base)
#define vldrbq_u32(__base) __arm_vldrbq_u32(__base)
#define vldrwq_gather_base_s32(__addr, __offset) __arm_vldrwq_gather_base_s32(__addr, __offset)
#define vldrwq_gather_base_u32(__addr, __offset) __arm_vldrwq_gather_base_u32(__addr, __offset)
/* Predicated (_p) byte stores, byte scatter-offset stores, and word
   scatter-base stores; __p is the mve_pred16_t predicate.  Each macro
   forwards to its __arm_-prefixed implementation.  */
#define vstrbq_p_s8( __addr, __value, __p) __arm_vstrbq_p_s8( __addr, __value, __p)
#define vstrbq_p_s32( __addr, __value, __p) __arm_vstrbq_p_s32( __addr, __value, __p)
#define vstrbq_p_s16( __addr, __value, __p) __arm_vstrbq_p_s16( __addr, __value, __p)
#define vstrbq_p_u8( __addr, __value, __p) __arm_vstrbq_p_u8( __addr, __value, __p)
#define vstrbq_p_u32( __addr, __value, __p) __arm_vstrbq_p_u32( __addr, __value, __p)
#define vstrbq_p_u16( __addr, __value, __p) __arm_vstrbq_p_u16( __addr, __value, __p)
#define vstrbq_scatter_offset_p_s8( __base, __offset, __value, __p) __arm_vstrbq_scatter_offset_p_s8( __base, __offset, __value, __p)
#define vstrbq_scatter_offset_p_s32( __base, __offset, __value, __p) __arm_vstrbq_scatter_offset_p_s32( __base, __offset, __value, __p)
#define vstrbq_scatter_offset_p_s16( __base, __offset, __value, __p) __arm_vstrbq_scatter_offset_p_s16( __base, __offset, __value, __p)
#define vstrbq_scatter_offset_p_u8( __base, __offset, __value, __p) __arm_vstrbq_scatter_offset_p_u8( __base, __offset, __value, __p)
#define vstrbq_scatter_offset_p_u32( __base, __offset, __value, __p) __arm_vstrbq_scatter_offset_p_u32( __base, __offset, __value, __p)
#define vstrbq_scatter_offset_p_u16( __base, __offset, __value, __p) __arm_vstrbq_scatter_offset_p_u16( __base, __offset, __value, __p)
#define vstrwq_scatter_base_p_s32(__addr, __offset, __value, __p) __arm_vstrwq_scatter_base_p_s32(__addr, __offset, __value, __p)
#define vstrwq_scatter_base_p_u32(__addr, __offset, __value, __p) __arm_vstrwq_scatter_base_p_u32(__addr, __offset, __value, __p)
/* Zero-predicated (_z) byte loads, byte gather-offset loads, and word
   gather-base loads; __p is the mve_pred16_t predicate.  Each macro
   forwards to its __arm_-prefixed implementation.  */
#define vldrbq_gather_offset_z_s16(__base, __offset, __p) __arm_vldrbq_gather_offset_z_s16(__base, __offset, __p)
#define vldrbq_gather_offset_z_u8(__base, __offset, __p) __arm_vldrbq_gather_offset_z_u8(__base, __offset, __p)
#define vldrbq_gather_offset_z_s32(__base, __offset, __p) __arm_vldrbq_gather_offset_z_s32(__base, __offset, __p)
#define vldrbq_gather_offset_z_u16(__base, __offset, __p) __arm_vldrbq_gather_offset_z_u16(__base, __offset, __p)
#define vldrbq_gather_offset_z_u32(__base, __offset, __p) __arm_vldrbq_gather_offset_z_u32(__base, __offset, __p)
#define vldrbq_gather_offset_z_s8(__base, __offset, __p) __arm_vldrbq_gather_offset_z_s8(__base, __offset, __p)
#define vldrbq_z_s16(__base, __p) __arm_vldrbq_z_s16(__base, __p)
#define vldrbq_z_u8(__base, __p) __arm_vldrbq_z_u8(__base, __p)
#define vldrbq_z_s8(__base, __p) __arm_vldrbq_z_s8(__base, __p)
#define vldrbq_z_s32(__base, __p) __arm_vldrbq_z_s32(__base, __p)
#define vldrbq_z_u16(__base, __p) __arm_vldrbq_z_u16(__base, __p)
#define vldrbq_z_u32(__base, __p) __arm_vldrbq_z_u32(__base, __p)
#define vldrwq_gather_base_z_u32(__addr, __offset, __p) __arm_vldrwq_gather_base_z_u32(__addr, __offset, __p)
#define vldrwq_gather_base_z_s32(__addr, __offset, __p) __arm_vldrwq_gather_base_z_s32(__addr, __offset, __p)
/* Contiguous loads (vld1q), halfword loads/gathers (vldrhq), word loads
   (vldrwq), including _z zero-predicated and float variants.  Each macro
   forwards to its __arm_-prefixed implementation.  */
#define vld1q_s8(__base) __arm_vld1q_s8(__base)
#define vld1q_s32(__base) __arm_vld1q_s32(__base)
#define vld1q_s16(__base) __arm_vld1q_s16(__base)
#define vld1q_u8(__base) __arm_vld1q_u8(__base)
#define vld1q_u32(__base) __arm_vld1q_u32(__base)
#define vld1q_u16(__base) __arm_vld1q_u16(__base)
#define vldrhq_gather_offset_s32(__base, __offset) __arm_vldrhq_gather_offset_s32(__base, __offset)
#define vldrhq_gather_offset_s16(__base, __offset) __arm_vldrhq_gather_offset_s16(__base, __offset)
#define vldrhq_gather_offset_u32(__base, __offset) __arm_vldrhq_gather_offset_u32(__base, __offset)
#define vldrhq_gather_offset_u16(__base, __offset) __arm_vldrhq_gather_offset_u16(__base, __offset)
#define vldrhq_gather_offset_z_s32(__base, __offset, __p) __arm_vldrhq_gather_offset_z_s32(__base, __offset, __p)
#define vldrhq_gather_offset_z_s16(__base, __offset, __p) __arm_vldrhq_gather_offset_z_s16(__base, __offset, __p)
#define vldrhq_gather_offset_z_u32(__base, __offset, __p) __arm_vldrhq_gather_offset_z_u32(__base, __offset, __p)
#define vldrhq_gather_offset_z_u16(__base, __offset, __p) __arm_vldrhq_gather_offset_z_u16(__base, __offset, __p)
#define vldrhq_gather_shifted_offset_s32(__base, __offset) __arm_vldrhq_gather_shifted_offset_s32(__base, __offset)
#define vldrhq_gather_shifted_offset_s16(__base, __offset) __arm_vldrhq_gather_shifted_offset_s16(__base, __offset)
#define vldrhq_gather_shifted_offset_u32(__base, __offset) __arm_vldrhq_gather_shifted_offset_u32(__base, __offset)
#define vldrhq_gather_shifted_offset_u16(__base, __offset) __arm_vldrhq_gather_shifted_offset_u16(__base, __offset)
#define vldrhq_gather_shifted_offset_z_s32(__base, __offset, __p) __arm_vldrhq_gather_shifted_offset_z_s32(__base, __offset, __p)
#define vldrhq_gather_shifted_offset_z_s16(__base, __offset, __p) __arm_vldrhq_gather_shifted_offset_z_s16(__base, __offset, __p)
#define vldrhq_gather_shifted_offset_z_u32(__base, __offset, __p) __arm_vldrhq_gather_shifted_offset_z_u32(__base, __offset, __p)
#define vldrhq_gather_shifted_offset_z_u16(__base, __offset, __p) __arm_vldrhq_gather_shifted_offset_z_u16(__base, __offset, __p)
#define vldrhq_s32(__base) __arm_vldrhq_s32(__base)
#define vldrhq_s16(__base) __arm_vldrhq_s16(__base)
#define vldrhq_u32(__base) __arm_vldrhq_u32(__base)
#define vldrhq_u16(__base) __arm_vldrhq_u16(__base)
#define vldrhq_z_s32(__base, __p) __arm_vldrhq_z_s32(__base, __p)
#define vldrhq_z_s16(__base, __p) __arm_vldrhq_z_s16(__base, __p)
#define vldrhq_z_u32(__base, __p) __arm_vldrhq_z_u32(__base, __p)
#define vldrhq_z_u16(__base, __p) __arm_vldrhq_z_u16(__base, __p)
#define vldrwq_s32(__base) __arm_vldrwq_s32(__base)
#define vldrwq_u32(__base) __arm_vldrwq_u32(__base)
#define vldrwq_z_s32(__base, __p) __arm_vldrwq_z_s32(__base, __p)
#define vldrwq_z_u32(__base, __p) __arm_vldrwq_z_u32(__base, __p)
#define vld1q_f32(__base) __arm_vld1q_f32(__base)
#define vld1q_f16(__base) __arm_vld1q_f16(__base)
#define vldrhq_f16(__base) __arm_vldrhq_f16(__base)
#define vldrhq_z_f16(__base, __p) __arm_vldrhq_z_f16(__base, __p)
#define vldrwq_f32(__base) __arm_vldrwq_f32(__base)
#define vldrwq_z_f32(__base, __p) __arm_vldrwq_z_f32(__base, __p)
/* Doubleword gather loads (vldrdq), halfword float gathers, and word
   gather loads, including _z zero-predicated and shifted-offset forms.
   Each macro forwards to its __arm_-prefixed implementation.  */
#define vldrdq_gather_base_s64(__addr, __offset) __arm_vldrdq_gather_base_s64(__addr, __offset)
#define vldrdq_gather_base_u64(__addr, __offset) __arm_vldrdq_gather_base_u64(__addr, __offset)
#define vldrdq_gather_base_z_s64(__addr, __offset, __p) __arm_vldrdq_gather_base_z_s64(__addr, __offset, __p)
#define vldrdq_gather_base_z_u64(__addr, __offset, __p) __arm_vldrdq_gather_base_z_u64(__addr, __offset, __p)
#define vldrdq_gather_offset_s64(__base, __offset) __arm_vldrdq_gather_offset_s64(__base, __offset)
#define vldrdq_gather_offset_u64(__base, __offset) __arm_vldrdq_gather_offset_u64(__base, __offset)
#define vldrdq_gather_offset_z_s64(__base, __offset, __p) __arm_vldrdq_gather_offset_z_s64(__base, __offset, __p)
#define vldrdq_gather_offset_z_u64(__base, __offset, __p) __arm_vldrdq_gather_offset_z_u64(__base, __offset, __p)
#define vldrdq_gather_shifted_offset_s64(__base, __offset) __arm_vldrdq_gather_shifted_offset_s64(__base, __offset)
#define vldrdq_gather_shifted_offset_u64(__base, __offset) __arm_vldrdq_gather_shifted_offset_u64(__base, __offset)
#define vldrdq_gather_shifted_offset_z_s64(__base, __offset, __p) __arm_vldrdq_gather_shifted_offset_z_s64(__base, __offset, __p)
#define vldrdq_gather_shifted_offset_z_u64(__base, __offset, __p) __arm_vldrdq_gather_shifted_offset_z_u64(__base, __offset, __p)
#define vldrhq_gather_offset_f16(__base, __offset) __arm_vldrhq_gather_offset_f16(__base, __offset)
#define vldrhq_gather_offset_z_f16(__base, __offset, __p) __arm_vldrhq_gather_offset_z_f16(__base, __offset, __p)
#define vldrhq_gather_shifted_offset_f16(__base, __offset) __arm_vldrhq_gather_shifted_offset_f16(__base, __offset)
#define vldrhq_gather_shifted_offset_z_f16(__base, __offset, __p) __arm_vldrhq_gather_shifted_offset_z_f16(__base, __offset, __p)
#define vldrwq_gather_base_f32(__addr, __offset) __arm_vldrwq_gather_base_f32(__addr, __offset)
#define vldrwq_gather_base_z_f32(__addr, __offset, __p) __arm_vldrwq_gather_base_z_f32(__addr, __offset, __p)
#define vldrwq_gather_offset_f32(__base, __offset) __arm_vldrwq_gather_offset_f32(__base, __offset)
#define vldrwq_gather_offset_s32(__base, __offset) __arm_vldrwq_gather_offset_s32(__base, __offset)
#define vldrwq_gather_offset_u32(__base, __offset) __arm_vldrwq_gather_offset_u32(__base, __offset)
#define vldrwq_gather_offset_z_f32(__base, __offset, __p) __arm_vldrwq_gather_offset_z_f32(__base, __offset, __p)
#define vldrwq_gather_offset_z_s32(__base, __offset, __p) __arm_vldrwq_gather_offset_z_s32(__base, __offset, __p)
#define vldrwq_gather_offset_z_u32(__base, __offset, __p) __arm_vldrwq_gather_offset_z_u32(__base, __offset, __p)
#define vldrwq_gather_shifted_offset_f32(__base, __offset) __arm_vldrwq_gather_shifted_offset_f32(__base, __offset)
#define vldrwq_gather_shifted_offset_s32(__base, __offset) __arm_vldrwq_gather_shifted_offset_s32(__base, __offset)
#define vldrwq_gather_shifted_offset_u32(__base, __offset) __arm_vldrwq_gather_shifted_offset_u32(__base, __offset)
#define vldrwq_gather_shifted_offset_z_f32(__base, __offset, __p) __arm_vldrwq_gather_shifted_offset_z_f32(__base, __offset, __p)
#define vldrwq_gather_shifted_offset_z_s32(__base, __offset, __p) __arm_vldrwq_gather_shifted_offset_z_s32(__base, __offset, __p)
#define vldrwq_gather_shifted_offset_z_u32(__base, __offset, __p) __arm_vldrwq_gather_shifted_offset_z_u32(__base, __offset, __p)
/* Contiguous stores (vst1q), halfword stores/scatters (vstrhq), and word
   stores (vstrwq), including _p predicated forms; __p is the
   mve_pred16_t predicate.  Each macro forwards to its __arm_-prefixed
   implementation.  */
#define vst1q_f32(__addr, __value) __arm_vst1q_f32(__addr, __value)
#define vst1q_f16(__addr, __value) __arm_vst1q_f16(__addr, __value)
#define vst1q_s8(__addr, __value) __arm_vst1q_s8(__addr, __value)
#define vst1q_s32(__addr, __value) __arm_vst1q_s32(__addr, __value)
#define vst1q_s16(__addr, __value) __arm_vst1q_s16(__addr, __value)
#define vst1q_u8(__addr, __value) __arm_vst1q_u8(__addr, __value)
#define vst1q_u32(__addr, __value) __arm_vst1q_u32(__addr, __value)
#define vst1q_u16(__addr, __value) __arm_vst1q_u16(__addr, __value)
#define vstrhq_f16(__addr, __value) __arm_vstrhq_f16(__addr, __value)
#define vstrhq_scatter_offset_s32( __base, __offset, __value) __arm_vstrhq_scatter_offset_s32( __base, __offset, __value)
#define vstrhq_scatter_offset_s16( __base, __offset, __value) __arm_vstrhq_scatter_offset_s16( __base, __offset, __value)
#define vstrhq_scatter_offset_u32( __base, __offset, __value) __arm_vstrhq_scatter_offset_u32( __base, __offset, __value)
#define vstrhq_scatter_offset_u16( __base, __offset, __value) __arm_vstrhq_scatter_offset_u16( __base, __offset, __value)
#define vstrhq_scatter_offset_p_s32( __base, __offset, __value, __p) __arm_vstrhq_scatter_offset_p_s32( __base, __offset, __value, __p)
#define vstrhq_scatter_offset_p_s16( __base, __offset, __value, __p) __arm_vstrhq_scatter_offset_p_s16( __base, __offset, __value, __p)
#define vstrhq_scatter_offset_p_u32( __base, __offset, __value, __p) __arm_vstrhq_scatter_offset_p_u32( __base, __offset, __value, __p)
#define vstrhq_scatter_offset_p_u16( __base, __offset, __value, __p) __arm_vstrhq_scatter_offset_p_u16( __base, __offset, __value, __p)
#define vstrhq_scatter_shifted_offset_s32( __base, __offset, __value) __arm_vstrhq_scatter_shifted_offset_s32( __base, __offset, __value)
#define vstrhq_scatter_shifted_offset_s16( __base, __offset, __value) __arm_vstrhq_scatter_shifted_offset_s16( __base, __offset, __value)
#define vstrhq_scatter_shifted_offset_u32( __base, __offset, __value) __arm_vstrhq_scatter_shifted_offset_u32( __base, __offset, __value)
#define vstrhq_scatter_shifted_offset_u16( __base, __offset, __value) __arm_vstrhq_scatter_shifted_offset_u16( __base, __offset, __value)
#define vstrhq_scatter_shifted_offset_p_s32( __base, __offset, __value, __p) __arm_vstrhq_scatter_shifted_offset_p_s32( __base, __offset, __value, __p)
#define vstrhq_scatter_shifted_offset_p_s16( __base, __offset, __value, __p) __arm_vstrhq_scatter_shifted_offset_p_s16( __base, __offset, __value, __p)
#define vstrhq_scatter_shifted_offset_p_u32( __base, __offset, __value, __p) __arm_vstrhq_scatter_shifted_offset_p_u32( __base, __offset, __value, __p)
#define vstrhq_scatter_shifted_offset_p_u16( __base, __offset, __value, __p) __arm_vstrhq_scatter_shifted_offset_p_u16( __base, __offset, __value, __p)
#define vstrhq_s32(__addr, __value) __arm_vstrhq_s32(__addr, __value)
#define vstrhq_s16(__addr, __value) __arm_vstrhq_s16(__addr, __value)
#define vstrhq_u32(__addr, __value) __arm_vstrhq_u32(__addr, __value)
#define vstrhq_u16(__addr, __value) __arm_vstrhq_u16(__addr, __value)
#define vstrhq_p_f16(__addr, __value, __p) __arm_vstrhq_p_f16(__addr, __value, __p)
#define vstrhq_p_s32(__addr, __value, __p) __arm_vstrhq_p_s32(__addr, __value, __p)
#define vstrhq_p_s16(__addr, __value, __p) __arm_vstrhq_p_s16(__addr, __value, __p)
#define vstrhq_p_u32(__addr, __value, __p) __arm_vstrhq_p_u32(__addr, __value, __p)
#define vstrhq_p_u16(__addr, __value, __p) __arm_vstrhq_p_u16(__addr, __value, __p)
#define vstrwq_f32(__addr, __value) __arm_vstrwq_f32(__addr, __value)
#define vstrwq_s32(__addr, __value) __arm_vstrwq_s32(__addr, __value)
#define vstrwq_u32(__addr, __value) __arm_vstrwq_u32(__addr, __value)
#define vstrwq_p_f32(__addr, __value, __p) __arm_vstrwq_p_f32(__addr, __value, __p)
#define vstrwq_p_s32(__addr, __value, __p) __arm_vstrwq_p_s32(__addr, __value, __p)
#define vstrwq_p_u32(__addr, __value, __p) __arm_vstrwq_p_u32(__addr, __value, __p)
/* Doubleword scatter stores (vstrdq), halfword float scatter stores, and
   word scatter stores, including _p predicated forms.  Each macro
   forwards to its __arm_-prefixed implementation.  */
#define vstrdq_scatter_base_p_s64(__addr, __offset, __value, __p) __arm_vstrdq_scatter_base_p_s64(__addr, __offset, __value, __p)
#define vstrdq_scatter_base_p_u64(__addr, __offset, __value, __p) __arm_vstrdq_scatter_base_p_u64(__addr, __offset, __value, __p)
#define vstrdq_scatter_base_s64(__addr, __offset, __value) __arm_vstrdq_scatter_base_s64(__addr, __offset, __value)
#define vstrdq_scatter_base_u64(__addr, __offset, __value) __arm_vstrdq_scatter_base_u64(__addr, __offset, __value)
#define vstrdq_scatter_offset_p_s64(__base, __offset, __value, __p) __arm_vstrdq_scatter_offset_p_s64(__base, __offset, __value, __p)
#define vstrdq_scatter_offset_p_u64(__base, __offset, __value, __p) __arm_vstrdq_scatter_offset_p_u64(__base, __offset, __value, __p)
#define vstrdq_scatter_offset_s64(__base, __offset, __value) __arm_vstrdq_scatter_offset_s64(__base, __offset, __value)
#define vstrdq_scatter_offset_u64(__base, __offset, __value) __arm_vstrdq_scatter_offset_u64(__base, __offset, __value)
#define vstrdq_scatter_shifted_offset_p_s64(__base, __offset, __value, __p) __arm_vstrdq_scatter_shifted_offset_p_s64(__base, __offset, __value, __p)
#define vstrdq_scatter_shifted_offset_p_u64(__base, __offset, __value, __p) __arm_vstrdq_scatter_shifted_offset_p_u64(__base, __offset, __value, __p)
#define vstrdq_scatter_shifted_offset_s64(__base, __offset, __value) __arm_vstrdq_scatter_shifted_offset_s64(__base, __offset, __value)
#define vstrdq_scatter_shifted_offset_u64(__base, __offset, __value) __arm_vstrdq_scatter_shifted_offset_u64(__base, __offset, __value)
#define vstrhq_scatter_offset_f16(__base, __offset, __value) __arm_vstrhq_scatter_offset_f16(__base, __offset, __value)
#define vstrhq_scatter_offset_p_f16(__base, __offset, __value, __p) __arm_vstrhq_scatter_offset_p_f16(__base, __offset, __value, __p)
#define vstrhq_scatter_shifted_offset_f16(__base, __offset, __value) __arm_vstrhq_scatter_shifted_offset_f16(__base, __offset, __value)
#define vstrhq_scatter_shifted_offset_p_f16(__base, __offset, __value, __p) __arm_vstrhq_scatter_shifted_offset_p_f16(__base, __offset, __value, __p)
#define vstrwq_scatter_base_f32(__addr, __offset, __value) __arm_vstrwq_scatter_base_f32(__addr, __offset, __value)
#define vstrwq_scatter_base_p_f32(__addr, __offset, __value, __p) __arm_vstrwq_scatter_base_p_f32(__addr, __offset, __value, __p)
#define vstrwq_scatter_offset_f32(__base, __offset, __value) __arm_vstrwq_scatter_offset_f32(__base, __offset, __value)
#define vstrwq_scatter_offset_p_f32(__base, __offset, __value, __p) __arm_vstrwq_scatter_offset_p_f32(__base, __offset, __value, __p)
#define vstrwq_scatter_offset_p_s32(__base, __offset, __value, __p) __arm_vstrwq_scatter_offset_p_s32(__base, __offset, __value, __p)
#define vstrwq_scatter_offset_p_u32(__base, __offset, __value, __p) __arm_vstrwq_scatter_offset_p_u32(__base, __offset, __value, __p)
#define vstrwq_scatter_offset_s32(__base, __offset, __value) __arm_vstrwq_scatter_offset_s32(__base, __offset, __value)
#define vstrwq_scatter_offset_u32(__base, __offset, __value) __arm_vstrwq_scatter_offset_u32(__base, __offset, __value)
#define vstrwq_scatter_shifted_offset_f32(__base, __offset, __value) __arm_vstrwq_scatter_shifted_offset_f32(__base, __offset, __value)
1896 | #define vstrwq_scatter_shifted_offset_p_f32(__base, __offset, __value, __p) __arm_vstrwq_scatter_shifted_offset_p_f32(__base, __offset, __value, __p) | |
1897 | #define vstrwq_scatter_shifted_offset_p_s32(__base, __offset, __value, __p) __arm_vstrwq_scatter_shifted_offset_p_s32(__base, __offset, __value, __p) | |
1898 | #define vstrwq_scatter_shifted_offset_p_u32(__base, __offset, __value, __p) __arm_vstrwq_scatter_shifted_offset_p_u32(__base, __offset, __value, __p) | |
1899 | #define vstrwq_scatter_shifted_offset_s32(__base, __offset, __value) __arm_vstrwq_scatter_shifted_offset_s32(__base, __offset, __value) | |
1900 | #define vstrwq_scatter_shifted_offset_u32(__base, __offset, __value) __arm_vstrwq_scatter_shifted_offset_u32(__base, __offset, __value) | |
3eff57aa SP |
1901 | #define vaddq_s8(__a, __b) __arm_vaddq_s8(__a, __b) |
1902 | #define vaddq_s16(__a, __b) __arm_vaddq_s16(__a, __b) | |
1903 | #define vaddq_s32(__a, __b) __arm_vaddq_s32(__a, __b) | |
1904 | #define vaddq_u8(__a, __b) __arm_vaddq_u8(__a, __b) | |
1905 | #define vaddq_u16(__a, __b) __arm_vaddq_u16(__a, __b) | |
1906 | #define vaddq_u32(__a, __b) __arm_vaddq_u32(__a, __b) | |
1907 | #define vaddq_f16(__a, __b) __arm_vaddq_f16(__a, __b) | |
1908 | #define vaddq_f32(__a, __b) __arm_vaddq_f32(__a, __b) | |
85a94e87 SP |
1909 | #define vreinterpretq_s16_s32(__a) __arm_vreinterpretq_s16_s32(__a) |
1910 | #define vreinterpretq_s16_s64(__a) __arm_vreinterpretq_s16_s64(__a) | |
1911 | #define vreinterpretq_s16_s8(__a) __arm_vreinterpretq_s16_s8(__a) | |
1912 | #define vreinterpretq_s16_u16(__a) __arm_vreinterpretq_s16_u16(__a) | |
1913 | #define vreinterpretq_s16_u32(__a) __arm_vreinterpretq_s16_u32(__a) | |
1914 | #define vreinterpretq_s16_u64(__a) __arm_vreinterpretq_s16_u64(__a) | |
1915 | #define vreinterpretq_s16_u8(__a) __arm_vreinterpretq_s16_u8(__a) | |
1916 | #define vreinterpretq_s32_s16(__a) __arm_vreinterpretq_s32_s16(__a) | |
1917 | #define vreinterpretq_s32_s64(__a) __arm_vreinterpretq_s32_s64(__a) | |
1918 | #define vreinterpretq_s32_s8(__a) __arm_vreinterpretq_s32_s8(__a) | |
1919 | #define vreinterpretq_s32_u16(__a) __arm_vreinterpretq_s32_u16(__a) | |
1920 | #define vreinterpretq_s32_u32(__a) __arm_vreinterpretq_s32_u32(__a) | |
1921 | #define vreinterpretq_s32_u64(__a) __arm_vreinterpretq_s32_u64(__a) | |
1922 | #define vreinterpretq_s32_u8(__a) __arm_vreinterpretq_s32_u8(__a) | |
1923 | #define vreinterpretq_s64_s16(__a) __arm_vreinterpretq_s64_s16(__a) | |
1924 | #define vreinterpretq_s64_s32(__a) __arm_vreinterpretq_s64_s32(__a) | |
1925 | #define vreinterpretq_s64_s8(__a) __arm_vreinterpretq_s64_s8(__a) | |
1926 | #define vreinterpretq_s64_u16(__a) __arm_vreinterpretq_s64_u16(__a) | |
1927 | #define vreinterpretq_s64_u32(__a) __arm_vreinterpretq_s64_u32(__a) | |
1928 | #define vreinterpretq_s64_u64(__a) __arm_vreinterpretq_s64_u64(__a) | |
1929 | #define vreinterpretq_s64_u8(__a) __arm_vreinterpretq_s64_u8(__a) | |
1930 | #define vreinterpretq_s8_s16(__a) __arm_vreinterpretq_s8_s16(__a) | |
1931 | #define vreinterpretq_s8_s32(__a) __arm_vreinterpretq_s8_s32(__a) | |
1932 | #define vreinterpretq_s8_s64(__a) __arm_vreinterpretq_s8_s64(__a) | |
1933 | #define vreinterpretq_s8_u16(__a) __arm_vreinterpretq_s8_u16(__a) | |
1934 | #define vreinterpretq_s8_u32(__a) __arm_vreinterpretq_s8_u32(__a) | |
1935 | #define vreinterpretq_s8_u64(__a) __arm_vreinterpretq_s8_u64(__a) | |
1936 | #define vreinterpretq_s8_u8(__a) __arm_vreinterpretq_s8_u8(__a) | |
1937 | #define vreinterpretq_u16_s16(__a) __arm_vreinterpretq_u16_s16(__a) | |
1938 | #define vreinterpretq_u16_s32(__a) __arm_vreinterpretq_u16_s32(__a) | |
1939 | #define vreinterpretq_u16_s64(__a) __arm_vreinterpretq_u16_s64(__a) | |
1940 | #define vreinterpretq_u16_s8(__a) __arm_vreinterpretq_u16_s8(__a) | |
1941 | #define vreinterpretq_u16_u32(__a) __arm_vreinterpretq_u16_u32(__a) | |
1942 | #define vreinterpretq_u16_u64(__a) __arm_vreinterpretq_u16_u64(__a) | |
1943 | #define vreinterpretq_u16_u8(__a) __arm_vreinterpretq_u16_u8(__a) | |
1944 | #define vreinterpretq_u32_s16(__a) __arm_vreinterpretq_u32_s16(__a) | |
1945 | #define vreinterpretq_u32_s32(__a) __arm_vreinterpretq_u32_s32(__a) | |
1946 | #define vreinterpretq_u32_s64(__a) __arm_vreinterpretq_u32_s64(__a) | |
1947 | #define vreinterpretq_u32_s8(__a) __arm_vreinterpretq_u32_s8(__a) | |
1948 | #define vreinterpretq_u32_u16(__a) __arm_vreinterpretq_u32_u16(__a) | |
1949 | #define vreinterpretq_u32_u64(__a) __arm_vreinterpretq_u32_u64(__a) | |
1950 | #define vreinterpretq_u32_u8(__a) __arm_vreinterpretq_u32_u8(__a) | |
1951 | #define vreinterpretq_u64_s16(__a) __arm_vreinterpretq_u64_s16(__a) | |
1952 | #define vreinterpretq_u64_s32(__a) __arm_vreinterpretq_u64_s32(__a) | |
1953 | #define vreinterpretq_u64_s64(__a) __arm_vreinterpretq_u64_s64(__a) | |
1954 | #define vreinterpretq_u64_s8(__a) __arm_vreinterpretq_u64_s8(__a) | |
1955 | #define vreinterpretq_u64_u16(__a) __arm_vreinterpretq_u64_u16(__a) | |
1956 | #define vreinterpretq_u64_u32(__a) __arm_vreinterpretq_u64_u32(__a) | |
1957 | #define vreinterpretq_u64_u8(__a) __arm_vreinterpretq_u64_u8(__a) | |
1958 | #define vreinterpretq_u8_s16(__a) __arm_vreinterpretq_u8_s16(__a) | |
1959 | #define vreinterpretq_u8_s32(__a) __arm_vreinterpretq_u8_s32(__a) | |
1960 | #define vreinterpretq_u8_s64(__a) __arm_vreinterpretq_u8_s64(__a) | |
1961 | #define vreinterpretq_u8_s8(__a) __arm_vreinterpretq_u8_s8(__a) | |
1962 | #define vreinterpretq_u8_u16(__a) __arm_vreinterpretq_u8_u16(__a) | |
1963 | #define vreinterpretq_u8_u32(__a) __arm_vreinterpretq_u8_u32(__a) | |
1964 | #define vreinterpretq_u8_u64(__a) __arm_vreinterpretq_u8_u64(__a) | |
1965 | #define vreinterpretq_s32_f16(__a) __arm_vreinterpretq_s32_f16(__a) | |
1966 | #define vreinterpretq_s32_f32(__a) __arm_vreinterpretq_s32_f32(__a) | |
1967 | #define vreinterpretq_u16_f16(__a) __arm_vreinterpretq_u16_f16(__a) | |
1968 | #define vreinterpretq_u16_f32(__a) __arm_vreinterpretq_u16_f32(__a) | |
1969 | #define vreinterpretq_u32_f16(__a) __arm_vreinterpretq_u32_f16(__a) | |
1970 | #define vreinterpretq_u32_f32(__a) __arm_vreinterpretq_u32_f32(__a) | |
1971 | #define vreinterpretq_u64_f16(__a) __arm_vreinterpretq_u64_f16(__a) | |
1972 | #define vreinterpretq_u64_f32(__a) __arm_vreinterpretq_u64_f32(__a) | |
1973 | #define vreinterpretq_u8_f16(__a) __arm_vreinterpretq_u8_f16(__a) | |
1974 | #define vreinterpretq_u8_f32(__a) __arm_vreinterpretq_u8_f32(__a) | |
1975 | #define vreinterpretq_f16_f32(__a) __arm_vreinterpretq_f16_f32(__a) | |
1976 | #define vreinterpretq_f16_s16(__a) __arm_vreinterpretq_f16_s16(__a) | |
1977 | #define vreinterpretq_f16_s32(__a) __arm_vreinterpretq_f16_s32(__a) | |
1978 | #define vreinterpretq_f16_s64(__a) __arm_vreinterpretq_f16_s64(__a) | |
1979 | #define vreinterpretq_f16_s8(__a) __arm_vreinterpretq_f16_s8(__a) | |
1980 | #define vreinterpretq_f16_u16(__a) __arm_vreinterpretq_f16_u16(__a) | |
1981 | #define vreinterpretq_f16_u32(__a) __arm_vreinterpretq_f16_u32(__a) | |
1982 | #define vreinterpretq_f16_u64(__a) __arm_vreinterpretq_f16_u64(__a) | |
1983 | #define vreinterpretq_f16_u8(__a) __arm_vreinterpretq_f16_u8(__a) | |
1984 | #define vreinterpretq_f32_f16(__a) __arm_vreinterpretq_f32_f16(__a) | |
1985 | #define vreinterpretq_f32_s16(__a) __arm_vreinterpretq_f32_s16(__a) | |
1986 | #define vreinterpretq_f32_s32(__a) __arm_vreinterpretq_f32_s32(__a) | |
1987 | #define vreinterpretq_f32_s64(__a) __arm_vreinterpretq_f32_s64(__a) | |
1988 | #define vreinterpretq_f32_s8(__a) __arm_vreinterpretq_f32_s8(__a) | |
1989 | #define vreinterpretq_f32_u16(__a) __arm_vreinterpretq_f32_u16(__a) | |
1990 | #define vreinterpretq_f32_u32(__a) __arm_vreinterpretq_f32_u32(__a) | |
1991 | #define vreinterpretq_f32_u64(__a) __arm_vreinterpretq_f32_u64(__a) | |
1992 | #define vreinterpretq_f32_u8(__a) __arm_vreinterpretq_f32_u8(__a) | |
1993 | #define vreinterpretq_s16_f16(__a) __arm_vreinterpretq_s16_f16(__a) | |
1994 | #define vreinterpretq_s16_f32(__a) __arm_vreinterpretq_s16_f32(__a) | |
1995 | #define vreinterpretq_s64_f16(__a) __arm_vreinterpretq_s64_f16(__a) | |
1996 | #define vreinterpretq_s64_f32(__a) __arm_vreinterpretq_s64_f32(__a) | |
1997 | #define vreinterpretq_s8_f16(__a) __arm_vreinterpretq_s8_f16(__a) | |
1998 | #define vreinterpretq_s8_f32(__a) __arm_vreinterpretq_s8_f32(__a) | |
1999 | #define vuninitializedq_u8(void) __arm_vuninitializedq_u8(void) | |
2000 | #define vuninitializedq_u16(void) __arm_vuninitializedq_u16(void) | |
2001 | #define vuninitializedq_u32(void) __arm_vuninitializedq_u32(void) | |
2002 | #define vuninitializedq_u64(void) __arm_vuninitializedq_u64(void) | |
2003 | #define vuninitializedq_s8(void) __arm_vuninitializedq_s8(void) | |
2004 | #define vuninitializedq_s16(void) __arm_vuninitializedq_s16(void) | |
2005 | #define vuninitializedq_s32(void) __arm_vuninitializedq_s32(void) | |
2006 | #define vuninitializedq_s64(void) __arm_vuninitializedq_s64(void) | |
2007 | #define vuninitializedq_f16(void) __arm_vuninitializedq_f16(void) | |
2008 | #define vuninitializedq_f32(void) __arm_vuninitializedq_f32(void) | |
92f80065 SP |
2009 | #define vddupq_m_n_u8(__inactive, __a, __imm, __p) __arm_vddupq_m_n_u8(__inactive, __a, __imm, __p) |
2010 | #define vddupq_m_n_u32(__inactive, __a, __imm, __p) __arm_vddupq_m_n_u32(__inactive, __a, __imm, __p) | |
2011 | #define vddupq_m_n_u16(__inactive, __a, __imm, __p) __arm_vddupq_m_n_u16(__inactive, __a, __imm, __p) | |
2012 | #define vddupq_m_wb_u8(__inactive, __a, __imm, __p) __arm_vddupq_m_wb_u8(__inactive, __a, __imm, __p) | |
2013 | #define vddupq_m_wb_u16(__inactive, __a, __imm, __p) __arm_vddupq_m_wb_u16(__inactive, __a, __imm, __p) | |
2014 | #define vddupq_m_wb_u32(__inactive, __a, __imm, __p) __arm_vddupq_m_wb_u32(__inactive, __a, __imm, __p) | |
2015 | #define vddupq_n_u8(__a, __imm) __arm_vddupq_n_u8(__a, __imm) | |
2016 | #define vddupq_n_u32(__a, __imm) __arm_vddupq_n_u32(__a, __imm) | |
2017 | #define vddupq_n_u16(__a, __imm) __arm_vddupq_n_u16(__a, __imm) | |
2018 | #define vddupq_wb_u8( __a, __imm) __arm_vddupq_wb_u8( __a, __imm) | |
2019 | #define vddupq_wb_u16( __a, __imm) __arm_vddupq_wb_u16( __a, __imm) | |
2020 | #define vddupq_wb_u32( __a, __imm) __arm_vddupq_wb_u32( __a, __imm) | |
2021 | #define vdwdupq_m_n_u8(__inactive, __a, __b, __imm, __p) __arm_vdwdupq_m_n_u8(__inactive, __a, __b, __imm, __p) | |
2022 | #define vdwdupq_m_n_u32(__inactive, __a, __b, __imm, __p) __arm_vdwdupq_m_n_u32(__inactive, __a, __b, __imm, __p) | |
2023 | #define vdwdupq_m_n_u16(__inactive, __a, __b, __imm, __p) __arm_vdwdupq_m_n_u16(__inactive, __a, __b, __imm, __p) | |
2024 | #define vdwdupq_m_wb_u8(__inactive, __a, __b, __imm, __p) __arm_vdwdupq_m_wb_u8(__inactive, __a, __b, __imm, __p) | |
2025 | #define vdwdupq_m_wb_u32(__inactive, __a, __b, __imm, __p) __arm_vdwdupq_m_wb_u32(__inactive, __a, __b, __imm, __p) | |
2026 | #define vdwdupq_m_wb_u16(__inactive, __a, __b, __imm, __p) __arm_vdwdupq_m_wb_u16(__inactive, __a, __b, __imm, __p) | |
2027 | #define vdwdupq_n_u8(__a, __b, __imm) __arm_vdwdupq_n_u8(__a, __b, __imm) | |
2028 | #define vdwdupq_n_u32(__a, __b, __imm) __arm_vdwdupq_n_u32(__a, __b, __imm) | |
2029 | #define vdwdupq_n_u16(__a, __b, __imm) __arm_vdwdupq_n_u16(__a, __b, __imm) | |
2030 | #define vdwdupq_wb_u8( __a, __b, __imm) __arm_vdwdupq_wb_u8( __a, __b, __imm) | |
2031 | #define vdwdupq_wb_u32( __a, __b, __imm) __arm_vdwdupq_wb_u32( __a, __b, __imm) | |
2032 | #define vdwdupq_wb_u16( __a, __b, __imm) __arm_vdwdupq_wb_u16( __a, __b, __imm) | |
2033 | #define vidupq_m_n_u8(__inactive, __a, __imm, __p) __arm_vidupq_m_n_u8(__inactive, __a, __imm, __p) | |
2034 | #define vidupq_m_n_u32(__inactive, __a, __imm, __p) __arm_vidupq_m_n_u32(__inactive, __a, __imm, __p) | |
2035 | #define vidupq_m_n_u16(__inactive, __a, __imm, __p) __arm_vidupq_m_n_u16(__inactive, __a, __imm, __p) | |
2036 | #define vidupq_m_wb_u8(__inactive, __a, __imm, __p) __arm_vidupq_m_wb_u8(__inactive, __a, __imm, __p) | |
2037 | #define vidupq_m_wb_u16(__inactive, __a, __imm, __p) __arm_vidupq_m_wb_u16(__inactive, __a, __imm, __p) | |
2038 | #define vidupq_m_wb_u32(__inactive, __a, __imm, __p) __arm_vidupq_m_wb_u32(__inactive, __a, __imm, __p) | |
2039 | #define vidupq_n_u8(__a, __imm) __arm_vidupq_n_u8(__a, __imm) | |
2040 | #define vidupq_n_u32(__a, __imm) __arm_vidupq_n_u32(__a, __imm) | |
2041 | #define vidupq_n_u16(__a, __imm) __arm_vidupq_n_u16(__a, __imm) | |
2042 | #define vidupq_wb_u8( __a, __imm) __arm_vidupq_wb_u8( __a, __imm) | |
2043 | #define vidupq_wb_u16( __a, __imm) __arm_vidupq_wb_u16( __a, __imm) | |
2044 | #define vidupq_wb_u32( __a, __imm) __arm_vidupq_wb_u32( __a, __imm) | |
2045 | #define viwdupq_m_n_u8(__inactive, __a, __b, __imm, __p) __arm_viwdupq_m_n_u8(__inactive, __a, __b, __imm, __p) | |
2046 | #define viwdupq_m_n_u32(__inactive, __a, __b, __imm, __p) __arm_viwdupq_m_n_u32(__inactive, __a, __b, __imm, __p) | |
2047 | #define viwdupq_m_n_u16(__inactive, __a, __b, __imm, __p) __arm_viwdupq_m_n_u16(__inactive, __a, __b, __imm, __p) | |
2048 | #define viwdupq_m_wb_u8(__inactive, __a, __b, __imm, __p) __arm_viwdupq_m_wb_u8(__inactive, __a, __b, __imm, __p) | |
2049 | #define viwdupq_m_wb_u32(__inactive, __a, __b, __imm, __p) __arm_viwdupq_m_wb_u32(__inactive, __a, __b, __imm, __p) | |
2050 | #define viwdupq_m_wb_u16(__inactive, __a, __b, __imm, __p) __arm_viwdupq_m_wb_u16(__inactive, __a, __b, __imm, __p) | |
2051 | #define viwdupq_n_u8(__a, __b, __imm) __arm_viwdupq_n_u8(__a, __b, __imm) | |
2052 | #define viwdupq_n_u32(__a, __b, __imm) __arm_viwdupq_n_u32(__a, __b, __imm) | |
2053 | #define viwdupq_n_u16(__a, __b, __imm) __arm_viwdupq_n_u16(__a, __b, __imm) | |
2054 | #define viwdupq_wb_u8( __a, __b, __imm) __arm_viwdupq_wb_u8( __a, __b, __imm) | |
2055 | #define viwdupq_wb_u32( __a, __b, __imm) __arm_viwdupq_wb_u32( __a, __b, __imm) | |
2056 | #define viwdupq_wb_u16( __a, __b, __imm) __arm_viwdupq_wb_u16( __a, __b, __imm) | |
41e1a7ff SP |
2057 | #define vldrdq_gather_base_wb_s64(__addr, __offset) __arm_vldrdq_gather_base_wb_s64(__addr, __offset) |
2058 | #define vldrdq_gather_base_wb_u64(__addr, __offset) __arm_vldrdq_gather_base_wb_u64(__addr, __offset) | |
2059 | #define vldrdq_gather_base_wb_z_s64(__addr, __offset, __p) __arm_vldrdq_gather_base_wb_z_s64(__addr, __offset, __p) | |
2060 | #define vldrdq_gather_base_wb_z_u64(__addr, __offset, __p) __arm_vldrdq_gather_base_wb_z_u64(__addr, __offset, __p) | |
2061 | #define vldrwq_gather_base_wb_f32(__addr, __offset) __arm_vldrwq_gather_base_wb_f32(__addr, __offset) | |
2062 | #define vldrwq_gather_base_wb_s32(__addr, __offset) __arm_vldrwq_gather_base_wb_s32(__addr, __offset) | |
2063 | #define vldrwq_gather_base_wb_u32(__addr, __offset) __arm_vldrwq_gather_base_wb_u32(__addr, __offset) | |
2064 | #define vldrwq_gather_base_wb_z_f32(__addr, __offset, __p) __arm_vldrwq_gather_base_wb_z_f32(__addr, __offset, __p) | |
2065 | #define vldrwq_gather_base_wb_z_s32(__addr, __offset, __p) __arm_vldrwq_gather_base_wb_z_s32(__addr, __offset, __p) | |
2066 | #define vldrwq_gather_base_wb_z_u32(__addr, __offset, __p) __arm_vldrwq_gather_base_wb_z_u32(__addr, __offset, __p) | |
2067 | #define vstrdq_scatter_base_wb_p_s64(__addr, __offset, __value, __p) __arm_vstrdq_scatter_base_wb_p_s64(__addr, __offset, __value, __p) | |
2068 | #define vstrdq_scatter_base_wb_p_u64(__addr, __offset, __value, __p) __arm_vstrdq_scatter_base_wb_p_u64(__addr, __offset, __value, __p) | |
2069 | #define vstrdq_scatter_base_wb_s64(__addr, __offset, __value) __arm_vstrdq_scatter_base_wb_s64(__addr, __offset, __value) | |
2070 | #define vstrdq_scatter_base_wb_u64(__addr, __offset, __value) __arm_vstrdq_scatter_base_wb_u64(__addr, __offset, __value) | |
2071 | #define vstrwq_scatter_base_wb_p_s32(__addr, __offset, __value, __p) __arm_vstrwq_scatter_base_wb_p_s32(__addr, __offset, __value, __p) | |
2072 | #define vstrwq_scatter_base_wb_p_f32(__addr, __offset, __value, __p) __arm_vstrwq_scatter_base_wb_p_f32(__addr, __offset, __value, __p) | |
2073 | #define vstrwq_scatter_base_wb_p_u32(__addr, __offset, __value, __p) __arm_vstrwq_scatter_base_wb_p_u32(__addr, __offset, __value, __p) | |
2074 | #define vstrwq_scatter_base_wb_s32(__addr, __offset, __value) __arm_vstrwq_scatter_base_wb_s32(__addr, __offset, __value) | |
2075 | #define vstrwq_scatter_base_wb_u32(__addr, __offset, __value) __arm_vstrwq_scatter_base_wb_u32(__addr, __offset, __value) | |
2076 | #define vstrwq_scatter_base_wb_f32(__addr, __offset, __value) __arm_vstrwq_scatter_base_wb_f32(__addr, __offset, __value) | |
261014a1 SP |
2077 | #define vddupq_x_n_u8(__a, __imm, __p) __arm_vddupq_x_n_u8(__a, __imm, __p) |
2078 | #define vddupq_x_n_u16(__a, __imm, __p) __arm_vddupq_x_n_u16(__a, __imm, __p) | |
2079 | #define vddupq_x_n_u32(__a, __imm, __p) __arm_vddupq_x_n_u32(__a, __imm, __p) | |
2080 | #define vddupq_x_wb_u8(__a, __imm, __p) __arm_vddupq_x_wb_u8(__a, __imm, __p) | |
2081 | #define vddupq_x_wb_u16(__a, __imm, __p) __arm_vddupq_x_wb_u16(__a, __imm, __p) | |
2082 | #define vddupq_x_wb_u32(__a, __imm, __p) __arm_vddupq_x_wb_u32(__a, __imm, __p) | |
2083 | #define vdwdupq_x_n_u8(__a, __b, __imm, __p) __arm_vdwdupq_x_n_u8(__a, __b, __imm, __p) | |
2084 | #define vdwdupq_x_n_u16(__a, __b, __imm, __p) __arm_vdwdupq_x_n_u16(__a, __b, __imm, __p) | |
2085 | #define vdwdupq_x_n_u32(__a, __b, __imm, __p) __arm_vdwdupq_x_n_u32(__a, __b, __imm, __p) | |
2086 | #define vdwdupq_x_wb_u8(__a, __b, __imm, __p) __arm_vdwdupq_x_wb_u8(__a, __b, __imm, __p) | |
2087 | #define vdwdupq_x_wb_u16(__a, __b, __imm, __p) __arm_vdwdupq_x_wb_u16(__a, __b, __imm, __p) | |
2088 | #define vdwdupq_x_wb_u32(__a, __b, __imm, __p) __arm_vdwdupq_x_wb_u32(__a, __b, __imm, __p) | |
2089 | #define vidupq_x_n_u8(__a, __imm, __p) __arm_vidupq_x_n_u8(__a, __imm, __p) | |
2090 | #define vidupq_x_n_u16(__a, __imm, __p) __arm_vidupq_x_n_u16(__a, __imm, __p) | |
2091 | #define vidupq_x_n_u32(__a, __imm, __p) __arm_vidupq_x_n_u32(__a, __imm, __p) | |
2092 | #define vidupq_x_wb_u8(__a, __imm, __p) __arm_vidupq_x_wb_u8(__a, __imm, __p) | |
2093 | #define vidupq_x_wb_u16(__a, __imm, __p) __arm_vidupq_x_wb_u16(__a, __imm, __p) | |
2094 | #define vidupq_x_wb_u32(__a, __imm, __p) __arm_vidupq_x_wb_u32(__a, __imm, __p) | |
2095 | #define viwdupq_x_n_u8(__a, __b, __imm, __p) __arm_viwdupq_x_n_u8(__a, __b, __imm, __p) | |
2096 | #define viwdupq_x_n_u16(__a, __b, __imm, __p) __arm_viwdupq_x_n_u16(__a, __b, __imm, __p) | |
2097 | #define viwdupq_x_n_u32(__a, __b, __imm, __p) __arm_viwdupq_x_n_u32(__a, __b, __imm, __p) | |
2098 | #define viwdupq_x_wb_u8(__a, __b, __imm, __p) __arm_viwdupq_x_wb_u8(__a, __b, __imm, __p) | |
2099 | #define viwdupq_x_wb_u16(__a, __b, __imm, __p) __arm_viwdupq_x_wb_u16(__a, __b, __imm, __p) | |
2100 | #define viwdupq_x_wb_u32(__a, __b, __imm, __p) __arm_viwdupq_x_wb_u32(__a, __b, __imm, __p) | |
2101 | #define vdupq_x_n_s8(__a, __p) __arm_vdupq_x_n_s8(__a, __p) | |
2102 | #define vdupq_x_n_s16(__a, __p) __arm_vdupq_x_n_s16(__a, __p) | |
2103 | #define vdupq_x_n_s32(__a, __p) __arm_vdupq_x_n_s32(__a, __p) | |
2104 | #define vdupq_x_n_u8(__a, __p) __arm_vdupq_x_n_u8(__a, __p) | |
2105 | #define vdupq_x_n_u16(__a, __p) __arm_vdupq_x_n_u16(__a, __p) | |
2106 | #define vdupq_x_n_u32(__a, __p) __arm_vdupq_x_n_u32(__a, __p) | |
2107 | #define vminq_x_s8(__a, __b, __p) __arm_vminq_x_s8(__a, __b, __p) | |
2108 | #define vminq_x_s16(__a, __b, __p) __arm_vminq_x_s16(__a, __b, __p) | |
2109 | #define vminq_x_s32(__a, __b, __p) __arm_vminq_x_s32(__a, __b, __p) | |
2110 | #define vminq_x_u8(__a, __b, __p) __arm_vminq_x_u8(__a, __b, __p) | |
2111 | #define vminq_x_u16(__a, __b, __p) __arm_vminq_x_u16(__a, __b, __p) | |
2112 | #define vminq_x_u32(__a, __b, __p) __arm_vminq_x_u32(__a, __b, __p) | |
2113 | #define vmaxq_x_s8(__a, __b, __p) __arm_vmaxq_x_s8(__a, __b, __p) | |
2114 | #define vmaxq_x_s16(__a, __b, __p) __arm_vmaxq_x_s16(__a, __b, __p) | |
2115 | #define vmaxq_x_s32(__a, __b, __p) __arm_vmaxq_x_s32(__a, __b, __p) | |
2116 | #define vmaxq_x_u8(__a, __b, __p) __arm_vmaxq_x_u8(__a, __b, __p) | |
2117 | #define vmaxq_x_u16(__a, __b, __p) __arm_vmaxq_x_u16(__a, __b, __p) | |
2118 | #define vmaxq_x_u32(__a, __b, __p) __arm_vmaxq_x_u32(__a, __b, __p) | |
2119 | #define vabdq_x_s8(__a, __b, __p) __arm_vabdq_x_s8(__a, __b, __p) | |
2120 | #define vabdq_x_s16(__a, __b, __p) __arm_vabdq_x_s16(__a, __b, __p) | |
2121 | #define vabdq_x_s32(__a, __b, __p) __arm_vabdq_x_s32(__a, __b, __p) | |
2122 | #define vabdq_x_u8(__a, __b, __p) __arm_vabdq_x_u8(__a, __b, __p) | |
2123 | #define vabdq_x_u16(__a, __b, __p) __arm_vabdq_x_u16(__a, __b, __p) | |
2124 | #define vabdq_x_u32(__a, __b, __p) __arm_vabdq_x_u32(__a, __b, __p) | |
2125 | #define vabsq_x_s8(__a, __p) __arm_vabsq_x_s8(__a, __p) | |
2126 | #define vabsq_x_s16(__a, __p) __arm_vabsq_x_s16(__a, __p) | |
2127 | #define vabsq_x_s32(__a, __p) __arm_vabsq_x_s32(__a, __p) | |
2128 | #define vaddq_x_s8(__a, __b, __p) __arm_vaddq_x_s8(__a, __b, __p) | |
2129 | #define vaddq_x_s16(__a, __b, __p) __arm_vaddq_x_s16(__a, __b, __p) | |
2130 | #define vaddq_x_s32(__a, __b, __p) __arm_vaddq_x_s32(__a, __b, __p) | |
2131 | #define vaddq_x_n_s8(__a, __b, __p) __arm_vaddq_x_n_s8(__a, __b, __p) | |
2132 | #define vaddq_x_n_s16(__a, __b, __p) __arm_vaddq_x_n_s16(__a, __b, __p) | |
2133 | #define vaddq_x_n_s32(__a, __b, __p) __arm_vaddq_x_n_s32(__a, __b, __p) | |
2134 | #define vaddq_x_u8(__a, __b, __p) __arm_vaddq_x_u8(__a, __b, __p) | |
2135 | #define vaddq_x_u16(__a, __b, __p) __arm_vaddq_x_u16(__a, __b, __p) | |
2136 | #define vaddq_x_u32(__a, __b, __p) __arm_vaddq_x_u32(__a, __b, __p) | |
2137 | #define vaddq_x_n_u8(__a, __b, __p) __arm_vaddq_x_n_u8(__a, __b, __p) | |
2138 | #define vaddq_x_n_u16(__a, __b, __p) __arm_vaddq_x_n_u16(__a, __b, __p) | |
2139 | #define vaddq_x_n_u32(__a, __b, __p) __arm_vaddq_x_n_u32(__a, __b, __p) | |
2140 | #define vclsq_x_s8(__a, __p) __arm_vclsq_x_s8(__a, __p) | |
2141 | #define vclsq_x_s16(__a, __p) __arm_vclsq_x_s16(__a, __p) | |
2142 | #define vclsq_x_s32(__a, __p) __arm_vclsq_x_s32(__a, __p) | |
2143 | #define vclzq_x_s8(__a, __p) __arm_vclzq_x_s8(__a, __p) | |
2144 | #define vclzq_x_s16(__a, __p) __arm_vclzq_x_s16(__a, __p) | |
2145 | #define vclzq_x_s32(__a, __p) __arm_vclzq_x_s32(__a, __p) | |
2146 | #define vclzq_x_u8(__a, __p) __arm_vclzq_x_u8(__a, __p) | |
2147 | #define vclzq_x_u16(__a, __p) __arm_vclzq_x_u16(__a, __p) | |
2148 | #define vclzq_x_u32(__a, __p) __arm_vclzq_x_u32(__a, __p) | |
2149 | #define vnegq_x_s8(__a, __p) __arm_vnegq_x_s8(__a, __p) | |
2150 | #define vnegq_x_s16(__a, __p) __arm_vnegq_x_s16(__a, __p) | |
2151 | #define vnegq_x_s32(__a, __p) __arm_vnegq_x_s32(__a, __p) | |
2152 | #define vmulhq_x_s8(__a, __b, __p) __arm_vmulhq_x_s8(__a, __b, __p) | |
2153 | #define vmulhq_x_s16(__a, __b, __p) __arm_vmulhq_x_s16(__a, __b, __p) | |
2154 | #define vmulhq_x_s32(__a, __b, __p) __arm_vmulhq_x_s32(__a, __b, __p) | |
2155 | #define vmulhq_x_u8(__a, __b, __p) __arm_vmulhq_x_u8(__a, __b, __p) | |
2156 | #define vmulhq_x_u16(__a, __b, __p) __arm_vmulhq_x_u16(__a, __b, __p) | |
2157 | #define vmulhq_x_u32(__a, __b, __p) __arm_vmulhq_x_u32(__a, __b, __p) | |
2158 | #define vmullbq_poly_x_p8(__a, __b, __p) __arm_vmullbq_poly_x_p8(__a, __b, __p) | |
2159 | #define vmullbq_poly_x_p16(__a, __b, __p) __arm_vmullbq_poly_x_p16(__a, __b, __p) | |
2160 | #define vmullbq_int_x_s8(__a, __b, __p) __arm_vmullbq_int_x_s8(__a, __b, __p) | |
2161 | #define vmullbq_int_x_s16(__a, __b, __p) __arm_vmullbq_int_x_s16(__a, __b, __p) | |
2162 | #define vmullbq_int_x_s32(__a, __b, __p) __arm_vmullbq_int_x_s32(__a, __b, __p) | |
2163 | #define vmullbq_int_x_u8(__a, __b, __p) __arm_vmullbq_int_x_u8(__a, __b, __p) | |
2164 | #define vmullbq_int_x_u16(__a, __b, __p) __arm_vmullbq_int_x_u16(__a, __b, __p) | |
2165 | #define vmullbq_int_x_u32(__a, __b, __p) __arm_vmullbq_int_x_u32(__a, __b, __p) | |
2166 | #define vmulltq_poly_x_p8(__a, __b, __p) __arm_vmulltq_poly_x_p8(__a, __b, __p) | |
2167 | #define vmulltq_poly_x_p16(__a, __b, __p) __arm_vmulltq_poly_x_p16(__a, __b, __p) | |
2168 | #define vmulltq_int_x_s8(__a, __b, __p) __arm_vmulltq_int_x_s8(__a, __b, __p) | |
2169 | #define vmulltq_int_x_s16(__a, __b, __p) __arm_vmulltq_int_x_s16(__a, __b, __p) | |
2170 | #define vmulltq_int_x_s32(__a, __b, __p) __arm_vmulltq_int_x_s32(__a, __b, __p) | |
2171 | #define vmulltq_int_x_u8(__a, __b, __p) __arm_vmulltq_int_x_u8(__a, __b, __p) | |
2172 | #define vmulltq_int_x_u16(__a, __b, __p) __arm_vmulltq_int_x_u16(__a, __b, __p) | |
2173 | #define vmulltq_int_x_u32(__a, __b, __p) __arm_vmulltq_int_x_u32(__a, __b, __p) | |
2174 | #define vmulq_x_s8(__a, __b, __p) __arm_vmulq_x_s8(__a, __b, __p) | |
2175 | #define vmulq_x_s16(__a, __b, __p) __arm_vmulq_x_s16(__a, __b, __p) | |
2176 | #define vmulq_x_s32(__a, __b, __p) __arm_vmulq_x_s32(__a, __b, __p) | |
2177 | #define vmulq_x_n_s8(__a, __b, __p) __arm_vmulq_x_n_s8(__a, __b, __p) | |
2178 | #define vmulq_x_n_s16(__a, __b, __p) __arm_vmulq_x_n_s16(__a, __b, __p) | |
2179 | #define vmulq_x_n_s32(__a, __b, __p) __arm_vmulq_x_n_s32(__a, __b, __p) | |
2180 | #define vmulq_x_u8(__a, __b, __p) __arm_vmulq_x_u8(__a, __b, __p) | |
2181 | #define vmulq_x_u16(__a, __b, __p) __arm_vmulq_x_u16(__a, __b, __p) | |
2182 | #define vmulq_x_u32(__a, __b, __p) __arm_vmulq_x_u32(__a, __b, __p) | |
2183 | #define vmulq_x_n_u8(__a, __b, __p) __arm_vmulq_x_n_u8(__a, __b, __p) | |
2184 | #define vmulq_x_n_u16(__a, __b, __p) __arm_vmulq_x_n_u16(__a, __b, __p) | |
2185 | #define vmulq_x_n_u32(__a, __b, __p) __arm_vmulq_x_n_u32(__a, __b, __p) | |
2186 | #define vsubq_x_s8(__a, __b, __p) __arm_vsubq_x_s8(__a, __b, __p) | |
2187 | #define vsubq_x_s16(__a, __b, __p) __arm_vsubq_x_s16(__a, __b, __p) | |
2188 | #define vsubq_x_s32(__a, __b, __p) __arm_vsubq_x_s32(__a, __b, __p) | |
2189 | #define vsubq_x_n_s8(__a, __b, __p) __arm_vsubq_x_n_s8(__a, __b, __p) | |
2190 | #define vsubq_x_n_s16(__a, __b, __p) __arm_vsubq_x_n_s16(__a, __b, __p) | |
2191 | #define vsubq_x_n_s32(__a, __b, __p) __arm_vsubq_x_n_s32(__a, __b, __p) | |
2192 | #define vsubq_x_u8(__a, __b, __p) __arm_vsubq_x_u8(__a, __b, __p) | |
2193 | #define vsubq_x_u16(__a, __b, __p) __arm_vsubq_x_u16(__a, __b, __p) | |
2194 | #define vsubq_x_u32(__a, __b, __p) __arm_vsubq_x_u32(__a, __b, __p) | |
2195 | #define vsubq_x_n_u8(__a, __b, __p) __arm_vsubq_x_n_u8(__a, __b, __p) | |
2196 | #define vsubq_x_n_u16(__a, __b, __p) __arm_vsubq_x_n_u16(__a, __b, __p) | |
2197 | #define vsubq_x_n_u32(__a, __b, __p) __arm_vsubq_x_n_u32(__a, __b, __p) | |
2198 | #define vcaddq_rot90_x_s8(__a, __b, __p) __arm_vcaddq_rot90_x_s8(__a, __b, __p) | |
2199 | #define vcaddq_rot90_x_s16(__a, __b, __p) __arm_vcaddq_rot90_x_s16(__a, __b, __p) | |
2200 | #define vcaddq_rot90_x_s32(__a, __b, __p) __arm_vcaddq_rot90_x_s32(__a, __b, __p) | |
2201 | #define vcaddq_rot90_x_u8(__a, __b, __p) __arm_vcaddq_rot90_x_u8(__a, __b, __p) | |
2202 | #define vcaddq_rot90_x_u16(__a, __b, __p) __arm_vcaddq_rot90_x_u16(__a, __b, __p) | |
2203 | #define vcaddq_rot90_x_u32(__a, __b, __p) __arm_vcaddq_rot90_x_u32(__a, __b, __p) | |
2204 | #define vcaddq_rot270_x_s8(__a, __b, __p) __arm_vcaddq_rot270_x_s8(__a, __b, __p) | |
2205 | #define vcaddq_rot270_x_s16(__a, __b, __p) __arm_vcaddq_rot270_x_s16(__a, __b, __p) | |
2206 | #define vcaddq_rot270_x_s32(__a, __b, __p) __arm_vcaddq_rot270_x_s32(__a, __b, __p) | |
2207 | #define vcaddq_rot270_x_u8(__a, __b, __p) __arm_vcaddq_rot270_x_u8(__a, __b, __p) | |
2208 | #define vcaddq_rot270_x_u16(__a, __b, __p) __arm_vcaddq_rot270_x_u16(__a, __b, __p) | |
2209 | #define vcaddq_rot270_x_u32(__a, __b, __p) __arm_vcaddq_rot270_x_u32(__a, __b, __p) | |
2210 | #define vhaddq_x_n_s8(__a, __b, __p) __arm_vhaddq_x_n_s8(__a, __b, __p) | |
2211 | #define vhaddq_x_n_s16(__a, __b, __p) __arm_vhaddq_x_n_s16(__a, __b, __p) | |
2212 | #define vhaddq_x_n_s32(__a, __b, __p) __arm_vhaddq_x_n_s32(__a, __b, __p) | |
2213 | #define vhaddq_x_n_u8(__a, __b, __p) __arm_vhaddq_x_n_u8(__a, __b, __p) | |
2214 | #define vhaddq_x_n_u16(__a, __b, __p) __arm_vhaddq_x_n_u16(__a, __b, __p) | |
2215 | #define vhaddq_x_n_u32(__a, __b, __p) __arm_vhaddq_x_n_u32(__a, __b, __p) | |
2216 | #define vhaddq_x_s8(__a, __b, __p) __arm_vhaddq_x_s8(__a, __b, __p) | |
2217 | #define vhaddq_x_s16(__a, __b, __p) __arm_vhaddq_x_s16(__a, __b, __p) | |
2218 | #define vhaddq_x_s32(__a, __b, __p) __arm_vhaddq_x_s32(__a, __b, __p) | |
2219 | #define vhaddq_x_u8(__a, __b, __p) __arm_vhaddq_x_u8(__a, __b, __p) | |
2220 | #define vhaddq_x_u16(__a, __b, __p) __arm_vhaddq_x_u16(__a, __b, __p) | |
2221 | #define vhaddq_x_u32(__a, __b, __p) __arm_vhaddq_x_u32(__a, __b, __p) | |
2222 | #define vhcaddq_rot90_x_s8(__a, __b, __p) __arm_vhcaddq_rot90_x_s8(__a, __b, __p) | |
2223 | #define vhcaddq_rot90_x_s16(__a, __b, __p) __arm_vhcaddq_rot90_x_s16(__a, __b, __p) | |
2224 | #define vhcaddq_rot90_x_s32(__a, __b, __p) __arm_vhcaddq_rot90_x_s32(__a, __b, __p) | |
2225 | #define vhcaddq_rot270_x_s8(__a, __b, __p) __arm_vhcaddq_rot270_x_s8(__a, __b, __p) | |
2226 | #define vhcaddq_rot270_x_s16(__a, __b, __p) __arm_vhcaddq_rot270_x_s16(__a, __b, __p) | |
2227 | #define vhcaddq_rot270_x_s32(__a, __b, __p) __arm_vhcaddq_rot270_x_s32(__a, __b, __p) | |
2228 | #define vhsubq_x_n_s8(__a, __b, __p) __arm_vhsubq_x_n_s8(__a, __b, __p) | |
2229 | #define vhsubq_x_n_s16(__a, __b, __p) __arm_vhsubq_x_n_s16(__a, __b, __p) | |
2230 | #define vhsubq_x_n_s32(__a, __b, __p) __arm_vhsubq_x_n_s32(__a, __b, __p) | |
2231 | #define vhsubq_x_n_u8(__a, __b, __p) __arm_vhsubq_x_n_u8(__a, __b, __p) | |
2232 | #define vhsubq_x_n_u16(__a, __b, __p) __arm_vhsubq_x_n_u16(__a, __b, __p) | |
2233 | #define vhsubq_x_n_u32(__a, __b, __p) __arm_vhsubq_x_n_u32(__a, __b, __p) | |
2234 | #define vhsubq_x_s8(__a, __b, __p) __arm_vhsubq_x_s8(__a, __b, __p) | |
2235 | #define vhsubq_x_s16(__a, __b, __p) __arm_vhsubq_x_s16(__a, __b, __p) | |
2236 | #define vhsubq_x_s32(__a, __b, __p) __arm_vhsubq_x_s32(__a, __b, __p) | |
2237 | #define vhsubq_x_u8(__a, __b, __p) __arm_vhsubq_x_u8(__a, __b, __p) | |
2238 | #define vhsubq_x_u16(__a, __b, __p) __arm_vhsubq_x_u16(__a, __b, __p) | |
2239 | #define vhsubq_x_u32(__a, __b, __p) __arm_vhsubq_x_u32(__a, __b, __p) | |
2240 | #define vrhaddq_x_s8(__a, __b, __p) __arm_vrhaddq_x_s8(__a, __b, __p) | |
2241 | #define vrhaddq_x_s16(__a, __b, __p) __arm_vrhaddq_x_s16(__a, __b, __p) | |
2242 | #define vrhaddq_x_s32(__a, __b, __p) __arm_vrhaddq_x_s32(__a, __b, __p) | |
2243 | #define vrhaddq_x_u8(__a, __b, __p) __arm_vrhaddq_x_u8(__a, __b, __p) | |
2244 | #define vrhaddq_x_u16(__a, __b, __p) __arm_vrhaddq_x_u16(__a, __b, __p) | |
2245 | #define vrhaddq_x_u32(__a, __b, __p) __arm_vrhaddq_x_u32(__a, __b, __p) | |
2246 | #define vrmulhq_x_s8(__a, __b, __p) __arm_vrmulhq_x_s8(__a, __b, __p) | |
2247 | #define vrmulhq_x_s16(__a, __b, __p) __arm_vrmulhq_x_s16(__a, __b, __p) | |
2248 | #define vrmulhq_x_s32(__a, __b, __p) __arm_vrmulhq_x_s32(__a, __b, __p) | |
2249 | #define vrmulhq_x_u8(__a, __b, __p) __arm_vrmulhq_x_u8(__a, __b, __p) | |
2250 | #define vrmulhq_x_u16(__a, __b, __p) __arm_vrmulhq_x_u16(__a, __b, __p) | |
2251 | #define vrmulhq_x_u32(__a, __b, __p) __arm_vrmulhq_x_u32(__a, __b, __p) | |
2252 | #define vandq_x_s8(__a, __b, __p) __arm_vandq_x_s8(__a, __b, __p) | |
2253 | #define vandq_x_s16(__a, __b, __p) __arm_vandq_x_s16(__a, __b, __p) | |
2254 | #define vandq_x_s32(__a, __b, __p) __arm_vandq_x_s32(__a, __b, __p) | |
2255 | #define vandq_x_u8(__a, __b, __p) __arm_vandq_x_u8(__a, __b, __p) | |
2256 | #define vandq_x_u16(__a, __b, __p) __arm_vandq_x_u16(__a, __b, __p) | |
2257 | #define vandq_x_u32(__a, __b, __p) __arm_vandq_x_u32(__a, __b, __p) | |
2258 | #define vbicq_x_s8(__a, __b, __p) __arm_vbicq_x_s8(__a, __b, __p) | |
2259 | #define vbicq_x_s16(__a, __b, __p) __arm_vbicq_x_s16(__a, __b, __p) | |
2260 | #define vbicq_x_s32(__a, __b, __p) __arm_vbicq_x_s32(__a, __b, __p) | |
2261 | #define vbicq_x_u8(__a, __b, __p) __arm_vbicq_x_u8(__a, __b, __p) | |
2262 | #define vbicq_x_u16(__a, __b, __p) __arm_vbicq_x_u16(__a, __b, __p) | |
2263 | #define vbicq_x_u32(__a, __b, __p) __arm_vbicq_x_u32(__a, __b, __p) | |
2264 | #define vbrsrq_x_n_s8(__a, __b, __p) __arm_vbrsrq_x_n_s8(__a, __b, __p) | |
2265 | #define vbrsrq_x_n_s16(__a, __b, __p) __arm_vbrsrq_x_n_s16(__a, __b, __p) | |
2266 | #define vbrsrq_x_n_s32(__a, __b, __p) __arm_vbrsrq_x_n_s32(__a, __b, __p) | |
2267 | #define vbrsrq_x_n_u8(__a, __b, __p) __arm_vbrsrq_x_n_u8(__a, __b, __p) | |
2268 | #define vbrsrq_x_n_u16(__a, __b, __p) __arm_vbrsrq_x_n_u16(__a, __b, __p) | |
2269 | #define vbrsrq_x_n_u32(__a, __b, __p) __arm_vbrsrq_x_n_u32(__a, __b, __p) | |
2270 | #define veorq_x_s8(__a, __b, __p) __arm_veorq_x_s8(__a, __b, __p) | |
2271 | #define veorq_x_s16(__a, __b, __p) __arm_veorq_x_s16(__a, __b, __p) | |
2272 | #define veorq_x_s32(__a, __b, __p) __arm_veorq_x_s32(__a, __b, __p) | |
2273 | #define veorq_x_u8(__a, __b, __p) __arm_veorq_x_u8(__a, __b, __p) | |
2274 | #define veorq_x_u16(__a, __b, __p) __arm_veorq_x_u16(__a, __b, __p) | |
2275 | #define veorq_x_u32(__a, __b, __p) __arm_veorq_x_u32(__a, __b, __p) | |
2276 | #define vmovlbq_x_s8(__a, __p) __arm_vmovlbq_x_s8(__a, __p) | |
2277 | #define vmovlbq_x_s16(__a, __p) __arm_vmovlbq_x_s16(__a, __p) | |
2278 | #define vmovlbq_x_u8(__a, __p) __arm_vmovlbq_x_u8(__a, __p) | |
2279 | #define vmovlbq_x_u16(__a, __p) __arm_vmovlbq_x_u16(__a, __p) | |
2280 | #define vmovltq_x_s8(__a, __p) __arm_vmovltq_x_s8(__a, __p) | |
2281 | #define vmovltq_x_s16(__a, __p) __arm_vmovltq_x_s16(__a, __p) | |
2282 | #define vmovltq_x_u8(__a, __p) __arm_vmovltq_x_u8(__a, __p) | |
2283 | #define vmovltq_x_u16(__a, __p) __arm_vmovltq_x_u16(__a, __p) | |
2284 | #define vmvnq_x_s8(__a, __p) __arm_vmvnq_x_s8(__a, __p) | |
2285 | #define vmvnq_x_s16(__a, __p) __arm_vmvnq_x_s16(__a, __p) | |
2286 | #define vmvnq_x_s32(__a, __p) __arm_vmvnq_x_s32(__a, __p) | |
2287 | #define vmvnq_x_u8(__a, __p) __arm_vmvnq_x_u8(__a, __p) | |
2288 | #define vmvnq_x_u16(__a, __p) __arm_vmvnq_x_u16(__a, __p) | |
2289 | #define vmvnq_x_u32(__a, __p) __arm_vmvnq_x_u32(__a, __p) | |
2290 | #define vmvnq_x_n_s16( __imm, __p) __arm_vmvnq_x_n_s16( __imm, __p) | |
2291 | #define vmvnq_x_n_s32( __imm, __p) __arm_vmvnq_x_n_s32( __imm, __p) | |
2292 | #define vmvnq_x_n_u16( __imm, __p) __arm_vmvnq_x_n_u16( __imm, __p) | |
2293 | #define vmvnq_x_n_u32( __imm, __p) __arm_vmvnq_x_n_u32( __imm, __p) | |
2294 | #define vornq_x_s8(__a, __b, __p) __arm_vornq_x_s8(__a, __b, __p) | |
2295 | #define vornq_x_s16(__a, __b, __p) __arm_vornq_x_s16(__a, __b, __p) | |
2296 | #define vornq_x_s32(__a, __b, __p) __arm_vornq_x_s32(__a, __b, __p) | |
2297 | #define vornq_x_u8(__a, __b, __p) __arm_vornq_x_u8(__a, __b, __p) | |
2298 | #define vornq_x_u16(__a, __b, __p) __arm_vornq_x_u16(__a, __b, __p) | |
2299 | #define vornq_x_u32(__a, __b, __p) __arm_vornq_x_u32(__a, __b, __p) | |
2300 | #define vorrq_x_s8(__a, __b, __p) __arm_vorrq_x_s8(__a, __b, __p) | |
2301 | #define vorrq_x_s16(__a, __b, __p) __arm_vorrq_x_s16(__a, __b, __p) | |
2302 | #define vorrq_x_s32(__a, __b, __p) __arm_vorrq_x_s32(__a, __b, __p) | |
2303 | #define vorrq_x_u8(__a, __b, __p) __arm_vorrq_x_u8(__a, __b, __p) | |
2304 | #define vorrq_x_u16(__a, __b, __p) __arm_vorrq_x_u16(__a, __b, __p) | |
2305 | #define vorrq_x_u32(__a, __b, __p) __arm_vorrq_x_u32(__a, __b, __p) | |
2306 | #define vrev16q_x_s8(__a, __p) __arm_vrev16q_x_s8(__a, __p) | |
2307 | #define vrev16q_x_u8(__a, __p) __arm_vrev16q_x_u8(__a, __p) | |
2308 | #define vrev32q_x_s8(__a, __p) __arm_vrev32q_x_s8(__a, __p) | |
2309 | #define vrev32q_x_s16(__a, __p) __arm_vrev32q_x_s16(__a, __p) | |
2310 | #define vrev32q_x_u8(__a, __p) __arm_vrev32q_x_u8(__a, __p) | |
2311 | #define vrev32q_x_u16(__a, __p) __arm_vrev32q_x_u16(__a, __p) | |
2312 | #define vrev64q_x_s8(__a, __p) __arm_vrev64q_x_s8(__a, __p) | |
2313 | #define vrev64q_x_s16(__a, __p) __arm_vrev64q_x_s16(__a, __p) | |
2314 | #define vrev64q_x_s32(__a, __p) __arm_vrev64q_x_s32(__a, __p) | |
2315 | #define vrev64q_x_u8(__a, __p) __arm_vrev64q_x_u8(__a, __p) | |
2316 | #define vrev64q_x_u16(__a, __p) __arm_vrev64q_x_u16(__a, __p) | |
2317 | #define vrev64q_x_u32(__a, __p) __arm_vrev64q_x_u32(__a, __p) | |
2318 | #define vrshlq_x_s8(__a, __b, __p) __arm_vrshlq_x_s8(__a, __b, __p) | |
2319 | #define vrshlq_x_s16(__a, __b, __p) __arm_vrshlq_x_s16(__a, __b, __p) | |
2320 | #define vrshlq_x_s32(__a, __b, __p) __arm_vrshlq_x_s32(__a, __b, __p) | |
2321 | #define vrshlq_x_u8(__a, __b, __p) __arm_vrshlq_x_u8(__a, __b, __p) | |
2322 | #define vrshlq_x_u16(__a, __b, __p) __arm_vrshlq_x_u16(__a, __b, __p) | |
2323 | #define vrshlq_x_u32(__a, __b, __p) __arm_vrshlq_x_u32(__a, __b, __p) | |
2324 | #define vshllbq_x_n_s8(__a, __imm, __p) __arm_vshllbq_x_n_s8(__a, __imm, __p) | |
2325 | #define vshllbq_x_n_s16(__a, __imm, __p) __arm_vshllbq_x_n_s16(__a, __imm, __p) | |
2326 | #define vshllbq_x_n_u8(__a, __imm, __p) __arm_vshllbq_x_n_u8(__a, __imm, __p) | |
2327 | #define vshllbq_x_n_u16(__a, __imm, __p) __arm_vshllbq_x_n_u16(__a, __imm, __p) | |
2328 | #define vshlltq_x_n_s8(__a, __imm, __p) __arm_vshlltq_x_n_s8(__a, __imm, __p) | |
2329 | #define vshlltq_x_n_s16(__a, __imm, __p) __arm_vshlltq_x_n_s16(__a, __imm, __p) | |
2330 | #define vshlltq_x_n_u8(__a, __imm, __p) __arm_vshlltq_x_n_u8(__a, __imm, __p) | |
2331 | #define vshlltq_x_n_u16(__a, __imm, __p) __arm_vshlltq_x_n_u16(__a, __imm, __p) | |
2332 | #define vshlq_x_s8(__a, __b, __p) __arm_vshlq_x_s8(__a, __b, __p) | |
2333 | #define vshlq_x_s16(__a, __b, __p) __arm_vshlq_x_s16(__a, __b, __p) | |
2334 | #define vshlq_x_s32(__a, __b, __p) __arm_vshlq_x_s32(__a, __b, __p) | |
2335 | #define vshlq_x_u8(__a, __b, __p) __arm_vshlq_x_u8(__a, __b, __p) | |
2336 | #define vshlq_x_u16(__a, __b, __p) __arm_vshlq_x_u16(__a, __b, __p) | |
2337 | #define vshlq_x_u32(__a, __b, __p) __arm_vshlq_x_u32(__a, __b, __p) | |
2338 | #define vshlq_x_n_s8(__a, __imm, __p) __arm_vshlq_x_n_s8(__a, __imm, __p) | |
2339 | #define vshlq_x_n_s16(__a, __imm, __p) __arm_vshlq_x_n_s16(__a, __imm, __p) | |
2340 | #define vshlq_x_n_s32(__a, __imm, __p) __arm_vshlq_x_n_s32(__a, __imm, __p) | |
2341 | #define vshlq_x_n_u8(__a, __imm, __p) __arm_vshlq_x_n_u8(__a, __imm, __p) | |
2342 | #define vshlq_x_n_u16(__a, __imm, __p) __arm_vshlq_x_n_u16(__a, __imm, __p) | |
2343 | #define vshlq_x_n_u32(__a, __imm, __p) __arm_vshlq_x_n_u32(__a, __imm, __p) | |
2344 | #define vrshrq_x_n_s8(__a, __imm, __p) __arm_vrshrq_x_n_s8(__a, __imm, __p) | |
2345 | #define vrshrq_x_n_s16(__a, __imm, __p) __arm_vrshrq_x_n_s16(__a, __imm, __p) | |
2346 | #define vrshrq_x_n_s32(__a, __imm, __p) __arm_vrshrq_x_n_s32(__a, __imm, __p) | |
2347 | #define vrshrq_x_n_u8(__a, __imm, __p) __arm_vrshrq_x_n_u8(__a, __imm, __p) | |
2348 | #define vrshrq_x_n_u16(__a, __imm, __p) __arm_vrshrq_x_n_u16(__a, __imm, __p) | |
2349 | #define vrshrq_x_n_u32(__a, __imm, __p) __arm_vrshrq_x_n_u32(__a, __imm, __p) | |
2350 | #define vshrq_x_n_s8(__a, __imm, __p) __arm_vshrq_x_n_s8(__a, __imm, __p) | |
2351 | #define vshrq_x_n_s16(__a, __imm, __p) __arm_vshrq_x_n_s16(__a, __imm, __p) | |
2352 | #define vshrq_x_n_s32(__a, __imm, __p) __arm_vshrq_x_n_s32(__a, __imm, __p) | |
2353 | #define vshrq_x_n_u8(__a, __imm, __p) __arm_vshrq_x_n_u8(__a, __imm, __p) | |
2354 | #define vshrq_x_n_u16(__a, __imm, __p) __arm_vshrq_x_n_u16(__a, __imm, __p) | |
2355 | #define vshrq_x_n_u32(__a, __imm, __p) __arm_vshrq_x_n_u32(__a, __imm, __p) | |
2356 | #define vdupq_x_n_f16(__a, __p) __arm_vdupq_x_n_f16(__a, __p) | |
2357 | #define vdupq_x_n_f32(__a, __p) __arm_vdupq_x_n_f32(__a, __p) | |
2358 | #define vminnmq_x_f16(__a, __b, __p) __arm_vminnmq_x_f16(__a, __b, __p) | |
2359 | #define vminnmq_x_f32(__a, __b, __p) __arm_vminnmq_x_f32(__a, __b, __p) | |
2360 | #define vmaxnmq_x_f16(__a, __b, __p) __arm_vmaxnmq_x_f16(__a, __b, __p) | |
2361 | #define vmaxnmq_x_f32(__a, __b, __p) __arm_vmaxnmq_x_f32(__a, __b, __p) | |
2362 | #define vabdq_x_f16(__a, __b, __p) __arm_vabdq_x_f16(__a, __b, __p) | |
2363 | #define vabdq_x_f32(__a, __b, __p) __arm_vabdq_x_f32(__a, __b, __p) | |
2364 | #define vabsq_x_f16(__a, __p) __arm_vabsq_x_f16(__a, __p) | |
2365 | #define vabsq_x_f32(__a, __p) __arm_vabsq_x_f32(__a, __p) | |
2366 | #define vaddq_x_f16(__a, __b, __p) __arm_vaddq_x_f16(__a, __b, __p) | |
2367 | #define vaddq_x_f32(__a, __b, __p) __arm_vaddq_x_f32(__a, __b, __p) | |
2368 | #define vaddq_x_n_f16(__a, __b, __p) __arm_vaddq_x_n_f16(__a, __b, __p) | |
2369 | #define vaddq_x_n_f32(__a, __b, __p) __arm_vaddq_x_n_f32(__a, __b, __p) | |
2370 | #define vnegq_x_f16(__a, __p) __arm_vnegq_x_f16(__a, __p) | |
2371 | #define vnegq_x_f32(__a, __p) __arm_vnegq_x_f32(__a, __p) | |
2372 | #define vmulq_x_f16(__a, __b, __p) __arm_vmulq_x_f16(__a, __b, __p) | |
2373 | #define vmulq_x_f32(__a, __b, __p) __arm_vmulq_x_f32(__a, __b, __p) | |
2374 | #define vmulq_x_n_f16(__a, __b, __p) __arm_vmulq_x_n_f16(__a, __b, __p) | |
2375 | #define vmulq_x_n_f32(__a, __b, __p) __arm_vmulq_x_n_f32(__a, __b, __p) | |
2376 | #define vsubq_x_f16(__a, __b, __p) __arm_vsubq_x_f16(__a, __b, __p) | |
2377 | #define vsubq_x_f32(__a, __b, __p) __arm_vsubq_x_f32(__a, __b, __p) | |
2378 | #define vsubq_x_n_f16(__a, __b, __p) __arm_vsubq_x_n_f16(__a, __b, __p) | |
2379 | #define vsubq_x_n_f32(__a, __b, __p) __arm_vsubq_x_n_f32(__a, __b, __p) | |
2380 | #define vcaddq_rot90_x_f16(__a, __b, __p) __arm_vcaddq_rot90_x_f16(__a, __b, __p) | |
2381 | #define vcaddq_rot90_x_f32(__a, __b, __p) __arm_vcaddq_rot90_x_f32(__a, __b, __p) | |
2382 | #define vcaddq_rot270_x_f16(__a, __b, __p) __arm_vcaddq_rot270_x_f16(__a, __b, __p) | |
2383 | #define vcaddq_rot270_x_f32(__a, __b, __p) __arm_vcaddq_rot270_x_f32(__a, __b, __p) | |
2384 | #define vcmulq_x_f16(__a, __b, __p) __arm_vcmulq_x_f16(__a, __b, __p) | |
2385 | #define vcmulq_x_f32(__a, __b, __p) __arm_vcmulq_x_f32(__a, __b, __p) | |
2386 | #define vcmulq_rot90_x_f16(__a, __b, __p) __arm_vcmulq_rot90_x_f16(__a, __b, __p) | |
2387 | #define vcmulq_rot90_x_f32(__a, __b, __p) __arm_vcmulq_rot90_x_f32(__a, __b, __p) | |
2388 | #define vcmulq_rot180_x_f16(__a, __b, __p) __arm_vcmulq_rot180_x_f16(__a, __b, __p) | |
2389 | #define vcmulq_rot180_x_f32(__a, __b, __p) __arm_vcmulq_rot180_x_f32(__a, __b, __p) | |
2390 | #define vcmulq_rot270_x_f16(__a, __b, __p) __arm_vcmulq_rot270_x_f16(__a, __b, __p) | |
2391 | #define vcmulq_rot270_x_f32(__a, __b, __p) __arm_vcmulq_rot270_x_f32(__a, __b, __p) | |
2392 | #define vcvtaq_x_s16_f16(__a, __p) __arm_vcvtaq_x_s16_f16(__a, __p) | |
2393 | #define vcvtaq_x_s32_f32(__a, __p) __arm_vcvtaq_x_s32_f32(__a, __p) | |
2394 | #define vcvtaq_x_u16_f16(__a, __p) __arm_vcvtaq_x_u16_f16(__a, __p) | |
2395 | #define vcvtaq_x_u32_f32(__a, __p) __arm_vcvtaq_x_u32_f32(__a, __p) | |
2396 | #define vcvtnq_x_s16_f16(__a, __p) __arm_vcvtnq_x_s16_f16(__a, __p) | |
2397 | #define vcvtnq_x_s32_f32(__a, __p) __arm_vcvtnq_x_s32_f32(__a, __p) | |
2398 | #define vcvtnq_x_u16_f16(__a, __p) __arm_vcvtnq_x_u16_f16(__a, __p) | |
2399 | #define vcvtnq_x_u32_f32(__a, __p) __arm_vcvtnq_x_u32_f32(__a, __p) | |
2400 | #define vcvtpq_x_s16_f16(__a, __p) __arm_vcvtpq_x_s16_f16(__a, __p) | |
2401 | #define vcvtpq_x_s32_f32(__a, __p) __arm_vcvtpq_x_s32_f32(__a, __p) | |
2402 | #define vcvtpq_x_u16_f16(__a, __p) __arm_vcvtpq_x_u16_f16(__a, __p) | |
2403 | #define vcvtpq_x_u32_f32(__a, __p) __arm_vcvtpq_x_u32_f32(__a, __p) | |
2404 | #define vcvtmq_x_s16_f16(__a, __p) __arm_vcvtmq_x_s16_f16(__a, __p) | |
2405 | #define vcvtmq_x_s32_f32(__a, __p) __arm_vcvtmq_x_s32_f32(__a, __p) | |
2406 | #define vcvtmq_x_u16_f16(__a, __p) __arm_vcvtmq_x_u16_f16(__a, __p) | |
2407 | #define vcvtmq_x_u32_f32(__a, __p) __arm_vcvtmq_x_u32_f32(__a, __p) | |
2408 | #define vcvtbq_x_f32_f16(__a, __p) __arm_vcvtbq_x_f32_f16(__a, __p) | |
2409 | #define vcvttq_x_f32_f16(__a, __p) __arm_vcvttq_x_f32_f16(__a, __p) | |
2410 | #define vcvtq_x_f16_u16(__a, __p) __arm_vcvtq_x_f16_u16(__a, __p) | |
2411 | #define vcvtq_x_f16_s16(__a, __p) __arm_vcvtq_x_f16_s16(__a, __p) | |
2412 | #define vcvtq_x_f32_s32(__a, __p) __arm_vcvtq_x_f32_s32(__a, __p) | |
2413 | #define vcvtq_x_f32_u32(__a, __p) __arm_vcvtq_x_f32_u32(__a, __p) | |
2414 | #define vcvtq_x_n_f16_s16(__a, __imm6, __p) __arm_vcvtq_x_n_f16_s16(__a, __imm6, __p) | |
2415 | #define vcvtq_x_n_f16_u16(__a, __imm6, __p) __arm_vcvtq_x_n_f16_u16(__a, __imm6, __p) | |
2416 | #define vcvtq_x_n_f32_s32(__a, __imm6, __p) __arm_vcvtq_x_n_f32_s32(__a, __imm6, __p) | |
2417 | #define vcvtq_x_n_f32_u32(__a, __imm6, __p) __arm_vcvtq_x_n_f32_u32(__a, __imm6, __p) | |
2418 | #define vcvtq_x_s16_f16(__a, __p) __arm_vcvtq_x_s16_f16(__a, __p) | |
2419 | #define vcvtq_x_s32_f32(__a, __p) __arm_vcvtq_x_s32_f32(__a, __p) | |
2420 | #define vcvtq_x_u16_f16(__a, __p) __arm_vcvtq_x_u16_f16(__a, __p) | |
2421 | #define vcvtq_x_u32_f32(__a, __p) __arm_vcvtq_x_u32_f32(__a, __p) | |
2422 | #define vcvtq_x_n_s16_f16(__a, __imm6, __p) __arm_vcvtq_x_n_s16_f16(__a, __imm6, __p) | |
2423 | #define vcvtq_x_n_s32_f32(__a, __imm6, __p) __arm_vcvtq_x_n_s32_f32(__a, __imm6, __p) | |
2424 | #define vcvtq_x_n_u16_f16(__a, __imm6, __p) __arm_vcvtq_x_n_u16_f16(__a, __imm6, __p) | |
2425 | #define vcvtq_x_n_u32_f32(__a, __imm6, __p) __arm_vcvtq_x_n_u32_f32(__a, __imm6, __p) | |
2426 | #define vrndq_x_f16(__a, __p) __arm_vrndq_x_f16(__a, __p) | |
2427 | #define vrndq_x_f32(__a, __p) __arm_vrndq_x_f32(__a, __p) | |
2428 | #define vrndnq_x_f16(__a, __p) __arm_vrndnq_x_f16(__a, __p) | |
2429 | #define vrndnq_x_f32(__a, __p) __arm_vrndnq_x_f32(__a, __p) | |
2430 | #define vrndmq_x_f16(__a, __p) __arm_vrndmq_x_f16(__a, __p) | |
2431 | #define vrndmq_x_f32(__a, __p) __arm_vrndmq_x_f32(__a, __p) | |
2432 | #define vrndpq_x_f16(__a, __p) __arm_vrndpq_x_f16(__a, __p) | |
2433 | #define vrndpq_x_f32(__a, __p) __arm_vrndpq_x_f32(__a, __p) | |
2434 | #define vrndaq_x_f16(__a, __p) __arm_vrndaq_x_f16(__a, __p) | |
2435 | #define vrndaq_x_f32(__a, __p) __arm_vrndaq_x_f32(__a, __p) | |
2436 | #define vrndxq_x_f16(__a, __p) __arm_vrndxq_x_f16(__a, __p) | |
2437 | #define vrndxq_x_f32(__a, __p) __arm_vrndxq_x_f32(__a, __p) | |
2438 | #define vandq_x_f16(__a, __b, __p) __arm_vandq_x_f16(__a, __b, __p) | |
2439 | #define vandq_x_f32(__a, __b, __p) __arm_vandq_x_f32(__a, __b, __p) | |
2440 | #define vbicq_x_f16(__a, __b, __p) __arm_vbicq_x_f16(__a, __b, __p) | |
2441 | #define vbicq_x_f32(__a, __b, __p) __arm_vbicq_x_f32(__a, __b, __p) | |
2442 | #define vbrsrq_x_n_f16(__a, __b, __p) __arm_vbrsrq_x_n_f16(__a, __b, __p) | |
2443 | #define vbrsrq_x_n_f32(__a, __b, __p) __arm_vbrsrq_x_n_f32(__a, __b, __p) | |
2444 | #define veorq_x_f16(__a, __b, __p) __arm_veorq_x_f16(__a, __b, __p) | |
2445 | #define veorq_x_f32(__a, __b, __p) __arm_veorq_x_f32(__a, __b, __p) | |
2446 | #define vornq_x_f16(__a, __b, __p) __arm_vornq_x_f16(__a, __b, __p) | |
2447 | #define vornq_x_f32(__a, __b, __p) __arm_vornq_x_f32(__a, __b, __p) | |
2448 | #define vorrq_x_f16(__a, __b, __p) __arm_vorrq_x_f16(__a, __b, __p) | |
2449 | #define vorrq_x_f32(__a, __b, __p) __arm_vorrq_x_f32(__a, __b, __p) | |
2450 | #define vrev32q_x_f16(__a, __p) __arm_vrev32q_x_f16(__a, __p) | |
2451 | #define vrev64q_x_f16(__a, __p) __arm_vrev64q_x_f16(__a, __p) | |
2452 | #define vrev64q_x_f32(__a, __p) __arm_vrev64q_x_f32(__a, __p) | |
14782c81 SP |
2453 | #endif |
2454 | ||
2455 | __extension__ extern __inline void | |
2456 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2457 | __arm_vst4q_s8 (int8_t * __addr, int8x16x4_t __value) | |
2458 | { | |
2459 | union { int8x16x4_t __i; __builtin_neon_xi __o; } __rv; | |
2460 | __rv.__i = __value; | |
2461 | __builtin_mve_vst4qv16qi ((__builtin_neon_qi *) __addr, __rv.__o); | |
2462 | } | |
2463 | ||
2464 | __extension__ extern __inline void | |
2465 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2466 | __arm_vst4q_s16 (int16_t * __addr, int16x8x4_t __value) | |
2467 | { | |
2468 | union { int16x8x4_t __i; __builtin_neon_xi __o; } __rv; | |
2469 | __rv.__i = __value; | |
2470 | __builtin_mve_vst4qv8hi ((__builtin_neon_hi *) __addr, __rv.__o); | |
2471 | } | |
2472 | ||
2473 | __extension__ extern __inline void | |
2474 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2475 | __arm_vst4q_s32 (int32_t * __addr, int32x4x4_t __value) | |
2476 | { | |
2477 | union { int32x4x4_t __i; __builtin_neon_xi __o; } __rv; | |
2478 | __rv.__i = __value; | |
2479 | __builtin_mve_vst4qv4si ((__builtin_neon_si *) __addr, __rv.__o); | |
2480 | } | |
2481 | ||
2482 | __extension__ extern __inline void | |
2483 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2484 | __arm_vst4q_u8 (uint8_t * __addr, uint8x16x4_t __value) | |
2485 | { | |
2486 | union { uint8x16x4_t __i; __builtin_neon_xi __o; } __rv; | |
2487 | __rv.__i = __value; | |
2488 | __builtin_mve_vst4qv16qi ((__builtin_neon_qi *) __addr, __rv.__o); | |
2489 | } | |
2490 | ||
2491 | __extension__ extern __inline void | |
2492 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2493 | __arm_vst4q_u16 (uint16_t * __addr, uint16x8x4_t __value) | |
2494 | { | |
2495 | union { uint16x8x4_t __i; __builtin_neon_xi __o; } __rv; | |
2496 | __rv.__i = __value; | |
2497 | __builtin_mve_vst4qv8hi ((__builtin_neon_hi *) __addr, __rv.__o); | |
2498 | } | |
2499 | ||
2500 | __extension__ extern __inline void | |
2501 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2502 | __arm_vst4q_u32 (uint32_t * __addr, uint32x4x4_t __value) | |
2503 | { | |
2504 | union { uint32x4x4_t __i; __builtin_neon_xi __o; } __rv; | |
2505 | __rv.__i = __value; | |
2506 | __builtin_mve_vst4qv4si ((__builtin_neon_si *) __addr, __rv.__o); | |
2507 | } | |
2508 | ||
6df4618c SP |
2509 | __extension__ extern __inline int8x16_t |
2510 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2511 | __arm_vdupq_n_s8 (int8_t __a) | |
2512 | { | |
2513 | return __builtin_mve_vdupq_n_sv16qi (__a); | |
2514 | } | |
2515 | ||
2516 | __extension__ extern __inline int16x8_t | |
2517 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2518 | __arm_vdupq_n_s16 (int16_t __a) | |
2519 | { | |
2520 | return __builtin_mve_vdupq_n_sv8hi (__a); | |
2521 | } | |
2522 | ||
2523 | __extension__ extern __inline int32x4_t | |
2524 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2525 | __arm_vdupq_n_s32 (int32_t __a) | |
2526 | { | |
2527 | return __builtin_mve_vdupq_n_sv4si (__a); | |
2528 | } | |
2529 | ||
2530 | __extension__ extern __inline int8x16_t | |
2531 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2532 | __arm_vabsq_s8 (int8x16_t __a) | |
2533 | { | |
2534 | return __builtin_mve_vabsq_sv16qi (__a); | |
2535 | } | |
2536 | ||
2537 | __extension__ extern __inline int16x8_t | |
2538 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2539 | __arm_vabsq_s16 (int16x8_t __a) | |
2540 | { | |
2541 | return __builtin_mve_vabsq_sv8hi (__a); | |
2542 | } | |
2543 | ||
2544 | __extension__ extern __inline int32x4_t | |
2545 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2546 | __arm_vabsq_s32 (int32x4_t __a) | |
2547 | { | |
2548 | return __builtin_mve_vabsq_sv4si (__a); | |
2549 | } | |
2550 | ||
2551 | __extension__ extern __inline int8x16_t | |
2552 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2553 | __arm_vclsq_s8 (int8x16_t __a) | |
2554 | { | |
2555 | return __builtin_mve_vclsq_sv16qi (__a); | |
2556 | } | |
2557 | ||
2558 | __extension__ extern __inline int16x8_t | |
2559 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2560 | __arm_vclsq_s16 (int16x8_t __a) | |
2561 | { | |
2562 | return __builtin_mve_vclsq_sv8hi (__a); | |
2563 | } | |
2564 | ||
2565 | __extension__ extern __inline int32x4_t | |
2566 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2567 | __arm_vclsq_s32 (int32x4_t __a) | |
2568 | { | |
2569 | return __builtin_mve_vclsq_sv4si (__a); | |
2570 | } | |
2571 | ||
2572 | __extension__ extern __inline int8x16_t | |
2573 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2574 | __arm_vclzq_s8 (int8x16_t __a) | |
2575 | { | |
2576 | return __builtin_mve_vclzq_sv16qi (__a); | |
2577 | } | |
2578 | ||
2579 | __extension__ extern __inline int16x8_t | |
2580 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2581 | __arm_vclzq_s16 (int16x8_t __a) | |
2582 | { | |
2583 | return __builtin_mve_vclzq_sv8hi (__a); | |
2584 | } | |
2585 | ||
2586 | __extension__ extern __inline int32x4_t | |
2587 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2588 | __arm_vclzq_s32 (int32x4_t __a) | |
2589 | { | |
2590 | return __builtin_mve_vclzq_sv4si (__a); | |
2591 | } | |
2592 | ||
2593 | __extension__ extern __inline int8x16_t | |
2594 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2595 | __arm_vnegq_s8 (int8x16_t __a) | |
2596 | { | |
2597 | return __builtin_mve_vnegq_sv16qi (__a); | |
2598 | } | |
2599 | ||
2600 | __extension__ extern __inline int16x8_t | |
2601 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2602 | __arm_vnegq_s16 (int16x8_t __a) | |
2603 | { | |
2604 | return __builtin_mve_vnegq_sv8hi (__a); | |
2605 | } | |
2606 | ||
2607 | __extension__ extern __inline int32x4_t | |
2608 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2609 | __arm_vnegq_s32 (int32x4_t __a) | |
2610 | { | |
2611 | return __builtin_mve_vnegq_sv4si (__a); | |
2612 | } | |
2613 | ||
2614 | __extension__ extern __inline int64_t | |
2615 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2616 | __arm_vaddlvq_s32 (int32x4_t __a) | |
2617 | { | |
2618 | return __builtin_mve_vaddlvq_sv4si (__a); | |
2619 | } | |
2620 | ||
2621 | __extension__ extern __inline int32_t | |
2622 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2623 | __arm_vaddvq_s8 (int8x16_t __a) | |
2624 | { | |
2625 | return __builtin_mve_vaddvq_sv16qi (__a); | |
2626 | } | |
2627 | ||
2628 | __extension__ extern __inline int32_t | |
2629 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2630 | __arm_vaddvq_s16 (int16x8_t __a) | |
2631 | { | |
2632 | return __builtin_mve_vaddvq_sv8hi (__a); | |
2633 | } | |
2634 | ||
2635 | __extension__ extern __inline int32_t | |
2636 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2637 | __arm_vaddvq_s32 (int32x4_t __a) | |
2638 | { | |
2639 | return __builtin_mve_vaddvq_sv4si (__a); | |
2640 | } | |
2641 | ||
2642 | __extension__ extern __inline int16x8_t | |
2643 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2644 | __arm_vmovlbq_s8 (int8x16_t __a) | |
2645 | { | |
2646 | return __builtin_mve_vmovlbq_sv16qi (__a); | |
2647 | } | |
2648 | ||
2649 | __extension__ extern __inline int32x4_t | |
2650 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2651 | __arm_vmovlbq_s16 (int16x8_t __a) | |
2652 | { | |
2653 | return __builtin_mve_vmovlbq_sv8hi (__a); | |
2654 | } | |
2655 | ||
2656 | __extension__ extern __inline int16x8_t | |
2657 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2658 | __arm_vmovltq_s8 (int8x16_t __a) | |
2659 | { | |
2660 | return __builtin_mve_vmovltq_sv16qi (__a); | |
2661 | } | |
2662 | ||
2663 | __extension__ extern __inline int32x4_t | |
2664 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2665 | __arm_vmovltq_s16 (int16x8_t __a) | |
2666 | { | |
2667 | return __builtin_mve_vmovltq_sv8hi (__a); | |
2668 | } | |
2669 | ||
2670 | __extension__ extern __inline int8x16_t | |
2671 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2672 | __arm_vmvnq_s8 (int8x16_t __a) | |
2673 | { | |
2674 | return __builtin_mve_vmvnq_sv16qi (__a); | |
2675 | } | |
2676 | ||
2677 | __extension__ extern __inline int16x8_t | |
2678 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2679 | __arm_vmvnq_s16 (int16x8_t __a) | |
2680 | { | |
2681 | return __builtin_mve_vmvnq_sv8hi (__a); | |
2682 | } | |
2683 | ||
2684 | __extension__ extern __inline int32x4_t | |
2685 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2686 | __arm_vmvnq_s32 (int32x4_t __a) | |
2687 | { | |
2688 | return __builtin_mve_vmvnq_sv4si (__a); | |
2689 | } | |
2690 | ||
5db0eb95 SP |
2691 | __extension__ extern __inline int16x8_t |
2692 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2693 | __arm_vmvnq_n_s16 (const int16_t __imm) | |
2694 | { | |
2695 | return __builtin_mve_vmvnq_n_sv8hi (__imm); | |
2696 | } | |
2697 | ||
2698 | __extension__ extern __inline int32x4_t | |
2699 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2700 | __arm_vmvnq_n_s32 (const int32_t __imm) | |
2701 | { | |
2702 | return __builtin_mve_vmvnq_n_sv4si (__imm); | |
2703 | } | |
2704 | ||
6df4618c SP |
2705 | __extension__ extern __inline int8x16_t |
2706 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2707 | __arm_vrev16q_s8 (int8x16_t __a) | |
2708 | { | |
2709 | return __builtin_mve_vrev16q_sv16qi (__a); | |
2710 | } | |
2711 | ||
2712 | __extension__ extern __inline int8x16_t | |
2713 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2714 | __arm_vrev32q_s8 (int8x16_t __a) | |
2715 | { | |
2716 | return __builtin_mve_vrev32q_sv16qi (__a); | |
2717 | } | |
2718 | ||
2719 | __extension__ extern __inline int16x8_t | |
2720 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2721 | __arm_vrev32q_s16 (int16x8_t __a) | |
2722 | { | |
2723 | return __builtin_mve_vrev32q_sv8hi (__a); | |
2724 | } | |
2725 | ||
5db0eb95 SP |
2726 | __extension__ extern __inline int8x16_t |
2727 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2728 | __arm_vrev64q_s8 (int8x16_t __a) | |
2729 | { | |
2730 | return __builtin_mve_vrev64q_sv16qi (__a); | |
2731 | } | |
2732 | ||
2733 | __extension__ extern __inline int16x8_t | |
2734 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2735 | __arm_vrev64q_s16 (int16x8_t __a) | |
2736 | { | |
2737 | return __builtin_mve_vrev64q_sv8hi (__a); | |
2738 | } | |
2739 | ||
2740 | __extension__ extern __inline int32x4_t | |
2741 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2742 | __arm_vrev64q_s32 (int32x4_t __a) | |
2743 | { | |
2744 | return __builtin_mve_vrev64q_sv4si (__a); | |
2745 | } | |
2746 | ||
6df4618c SP |
2747 | __extension__ extern __inline int8x16_t |
2748 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2749 | __arm_vqabsq_s8 (int8x16_t __a) | |
2750 | { | |
2751 | return __builtin_mve_vqabsq_sv16qi (__a); | |
2752 | } | |
2753 | ||
2754 | __extension__ extern __inline int16x8_t | |
2755 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2756 | __arm_vqabsq_s16 (int16x8_t __a) | |
2757 | { | |
2758 | return __builtin_mve_vqabsq_sv8hi (__a); | |
2759 | } | |
2760 | ||
2761 | __extension__ extern __inline int32x4_t | |
2762 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2763 | __arm_vqabsq_s32 (int32x4_t __a) | |
2764 | { | |
2765 | return __builtin_mve_vqabsq_sv4si (__a); | |
2766 | } | |
2767 | ||
2768 | __extension__ extern __inline int8x16_t | |
2769 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2770 | __arm_vqnegq_s8 (int8x16_t __a) | |
2771 | { | |
2772 | return __builtin_mve_vqnegq_sv16qi (__a); | |
2773 | } | |
2774 | ||
2775 | __extension__ extern __inline int16x8_t | |
2776 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2777 | __arm_vqnegq_s16 (int16x8_t __a) | |
2778 | { | |
2779 | return __builtin_mve_vqnegq_sv8hi (__a); | |
2780 | } | |
2781 | ||
2782 | __extension__ extern __inline int32x4_t | |
2783 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2784 | __arm_vqnegq_s32 (int32x4_t __a) | |
2785 | { | |
2786 | return __builtin_mve_vqnegq_sv4si (__a); | |
2787 | } | |
2788 | ||
5db0eb95 SP |
2789 | __extension__ extern __inline uint8x16_t |
2790 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2791 | __arm_vrev64q_u8 (uint8x16_t __a) | |
2792 | { | |
2793 | return __builtin_mve_vrev64q_uv16qi (__a); | |
2794 | } | |
2795 | ||
2796 | __extension__ extern __inline uint16x8_t | |
2797 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2798 | __arm_vrev64q_u16 (uint16x8_t __a) | |
2799 | { | |
2800 | return __builtin_mve_vrev64q_uv8hi (__a); | |
2801 | } | |
2802 | ||
2803 | __extension__ extern __inline uint32x4_t | |
2804 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2805 | __arm_vrev64q_u32 (uint32x4_t __a) | |
2806 | { | |
2807 | return __builtin_mve_vrev64q_uv4si (__a); | |
2808 | } | |
2809 | ||
6df4618c SP |
2810 | __extension__ extern __inline uint8x16_t |
2811 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2812 | __arm_vmvnq_u8 (uint8x16_t __a) | |
2813 | { | |
2814 | return __builtin_mve_vmvnq_uv16qi (__a); | |
2815 | } | |
2816 | ||
2817 | __extension__ extern __inline uint16x8_t | |
2818 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2819 | __arm_vmvnq_u16 (uint16x8_t __a) | |
2820 | { | |
2821 | return __builtin_mve_vmvnq_uv8hi (__a); | |
2822 | } | |
2823 | ||
2824 | __extension__ extern __inline uint32x4_t | |
2825 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2826 | __arm_vmvnq_u32 (uint32x4_t __a) | |
2827 | { | |
2828 | return __builtin_mve_vmvnq_uv4si (__a); | |
2829 | } | |
2830 | ||
2831 | __extension__ extern __inline uint8x16_t | |
2832 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2833 | __arm_vdupq_n_u8 (uint8_t __a) | |
2834 | { | |
2835 | return __builtin_mve_vdupq_n_uv16qi (__a); | |
2836 | } | |
2837 | ||
2838 | __extension__ extern __inline uint16x8_t | |
2839 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2840 | __arm_vdupq_n_u16 (uint16_t __a) | |
2841 | { | |
2842 | return __builtin_mve_vdupq_n_uv8hi (__a); | |
2843 | } | |
2844 | ||
2845 | __extension__ extern __inline uint32x4_t | |
2846 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2847 | __arm_vdupq_n_u32 (uint32_t __a) | |
2848 | { | |
2849 | return __builtin_mve_vdupq_n_uv4si (__a); | |
2850 | } | |
2851 | ||
2852 | __extension__ extern __inline uint8x16_t | |
2853 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2854 | __arm_vclzq_u8 (uint8x16_t __a) | |
2855 | { | |
2856 | return __builtin_mve_vclzq_uv16qi (__a); | |
2857 | } | |
2858 | ||
2859 | __extension__ extern __inline uint16x8_t | |
2860 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2861 | __arm_vclzq_u16 (uint16x8_t __a) | |
2862 | { | |
2863 | return __builtin_mve_vclzq_uv8hi (__a); | |
2864 | } | |
2865 | ||
2866 | __extension__ extern __inline uint32x4_t | |
2867 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2868 | __arm_vclzq_u32 (uint32x4_t __a) | |
2869 | { | |
2870 | return __builtin_mve_vclzq_uv4si (__a); | |
2871 | } | |
2872 | ||
2873 | __extension__ extern __inline uint32_t | |
2874 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2875 | __arm_vaddvq_u8 (uint8x16_t __a) | |
2876 | { | |
2877 | return __builtin_mve_vaddvq_uv16qi (__a); | |
2878 | } | |
2879 | ||
2880 | __extension__ extern __inline uint32_t | |
2881 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2882 | __arm_vaddvq_u16 (uint16x8_t __a) | |
2883 | { | |
2884 | return __builtin_mve_vaddvq_uv8hi (__a); | |
2885 | } | |
2886 | ||
2887 | __extension__ extern __inline uint32_t | |
2888 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2889 | __arm_vaddvq_u32 (uint32x4_t __a) | |
2890 | { | |
2891 | return __builtin_mve_vaddvq_uv4si (__a); | |
2892 | } | |
2893 | ||
2894 | __extension__ extern __inline uint8x16_t | |
2895 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2896 | __arm_vrev32q_u8 (uint8x16_t __a) | |
2897 | { | |
2898 | return __builtin_mve_vrev32q_uv16qi (__a); | |
2899 | } | |
2900 | ||
2901 | __extension__ extern __inline uint16x8_t | |
2902 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2903 | __arm_vrev32q_u16 (uint16x8_t __a) | |
2904 | { | |
2905 | return __builtin_mve_vrev32q_uv8hi (__a); | |
2906 | } | |
2907 | ||
2908 | __extension__ extern __inline uint16x8_t | |
2909 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2910 | __arm_vmovltq_u8 (uint8x16_t __a) | |
2911 | { | |
2912 | return __builtin_mve_vmovltq_uv16qi (__a); | |
2913 | } | |
2914 | ||
2915 | __extension__ extern __inline uint32x4_t | |
2916 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2917 | __arm_vmovltq_u16 (uint16x8_t __a) | |
2918 | { | |
2919 | return __builtin_mve_vmovltq_uv8hi (__a); | |
2920 | } | |
2921 | ||
2922 | __extension__ extern __inline uint16x8_t | |
2923 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2924 | __arm_vmovlbq_u8 (uint8x16_t __a) | |
2925 | { | |
2926 | return __builtin_mve_vmovlbq_uv16qi (__a); | |
2927 | } | |
2928 | ||
2929 | __extension__ extern __inline uint32x4_t | |
2930 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2931 | __arm_vmovlbq_u16 (uint16x8_t __a) | |
2932 | { | |
2933 | return __builtin_mve_vmovlbq_uv8hi (__a); | |
2934 | } | |
2935 | ||
5db0eb95 SP |
2936 | __extension__ extern __inline uint16x8_t |
2937 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2938 | __arm_vmvnq_n_u16 (const int __imm) | |
2939 | { | |
2940 | return __builtin_mve_vmvnq_n_uv8hi (__imm); | |
2941 | } | |
2942 | ||
2943 | __extension__ extern __inline uint32x4_t | |
2944 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2945 | __arm_vmvnq_n_u32 (const int __imm) | |
2946 | { | |
2947 | return __builtin_mve_vmvnq_n_uv4si (__imm); | |
2948 | } | |
2949 | ||
6df4618c SP |
2950 | __extension__ extern __inline uint8x16_t |
2951 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2952 | __arm_vrev16q_u8 (uint8x16_t __a) | |
2953 | { | |
2954 | return __builtin_mve_vrev16q_uv16qi (__a); | |
2955 | } | |
2956 | ||
2957 | __extension__ extern __inline uint64_t | |
2958 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2959 | __arm_vaddlvq_u32 (uint32x4_t __a) | |
2960 | { | |
2961 | return __builtin_mve_vaddlvq_uv4si (__a); | |
2962 | } | |
2963 | ||
a475f153 SP |
2964 | __extension__ extern __inline int64_t |
2965 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2966 | __arm_vctp16q (uint32_t __a) | |
2967 | { | |
2968 | return __builtin_mve_vctp16qhi (__a); | |
2969 | } | |
2970 | ||
2971 | __extension__ extern __inline mve_pred16_t | |
2972 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2973 | __arm_vctp32q (uint32_t __a) | |
2974 | { | |
2975 | return __builtin_mve_vctp32qhi (__a); | |
2976 | } | |
2977 | ||
2978 | __extension__ extern __inline mve_pred16_t | |
2979 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2980 | __arm_vctp64q (uint32_t __a) | |
2981 | { | |
2982 | return __builtin_mve_vctp64qhi (__a); | |
2983 | } | |
2984 | ||
2985 | __extension__ extern __inline mve_pred16_t | |
2986 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2987 | __arm_vctp8q (uint32_t __a) | |
2988 | { | |
2989 | return __builtin_mve_vctp8qhi (__a); | |
2990 | } | |
2991 | ||
2992 | __extension__ extern __inline mve_pred16_t | |
2993 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
2994 | __arm_vpnot (mve_pred16_t __a) | |
2995 | { | |
2996 | return __builtin_mve_vpnothi (__a); | |
2997 | } | |
2998 | ||
f166a8cd SP |
2999 | __extension__ extern __inline uint8x16_t |
3000 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3001 | __arm_vcreateq_u8 (uint64_t __a, uint64_t __b) | |
3002 | { | |
3003 | return __builtin_mve_vcreateq_uv16qi (__a, __b); | |
3004 | } | |
3005 | ||
3006 | __extension__ extern __inline uint16x8_t | |
3007 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3008 | __arm_vcreateq_u16 (uint64_t __a, uint64_t __b) | |
3009 | { | |
3010 | return __builtin_mve_vcreateq_uv8hi (__a, __b); | |
3011 | } | |
3012 | ||
3013 | __extension__ extern __inline uint32x4_t | |
3014 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3015 | __arm_vcreateq_u32 (uint64_t __a, uint64_t __b) | |
3016 | { | |
3017 | return __builtin_mve_vcreateq_uv4si (__a, __b); | |
3018 | } | |
3019 | ||
3020 | __extension__ extern __inline uint64x2_t | |
3021 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3022 | __arm_vcreateq_u64 (uint64_t __a, uint64_t __b) | |
3023 | { | |
3024 | return __builtin_mve_vcreateq_uv2di (__a, __b); | |
3025 | } | |
3026 | ||
3027 | __extension__ extern __inline int8x16_t | |
3028 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3029 | __arm_vcreateq_s8 (uint64_t __a, uint64_t __b) | |
3030 | { | |
3031 | return __builtin_mve_vcreateq_sv16qi (__a, __b); | |
3032 | } | |
3033 | ||
3034 | __extension__ extern __inline int16x8_t | |
3035 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3036 | __arm_vcreateq_s16 (uint64_t __a, uint64_t __b) | |
3037 | { | |
3038 | return __builtin_mve_vcreateq_sv8hi (__a, __b); | |
3039 | } | |
3040 | ||
3041 | __extension__ extern __inline int32x4_t | |
3042 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3043 | __arm_vcreateq_s32 (uint64_t __a, uint64_t __b) | |
3044 | { | |
3045 | return __builtin_mve_vcreateq_sv4si (__a, __b); | |
3046 | } | |
3047 | ||
3048 | __extension__ extern __inline int64x2_t | |
3049 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3050 | __arm_vcreateq_s64 (uint64_t __a, uint64_t __b) | |
3051 | { | |
3052 | return __builtin_mve_vcreateq_sv2di (__a, __b); | |
3053 | } | |
3054 | ||
3055 | __extension__ extern __inline int8x16_t | |
3056 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3057 | __arm_vshrq_n_s8 (int8x16_t __a, const int __imm) | |
3058 | { | |
3059 | return __builtin_mve_vshrq_n_sv16qi (__a, __imm); | |
3060 | } | |
3061 | ||
3062 | __extension__ extern __inline int16x8_t | |
3063 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3064 | __arm_vshrq_n_s16 (int16x8_t __a, const int __imm) | |
3065 | { | |
3066 | return __builtin_mve_vshrq_n_sv8hi (__a, __imm); | |
3067 | } | |
3068 | ||
3069 | __extension__ extern __inline int32x4_t | |
3070 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3071 | __arm_vshrq_n_s32 (int32x4_t __a, const int __imm) | |
3072 | { | |
3073 | return __builtin_mve_vshrq_n_sv4si (__a, __imm); | |
3074 | } | |
3075 | ||
3076 | __extension__ extern __inline uint8x16_t | |
3077 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3078 | __arm_vshrq_n_u8 (uint8x16_t __a, const int __imm) | |
3079 | { | |
3080 | return __builtin_mve_vshrq_n_uv16qi (__a, __imm); | |
3081 | } | |
3082 | ||
3083 | __extension__ extern __inline uint16x8_t | |
3084 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3085 | __arm_vshrq_n_u16 (uint16x8_t __a, const int __imm) | |
3086 | { | |
3087 | return __builtin_mve_vshrq_n_uv8hi (__a, __imm); | |
3088 | } | |
3089 | ||
3090 | __extension__ extern __inline uint32x4_t | |
3091 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3092 | __arm_vshrq_n_u32 (uint32x4_t __a, const int __imm) | |
3093 | { | |
3094 | return __builtin_mve_vshrq_n_uv4si (__a, __imm); | |
3095 | } | |
d71dba7b SP |
3096 | __extension__ extern __inline int64_t |
3097 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3098 | __arm_vaddlvq_p_s32 (int32x4_t __a, mve_pred16_t __p) | |
3099 | { | |
3100 | return __builtin_mve_vaddlvq_p_sv4si (__a, __p); | |
3101 | } | |
3102 | ||
3103 | __extension__ extern __inline uint64_t | |
3104 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3105 | __arm_vaddlvq_p_u32 (uint32x4_t __a, mve_pred16_t __p) | |
3106 | { | |
3107 | return __builtin_mve_vaddlvq_p_uv4si (__a, __p); | |
3108 | } | |
3109 | ||
3110 | __extension__ extern __inline int32_t | |
3111 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3112 | __arm_vcmpneq_s8 (int8x16_t __a, int8x16_t __b) | |
3113 | { | |
3114 | return __builtin_mve_vcmpneq_sv16qi (__a, __b); | |
3115 | } | |
3116 | ||
3117 | __extension__ extern __inline mve_pred16_t | |
3118 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3119 | __arm_vcmpneq_s16 (int16x8_t __a, int16x8_t __b) | |
3120 | { | |
3121 | return __builtin_mve_vcmpneq_sv8hi (__a, __b); | |
3122 | } | |
3123 | ||
3124 | __extension__ extern __inline mve_pred16_t | |
3125 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3126 | __arm_vcmpneq_s32 (int32x4_t __a, int32x4_t __b) | |
3127 | { | |
3128 | return __builtin_mve_vcmpneq_sv4si (__a, __b); | |
3129 | } | |
3130 | ||
3131 | __extension__ extern __inline mve_pred16_t | |
3132 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3133 | __arm_vcmpneq_u8 (uint8x16_t __a, uint8x16_t __b) | |
3134 | { | |
3135 | return __builtin_mve_vcmpneq_uv16qi (__a, __b); | |
3136 | } | |
3137 | ||
3138 | __extension__ extern __inline mve_pred16_t | |
3139 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3140 | __arm_vcmpneq_u16 (uint16x8_t __a, uint16x8_t __b) | |
3141 | { | |
3142 | return __builtin_mve_vcmpneq_uv8hi (__a, __b); | |
3143 | } | |
3144 | ||
3145 | __extension__ extern __inline mve_pred16_t | |
3146 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3147 | __arm_vcmpneq_u32 (uint32x4_t __a, uint32x4_t __b) | |
3148 | { | |
3149 | return __builtin_mve_vcmpneq_uv4si (__a, __b); | |
3150 | } | |
3151 | ||
3152 | __extension__ extern __inline int8x16_t | |
3153 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3154 | __arm_vshlq_s8 (int8x16_t __a, int8x16_t __b) | |
3155 | { | |
3156 | return __builtin_mve_vshlq_sv16qi (__a, __b); | |
3157 | } | |
3158 | ||
3159 | __extension__ extern __inline int16x8_t | |
3160 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3161 | __arm_vshlq_s16 (int16x8_t __a, int16x8_t __b) | |
3162 | { | |
3163 | return __builtin_mve_vshlq_sv8hi (__a, __b); | |
3164 | } | |
3165 | ||
3166 | __extension__ extern __inline int32x4_t | |
3167 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3168 | __arm_vshlq_s32 (int32x4_t __a, int32x4_t __b) | |
3169 | { | |
3170 | return __builtin_mve_vshlq_sv4si (__a, __b); | |
3171 | } | |
3172 | ||
3173 | __extension__ extern __inline uint8x16_t | |
3174 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3175 | __arm_vshlq_u8 (uint8x16_t __a, int8x16_t __b) | |
3176 | { | |
3177 | return __builtin_mve_vshlq_uv16qi (__a, __b); | |
3178 | } | |
3179 | ||
3180 | __extension__ extern __inline uint16x8_t | |
3181 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3182 | __arm_vshlq_u16 (uint16x8_t __a, int16x8_t __b) | |
3183 | { | |
3184 | return __builtin_mve_vshlq_uv8hi (__a, __b); | |
3185 | } | |
3186 | ||
3187 | __extension__ extern __inline uint32x4_t | |
3188 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3189 | __arm_vshlq_u32 (uint32x4_t __a, int32x4_t __b) | |
3190 | { | |
3191 | return __builtin_mve_vshlq_uv4si (__a, __b); | |
3192 | } | |
33203b4c SP |
3193 | __extension__ extern __inline uint8x16_t |
3194 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3195 | __arm_vsubq_u8 (uint8x16_t __a, uint8x16_t __b) | |
3196 | { | |
3197 | return __builtin_mve_vsubq_uv16qi (__a, __b); | |
3198 | } | |
3199 | ||
3200 | __extension__ extern __inline uint8x16_t | |
3201 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3202 | __arm_vsubq_n_u8 (uint8x16_t __a, uint8_t __b) | |
3203 | { | |
3204 | return __builtin_mve_vsubq_n_uv16qi (__a, __b); | |
3205 | } | |
3206 | ||
3207 | __extension__ extern __inline uint8x16_t | |
3208 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3209 | __arm_vrmulhq_u8 (uint8x16_t __a, uint8x16_t __b) | |
3210 | { | |
3211 | return __builtin_mve_vrmulhq_uv16qi (__a, __b); | |
3212 | } | |
3213 | ||
3214 | __extension__ extern __inline uint8x16_t | |
3215 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3216 | __arm_vrhaddq_u8 (uint8x16_t __a, uint8x16_t __b) | |
3217 | { | |
3218 | return __builtin_mve_vrhaddq_uv16qi (__a, __b); | |
3219 | } | |
3220 | ||
3221 | __extension__ extern __inline uint8x16_t | |
3222 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3223 | __arm_vqsubq_u8 (uint8x16_t __a, uint8x16_t __b) | |
3224 | { | |
3225 | return __builtin_mve_vqsubq_uv16qi (__a, __b); | |
3226 | } | |
3227 | ||
3228 | __extension__ extern __inline uint8x16_t | |
3229 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3230 | __arm_vqsubq_n_u8 (uint8x16_t __a, uint8_t __b) | |
3231 | { | |
3232 | return __builtin_mve_vqsubq_n_uv16qi (__a, __b); | |
3233 | } | |
3234 | ||
3235 | __extension__ extern __inline uint8x16_t | |
3236 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3237 | __arm_vqaddq_u8 (uint8x16_t __a, uint8x16_t __b) | |
3238 | { | |
3239 | return __builtin_mve_vqaddq_uv16qi (__a, __b); | |
3240 | } | |
3241 | ||
3242 | __extension__ extern __inline uint8x16_t | |
3243 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3244 | __arm_vqaddq_n_u8 (uint8x16_t __a, uint8_t __b) | |
3245 | { | |
3246 | return __builtin_mve_vqaddq_n_uv16qi (__a, __b); | |
3247 | } | |
3248 | ||
3249 | __extension__ extern __inline uint8x16_t | |
3250 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3251 | __arm_vorrq_u8 (uint8x16_t __a, uint8x16_t __b) | |
3252 | { | |
3253 | return __builtin_mve_vorrq_uv16qi (__a, __b); | |
3254 | } | |
3255 | ||
3256 | __extension__ extern __inline uint8x16_t | |
3257 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3258 | __arm_vornq_u8 (uint8x16_t __a, uint8x16_t __b) | |
3259 | { | |
3260 | return __builtin_mve_vornq_uv16qi (__a, __b); | |
3261 | } | |
3262 | ||
3263 | __extension__ extern __inline uint8x16_t | |
3264 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3265 | __arm_vmulq_u8 (uint8x16_t __a, uint8x16_t __b) | |
3266 | { | |
3267 | return __builtin_mve_vmulq_uv16qi (__a, __b); | |
3268 | } | |
3269 | ||
3270 | __extension__ extern __inline uint8x16_t | |
3271 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3272 | __arm_vmulq_n_u8 (uint8x16_t __a, uint8_t __b) | |
3273 | { | |
3274 | return __builtin_mve_vmulq_n_uv16qi (__a, __b); | |
3275 | } | |
3276 | ||
3277 | __extension__ extern __inline uint16x8_t | |
3278 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3279 | __arm_vmulltq_int_u8 (uint8x16_t __a, uint8x16_t __b) | |
3280 | { | |
3281 | return __builtin_mve_vmulltq_int_uv16qi (__a, __b); | |
3282 | } | |
3283 | ||
3284 | __extension__ extern __inline uint16x8_t | |
3285 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3286 | __arm_vmullbq_int_u8 (uint8x16_t __a, uint8x16_t __b) | |
3287 | { | |
3288 | return __builtin_mve_vmullbq_int_uv16qi (__a, __b); | |
3289 | } | |
3290 | ||
3291 | __extension__ extern __inline uint8x16_t | |
3292 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3293 | __arm_vmulhq_u8 (uint8x16_t __a, uint8x16_t __b) | |
3294 | { | |
3295 | return __builtin_mve_vmulhq_uv16qi (__a, __b); | |
3296 | } | |
3297 | ||
3298 | __extension__ extern __inline uint32_t | |
3299 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3300 | __arm_vmladavq_u8 (uint8x16_t __a, uint8x16_t __b) | |
3301 | { | |
3302 | return __builtin_mve_vmladavq_uv16qi (__a, __b); | |
3303 | } | |
3304 | ||
3305 | __extension__ extern __inline uint8_t | |
3306 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3307 | __arm_vminvq_u8 (uint8_t __a, uint8x16_t __b) | |
3308 | { | |
3309 | return __builtin_mve_vminvq_uv16qi (__a, __b); | |
3310 | } | |
3311 | ||
3312 | __extension__ extern __inline uint8x16_t | |
3313 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3314 | __arm_vminq_u8 (uint8x16_t __a, uint8x16_t __b) | |
3315 | { | |
3316 | return __builtin_mve_vminq_uv16qi (__a, __b); | |
3317 | } | |
3318 | ||
3319 | __extension__ extern __inline uint8_t | |
3320 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3321 | __arm_vmaxvq_u8 (uint8_t __a, uint8x16_t __b) | |
3322 | { | |
3323 | return __builtin_mve_vmaxvq_uv16qi (__a, __b); | |
3324 | } | |
3325 | ||
3326 | __extension__ extern __inline uint8x16_t | |
3327 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3328 | __arm_vmaxq_u8 (uint8x16_t __a, uint8x16_t __b) | |
3329 | { | |
3330 | return __builtin_mve_vmaxq_uv16qi (__a, __b); | |
3331 | } | |
3332 | ||
3333 | __extension__ extern __inline uint8x16_t | |
3334 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3335 | __arm_vhsubq_u8 (uint8x16_t __a, uint8x16_t __b) | |
3336 | { | |
3337 | return __builtin_mve_vhsubq_uv16qi (__a, __b); | |
3338 | } | |
3339 | ||
3340 | __extension__ extern __inline uint8x16_t | |
3341 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3342 | __arm_vhsubq_n_u8 (uint8x16_t __a, uint8_t __b) | |
3343 | { | |
3344 | return __builtin_mve_vhsubq_n_uv16qi (__a, __b); | |
3345 | } | |
3346 | ||
3347 | __extension__ extern __inline uint8x16_t | |
3348 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3349 | __arm_vhaddq_u8 (uint8x16_t __a, uint8x16_t __b) | |
3350 | { | |
3351 | return __builtin_mve_vhaddq_uv16qi (__a, __b); | |
3352 | } | |
3353 | ||
3354 | __extension__ extern __inline uint8x16_t | |
3355 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3356 | __arm_vhaddq_n_u8 (uint8x16_t __a, uint8_t __b) | |
3357 | { | |
3358 | return __builtin_mve_vhaddq_n_uv16qi (__a, __b); | |
3359 | } | |
3360 | ||
3361 | __extension__ extern __inline uint8x16_t | |
3362 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3363 | __arm_veorq_u8 (uint8x16_t __a, uint8x16_t __b) | |
3364 | { | |
3365 | return __builtin_mve_veorq_uv16qi (__a, __b); | |
3366 | } | |
3367 | ||
3368 | __extension__ extern __inline mve_pred16_t | |
3369 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3370 | __arm_vcmpneq_n_u8 (uint8x16_t __a, uint8_t __b) | |
3371 | { | |
3372 | return __builtin_mve_vcmpneq_n_uv16qi (__a, __b); | |
3373 | } | |
3374 | ||
3375 | __extension__ extern __inline mve_pred16_t | |
3376 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3377 | __arm_vcmphiq_u8 (uint8x16_t __a, uint8x16_t __b) | |
3378 | { | |
3379 | return __builtin_mve_vcmphiq_uv16qi (__a, __b); | |
3380 | } | |
3381 | ||
3382 | __extension__ extern __inline mve_pred16_t | |
3383 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3384 | __arm_vcmphiq_n_u8 (uint8x16_t __a, uint8_t __b) | |
3385 | { | |
3386 | return __builtin_mve_vcmphiq_n_uv16qi (__a, __b); | |
3387 | } | |
3388 | ||
3389 | __extension__ extern __inline mve_pred16_t | |
3390 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3391 | __arm_vcmpeqq_u8 (uint8x16_t __a, uint8x16_t __b) | |
3392 | { | |
3393 | return __builtin_mve_vcmpeqq_uv16qi (__a, __b); | |
3394 | } | |
3395 | ||
3396 | __extension__ extern __inline mve_pred16_t | |
3397 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3398 | __arm_vcmpeqq_n_u8 (uint8x16_t __a, uint8_t __b) | |
3399 | { | |
3400 | return __builtin_mve_vcmpeqq_n_uv16qi (__a, __b); | |
3401 | } | |
3402 | ||
3403 | __extension__ extern __inline mve_pred16_t | |
3404 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3405 | __arm_vcmpcsq_u8 (uint8x16_t __a, uint8x16_t __b) | |
3406 | { | |
3407 | return __builtin_mve_vcmpcsq_uv16qi (__a, __b); | |
3408 | } | |
3409 | ||
3410 | __extension__ extern __inline mve_pred16_t | |
3411 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3412 | __arm_vcmpcsq_n_u8 (uint8x16_t __a, uint8_t __b) | |
3413 | { | |
3414 | return __builtin_mve_vcmpcsq_n_uv16qi (__a, __b); | |
3415 | } | |
3416 | ||
3417 | __extension__ extern __inline uint8x16_t | |
3418 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3419 | __arm_vcaddq_rot90_u8 (uint8x16_t __a, uint8x16_t __b) | |
3420 | { | |
3421 | return __builtin_mve_vcaddq_rot90_uv16qi (__a, __b); | |
3422 | } | |
3423 | ||
3424 | __extension__ extern __inline uint8x16_t | |
3425 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3426 | __arm_vcaddq_rot270_u8 (uint8x16_t __a, uint8x16_t __b) | |
3427 | { | |
3428 | return __builtin_mve_vcaddq_rot270_uv16qi (__a, __b); | |
3429 | } | |
3430 | ||
3431 | __extension__ extern __inline uint8x16_t | |
3432 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3433 | __arm_vbicq_u8 (uint8x16_t __a, uint8x16_t __b) | |
3434 | { | |
3435 | return __builtin_mve_vbicq_uv16qi (__a, __b); | |
3436 | } | |
3437 | ||
3438 | __extension__ extern __inline uint8x16_t | |
3439 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3440 | __arm_vandq_u8 (uint8x16_t __a, uint8x16_t __b) | |
3441 | { | |
3442 | return __builtin_mve_vandq_uv16qi (__a, __b); | |
3443 | } | |
3444 | ||
3445 | __extension__ extern __inline uint32_t | |
3446 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3447 | __arm_vaddvq_p_u8 (uint8x16_t __a, mve_pred16_t __p) | |
3448 | { | |
3449 | return __builtin_mve_vaddvq_p_uv16qi (__a, __p); | |
3450 | } | |
3451 | ||
3452 | __extension__ extern __inline uint32_t | |
3453 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3454 | __arm_vaddvaq_u8 (uint32_t __a, uint8x16_t __b) | |
3455 | { | |
3456 | return __builtin_mve_vaddvaq_uv16qi (__a, __b); | |
3457 | } | |
3458 | ||
3459 | __extension__ extern __inline uint8x16_t | |
3460 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3461 | __arm_vaddq_n_u8 (uint8x16_t __a, uint8_t __b) | |
3462 | { | |
3463 | return __builtin_mve_vaddq_n_uv16qi (__a, __b); | |
3464 | } | |
3465 | ||
3466 | __extension__ extern __inline uint8x16_t | |
3467 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3468 | __arm_vabdq_u8 (uint8x16_t __a, uint8x16_t __b) | |
3469 | { | |
3470 | return __builtin_mve_vabdq_uv16qi (__a, __b); | |
3471 | } | |
3472 | ||
3473 | __extension__ extern __inline uint8x16_t | |
3474 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3475 | __arm_vshlq_r_u8 (uint8x16_t __a, int32_t __b) | |
3476 | { | |
3477 | return __builtin_mve_vshlq_r_uv16qi (__a, __b); | |
3478 | } | |
3479 | ||
3480 | __extension__ extern __inline uint8x16_t | |
3481 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3482 | __arm_vrshlq_u8 (uint8x16_t __a, int8x16_t __b) | |
3483 | { | |
3484 | return __builtin_mve_vrshlq_uv16qi (__a, __b); | |
3485 | } | |
3486 | ||
3487 | __extension__ extern __inline uint8x16_t | |
3488 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3489 | __arm_vrshlq_n_u8 (uint8x16_t __a, int32_t __b) | |
3490 | { | |
3491 | return __builtin_mve_vrshlq_n_uv16qi (__a, __b); | |
3492 | } | |
3493 | ||
3494 | __extension__ extern __inline uint8x16_t | |
3495 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3496 | __arm_vqshlq_u8 (uint8x16_t __a, int8x16_t __b) | |
3497 | { | |
3498 | return __builtin_mve_vqshlq_uv16qi (__a, __b); | |
3499 | } | |
3500 | ||
3501 | __extension__ extern __inline uint8x16_t | |
3502 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3503 | __arm_vqshlq_r_u8 (uint8x16_t __a, int32_t __b) | |
3504 | { | |
3505 | return __builtin_mve_vqshlq_r_uv16qi (__a, __b); | |
3506 | } | |
3507 | ||
3508 | __extension__ extern __inline uint8x16_t | |
3509 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3510 | __arm_vqrshlq_u8 (uint8x16_t __a, int8x16_t __b) | |
3511 | { | |
3512 | return __builtin_mve_vqrshlq_uv16qi (__a, __b); | |
3513 | } | |
3514 | ||
3515 | __extension__ extern __inline uint8x16_t | |
3516 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3517 | __arm_vqrshlq_n_u8 (uint8x16_t __a, int32_t __b) | |
3518 | { | |
3519 | return __builtin_mve_vqrshlq_n_uv16qi (__a, __b); | |
3520 | } | |
3521 | ||
3522 | __extension__ extern __inline uint8_t | |
3523 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3524 | __arm_vminavq_s8 (uint8_t __a, int8x16_t __b) | |
3525 | { | |
3526 | return __builtin_mve_vminavq_sv16qi (__a, __b); | |
3527 | } | |
3528 | ||
3529 | __extension__ extern __inline uint8x16_t | |
3530 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3531 | __arm_vminaq_s8 (uint8x16_t __a, int8x16_t __b) | |
3532 | { | |
3533 | return __builtin_mve_vminaq_sv16qi (__a, __b); | |
3534 | } | |
3535 | ||
3536 | __extension__ extern __inline uint8_t | |
3537 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3538 | __arm_vmaxavq_s8 (uint8_t __a, int8x16_t __b) | |
3539 | { | |
3540 | return __builtin_mve_vmaxavq_sv16qi (__a, __b); | |
3541 | } | |
3542 | ||
3543 | __extension__ extern __inline uint8x16_t | |
3544 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3545 | __arm_vmaxaq_s8 (uint8x16_t __a, int8x16_t __b) | |
3546 | { | |
3547 | return __builtin_mve_vmaxaq_sv16qi (__a, __b); | |
3548 | } | |
3549 | ||
3550 | __extension__ extern __inline uint8x16_t | |
3551 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3552 | __arm_vbrsrq_n_u8 (uint8x16_t __a, int32_t __b) | |
3553 | { | |
3554 | return __builtin_mve_vbrsrq_n_uv16qi (__a, __b); | |
3555 | } | |
3556 | ||
3557 | __extension__ extern __inline uint8x16_t | |
3558 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3559 | __arm_vshlq_n_u8 (uint8x16_t __a, const int __imm) | |
3560 | { | |
3561 | return __builtin_mve_vshlq_n_uv16qi (__a, __imm); | |
3562 | } | |
3563 | ||
3564 | __extension__ extern __inline uint8x16_t | |
3565 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3566 | __arm_vrshrq_n_u8 (uint8x16_t __a, const int __imm) | |
3567 | { | |
3568 | return __builtin_mve_vrshrq_n_uv16qi (__a, __imm); | |
3569 | } | |
3570 | ||
3571 | __extension__ extern __inline uint8x16_t | |
3572 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3573 | __arm_vqshlq_n_u8 (uint8x16_t __a, const int __imm) | |
3574 | { | |
3575 | return __builtin_mve_vqshlq_n_uv16qi (__a, __imm); | |
3576 | } | |
3577 | ||
/* Predicate-producing vector compares for int8x16_t (s8).  Each wrapper
   forwards its arguments unchanged to the matching GCC builtin and returns
   the lane-wise comparison result as an mve_pred16_t mask.  The "_n"
   variants take a scalar int8_t second operand (compared against every
   lane per the Arm MVE ACLE naming scheme).  Suffix meanings: ne/lt/le/
   gt/ge/eq map to the usual !=, <, <=, >, >=, == comparisons.  */
3578 | __extension__ extern __inline mve_pred16_t | |
3579 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3580 | __arm_vcmpneq_n_s8 (int8x16_t __a, int8_t __b) | |
3581 | { | |
3582 | return __builtin_mve_vcmpneq_n_sv16qi (__a, __b); | |
3583 | } | |
3584 | ||
3585 | __extension__ extern __inline mve_pred16_t | |
3586 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3587 | __arm_vcmpltq_s8 (int8x16_t __a, int8x16_t __b) | |
3588 | { | |
3589 | return __builtin_mve_vcmpltq_sv16qi (__a, __b); | |
3590 | } | |
3591 | ||
3592 | __extension__ extern __inline mve_pred16_t | |
3593 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3594 | __arm_vcmpltq_n_s8 (int8x16_t __a, int8_t __b) | |
3595 | { | |
3596 | return __builtin_mve_vcmpltq_n_sv16qi (__a, __b); | |
3597 | } | |
3598 | ||
3599 | __extension__ extern __inline mve_pred16_t | |
3600 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3601 | __arm_vcmpleq_s8 (int8x16_t __a, int8x16_t __b) | |
3602 | { | |
3603 | return __builtin_mve_vcmpleq_sv16qi (__a, __b); | |
3604 | } | |
3605 | ||
3606 | __extension__ extern __inline mve_pred16_t | |
3607 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3608 | __arm_vcmpleq_n_s8 (int8x16_t __a, int8_t __b) | |
3609 | { | |
3610 | return __builtin_mve_vcmpleq_n_sv16qi (__a, __b); | |
3611 | } | |
3612 | ||
3613 | __extension__ extern __inline mve_pred16_t | |
3614 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3615 | __arm_vcmpgtq_s8 (int8x16_t __a, int8x16_t __b) | |
3616 | { | |
3617 | return __builtin_mve_vcmpgtq_sv16qi (__a, __b); | |
3618 | } | |
3619 | ||
3620 | __extension__ extern __inline mve_pred16_t | |
3621 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3622 | __arm_vcmpgtq_n_s8 (int8x16_t __a, int8_t __b) | |
3623 | { | |
3624 | return __builtin_mve_vcmpgtq_n_sv16qi (__a, __b); | |
3625 | } | |
3626 | ||
3627 | __extension__ extern __inline mve_pred16_t | |
3628 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3629 | __arm_vcmpgeq_s8 (int8x16_t __a, int8x16_t __b) | |
3630 | { | |
3631 | return __builtin_mve_vcmpgeq_sv16qi (__a, __b); | |
3632 | } | |
3633 | ||
3634 | __extension__ extern __inline mve_pred16_t | |
3635 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3636 | __arm_vcmpgeq_n_s8 (int8x16_t __a, int8_t __b) | |
3637 | { | |
3638 | return __builtin_mve_vcmpgeq_n_sv16qi (__a, __b); | |
3639 | } | |
3640 | ||
3641 | __extension__ extern __inline mve_pred16_t | |
3642 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3643 | __arm_vcmpeqq_s8 (int8x16_t __a, int8x16_t __b) | |
3644 | { | |
3645 | return __builtin_mve_vcmpeqq_sv16qi (__a, __b); | |
3646 | } | |
3647 | ||
3648 | __extension__ extern __inline mve_pred16_t | |
3649 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3650 | __arm_vcmpeqq_n_s8 (int8x16_t __a, int8_t __b) | |
3651 | { | |
3652 | return __builtin_mve_vcmpeqq_n_sv16qi (__a, __b); | |
3653 | } | |
3654 | ||
/* Saturating shift-left unsigned: shifts each signed s8 lane left by the
   immediate __imm and returns the result as uint8x16_t (per the ACLE
   "vqshlu" naming — saturating to the unsigned range).  Forwards directly
   to the GCC builtin; __imm must be a compile-time constant.  */
3655 | __extension__ extern __inline uint8x16_t | |
3656 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3657 | __arm_vqshluq_n_s8 (int8x16_t __a, const int __imm) | |
3658 | { | |
3659 | return __builtin_mve_vqshluq_n_sv16qi (__a, __imm); | |
3660 | } | |
3661 | ||
/* Predicated add-across-vector: sums the s8 lanes of __a into a scalar
   int32_t, with lane participation controlled by the predicate mask __p
   ("_p" suffix = predicated form per the ACLE).  Forwards to the builtin.  */
3662 | __extension__ extern __inline int32_t | |
3663 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3664 | __arm_vaddvq_p_s8 (int8x16_t __a, mve_pred16_t __p) | |
3665 | { | |
3666 | return __builtin_mve_vaddvq_p_sv16qi (__a, __p); | |
3667 | } | |
3668 | ||
/* Subtract, shift and multiply intrinsics for int8x16_t (s8).  Each
   wrapper forwards unchanged to its GCC builtin.  ACLE prefix key:
   q = saturating, r = rounding, h = high-half; "_n" = scalar second
   operand, "_r" = shift count in a general register (int32_t).  */
3669 | __extension__ extern __inline int8x16_t | |
3670 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3671 | __arm_vsubq_s8 (int8x16_t __a, int8x16_t __b) | |
3672 | { | |
3673 | return __builtin_mve_vsubq_sv16qi (__a, __b); | |
3674 | } | |
3675 | ||
3676 | __extension__ extern __inline int8x16_t | |
3677 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3678 | __arm_vsubq_n_s8 (int8x16_t __a, int8_t __b) | |
3679 | { | |
3680 | return __builtin_mve_vsubq_n_sv16qi (__a, __b); | |
3681 | } | |
3682 | ||
/* Shift left by a register-held scalar count (may be negative per ACLE —
   TODO confirm against the Arm intrinsics reference).  */
3683 | __extension__ extern __inline int8x16_t | |
3684 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3685 | __arm_vshlq_r_s8 (int8x16_t __a, int32_t __b) | |
3686 | { | |
3687 | return __builtin_mve_vshlq_r_sv16qi (__a, __b); | |
3688 | } | |
3689 | ||
3690 | __extension__ extern __inline int8x16_t | |
3691 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3692 | __arm_vrshlq_s8 (int8x16_t __a, int8x16_t __b) | |
3693 | { | |
3694 | return __builtin_mve_vrshlq_sv16qi (__a, __b); | |
3695 | } | |
3696 | ||
3697 | __extension__ extern __inline int8x16_t | |
3698 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3699 | __arm_vrshlq_n_s8 (int8x16_t __a, int32_t __b) | |
3700 | { | |
3701 | return __builtin_mve_vrshlq_n_sv16qi (__a, __b); | |
3702 | } | |
3703 | ||
/* Rounding multiply returning the high half of each product (vrmulh).  */
3704 | __extension__ extern __inline int8x16_t | |
3705 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3706 | __arm_vrmulhq_s8 (int8x16_t __a, int8x16_t __b) | |
3707 | { | |
3708 | return __builtin_mve_vrmulhq_sv16qi (__a, __b); | |
3709 | } | |
3710 | ||
/* Rounding halving add (vrhadd).  */
3711 | __extension__ extern __inline int8x16_t | |
3712 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3713 | __arm_vrhaddq_s8 (int8x16_t __a, int8x16_t __b) | |
3714 | { | |
3715 | return __builtin_mve_vrhaddq_sv16qi (__a, __b); | |
3716 | } | |
3717 | ||
3718 | __extension__ extern __inline int8x16_t | |
3719 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3720 | __arm_vqsubq_s8 (int8x16_t __a, int8x16_t __b) | |
3721 | { | |
3722 | return __builtin_mve_vqsubq_sv16qi (__a, __b); | |
3723 | } | |
3724 | ||
3725 | __extension__ extern __inline int8x16_t | |
3726 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3727 | __arm_vqsubq_n_s8 (int8x16_t __a, int8_t __b) | |
3728 | { | |
3729 | return __builtin_mve_vqsubq_n_sv16qi (__a, __b); | |
3730 | } | |
3731 | ||
3732 | __extension__ extern __inline int8x16_t | |
3733 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3734 | __arm_vqshlq_s8 (int8x16_t __a, int8x16_t __b) | |
3735 | { | |
3736 | return __builtin_mve_vqshlq_sv16qi (__a, __b); | |
3737 | } | |
3738 | ||
3739 | __extension__ extern __inline int8x16_t | |
3740 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3741 | __arm_vqshlq_r_s8 (int8x16_t __a, int32_t __b) | |
3742 | { | |
3743 | return __builtin_mve_vqshlq_r_sv16qi (__a, __b); | |
3744 | } | |
3745 | ||
3746 | __extension__ extern __inline int8x16_t | |
3747 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3748 | __arm_vqrshlq_s8 (int8x16_t __a, int8x16_t __b) | |
3749 | { | |
3750 | return __builtin_mve_vqrshlq_sv16qi (__a, __b); | |
3751 | } | |
3752 | ||
3753 | __extension__ extern __inline int8x16_t | |
3754 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3755 | __arm_vqrshlq_n_s8 (int8x16_t __a, int32_t __b) | |
3756 | { | |
3757 | return __builtin_mve_vqrshlq_n_sv16qi (__a, __b); | |
3758 | } | |
3759 | ||
/* Saturating rounding doubling multiply returning high half (vqrdmulh).  */
3760 | __extension__ extern __inline int8x16_t | |
3761 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3762 | __arm_vqrdmulhq_s8 (int8x16_t __a, int8x16_t __b) | |
3763 | { | |
3764 | return __builtin_mve_vqrdmulhq_sv16qi (__a, __b); | |
3765 | } | |
3766 | ||
3767 | __extension__ extern __inline int8x16_t | |
3768 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3769 | __arm_vqrdmulhq_n_s8 (int8x16_t __a, int8_t __b) | |
3770 | { | |
3771 | return __builtin_mve_vqrdmulhq_n_sv16qi (__a, __b); | |
3772 | } | |
3773 | ||
/* Saturating doubling multiply returning high half (vqdmulh).  */
3774 | __extension__ extern __inline int8x16_t | |
3775 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3776 | __arm_vqdmulhq_s8 (int8x16_t __a, int8x16_t __b) | |
3777 | { | |
3778 | return __builtin_mve_vqdmulhq_sv16qi (__a, __b); | |
3779 | } | |
3780 | ||
3781 | __extension__ extern __inline int8x16_t | |
3782 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3783 | __arm_vqdmulhq_n_s8 (int8x16_t __a, int8_t __b) | |
3784 | { | |
3785 | return __builtin_mve_vqdmulhq_n_sv16qi (__a, __b); | |
3786 | } | |
3787 | ||
/* Saturating add, bitwise logical, widening multiply, dot-product
   reductions, and min/max intrinsics for int8x16_t (s8).  All wrappers
   forward unchanged to their GCC builtins.  */
3788 | __extension__ extern __inline int8x16_t | |
3789 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3790 | __arm_vqaddq_s8 (int8x16_t __a, int8x16_t __b) | |
3791 | { | |
3792 | return __builtin_mve_vqaddq_sv16qi (__a, __b); | |
3793 | } | |
3794 | ||
3795 | __extension__ extern __inline int8x16_t | |
3796 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3797 | __arm_vqaddq_n_s8 (int8x16_t __a, int8_t __b) | |
3798 | { | |
3799 | return __builtin_mve_vqaddq_n_sv16qi (__a, __b); | |
3800 | } | |
3801 | ||
3802 | __extension__ extern __inline int8x16_t | |
3803 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3804 | __arm_vorrq_s8 (int8x16_t __a, int8x16_t __b) | |
3805 | { | |
3806 | return __builtin_mve_vorrq_sv16qi (__a, __b); | |
3807 | } | |
3808 | ||
/* Bitwise OR with complement of second operand (vorn).  */
3809 | __extension__ extern __inline int8x16_t | |
3810 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3811 | __arm_vornq_s8 (int8x16_t __a, int8x16_t __b) | |
3812 | { | |
3813 | return __builtin_mve_vornq_sv16qi (__a, __b); | |
3814 | } | |
3815 | ||
3816 | __extension__ extern __inline int8x16_t | |
3817 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3818 | __arm_vmulq_s8 (int8x16_t __a, int8x16_t __b) | |
3819 | { | |
3820 | return __builtin_mve_vmulq_sv16qi (__a, __b); | |
3821 | } | |
3822 | ||
3823 | __extension__ extern __inline int8x16_t | |
3824 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3825 | __arm_vmulq_n_s8 (int8x16_t __a, int8_t __b) | |
3826 | { | |
3827 | return __builtin_mve_vmulq_n_sv16qi (__a, __b); | |
3828 | } | |
3829 | ||
/* Widening multiplies: note the int16x8_t result — "t"/"b" pick the
   top/bottom half of the lanes (per ACLE vmullt/vmullb).  */
3830 | __extension__ extern __inline int16x8_t | |
3831 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3832 | __arm_vmulltq_int_s8 (int8x16_t __a, int8x16_t __b) | |
3833 | { | |
3834 | return __builtin_mve_vmulltq_int_sv16qi (__a, __b); | |
3835 | } | |
3836 | ||
3837 | __extension__ extern __inline int16x8_t | |
3838 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3839 | __arm_vmullbq_int_s8 (int8x16_t __a, int8x16_t __b) | |
3840 | { | |
3841 | return __builtin_mve_vmullbq_int_sv16qi (__a, __b); | |
3842 | } | |
3843 | ||
3844 | __extension__ extern __inline int8x16_t | |
3845 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3846 | __arm_vmulhq_s8 (int8x16_t __a, int8x16_t __b) | |
3847 | { | |
3848 | return __builtin_mve_vmulhq_sv16qi (__a, __b); | |
3849 | } | |
3850 | ||
/* Multiply-accumulate/subtract dot products across the vector, returning
   a scalar int32_t; the "x" forms exchange operand pairs (ACLE
   vmladav/vmlsdav family).  */
3851 | __extension__ extern __inline int32_t | |
3852 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3853 | __arm_vmlsdavxq_s8 (int8x16_t __a, int8x16_t __b) | |
3854 | { | |
3855 | return __builtin_mve_vmlsdavxq_sv16qi (__a, __b); | |
3856 | } | |
3857 | ||
3858 | __extension__ extern __inline int32_t | |
3859 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3860 | __arm_vmlsdavq_s8 (int8x16_t __a, int8x16_t __b) | |
3861 | { | |
3862 | return __builtin_mve_vmlsdavq_sv16qi (__a, __b); | |
3863 | } | |
3864 | ||
3865 | __extension__ extern __inline int32_t | |
3866 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3867 | __arm_vmladavxq_s8 (int8x16_t __a, int8x16_t __b) | |
3868 | { | |
3869 | return __builtin_mve_vmladavxq_sv16qi (__a, __b); | |
3870 | } | |
3871 | ||
3872 | __extension__ extern __inline int32_t | |
3873 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3874 | __arm_vmladavq_s8 (int8x16_t __a, int8x16_t __b) | |
3875 | { | |
3876 | return __builtin_mve_vmladavq_sv16qi (__a, __b); | |
3877 | } | |
3878 | ||
/* Min/max across the vector (v{min,max}v) take a running scalar __a and
   fold in the lanes of __b; the plain v{min,max}q forms are lane-wise.  */
3879 | __extension__ extern __inline int8_t | |
3880 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3881 | __arm_vminvq_s8 (int8_t __a, int8x16_t __b) | |
3882 | { | |
3883 | return __builtin_mve_vminvq_sv16qi (__a, __b); | |
3884 | } | |
3885 | ||
3886 | __extension__ extern __inline int8x16_t | |
3887 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3888 | __arm_vminq_s8 (int8x16_t __a, int8x16_t __b) | |
3889 | { | |
3890 | return __builtin_mve_vminq_sv16qi (__a, __b); | |
3891 | } | |
3892 | ||
3893 | __extension__ extern __inline int8_t | |
3894 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3895 | __arm_vmaxvq_s8 (int8_t __a, int8x16_t __b) | |
3896 | { | |
3897 | return __builtin_mve_vmaxvq_sv16qi (__a, __b); | |
3898 | } | |
3899 | ||
3900 | __extension__ extern __inline int8x16_t | |
3901 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3902 | __arm_vmaxq_s8 (int8x16_t __a, int8x16_t __b) | |
3903 | { | |
3904 | return __builtin_mve_vmaxq_sv16qi (__a, __b); | |
3905 | } | |
3906 | ||
/* Halving arithmetic, complex add, bitwise logic, add-across, and
   immediate shifts for int8x16_t (s8).  All wrappers forward unchanged
   to their GCC builtins.  "h" prefix = halving; "_rot90"/"_rot270" =
   complex rotation variants (ACLE vcadd/vhcadd family).  */
3907 | __extension__ extern __inline int8x16_t | |
3908 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3909 | __arm_vhsubq_s8 (int8x16_t __a, int8x16_t __b) | |
3910 | { | |
3911 | return __builtin_mve_vhsubq_sv16qi (__a, __b); | |
3912 | } | |
3913 | ||
3914 | __extension__ extern __inline int8x16_t | |
3915 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3916 | __arm_vhsubq_n_s8 (int8x16_t __a, int8_t __b) | |
3917 | { | |
3918 | return __builtin_mve_vhsubq_n_sv16qi (__a, __b); | |
3919 | } | |
3920 | ||
3921 | __extension__ extern __inline int8x16_t | |
3922 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3923 | __arm_vhcaddq_rot90_s8 (int8x16_t __a, int8x16_t __b) | |
3924 | { | |
3925 | return __builtin_mve_vhcaddq_rot90_sv16qi (__a, __b); | |
3926 | } | |
3927 | ||
3928 | __extension__ extern __inline int8x16_t | |
3929 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3930 | __arm_vhcaddq_rot270_s8 (int8x16_t __a, int8x16_t __b) | |
3931 | { | |
3932 | return __builtin_mve_vhcaddq_rot270_sv16qi (__a, __b); | |
3933 | } | |
3934 | ||
3935 | __extension__ extern __inline int8x16_t | |
3936 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3937 | __arm_vhaddq_s8 (int8x16_t __a, int8x16_t __b) | |
3938 | { | |
3939 | return __builtin_mve_vhaddq_sv16qi (__a, __b); | |
3940 | } | |
3941 | ||
3942 | __extension__ extern __inline int8x16_t | |
3943 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3944 | __arm_vhaddq_n_s8 (int8x16_t __a, int8_t __b) | |
3945 | { | |
3946 | return __builtin_mve_vhaddq_n_sv16qi (__a, __b); | |
3947 | } | |
3948 | ||
3949 | __extension__ extern __inline int8x16_t | |
3950 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3951 | __arm_veorq_s8 (int8x16_t __a, int8x16_t __b) | |
3952 | { | |
3953 | return __builtin_mve_veorq_sv16qi (__a, __b); | |
3954 | } | |
3955 | ||
3956 | __extension__ extern __inline int8x16_t | |
3957 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3958 | __arm_vcaddq_rot90_s8 (int8x16_t __a, int8x16_t __b) | |
3959 | { | |
3960 | return __builtin_mve_vcaddq_rot90_sv16qi (__a, __b); | |
3961 | } | |
3962 | ||
3963 | __extension__ extern __inline int8x16_t | |
3964 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3965 | __arm_vcaddq_rot270_s8 (int8x16_t __a, int8x16_t __b) | |
3966 | { | |
3967 | return __builtin_mve_vcaddq_rot270_sv16qi (__a, __b); | |
3968 | } | |
3969 | ||
/* Bit-reverse shift (vbrsr): second operand in a general register.  */
3970 | __extension__ extern __inline int8x16_t | |
3971 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3972 | __arm_vbrsrq_n_s8 (int8x16_t __a, int32_t __b) | |
3973 | { | |
3974 | return __builtin_mve_vbrsrq_n_sv16qi (__a, __b); | |
3975 | } | |
3976 | ||
/* Bit clear: __a AND NOT __b (vbic).  */
3977 | __extension__ extern __inline int8x16_t | |
3978 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3979 | __arm_vbicq_s8 (int8x16_t __a, int8x16_t __b) | |
3980 | { | |
3981 | return __builtin_mve_vbicq_sv16qi (__a, __b); | |
3982 | } | |
3983 | ||
3984 | __extension__ extern __inline int8x16_t | |
3985 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3986 | __arm_vandq_s8 (int8x16_t __a, int8x16_t __b) | |
3987 | { | |
3988 | return __builtin_mve_vandq_sv16qi (__a, __b); | |
3989 | } | |
3990 | ||
/* Add-across with scalar accumulator __a (vaddva).  */
3991 | __extension__ extern __inline int32_t | |
3992 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3993 | __arm_vaddvaq_s8 (int32_t __a, int8x16_t __b) | |
3994 | { | |
3995 | return __builtin_mve_vaddvaq_sv16qi (__a, __b); | |
3996 | } | |
3997 | ||
3998 | __extension__ extern __inline int8x16_t | |
3999 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4000 | __arm_vaddq_n_s8 (int8x16_t __a, int8_t __b) | |
4001 | { | |
4002 | return __builtin_mve_vaddq_n_sv16qi (__a, __b); | |
4003 | } | |
4004 | ||
/* Absolute difference of corresponding lanes (vabd).  */
4005 | __extension__ extern __inline int8x16_t | |
4006 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4007 | __arm_vabdq_s8 (int8x16_t __a, int8x16_t __b) | |
4008 | { | |
4009 | return __builtin_mve_vabdq_sv16qi (__a, __b); | |
4010 | } | |
4011 | ||
/* Immediate-count shifts; __imm must be a compile-time constant.  */
4012 | __extension__ extern __inline int8x16_t | |
4013 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4014 | __arm_vshlq_n_s8 (int8x16_t __a, const int __imm) | |
4015 | { | |
4016 | return __builtin_mve_vshlq_n_sv16qi (__a, __imm); | |
4017 | } | |
4018 | ||
4019 | __extension__ extern __inline int8x16_t | |
4020 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4021 | __arm_vrshrq_n_s8 (int8x16_t __a, const int __imm) | |
4022 | { | |
4023 | return __builtin_mve_vrshrq_n_sv16qi (__a, __imm); | |
4024 | } | |
4025 | ||
4026 | __extension__ extern __inline int8x16_t | |
4027 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4028 | __arm_vqshlq_n_s8 (int8x16_t __a, const int __imm) | |
4029 | { | |
4030 | return __builtin_mve_vqshlq_n_sv16qi (__a, __imm); | |
4031 | } | |
4032 | ||
/* Arithmetic, logical, widening-multiply, reduction and min/max
   intrinsics for uint16x8_t (u16) — the unsigned mirror of the s8/s16
   groups above.  Each wrapper forwards unchanged to its "_uv8hi"
   GCC builtin.  */
4033 | __extension__ extern __inline uint16x8_t | |
4034 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4035 | __arm_vsubq_u16 (uint16x8_t __a, uint16x8_t __b) | |
4036 | { | |
4037 | return __builtin_mve_vsubq_uv8hi (__a, __b); | |
4038 | } | |
4039 | ||
4040 | __extension__ extern __inline uint16x8_t | |
4041 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4042 | __arm_vsubq_n_u16 (uint16x8_t __a, uint16_t __b) | |
4043 | { | |
4044 | return __builtin_mve_vsubq_n_uv8hi (__a, __b); | |
4045 | } | |
4046 | ||
4047 | __extension__ extern __inline uint16x8_t | |
4048 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4049 | __arm_vrmulhq_u16 (uint16x8_t __a, uint16x8_t __b) | |
4050 | { | |
4051 | return __builtin_mve_vrmulhq_uv8hi (__a, __b); | |
4052 | } | |
4053 | ||
4054 | __extension__ extern __inline uint16x8_t | |
4055 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4056 | __arm_vrhaddq_u16 (uint16x8_t __a, uint16x8_t __b) | |
4057 | { | |
4058 | return __builtin_mve_vrhaddq_uv8hi (__a, __b); | |
4059 | } | |
4060 | ||
4061 | __extension__ extern __inline uint16x8_t | |
4062 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4063 | __arm_vqsubq_u16 (uint16x8_t __a, uint16x8_t __b) | |
4064 | { | |
4065 | return __builtin_mve_vqsubq_uv8hi (__a, __b); | |
4066 | } | |
4067 | ||
4068 | __extension__ extern __inline uint16x8_t | |
4069 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4070 | __arm_vqsubq_n_u16 (uint16x8_t __a, uint16_t __b) | |
4071 | { | |
4072 | return __builtin_mve_vqsubq_n_uv8hi (__a, __b); | |
4073 | } | |
4074 | ||
4075 | __extension__ extern __inline uint16x8_t | |
4076 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4077 | __arm_vqaddq_u16 (uint16x8_t __a, uint16x8_t __b) | |
4078 | { | |
4079 | return __builtin_mve_vqaddq_uv8hi (__a, __b); | |
4080 | } | |
4081 | ||
4082 | __extension__ extern __inline uint16x8_t | |
4083 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4084 | __arm_vqaddq_n_u16 (uint16x8_t __a, uint16_t __b) | |
4085 | { | |
4086 | return __builtin_mve_vqaddq_n_uv8hi (__a, __b); | |
4087 | } | |
4088 | ||
4089 | __extension__ extern __inline uint16x8_t | |
4090 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4091 | __arm_vorrq_u16 (uint16x8_t __a, uint16x8_t __b) | |
4092 | { | |
4093 | return __builtin_mve_vorrq_uv8hi (__a, __b); | |
4094 | } | |
4095 | ||
4096 | __extension__ extern __inline uint16x8_t | |
4097 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4098 | __arm_vornq_u16 (uint16x8_t __a, uint16x8_t __b) | |
4099 | { | |
4100 | return __builtin_mve_vornq_uv8hi (__a, __b); | |
4101 | } | |
4102 | ||
4103 | __extension__ extern __inline uint16x8_t | |
4104 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4105 | __arm_vmulq_u16 (uint16x8_t __a, uint16x8_t __b) | |
4106 | { | |
4107 | return __builtin_mve_vmulq_uv8hi (__a, __b); | |
4108 | } | |
4109 | ||
4110 | __extension__ extern __inline uint16x8_t | |
4111 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4112 | __arm_vmulq_n_u16 (uint16x8_t __a, uint16_t __b) | |
4113 | { | |
4114 | return __builtin_mve_vmulq_n_uv8hi (__a, __b); | |
4115 | } | |
4116 | ||
/* Widening multiplies — note the uint32x4_t results.  */
4117 | __extension__ extern __inline uint32x4_t | |
4118 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4119 | __arm_vmulltq_int_u16 (uint16x8_t __a, uint16x8_t __b) | |
4120 | { | |
4121 | return __builtin_mve_vmulltq_int_uv8hi (__a, __b); | |
4122 | } | |
4123 | ||
4124 | __extension__ extern __inline uint32x4_t | |
4125 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4126 | __arm_vmullbq_int_u16 (uint16x8_t __a, uint16x8_t __b) | |
4127 | { | |
4128 | return __builtin_mve_vmullbq_int_uv8hi (__a, __b); | |
4129 | } | |
4130 | ||
4131 | __extension__ extern __inline uint16x8_t | |
4132 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4133 | __arm_vmulhq_u16 (uint16x8_t __a, uint16x8_t __b) | |
4134 | { | |
4135 | return __builtin_mve_vmulhq_uv8hi (__a, __b); | |
4136 | } | |
4137 | ||
/* Dot-product reduction across the vector to a scalar uint32_t.  */
4138 | __extension__ extern __inline uint32_t | |
4139 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4140 | __arm_vmladavq_u16 (uint16x8_t __a, uint16x8_t __b) | |
4141 | { | |
4142 | return __builtin_mve_vmladavq_uv8hi (__a, __b); | |
4143 | } | |
4144 | ||
4145 | __extension__ extern __inline uint16_t | |
4146 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4147 | __arm_vminvq_u16 (uint16_t __a, uint16x8_t __b) | |
4148 | { | |
4149 | return __builtin_mve_vminvq_uv8hi (__a, __b); | |
4150 | } | |
4151 | ||
4152 | __extension__ extern __inline uint16x8_t | |
4153 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4154 | __arm_vminq_u16 (uint16x8_t __a, uint16x8_t __b) | |
4155 | { | |
4156 | return __builtin_mve_vminq_uv8hi (__a, __b); | |
4157 | } | |
4158 | ||
4159 | __extension__ extern __inline uint16_t | |
4160 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4161 | __arm_vmaxvq_u16 (uint16_t __a, uint16x8_t __b) | |
4162 | { | |
4163 | return __builtin_mve_vmaxvq_uv8hi (__a, __b); | |
4164 | } | |
4165 | ||
4166 | __extension__ extern __inline uint16x8_t | |
4167 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4168 | __arm_vmaxq_u16 (uint16x8_t __a, uint16x8_t __b) | |
4169 | { | |
4170 | return __builtin_mve_vmaxq_uv8hi (__a, __b); | |
4171 | } | |
4172 | ||
4173 | __extension__ extern __inline uint16x8_t | |
4174 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4175 | __arm_vhsubq_u16 (uint16x8_t __a, uint16x8_t __b) | |
4176 | { | |
4177 | return __builtin_mve_vhsubq_uv8hi (__a, __b); | |
4178 | } | |
4179 | ||
4180 | __extension__ extern __inline uint16x8_t | |
4181 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4182 | __arm_vhsubq_n_u16 (uint16x8_t __a, uint16_t __b) | |
4183 | { | |
4184 | return __builtin_mve_vhsubq_n_uv8hi (__a, __b); | |
4185 | } | |
4186 | ||
4187 | __extension__ extern __inline uint16x8_t | |
4188 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4189 | __arm_vhaddq_u16 (uint16x8_t __a, uint16x8_t __b) | |
4190 | { | |
4191 | return __builtin_mve_vhaddq_uv8hi (__a, __b); | |
4192 | } | |
4193 | ||
4194 | __extension__ extern __inline uint16x8_t | |
4195 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4196 | __arm_vhaddq_n_u16 (uint16x8_t __a, uint16_t __b) | |
4197 | { | |
4198 | return __builtin_mve_vhaddq_n_uv8hi (__a, __b); | |
4199 | } | |
4200 | ||
4201 | __extension__ extern __inline uint16x8_t | |
4202 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4203 | __arm_veorq_u16 (uint16x8_t __a, uint16x8_t __b) | |
4204 | { | |
4205 | return __builtin_mve_veorq_uv8hi (__a, __b); | |
4206 | } | |
4207 | ||
/* Predicate-producing vector compares for uint16x8_t (u16).  Unsigned
   compares use "hi" (higher, unsigned >) and "cs" (carry set, unsigned
   >=) instead of the signed gt/ge names.  "_n" variants compare against
   a scalar uint16_t.  */
4208 | __extension__ extern __inline mve_pred16_t | |
4209 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4210 | __arm_vcmpneq_n_u16 (uint16x8_t __a, uint16_t __b) | |
4211 | { | |
4212 | return __builtin_mve_vcmpneq_n_uv8hi (__a, __b); | |
4213 | } | |
4214 | ||
4215 | __extension__ extern __inline mve_pred16_t | |
4216 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4217 | __arm_vcmphiq_u16 (uint16x8_t __a, uint16x8_t __b) | |
4218 | { | |
4219 | return __builtin_mve_vcmphiq_uv8hi (__a, __b); | |
4220 | } | |
4221 | ||
4222 | __extension__ extern __inline mve_pred16_t | |
4223 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4224 | __arm_vcmphiq_n_u16 (uint16x8_t __a, uint16_t __b) | |
4225 | { | |
4226 | return __builtin_mve_vcmphiq_n_uv8hi (__a, __b); | |
4227 | } | |
4228 | ||
4229 | __extension__ extern __inline mve_pred16_t | |
4230 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4231 | __arm_vcmpeqq_u16 (uint16x8_t __a, uint16x8_t __b) | |
4232 | { | |
4233 | return __builtin_mve_vcmpeqq_uv8hi (__a, __b); | |
4234 | } | |
4235 | ||
4236 | __extension__ extern __inline mve_pred16_t | |
4237 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4238 | __arm_vcmpeqq_n_u16 (uint16x8_t __a, uint16_t __b) | |
4239 | { | |
4240 | return __builtin_mve_vcmpeqq_n_uv8hi (__a, __b); | |
4241 | } | |
4242 | ||
4243 | __extension__ extern __inline mve_pred16_t | |
4244 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4245 | __arm_vcmpcsq_u16 (uint16x8_t __a, uint16x8_t __b) | |
4246 | { | |
4247 | return __builtin_mve_vcmpcsq_uv8hi (__a, __b); | |
4248 | } | |
4249 | ||
4250 | __extension__ extern __inline mve_pred16_t | |
4251 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4252 | __arm_vcmpcsq_n_u16 (uint16x8_t __a, uint16_t __b) | |
4253 | { | |
4254 | return __builtin_mve_vcmpcsq_n_uv8hi (__a, __b); | |
4255 | } | |
4256 | ||
/* Complex add, bitwise logic, add-across reductions, absolute difference
   and register-count shifts for uint16x8_t (u16).  All wrappers forward
   unchanged to their GCC builtins.  Note that shift-count operands are
   SIGNED (int16x8_t / int32_t) even for unsigned data vectors.  */
4257 | __extension__ extern __inline uint16x8_t | |
4258 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4259 | __arm_vcaddq_rot90_u16 (uint16x8_t __a, uint16x8_t __b) | |
4260 | { | |
4261 | return __builtin_mve_vcaddq_rot90_uv8hi (__a, __b); | |
4262 | } | |
4263 | ||
4264 | __extension__ extern __inline uint16x8_t | |
4265 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4266 | __arm_vcaddq_rot270_u16 (uint16x8_t __a, uint16x8_t __b) | |
4267 | { | |
4268 | return __builtin_mve_vcaddq_rot270_uv8hi (__a, __b); | |
4269 | } | |
4270 | ||
/* Bit clear: __a AND NOT __b (vbic).  */
4271 | __extension__ extern __inline uint16x8_t | |
4272 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4273 | __arm_vbicq_u16 (uint16x8_t __a, uint16x8_t __b) | |
4274 | { | |
4275 | return __builtin_mve_vbicq_uv8hi (__a, __b); | |
4276 | } | |
4277 | ||
4278 | __extension__ extern __inline uint16x8_t | |
4279 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4280 | __arm_vandq_u16 (uint16x8_t __a, uint16x8_t __b) | |
4281 | { | |
4282 | return __builtin_mve_vandq_uv8hi (__a, __b); | |
4283 | } | |
4284 | ||
/* Predicated add-across: lane participation controlled by __p.  */
4285 | __extension__ extern __inline uint32_t | |
4286 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4287 | __arm_vaddvq_p_u16 (uint16x8_t __a, mve_pred16_t __p) | |
4288 | { | |
4289 | return __builtin_mve_vaddvq_p_uv8hi (__a, __p); | |
4290 | } | |
4291 | ||
/* Add-across with scalar accumulator __a (vaddva).  */
4292 | __extension__ extern __inline uint32_t | |
4293 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4294 | __arm_vaddvaq_u16 (uint32_t __a, uint16x8_t __b) | |
4295 | { | |
4296 | return __builtin_mve_vaddvaq_uv8hi (__a, __b); | |
4297 | } | |
4298 | ||
4299 | __extension__ extern __inline uint16x8_t | |
4300 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4301 | __arm_vaddq_n_u16 (uint16x8_t __a, uint16_t __b) | |
4302 | { | |
4303 | return __builtin_mve_vaddq_n_uv8hi (__a, __b); | |
4304 | } | |
4305 | ||
/* Absolute difference of corresponding lanes (vabd).  */
4306 | __extension__ extern __inline uint16x8_t | |
4307 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4308 | __arm_vabdq_u16 (uint16x8_t __a, uint16x8_t __b) | |
4309 | { | |
4310 | return __builtin_mve_vabdq_uv8hi (__a, __b); | |
4311 | } | |
4312 | ||
4313 | __extension__ extern __inline uint16x8_t | |
4314 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4315 | __arm_vshlq_r_u16 (uint16x8_t __a, int32_t __b) | |
4316 | { | |
4317 | return __builtin_mve_vshlq_r_uv8hi (__a, __b); | |
4318 | } | |
4319 | ||
4320 | __extension__ extern __inline uint16x8_t | |
4321 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4322 | __arm_vrshlq_u16 (uint16x8_t __a, int16x8_t __b) | |
4323 | { | |
4324 | return __builtin_mve_vrshlq_uv8hi (__a, __b); | |
4325 | } | |
4326 | ||
4327 | __extension__ extern __inline uint16x8_t | |
4328 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4329 | __arm_vrshlq_n_u16 (uint16x8_t __a, int32_t __b) | |
4330 | { | |
4331 | return __builtin_mve_vrshlq_n_uv8hi (__a, __b); | |
4332 | } | |
4333 | ||
4334 | __extension__ extern __inline uint16x8_t | |
4335 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4336 | __arm_vqshlq_u16 (uint16x8_t __a, int16x8_t __b) | |
4337 | { | |
4338 | return __builtin_mve_vqshlq_uv8hi (__a, __b); | |
4339 | } | |
4340 | ||
4341 | __extension__ extern __inline uint16x8_t | |
4342 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4343 | __arm_vqshlq_r_u16 (uint16x8_t __a, int32_t __b) | |
4344 | { | |
4345 | return __builtin_mve_vqshlq_r_uv8hi (__a, __b); | |
4346 | } | |
4347 | ||
4348 | __extension__ extern __inline uint16x8_t | |
4349 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4350 | __arm_vqrshlq_u16 (uint16x8_t __a, int16x8_t __b) | |
4351 | { | |
4352 | return __builtin_mve_vqrshlq_uv8hi (__a, __b); | |
4353 | } | |
4354 | ||
4355 | __extension__ extern __inline uint16x8_t | |
4356 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4357 | __arm_vqrshlq_n_u16 (uint16x8_t __a, int32_t __b) | |
4358 | { | |
4359 | return __builtin_mve_vqrshlq_n_uv8hi (__a, __b); | |
4360 | } | |
4361 | ||
/* Absolute min/max intrinsics for s16 input ("a" = absolute value of the
   signed lanes, giving unsigned results per the ACLE vmina/vmaxa family).
   The "v" forms reduce across the vector into the scalar __a; the plain
   forms are lane-wise against the unsigned accumulator vector __a.  */
4362 | __extension__ extern __inline uint16_t | |
4363 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4364 | __arm_vminavq_s16 (uint16_t __a, int16x8_t __b) | |
4365 | { | |
4366 | return __builtin_mve_vminavq_sv8hi (__a, __b); | |
4367 | } | |
4368 | ||
4369 | __extension__ extern __inline uint16x8_t | |
4370 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4371 | __arm_vminaq_s16 (uint16x8_t __a, int16x8_t __b) | |
4372 | { | |
4373 | return __builtin_mve_vminaq_sv8hi (__a, __b); | |
4374 | } | |
4375 | ||
4376 | __extension__ extern __inline uint16_t | |
4377 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4378 | __arm_vmaxavq_s16 (uint16_t __a, int16x8_t __b) | |
4379 | { | |
4380 | return __builtin_mve_vmaxavq_sv8hi (__a, __b); | |
4381 | } | |
4382 | ||
4383 | __extension__ extern __inline uint16x8_t | |
4384 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4385 | __arm_vmaxaq_s16 (uint16x8_t __a, int16x8_t __b) | |
4386 | { | |
4387 | return __builtin_mve_vmaxaq_sv8hi (__a, __b); | |
4388 | } | |
4389 | ||
/* Bit-reverse shift and immediate-count shifts for uint16x8_t (u16).
   __imm operands must be compile-time constants; vbrsr takes its operand
   in a general register.  All wrappers forward unchanged to their GCC
   builtins.  */
4390 | __extension__ extern __inline uint16x8_t | |
4391 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4392 | __arm_vbrsrq_n_u16 (uint16x8_t __a, int32_t __b) | |
4393 | { | |
4394 | return __builtin_mve_vbrsrq_n_uv8hi (__a, __b); | |
4395 | } | |
4396 | ||
4397 | __extension__ extern __inline uint16x8_t | |
4398 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4399 | __arm_vshlq_n_u16 (uint16x8_t __a, const int __imm) | |
4400 | { | |
4401 | return __builtin_mve_vshlq_n_uv8hi (__a, __imm); | |
4402 | } | |
4403 | ||
4404 | __extension__ extern __inline uint16x8_t | |
4405 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4406 | __arm_vrshrq_n_u16 (uint16x8_t __a, const int __imm) | |
4407 | { | |
4408 | return __builtin_mve_vrshrq_n_uv8hi (__a, __imm); | |
4409 | } | |
4410 | ||
4411 | __extension__ extern __inline uint16x8_t | |
4412 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4413 | __arm_vqshlq_n_u16 (uint16x8_t __a, const int __imm) | |
4414 | { | |
4415 | return __builtin_mve_vqshlq_n_uv8hi (__a, __imm); | |
4416 | } | |
4417 | ||
/* s16 vector compare intrinsics.  Each lane comparison sets/clears bits
   of the returned mve_pred16_t predicate mask (two predicate bits per
   16-bit lane).  The "_n" forms compare every lane of __a against the
   scalar __b; the vector forms compare lane-wise against __b[i].  */

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_n_s16 (int16x8_t __a, int16_t __b)
{
  return __builtin_mve_vcmpneq_n_sv8hi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vcmpltq_sv8hi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq_n_s16 (int16x8_t __a, int16_t __b)
{
  return __builtin_mve_vcmpltq_n_sv8hi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vcmpleq_sv8hi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq_n_s16 (int16x8_t __a, int16_t __b)
{
  return __builtin_mve_vcmpleq_n_sv8hi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vcmpgtq_sv8hi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq_n_s16 (int16x8_t __a, int16_t __b)
{
  return __builtin_mve_vcmpgtq_n_sv8hi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vcmpgeq_sv8hi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq_n_s16 (int16x8_t __a, int16_t __b)
{
  return __builtin_mve_vcmpgeq_n_sv8hi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vcmpeqq_sv8hi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_n_s16 (int16x8_t __a, int16_t __b)
{
  return __builtin_mve_vcmpeqq_n_sv8hi (__a, __b);
}
4494 | ||
/* s16 arithmetic and shift intrinsics: plain, halving (vh*), rounding
   (vr*), saturating (vq*) and rounding-saturating (vqr*) variants.
   "_n" forms take a scalar second operand; "_r" forms take a scalar
   shift in a general-purpose register.  All forward to MVE builtins.  */

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshluq_n_s16 (int16x8_t __a, const int __imm)
{
  /* VQSHLU: saturating left shift of signed input to unsigned result.  */
  return __builtin_mve_vqshluq_n_sv8hi (__a, __imm);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvq_p_s16 (int16x8_t __a, mve_pred16_t __p)
{
  /* VADDV (predicated): sum of the lanes enabled by predicate __p.  */
  return __builtin_mve_vaddvq_p_sv8hi (__a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vsubq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_n_s16 (int16x8_t __a, int16_t __b)
{
  return __builtin_mve_vsubq_n_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_r_s16 (int16x8_t __a, int32_t __b)
{
  /* VSHL (register): shift all lanes by scalar __b (negative = right).  */
  return __builtin_mve_vshlq_r_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_s16 (int16x8_t __a, int16x8_t __b)
{
  /* VRSHL: rounding shift, per-lane amounts from __b.  */
  return __builtin_mve_vrshlq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_n_s16 (int16x8_t __a, int32_t __b)
{
  return __builtin_mve_vrshlq_n_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq_s16 (int16x8_t __a, int16x8_t __b)
{
  /* VRMULH: rounding multiply, returning the high half of each product.  */
  return __builtin_mve_vrmulhq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq_s16 (int16x8_t __a, int16x8_t __b)
{
  /* VRHADD: rounding halving add.  */
  return __builtin_mve_vrhaddq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vqsubq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_n_s16 (int16x8_t __a, int16_t __b)
{
  return __builtin_mve_vqsubq_n_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vqshlq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_r_s16 (int16x8_t __a, int32_t __b)
{
  return __builtin_mve_vqshlq_r_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vqrshlq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_n_s16 (int16x8_t __a, int32_t __b)
{
  return __builtin_mve_vqrshlq_n_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmulhq_s16 (int16x8_t __a, int16x8_t __b)
{
  /* VQRDMULH: saturating rounding doubling multiply, high half.  */
  return __builtin_mve_vqrdmulhq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmulhq_n_s16 (int16x8_t __a, int16_t __b)
{
  return __builtin_mve_vqrdmulhq_n_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulhq_s16 (int16x8_t __a, int16x8_t __b)
{
  /* VQDMULH: saturating doubling multiply, high half.  */
  return __builtin_mve_vqdmulhq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulhq_n_s16 (int16x8_t __a, int16_t __b)
{
  return __builtin_mve_vqdmulhq_n_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vqaddq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_n_s16 (int16x8_t __a, int16_t __b)
{
  return __builtin_mve_vqaddq_n_sv8hi (__a, __b);
}
4641 | ||
/* s16 logical, multiply, multiply-accumulate-across, min/max, halving
   and complex-rotation intrinsics.  Widening multiplies (vmull*) return
   int32x4_t; across-vector accumulations (vmladav*, vaddva*) return a
   scalar int32_t.  All forward to the matching MVE builtin.  */

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vorrq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_s16 (int16x8_t __a, int16x8_t __b)
{
  /* VORN: __a | ~__b.  */
  return __builtin_mve_vornq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vmulq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_n_s16 (int16x8_t __a, int16_t __b)
{
  return __builtin_mve_vmulq_n_sv8hi (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int_s16 (int16x8_t __a, int16x8_t __b)
{
  /* VMULLT: widening multiply of the top (odd-numbered) lanes.  */
  return __builtin_mve_vmulltq_int_sv8hi (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_int_s16 (int16x8_t __a, int16x8_t __b)
{
  /* VMULLB: widening multiply of the bottom (even-numbered) lanes.  */
  return __builtin_mve_vmullbq_int_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq_s16 (int16x8_t __a, int16x8_t __b)
{
  /* VMULH: multiply returning the high half of each product.  */
  return __builtin_mve_vmulhq_sv8hi (__a, __b);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavxq_s16 (int16x8_t __a, int16x8_t __b)
{
  /* VMLSDAVX: multiply-subtract dual accumulate across, exchanged pairs.  */
  return __builtin_mve_vmlsdavxq_sv8hi (__a, __b);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vmlsdavq_sv8hi (__a, __b);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavxq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vmladavxq_sv8hi (__a, __b);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavq_s16 (int16x8_t __a, int16x8_t __b)
{
  /* VMLADAV: sum of lane-wise products, i.e. a dot product.  */
  return __builtin_mve_vmladavq_sv8hi (__a, __b);
}

__extension__ extern __inline int16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminvq_s16 (int16_t __a, int16x8_t __b)
{
  /* VMINV: minimum of __a and all lanes of __b.  */
  return __builtin_mve_vminvq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vminq_sv8hi (__a, __b);
}

__extension__ extern __inline int16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxvq_s16 (int16_t __a, int16x8_t __b)
{
  /* VMAXV: maximum of __a and all lanes of __b.  */
  return __builtin_mve_vmaxvq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vmaxq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_s16 (int16x8_t __a, int16x8_t __b)
{
  /* VHSUB: halving subtract, (__a - __b) >> 1 per lane.  */
  return __builtin_mve_vhsubq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_n_s16 (int16x8_t __a, int16_t __b)
{
  return __builtin_mve_vhsubq_n_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhcaddq_rot90_s16 (int16x8_t __a, int16x8_t __b)
{
  /* VHCADD: halving complex add with __b rotated by 90 degrees.  */
  return __builtin_mve_vhcaddq_rot90_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhcaddq_rot270_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vhcaddq_rot270_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_s16 (int16x8_t __a, int16x8_t __b)
{
  /* VHADD: halving add, (__a + __b) >> 1 per lane.  */
  return __builtin_mve_vhaddq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_n_s16 (int16x8_t __a, int16_t __b)
{
  return __builtin_mve_vhaddq_n_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_veorq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_s16 (int16x8_t __a, int16x8_t __b)
{
  /* VCADD: complex add with __b rotated by 90 degrees.  */
  return __builtin_mve_vcaddq_rot90_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vcaddq_rot270_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq_n_s16 (int16x8_t __a, int32_t __b)
{
  return __builtin_mve_vbrsrq_n_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_s16 (int16x8_t __a, int16x8_t __b)
{
  /* VBIC: __a & ~__b.  */
  return __builtin_mve_vbicq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vandq_sv8hi (__a, __b);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvaq_s16 (int32_t __a, int16x8_t __b)
{
  /* VADDVA: add all lanes of __b into the scalar accumulator __a.  */
  return __builtin_mve_vaddvaq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_n_s16 (int16x8_t __a, int16_t __b)
{
  return __builtin_mve_vaddq_n_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq_s16 (int16x8_t __a, int16x8_t __b)
{
  /* VABD: absolute difference per lane.  */
  return __builtin_mve_vabdq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_n_s16 (int16x8_t __a, const int __imm)
{
  return __builtin_mve_vshlq_n_sv8hi (__a, __imm);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrq_n_s16 (int16x8_t __a, const int __imm)
{
  return __builtin_mve_vrshrq_n_sv8hi (__a, __imm);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_n_s16 (int16x8_t __a, const int __imm)
{
  return __builtin_mve_vqshlq_n_sv8hi (__a, __imm);
}
4872 | ||
/* u32 arithmetic and logical intrinsics (v4si = 4 x 32-bit lanes).
   Same naming scheme as the s16 group above: vq* saturating, vh*
   halving, vr* rounding, "_n" scalar second operand.  Widening
   multiplies return uint64x2_t; across-vector ops return uint32_t.  */

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vsubq_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_n_u32 (uint32x4_t __a, uint32_t __b)
{
  return __builtin_mve_vsubq_n_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vrmulhq_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vrhaddq_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vqsubq_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_n_u32 (uint32x4_t __a, uint32_t __b)
{
  return __builtin_mve_vqsubq_n_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vqaddq_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_n_u32 (uint32x4_t __a, uint32_t __b)
{
  return __builtin_mve_vqaddq_n_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vorrq_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  /* VORN: __a | ~__b.  */
  return __builtin_mve_vornq_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vmulq_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_n_u32 (uint32x4_t __a, uint32_t __b)
{
  return __builtin_mve_vmulq_n_uv4si (__a, __b);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int_u32 (uint32x4_t __a, uint32x4_t __b)
{
  /* VMULLT: widening multiply of the top (odd-numbered) lanes.  */
  return __builtin_mve_vmulltq_int_uv4si (__a, __b);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_int_u32 (uint32x4_t __a, uint32x4_t __b)
{
  /* VMULLB: widening multiply of the bottom (even-numbered) lanes.  */
  return __builtin_mve_vmullbq_int_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vmulhq_uv4si (__a, __b);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  /* VMLADAV: sum of lane-wise products, i.e. a dot product.  */
  return __builtin_mve_vmladavq_uv4si (__a, __b);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminvq_u32 (uint32_t __a, uint32x4_t __b)
{
  return __builtin_mve_vminvq_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vminq_uv4si (__a, __b);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxvq_u32 (uint32_t __a, uint32x4_t __b)
{
  return __builtin_mve_vmaxvq_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vmaxq_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vhsubq_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_n_u32 (uint32x4_t __a, uint32_t __b)
{
  return __builtin_mve_vhsubq_n_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vhaddq_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_n_u32 (uint32x4_t __a, uint32_t __b)
{
  return __builtin_mve_vhaddq_n_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_veorq_uv4si (__a, __b);
}
5047 | ||
/* u32 compare, complex-add, logical and shift intrinsics.  Compares
   return an mve_pred16_t lane mask (four predicate bits per 32-bit
   lane); CS/HI are the unsigned >= / > comparisons.  Shift amounts are
   signed even for unsigned data (negative shifts reverse direction).  */

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_n_u32 (uint32x4_t __a, uint32_t __b)
{
  return __builtin_mve_vcmpneq_n_uv4si (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmphiq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  /* VCMP.HI: unsigned higher (__a > __b).  */
  return __builtin_mve_vcmphiq_uv4si (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmphiq_n_u32 (uint32x4_t __a, uint32_t __b)
{
  return __builtin_mve_vcmphiq_n_uv4si (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vcmpeqq_uv4si (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_n_u32 (uint32x4_t __a, uint32_t __b)
{
  return __builtin_mve_vcmpeqq_n_uv4si (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpcsq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  /* VCMP.CS: unsigned higher-or-same (__a >= __b).  */
  return __builtin_mve_vcmpcsq_uv4si (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpcsq_n_u32 (uint32x4_t __a, uint32_t __b)
{
  return __builtin_mve_vcmpcsq_n_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_u32 (uint32x4_t __a, uint32x4_t __b)
{
  /* VCADD: complex add with __b rotated by 90 degrees.  */
  return __builtin_mve_vcaddq_rot90_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vcaddq_rot270_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  /* VBIC: __a & ~__b.  */
  return __builtin_mve_vbicq_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vandq_uv4si (__a, __b);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvq_p_u32 (uint32x4_t __a, mve_pred16_t __p)
{
  /* VADDV (predicated): sum of the lanes enabled by predicate __p.  */
  return __builtin_mve_vaddvq_p_uv4si (__a, __p);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvaq_u32 (uint32_t __a, uint32x4_t __b)
{
  /* VADDVA: add all lanes of __b into the scalar accumulator __a.  */
  return __builtin_mve_vaddvaq_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_n_u32 (uint32x4_t __a, uint32_t __b)
{
  return __builtin_mve_vaddq_n_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  /* VABD: absolute difference per lane.  */
  return __builtin_mve_vabdq_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_r_u32 (uint32x4_t __a, int32_t __b)
{
  return __builtin_mve_vshlq_r_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_u32 (uint32x4_t __a, int32x4_t __b)
{
  /* Per-lane rounding shift; amounts come from the signed vector __b.  */
  return __builtin_mve_vrshlq_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_n_u32 (uint32x4_t __a, int32_t __b)
{
  return __builtin_mve_vrshlq_n_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_u32 (uint32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vqshlq_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_r_u32 (uint32x4_t __a, int32_t __b)
{
  return __builtin_mve_vqshlq_r_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_u32 (uint32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vqrshlq_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_n_u32 (uint32x4_t __a, int32_t __b)
{
  return __builtin_mve_vqrshlq_n_uv4si (__a, __b);
}
5201 | ||
5202 | __extension__ extern __inline uint32_t | |
5203 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5204 | __arm_vminavq_s32 (uint32_t __a, int32x4_t __b) | |
5205 | { | |
5206 | return __builtin_mve_vminavq_sv4si (__a, __b); | |
5207 | } | |
5208 | ||
5209 | __extension__ extern __inline uint32x4_t | |
5210 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5211 | __arm_vminaq_s32 (uint32x4_t __a, int32x4_t __b) | |
5212 | { | |
5213 | return __builtin_mve_vminaq_sv4si (__a, __b); | |
5214 | } | |
5215 | ||
5216 | __extension__ extern __inline uint32_t | |
5217 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5218 | __arm_vmaxavq_s32 (uint32_t __a, int32x4_t __b) | |
5219 | { | |
5220 | return __builtin_mve_vmaxavq_sv4si (__a, __b); | |
5221 | } | |
5222 | ||
5223 | __extension__ extern __inline uint32x4_t | |
5224 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5225 | __arm_vmaxaq_s32 (uint32x4_t __a, int32x4_t __b) | |
5226 | { | |
5227 | return __builtin_mve_vmaxaq_sv4si (__a, __b); | |
5228 | } | |
5229 | ||
5230 | __extension__ extern __inline uint32x4_t | |
5231 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5232 | __arm_vbrsrq_n_u32 (uint32x4_t __a, int32_t __b) | |
5233 | { | |
5234 | return __builtin_mve_vbrsrq_n_uv4si (__a, __b); | |
5235 | } | |
5236 | ||
5237 | __extension__ extern __inline uint32x4_t | |
5238 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5239 | __arm_vshlq_n_u32 (uint32x4_t __a, const int __imm) | |
5240 | { | |
5241 | return __builtin_mve_vshlq_n_uv4si (__a, __imm); | |
5242 | } | |
5243 | ||
5244 | __extension__ extern __inline uint32x4_t | |
5245 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5246 | __arm_vrshrq_n_u32 (uint32x4_t __a, const int __imm) | |
5247 | { | |
5248 | return __builtin_mve_vrshrq_n_uv4si (__a, __imm); | |
5249 | } | |
5250 | ||
5251 | __extension__ extern __inline uint32x4_t | |
5252 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5253 | __arm_vqshlq_n_u32 (uint32x4_t __a, const int __imm) | |
5254 | { | |
5255 | return __builtin_mve_vqshlq_n_uv4si (__a, __imm); | |
5256 | } | |
5257 | ||
5258 | __extension__ extern __inline mve_pred16_t | |
5259 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5260 | __arm_vcmpneq_n_s32 (int32x4_t __a, int32_t __b) | |
5261 | { | |
5262 | return __builtin_mve_vcmpneq_n_sv4si (__a, __b); | |
5263 | } | |
5264 | ||
5265 | __extension__ extern __inline mve_pred16_t | |
5266 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5267 | __arm_vcmpltq_s32 (int32x4_t __a, int32x4_t __b) | |
5268 | { | |
5269 | return __builtin_mve_vcmpltq_sv4si (__a, __b); | |
5270 | } | |
5271 | ||
5272 | __extension__ extern __inline mve_pred16_t | |
5273 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5274 | __arm_vcmpltq_n_s32 (int32x4_t __a, int32_t __b) | |
5275 | { | |
5276 | return __builtin_mve_vcmpltq_n_sv4si (__a, __b); | |
5277 | } | |
5278 | ||
5279 | __extension__ extern __inline mve_pred16_t | |
5280 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5281 | __arm_vcmpleq_s32 (int32x4_t __a, int32x4_t __b) | |
5282 | { | |
5283 | return __builtin_mve_vcmpleq_sv4si (__a, __b); | |
5284 | } | |
5285 | ||
5286 | __extension__ extern __inline mve_pred16_t | |
5287 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5288 | __arm_vcmpleq_n_s32 (int32x4_t __a, int32_t __b) | |
5289 | { | |
5290 | return __builtin_mve_vcmpleq_n_sv4si (__a, __b); | |
5291 | } | |
5292 | ||
5293 | __extension__ extern __inline mve_pred16_t | |
5294 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5295 | __arm_vcmpgtq_s32 (int32x4_t __a, int32x4_t __b) | |
5296 | { | |
5297 | return __builtin_mve_vcmpgtq_sv4si (__a, __b); | |
5298 | } | |
5299 | ||
5300 | __extension__ extern __inline mve_pred16_t | |
5301 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5302 | __arm_vcmpgtq_n_s32 (int32x4_t __a, int32_t __b) | |
5303 | { | |
5304 | return __builtin_mve_vcmpgtq_n_sv4si (__a, __b); | |
5305 | } | |
5306 | ||
5307 | __extension__ extern __inline mve_pred16_t | |
5308 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5309 | __arm_vcmpgeq_s32 (int32x4_t __a, int32x4_t __b) | |
5310 | { | |
5311 | return __builtin_mve_vcmpgeq_sv4si (__a, __b); | |
5312 | } | |
5313 | ||
5314 | __extension__ extern __inline mve_pred16_t | |
5315 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5316 | __arm_vcmpgeq_n_s32 (int32x4_t __a, int32_t __b) | |
5317 | { | |
5318 | return __builtin_mve_vcmpgeq_n_sv4si (__a, __b); | |
5319 | } | |
5320 | ||
5321 | __extension__ extern __inline mve_pred16_t | |
5322 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5323 | __arm_vcmpeqq_s32 (int32x4_t __a, int32x4_t __b) | |
5324 | { | |
5325 | return __builtin_mve_vcmpeqq_sv4si (__a, __b); | |
5326 | } | |
5327 | ||
5328 | __extension__ extern __inline mve_pred16_t | |
5329 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5330 | __arm_vcmpeqq_n_s32 (int32x4_t __a, int32_t __b) | |
5331 | { | |
5332 | return __builtin_mve_vcmpeqq_n_sv4si (__a, __b); | |
5333 | } | |
5334 | ||
5335 | __extension__ extern __inline uint32x4_t | |
5336 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5337 | __arm_vqshluq_n_s32 (int32x4_t __a, const int __imm) | |
5338 | { | |
5339 | return __builtin_mve_vqshluq_n_sv4si (__a, __imm); | |
5340 | } | |
5341 | ||
5342 | __extension__ extern __inline int32_t | |
5343 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5344 | __arm_vaddvq_p_s32 (int32x4_t __a, mve_pred16_t __p) | |
5345 | { | |
5346 | return __builtin_mve_vaddvq_p_sv4si (__a, __p); | |
5347 | } | |
5348 | ||
5349 | __extension__ extern __inline int32x4_t | |
5350 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5351 | __arm_vsubq_s32 (int32x4_t __a, int32x4_t __b) | |
5352 | { | |
5353 | return __builtin_mve_vsubq_sv4si (__a, __b); | |
5354 | } | |
5355 | ||
5356 | __extension__ extern __inline int32x4_t | |
5357 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5358 | __arm_vsubq_n_s32 (int32x4_t __a, int32_t __b) | |
5359 | { | |
5360 | return __builtin_mve_vsubq_n_sv4si (__a, __b); | |
5361 | } | |
5362 | ||
5363 | __extension__ extern __inline int32x4_t | |
5364 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5365 | __arm_vshlq_r_s32 (int32x4_t __a, int32_t __b) | |
5366 | { | |
5367 | return __builtin_mve_vshlq_r_sv4si (__a, __b); | |
5368 | } | |
5369 | ||
5370 | __extension__ extern __inline int32x4_t | |
5371 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5372 | __arm_vrshlq_s32 (int32x4_t __a, int32x4_t __b) | |
5373 | { | |
5374 | return __builtin_mve_vrshlq_sv4si (__a, __b); | |
5375 | } | |
5376 | ||
5377 | __extension__ extern __inline int32x4_t | |
5378 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5379 | __arm_vrshlq_n_s32 (int32x4_t __a, int32_t __b) | |
5380 | { | |
5381 | return __builtin_mve_vrshlq_n_sv4si (__a, __b); | |
5382 | } | |
5383 | ||
5384 | __extension__ extern __inline int32x4_t | |
5385 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5386 | __arm_vrmulhq_s32 (int32x4_t __a, int32x4_t __b) | |
5387 | { | |
5388 | return __builtin_mve_vrmulhq_sv4si (__a, __b); | |
5389 | } | |
5390 | ||
5391 | __extension__ extern __inline int32x4_t | |
5392 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5393 | __arm_vrhaddq_s32 (int32x4_t __a, int32x4_t __b) | |
5394 | { | |
5395 | return __builtin_mve_vrhaddq_sv4si (__a, __b); | |
5396 | } | |
5397 | ||
5398 | __extension__ extern __inline int32x4_t | |
5399 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5400 | __arm_vqsubq_s32 (int32x4_t __a, int32x4_t __b) | |
5401 | { | |
5402 | return __builtin_mve_vqsubq_sv4si (__a, __b); | |
5403 | } | |
5404 | ||
5405 | __extension__ extern __inline int32x4_t | |
5406 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5407 | __arm_vqsubq_n_s32 (int32x4_t __a, int32_t __b) | |
5408 | { | |
5409 | return __builtin_mve_vqsubq_n_sv4si (__a, __b); | |
5410 | } | |
5411 | ||
5412 | __extension__ extern __inline int32x4_t | |
5413 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5414 | __arm_vqshlq_s32 (int32x4_t __a, int32x4_t __b) | |
5415 | { | |
5416 | return __builtin_mve_vqshlq_sv4si (__a, __b); | |
5417 | } | |
5418 | ||
5419 | __extension__ extern __inline int32x4_t | |
5420 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5421 | __arm_vqshlq_r_s32 (int32x4_t __a, int32_t __b) | |
5422 | { | |
5423 | return __builtin_mve_vqshlq_r_sv4si (__a, __b); | |
5424 | } | |
5425 | ||
5426 | __extension__ extern __inline int32x4_t | |
5427 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5428 | __arm_vqrshlq_s32 (int32x4_t __a, int32x4_t __b) | |
5429 | { | |
5430 | return __builtin_mve_vqrshlq_sv4si (__a, __b); | |
5431 | } | |
5432 | ||
5433 | __extension__ extern __inline int32x4_t | |
5434 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5435 | __arm_vqrshlq_n_s32 (int32x4_t __a, int32_t __b) | |
5436 | { | |
5437 | return __builtin_mve_vqrshlq_n_sv4si (__a, __b); | |
5438 | } | |
5439 | ||
5440 | __extension__ extern __inline int32x4_t | |
5441 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5442 | __arm_vqrdmulhq_s32 (int32x4_t __a, int32x4_t __b) | |
5443 | { | |
5444 | return __builtin_mve_vqrdmulhq_sv4si (__a, __b); | |
5445 | } | |
5446 | ||
5447 | __extension__ extern __inline int32x4_t | |
5448 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5449 | __arm_vqrdmulhq_n_s32 (int32x4_t __a, int32_t __b) | |
5450 | { | |
5451 | return __builtin_mve_vqrdmulhq_n_sv4si (__a, __b); | |
5452 | } | |
5453 | ||
5454 | __extension__ extern __inline int32x4_t | |
5455 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5456 | __arm_vqdmulhq_s32 (int32x4_t __a, int32x4_t __b) | |
5457 | { | |
5458 | return __builtin_mve_vqdmulhq_sv4si (__a, __b); | |
5459 | } | |
5460 | ||
5461 | __extension__ extern __inline int32x4_t | |
5462 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5463 | __arm_vqdmulhq_n_s32 (int32x4_t __a, int32_t __b) | |
5464 | { | |
5465 | return __builtin_mve_vqdmulhq_n_sv4si (__a, __b); | |
5466 | } | |
5467 | ||
5468 | __extension__ extern __inline int32x4_t | |
5469 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5470 | __arm_vqaddq_s32 (int32x4_t __a, int32x4_t __b) | |
5471 | { | |
5472 | return __builtin_mve_vqaddq_sv4si (__a, __b); | |
5473 | } | |
5474 | ||
5475 | __extension__ extern __inline int32x4_t | |
5476 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5477 | __arm_vqaddq_n_s32 (int32x4_t __a, int32_t __b) | |
5478 | { | |
5479 | return __builtin_mve_vqaddq_n_sv4si (__a, __b); | |
5480 | } | |
5481 | ||
5482 | __extension__ extern __inline int32x4_t | |
5483 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5484 | __arm_vorrq_s32 (int32x4_t __a, int32x4_t __b) | |
5485 | { | |
5486 | return __builtin_mve_vorrq_sv4si (__a, __b); | |
5487 | } | |
5488 | ||
5489 | __extension__ extern __inline int32x4_t | |
5490 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5491 | __arm_vornq_s32 (int32x4_t __a, int32x4_t __b) | |
5492 | { | |
5493 | return __builtin_mve_vornq_sv4si (__a, __b); | |
5494 | } | |
5495 | ||
5496 | __extension__ extern __inline int32x4_t | |
5497 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5498 | __arm_vmulq_s32 (int32x4_t __a, int32x4_t __b) | |
5499 | { | |
5500 | return __builtin_mve_vmulq_sv4si (__a, __b); | |
5501 | } | |
5502 | ||
5503 | __extension__ extern __inline int32x4_t | |
5504 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5505 | __arm_vmulq_n_s32 (int32x4_t __a, int32_t __b) | |
5506 | { | |
5507 | return __builtin_mve_vmulq_n_sv4si (__a, __b); | |
5508 | } | |
5509 | ||
5510 | __extension__ extern __inline int64x2_t | |
5511 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5512 | __arm_vmulltq_int_s32 (int32x4_t __a, int32x4_t __b) | |
5513 | { | |
5514 | return __builtin_mve_vmulltq_int_sv4si (__a, __b); | |
5515 | } | |
5516 | ||
5517 | __extension__ extern __inline int64x2_t | |
5518 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5519 | __arm_vmullbq_int_s32 (int32x4_t __a, int32x4_t __b) | |
5520 | { | |
5521 | return __builtin_mve_vmullbq_int_sv4si (__a, __b); | |
5522 | } | |
5523 | ||
5524 | __extension__ extern __inline int32x4_t | |
5525 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5526 | __arm_vmulhq_s32 (int32x4_t __a, int32x4_t __b) | |
5527 | { | |
5528 | return __builtin_mve_vmulhq_sv4si (__a, __b); | |
5529 | } | |
5530 | ||
5531 | __extension__ extern __inline int32_t | |
5532 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5533 | __arm_vmlsdavxq_s32 (int32x4_t __a, int32x4_t __b) | |
5534 | { | |
5535 | return __builtin_mve_vmlsdavxq_sv4si (__a, __b); | |
5536 | } | |
5537 | ||
5538 | __extension__ extern __inline int32_t | |
5539 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5540 | __arm_vmlsdavq_s32 (int32x4_t __a, int32x4_t __b) | |
5541 | { | |
5542 | return __builtin_mve_vmlsdavq_sv4si (__a, __b); | |
5543 | } | |
5544 | ||
5545 | __extension__ extern __inline int32_t | |
5546 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5547 | __arm_vmladavxq_s32 (int32x4_t __a, int32x4_t __b) | |
5548 | { | |
5549 | return __builtin_mve_vmladavxq_sv4si (__a, __b); | |
5550 | } | |
5551 | ||
5552 | __extension__ extern __inline int32_t | |
5553 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5554 | __arm_vmladavq_s32 (int32x4_t __a, int32x4_t __b) | |
5555 | { | |
5556 | return __builtin_mve_vmladavq_sv4si (__a, __b); | |
5557 | } | |
5558 | ||
5559 | __extension__ extern __inline int32_t | |
5560 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5561 | __arm_vminvq_s32 (int32_t __a, int32x4_t __b) | |
5562 | { | |
5563 | return __builtin_mve_vminvq_sv4si (__a, __b); | |
5564 | } | |
5565 | ||
5566 | __extension__ extern __inline int32x4_t | |
5567 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5568 | __arm_vminq_s32 (int32x4_t __a, int32x4_t __b) | |
5569 | { | |
5570 | return __builtin_mve_vminq_sv4si (__a, __b); | |
5571 | } | |
5572 | ||
5573 | __extension__ extern __inline int32_t | |
5574 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5575 | __arm_vmaxvq_s32 (int32_t __a, int32x4_t __b) | |
5576 | { | |
5577 | return __builtin_mve_vmaxvq_sv4si (__a, __b); | |
5578 | } | |
5579 | ||
5580 | __extension__ extern __inline int32x4_t | |
5581 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5582 | __arm_vmaxq_s32 (int32x4_t __a, int32x4_t __b) | |
5583 | { | |
5584 | return __builtin_mve_vmaxq_sv4si (__a, __b); | |
5585 | } | |
5586 | ||
5587 | __extension__ extern __inline int32x4_t | |
5588 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5589 | __arm_vhsubq_s32 (int32x4_t __a, int32x4_t __b) | |
5590 | { | |
5591 | return __builtin_mve_vhsubq_sv4si (__a, __b); | |
5592 | } | |
5593 | ||
5594 | __extension__ extern __inline int32x4_t | |
5595 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5596 | __arm_vhsubq_n_s32 (int32x4_t __a, int32_t __b) | |
5597 | { | |
5598 | return __builtin_mve_vhsubq_n_sv4si (__a, __b); | |
5599 | } | |
5600 | ||
5601 | __extension__ extern __inline int32x4_t | |
5602 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5603 | __arm_vhcaddq_rot90_s32 (int32x4_t __a, int32x4_t __b) | |
5604 | { | |
5605 | return __builtin_mve_vhcaddq_rot90_sv4si (__a, __b); | |
5606 | } | |
5607 | ||
5608 | __extension__ extern __inline int32x4_t | |
5609 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5610 | __arm_vhcaddq_rot270_s32 (int32x4_t __a, int32x4_t __b) | |
5611 | { | |
5612 | return __builtin_mve_vhcaddq_rot270_sv4si (__a, __b); | |
5613 | } | |
5614 | ||
5615 | __extension__ extern __inline int32x4_t | |
5616 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5617 | __arm_vhaddq_s32 (int32x4_t __a, int32x4_t __b) | |
5618 | { | |
5619 | return __builtin_mve_vhaddq_sv4si (__a, __b); | |
5620 | } | |
5621 | ||
5622 | __extension__ extern __inline int32x4_t | |
5623 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5624 | __arm_vhaddq_n_s32 (int32x4_t __a, int32_t __b) | |
5625 | { | |
5626 | return __builtin_mve_vhaddq_n_sv4si (__a, __b); | |
5627 | } | |
5628 | ||
5629 | __extension__ extern __inline int32x4_t | |
5630 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5631 | __arm_veorq_s32 (int32x4_t __a, int32x4_t __b) | |
5632 | { | |
5633 | return __builtin_mve_veorq_sv4si (__a, __b); | |
5634 | } | |
5635 | ||
5636 | __extension__ extern __inline int32x4_t | |
5637 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5638 | __arm_vcaddq_rot90_s32 (int32x4_t __a, int32x4_t __b) | |
5639 | { | |
5640 | return __builtin_mve_vcaddq_rot90_sv4si (__a, __b); | |
5641 | } | |
5642 | ||
5643 | __extension__ extern __inline int32x4_t | |
5644 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5645 | __arm_vcaddq_rot270_s32 (int32x4_t __a, int32x4_t __b) | |
5646 | { | |
5647 | return __builtin_mve_vcaddq_rot270_sv4si (__a, __b); | |
5648 | } | |
5649 | ||
5650 | __extension__ extern __inline int32x4_t | |
5651 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5652 | __arm_vbrsrq_n_s32 (int32x4_t __a, int32_t __b) | |
5653 | { | |
5654 | return __builtin_mve_vbrsrq_n_sv4si (__a, __b); | |
5655 | } | |
5656 | ||
5657 | __extension__ extern __inline int32x4_t | |
5658 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5659 | __arm_vbicq_s32 (int32x4_t __a, int32x4_t __b) | |
5660 | { | |
5661 | return __builtin_mve_vbicq_sv4si (__a, __b); | |
5662 | } | |
5663 | ||
5664 | __extension__ extern __inline int32x4_t | |
5665 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5666 | __arm_vandq_s32 (int32x4_t __a, int32x4_t __b) | |
5667 | { | |
5668 | return __builtin_mve_vandq_sv4si (__a, __b); | |
5669 | } | |
5670 | ||
5671 | __extension__ extern __inline int32_t | |
5672 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5673 | __arm_vaddvaq_s32 (int32_t __a, int32x4_t __b) | |
5674 | { | |
5675 | return __builtin_mve_vaddvaq_sv4si (__a, __b); | |
5676 | } | |
5677 | ||
5678 | __extension__ extern __inline int32x4_t | |
5679 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5680 | __arm_vaddq_n_s32 (int32x4_t __a, int32_t __b) | |
5681 | { | |
5682 | return __builtin_mve_vaddq_n_sv4si (__a, __b); | |
5683 | } | |
5684 | ||
5685 | __extension__ extern __inline int32x4_t | |
5686 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5687 | __arm_vabdq_s32 (int32x4_t __a, int32x4_t __b) | |
5688 | { | |
5689 | return __builtin_mve_vabdq_sv4si (__a, __b); | |
5690 | } | |
5691 | ||
5692 | __extension__ extern __inline int32x4_t | |
5693 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5694 | __arm_vshlq_n_s32 (int32x4_t __a, const int __imm) | |
5695 | { | |
5696 | return __builtin_mve_vshlq_n_sv4si (__a, __imm); | |
5697 | } | |
5698 | ||
5699 | __extension__ extern __inline int32x4_t | |
5700 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5701 | __arm_vrshrq_n_s32 (int32x4_t __a, const int __imm) | |
5702 | { | |
5703 | return __builtin_mve_vrshrq_n_sv4si (__a, __imm); | |
5704 | } | |
5705 | ||
5706 | __extension__ extern __inline int32x4_t | |
5707 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5708 | __arm_vqshlq_n_s32 (int32x4_t __a, const int __imm) | |
5709 | { | |
5710 | return __builtin_mve_vqshlq_n_sv4si (__a, __imm); | |
5711 | } | |
f166a8cd | 5712 | |
f9355dee | 5713 | __extension__ extern __inline uint8x16_t |
14782c81 | 5714 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 5715 | __arm_vqmovntq_u16 (uint8x16_t __a, uint16x8_t __b) |
14782c81 | 5716 | { |
f9355dee | 5717 | return __builtin_mve_vqmovntq_uv8hi (__a, __b); |
14782c81 SP |
5718 | } |
5719 | ||
f9355dee | 5720 | __extension__ extern __inline uint8x16_t |
14782c81 | 5721 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 5722 | __arm_vqmovnbq_u16 (uint8x16_t __a, uint16x8_t __b) |
14782c81 | 5723 | { |
f9355dee | 5724 | return __builtin_mve_vqmovnbq_uv8hi (__a, __b); |
14782c81 SP |
5725 | } |
5726 | ||
f9355dee | 5727 | __extension__ extern __inline uint16x8_t |
a50f6abf | 5728 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 5729 | __arm_vmulltq_poly_p8 (uint8x16_t __a, uint8x16_t __b) |
a50f6abf | 5730 | { |
f9355dee | 5731 | return __builtin_mve_vmulltq_poly_pv16qi (__a, __b); |
a50f6abf SP |
5732 | } |
5733 | ||
f9355dee | 5734 | __extension__ extern __inline uint16x8_t |
a50f6abf | 5735 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 5736 | __arm_vmullbq_poly_p8 (uint8x16_t __a, uint8x16_t __b) |
a50f6abf | 5737 | { |
f9355dee | 5738 | return __builtin_mve_vmullbq_poly_pv16qi (__a, __b); |
a50f6abf SP |
5739 | } |
5740 | ||
f9355dee | 5741 | __extension__ extern __inline uint8x16_t |
a50f6abf | 5742 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 5743 | __arm_vmovntq_u16 (uint8x16_t __a, uint16x8_t __b) |
a50f6abf | 5744 | { |
f9355dee | 5745 | return __builtin_mve_vmovntq_uv8hi (__a, __b); |
a50f6abf SP |
5746 | } |
5747 | ||
f9355dee | 5748 | __extension__ extern __inline uint8x16_t |
a50f6abf | 5749 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 5750 | __arm_vmovnbq_u16 (uint8x16_t __a, uint16x8_t __b) |
a50f6abf | 5751 | { |
f9355dee | 5752 | return __builtin_mve_vmovnbq_uv8hi (__a, __b); |
a50f6abf SP |
5753 | } |
5754 | ||
f9355dee | 5755 | __extension__ extern __inline uint64_t |
a50f6abf | 5756 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 5757 | __arm_vmlaldavq_u16 (uint16x8_t __a, uint16x8_t __b) |
a50f6abf | 5758 | { |
f9355dee | 5759 | return __builtin_mve_vmlaldavq_uv8hi (__a, __b); |
a50f6abf SP |
5760 | } |
5761 | ||
f9355dee | 5762 | __extension__ extern __inline uint8x16_t |
a50f6abf | 5763 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 5764 | __arm_vqmovuntq_s16 (uint8x16_t __a, int16x8_t __b) |
a50f6abf | 5765 | { |
f9355dee | 5766 | return __builtin_mve_vqmovuntq_sv8hi (__a, __b); |
a50f6abf SP |
5767 | } |
5768 | ||
f9355dee | 5769 | __extension__ extern __inline uint8x16_t |
a50f6abf | 5770 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 5771 | __arm_vqmovunbq_s16 (uint8x16_t __a, int16x8_t __b) |
a50f6abf | 5772 | { |
f9355dee | 5773 | return __builtin_mve_vqmovunbq_sv8hi (__a, __b); |
a50f6abf SP |
5774 | } |
5775 | ||
f9355dee | 5776 | __extension__ extern __inline uint16x8_t |
a50f6abf | 5777 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 5778 | __arm_vshlltq_n_u8 (uint8x16_t __a, const int __imm) |
a50f6abf | 5779 | { |
f9355dee | 5780 | return __builtin_mve_vshlltq_n_uv16qi (__a, __imm); |
a50f6abf SP |
5781 | } |
5782 | ||
f9355dee | 5783 | __extension__ extern __inline uint16x8_t |
a50f6abf | 5784 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 5785 | __arm_vshllbq_n_u8 (uint8x16_t __a, const int __imm) |
a50f6abf | 5786 | { |
f9355dee | 5787 | return __builtin_mve_vshllbq_n_uv16qi (__a, __imm); |
a50f6abf SP |
5788 | } |
5789 | ||
f9355dee | 5790 | __extension__ extern __inline uint16x8_t |
a50f6abf | 5791 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 5792 | __arm_vorrq_n_u16 (uint16x8_t __a, const int __imm) |
a50f6abf | 5793 | { |
f9355dee | 5794 | return __builtin_mve_vorrq_n_uv8hi (__a, __imm); |
a50f6abf SP |
5795 | } |
5796 | ||
f9355dee | 5797 | __extension__ extern __inline uint16x8_t |
a50f6abf | 5798 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 5799 | __arm_vbicq_n_u16 (uint16x8_t __a, const int __imm) |
a50f6abf | 5800 | { |
f9355dee | 5801 | return __builtin_mve_vbicq_n_uv8hi (__a, __imm); |
a50f6abf SP |
5802 | } |
5803 | ||
f9355dee | 5804 | __extension__ extern __inline int8x16_t |
a50f6abf | 5805 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 5806 | __arm_vqmovntq_s16 (int8x16_t __a, int16x8_t __b) |
a50f6abf | 5807 | { |
f9355dee | 5808 | return __builtin_mve_vqmovntq_sv8hi (__a, __b); |
a50f6abf SP |
5809 | } |
5810 | ||
f9355dee | 5811 | __extension__ extern __inline int8x16_t |
a50f6abf | 5812 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 5813 | __arm_vqmovnbq_s16 (int8x16_t __a, int16x8_t __b) |
a50f6abf | 5814 | { |
f9355dee | 5815 | return __builtin_mve_vqmovnbq_sv8hi (__a, __b); |
a50f6abf SP |
5816 | } |
5817 | ||
f9355dee | 5818 | __extension__ extern __inline int32x4_t |
a50f6abf | 5819 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 5820 | __arm_vqdmulltq_s16 (int16x8_t __a, int16x8_t __b) |
a50f6abf | 5821 | { |
f9355dee | 5822 | return __builtin_mve_vqdmulltq_sv8hi (__a, __b); |
a50f6abf SP |
5823 | } |
5824 | ||
f9355dee | 5825 | __extension__ extern __inline int32x4_t |
a50f6abf | 5826 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 5827 | __arm_vqdmulltq_n_s16 (int16x8_t __a, int16_t __b) |
a50f6abf | 5828 | { |
f9355dee | 5829 | return __builtin_mve_vqdmulltq_n_sv8hi (__a, __b); |
a50f6abf SP |
5830 | } |
5831 | ||
f9355dee | 5832 | __extension__ extern __inline int32x4_t |
a50f6abf | 5833 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 5834 | __arm_vqdmullbq_s16 (int16x8_t __a, int16x8_t __b) |
a50f6abf | 5835 | { |
f9355dee | 5836 | return __builtin_mve_vqdmullbq_sv8hi (__a, __b); |
a50f6abf SP |
5837 | } |
5838 | ||
f9355dee | 5839 | __extension__ extern __inline int32x4_t |
a50f6abf | 5840 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 5841 | __arm_vqdmullbq_n_s16 (int16x8_t __a, int16_t __b) |
a50f6abf | 5842 | { |
f9355dee | 5843 | return __builtin_mve_vqdmullbq_n_sv8hi (__a, __b); |
a50f6abf SP |
5844 | } |
5845 | ||
f9355dee | 5846 | __extension__ extern __inline int8x16_t |
a50f6abf | 5847 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 5848 | __arm_vmovntq_s16 (int8x16_t __a, int16x8_t __b) |
a50f6abf | 5849 | { |
f9355dee | 5850 | return __builtin_mve_vmovntq_sv8hi (__a, __b); |
a50f6abf SP |
5851 | } |
5852 | ||
f9355dee | 5853 | __extension__ extern __inline int8x16_t |
a50f6abf | 5854 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 5855 | __arm_vmovnbq_s16 (int8x16_t __a, int16x8_t __b) |
a50f6abf | 5856 | { |
f9355dee | 5857 | return __builtin_mve_vmovnbq_sv8hi (__a, __b); |
a50f6abf SP |
5858 | } |
5859 | ||
f9355dee | 5860 | __extension__ extern __inline int64_t |
a50f6abf | 5861 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 5862 | __arm_vmlsldavxq_s16 (int16x8_t __a, int16x8_t __b) |
a50f6abf | 5863 | { |
f9355dee | 5864 | return __builtin_mve_vmlsldavxq_sv8hi (__a, __b); |
a50f6abf SP |
5865 | } |
5866 | ||
f9355dee | 5867 | __extension__ extern __inline int64_t |
a50f6abf | 5868 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 5869 | __arm_vmlsldavq_s16 (int16x8_t __a, int16x8_t __b) |
a50f6abf | 5870 | { |
f9355dee | 5871 | return __builtin_mve_vmlsldavq_sv8hi (__a, __b); |
a50f6abf SP |
5872 | } |
5873 | ||
f9355dee | 5874 | __extension__ extern __inline int64_t |
a50f6abf | 5875 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 5876 | __arm_vmlaldavxq_s16 (int16x8_t __a, int16x8_t __b) |
a50f6abf | 5877 | { |
f9355dee | 5878 | return __builtin_mve_vmlaldavxq_sv8hi (__a, __b); |
a50f6abf SP |
5879 | } |
5880 | ||
f9355dee | 5881 | __extension__ extern __inline int64_t |
a50f6abf | 5882 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 5883 | __arm_vmlaldavq_s16 (int16x8_t __a, int16x8_t __b) |
a50f6abf | 5884 | { |
f9355dee | 5885 | return __builtin_mve_vmlaldavq_sv8hi (__a, __b); |
a50f6abf SP |
5886 | } |
5887 | ||
f9355dee | 5888 | __extension__ extern __inline int16x8_t |
a50f6abf | 5889 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 5890 | __arm_vshlltq_n_s8 (int8x16_t __a, const int __imm) |
a50f6abf | 5891 | { |
f9355dee | 5892 | return __builtin_mve_vshlltq_n_sv16qi (__a, __imm); |
a50f6abf SP |
5893 | } |
5894 | ||
f9355dee | 5895 | __extension__ extern __inline int16x8_t |
a50f6abf | 5896 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 5897 | __arm_vshllbq_n_s8 (int8x16_t __a, const int __imm) |
a50f6abf | 5898 | { |
f9355dee | 5899 | return __builtin_mve_vshllbq_n_sv16qi (__a, __imm); |
a50f6abf SP |
5900 | } |
5901 | ||
f9355dee | 5902 | __extension__ extern __inline int16x8_t |
a50f6abf | 5903 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 5904 | __arm_vorrq_n_s16 (int16x8_t __a, const int __imm) |
a50f6abf | 5905 | { |
f9355dee | 5906 | return __builtin_mve_vorrq_n_sv8hi (__a, __imm); |
a50f6abf SP |
5907 | } |
5908 | ||
f9355dee | 5909 | __extension__ extern __inline int16x8_t |
a50f6abf | 5910 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 5911 | __arm_vbicq_n_s16 (int16x8_t __a, const int __imm) |
a50f6abf | 5912 | { |
f9355dee | 5913 | return __builtin_mve_vbicq_n_sv8hi (__a, __imm); |
a50f6abf SP |
5914 | } |
5915 | ||
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqmovntq_u32 (uint16x8_t __a, uint32x4_t __b)
{
  /* Saturating narrow of __b (u32 -> u16) into the top halves of __a.  */
  return __builtin_mve_vqmovntq_uv4si (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqmovnbq_u32 (uint16x8_t __a, uint32x4_t __b)
{
  /* Saturating narrow of __b (u32 -> u16) into the bottom halves of __a.  */
  return __builtin_mve_vqmovnbq_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_poly_p16 (uint16x8_t __a, uint16x8_t __b)
{
  /* Polynomial multiply long of the top (odd-numbered) 16-bit lanes.  */
  return __builtin_mve_vmulltq_poly_pv8hi (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_poly_p16 (uint16x8_t __a, uint16x8_t __b)
{
  /* Polynomial multiply long of the bottom (even-numbered) 16-bit lanes.  */
  return __builtin_mve_vmullbq_poly_pv8hi (__a, __b);
}
5943 | ||
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovntq_u32 (uint16x8_t __a, uint32x4_t __b)
{
  /* Non-saturating narrow of __b (u32 -> u16) into the top halves of __a.  */
  return __builtin_mve_vmovntq_uv4si (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovnbq_u32 (uint16x8_t __a, uint32x4_t __b)
{
  /* Non-saturating narrow of __b (u32 -> u16) into the bottom halves of __a.  */
  return __builtin_mve_vmovnbq_uv4si (__a, __b);
}

__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaldavq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  /* Multiply-accumulate long across vector: sum of __a[i] * __b[i] as u64.  */
  return __builtin_mve_vmlaldavq_uv4si (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqmovuntq_s32 (uint16x8_t __a, int32x4_t __b)
{
  /* Saturating narrow of signed __b to unsigned u16, written to the top
     halves of __a.  */
  return __builtin_mve_vqmovuntq_sv4si (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqmovunbq_s32 (uint16x8_t __a, int32x4_t __b)
{
  /* Saturating narrow of signed __b to unsigned u16, written to the bottom
     halves of __a.  */
  return __builtin_mve_vqmovunbq_sv4si (__a, __b);
}
5978 | ||
5979 | __extension__ extern __inline uint32x4_t | |
5980 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
f9355dee | 5981 | __arm_vshlltq_n_u16 (uint16x8_t __a, const int __imm) |
6df4618c | 5982 | { |
f9355dee | 5983 | return __builtin_mve_vshlltq_n_uv8hi (__a, __imm); |
6df4618c SP |
5984 | } |
5985 | ||
f9355dee | 5986 | __extension__ extern __inline uint32x4_t |
6df4618c | 5987 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 5988 | __arm_vshllbq_n_u16 (uint16x8_t __a, const int __imm) |
6df4618c | 5989 | { |
f9355dee | 5990 | return __builtin_mve_vshllbq_n_uv8hi (__a, __imm); |
6df4618c SP |
5991 | } |
5992 | ||
5993 | __extension__ extern __inline uint32x4_t | |
5994 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
f9355dee | 5995 | __arm_vorrq_n_u32 (uint32x4_t __a, const int __imm) |
6df4618c | 5996 | { |
f9355dee | 5997 | return __builtin_mve_vorrq_n_uv4si (__a, __imm); |
6df4618c SP |
5998 | } |
5999 | ||
f9355dee SP |
6000 | __extension__ extern __inline uint32x4_t |
6001 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6002 | __arm_vbicq_n_u32 (uint32x4_t __a, const int __imm) | |
6df4618c | 6003 | { |
f9355dee | 6004 | return __builtin_mve_vbicq_n_uv4si (__a, __imm); |
6df4618c SP |
6005 | } |
6006 | ||
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqmovntq_s32 (int16x8_t __a, int32x4_t __b)
{
  /* Saturating narrow of __b (s32 -> s16) into the top halves of __a.  */
  return __builtin_mve_vqmovntq_sv4si (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqmovnbq_s32 (int16x8_t __a, int32x4_t __b)
{
  /* Saturating narrow of __b (s32 -> s16) into the bottom halves of __a.  */
  return __builtin_mve_vqmovnbq_sv4si (__a, __b);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulltq_s32 (int32x4_t __a, int32x4_t __b)
{
  /* Saturating doubling multiply long of the top 32-bit lanes -> s64.  */
  return __builtin_mve_vqdmulltq_sv4si (__a, __b);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulltq_n_s32 (int32x4_t __a, int32_t __b)
{
  /* As above, with scalar __b broadcast to every lane.  */
  return __builtin_mve_vqdmulltq_n_sv4si (__a, __b);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmullbq_s32 (int32x4_t __a, int32x4_t __b)
{
  /* Saturating doubling multiply long of the bottom 32-bit lanes -> s64.  */
  return __builtin_mve_vqdmullbq_sv4si (__a, __b);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmullbq_n_s32 (int32x4_t __a, int32_t __b)
{
  /* As above, with scalar __b broadcast to every lane.  */
  return __builtin_mve_vqdmullbq_n_sv4si (__a, __b);
}
6048 | ||
6049 | __extension__ extern __inline int16x8_t | |
6050 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
f9355dee | 6051 | __arm_vmovntq_s32 (int16x8_t __a, int32x4_t __b) |
6df4618c | 6052 | { |
f9355dee | 6053 | return __builtin_mve_vmovntq_sv4si (__a, __b); |
6df4618c SP |
6054 | } |
6055 | ||
f9355dee | 6056 | __extension__ extern __inline int16x8_t |
6df4618c | 6057 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6058 | __arm_vmovnbq_s32 (int16x8_t __a, int32x4_t __b) |
6df4618c | 6059 | { |
f9355dee | 6060 | return __builtin_mve_vmovnbq_sv4si (__a, __b); |
6df4618c SP |
6061 | } |
6062 | ||
f9355dee | 6063 | __extension__ extern __inline int64_t |
4be8cf77 | 6064 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6065 | __arm_vmlsldavxq_s32 (int32x4_t __a, int32x4_t __b) |
4be8cf77 | 6066 | { |
f9355dee | 6067 | return __builtin_mve_vmlsldavxq_sv4si (__a, __b); |
4be8cf77 SP |
6068 | } |
6069 | ||
f9355dee | 6070 | __extension__ extern __inline int64_t |
4be8cf77 | 6071 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6072 | __arm_vmlsldavq_s32 (int32x4_t __a, int32x4_t __b) |
4be8cf77 | 6073 | { |
f9355dee | 6074 | return __builtin_mve_vmlsldavq_sv4si (__a, __b); |
4be8cf77 SP |
6075 | } |
6076 | ||
f9355dee | 6077 | __extension__ extern __inline int64_t |
4be8cf77 | 6078 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6079 | __arm_vmlaldavxq_s32 (int32x4_t __a, int32x4_t __b) |
4be8cf77 | 6080 | { |
f9355dee | 6081 | return __builtin_mve_vmlaldavxq_sv4si (__a, __b); |
4be8cf77 SP |
6082 | } |
6083 | ||
f9355dee | 6084 | __extension__ extern __inline int64_t |
4be8cf77 | 6085 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6086 | __arm_vmlaldavq_s32 (int32x4_t __a, int32x4_t __b) |
4be8cf77 | 6087 | { |
f9355dee | 6088 | return __builtin_mve_vmlaldavq_sv4si (__a, __b); |
4be8cf77 SP |
6089 | } |
6090 | ||
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlltq_n_s16 (int16x8_t __a, const int __imm)
{
  /* Widening left shift by immediate of the top halves: s16 -> s32.  */
  return __builtin_mve_vshlltq_n_sv8hi (__a, __imm);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshllbq_n_s16 (int16x8_t __a, const int __imm)
{
  /* Widening left shift by immediate of the bottom halves: s16 -> s32.  */
  return __builtin_mve_vshllbq_n_sv8hi (__a, __imm);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_n_s32 (int32x4_t __a, const int __imm)
{
  /* Bitwise OR of each 32-bit lane with an immediate.  */
  return __builtin_mve_vorrq_n_sv4si (__a, __imm);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_n_s32 (int32x4_t __a, const int __imm)
{
  /* Bit clear: AND each 32-bit lane with the complement of an immediate.  */
  return __builtin_mve_vbicq_n_sv4si (__a, __imm);
}

__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmlaldavhq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  /* Rounding multiply-accumulate long dual across vector (high halves
     retained) -> u64.  */
  return __builtin_mve_vrmlaldavhq_uv4si (__a, __b);
}
6125 | ||
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vctp8q_m (uint32_t __a, mve_pred16_t __p)
{
  /* Tail predicate covering the first __a 8-bit lanes, combined with the
     incoming predicate __p (predicated VCTP.8).  */
  return __builtin_mve_vctp8q_mhi (__a, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vctp64q_m (uint32_t __a, mve_pred16_t __p)
{
  /* Predicated VCTP.64: tail predicate for 64-bit lanes.  */
  return __builtin_mve_vctp64q_mhi (__a, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vctp32q_m (uint32_t __a, mve_pred16_t __p)
{
  /* Predicated VCTP.32: tail predicate for 32-bit lanes.  */
  return __builtin_mve_vctp32q_mhi (__a, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vctp16q_m (uint32_t __a, mve_pred16_t __p)
{
  /* Predicated VCTP.16: tail predicate for 16-bit lanes.  */
  return __builtin_mve_vctp16q_mhi (__a, __p);
}

__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddlvaq_u32 (uint64_t __a, uint32x4_t __b)
{
  /* Add long across vector with accumulate: __a + sum of __b lanes (u64).  */
  return __builtin_mve_vaddlvaq_uv4si (__a, __b);
}
6160 | ||
__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmlsldavhxq_s32 (int32x4_t __a, int32x4_t __b)
{
  /* Rounding multiply-subtract long dual across vector, exchanged pairing,
     high halves retained -> s64.  */
  return __builtin_mve_vrmlsldavhxq_sv4si (__a, __b);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmlsldavhq_s32 (int32x4_t __a, int32x4_t __b)
{
  /* Rounding multiply-subtract long dual across vector -> s64.  */
  return __builtin_mve_vrmlsldavhq_sv4si (__a, __b);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmlaldavhxq_s32 (int32x4_t __a, int32x4_t __b)
{
  /* Rounding multiply-accumulate long dual across vector, exchanged
     pairing -> s64.  */
  return __builtin_mve_vrmlaldavhxq_sv4si (__a, __b);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmlaldavhq_s32 (int32x4_t __a, int32x4_t __b)
{
  /* Rounding multiply-accumulate long dual across vector -> s64.  */
  return __builtin_mve_vrmlaldavhq_sv4si (__a, __b);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddlvaq_s32 (int64_t __a, int32x4_t __b)
{
  /* Add long across vector with accumulate: __a + sum of __b lanes (s64).  */
  return __builtin_mve_vaddlvaq_sv4si (__a, __b);
}
6195 | ||
/* vabavq: absolute-difference-and-accumulate across vector.  Adds the sum of
   |__b[i] - __c[i]| over all lanes to the scalar accumulator __a.  */

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabavq_s8 (uint32_t __a, int8x16_t __b, int8x16_t __c)
{
  return __builtin_mve_vabavq_sv16qi (__a, __b, __c);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabavq_s16 (uint32_t __a, int16x8_t __b, int16x8_t __c)
{
  return __builtin_mve_vabavq_sv8hi (__a, __b, __c);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabavq_s32 (uint32_t __a, int32x4_t __b, int32x4_t __c)
{
  return __builtin_mve_vabavq_sv4si (__a, __b, __c);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabavq_u8 (uint32_t __a, uint8x16_t __b, uint8x16_t __c)
{
  return __builtin_mve_vabavq_uv16qi(__a, __b, __c);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabavq_u16 (uint32_t __a, uint16x8_t __b, uint16x8_t __c)
{
  return __builtin_mve_vabavq_uv8hi(__a, __b, __c);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabavq_u32 (uint32_t __a, uint32x4_t __b, uint32x4_t __c)
{
  return __builtin_mve_vabavq_uv4si(__a, __b, __c);
}
6237 | ||
/* vbicq_m_n: predicated bit-clear by immediate.  Lanes where the predicate
   __p is false keep their previous value in __a.  */

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_m_n_s16 (int16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vbicq_m_n_sv8hi (__a, __imm, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_m_n_s32 (int32x4_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vbicq_m_n_sv4si (__a, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_m_n_u16 (uint16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vbicq_m_n_uv8hi (__a, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_m_n_u32 (uint32x4_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vbicq_m_n_uv4si (__a, __imm, __p);
}
6265 | ||
/* vqrshrnbq_n: saturating rounding shift right narrow by immediate, result
   written to the bottom halves of __a.  vqrshrunbq_n is the signed-input,
   unsigned-output variant.  */

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshrnbq_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm)
{
  return __builtin_mve_vqrshrnbq_n_sv8hi (__a, __b, __imm);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshrnbq_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm)
{
  return __builtin_mve_vqrshrnbq_n_uv8hi (__a, __b, __imm);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshrnbq_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm)
{
  return __builtin_mve_vqrshrnbq_n_sv4si (__a, __b, __imm);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshrnbq_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm)
{
  return __builtin_mve_vqrshrnbq_n_uv4si (__a, __b, __imm);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshrunbq_n_s16 (uint8x16_t __a, int16x8_t __b, const int __imm)
{
  return __builtin_mve_vqrshrunbq_n_sv8hi (__a, __b, __imm);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshrunbq_n_s32 (uint16x8_t __a, int32x4_t __b, const int __imm)
{
  return __builtin_mve_vqrshrunbq_n_sv4si (__a, __b, __imm);
}
6307 | ||
__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmlaldavhaq_s32 (int64_t __a, int32x4_t __b, int32x4_t __c)
{
  /* Rounding multiply-accumulate long dual across vector, added to the
     64-bit accumulator __a.  */
  return __builtin_mve_vrmlaldavhaq_sv4si (__a, __b, __c);
}

__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmlaldavhaq_u32 (uint64_t __a, uint32x4_t __b, uint32x4_t __c)
{
  /* Unsigned variant of the above.  */
  return __builtin_mve_vrmlaldavhaq_uv4si (__a, __b, __c);
}
6321 | ||
/* vshlcq: whole-vector shift left with carry.  The builtin pair computes the
   shifted vector and the new carry from the same inputs; *__b carries bits in
   and receives the bits shifted out.  */

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlcq_s8 (int8x16_t __a, uint32_t * __b, const int __imm)
{
  int8x16_t __res = __builtin_mve_vshlcq_vec_sv16qi (__a, *__b, __imm);
  /* Update the carry-out through the pointer after computing the vector
     result, so both builtins see the original *__b.  */
  *__b = __builtin_mve_vshlcq_carry_sv16qi (__a, *__b, __imm);
  return __res;
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlcq_u8 (uint8x16_t __a, uint32_t * __b, const int __imm)
{
  uint8x16_t __res = __builtin_mve_vshlcq_vec_uv16qi (__a, *__b, __imm);
  *__b = __builtin_mve_vshlcq_carry_uv16qi (__a, *__b, __imm);
  return __res;
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlcq_s16 (int16x8_t __a, uint32_t * __b, const int __imm)
{
  int16x8_t __res = __builtin_mve_vshlcq_vec_sv8hi (__a, *__b, __imm);
  *__b = __builtin_mve_vshlcq_carry_sv8hi (__a, *__b, __imm);
  return __res;
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlcq_u16 (uint16x8_t __a, uint32_t * __b, const int __imm)
{
  uint16x8_t __res = __builtin_mve_vshlcq_vec_uv8hi (__a, *__b, __imm);
  *__b = __builtin_mve_vshlcq_carry_uv8hi (__a, *__b, __imm);
  return __res;
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlcq_s32 (int32x4_t __a, uint32_t * __b, const int __imm)
{
  int32x4_t __res = __builtin_mve_vshlcq_vec_sv4si (__a, *__b, __imm);
  *__b = __builtin_mve_vshlcq_carry_sv4si (__a, *__b, __imm);
  return __res;
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlcq_u32 (uint32x4_t __a, uint32_t * __b, const int __imm)
{
  uint32x4_t __res = __builtin_mve_vshlcq_vec_uv4si (__a, *__b, __imm);
  *__b = __builtin_mve_vshlcq_carry_uv4si (__a, *__b, __imm);
  return __res;
}
6375 | ||
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vpselq_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  /* Predicated select: per lane, __a where __p is true, else __b.  */
  return __builtin_mve_vpselq_uv16qi (__a, __b, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vpselq_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  /* Signed variant of the predicated select.  */
  return __builtin_mve_vpselq_sv16qi (__a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_m_u8 (uint8x16_t __inactive, uint8x16_t __a, mve_pred16_t __p)
{
  /* Predicated byte reversal within 64-bit halves; false-predicated lanes
     take their value from __inactive.  */
  return __builtin_mve_vrev64q_m_uv16qi (__inactive, __a, __p);
}
6396 | ||
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlashq_n_u8 (uint8x16_t __a, uint8x16_t __b, uint8_t __c)
{
  /* Saturating rounding doubling multiply-accumulate with scalar __c.  */
  return __builtin_mve_vqrdmlashq_n_uv16qi (__a, __b, __c);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlahq_n_u8 (uint8x16_t __a, uint8x16_t __b, uint8_t __c)
{
  /* Saturating rounding doubling multiply-accumulate, operand order per
     the VQRDMLAH form.  */
  return __builtin_mve_vqrdmlahq_n_uv16qi (__a, __b, __c);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlahq_n_u8 (uint8x16_t __a, uint8x16_t __b, uint8_t __c)
{
  /* Saturating doubling multiply-accumulate (non-rounding) with scalar.  */
  return __builtin_mve_vqdmlahq_n_uv16qi (__a, __b, __c);
}
6417 | ||
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, mve_pred16_t __p)
{
  /* Predicated bitwise NOT of __a; false-predicated lanes come from
     __inactive.  */
  return __builtin_mve_vmvnq_m_uv16qi (__inactive, __a, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlasq_n_u8 (uint8x16_t __a, uint8x16_t __b, uint8_t __c)
{
  /* Multiply-accumulate scalar: per lane, __a * __b + broadcast __c
     (VMLAS operand ordering).  */
  return __builtin_mve_vmlasq_n_uv16qi (__a, __b, __c);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaq_n_u8 (uint8x16_t __a, uint8x16_t __b, uint8_t __c)
{
  /* Multiply-accumulate: per lane, __a + __b * broadcast __c (VMLA).  */
  return __builtin_mve_vmlaq_n_uv16qi (__a, __b, __c);
}
6438 | ||
__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavq_p_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  /* Predicated multiply-accumulate across vector: sum of __a[i] * __b[i]
     over lanes enabled by __p.  */
  return __builtin_mve_vmladavq_p_uv16qi (__a, __b, __p);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavaq_u8 (uint32_t __a, uint8x16_t __b, uint8x16_t __c)
{
  /* Multiply-accumulate across vector, added to scalar accumulator __a.  */
  return __builtin_mve_vmladavaq_uv16qi (__a, __b, __c);
}

__extension__ extern __inline uint8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminvq_p_u8 (uint8_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  /* Predicated minimum across vector, folded with scalar __a.  */
  return __builtin_mve_vminvq_p_uv16qi (__a, __b, __p);
}

__extension__ extern __inline uint8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxvq_p_u8 (uint8_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  /* Predicated maximum across vector, folded with scalar __a.  */
  return __builtin_mve_vmaxvq_p_uv16qi (__a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdupq_m_n_u8 (uint8x16_t __inactive, uint8_t __a, mve_pred16_t __p)
{
  /* Predicated broadcast of scalar __a; false-predicated lanes come from
     __inactive.  */
  return __builtin_mve_vdupq_m_n_uv16qi (__inactive, __a, __p);
}
6473 | ||
/* Predicated unsigned 8-bit compares.  Each returns a lane predicate that is
   the comparison result ANDed with the incoming predicate __p; the _n_ forms
   compare against a broadcast scalar.  */

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_m_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpneq_m_uv16qi (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_m_n_u8 (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpneq_m_n_uv16qi (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmphiq_m_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  /* HI: unsigned higher (>).  */
  return __builtin_mve_vcmphiq_m_uv16qi (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmphiq_m_n_u8 (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmphiq_m_n_uv16qi (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_m_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpeqq_m_uv16qi (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_m_n_u8 (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpeqq_m_n_uv16qi (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpcsq_m_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  /* CS: unsigned higher-or-same (>=).  */
  return __builtin_mve_vcmpcsq_m_uv16qi (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpcsq_m_n_u8 (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpcsq_m_n_uv16qi (__a, __b, __p);
}
6529 | ||
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclzq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, mve_pred16_t __p)
{
  /* Predicated count-leading-zeros per lane; false-predicated lanes come
     from __inactive.  */
  return __builtin_mve_vclzq_m_uv16qi (__inactive, __a, __p);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvaq_p_u8 (uint32_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  /* Predicated add across vector with scalar accumulate into __a.  */
  return __builtin_mve_vaddvaq_p_uv16qi (__a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsriq_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __imm)
{
  /* Shift right by immediate and insert into __a (VSRI).  */
  return __builtin_mve_vsriq_n_uv16qi (__a, __b, __imm);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsliq_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __imm)
{
  /* Shift left by immediate and insert into __a (VSLI).  */
  return __builtin_mve_vsliq_n_uv16qi (__a, __b, __imm);
}
6557 | ||
/* Predicated shifts of every lane by the same signed scalar amount __b
   (negative shifts right); false-predicated lanes keep their value.  */

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_m_r_u8 (uint8x16_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vshlq_m_r_uv16qi (__a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_m_n_u8 (uint8x16_t __a, int32_t __b, mve_pred16_t __p)
{
  /* Rounding variant.  */
  return __builtin_mve_vrshlq_m_n_uv16qi (__a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_r_u8 (uint8x16_t __a, int32_t __b, mve_pred16_t __p)
{
  /* Saturating variant.  */
  return __builtin_mve_vqshlq_m_r_uv16qi (__a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_m_n_u8 (uint8x16_t __a, int32_t __b, mve_pred16_t __p)
{
  /* Saturating rounding variant.  */
  return __builtin_mve_vqrshlq_m_n_uv16qi (__a, __b, __p);
}
6585 | ||
__extension__ extern __inline uint8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminavq_p_s8 (uint8_t __a, int8x16_t __b, mve_pred16_t __p)
{
  /* Predicated minimum of absolute values across vector, folded with
     scalar __a.  */
  return __builtin_mve_vminavq_p_sv16qi (__a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminaq_m_s8 (uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  /* Predicated per-lane minimum of __a and the absolute value of __b.  */
  return __builtin_mve_vminaq_m_sv16qi (__a, __b, __p);
}

__extension__ extern __inline uint8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxavq_p_s8 (uint8_t __a, int8x16_t __b, mve_pred16_t __p)
{
  /* Predicated maximum of absolute values across vector, folded with
     scalar __a.  */
  return __builtin_mve_vmaxavq_p_sv16qi (__a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxaq_m_s8 (uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  /* Predicated per-lane maximum of __a and the absolute value of __b.  */
  return __builtin_mve_vmaxaq_m_sv16qi (__a, __b, __p);
}
6613 | ||
/* Predicated signed 8-bit compares.  Each returns a lane predicate that is
   the comparison result ANDed with the incoming predicate __p; the _n_ forms
   compare against a broadcast scalar.  */

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_m_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpneq_m_sv16qi (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_m_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpneq_m_n_sv16qi (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq_m_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpltq_m_sv16qi (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq_m_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpltq_m_n_sv16qi (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq_m_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpleq_m_sv16qi (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq_m_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpleq_m_n_sv16qi (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq_m_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpgtq_m_sv16qi (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq_m_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpgtq_m_n_sv16qi (__a, __b, __p);
}
6669 | ||
6670 | __extension__ extern __inline mve_pred16_t | |
6671 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6672 | __arm_vcmpgeq_m_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
6673 | { | |
6674 | return __builtin_mve_vcmpgeq_m_sv16qi (__a, __b, __p); | |
6675 | } | |
6676 | ||
6677 | __extension__ extern __inline mve_pred16_t | |
6678 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6679 | __arm_vcmpgeq_m_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p) | |
6680 | { | |
6681 | return __builtin_mve_vcmpgeq_m_n_sv16qi (__a, __b, __p); | |
6682 | } | |
6683 | ||
6684 | __extension__ extern __inline mve_pred16_t | |
6685 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6686 | __arm_vcmpeqq_m_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
6687 | { | |
6688 | return __builtin_mve_vcmpeqq_m_sv16qi (__a, __b, __p); | |
6689 | } | |
6690 | ||
6691 | __extension__ extern __inline mve_pred16_t | |
6692 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6693 | __arm_vcmpeqq_m_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p) | |
6694 | { | |
6695 | return __builtin_mve_vcmpeqq_m_n_sv16qi (__a, __b, __p); | |
6696 | } | |
6697 | ||
6698 | __extension__ extern __inline int8x16_t | |
6699 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6700 | __arm_vshlq_m_r_s8 (int8x16_t __a, int32_t __b, mve_pred16_t __p) | |
6701 | { | |
6702 | return __builtin_mve_vshlq_m_r_sv16qi (__a, __b, __p); | |
6703 | } | |
6704 | ||
6705 | __extension__ extern __inline int8x16_t | |
6706 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6707 | __arm_vrshlq_m_n_s8 (int8x16_t __a, int32_t __b, mve_pred16_t __p) | |
6708 | { | |
6709 | return __builtin_mve_vrshlq_m_n_sv16qi (__a, __b, __p); | |
6710 | } | |
6711 | ||
6712 | __extension__ extern __inline int8x16_t | |
6713 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6714 | __arm_vrev64q_m_s8 (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p) | |
6715 | { | |
6716 | return __builtin_mve_vrev64q_m_sv16qi (__inactive, __a, __p); | |
6717 | } | |
6718 | ||
6719 | __extension__ extern __inline int8x16_t | |
6720 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6721 | __arm_vqshlq_m_r_s8 (int8x16_t __a, int32_t __b, mve_pred16_t __p) | |
6722 | { | |
6723 | return __builtin_mve_vqshlq_m_r_sv16qi (__a, __b, __p); | |
6724 | } | |
6725 | ||
6726 | __extension__ extern __inline int8x16_t | |
6727 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6728 | __arm_vqrshlq_m_n_s8 (int8x16_t __a, int32_t __b, mve_pred16_t __p) | |
6729 | { | |
6730 | return __builtin_mve_vqrshlq_m_n_sv16qi (__a, __b, __p); | |
6731 | } | |
6732 | ||
6733 | __extension__ extern __inline int8x16_t | |
6734 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6735 | __arm_vqnegq_m_s8 (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p) | |
6736 | { | |
6737 | return __builtin_mve_vqnegq_m_sv16qi (__inactive, __a, __p); | |
6738 | } | |
6739 | ||
6740 | __extension__ extern __inline int8x16_t | |
6741 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6742 | __arm_vqabsq_m_s8 (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p) | |
6743 | { | |
6744 | return __builtin_mve_vqabsq_m_sv16qi (__inactive, __a, __p); | |
6745 | } | |
6746 | ||
6747 | __extension__ extern __inline int8x16_t | |
6748 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6749 | __arm_vnegq_m_s8 (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p) | |
6750 | { | |
6751 | return __builtin_mve_vnegq_m_sv16qi (__inactive, __a, __p); | |
6752 | } | |
6753 | ||
6754 | ||
6755 | __extension__ extern __inline int8x16_t | |
6756 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6757 | __arm_vmvnq_m_s8 (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p) | |
6758 | { | |
6759 | return __builtin_mve_vmvnq_m_sv16qi (__inactive, __a, __p); | |
6760 | } | |
6761 | ||
6762 | __extension__ extern __inline int32_t | |
6763 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6764 | __arm_vmlsdavxq_p_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
6765 | { | |
6766 | return __builtin_mve_vmlsdavxq_p_sv16qi (__a, __b, __p); | |
6767 | } | |
6768 | ||
6769 | __extension__ extern __inline int32_t | |
6770 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6771 | __arm_vmlsdavq_p_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
6772 | { | |
6773 | return __builtin_mve_vmlsdavq_p_sv16qi (__a, __b, __p); | |
6774 | } | |
6775 | ||
6776 | __extension__ extern __inline int32_t | |
6777 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6778 | __arm_vmladavxq_p_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
6779 | { | |
6780 | return __builtin_mve_vmladavxq_p_sv16qi (__a, __b, __p); | |
6781 | } | |
6782 | ||
6783 | __extension__ extern __inline int32_t | |
6784 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6785 | __arm_vmladavq_p_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
6786 | { | |
6787 | return __builtin_mve_vmladavq_p_sv16qi (__a, __b, __p); | |
6788 | } | |
6789 | ||
6790 | __extension__ extern __inline int8_t | |
6791 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6792 | __arm_vminvq_p_s8 (int8_t __a, int8x16_t __b, mve_pred16_t __p) | |
6793 | { | |
6794 | return __builtin_mve_vminvq_p_sv16qi (__a, __b, __p); | |
6795 | } | |
6796 | ||
6797 | __extension__ extern __inline int8_t | |
6798 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6799 | __arm_vmaxvq_p_s8 (int8_t __a, int8x16_t __b, mve_pred16_t __p) | |
6800 | { | |
6801 | return __builtin_mve_vmaxvq_p_sv16qi (__a, __b, __p); | |
6802 | } | |
6803 | ||
6804 | __extension__ extern __inline int8x16_t | |
6805 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6806 | __arm_vdupq_m_n_s8 (int8x16_t __inactive, int8_t __a, mve_pred16_t __p) | |
6807 | { | |
6808 | return __builtin_mve_vdupq_m_n_sv16qi (__inactive, __a, __p); | |
6809 | } | |
6810 | ||
6811 | __extension__ extern __inline int8x16_t | |
6812 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6813 | __arm_vclzq_m_s8 (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p) | |
6814 | { | |
6815 | return __builtin_mve_vclzq_m_sv16qi (__inactive, __a, __p); | |
6816 | } | |
6817 | ||
6818 | __extension__ extern __inline int8x16_t | |
6819 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6820 | __arm_vclsq_m_s8 (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p) | |
6821 | { | |
6822 | return __builtin_mve_vclsq_m_sv16qi (__inactive, __a, __p); | |
6823 | } | |
6824 | ||
6825 | __extension__ extern __inline int32_t | |
6826 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6827 | __arm_vaddvaq_p_s8 (int32_t __a, int8x16_t __b, mve_pred16_t __p) | |
6828 | { | |
6829 | return __builtin_mve_vaddvaq_p_sv16qi (__a, __b, __p); | |
6830 | } | |
6831 | ||
6832 | __extension__ extern __inline int8x16_t | |
6833 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6834 | __arm_vabsq_m_s8 (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p) | |
6835 | { | |
6836 | return __builtin_mve_vabsq_m_sv16qi (__inactive, __a, __p); | |
6837 | } | |
6838 | ||
6839 | __extension__ extern __inline int8x16_t | |
6840 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6841 | __arm_vqrdmlsdhxq_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b) | |
6842 | { | |
6843 | return __builtin_mve_vqrdmlsdhxq_sv16qi (__inactive, __a, __b); | |
6844 | } | |
6845 | ||
6846 | __extension__ extern __inline int8x16_t | |
6847 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6848 | __arm_vqrdmlsdhq_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b) | |
6849 | { | |
6850 | return __builtin_mve_vqrdmlsdhq_sv16qi (__inactive, __a, __b); | |
6851 | } | |
6852 | ||
6853 | __extension__ extern __inline int8x16_t | |
6854 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6855 | __arm_vqrdmlashq_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c) | |
6856 | { | |
6857 | return __builtin_mve_vqrdmlashq_n_sv16qi (__a, __b, __c); | |
6858 | } | |
6859 | ||
6860 | __extension__ extern __inline int8x16_t | |
6861 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6862 | __arm_vqrdmlahq_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c) | |
6863 | { | |
6864 | return __builtin_mve_vqrdmlahq_n_sv16qi (__a, __b, __c); | |
6865 | } | |
6866 | ||
6867 | __extension__ extern __inline int8x16_t | |
6868 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6869 | __arm_vqrdmladhxq_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b) | |
6870 | { | |
6871 | return __builtin_mve_vqrdmladhxq_sv16qi (__inactive, __a, __b); | |
6872 | } | |
6873 | ||
6874 | __extension__ extern __inline int8x16_t | |
6875 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6876 | __arm_vqrdmladhq_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b) | |
6877 | { | |
6878 | return __builtin_mve_vqrdmladhq_sv16qi (__inactive, __a, __b); | |
6879 | } | |
6880 | ||
6881 | __extension__ extern __inline int8x16_t | |
6882 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6883 | __arm_vqdmlsdhxq_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b) | |
6884 | { | |
6885 | return __builtin_mve_vqdmlsdhxq_sv16qi (__inactive, __a, __b); | |
6886 | } | |
6887 | ||
6888 | __extension__ extern __inline int8x16_t | |
6889 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6890 | __arm_vqdmlsdhq_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b) | |
6891 | { | |
6892 | return __builtin_mve_vqdmlsdhq_sv16qi (__inactive, __a, __b); | |
6893 | } | |
6894 | ||
6895 | __extension__ extern __inline int8x16_t | |
6896 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6897 | __arm_vqdmlahq_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c) | |
6898 | { | |
6899 | return __builtin_mve_vqdmlahq_n_sv16qi (__a, __b, __c); | |
6900 | } | |
6901 | ||
6902 | __extension__ extern __inline int8x16_t | |
6903 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6904 | __arm_vqdmladhxq_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b) | |
6905 | { | |
6906 | return __builtin_mve_vqdmladhxq_sv16qi (__inactive, __a, __b); | |
6907 | } | |
6908 | ||
6909 | __extension__ extern __inline int8x16_t | |
6910 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6911 | __arm_vqdmladhq_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b) | |
6912 | { | |
6913 | return __builtin_mve_vqdmladhq_sv16qi (__inactive, __a, __b); | |
6914 | } | |
6915 | ||
6916 | __extension__ extern __inline int32_t | |
6917 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6918 | __arm_vmlsdavaxq_s8 (int32_t __a, int8x16_t __b, int8x16_t __c) | |
6919 | { | |
6920 | return __builtin_mve_vmlsdavaxq_sv16qi (__a, __b, __c); | |
6921 | } | |
6922 | ||
6923 | __extension__ extern __inline int32_t | |
6924 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6925 | __arm_vmlsdavaq_s8 (int32_t __a, int8x16_t __b, int8x16_t __c) | |
6926 | { | |
6927 | return __builtin_mve_vmlsdavaq_sv16qi (__a, __b, __c); | |
6928 | } | |
6929 | ||
6930 | __extension__ extern __inline int8x16_t | |
6931 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6932 | __arm_vmlasq_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c) | |
6933 | { | |
6934 | return __builtin_mve_vmlasq_n_sv16qi (__a, __b, __c); | |
6935 | } | |
6936 | ||
6937 | __extension__ extern __inline int8x16_t | |
6938 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6939 | __arm_vmlaq_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c) | |
6940 | { | |
6941 | return __builtin_mve_vmlaq_n_sv16qi (__a, __b, __c); | |
6942 | } | |
6943 | ||
6944 | __extension__ extern __inline int32_t | |
6945 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6946 | __arm_vmladavaxq_s8 (int32_t __a, int8x16_t __b, int8x16_t __c) | |
6947 | { | |
6948 | return __builtin_mve_vmladavaxq_sv16qi (__a, __b, __c); | |
6949 | } | |
6950 | ||
6951 | __extension__ extern __inline int32_t | |
6952 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6953 | __arm_vmladavaq_s8 (int32_t __a, int8x16_t __b, int8x16_t __c) | |
6954 | { | |
6955 | return __builtin_mve_vmladavaq_sv16qi (__a, __b, __c); | |
6956 | } | |
6957 | ||
6958 | __extension__ extern __inline int8x16_t | |
6959 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6960 | __arm_vsriq_n_s8 (int8x16_t __a, int8x16_t __b, const int __imm) | |
6961 | { | |
6962 | return __builtin_mve_vsriq_n_sv16qi (__a, __b, __imm); | |
6963 | } | |
6964 | ||
6965 | __extension__ extern __inline int8x16_t | |
6966 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6967 | __arm_vsliq_n_s8 (int8x16_t __a, int8x16_t __b, const int __imm) | |
6968 | { | |
6969 | return __builtin_mve_vsliq_n_sv16qi (__a, __b, __imm); | |
6970 | } | |
6971 | ||
6972 | __extension__ extern __inline uint16x8_t | |
6973 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6974 | __arm_vpselq_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
6975 | { | |
6976 | return __builtin_mve_vpselq_uv8hi (__a, __b, __p); | |
6977 | } | |
6978 | ||
6979 | __extension__ extern __inline int16x8_t | |
6980 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6981 | __arm_vpselq_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
6982 | { | |
6983 | return __builtin_mve_vpselq_sv8hi (__a, __b, __p); | |
6984 | } | |
6985 | ||
6986 | __extension__ extern __inline uint16x8_t | |
6987 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6988 | __arm_vrev64q_m_u16 (uint16x8_t __inactive, uint16x8_t __a, mve_pred16_t __p) | |
6989 | { | |
6990 | return __builtin_mve_vrev64q_m_uv8hi (__inactive, __a, __p); | |
6991 | } | |
6992 | ||
6993 | __extension__ extern __inline uint16x8_t | |
6994 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6995 | __arm_vqrdmlashq_n_u16 (uint16x8_t __a, uint16x8_t __b, uint16_t __c) | |
6996 | { | |
6997 | return __builtin_mve_vqrdmlashq_n_uv8hi (__a, __b, __c); | |
6998 | } | |
6999 | ||
7000 | __extension__ extern __inline uint16x8_t | |
7001 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7002 | __arm_vqrdmlahq_n_u16 (uint16x8_t __a, uint16x8_t __b, uint16_t __c) | |
7003 | { | |
7004 | return __builtin_mve_vqrdmlahq_n_uv8hi (__a, __b, __c); | |
7005 | } | |
7006 | ||
7007 | __extension__ extern __inline uint16x8_t | |
7008 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7009 | __arm_vqdmlahq_n_u16 (uint16x8_t __a, uint16x8_t __b, uint16_t __c) | |
7010 | { | |
7011 | return __builtin_mve_vqdmlahq_n_uv8hi (__a, __b, __c); | |
7012 | } | |
7013 | ||
7014 | __extension__ extern __inline uint16x8_t | |
7015 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7016 | __arm_vmvnq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, mve_pred16_t __p) | |
7017 | { | |
7018 | return __builtin_mve_vmvnq_m_uv8hi (__inactive, __a, __p); | |
7019 | } | |
7020 | ||
7021 | __extension__ extern __inline uint16x8_t | |
7022 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7023 | __arm_vmlasq_n_u16 (uint16x8_t __a, uint16x8_t __b, uint16_t __c) | |
7024 | { | |
7025 | return __builtin_mve_vmlasq_n_uv8hi (__a, __b, __c); | |
7026 | } | |
7027 | ||
7028 | __extension__ extern __inline uint16x8_t | |
7029 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7030 | __arm_vmlaq_n_u16 (uint16x8_t __a, uint16x8_t __b, uint16_t __c) | |
7031 | { | |
7032 | return __builtin_mve_vmlaq_n_uv8hi (__a, __b, __c); | |
7033 | } | |
7034 | ||
7035 | __extension__ extern __inline uint32_t | |
7036 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7037 | __arm_vmladavq_p_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
7038 | { | |
7039 | return __builtin_mve_vmladavq_p_uv8hi (__a, __b, __p); | |
7040 | } | |
7041 | ||
7042 | __extension__ extern __inline uint32_t | |
7043 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7044 | __arm_vmladavaq_u16 (uint32_t __a, uint16x8_t __b, uint16x8_t __c) | |
7045 | { | |
7046 | return __builtin_mve_vmladavaq_uv8hi (__a, __b, __c); | |
7047 | } | |
7048 | ||
7049 | __extension__ extern __inline uint16_t | |
7050 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7051 | __arm_vminvq_p_u16 (uint16_t __a, uint16x8_t __b, mve_pred16_t __p) | |
7052 | { | |
7053 | return __builtin_mve_vminvq_p_uv8hi (__a, __b, __p); | |
7054 | } | |
7055 | ||
7056 | __extension__ extern __inline uint16_t | |
7057 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7058 | __arm_vmaxvq_p_u16 (uint16_t __a, uint16x8_t __b, mve_pred16_t __p) | |
7059 | { | |
7060 | return __builtin_mve_vmaxvq_p_uv8hi (__a, __b, __p); | |
7061 | } | |
7062 | ||
7063 | __extension__ extern __inline uint16x8_t | |
7064 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7065 | __arm_vdupq_m_n_u16 (uint16x8_t __inactive, uint16_t __a, mve_pred16_t __p) | |
7066 | { | |
7067 | return __builtin_mve_vdupq_m_n_uv8hi (__inactive, __a, __p); | |
7068 | } | |
7069 | ||
7070 | __extension__ extern __inline mve_pred16_t | |
7071 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7072 | __arm_vcmpneq_m_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
7073 | { | |
7074 | return __builtin_mve_vcmpneq_m_uv8hi (__a, __b, __p); | |
7075 | } | |
7076 | ||
7077 | __extension__ extern __inline mve_pred16_t | |
7078 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7079 | __arm_vcmpneq_m_n_u16 (uint16x8_t __a, uint16_t __b, mve_pred16_t __p) | |
7080 | { | |
7081 | return __builtin_mve_vcmpneq_m_n_uv8hi (__a, __b, __p); | |
7082 | } | |
7083 | ||
7084 | __extension__ extern __inline mve_pred16_t | |
7085 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7086 | __arm_vcmphiq_m_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
7087 | { | |
7088 | return __builtin_mve_vcmphiq_m_uv8hi (__a, __b, __p); | |
7089 | } | |
7090 | ||
7091 | __extension__ extern __inline mve_pred16_t | |
7092 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7093 | __arm_vcmphiq_m_n_u16 (uint16x8_t __a, uint16_t __b, mve_pred16_t __p) | |
7094 | { | |
7095 | return __builtin_mve_vcmphiq_m_n_uv8hi (__a, __b, __p); | |
7096 | } | |
7097 | ||
7098 | __extension__ extern __inline mve_pred16_t | |
7099 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7100 | __arm_vcmpeqq_m_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
7101 | { | |
7102 | return __builtin_mve_vcmpeqq_m_uv8hi (__a, __b, __p); | |
7103 | } | |
7104 | ||
7105 | __extension__ extern __inline mve_pred16_t | |
7106 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7107 | __arm_vcmpeqq_m_n_u16 (uint16x8_t __a, uint16_t __b, mve_pred16_t __p) | |
7108 | { | |
7109 | return __builtin_mve_vcmpeqq_m_n_uv8hi (__a, __b, __p); | |
7110 | } | |
7111 | ||
7112 | __extension__ extern __inline mve_pred16_t | |
7113 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7114 | __arm_vcmpcsq_m_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
7115 | { | |
7116 | return __builtin_mve_vcmpcsq_m_uv8hi (__a, __b, __p); | |
7117 | } | |
7118 | ||
7119 | __extension__ extern __inline mve_pred16_t | |
7120 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7121 | __arm_vcmpcsq_m_n_u16 (uint16x8_t __a, uint16_t __b, mve_pred16_t __p) | |
7122 | { | |
7123 | return __builtin_mve_vcmpcsq_m_n_uv8hi (__a, __b, __p); | |
7124 | } | |
7125 | ||
7126 | __extension__ extern __inline uint16x8_t | |
7127 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7128 | __arm_vclzq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, mve_pred16_t __p) | |
7129 | { | |
7130 | return __builtin_mve_vclzq_m_uv8hi (__inactive, __a, __p); | |
7131 | } | |
7132 | ||
7133 | __extension__ extern __inline uint32_t | |
7134 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7135 | __arm_vaddvaq_p_u16 (uint32_t __a, uint16x8_t __b, mve_pred16_t __p) | |
7136 | { | |
7137 | return __builtin_mve_vaddvaq_p_uv8hi (__a, __b, __p); | |
7138 | } | |
7139 | ||
7140 | __extension__ extern __inline uint16x8_t | |
7141 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7142 | __arm_vsriq_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __imm) | |
7143 | { | |
7144 | return __builtin_mve_vsriq_n_uv8hi (__a, __b, __imm); | |
7145 | } | |
7146 | ||
7147 | __extension__ extern __inline uint16x8_t | |
7148 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7149 | __arm_vsliq_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __imm) | |
7150 | { | |
7151 | return __builtin_mve_vsliq_n_uv8hi (__a, __b, __imm); | |
7152 | } | |
7153 | ||
7154 | __extension__ extern __inline uint16x8_t | |
7155 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7156 | __arm_vshlq_m_r_u16 (uint16x8_t __a, int32_t __b, mve_pred16_t __p) | |
7157 | { | |
7158 | return __builtin_mve_vshlq_m_r_uv8hi (__a, __b, __p); | |
7159 | } | |
7160 | ||
7161 | __extension__ extern __inline uint16x8_t | |
7162 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7163 | __arm_vrshlq_m_n_u16 (uint16x8_t __a, int32_t __b, mve_pred16_t __p) | |
7164 | { | |
7165 | return __builtin_mve_vrshlq_m_n_uv8hi (__a, __b, __p); | |
7166 | } | |
7167 | ||
7168 | __extension__ extern __inline uint16x8_t | |
7169 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7170 | __arm_vqshlq_m_r_u16 (uint16x8_t __a, int32_t __b, mve_pred16_t __p) | |
7171 | { | |
7172 | return __builtin_mve_vqshlq_m_r_uv8hi (__a, __b, __p); | |
7173 | } | |
7174 | ||
7175 | __extension__ extern __inline uint16x8_t | |
7176 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7177 | __arm_vqrshlq_m_n_u16 (uint16x8_t __a, int32_t __b, mve_pred16_t __p) | |
7178 | { | |
7179 | return __builtin_mve_vqrshlq_m_n_uv8hi (__a, __b, __p); | |
7180 | } | |
7181 | ||
7182 | __extension__ extern __inline uint16_t | |
7183 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7184 | __arm_vminavq_p_s16 (uint16_t __a, int16x8_t __b, mve_pred16_t __p) | |
7185 | { | |
7186 | return __builtin_mve_vminavq_p_sv8hi (__a, __b, __p); | |
7187 | } | |
7188 | ||
7189 | __extension__ extern __inline uint16x8_t | |
7190 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7191 | __arm_vminaq_m_s16 (uint16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
7192 | { | |
7193 | return __builtin_mve_vminaq_m_sv8hi (__a, __b, __p); | |
7194 | } | |
7195 | ||
7196 | __extension__ extern __inline uint16_t | |
7197 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7198 | __arm_vmaxavq_p_s16 (uint16_t __a, int16x8_t __b, mve_pred16_t __p) | |
7199 | { | |
7200 | return __builtin_mve_vmaxavq_p_sv8hi (__a, __b, __p); | |
7201 | } | |
7202 | ||
7203 | __extension__ extern __inline uint16x8_t | |
7204 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7205 | __arm_vmaxaq_m_s16 (uint16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
7206 | { | |
7207 | return __builtin_mve_vmaxaq_m_sv8hi (__a, __b, __p); | |
7208 | } | |
7209 | ||
7210 | __extension__ extern __inline mve_pred16_t | |
7211 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7212 | __arm_vcmpneq_m_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
7213 | { | |
7214 | return __builtin_mve_vcmpneq_m_sv8hi (__a, __b, __p); | |
7215 | } | |
7216 | ||
7217 | __extension__ extern __inline mve_pred16_t | |
7218 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7219 | __arm_vcmpneq_m_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p) | |
7220 | { | |
7221 | return __builtin_mve_vcmpneq_m_n_sv8hi (__a, __b, __p); | |
7222 | } | |
7223 | ||
7224 | __extension__ extern __inline mve_pred16_t | |
7225 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7226 | __arm_vcmpltq_m_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
7227 | { | |
7228 | return __builtin_mve_vcmpltq_m_sv8hi (__a, __b, __p); | |
7229 | } | |
7230 | ||
7231 | __extension__ extern __inline mve_pred16_t | |
7232 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7233 | __arm_vcmpltq_m_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p) | |
7234 | { | |
7235 | return __builtin_mve_vcmpltq_m_n_sv8hi (__a, __b, __p); | |
7236 | } | |
7237 | ||
7238 | __extension__ extern __inline mve_pred16_t | |
7239 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7240 | __arm_vcmpleq_m_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
7241 | { | |
7242 | return __builtin_mve_vcmpleq_m_sv8hi (__a, __b, __p); | |
7243 | } | |
7244 | ||
7245 | __extension__ extern __inline mve_pred16_t | |
7246 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7247 | __arm_vcmpleq_m_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p) | |
7248 | { | |
7249 | return __builtin_mve_vcmpleq_m_n_sv8hi (__a, __b, __p); | |
7250 | } | |
7251 | ||
7252 | __extension__ extern __inline mve_pred16_t | |
7253 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7254 | __arm_vcmpgtq_m_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
7255 | { | |
7256 | return __builtin_mve_vcmpgtq_m_sv8hi (__a, __b, __p); | |
7257 | } | |
7258 | ||
7259 | __extension__ extern __inline mve_pred16_t | |
7260 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7261 | __arm_vcmpgtq_m_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p) | |
7262 | { | |
7263 | return __builtin_mve_vcmpgtq_m_n_sv8hi (__a, __b, __p); | |
7264 | } | |
7265 | ||
7266 | __extension__ extern __inline mve_pred16_t | |
7267 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7268 | __arm_vcmpgeq_m_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
7269 | { | |
7270 | return __builtin_mve_vcmpgeq_m_sv8hi (__a, __b, __p); | |
7271 | } | |
7272 | ||
7273 | __extension__ extern __inline mve_pred16_t | |
7274 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7275 | __arm_vcmpgeq_m_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p) | |
7276 | { | |
7277 | return __builtin_mve_vcmpgeq_m_n_sv8hi (__a, __b, __p); | |
7278 | } | |
7279 | ||
7280 | __extension__ extern __inline mve_pred16_t | |
7281 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7282 | __arm_vcmpeqq_m_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
7283 | { | |
7284 | return __builtin_mve_vcmpeqq_m_sv8hi (__a, __b, __p); | |
7285 | } | |
7286 | ||
7287 | __extension__ extern __inline mve_pred16_t | |
7288 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7289 | __arm_vcmpeqq_m_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p) | |
7290 | { | |
7291 | return __builtin_mve_vcmpeqq_m_n_sv8hi (__a, __b, __p); | |
7292 | } | |
7293 | ||
7294 | __extension__ extern __inline int16x8_t | |
7295 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7296 | __arm_vshlq_m_r_s16 (int16x8_t __a, int32_t __b, mve_pred16_t __p) | |
7297 | { | |
7298 | return __builtin_mve_vshlq_m_r_sv8hi (__a, __b, __p); | |
7299 | } | |
7300 | ||
/* Predicated (_m) MVE intrinsics on int16x8_t.  Each function is a thin
   always-inline wrapper around the matching GCC builtin; the
   mve_pred16_t argument __p supplies the per-lane predicate.  */

/* Predicated rounding shift left of __a by the scalar count __b.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_m_n_s16 (int16x8_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrshlq_m_n_sv8hi (__a, __b, __p);
}

/* Predicated reversal of 16-bit elements within each 64-bit half;
   inactive lanes are taken from __inactive.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_m_s16 (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrev64q_m_sv8hi (__inactive, __a, __p);
}

/* Predicated saturating shift left of __a by the scalar in __b.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_r_s16 (int16x8_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqshlq_m_r_sv8hi (__a, __b, __p);
}

/* Predicated saturating rounding shift left by the scalar __b.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_m_n_s16 (int16x8_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqrshlq_m_n_sv8hi (__a, __b, __p);
}

/* Predicated saturating negation; inactive lanes from __inactive.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqnegq_m_s16 (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vqnegq_m_sv8hi (__inactive, __a, __p);
}

/* Predicated saturating absolute value; inactive lanes from __inactive.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqabsq_m_s16 (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vqabsq_m_sv8hi (__inactive, __a, __p);
}

/* Predicated negation; inactive lanes from __inactive.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vnegq_m_s16 (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vnegq_m_sv8hi (__inactive, __a, __p);
}

/* Predicated bitwise NOT; inactive lanes from __inactive.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_m_s16 (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vmvnq_m_sv8hi (__inactive, __a, __p);
}
7356 | ||
/* Predicated across-vector reductions and lane-predicated unary ops on
   int16x8_t.  The _p reductions return a scalar; only lanes selected by
   __p contribute.  */

/* Predicated multiply-subtract dual-accumulate across vector, with the
   operand pairs exchanged (VMLSDAVX); returns the scalar sum.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavxq_p_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmlsdavxq_p_sv8hi (__a, __b, __p);
}

/* Predicated multiply-subtract dual-accumulate across vector.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavq_p_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmlsdavq_p_sv8hi (__a, __b, __p);
}

/* Predicated multiply-add dual-accumulate across vector, exchanged.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavxq_p_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmladavxq_p_sv8hi (__a, __b, __p);
}

/* Predicated multiply-add dual-accumulate across vector.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavq_p_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmladavq_p_sv8hi (__a, __b, __p);
}

/* Predicated minimum across vector, folded into the scalar __a.  */
__extension__ extern __inline int16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminvq_p_s16 (int16_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminvq_p_sv8hi (__a, __b, __p);
}

/* Predicated maximum across vector, folded into the scalar __a.  */
__extension__ extern __inline int16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxvq_p_s16 (int16_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxvq_p_sv8hi (__a, __b, __p);
}

/* Predicated broadcast of scalar __a; inactive lanes from __inactive.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdupq_m_n_s16 (int16x8_t __inactive, int16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vdupq_m_n_sv8hi (__inactive, __a, __p);
}

/* Predicated count of leading zeros per lane.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclzq_m_s16 (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vclzq_m_sv8hi (__inactive, __a, __p);
}

/* Predicated count of leading sign bits per lane.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclsq_m_s16 (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vclsq_m_sv8hi (__inactive, __a, __p);
}

/* Predicated add across vector, accumulated into the scalar __a.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvaq_p_s16 (int32_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vaddvaq_p_sv8hi (__a, __b, __p);
}

/* Predicated absolute value; inactive lanes from __inactive.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabsq_m_s16 (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vabsq_m_sv8hi (__inactive, __a, __p);
}
7433 | ||
/* Unpredicated DSP-style multiply-accumulate intrinsics on int16x8_t.
   The vq(r)dml{a,s}dh{,x}q family performs saturating (rounding) doubling
   dual multiply add/subtract; the _n forms take a broadcast scalar.  */

/* Saturating rounding doubling multiply-subtract dual, exchanged.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlsdhxq_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vqrdmlsdhxq_sv8hi (__inactive, __a, __b);
}

/* Saturating rounding doubling multiply-subtract dual.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlsdhq_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vqrdmlsdhq_sv8hi (__inactive, __a, __b);
}

/* Saturating rounding doubling multiply-accumulate, scalar addend form
   (VQRDMLASH).  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlashq_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c)
{
  return __builtin_mve_vqrdmlashq_n_sv8hi (__a, __b, __c);
}

/* Saturating rounding doubling multiply-accumulate by scalar (VQRDMLAH).  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlahq_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c)
{
  return __builtin_mve_vqrdmlahq_n_sv8hi (__a, __b, __c);
}

/* Saturating rounding doubling multiply-add dual, exchanged.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmladhxq_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vqrdmladhxq_sv8hi (__inactive, __a, __b);
}

/* Saturating rounding doubling multiply-add dual.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmladhq_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vqrdmladhq_sv8hi (__inactive, __a, __b);
}

/* Saturating doubling multiply-subtract dual, exchanged (no rounding).  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlsdhxq_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vqdmlsdhxq_sv8hi (__inactive, __a, __b);
}

/* Saturating doubling multiply-subtract dual.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlsdhq_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vqdmlsdhq_sv8hi (__inactive, __a, __b);
}

/* Saturating doubling multiply-accumulate by scalar (VQDMLAH).  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlahq_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c)
{
  return __builtin_mve_vqdmlahq_n_sv8hi (__a, __b, __c);
}

/* Saturating doubling multiply-add dual, exchanged.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmladhxq_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vqdmladhxq_sv8hi (__inactive, __a, __b);
}

/* Saturating doubling multiply-add dual.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmladhq_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vqdmladhq_sv8hi (__inactive, __a, __b);
}

/* Multiply-subtract dual-accumulate across vector into __a, exchanged.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavaxq_s16 (int32_t __a, int16x8_t __b, int16x8_t __c)
{
  return __builtin_mve_vmlsdavaxq_sv8hi (__a, __b, __c);
}

/* Multiply-subtract dual-accumulate across vector into __a.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavaq_s16 (int32_t __a, int16x8_t __b, int16x8_t __c)
{
  return __builtin_mve_vmlsdavaq_sv8hi (__a, __b, __c);
}

/* Per-lane multiply then add broadcast scalar (VMLAS: __a*__b + __c,
   per ACLE).  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlasq_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c)
{
  return __builtin_mve_vmlasq_n_sv8hi (__a, __b, __c);
}

/* Per-lane multiply-accumulate with scalar multiplier (VMLA:
   __a + __b*__c, per ACLE).  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaq_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c)
{
  return __builtin_mve_vmlaq_n_sv8hi (__a, __b, __c);
}

/* Multiply-add dual-accumulate across vector into __a, exchanged.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavaxq_s16 (int32_t __a, int16x8_t __b, int16x8_t __c)
{
  return __builtin_mve_vmladavaxq_sv8hi (__a, __b, __c);
}

/* Multiply-add dual-accumulate across vector into __a.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavaq_s16 (int32_t __a, int16x8_t __b, int16x8_t __c)
{
  return __builtin_mve_vmladavaq_sv8hi (__a, __b, __c);
}

/* Shift __b right by immediate __imm and insert into __a (VSRI).  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsriq_n_s16 (int16x8_t __a, int16x8_t __b, const int __imm)
{
  return __builtin_mve_vsriq_n_sv8hi (__a, __b, __imm);
}

/* Shift __b left by immediate __imm and insert into __a (VSLI).  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsliq_n_s16 (int16x8_t __a, int16x8_t __b, const int __imm)
{
  return __builtin_mve_vsliq_n_sv8hi (__a, __b, __imm);
}
7566 | ||
/* Predicate select and 32-bit multiply-accumulate wrappers.  */

/* Lane-wise select: active lanes from __a, inactive from __b.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vpselq_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vpselq_uv4si (__a, __b, __p);
}

/* Lane-wise select, signed variant.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vpselq_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vpselq_sv4si (__a, __b, __p);
}

/* Predicated reversal of 32-bit elements within each 64-bit half.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_m_u32 (uint32x4_t __inactive, uint32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrev64q_m_uv4si (__inactive, __a, __p);
}

/* NOTE(review): the MVE architecture defines VQRDMLASH/VQRDMLAH/VQDMLAH
   only for signed element types; these unsigned variants were removed
   from later GCC releases — confirm against the current ACLE before
   relying on them.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlashq_n_u32 (uint32x4_t __a, uint32x4_t __b, uint32_t __c)
{
  return __builtin_mve_vqrdmlashq_n_uv4si (__a, __b, __c);
}

/* See NOTE(review) above regarding unsigned saturating MLA variants.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlahq_n_u32 (uint32x4_t __a, uint32x4_t __b, uint32_t __c)
{
  return __builtin_mve_vqrdmlahq_n_uv4si (__a, __b, __c);
}

/* See NOTE(review) above regarding unsigned saturating MLA variants.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlahq_n_u32 (uint32x4_t __a, uint32x4_t __b, uint32_t __c)
{
  return __builtin_mve_vqdmlahq_n_uv4si (__a, __b, __c);
}

/* Predicated bitwise NOT; inactive lanes from __inactive.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vmvnq_m_uv4si (__inactive, __a, __p);
}

/* Per-lane multiply then add broadcast scalar (VMLAS, per ACLE).  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlasq_n_u32 (uint32x4_t __a, uint32x4_t __b, uint32_t __c)
{
  return __builtin_mve_vmlasq_n_uv4si (__a, __b, __c);
}

/* Per-lane multiply-accumulate with scalar multiplier (VMLA, per ACLE).  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaq_n_u32 (uint32x4_t __a, uint32x4_t __b, uint32_t __c)
{
  return __builtin_mve_vmlaq_n_uv4si (__a, __b, __c);
}
7629 | ||
/* Predicated reductions, broadcast, and compares on uint32x4_t.  The
   vcmp*_m intrinsics return an mve_pred16_t lane mask; the _n forms
   compare each lane against a broadcast scalar.  */

/* Predicated multiply-add dual-accumulate across vector.  */
__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavq_p_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmladavq_p_uv4si (__a, __b, __p);
}

/* Multiply-add dual-accumulate across vector into scalar __a.  */
__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavaq_u32 (uint32_t __a, uint32x4_t __b, uint32x4_t __c)
{
  return __builtin_mve_vmladavaq_uv4si (__a, __b, __c);
}

/* Predicated minimum across vector, folded into scalar __a.  */
__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminvq_p_u32 (uint32_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminvq_p_uv4si (__a, __b, __p);
}

/* Predicated maximum across vector, folded into scalar __a.  */
__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxvq_p_u32 (uint32_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxvq_p_uv4si (__a, __b, __p);
}

/* Predicated broadcast of scalar __a; inactive lanes from __inactive.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdupq_m_n_u32 (uint32x4_t __inactive, uint32_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vdupq_m_n_uv4si (__inactive, __a, __p);
}

/* Predicated lane compare: not-equal.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_m_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpneq_m_uv4si (__a, __b, __p);
}

/* Predicated lane compare against broadcast scalar: not-equal.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_m_n_u32 (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpneq_m_n_uv4si (__a, __b, __p);
}

/* Predicated lane compare: unsigned higher (>).  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmphiq_m_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmphiq_m_uv4si (__a, __b, __p);
}

/* Predicated lane compare against scalar: unsigned higher (>).  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmphiq_m_n_u32 (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmphiq_m_n_uv4si (__a, __b, __p);
}

/* Predicated lane compare: equal.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_m_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpeqq_m_uv4si (__a, __b, __p);
}

/* Predicated lane compare against scalar: equal.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_m_n_u32 (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpeqq_m_n_uv4si (__a, __b, __p);
}

/* Predicated lane compare: unsigned higher-or-same (>=, "carry set").  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpcsq_m_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpcsq_m_uv4si (__a, __b, __p);
}

/* Predicated lane compare against scalar: unsigned higher-or-same.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpcsq_m_n_u32 (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpcsq_m_n_uv4si (__a, __b, __p);
}
7720 | ||
/* Predicated unary, shift-insert, and scalar-shift ops on uint32x4_t.
   Note the scalar shift count __b is a signed int32_t even for the
   unsigned element variants.  */

/* Predicated count of leading zeros per lane.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclzq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vclzq_m_uv4si (__inactive, __a, __p);
}

/* Predicated add across vector, accumulated into scalar __a.  */
__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvaq_p_u32 (uint32_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vaddvaq_p_uv4si (__a, __b, __p);
}

/* Shift __b right by immediate __imm and insert into __a (VSRI).  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsriq_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __imm)
{
  return __builtin_mve_vsriq_n_uv4si (__a, __b, __imm);
}

/* Shift __b left by immediate __imm and insert into __a (VSLI).  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsliq_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __imm)
{
  return __builtin_mve_vsliq_n_uv4si (__a, __b, __imm);
}

/* Predicated shift left of __a by the scalar count in __b.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_m_r_u32 (uint32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vshlq_m_r_uv4si (__a, __b, __p);
}

/* Predicated rounding shift left by scalar count __b.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_m_n_u32 (uint32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrshlq_m_n_uv4si (__a, __b, __p);
}

/* Predicated saturating shift left by scalar count __b.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_r_u32 (uint32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqshlq_m_r_uv4si (__a, __b, __p);
}

/* Predicated saturating rounding shift left by scalar count __b.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_m_n_u32 (uint32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqrshlq_m_n_uv4si (__a, __b, __p);
}
7776 | ||
/* Absolute-value min/max and predicated compares on int32x4_t.  The
   vmina/vmaxa family operates on |lane| values and therefore yields
   unsigned results.  */

/* Predicated minimum of absolute values across vector, folded into the
   unsigned scalar __a.  */
__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminavq_p_s32 (uint32_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminavq_p_sv4si (__a, __b, __p);
}

/* Predicated lane-wise minimum of __a and the absolute value of __b.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminaq_m_s32 (uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminaq_m_sv4si (__a, __b, __p);
}

/* Predicated maximum of absolute values across vector into __a.  */
__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxavq_p_s32 (uint32_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxavq_p_sv4si (__a, __b, __p);
}

/* Predicated lane-wise maximum of __a and the absolute value of __b.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxaq_m_s32 (uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxaq_m_sv4si (__a, __b, __p);
}

/* Predicated lane compare: not-equal.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_m_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpneq_m_sv4si (__a, __b, __p);
}

/* Predicated lane compare against broadcast scalar: not-equal.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_m_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpneq_m_n_sv4si (__a, __b, __p);
}

/* Predicated lane compare: signed less-than.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq_m_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpltq_m_sv4si (__a, __b, __p);
}

/* Predicated lane compare against scalar: signed less-than.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq_m_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpltq_m_n_sv4si (__a, __b, __p);
}

/* Predicated lane compare: signed less-than-or-equal.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq_m_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpleq_m_sv4si (__a, __b, __p);
}

/* Predicated lane compare against scalar: signed less-than-or-equal.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq_m_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpleq_m_n_sv4si (__a, __b, __p);
}

/* Predicated lane compare: signed greater-than.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq_m_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpgtq_m_sv4si (__a, __b, __p);
}

/* Predicated lane compare against scalar: signed greater-than.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq_m_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpgtq_m_n_sv4si (__a, __b, __p);
}

/* Predicated lane compare: signed greater-than-or-equal.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq_m_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpgeq_m_sv4si (__a, __b, __p);
}

/* Predicated lane compare against scalar: signed greater-or-equal.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq_m_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpgeq_m_n_sv4si (__a, __b, __p);
}

/* Predicated lane compare: equal.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_m_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpeqq_m_sv4si (__a, __b, __p);
}

/* Predicated lane compare against scalar: equal.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_m_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpeqq_m_n_sv4si (__a, __b, __p);
}
7888 | ||
/* Predicated shifts, unary ops, and reductions on int32x4_t — the
   32-bit counterparts of the s16 wrappers earlier in this file.  */

/* Predicated shift left of __a by the scalar count in __b.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_m_r_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vshlq_m_r_sv4si (__a, __b, __p);
}

/* Predicated rounding shift left by scalar count __b.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_m_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrshlq_m_n_sv4si (__a, __b, __p);
}

/* Predicated reversal of 32-bit elements within each 64-bit half.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_m_s32 (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrev64q_m_sv4si (__inactive, __a, __p);
}

/* Predicated saturating shift left by scalar count __b.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_r_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqshlq_m_r_sv4si (__a, __b, __p);
}

/* Predicated saturating rounding shift left by scalar count __b.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_m_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqrshlq_m_n_sv4si (__a, __b, __p);
}

/* Predicated saturating negation; inactive lanes from __inactive.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqnegq_m_s32 (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vqnegq_m_sv4si (__inactive, __a, __p);
}

/* Predicated saturating absolute value.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqabsq_m_s32 (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vqabsq_m_sv4si (__inactive, __a, __p);
}

/* Predicated negation.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vnegq_m_s32 (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vnegq_m_sv4si (__inactive, __a, __p);
}

/* Predicated bitwise NOT.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_m_s32 (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vmvnq_m_sv4si (__inactive, __a, __p);
}

/* Predicated multiply-subtract dual-accumulate across vector, exchanged.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavxq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmlsdavxq_p_sv4si (__a, __b, __p);
}

/* Predicated multiply-subtract dual-accumulate across vector.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmlsdavq_p_sv4si (__a, __b, __p);
}

/* Predicated multiply-add dual-accumulate across vector, exchanged.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavxq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmladavxq_p_sv4si (__a, __b, __p);
}

/* Predicated multiply-add dual-accumulate across vector.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmladavq_p_sv4si (__a, __b, __p);
}

/* Predicated minimum across vector, folded into scalar __a.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminvq_p_s32 (int32_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminvq_p_sv4si (__a, __b, __p);
}

/* Predicated maximum across vector, folded into scalar __a.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxvq_p_s32 (int32_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxvq_p_sv4si (__a, __b, __p);
}

/* Predicated broadcast of scalar __a; inactive lanes from __inactive.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdupq_m_n_s32 (int32x4_t __inactive, int32_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vdupq_m_n_sv4si (__inactive, __a, __p);
}

/* Predicated count of leading zeros per lane.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclzq_m_s32 (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vclzq_m_sv4si (__inactive, __a, __p);
}

/* Predicated count of leading sign bits per lane.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclsq_m_s32 (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vclsq_m_sv4si (__inactive, __a, __p);
}

/* Predicated add across vector, accumulated into scalar __a.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvaq_p_s32 (int32_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vaddvaq_p_sv4si (__a, __b, __p);
}

/* Predicated absolute value; inactive lanes from __inactive.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabsq_m_s32 (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vabsq_m_sv4si (__inactive, __a, __p);
}
8028 | ||
/* Unpredicated saturating (rounding) doubling multiply-accumulate
   intrinsics on int32x4_t — 32-bit counterparts of the s16 family.  */

/* Saturating rounding doubling multiply-subtract dual, exchanged.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlsdhxq_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vqrdmlsdhxq_sv4si (__inactive, __a, __b);
}

/* Saturating rounding doubling multiply-subtract dual.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlsdhq_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vqrdmlsdhq_sv4si (__inactive, __a, __b);
}

/* Saturating rounding doubling multiply-accumulate, scalar addend form
   (VQRDMLASH).  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlashq_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c)
{
  return __builtin_mve_vqrdmlashq_n_sv4si (__a, __b, __c);
}

/* Saturating rounding doubling multiply-accumulate by scalar (VQRDMLAH).  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlahq_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c)
{
  return __builtin_mve_vqrdmlahq_n_sv4si (__a, __b, __c);
}

/* Saturating rounding doubling multiply-add dual, exchanged.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmladhxq_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vqrdmladhxq_sv4si (__inactive, __a, __b);
}

/* Saturating rounding doubling multiply-add dual.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmladhq_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vqrdmladhq_sv4si (__inactive, __a, __b);
}

/* Saturating doubling multiply-subtract dual, exchanged (no rounding).  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlsdhxq_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vqdmlsdhxq_sv4si (__inactive, __a, __b);
}

/* Saturating doubling multiply-subtract dual.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlsdhq_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vqdmlsdhq_sv4si (__inactive, __a, __b);
}

/* Saturating doubling multiply-accumulate by scalar (VQDMLAH).  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlahq_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c)
{
  return __builtin_mve_vqdmlahq_n_sv4si (__a, __b, __c);
}

/* Saturating doubling multiply-add dual, exchanged.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmladhxq_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vqdmladhxq_sv4si (__inactive, __a, __b);
}
8098 | ||
8099 | __extension__ extern __inline int32x4_t | |
8100 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8101 | __arm_vqdmladhq_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b) | |
8102 | { | |
8103 | return __builtin_mve_vqdmladhq_sv4si (__inactive, __a, __b); | |
8104 | } | |
8105 | ||
8106 | __extension__ extern __inline int32_t | |
8107 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8108 | __arm_vmlsdavaxq_s32 (int32_t __a, int32x4_t __b, int32x4_t __c) | |
8109 | { | |
8110 | return __builtin_mve_vmlsdavaxq_sv4si (__a, __b, __c); | |
8111 | } | |
8112 | ||
8113 | __extension__ extern __inline int32_t | |
8114 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8115 | __arm_vmlsdavaq_s32 (int32_t __a, int32x4_t __b, int32x4_t __c) | |
8116 | { | |
8117 | return __builtin_mve_vmlsdavaq_sv4si (__a, __b, __c); | |
8118 | } | |
8119 | ||
8120 | __extension__ extern __inline int32x4_t | |
8121 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8122 | __arm_vmlasq_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c) | |
8123 | { | |
8124 | return __builtin_mve_vmlasq_n_sv4si (__a, __b, __c); | |
8125 | } | |
8126 | ||
8127 | __extension__ extern __inline int32x4_t | |
8128 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8129 | __arm_vmlaq_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c) | |
8130 | { | |
8131 | return __builtin_mve_vmlaq_n_sv4si (__a, __b, __c); | |
8132 | } | |
8133 | ||
8134 | __extension__ extern __inline int32_t | |
8135 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8136 | __arm_vmladavaxq_s32 (int32_t __a, int32x4_t __b, int32x4_t __c) | |
8137 | { | |
8138 | return __builtin_mve_vmladavaxq_sv4si (__a, __b, __c); | |
8139 | } | |
8140 | ||
8141 | __extension__ extern __inline int32_t | |
8142 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8143 | __arm_vmladavaq_s32 (int32_t __a, int32x4_t __b, int32x4_t __c) | |
8144 | { | |
8145 | return __builtin_mve_vmladavaq_sv4si (__a, __b, __c); | |
8146 | } | |
8147 | ||
8148 | __extension__ extern __inline int32x4_t | |
8149 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8150 | __arm_vsriq_n_s32 (int32x4_t __a, int32x4_t __b, const int __imm) | |
8151 | { | |
8152 | return __builtin_mve_vsriq_n_sv4si (__a, __b, __imm); | |
8153 | } | |
8154 | ||
8155 | __extension__ extern __inline int32x4_t | |
8156 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8157 | __arm_vsliq_n_s32 (int32x4_t __a, int32x4_t __b, const int __imm) | |
8158 | { | |
8159 | return __builtin_mve_vsliq_n_sv4si (__a, __b, __imm); | |
8160 | } | |
8161 | ||
8162 | __extension__ extern __inline uint64x2_t | |
8163 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8164 | __arm_vpselq_u64 (uint64x2_t __a, uint64x2_t __b, mve_pred16_t __p) | |
8165 | { | |
8166 | return __builtin_mve_vpselq_uv2di (__a, __b, __p); | |
8167 | } | |
8168 | ||
8169 | __extension__ extern __inline int64x2_t | |
8170 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8171 | __arm_vpselq_s64 (int64x2_t __a, int64x2_t __b, mve_pred16_t __p) | |
8172 | { | |
8173 | return __builtin_mve_vpselq_sv2di (__a, __b, __p); | |
8174 | } | |
f9355dee | 8175 | |
e3678b44 | 8176 | __extension__ extern __inline int64_t |
f9355dee | 8177 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8178 | __arm_vrmlaldavhaxq_s32 (int64_t __a, int32x4_t __b, int32x4_t __c) |
f9355dee | 8179 | { |
e3678b44 | 8180 | return __builtin_mve_vrmlaldavhaxq_sv4si (__a, __b, __c); |
f9355dee SP |
8181 | } |
8182 | ||
e3678b44 | 8183 | __extension__ extern __inline int64_t |
f9355dee | 8184 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8185 | __arm_vrmlsldavhaq_s32 (int64_t __a, int32x4_t __b, int32x4_t __c) |
f9355dee | 8186 | { |
e3678b44 | 8187 | return __builtin_mve_vrmlsldavhaq_sv4si (__a, __b, __c); |
f9355dee SP |
8188 | } |
8189 | ||
e3678b44 | 8190 | __extension__ extern __inline int64_t |
f9355dee | 8191 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8192 | __arm_vrmlsldavhaxq_s32 (int64_t __a, int32x4_t __b, int32x4_t __c) |
f9355dee | 8193 | { |
e3678b44 | 8194 | return __builtin_mve_vrmlsldavhaxq_sv4si (__a, __b, __c); |
f9355dee SP |
8195 | } |
8196 | ||
e3678b44 | 8197 | __extension__ extern __inline int64_t |
f9355dee | 8198 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8199 | __arm_vaddlvaq_p_s32 (int64_t __a, int32x4_t __b, mve_pred16_t __p) |
f9355dee | 8200 | { |
e3678b44 | 8201 | return __builtin_mve_vaddlvaq_p_sv4si (__a, __b, __p); |
f9355dee SP |
8202 | } |
8203 | ||
e3678b44 | 8204 | __extension__ extern __inline int8x16_t |
f9355dee | 8205 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8206 | __arm_vrev16q_m_s8 (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p) |
f9355dee | 8207 | { |
e3678b44 | 8208 | return __builtin_mve_vrev16q_m_sv16qi (__inactive, __a, __p); |
f9355dee SP |
8209 | } |
8210 | ||
e3678b44 | 8211 | __extension__ extern __inline int64_t |
f9355dee | 8212 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8213 | __arm_vrmlaldavhq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) |
f9355dee | 8214 | { |
e3678b44 | 8215 | return __builtin_mve_vrmlaldavhq_p_sv4si (__a, __b, __p); |
f9355dee SP |
8216 | } |
8217 | ||
e3678b44 | 8218 | __extension__ extern __inline int64_t |
f9355dee | 8219 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8220 | __arm_vrmlaldavhxq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) |
f9355dee | 8221 | { |
e3678b44 | 8222 | return __builtin_mve_vrmlaldavhxq_p_sv4si (__a, __b, __p); |
f9355dee SP |
8223 | } |
8224 | ||
e3678b44 | 8225 | __extension__ extern __inline int64_t |
f9355dee | 8226 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8227 | __arm_vrmlsldavhq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) |
f9355dee | 8228 | { |
e3678b44 | 8229 | return __builtin_mve_vrmlsldavhq_p_sv4si (__a, __b, __p); |
f9355dee SP |
8230 | } |
8231 | ||
e3678b44 | 8232 | __extension__ extern __inline int64_t |
f9355dee | 8233 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8234 | __arm_vrmlsldavhxq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) |
f9355dee | 8235 | { |
e3678b44 | 8236 | return __builtin_mve_vrmlsldavhxq_p_sv4si (__a, __b, __p); |
f9355dee SP |
8237 | } |
8238 | ||
e3678b44 | 8239 | __extension__ extern __inline uint64_t |
f9355dee | 8240 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8241 | __arm_vaddlvaq_p_u32 (uint64_t __a, uint32x4_t __b, mve_pred16_t __p) |
f9355dee | 8242 | { |
e3678b44 | 8243 | return __builtin_mve_vaddlvaq_p_uv4si (__a, __b, __p); |
f9355dee SP |
8244 | } |
8245 | ||
e3678b44 | 8246 | __extension__ extern __inline uint8x16_t |
f9355dee | 8247 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8248 | __arm_vrev16q_m_u8 (uint8x16_t __inactive, uint8x16_t __a, mve_pred16_t __p) |
f9355dee | 8249 | { |
e3678b44 | 8250 | return __builtin_mve_vrev16q_m_uv16qi (__inactive, __a, __p); |
f9355dee SP |
8251 | } |
8252 | ||
e3678b44 | 8253 | __extension__ extern __inline uint64_t |
f9355dee | 8254 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8255 | __arm_vrmlaldavhq_p_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) |
f9355dee | 8256 | { |
e3678b44 | 8257 | return __builtin_mve_vrmlaldavhq_p_uv4si (__a, __b, __p); |
f9355dee SP |
8258 | } |
8259 | ||
e3678b44 | 8260 | __extension__ extern __inline int16x8_t |
f9355dee | 8261 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8262 | __arm_vmvnq_m_n_s16 (int16x8_t __inactive, const int __imm, mve_pred16_t __p) |
f9355dee | 8263 | { |
e3678b44 | 8264 | return __builtin_mve_vmvnq_m_n_sv8hi (__inactive, __imm, __p); |
f9355dee SP |
8265 | } |
8266 | ||
e3678b44 | 8267 | __extension__ extern __inline int16x8_t |
f9355dee | 8268 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8269 | __arm_vorrq_m_n_s16 (int16x8_t __a, const int __imm, mve_pred16_t __p) |
f9355dee | 8270 | { |
e3678b44 | 8271 | return __builtin_mve_vorrq_m_n_sv8hi (__a, __imm, __p); |
f9355dee SP |
8272 | } |
8273 | ||
e3678b44 | 8274 | __extension__ extern __inline int8x16_t |
f9355dee | 8275 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8276 | __arm_vqrshrntq_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm) |
f9355dee | 8277 | { |
e3678b44 | 8278 | return __builtin_mve_vqrshrntq_n_sv8hi (__a, __b, __imm); |
f9355dee SP |
8279 | } |
8280 | ||
e3678b44 | 8281 | __extension__ extern __inline int8x16_t |
f9355dee | 8282 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8283 | __arm_vqshrnbq_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm) |
f9355dee | 8284 | { |
e3678b44 | 8285 | return __builtin_mve_vqshrnbq_n_sv8hi (__a, __b, __imm); |
f9355dee SP |
8286 | } |
8287 | ||
e3678b44 | 8288 | __extension__ extern __inline int8x16_t |
f9355dee | 8289 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8290 | __arm_vqshrntq_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm) |
f9355dee | 8291 | { |
e3678b44 | 8292 | return __builtin_mve_vqshrntq_n_sv8hi (__a, __b, __imm); |
f9355dee SP |
8293 | } |
8294 | ||
e3678b44 | 8295 | __extension__ extern __inline int8x16_t |
f9355dee | 8296 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8297 | __arm_vrshrnbq_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm) |
f9355dee | 8298 | { |
e3678b44 | 8299 | return __builtin_mve_vrshrnbq_n_sv8hi (__a, __b, __imm); |
f9355dee SP |
8300 | } |
8301 | ||
e3678b44 | 8302 | __extension__ extern __inline int8x16_t |
f9355dee | 8303 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8304 | __arm_vrshrntq_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm) |
f9355dee | 8305 | { |
e3678b44 | 8306 | return __builtin_mve_vrshrntq_n_sv8hi (__a, __b, __imm); |
f9355dee SP |
8307 | } |
8308 | ||
e3678b44 | 8309 | __extension__ extern __inline int8x16_t |
f9355dee | 8310 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8311 | __arm_vshrnbq_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm) |
f9355dee | 8312 | { |
e3678b44 | 8313 | return __builtin_mve_vshrnbq_n_sv8hi (__a, __b, __imm); |
f9355dee SP |
8314 | } |
8315 | ||
e3678b44 | 8316 | __extension__ extern __inline int8x16_t |
f9355dee | 8317 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8318 | __arm_vshrntq_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm) |
f9355dee | 8319 | { |
e3678b44 | 8320 | return __builtin_mve_vshrntq_n_sv8hi (__a, __b, __imm); |
f9355dee SP |
8321 | } |
8322 | ||
e3678b44 | 8323 | __extension__ extern __inline int64_t |
f9355dee | 8324 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8325 | __arm_vmlaldavaq_s16 (int64_t __a, int16x8_t __b, int16x8_t __c) |
f9355dee | 8326 | { |
e3678b44 | 8327 | return __builtin_mve_vmlaldavaq_sv8hi (__a, __b, __c); |
f9355dee SP |
8328 | } |
8329 | ||
e3678b44 | 8330 | __extension__ extern __inline int64_t |
f9355dee | 8331 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8332 | __arm_vmlaldavaxq_s16 (int64_t __a, int16x8_t __b, int16x8_t __c) |
f9355dee | 8333 | { |
e3678b44 | 8334 | return __builtin_mve_vmlaldavaxq_sv8hi (__a, __b, __c); |
f9355dee SP |
8335 | } |
8336 | ||
e3678b44 | 8337 | __extension__ extern __inline int64_t |
f9355dee | 8338 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8339 | __arm_vmlsldavaq_s16 (int64_t __a, int16x8_t __b, int16x8_t __c) |
f9355dee | 8340 | { |
e3678b44 | 8341 | return __builtin_mve_vmlsldavaq_sv8hi (__a, __b, __c); |
f9355dee SP |
8342 | } |
8343 | ||
e3678b44 | 8344 | __extension__ extern __inline int64_t |
f9355dee | 8345 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8346 | __arm_vmlsldavaxq_s16 (int64_t __a, int16x8_t __b, int16x8_t __c) |
f9355dee | 8347 | { |
e3678b44 | 8348 | return __builtin_mve_vmlsldavaxq_sv8hi (__a, __b, __c); |
f9355dee SP |
8349 | } |
8350 | ||
e3678b44 | 8351 | __extension__ extern __inline int64_t |
f9355dee | 8352 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8353 | __arm_vmlaldavq_p_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) |
f9355dee | 8354 | { |
e3678b44 | 8355 | return __builtin_mve_vmlaldavq_p_sv8hi (__a, __b, __p); |
f9355dee SP |
8356 | } |
8357 | ||
e3678b44 | 8358 | __extension__ extern __inline int64_t |
f9355dee | 8359 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8360 | __arm_vmlaldavxq_p_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) |
f9355dee | 8361 | { |
e3678b44 | 8362 | return __builtin_mve_vmlaldavxq_p_sv8hi (__a, __b, __p); |
f9355dee SP |
8363 | } |
8364 | ||
e3678b44 | 8365 | __extension__ extern __inline int64_t |
f9355dee | 8366 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8367 | __arm_vmlsldavq_p_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) |
f9355dee | 8368 | { |
e3678b44 | 8369 | return __builtin_mve_vmlsldavq_p_sv8hi (__a, __b, __p); |
f9355dee SP |
8370 | } |
8371 | ||
e3678b44 | 8372 | __extension__ extern __inline int64_t |
f9355dee | 8373 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8374 | __arm_vmlsldavxq_p_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) |
f9355dee | 8375 | { |
e3678b44 | 8376 | return __builtin_mve_vmlsldavxq_p_sv8hi (__a, __b, __p); |
f9355dee SP |
8377 | } |
8378 | ||
8379 | __extension__ extern __inline int16x8_t | |
8380 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
e3678b44 | 8381 | __arm_vmovlbq_m_s8 (int16x8_t __inactive, int8x16_t __a, mve_pred16_t __p) |
f9355dee | 8382 | { |
e3678b44 | 8383 | return __builtin_mve_vmovlbq_m_sv16qi (__inactive, __a, __p); |
f9355dee SP |
8384 | } |
8385 | ||
e3678b44 | 8386 | __extension__ extern __inline int16x8_t |
f9355dee | 8387 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8388 | __arm_vmovltq_m_s8 (int16x8_t __inactive, int8x16_t __a, mve_pred16_t __p) |
f9355dee | 8389 | { |
e3678b44 | 8390 | return __builtin_mve_vmovltq_m_sv16qi (__inactive, __a, __p); |
f9355dee SP |
8391 | } |
8392 | ||
e3678b44 | 8393 | __extension__ extern __inline int8x16_t |
f9355dee | 8394 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8395 | __arm_vmovnbq_m_s16 (int8x16_t __a, int16x8_t __b, mve_pred16_t __p) |
f9355dee | 8396 | { |
e3678b44 | 8397 | return __builtin_mve_vmovnbq_m_sv8hi (__a, __b, __p); |
f9355dee SP |
8398 | } |
8399 | ||
e3678b44 | 8400 | __extension__ extern __inline int8x16_t |
f9355dee | 8401 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8402 | __arm_vmovntq_m_s16 (int8x16_t __a, int16x8_t __b, mve_pred16_t __p) |
f9355dee | 8403 | { |
e3678b44 | 8404 | return __builtin_mve_vmovntq_m_sv8hi (__a, __b, __p); |
f9355dee SP |
8405 | } |
8406 | ||
e3678b44 | 8407 | __extension__ extern __inline int8x16_t |
f9355dee | 8408 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8409 | __arm_vqmovnbq_m_s16 (int8x16_t __a, int16x8_t __b, mve_pred16_t __p) |
f9355dee | 8410 | { |
e3678b44 | 8411 | return __builtin_mve_vqmovnbq_m_sv8hi (__a, __b, __p); |
f9355dee SP |
8412 | } |
8413 | ||
e3678b44 | 8414 | __extension__ extern __inline int8x16_t |
f9355dee | 8415 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8416 | __arm_vqmovntq_m_s16 (int8x16_t __a, int16x8_t __b, mve_pred16_t __p) |
f9355dee | 8417 | { |
e3678b44 | 8418 | return __builtin_mve_vqmovntq_m_sv8hi (__a, __b, __p); |
f9355dee SP |
8419 | } |
8420 | ||
e3678b44 | 8421 | __extension__ extern __inline int8x16_t |
f9355dee | 8422 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8423 | __arm_vrev32q_m_s8 (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p) |
f9355dee | 8424 | { |
e3678b44 | 8425 | return __builtin_mve_vrev32q_m_sv16qi (__inactive, __a, __p); |
f9355dee SP |
8426 | } |
8427 | ||
8428 | __extension__ extern __inline uint16x8_t | |
8429 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
e3678b44 | 8430 | __arm_vmvnq_m_n_u16 (uint16x8_t __inactive, const int __imm, mve_pred16_t __p) |
f9355dee | 8431 | { |
e3678b44 | 8432 | return __builtin_mve_vmvnq_m_n_uv8hi (__inactive, __imm, __p); |
f9355dee SP |
8433 | } |
8434 | ||
e3678b44 | 8435 | __extension__ extern __inline uint16x8_t |
f9355dee | 8436 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8437 | __arm_vorrq_m_n_u16 (uint16x8_t __a, const int __imm, mve_pred16_t __p) |
f9355dee | 8438 | { |
e3678b44 | 8439 | return __builtin_mve_vorrq_m_n_uv8hi (__a, __imm, __p); |
f9355dee SP |
8440 | } |
8441 | ||
e3678b44 | 8442 | __extension__ extern __inline uint8x16_t |
f9355dee | 8443 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8444 | __arm_vqrshruntq_n_s16 (uint8x16_t __a, int16x8_t __b, const int __imm) |
f9355dee | 8445 | { |
e3678b44 | 8446 | return __builtin_mve_vqrshruntq_n_sv8hi (__a, __b, __imm); |
f9355dee SP |
8447 | } |
8448 | ||
e3678b44 | 8449 | __extension__ extern __inline uint8x16_t |
f9355dee | 8450 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8451 | __arm_vqshrunbq_n_s16 (uint8x16_t __a, int16x8_t __b, const int __imm) |
f9355dee | 8452 | { |
e3678b44 | 8453 | return __builtin_mve_vqshrunbq_n_sv8hi (__a, __b, __imm); |
f9355dee SP |
8454 | } |
8455 | ||
e3678b44 | 8456 | __extension__ extern __inline uint8x16_t |
f9355dee | 8457 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8458 | __arm_vqshruntq_n_s16 (uint8x16_t __a, int16x8_t __b, const int __imm) |
f9355dee | 8459 | { |
e3678b44 | 8460 | return __builtin_mve_vqshruntq_n_sv8hi (__a, __b, __imm); |
f9355dee SP |
8461 | } |
8462 | ||
e3678b44 | 8463 | __extension__ extern __inline uint8x16_t |
f9355dee | 8464 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8465 | __arm_vqmovunbq_m_s16 (uint8x16_t __a, int16x8_t __b, mve_pred16_t __p) |
f9355dee | 8466 | { |
e3678b44 | 8467 | return __builtin_mve_vqmovunbq_m_sv8hi (__a, __b, __p); |
f9355dee SP |
8468 | } |
8469 | ||
e3678b44 | 8470 | __extension__ extern __inline uint8x16_t |
f9355dee | 8471 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8472 | __arm_vqmovuntq_m_s16 (uint8x16_t __a, int16x8_t __b, mve_pred16_t __p) |
f9355dee | 8473 | { |
e3678b44 | 8474 | return __builtin_mve_vqmovuntq_m_sv8hi (__a, __b, __p); |
f9355dee SP |
8475 | } |
8476 | ||
e3678b44 | 8477 | __extension__ extern __inline uint8x16_t |
f9355dee | 8478 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8479 | __arm_vqrshrntq_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm) |
f9355dee | 8480 | { |
e3678b44 | 8481 | return __builtin_mve_vqrshrntq_n_uv8hi (__a, __b, __imm); |
f9355dee SP |
8482 | } |
8483 | ||
e3678b44 | 8484 | __extension__ extern __inline uint8x16_t |
f9355dee | 8485 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8486 | __arm_vqshrnbq_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm) |
f9355dee | 8487 | { |
e3678b44 | 8488 | return __builtin_mve_vqshrnbq_n_uv8hi (__a, __b, __imm); |
f9355dee SP |
8489 | } |
8490 | ||
e3678b44 | 8491 | __extension__ extern __inline uint8x16_t |
f9355dee | 8492 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8493 | __arm_vqshrntq_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm) |
f9355dee | 8494 | { |
e3678b44 | 8495 | return __builtin_mve_vqshrntq_n_uv8hi (__a, __b, __imm); |
f9355dee SP |
8496 | } |
8497 | ||
e3678b44 | 8498 | __extension__ extern __inline uint8x16_t |
f9355dee | 8499 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8500 | __arm_vrshrnbq_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm) |
f9355dee | 8501 | { |
e3678b44 | 8502 | return __builtin_mve_vrshrnbq_n_uv8hi (__a, __b, __imm); |
f9355dee SP |
8503 | } |
8504 | ||
e3678b44 | 8505 | __extension__ extern __inline uint8x16_t |
f9355dee | 8506 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8507 | __arm_vrshrntq_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm) |
f9355dee | 8508 | { |
e3678b44 | 8509 | return __builtin_mve_vrshrntq_n_uv8hi (__a, __b, __imm); |
f9355dee SP |
8510 | } |
8511 | ||
e3678b44 | 8512 | __extension__ extern __inline uint8x16_t |
f9355dee | 8513 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8514 | __arm_vshrnbq_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm) |
f9355dee | 8515 | { |
e3678b44 | 8516 | return __builtin_mve_vshrnbq_n_uv8hi (__a, __b, __imm); |
f9355dee SP |
8517 | } |
8518 | ||
e3678b44 | 8519 | __extension__ extern __inline uint8x16_t |
f9355dee | 8520 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8521 | __arm_vshrntq_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm) |
f9355dee | 8522 | { |
e3678b44 | 8523 | return __builtin_mve_vshrntq_n_uv8hi (__a, __b, __imm); |
f9355dee SP |
8524 | } |
8525 | ||
e3678b44 | 8526 | __extension__ extern __inline uint64_t |
f9355dee | 8527 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8528 | __arm_vmlaldavaq_u16 (uint64_t __a, uint16x8_t __b, uint16x8_t __c) |
f9355dee | 8529 | { |
e3678b44 | 8530 | return __builtin_mve_vmlaldavaq_uv8hi (__a, __b, __c); |
f9355dee SP |
8531 | } |
8532 | ||
e3678b44 | 8533 | __extension__ extern __inline uint64_t |
f9355dee | 8534 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8535 | __arm_vmlaldavq_p_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) |
f9355dee | 8536 | { |
e3678b44 | 8537 | return __builtin_mve_vmlaldavq_p_uv8hi (__a, __b, __p); |
f9355dee SP |
8538 | } |
8539 | ||
e3678b44 | 8540 | __extension__ extern __inline uint16x8_t |
f9355dee | 8541 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8542 | __arm_vmovlbq_m_u8 (uint16x8_t __inactive, uint8x16_t __a, mve_pred16_t __p) |
f9355dee | 8543 | { |
e3678b44 | 8544 | return __builtin_mve_vmovlbq_m_uv16qi (__inactive, __a, __p); |
f9355dee SP |
8545 | } |
8546 | ||
e3678b44 | 8547 | __extension__ extern __inline uint16x8_t |
f9355dee | 8548 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8549 | __arm_vmovltq_m_u8 (uint16x8_t __inactive, uint8x16_t __a, mve_pred16_t __p) |
f9355dee | 8550 | { |
e3678b44 | 8551 | return __builtin_mve_vmovltq_m_uv16qi (__inactive, __a, __p); |
f9355dee SP |
8552 | } |
8553 | ||
e3678b44 | 8554 | __extension__ extern __inline uint8x16_t |
f9355dee | 8555 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8556 | __arm_vmovnbq_m_u16 (uint8x16_t __a, uint16x8_t __b, mve_pred16_t __p) |
f9355dee | 8557 | { |
e3678b44 | 8558 | return __builtin_mve_vmovnbq_m_uv8hi (__a, __b, __p); |
f9355dee SP |
8559 | } |
8560 | ||
e3678b44 | 8561 | __extension__ extern __inline uint8x16_t |
f9355dee | 8562 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8563 | __arm_vmovntq_m_u16 (uint8x16_t __a, uint16x8_t __b, mve_pred16_t __p) |
f9355dee | 8564 | { |
e3678b44 | 8565 | return __builtin_mve_vmovntq_m_uv8hi (__a, __b, __p); |
f9355dee SP |
8566 | } |
8567 | ||
e3678b44 | 8568 | __extension__ extern __inline uint8x16_t |
f9355dee | 8569 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8570 | __arm_vqmovnbq_m_u16 (uint8x16_t __a, uint16x8_t __b, mve_pred16_t __p) |
f9355dee | 8571 | { |
e3678b44 | 8572 | return __builtin_mve_vqmovnbq_m_uv8hi (__a, __b, __p); |
f9355dee SP |
8573 | } |
8574 | ||
e3678b44 | 8575 | __extension__ extern __inline uint8x16_t |
f9355dee | 8576 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8577 | __arm_vqmovntq_m_u16 (uint8x16_t __a, uint16x8_t __b, mve_pred16_t __p) |
f9355dee | 8578 | { |
e3678b44 | 8579 | return __builtin_mve_vqmovntq_m_uv8hi (__a, __b, __p); |
f9355dee SP |
8580 | } |
8581 | ||
e3678b44 | 8582 | __extension__ extern __inline uint8x16_t |
f9355dee | 8583 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8584 | __arm_vrev32q_m_u8 (uint8x16_t __inactive, uint8x16_t __a, mve_pred16_t __p) |
f9355dee | 8585 | { |
e3678b44 | 8586 | return __builtin_mve_vrev32q_m_uv16qi (__inactive, __a, __p); |
f9355dee SP |
8587 | } |
8588 | ||
8589 | __extension__ extern __inline int32x4_t | |
8590 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
e3678b44 | 8591 | __arm_vmvnq_m_n_s32 (int32x4_t __inactive, const int __imm, mve_pred16_t __p) |
f9355dee | 8592 | { |
e3678b44 | 8593 | return __builtin_mve_vmvnq_m_n_sv4si (__inactive, __imm, __p); |
f9355dee SP |
8594 | } |
8595 | ||
e3678b44 | 8596 | __extension__ extern __inline int32x4_t |
f9355dee | 8597 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8598 | __arm_vorrq_m_n_s32 (int32x4_t __a, const int __imm, mve_pred16_t __p) |
f9355dee | 8599 | { |
e3678b44 | 8600 | return __builtin_mve_vorrq_m_n_sv4si (__a, __imm, __p); |
f9355dee SP |
8601 | } |
8602 | ||
e3678b44 | 8603 | __extension__ extern __inline int16x8_t |
f9355dee | 8604 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8605 | __arm_vqrshrntq_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm) |
f9355dee | 8606 | { |
e3678b44 | 8607 | return __builtin_mve_vqrshrntq_n_sv4si (__a, __b, __imm); |
f9355dee SP |
8608 | } |
8609 | ||
e3678b44 | 8610 | __extension__ extern __inline int16x8_t |
f9355dee | 8611 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8612 | __arm_vqshrnbq_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm) |
f9355dee | 8613 | { |
e3678b44 | 8614 | return __builtin_mve_vqshrnbq_n_sv4si (__a, __b, __imm); |
f9355dee SP |
8615 | } |
8616 | ||
e3678b44 | 8617 | __extension__ extern __inline int16x8_t |
f9355dee | 8618 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8619 | __arm_vqshrntq_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm) |
f9355dee | 8620 | { |
e3678b44 | 8621 | return __builtin_mve_vqshrntq_n_sv4si (__a, __b, __imm); |
f9355dee SP |
8622 | } |
8623 | ||
e3678b44 | 8624 | __extension__ extern __inline int16x8_t |
f9355dee | 8625 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8626 | __arm_vrshrnbq_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm) |
f9355dee | 8627 | { |
e3678b44 | 8628 | return __builtin_mve_vrshrnbq_n_sv4si (__a, __b, __imm); |
f9355dee SP |
8629 | } |
8630 | ||
e3678b44 | 8631 | __extension__ extern __inline int16x8_t |
f9355dee | 8632 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8633 | __arm_vrshrntq_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm) |
f9355dee | 8634 | { |
e3678b44 | 8635 | return __builtin_mve_vrshrntq_n_sv4si (__a, __b, __imm); |
f9355dee SP |
8636 | } |
8637 | ||
e3678b44 | 8638 | __extension__ extern __inline int16x8_t |
f9355dee | 8639 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8640 | __arm_vshrnbq_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm) |
f9355dee | 8641 | { |
e3678b44 | 8642 | return __builtin_mve_vshrnbq_n_sv4si (__a, __b, __imm); |
f9355dee SP |
8643 | } |
8644 | ||
e3678b44 | 8645 | __extension__ extern __inline int16x8_t |
f9355dee | 8646 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8647 | __arm_vshrntq_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm) |
f9355dee | 8648 | { |
e3678b44 | 8649 | return __builtin_mve_vshrntq_n_sv4si (__a, __b, __imm); |
f9355dee SP |
8650 | } |
8651 | ||
e3678b44 | 8652 | __extension__ extern __inline int64_t |
f9355dee | 8653 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8654 | __arm_vmlaldavaq_s32 (int64_t __a, int32x4_t __b, int32x4_t __c) |
f9355dee | 8655 | { |
e3678b44 | 8656 | return __builtin_mve_vmlaldavaq_sv4si (__a, __b, __c); |
f9355dee SP |
8657 | } |
8658 | ||
e3678b44 | 8659 | __extension__ extern __inline int64_t |
f9355dee | 8660 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8661 | __arm_vmlaldavaxq_s32 (int64_t __a, int32x4_t __b, int32x4_t __c) |
f9355dee | 8662 | { |
e3678b44 | 8663 | return __builtin_mve_vmlaldavaxq_sv4si (__a, __b, __c); |
f9355dee SP |
8664 | } |
8665 | ||
e3678b44 | 8666 | __extension__ extern __inline int64_t |
f9355dee | 8667 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8668 | __arm_vmlsldavaq_s32 (int64_t __a, int32x4_t __b, int32x4_t __c) |
f9355dee | 8669 | { |
e3678b44 | 8670 | return __builtin_mve_vmlsldavaq_sv4si (__a, __b, __c); |
f9355dee SP |
8671 | } |
8672 | ||
e3678b44 | 8673 | __extension__ extern __inline int64_t |
f9355dee | 8674 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8675 | __arm_vmlsldavaxq_s32 (int64_t __a, int32x4_t __b, int32x4_t __c) |
f9355dee | 8676 | { |
e3678b44 | 8677 | return __builtin_mve_vmlsldavaxq_sv4si (__a, __b, __c); |
f9355dee SP |
8678 | } |
8679 | ||
e3678b44 | 8680 | __extension__ extern __inline int64_t |
f9355dee | 8681 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8682 | __arm_vmlaldavq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) |
f9355dee | 8683 | { |
e3678b44 | 8684 | return __builtin_mve_vmlaldavq_p_sv4si (__a, __b, __p); |
f9355dee SP |
8685 | } |
8686 | ||
e3678b44 | 8687 | __extension__ extern __inline int64_t |
f9355dee | 8688 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8689 | __arm_vmlaldavxq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) |
f9355dee | 8690 | { |
e3678b44 | 8691 | return __builtin_mve_vmlaldavxq_p_sv4si (__a, __b, __p); |
f9355dee SP |
8692 | } |
8693 | ||
e3678b44 | 8694 | __extension__ extern __inline int64_t |
f9355dee | 8695 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8696 | __arm_vmlsldavq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) |
f9355dee | 8697 | { |
e3678b44 | 8698 | return __builtin_mve_vmlsldavq_p_sv4si (__a, __b, __p); |
f9355dee SP |
8699 | } |
8700 | ||
e3678b44 | 8701 | __extension__ extern __inline int64_t |
f9355dee | 8702 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8703 | __arm_vmlsldavxq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) |
f9355dee | 8704 | { |
e3678b44 | 8705 | return __builtin_mve_vmlsldavxq_p_sv4si (__a, __b, __p); |
f9355dee SP |
8706 | } |
8707 | ||
e3678b44 | 8708 | __extension__ extern __inline int32x4_t |
f9355dee | 8709 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8710 | __arm_vmovlbq_m_s16 (int32x4_t __inactive, int16x8_t __a, mve_pred16_t __p) |
f9355dee | 8711 | { |
e3678b44 | 8712 | return __builtin_mve_vmovlbq_m_sv8hi (__inactive, __a, __p); |
f9355dee SP |
8713 | } |
8714 | ||
e3678b44 | 8715 | __extension__ extern __inline int32x4_t |
f9355dee | 8716 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8717 | __arm_vmovltq_m_s16 (int32x4_t __inactive, int16x8_t __a, mve_pred16_t __p) |
f9355dee | 8718 | { |
e3678b44 | 8719 | return __builtin_mve_vmovltq_m_sv8hi (__inactive, __a, __p); |
f9355dee SP |
8720 | } |
8721 | ||
e3678b44 | 8722 | __extension__ extern __inline int16x8_t |
f9355dee | 8723 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8724 | __arm_vmovnbq_m_s32 (int16x8_t __a, int32x4_t __b, mve_pred16_t __p) |
f9355dee | 8725 | { |
e3678b44 | 8726 | return __builtin_mve_vmovnbq_m_sv4si (__a, __b, __p); |
f9355dee SP |
8727 | } |
8728 | ||
e3678b44 | 8729 | __extension__ extern __inline int16x8_t |
f9355dee | 8730 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8731 | __arm_vmovntq_m_s32 (int16x8_t __a, int32x4_t __b, mve_pred16_t __p) |
f9355dee | 8732 | { |
e3678b44 | 8733 | return __builtin_mve_vmovntq_m_sv4si (__a, __b, __p); |
f9355dee SP |
8734 | } |
8735 | ||
e3678b44 | 8736 | __extension__ extern __inline int16x8_t |
f9355dee | 8737 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8738 | __arm_vqmovnbq_m_s32 (int16x8_t __a, int32x4_t __b, mve_pred16_t __p) |
f9355dee | 8739 | { |
e3678b44 | 8740 | return __builtin_mve_vqmovnbq_m_sv4si (__a, __b, __p); |
f9355dee SP |
8741 | } |
8742 | ||
e3678b44 | 8743 | __extension__ extern __inline int16x8_t |
f9355dee | 8744 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8745 | __arm_vqmovntq_m_s32 (int16x8_t __a, int32x4_t __b, mve_pred16_t __p) |
f9355dee | 8746 | { |
e3678b44 | 8747 | return __builtin_mve_vqmovntq_m_sv4si (__a, __b, __p); |
f9355dee SP |
8748 | } |
8749 | ||
e3678b44 | 8750 | __extension__ extern __inline int16x8_t |
f9355dee | 8751 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8752 | __arm_vrev32q_m_s16 (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p) |
f9355dee | 8753 | { |
e3678b44 | 8754 | return __builtin_mve_vrev32q_m_sv8hi (__inactive, __a, __p); |
f9355dee SP |
8755 | } |
8756 | ||
e3678b44 | 8757 | __extension__ extern __inline uint32x4_t |
f9355dee | 8758 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8759 | __arm_vmvnq_m_n_u32 (uint32x4_t __inactive, const int __imm, mve_pred16_t __p) |
f9355dee | 8760 | { |
e3678b44 | 8761 | return __builtin_mve_vmvnq_m_n_uv4si (__inactive, __imm, __p); |
f9355dee SP |
8762 | } |
8763 | ||
e3678b44 | 8764 | __extension__ extern __inline uint32x4_t |
f9355dee | 8765 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8766 | __arm_vorrq_m_n_u32 (uint32x4_t __a, const int __imm, mve_pred16_t __p) |
f9355dee | 8767 | { |
e3678b44 | 8768 | return __builtin_mve_vorrq_m_n_uv4si (__a, __imm, __p); |
f9355dee SP |
8769 | } |
8770 | ||
e3678b44 | 8771 | __extension__ extern __inline uint16x8_t |
f9355dee | 8772 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8773 | __arm_vqrshruntq_n_s32 (uint16x8_t __a, int32x4_t __b, const int __imm) |
f9355dee | 8774 | { |
e3678b44 | 8775 | return __builtin_mve_vqrshruntq_n_sv4si (__a, __b, __imm); |
f9355dee SP |
8776 | } |
8777 | ||
e3678b44 | 8778 | __extension__ extern __inline uint16x8_t |
f9355dee | 8779 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8780 | __arm_vqshrunbq_n_s32 (uint16x8_t __a, int32x4_t __b, const int __imm) |
f9355dee | 8781 | { |
e3678b44 | 8782 | return __builtin_mve_vqshrunbq_n_sv4si (__a, __b, __imm); |
f9355dee SP |
8783 | } |
8784 | ||
e3678b44 | 8785 | __extension__ extern __inline uint16x8_t |
f9355dee | 8786 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8787 | __arm_vqshruntq_n_s32 (uint16x8_t __a, int32x4_t __b, const int __imm) |
f9355dee | 8788 | { |
e3678b44 | 8789 | return __builtin_mve_vqshruntq_n_sv4si (__a, __b, __imm); |
f9355dee SP |
8790 | } |
8791 | ||
e3678b44 | 8792 | __extension__ extern __inline uint16x8_t |
f9355dee | 8793 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8794 | __arm_vqmovunbq_m_s32 (uint16x8_t __a, int32x4_t __b, mve_pred16_t __p) |
f9355dee | 8795 | { |
e3678b44 | 8796 | return __builtin_mve_vqmovunbq_m_sv4si (__a, __b, __p); |
f9355dee SP |
8797 | } |
8798 | ||
e3678b44 | 8799 | __extension__ extern __inline uint16x8_t |
f9355dee | 8800 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8801 | __arm_vqmovuntq_m_s32 (uint16x8_t __a, int32x4_t __b, mve_pred16_t __p) |
f9355dee | 8802 | { |
e3678b44 | 8803 | return __builtin_mve_vqmovuntq_m_sv4si (__a, __b, __p); |
f9355dee SP |
8804 | } |
8805 | ||
e3678b44 | 8806 | __extension__ extern __inline uint16x8_t |
f9355dee | 8807 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8808 | __arm_vqrshrntq_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm) |
f9355dee | 8809 | { |
e3678b44 | 8810 | return __builtin_mve_vqrshrntq_n_uv4si (__a, __b, __imm); |
f9355dee SP |
8811 | } |
8812 | ||
e3678b44 | 8813 | __extension__ extern __inline uint16x8_t |
f9355dee | 8814 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8815 | __arm_vqshrnbq_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm) |
f9355dee | 8816 | { |
e3678b44 | 8817 | return __builtin_mve_vqshrnbq_n_uv4si (__a, __b, __imm); |
f9355dee SP |
8818 | } |
8819 | ||
e3678b44 | 8820 | __extension__ extern __inline uint16x8_t |
f9355dee | 8821 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8822 | __arm_vqshrntq_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm) |
f9355dee | 8823 | { |
e3678b44 | 8824 | return __builtin_mve_vqshrntq_n_uv4si (__a, __b, __imm); |
f9355dee SP |
8825 | } |
8826 | ||
e3678b44 | 8827 | __extension__ extern __inline uint16x8_t |
f9355dee | 8828 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8829 | __arm_vrshrnbq_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm) |
f9355dee | 8830 | { |
e3678b44 | 8831 | return __builtin_mve_vrshrnbq_n_uv4si (__a, __b, __imm); |
f9355dee SP |
8832 | } |
8833 | ||
e3678b44 | 8834 | __extension__ extern __inline uint16x8_t |
f9355dee | 8835 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8836 | __arm_vrshrntq_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm) |
f9355dee | 8837 | { |
e3678b44 | 8838 | return __builtin_mve_vrshrntq_n_uv4si (__a, __b, __imm); |
f9355dee SP |
8839 | } |
8840 | ||
e3678b44 | 8841 | __extension__ extern __inline uint16x8_t |
f9355dee | 8842 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8843 | __arm_vshrnbq_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm) |
f9355dee | 8844 | { |
e3678b44 | 8845 | return __builtin_mve_vshrnbq_n_uv4si (__a, __b, __imm); |
f9355dee SP |
8846 | } |
8847 | ||
e3678b44 | 8848 | __extension__ extern __inline uint16x8_t |
f9355dee | 8849 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8850 | __arm_vshrntq_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm) |
f9355dee | 8851 | { |
e3678b44 | 8852 | return __builtin_mve_vshrntq_n_uv4si (__a, __b, __imm); |
f9355dee SP |
8853 | } |
8854 | ||
e3678b44 | 8855 | __extension__ extern __inline uint64_t |
f9355dee | 8856 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8857 | __arm_vmlaldavaq_u32 (uint64_t __a, uint32x4_t __b, uint32x4_t __c) |
f9355dee | 8858 | { |
e3678b44 | 8859 | return __builtin_mve_vmlaldavaq_uv4si (__a, __b, __c); |
f9355dee SP |
8860 | } |
8861 | ||
e3678b44 | 8862 | __extension__ extern __inline uint64_t |
f9355dee | 8863 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8864 | __arm_vmlaldavq_p_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) |
f9355dee | 8865 | { |
e3678b44 | 8866 | return __builtin_mve_vmlaldavq_p_uv4si (__a, __b, __p); |
f9355dee SP |
8867 | } |
8868 | ||
e3678b44 | 8869 | __extension__ extern __inline uint32x4_t |
f9355dee | 8870 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8871 | __arm_vmovlbq_m_u16 (uint32x4_t __inactive, uint16x8_t __a, mve_pred16_t __p) |
f9355dee | 8872 | { |
e3678b44 | 8873 | return __builtin_mve_vmovlbq_m_uv8hi (__inactive, __a, __p); |
f9355dee SP |
8874 | } |
8875 | ||
e3678b44 | 8876 | __extension__ extern __inline uint32x4_t |
f9355dee | 8877 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8878 | __arm_vmovltq_m_u16 (uint32x4_t __inactive, uint16x8_t __a, mve_pred16_t __p) |
f9355dee | 8879 | { |
e3678b44 | 8880 | return __builtin_mve_vmovltq_m_uv8hi (__inactive, __a, __p); |
f9355dee SP |
8881 | } |
8882 | ||
e3678b44 | 8883 | __extension__ extern __inline uint16x8_t |
f9355dee | 8884 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8885 | __arm_vmovnbq_m_u32 (uint16x8_t __a, uint32x4_t __b, mve_pred16_t __p) |
f9355dee | 8886 | { |
e3678b44 | 8887 | return __builtin_mve_vmovnbq_m_uv4si (__a, __b, __p); |
f9355dee SP |
8888 | } |
8889 | ||
e3678b44 | 8890 | __extension__ extern __inline uint16x8_t |
f9355dee | 8891 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8892 | __arm_vmovntq_m_u32 (uint16x8_t __a, uint32x4_t __b, mve_pred16_t __p) |
f9355dee | 8893 | { |
e3678b44 | 8894 | return __builtin_mve_vmovntq_m_uv4si (__a, __b, __p); |
f9355dee SP |
8895 | } |
8896 | ||
e3678b44 | 8897 | __extension__ extern __inline uint16x8_t |
f9355dee | 8898 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8899 | __arm_vqmovnbq_m_u32 (uint16x8_t __a, uint32x4_t __b, mve_pred16_t __p) |
f9355dee | 8900 | { |
e3678b44 | 8901 | return __builtin_mve_vqmovnbq_m_uv4si (__a, __b, __p); |
f9355dee SP |
8902 | } |
8903 | ||
e3678b44 | 8904 | __extension__ extern __inline uint16x8_t |
f9355dee | 8905 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8906 | __arm_vqmovntq_m_u32 (uint16x8_t __a, uint32x4_t __b, mve_pred16_t __p) |
f9355dee | 8907 | { |
e3678b44 | 8908 | return __builtin_mve_vqmovntq_m_uv4si (__a, __b, __p); |
f9355dee SP |
8909 | } |
8910 | ||
e3678b44 | 8911 | __extension__ extern __inline uint16x8_t |
f9355dee | 8912 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8913 | __arm_vrev32q_m_u16 (uint16x8_t __inactive, uint16x8_t __a, mve_pred16_t __p) |
f9355dee | 8914 | { |
e3678b44 | 8915 | return __builtin_mve_vrev32q_m_uv8hi (__inactive, __a, __p); |
f9355dee | 8916 | } |
db5db9d2 SP |
8917 | |
8918 | __extension__ extern __inline int8x16_t | |
8919 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8920 | __arm_vsriq_m_n_s8 (int8x16_t __a, int8x16_t __b, const int __imm, mve_pred16_t __p) | |
8921 | { | |
8922 | return __builtin_mve_vsriq_m_n_sv16qi (__a, __b, __imm, __p); | |
8923 | } | |
8924 | ||
8925 | __extension__ extern __inline int8x16_t | |
8926 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8927 | __arm_vsubq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
8928 | { | |
8929 | return __builtin_mve_vsubq_m_sv16qi (__inactive, __a, __b, __p); | |
8930 | } | |
8931 | ||
8932 | __extension__ extern __inline uint8x16_t | |
8933 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8934 | __arm_vqshluq_m_n_s8 (uint8x16_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p) | |
8935 | { | |
8936 | return __builtin_mve_vqshluq_m_n_sv16qi (__inactive, __a, __imm, __p); | |
8937 | } | |
8938 | ||
8939 | __extension__ extern __inline uint32_t | |
8940 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8941 | __arm_vabavq_p_s8 (uint32_t __a, int8x16_t __b, int8x16_t __c, mve_pred16_t __p) | |
8942 | { | |
8943 | return __builtin_mve_vabavq_p_sv16qi (__a, __b, __c, __p); | |
8944 | } | |
8945 | ||
8946 | __extension__ extern __inline uint8x16_t | |
8947 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8948 | __arm_vsriq_m_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __imm, mve_pred16_t __p) | |
8949 | { | |
8950 | return __builtin_mve_vsriq_m_n_uv16qi (__a, __b, __imm, __p); | |
8951 | } | |
8952 | ||
8953 | __extension__ extern __inline uint8x16_t | |
8954 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8955 | __arm_vshlq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
8956 | { | |
8957 | return __builtin_mve_vshlq_m_uv16qi (__inactive, __a, __b, __p); | |
8958 | } | |
8959 | ||
8960 | __extension__ extern __inline uint8x16_t | |
8961 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8962 | __arm_vsubq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
8963 | { | |
8964 | return __builtin_mve_vsubq_m_uv16qi (__inactive, __a, __b, __p); | |
8965 | } | |
8966 | ||
8967 | __extension__ extern __inline uint32_t | |
8968 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8969 | __arm_vabavq_p_u8 (uint32_t __a, uint8x16_t __b, uint8x16_t __c, mve_pred16_t __p) | |
8970 | { | |
8971 | return __builtin_mve_vabavq_p_uv16qi (__a, __b, __c, __p); | |
8972 | } | |
8973 | ||
8974 | __extension__ extern __inline int8x16_t | |
8975 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8976 | __arm_vshlq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
8977 | { | |
8978 | return __builtin_mve_vshlq_m_sv16qi (__inactive, __a, __b, __p); | |
8979 | } | |
8980 | ||
8981 | __extension__ extern __inline int16x8_t | |
8982 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8983 | __arm_vsriq_m_n_s16 (int16x8_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p) | |
8984 | { | |
8985 | return __builtin_mve_vsriq_m_n_sv8hi (__a, __b, __imm, __p); | |
8986 | } | |
8987 | ||
8988 | __extension__ extern __inline int16x8_t | |
8989 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8990 | __arm_vsubq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
8991 | { | |
8992 | return __builtin_mve_vsubq_m_sv8hi (__inactive, __a, __b, __p); | |
8993 | } | |
8994 | ||
8995 | __extension__ extern __inline uint16x8_t | |
8996 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8997 | __arm_vqshluq_m_n_s16 (uint16x8_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p) | |
8998 | { | |
8999 | return __builtin_mve_vqshluq_m_n_sv8hi (__inactive, __a, __imm, __p); | |
9000 | } | |
9001 | ||
9002 | __extension__ extern __inline uint32_t | |
9003 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9004 | __arm_vabavq_p_s16 (uint32_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p) | |
9005 | { | |
9006 | return __builtin_mve_vabavq_p_sv8hi (__a, __b, __c, __p); | |
9007 | } | |
9008 | ||
9009 | __extension__ extern __inline uint16x8_t | |
9010 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9011 | __arm_vsriq_m_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p) | |
9012 | { | |
9013 | return __builtin_mve_vsriq_m_n_uv8hi (__a, __b, __imm, __p); | |
9014 | } | |
9015 | ||
9016 | __extension__ extern __inline uint16x8_t | |
9017 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9018 | __arm_vshlq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
9019 | { | |
9020 | return __builtin_mve_vshlq_m_uv8hi (__inactive, __a, __b, __p); | |
9021 | } | |
9022 | ||
9023 | __extension__ extern __inline uint16x8_t | |
9024 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9025 | __arm_vsubq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
9026 | { | |
9027 | return __builtin_mve_vsubq_m_uv8hi (__inactive, __a, __b, __p); | |
9028 | } | |
9029 | ||
9030 | __extension__ extern __inline uint32_t | |
9031 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9032 | __arm_vabavq_p_u16 (uint32_t __a, uint16x8_t __b, uint16x8_t __c, mve_pred16_t __p) | |
9033 | { | |
9034 | return __builtin_mve_vabavq_p_uv8hi (__a, __b, __c, __p); | |
9035 | } | |
9036 | ||
9037 | __extension__ extern __inline int16x8_t | |
9038 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9039 | __arm_vshlq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
9040 | { | |
9041 | return __builtin_mve_vshlq_m_sv8hi (__inactive, __a, __b, __p); | |
9042 | } | |
9043 | ||
9044 | __extension__ extern __inline int32x4_t | |
9045 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9046 | __arm_vsriq_m_n_s32 (int32x4_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p) | |
9047 | { | |
9048 | return __builtin_mve_vsriq_m_n_sv4si (__a, __b, __imm, __p); | |
9049 | } | |
9050 | ||
9051 | __extension__ extern __inline int32x4_t | |
9052 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9053 | __arm_vsubq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
9054 | { | |
9055 | return __builtin_mve_vsubq_m_sv4si (__inactive, __a, __b, __p); | |
9056 | } | |
9057 | ||
9058 | __extension__ extern __inline uint32x4_t | |
9059 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9060 | __arm_vqshluq_m_n_s32 (uint32x4_t __inactive, int32x4_t __a, const int __imm, mve_pred16_t __p) | |
9061 | { | |
9062 | return __builtin_mve_vqshluq_m_n_sv4si (__inactive, __a, __imm, __p); | |
9063 | } | |
9064 | ||
9065 | __extension__ extern __inline uint32_t | |
9066 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9067 | __arm_vabavq_p_s32 (uint32_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p) | |
9068 | { | |
9069 | return __builtin_mve_vabavq_p_sv4si (__a, __b, __c, __p); | |
9070 | } | |
9071 | ||
9072 | __extension__ extern __inline uint32x4_t | |
9073 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9074 | __arm_vsriq_m_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p) | |
9075 | { | |
9076 | return __builtin_mve_vsriq_m_n_uv4si (__a, __b, __imm, __p); | |
9077 | } | |
9078 | ||
9079 | __extension__ extern __inline uint32x4_t | |
9080 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9081 | __arm_vshlq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
9082 | { | |
9083 | return __builtin_mve_vshlq_m_uv4si (__inactive, __a, __b, __p); | |
9084 | } | |
9085 | ||
9086 | __extension__ extern __inline uint32x4_t | |
9087 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9088 | __arm_vsubq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
9089 | { | |
9090 | return __builtin_mve_vsubq_m_uv4si (__inactive, __a, __b, __p); | |
9091 | } | |
9092 | ||
9093 | __extension__ extern __inline uint32_t | |
9094 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9095 | __arm_vabavq_p_u32 (uint32_t __a, uint32x4_t __b, uint32x4_t __c, mve_pred16_t __p) | |
9096 | { | |
9097 | return __builtin_mve_vabavq_p_uv4si (__a, __b, __c, __p); | |
9098 | } | |
9099 | ||
9100 | __extension__ extern __inline int32x4_t | |
9101 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9102 | __arm_vshlq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
9103 | { | |
9104 | return __builtin_mve_vshlq_m_sv4si (__inactive, __a, __b, __p); | |
9105 | } | |
9106 | ||
8eb3b6b9 SP |
9107 | __extension__ extern __inline int8x16_t |
9108 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9109 | __arm_vabdq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
9110 | { | |
9111 | return __builtin_mve_vabdq_m_sv16qi (__inactive, __a, __b, __p); | |
9112 | } | |
9113 | ||
9114 | __extension__ extern __inline int32x4_t | |
9115 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9116 | __arm_vabdq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
9117 | { | |
9118 | return __builtin_mve_vabdq_m_sv4si (__inactive, __a, __b, __p); | |
9119 | } | |
9120 | ||
9121 | __extension__ extern __inline int16x8_t | |
9122 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9123 | __arm_vabdq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
9124 | { | |
9125 | return __builtin_mve_vabdq_m_sv8hi (__inactive, __a, __b, __p); | |
9126 | } | |
9127 | ||
9128 | __extension__ extern __inline uint8x16_t | |
9129 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9130 | __arm_vabdq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
9131 | { | |
9132 | return __builtin_mve_vabdq_m_uv16qi (__inactive, __a, __b, __p); | |
9133 | } | |
9134 | ||
9135 | __extension__ extern __inline uint32x4_t | |
9136 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9137 | __arm_vabdq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
9138 | { | |
9139 | return __builtin_mve_vabdq_m_uv4si (__inactive, __a, __b, __p); | |
9140 | } | |
9141 | ||
9142 | __extension__ extern __inline uint16x8_t | |
9143 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9144 | __arm_vabdq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
9145 | { | |
9146 | return __builtin_mve_vabdq_m_uv8hi (__inactive, __a, __b, __p); | |
9147 | } | |
9148 | ||
9149 | __extension__ extern __inline int8x16_t | |
9150 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9151 | __arm_vaddq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p) | |
9152 | { | |
9153 | return __builtin_mve_vaddq_m_n_sv16qi (__inactive, __a, __b, __p); | |
9154 | } | |
9155 | ||
9156 | __extension__ extern __inline int32x4_t | |
9157 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9158 | __arm_vaddq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p) | |
9159 | { | |
9160 | return __builtin_mve_vaddq_m_n_sv4si (__inactive, __a, __b, __p); | |
9161 | } | |
9162 | ||
9163 | __extension__ extern __inline int16x8_t | |
9164 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9165 | __arm_vaddq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p) | |
9166 | { | |
9167 | return __builtin_mve_vaddq_m_n_sv8hi (__inactive, __a, __b, __p); | |
9168 | } | |
9169 | ||
9170 | __extension__ extern __inline uint8x16_t | |
9171 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9172 | __arm_vaddq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p) | |
9173 | { | |
9174 | return __builtin_mve_vaddq_m_n_uv16qi (__inactive, __a, __b, __p); | |
9175 | } | |
9176 | ||
9177 | __extension__ extern __inline uint32x4_t | |
9178 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9179 | __arm_vaddq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p) | |
9180 | { | |
9181 | return __builtin_mve_vaddq_m_n_uv4si (__inactive, __a, __b, __p); | |
9182 | } | |
9183 | ||
9184 | __extension__ extern __inline uint16x8_t | |
9185 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9186 | __arm_vaddq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p) | |
9187 | { | |
9188 | return __builtin_mve_vaddq_m_n_uv8hi (__inactive, __a, __b, __p); | |
9189 | } | |
9190 | ||
9191 | __extension__ extern __inline int8x16_t | |
9192 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9193 | __arm_vaddq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
9194 | { | |
9195 | return __builtin_mve_vaddq_m_sv16qi (__inactive, __a, __b, __p); | |
9196 | } | |
9197 | ||
9198 | __extension__ extern __inline int32x4_t | |
9199 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9200 | __arm_vaddq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
9201 | { | |
9202 | return __builtin_mve_vaddq_m_sv4si (__inactive, __a, __b, __p); | |
9203 | } | |
9204 | ||
9205 | __extension__ extern __inline int16x8_t | |
9206 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9207 | __arm_vaddq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
9208 | { | |
9209 | return __builtin_mve_vaddq_m_sv8hi (__inactive, __a, __b, __p); | |
9210 | } | |
9211 | ||
9212 | __extension__ extern __inline uint8x16_t | |
9213 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9214 | __arm_vaddq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
9215 | { | |
9216 | return __builtin_mve_vaddq_m_uv16qi (__inactive, __a, __b, __p); | |
9217 | } | |
9218 | ||
9219 | __extension__ extern __inline uint32x4_t | |
9220 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9221 | __arm_vaddq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
9222 | { | |
9223 | return __builtin_mve_vaddq_m_uv4si (__inactive, __a, __b, __p); | |
9224 | } | |
9225 | ||
9226 | __extension__ extern __inline uint16x8_t | |
9227 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9228 | __arm_vaddq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
9229 | { | |
9230 | return __builtin_mve_vaddq_m_uv8hi (__inactive, __a, __b, __p); | |
9231 | } | |
9232 | ||
9233 | __extension__ extern __inline int8x16_t | |
9234 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9235 | __arm_vandq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
9236 | { | |
9237 | return __builtin_mve_vandq_m_sv16qi (__inactive, __a, __b, __p); | |
9238 | } | |
9239 | ||
9240 | __extension__ extern __inline int32x4_t | |
9241 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9242 | __arm_vandq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
9243 | { | |
9244 | return __builtin_mve_vandq_m_sv4si (__inactive, __a, __b, __p); | |
9245 | } | |
9246 | ||
9247 | __extension__ extern __inline int16x8_t | |
9248 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9249 | __arm_vandq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
9250 | { | |
9251 | return __builtin_mve_vandq_m_sv8hi (__inactive, __a, __b, __p); | |
9252 | } | |
9253 | ||
9254 | __extension__ extern __inline uint8x16_t | |
9255 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9256 | __arm_vandq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
9257 | { | |
9258 | return __builtin_mve_vandq_m_uv16qi (__inactive, __a, __b, __p); | |
9259 | } | |
9260 | ||
9261 | __extension__ extern __inline uint32x4_t | |
9262 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9263 | __arm_vandq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
9264 | { | |
9265 | return __builtin_mve_vandq_m_uv4si (__inactive, __a, __b, __p); | |
9266 | } | |
9267 | ||
9268 | __extension__ extern __inline uint16x8_t | |
9269 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9270 | __arm_vandq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
9271 | { | |
9272 | return __builtin_mve_vandq_m_uv8hi (__inactive, __a, __b, __p); | |
9273 | } | |
9274 | ||
9275 | __extension__ extern __inline int8x16_t | |
9276 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9277 | __arm_vbicq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
9278 | { | |
9279 | return __builtin_mve_vbicq_m_sv16qi (__inactive, __a, __b, __p); | |
9280 | } | |
9281 | ||
9282 | __extension__ extern __inline int32x4_t | |
9283 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9284 | __arm_vbicq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
9285 | { | |
9286 | return __builtin_mve_vbicq_m_sv4si (__inactive, __a, __b, __p); | |
9287 | } | |
9288 | ||
9289 | __extension__ extern __inline int16x8_t | |
9290 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9291 | __arm_vbicq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
9292 | { | |
9293 | return __builtin_mve_vbicq_m_sv8hi (__inactive, __a, __b, __p); | |
9294 | } | |
9295 | ||
9296 | __extension__ extern __inline uint8x16_t | |
9297 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9298 | __arm_vbicq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
9299 | { | |
9300 | return __builtin_mve_vbicq_m_uv16qi (__inactive, __a, __b, __p); | |
9301 | } | |
9302 | ||
9303 | __extension__ extern __inline uint32x4_t | |
9304 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9305 | __arm_vbicq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
9306 | { | |
9307 | return __builtin_mve_vbicq_m_uv4si (__inactive, __a, __b, __p); | |
9308 | } | |
9309 | ||
9310 | __extension__ extern __inline uint16x8_t | |
9311 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9312 | __arm_vbicq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
9313 | { | |
9314 | return __builtin_mve_vbicq_m_uv8hi (__inactive, __a, __b, __p); | |
9315 | } | |
9316 | ||
9317 | __extension__ extern __inline int8x16_t | |
9318 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9319 | __arm_vbrsrq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, int32_t __b, mve_pred16_t __p) | |
9320 | { | |
9321 | return __builtin_mve_vbrsrq_m_n_sv16qi (__inactive, __a, __b, __p); | |
9322 | } | |
9323 | ||
9324 | __extension__ extern __inline int32x4_t | |
9325 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9326 | __arm_vbrsrq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p) | |
9327 | { | |
9328 | return __builtin_mve_vbrsrq_m_n_sv4si (__inactive, __a, __b, __p); | |
9329 | } | |
9330 | ||
9331 | __extension__ extern __inline int16x8_t | |
9332 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9333 | __arm_vbrsrq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, int32_t __b, mve_pred16_t __p) | |
9334 | { | |
9335 | return __builtin_mve_vbrsrq_m_n_sv8hi (__inactive, __a, __b, __p); | |
9336 | } | |
9337 | ||
9338 | __extension__ extern __inline uint8x16_t | |
9339 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9340 | __arm_vbrsrq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, int32_t __b, mve_pred16_t __p) | |
9341 | { | |
9342 | return __builtin_mve_vbrsrq_m_n_uv16qi (__inactive, __a, __b, __p); | |
9343 | } | |
9344 | ||
9345 | __extension__ extern __inline uint32x4_t | |
9346 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9347 | __arm_vbrsrq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, int32_t __b, mve_pred16_t __p) | |
9348 | { | |
9349 | return __builtin_mve_vbrsrq_m_n_uv4si (__inactive, __a, __b, __p); | |
9350 | } | |
9351 | ||
9352 | __extension__ extern __inline uint16x8_t | |
9353 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9354 | __arm_vbrsrq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, int32_t __b, mve_pred16_t __p) | |
9355 | { | |
9356 | return __builtin_mve_vbrsrq_m_n_uv8hi (__inactive, __a, __b, __p); | |
9357 | } | |
9358 | ||
9359 | __extension__ extern __inline int8x16_t | |
9360 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9361 | __arm_vcaddq_rot270_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
9362 | { | |
9363 | return __builtin_mve_vcaddq_rot270_m_sv16qi (__inactive, __a, __b, __p); | |
9364 | } | |
9365 | ||
9366 | __extension__ extern __inline int32x4_t | |
9367 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9368 | __arm_vcaddq_rot270_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
9369 | { | |
9370 | return __builtin_mve_vcaddq_rot270_m_sv4si (__inactive, __a, __b, __p); | |
9371 | } | |
9372 | ||
9373 | __extension__ extern __inline int16x8_t | |
9374 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9375 | __arm_vcaddq_rot270_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
9376 | { | |
9377 | return __builtin_mve_vcaddq_rot270_m_sv8hi (__inactive, __a, __b, __p); | |
9378 | } | |
9379 | ||
9380 | __extension__ extern __inline uint8x16_t | |
9381 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9382 | __arm_vcaddq_rot270_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
9383 | { | |
9384 | return __builtin_mve_vcaddq_rot270_m_uv16qi (__inactive, __a, __b, __p); | |
9385 | } | |
9386 | ||
9387 | __extension__ extern __inline uint32x4_t | |
9388 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9389 | __arm_vcaddq_rot270_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
9390 | { | |
9391 | return __builtin_mve_vcaddq_rot270_m_uv4si (__inactive, __a, __b, __p); | |
9392 | } | |
9393 | ||
9394 | __extension__ extern __inline uint16x8_t | |
9395 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9396 | __arm_vcaddq_rot270_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
9397 | { | |
9398 | return __builtin_mve_vcaddq_rot270_m_uv8hi (__inactive, __a, __b, __p); | |
9399 | } | |
9400 | ||
9401 | __extension__ extern __inline int8x16_t | |
9402 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9403 | __arm_vcaddq_rot90_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
9404 | { | |
9405 | return __builtin_mve_vcaddq_rot90_m_sv16qi (__inactive, __a, __b, __p); | |
9406 | } | |
9407 | ||
9408 | __extension__ extern __inline int32x4_t | |
9409 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9410 | __arm_vcaddq_rot90_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
9411 | { | |
9412 | return __builtin_mve_vcaddq_rot90_m_sv4si (__inactive, __a, __b, __p); | |
9413 | } | |
9414 | ||
9415 | __extension__ extern __inline int16x8_t | |
9416 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9417 | __arm_vcaddq_rot90_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
9418 | { | |
9419 | return __builtin_mve_vcaddq_rot90_m_sv8hi (__inactive, __a, __b, __p); | |
9420 | } | |
9421 | ||
9422 | __extension__ extern __inline uint8x16_t | |
9423 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9424 | __arm_vcaddq_rot90_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
9425 | { | |
9426 | return __builtin_mve_vcaddq_rot90_m_uv16qi (__inactive, __a, __b, __p); | |
9427 | } | |
9428 | ||
9429 | __extension__ extern __inline uint32x4_t | |
9430 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9431 | __arm_vcaddq_rot90_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
9432 | { | |
9433 | return __builtin_mve_vcaddq_rot90_m_uv4si (__inactive, __a, __b, __p); | |
9434 | } | |
9435 | ||
9436 | __extension__ extern __inline uint16x8_t | |
9437 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9438 | __arm_vcaddq_rot90_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
9439 | { | |
9440 | return __builtin_mve_vcaddq_rot90_m_uv8hi (__inactive, __a, __b, __p); | |
9441 | } | |
9442 | ||
9443 | __extension__ extern __inline int8x16_t | |
9444 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9445 | __arm_veorq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
9446 | { | |
9447 | return __builtin_mve_veorq_m_sv16qi (__inactive, __a, __b, __p); | |
9448 | } | |
9449 | ||
9450 | __extension__ extern __inline int32x4_t | |
9451 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9452 | __arm_veorq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
9453 | { | |
9454 | return __builtin_mve_veorq_m_sv4si (__inactive, __a, __b, __p); | |
9455 | } | |
9456 | ||
9457 | __extension__ extern __inline int16x8_t | |
9458 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9459 | __arm_veorq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
9460 | { | |
9461 | return __builtin_mve_veorq_m_sv8hi (__inactive, __a, __b, __p); | |
9462 | } | |
9463 | ||
9464 | __extension__ extern __inline uint8x16_t | |
9465 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9466 | __arm_veorq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
9467 | { | |
9468 | return __builtin_mve_veorq_m_uv16qi (__inactive, __a, __b, __p); | |
9469 | } | |
9470 | ||
9471 | __extension__ extern __inline uint32x4_t | |
9472 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9473 | __arm_veorq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
9474 | { | |
9475 | return __builtin_mve_veorq_m_uv4si (__inactive, __a, __b, __p); | |
9476 | } | |
9477 | ||
9478 | __extension__ extern __inline uint16x8_t | |
9479 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9480 | __arm_veorq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
9481 | { | |
9482 | return __builtin_mve_veorq_m_uv8hi (__inactive, __a, __b, __p); | |
9483 | } | |
9484 | ||
9485 | __extension__ extern __inline int8x16_t | |
9486 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9487 | __arm_vhaddq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p) | |
9488 | { | |
9489 | return __builtin_mve_vhaddq_m_n_sv16qi (__inactive, __a, __b, __p); | |
9490 | } | |
9491 | ||
9492 | __extension__ extern __inline int32x4_t | |
9493 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9494 | __arm_vhaddq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p) | |
9495 | { | |
9496 | return __builtin_mve_vhaddq_m_n_sv4si (__inactive, __a, __b, __p); | |
9497 | } | |
9498 | ||
9499 | __extension__ extern __inline int16x8_t | |
9500 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9501 | __arm_vhaddq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p) | |
9502 | { | |
9503 | return __builtin_mve_vhaddq_m_n_sv8hi (__inactive, __a, __b, __p); | |
9504 | } | |
9505 | ||
9506 | __extension__ extern __inline uint8x16_t | |
9507 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9508 | __arm_vhaddq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p) | |
9509 | { | |
9510 | return __builtin_mve_vhaddq_m_n_uv16qi (__inactive, __a, __b, __p); | |
9511 | } | |
9512 | ||
9513 | __extension__ extern __inline uint32x4_t | |
9514 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9515 | __arm_vhaddq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p) | |
9516 | { | |
9517 | return __builtin_mve_vhaddq_m_n_uv4si (__inactive, __a, __b, __p); | |
9518 | } | |
9519 | ||
9520 | __extension__ extern __inline uint16x8_t | |
9521 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9522 | __arm_vhaddq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p) | |
9523 | { | |
9524 | return __builtin_mve_vhaddq_m_n_uv8hi (__inactive, __a, __b, __p); | |
9525 | } | |
9526 | ||
9527 | __extension__ extern __inline int8x16_t | |
9528 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9529 | __arm_vhaddq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
9530 | { | |
9531 | return __builtin_mve_vhaddq_m_sv16qi (__inactive, __a, __b, __p); | |
9532 | } | |
9533 | ||
9534 | __extension__ extern __inline int32x4_t | |
9535 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9536 | __arm_vhaddq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
9537 | { | |
9538 | return __builtin_mve_vhaddq_m_sv4si (__inactive, __a, __b, __p); | |
9539 | } | |
9540 | ||
9541 | __extension__ extern __inline int16x8_t | |
9542 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9543 | __arm_vhaddq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
9544 | { | |
9545 | return __builtin_mve_vhaddq_m_sv8hi (__inactive, __a, __b, __p); | |
9546 | } | |
9547 | ||
9548 | __extension__ extern __inline uint8x16_t | |
9549 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9550 | __arm_vhaddq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
9551 | { | |
9552 | return __builtin_mve_vhaddq_m_uv16qi (__inactive, __a, __b, __p); | |
9553 | } | |
9554 | ||
9555 | __extension__ extern __inline uint32x4_t | |
9556 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9557 | __arm_vhaddq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
9558 | { | |
9559 | return __builtin_mve_vhaddq_m_uv4si (__inactive, __a, __b, __p); | |
9560 | } | |
9561 | ||
9562 | __extension__ extern __inline uint16x8_t | |
9563 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9564 | __arm_vhaddq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
9565 | { | |
9566 | return __builtin_mve_vhaddq_m_uv8hi (__inactive, __a, __b, __p); | |
9567 | } | |
9568 | ||
9569 | __extension__ extern __inline int8x16_t | |
9570 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9571 | __arm_vhcaddq_rot270_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
9572 | { | |
9573 | return __builtin_mve_vhcaddq_rot270_m_sv16qi (__inactive, __a, __b, __p); | |
9574 | } | |
9575 | ||
9576 | __extension__ extern __inline int32x4_t | |
9577 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9578 | __arm_vhcaddq_rot270_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
9579 | { | |
9580 | return __builtin_mve_vhcaddq_rot270_m_sv4si (__inactive, __a, __b, __p); | |
9581 | } | |
9582 | ||
9583 | __extension__ extern __inline int16x8_t | |
9584 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9585 | __arm_vhcaddq_rot270_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
9586 | { | |
9587 | return __builtin_mve_vhcaddq_rot270_m_sv8hi (__inactive, __a, __b, __p); | |
9588 | } | |
9589 | ||
9590 | __extension__ extern __inline int8x16_t | |
9591 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9592 | __arm_vhcaddq_rot90_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
9593 | { | |
9594 | return __builtin_mve_vhcaddq_rot90_m_sv16qi (__inactive, __a, __b, __p); | |
9595 | } | |
9596 | ||
9597 | __extension__ extern __inline int32x4_t | |
9598 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9599 | __arm_vhcaddq_rot90_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
9600 | { | |
9601 | return __builtin_mve_vhcaddq_rot90_m_sv4si (__inactive, __a, __b, __p); | |
9602 | } | |
9603 | ||
9604 | __extension__ extern __inline int16x8_t | |
9605 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9606 | __arm_vhcaddq_rot90_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
9607 | { | |
9608 | return __builtin_mve_vhcaddq_rot90_m_sv8hi (__inactive, __a, __b, __p); | |
9609 | } | |
9610 | ||
9611 | __extension__ extern __inline int8x16_t | |
9612 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9613 | __arm_vhsubq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p) | |
9614 | { | |
9615 | return __builtin_mve_vhsubq_m_n_sv16qi (__inactive, __a, __b, __p); | |
9616 | } | |
9617 | ||
9618 | __extension__ extern __inline int32x4_t | |
9619 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9620 | __arm_vhsubq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p) | |
9621 | { | |
9622 | return __builtin_mve_vhsubq_m_n_sv4si (__inactive, __a, __b, __p); | |
9623 | } | |
9624 | ||
9625 | __extension__ extern __inline int16x8_t | |
9626 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9627 | __arm_vhsubq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p) | |
9628 | { | |
9629 | return __builtin_mve_vhsubq_m_n_sv8hi (__inactive, __a, __b, __p); | |
9630 | } | |
9631 | ||
9632 | __extension__ extern __inline uint8x16_t | |
9633 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9634 | __arm_vhsubq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p) | |
9635 | { | |
9636 | return __builtin_mve_vhsubq_m_n_uv16qi (__inactive, __a, __b, __p); | |
9637 | } | |
9638 | ||
9639 | __extension__ extern __inline uint32x4_t | |
9640 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9641 | __arm_vhsubq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p) | |
9642 | { | |
9643 | return __builtin_mve_vhsubq_m_n_uv4si (__inactive, __a, __b, __p); | |
9644 | } | |
9645 | ||
9646 | __extension__ extern __inline uint16x8_t | |
9647 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9648 | __arm_vhsubq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p) | |
9649 | { | |
9650 | return __builtin_mve_vhsubq_m_n_uv8hi (__inactive, __a, __b, __p); | |
9651 | } | |
9652 | ||
9653 | __extension__ extern __inline int8x16_t | |
9654 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9655 | __arm_vhsubq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
9656 | { | |
9657 | return __builtin_mve_vhsubq_m_sv16qi (__inactive, __a, __b, __p); | |
9658 | } | |
9659 | ||
9660 | __extension__ extern __inline int32x4_t | |
9661 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9662 | __arm_vhsubq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
9663 | { | |
9664 | return __builtin_mve_vhsubq_m_sv4si (__inactive, __a, __b, __p); | |
9665 | } | |
9666 | ||
9667 | __extension__ extern __inline int16x8_t | |
9668 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9669 | __arm_vhsubq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
9670 | { | |
9671 | return __builtin_mve_vhsubq_m_sv8hi (__inactive, __a, __b, __p); | |
9672 | } | |
9673 | ||
9674 | __extension__ extern __inline uint8x16_t | |
9675 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9676 | __arm_vhsubq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
9677 | { | |
9678 | return __builtin_mve_vhsubq_m_uv16qi (__inactive, __a, __b, __p); | |
9679 | } | |
9680 | ||
9681 | __extension__ extern __inline uint32x4_t | |
9682 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9683 | __arm_vhsubq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
9684 | { | |
9685 | return __builtin_mve_vhsubq_m_uv4si (__inactive, __a, __b, __p); | |
9686 | } | |
9687 | ||
9688 | __extension__ extern __inline uint16x8_t | |
9689 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9690 | __arm_vhsubq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
9691 | { | |
9692 | return __builtin_mve_vhsubq_m_uv8hi (__inactive, __a, __b, __p); | |
9693 | } | |
9694 | ||
9695 | __extension__ extern __inline int8x16_t | |
9696 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9697 | __arm_vmaxq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
9698 | { | |
9699 | return __builtin_mve_vmaxq_m_sv16qi (__inactive, __a, __b, __p); | |
9700 | } | |
9701 | ||
9702 | __extension__ extern __inline int32x4_t | |
9703 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9704 | __arm_vmaxq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
9705 | { | |
9706 | return __builtin_mve_vmaxq_m_sv4si (__inactive, __a, __b, __p); | |
9707 | } | |
9708 | ||
9709 | __extension__ extern __inline int16x8_t | |
9710 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9711 | __arm_vmaxq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
9712 | { | |
9713 | return __builtin_mve_vmaxq_m_sv8hi (__inactive, __a, __b, __p); | |
9714 | } | |
9715 | ||
9716 | __extension__ extern __inline uint8x16_t | |
9717 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9718 | __arm_vmaxq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
9719 | { | |
9720 | return __builtin_mve_vmaxq_m_uv16qi (__inactive, __a, __b, __p); | |
9721 | } | |
9722 | ||
9723 | __extension__ extern __inline uint32x4_t | |
9724 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9725 | __arm_vmaxq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
9726 | { | |
9727 | return __builtin_mve_vmaxq_m_uv4si (__inactive, __a, __b, __p); | |
9728 | } | |
9729 | ||
9730 | __extension__ extern __inline uint16x8_t | |
9731 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9732 | __arm_vmaxq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
9733 | { | |
9734 | return __builtin_mve_vmaxq_m_uv8hi (__inactive, __a, __b, __p); | |
9735 | } | |
9736 | ||
9737 | __extension__ extern __inline int8x16_t | |
9738 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9739 | __arm_vminq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
9740 | { | |
9741 | return __builtin_mve_vminq_m_sv16qi (__inactive, __a, __b, __p); | |
9742 | } | |
9743 | ||
9744 | __extension__ extern __inline int32x4_t | |
9745 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9746 | __arm_vminq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
9747 | { | |
9748 | return __builtin_mve_vminq_m_sv4si (__inactive, __a, __b, __p); | |
9749 | } | |
9750 | ||
9751 | __extension__ extern __inline int16x8_t | |
9752 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9753 | __arm_vminq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
9754 | { | |
9755 | return __builtin_mve_vminq_m_sv8hi (__inactive, __a, __b, __p); | |
9756 | } | |
9757 | ||
9758 | __extension__ extern __inline uint8x16_t | |
9759 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9760 | __arm_vminq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
9761 | { | |
9762 | return __builtin_mve_vminq_m_uv16qi (__inactive, __a, __b, __p); | |
9763 | } | |
9764 | ||
9765 | __extension__ extern __inline uint32x4_t | |
9766 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9767 | __arm_vminq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
9768 | { | |
9769 | return __builtin_mve_vminq_m_uv4si (__inactive, __a, __b, __p); | |
9770 | } | |
9771 | ||
9772 | __extension__ extern __inline uint16x8_t | |
9773 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9774 | __arm_vminq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
9775 | { | |
9776 | return __builtin_mve_vminq_m_uv8hi (__inactive, __a, __b, __p); | |
9777 | } | |
9778 | ||
9779 | __extension__ extern __inline int32_t | |
9780 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9781 | __arm_vmladavaq_p_s8 (int32_t __a, int8x16_t __b, int8x16_t __c, mve_pred16_t __p) | |
9782 | { | |
9783 | return __builtin_mve_vmladavaq_p_sv16qi (__a, __b, __c, __p); | |
9784 | } | |
9785 | ||
9786 | __extension__ extern __inline int32_t | |
9787 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9788 | __arm_vmladavaq_p_s32 (int32_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p) | |
9789 | { | |
9790 | return __builtin_mve_vmladavaq_p_sv4si (__a, __b, __c, __p); | |
9791 | } | |
9792 | ||
9793 | __extension__ extern __inline int32_t | |
9794 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9795 | __arm_vmladavaq_p_s16 (int32_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p) | |
9796 | { | |
9797 | return __builtin_mve_vmladavaq_p_sv8hi (__a, __b, __c, __p); | |
9798 | } | |
9799 | ||
9800 | __extension__ extern __inline uint32_t | |
9801 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9802 | __arm_vmladavaq_p_u8 (uint32_t __a, uint8x16_t __b, uint8x16_t __c, mve_pred16_t __p) | |
9803 | { | |
9804 | return __builtin_mve_vmladavaq_p_uv16qi (__a, __b, __c, __p); | |
9805 | } | |
9806 | ||
9807 | __extension__ extern __inline uint32_t | |
9808 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9809 | __arm_vmladavaq_p_u32 (uint32_t __a, uint32x4_t __b, uint32x4_t __c, mve_pred16_t __p) | |
9810 | { | |
9811 | return __builtin_mve_vmladavaq_p_uv4si (__a, __b, __c, __p); | |
9812 | } | |
9813 | ||
9814 | __extension__ extern __inline uint32_t | |
9815 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9816 | __arm_vmladavaq_p_u16 (uint32_t __a, uint16x8_t __b, uint16x8_t __c, mve_pred16_t __p) | |
9817 | { | |
9818 | return __builtin_mve_vmladavaq_p_uv8hi (__a, __b, __c, __p); | |
9819 | } | |
9820 | ||
9821 | __extension__ extern __inline int32_t | |
9822 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9823 | __arm_vmladavaxq_p_s8 (int32_t __a, int8x16_t __b, int8x16_t __c, mve_pred16_t __p) | |
9824 | { | |
9825 | return __builtin_mve_vmladavaxq_p_sv16qi (__a, __b, __c, __p); | |
9826 | } | |
9827 | ||
9828 | __extension__ extern __inline int32_t | |
9829 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9830 | __arm_vmladavaxq_p_s32 (int32_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p) | |
9831 | { | |
9832 | return __builtin_mve_vmladavaxq_p_sv4si (__a, __b, __c, __p); | |
9833 | } | |
9834 | ||
9835 | __extension__ extern __inline int32_t | |
9836 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9837 | __arm_vmladavaxq_p_s16 (int32_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p) | |
9838 | { | |
9839 | return __builtin_mve_vmladavaxq_p_sv8hi (__a, __b, __c, __p); | |
9840 | } | |
9841 | ||
9842 | __extension__ extern __inline int8x16_t | |
9843 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9844 | __arm_vmlaq_m_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c, mve_pred16_t __p) | |
9845 | { | |
9846 | return __builtin_mve_vmlaq_m_n_sv16qi (__a, __b, __c, __p); | |
9847 | } | |
9848 | ||
9849 | __extension__ extern __inline int32x4_t | |
9850 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9851 | __arm_vmlaq_m_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c, mve_pred16_t __p) | |
9852 | { | |
9853 | return __builtin_mve_vmlaq_m_n_sv4si (__a, __b, __c, __p); | |
9854 | } | |
9855 | ||
9856 | __extension__ extern __inline int16x8_t | |
9857 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9858 | __arm_vmlaq_m_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c, mve_pred16_t __p) | |
9859 | { | |
9860 | return __builtin_mve_vmlaq_m_n_sv8hi (__a, __b, __c, __p); | |
9861 | } | |
9862 | ||
9863 | __extension__ extern __inline uint8x16_t | |
9864 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9865 | __arm_vmlaq_m_n_u8 (uint8x16_t __a, uint8x16_t __b, uint8_t __c, mve_pred16_t __p) | |
9866 | { | |
9867 | return __builtin_mve_vmlaq_m_n_uv16qi (__a, __b, __c, __p); | |
9868 | } | |
9869 | ||
9870 | __extension__ extern __inline uint32x4_t | |
9871 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9872 | __arm_vmlaq_m_n_u32 (uint32x4_t __a, uint32x4_t __b, uint32_t __c, mve_pred16_t __p) | |
9873 | { | |
9874 | return __builtin_mve_vmlaq_m_n_uv4si (__a, __b, __c, __p); | |
9875 | } | |
9876 | ||
9877 | __extension__ extern __inline uint16x8_t | |
9878 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9879 | __arm_vmlaq_m_n_u16 (uint16x8_t __a, uint16x8_t __b, uint16_t __c, mve_pred16_t __p) | |
9880 | { | |
9881 | return __builtin_mve_vmlaq_m_n_uv8hi (__a, __b, __c, __p); | |
9882 | } | |
9883 | ||
9884 | __extension__ extern __inline int8x16_t | |
9885 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9886 | __arm_vmlasq_m_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c, mve_pred16_t __p) | |
9887 | { | |
9888 | return __builtin_mve_vmlasq_m_n_sv16qi (__a, __b, __c, __p); | |
9889 | } | |
9890 | ||
9891 | __extension__ extern __inline int32x4_t | |
9892 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9893 | __arm_vmlasq_m_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c, mve_pred16_t __p) | |
9894 | { | |
9895 | return __builtin_mve_vmlasq_m_n_sv4si (__a, __b, __c, __p); | |
9896 | } | |
9897 | ||
9898 | __extension__ extern __inline int16x8_t | |
9899 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9900 | __arm_vmlasq_m_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c, mve_pred16_t __p) | |
9901 | { | |
9902 | return __builtin_mve_vmlasq_m_n_sv8hi (__a, __b, __c, __p); | |
9903 | } | |
9904 | ||
9905 | __extension__ extern __inline uint8x16_t | |
9906 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9907 | __arm_vmlasq_m_n_u8 (uint8x16_t __a, uint8x16_t __b, uint8_t __c, mve_pred16_t __p) | |
9908 | { | |
9909 | return __builtin_mve_vmlasq_m_n_uv16qi (__a, __b, __c, __p); | |
9910 | } | |
9911 | ||
9912 | __extension__ extern __inline uint32x4_t | |
9913 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9914 | __arm_vmlasq_m_n_u32 (uint32x4_t __a, uint32x4_t __b, uint32_t __c, mve_pred16_t __p) | |
9915 | { | |
9916 | return __builtin_mve_vmlasq_m_n_uv4si (__a, __b, __c, __p); | |
9917 | } | |
9918 | ||
9919 | __extension__ extern __inline uint16x8_t | |
9920 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9921 | __arm_vmlasq_m_n_u16 (uint16x8_t __a, uint16x8_t __b, uint16_t __c, mve_pred16_t __p) | |
9922 | { | |
9923 | return __builtin_mve_vmlasq_m_n_uv8hi (__a, __b, __c, __p); | |
9924 | } | |
9925 | ||
9926 | __extension__ extern __inline int32_t | |
9927 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9928 | __arm_vmlsdavaq_p_s8 (int32_t __a, int8x16_t __b, int8x16_t __c, mve_pred16_t __p) | |
9929 | { | |
9930 | return __builtin_mve_vmlsdavaq_p_sv16qi (__a, __b, __c, __p); | |
9931 | } | |
9932 | ||
9933 | __extension__ extern __inline int32_t | |
9934 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9935 | __arm_vmlsdavaq_p_s32 (int32_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p) | |
9936 | { | |
9937 | return __builtin_mve_vmlsdavaq_p_sv4si (__a, __b, __c, __p); | |
9938 | } | |
9939 | ||
9940 | __extension__ extern __inline int32_t | |
9941 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9942 | __arm_vmlsdavaq_p_s16 (int32_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p) | |
9943 | { | |
9944 | return __builtin_mve_vmlsdavaq_p_sv8hi (__a, __b, __c, __p); | |
9945 | } | |
9946 | ||
/* vmlsdavaxq (predicated): as vmlsdavaq but with the "exchange" (x)
   pairing of lanes per the ACLE naming scheme.  Thin forwarders to the
   per-element-type GCC builtins.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavaxq_p_s8 (int32_t __a, int8x16_t __b, int8x16_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmlsdavaxq_p_sv16qi (__a, __b, __c, __p);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavaxq_p_s32 (int32_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmlsdavaxq_p_sv4si (__a, __b, __c, __p);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavaxq_p_s16 (int32_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmlsdavaxq_p_sv8hi (__a, __b, __c, __p);
}
9967 | ||
/* vmulhq_m: "multiply returning high half" with merging predication (the
   _m suffix per ACLE naming: lanes whose predicate bit is clear take
   their value from __inactive).  Wrappers forward to the GCC builtin for
   each of the six element types.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulhq_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulhq_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulhq_m_sv8hi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulhq_m_uv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulhq_m_uv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulhq_m_uv8hi (__inactive, __a, __b, __p);
}
10009 | ||
/* vmullbq_int_m: widening integer multiply of the bottom (even) lanes,
   merging-predicated — note the result vector type is twice the element
   width of the operands, as the signatures below show.  Forwarders to
   the per-type GCC builtins.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_int_m_s8 (int16x8_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmullbq_int_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_int_m_s32 (int64x2_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmullbq_int_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_int_m_s16 (int32x4_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmullbq_int_m_sv8hi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_int_m_u8 (uint16x8_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmullbq_int_m_uv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_int_m_u32 (uint64x2_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmullbq_int_m_uv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_int_m_u16 (uint32x4_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmullbq_int_m_uv8hi (__inactive, __a, __b, __p);
}
10051 | ||
/* vmulltq_int_m: widening integer multiply of the top (odd) lanes,
   merging-predicated; mirrors vmullbq_int_m above with the same
   double-width result types.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int_m_s8 (int16x8_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulltq_int_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int_m_s32 (int64x2_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulltq_int_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int_m_s16 (int32x4_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulltq_int_m_sv8hi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int_m_u8 (uint16x8_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulltq_int_m_uv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int_m_u32 (uint64x2_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulltq_int_m_uv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int_m_u16 (uint32x4_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulltq_int_m_uv8hi (__inactive, __a, __b, __p);
}
10093 | ||
/* vmulq_m_n: merging-predicated multiply of vector __a by the scalar __b
   (the _n suffix marks a scalar second operand per ACLE naming).
   Forwarders to the per-type GCC builtins.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_n_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_n_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_n_sv8hi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_n_uv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_n_uv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_n_uv8hi (__inactive, __a, __b, __p);
}
10135 | ||
/* vmulq_m: merging-predicated lane-wise multiply of two vectors;
   inactive lanes come from __inactive.  Forwarders to the per-type GCC
   builtins.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_sv8hi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_uv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_uv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_uv8hi (__inactive, __a, __b, __p);
}
10177 | ||
/* vornq_m: merging-predicated bitwise OR of __a with the complement of
   __b (OR-NOT, per ACLE naming).  Forwarders to the per-type GCC
   builtins.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vornq_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vornq_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vornq_m_sv8hi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vornq_m_uv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vornq_m_uv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vornq_m_uv8hi (__inactive, __a, __b, __p);
}
10219 | ||
/* vorrq_m: merging-predicated bitwise OR of two vectors.  Forwarders to
   the per-type GCC builtins.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vorrq_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vorrq_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vorrq_m_sv8hi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vorrq_m_uv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vorrq_m_uv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vorrq_m_uv8hi (__inactive, __a, __b, __p);
}
10261 | ||
/* vqaddq_m_n: merging-predicated saturating add of vector __a and scalar
   __b (q prefix = saturating, _n = scalar operand, per ACLE naming).
   Forwarders to the per-type GCC builtins.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqaddq_m_n_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqaddq_m_n_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqaddq_m_n_sv8hi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqaddq_m_n_uv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqaddq_m_n_uv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqaddq_m_n_uv8hi (__inactive, __a, __b, __p);
}
10303 | ||
/* vqaddq_m: merging-predicated saturating add of two vectors.
   Forwarders to the per-type GCC builtins.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqaddq_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqaddq_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqaddq_m_sv8hi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqaddq_m_uv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqaddq_m_uv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqaddq_m_uv8hi (__inactive, __a, __b, __p);
}
10345 | ||
/* vqdmladhq_m: merging-predicated saturating doubling multiply-add dual
   (signed types only, as the ACLE defines this family).  Forwarders to
   the per-type GCC builtins.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmladhq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqdmladhq_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmladhq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqdmladhq_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmladhq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqdmladhq_m_sv8hi (__inactive, __a, __b, __p);
}
10366 | ||
/* vqdmladhxq_m: as vqdmladhq_m but with exchanged (x) lane pairing.
   Forwarders to the per-type GCC builtins.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmladhxq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqdmladhxq_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmladhxq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqdmladhxq_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmladhxq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqdmladhxq_m_sv8hi (__inactive, __a, __b, __p);
}
10387 | ||
/* vqdmlahq_m_n: predicated saturating doubling multiply-accumulate with
   scalar __c; note the argument order (__a, __b, __c) — __a is the
   accumulator vector, unlike the __inactive-first families above.
   Forwarders to the per-type GCC builtins.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlahq_m_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vqdmlahq_m_n_sv16qi (__a, __b, __c, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlahq_m_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vqdmlahq_m_n_sv4si (__a, __b, __c, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlahq_m_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vqdmlahq_m_n_sv8hi (__a, __b, __c, __p);
}
10408 | ||
/* vqdmlsdhq_m: merging-predicated saturating doubling multiply-subtract
   dual.  Forwarders to the per-type GCC builtins.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlsdhq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqdmlsdhq_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlsdhq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqdmlsdhq_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlsdhq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqdmlsdhq_m_sv8hi (__inactive, __a, __b, __p);
}
10429 | ||
/* vqdmlsdhxq_m: as vqdmlsdhq_m but with exchanged (x) lane pairing.
   Forwarders to the per-type GCC builtins.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlsdhxq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqdmlsdhxq_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlsdhxq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqdmlsdhxq_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlsdhxq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqdmlsdhxq_m_sv8hi (__inactive, __a, __b, __p);
}
10450 | ||
/* vqdmulhq_m_n: merging-predicated saturating doubling multiply
   returning high half, with scalar __b.  Forwarders to the per-type GCC
   builtins.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulhq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqdmulhq_m_n_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulhq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqdmulhq_m_n_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulhq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqdmulhq_m_n_sv8hi (__inactive, __a, __b, __p);
}
10471 | ||
/* vqdmulhq_m: merging-predicated saturating doubling multiply returning
   high half, vector-by-vector form.  Forwarders to the per-type GCC
   builtins.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulhq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqdmulhq_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulhq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqdmulhq_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulhq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqdmulhq_m_sv8hi (__inactive, __a, __b, __p);
}
10492 | ||
/* vqrdmladhq_m: rounding (r) variant of vqdmladhq_m — saturating
   rounding doubling multiply-add dual, merging-predicated.  Forwarders
   to the per-type GCC builtins.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmladhq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqrdmladhq_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmladhq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqrdmladhq_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmladhq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqrdmladhq_m_sv8hi (__inactive, __a, __b, __p);
}
10513 | ||
/* vqrdmladhxq_m: as vqrdmladhq_m but with exchanged (x) lane pairing.
   Forwarders to the per-type GCC builtins.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmladhxq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqrdmladhxq_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmladhxq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqrdmladhxq_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmladhxq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqrdmladhxq_m_sv8hi (__inactive, __a, __b, __p);
}
10534 | ||
/* vqrdmlahq_m_n: predicated saturating rounding doubling
   multiply-accumulate with scalar __c; __a is the accumulator vector
   (no separate __inactive operand in this family).  Forwarders to the
   per-type GCC builtins.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlahq_m_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vqrdmlahq_m_n_sv16qi (__a, __b, __c, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlahq_m_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vqrdmlahq_m_n_sv4si (__a, __b, __c, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlahq_m_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vqrdmlahq_m_n_sv8hi (__a, __b, __c, __p);
}
10555 | ||
/* vqrdmlashq_m_n: companion to vqrdmlahq_m_n with the alternative
   operand pairing (per ACLE, the scalar multiplies the accumulator
   rather than the second vector — confirm against the ACLE spec).
   Forwarders to the per-type GCC builtins.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlashq_m_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vqrdmlashq_m_n_sv16qi (__a, __b, __c, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlashq_m_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vqrdmlashq_m_n_sv4si (__a, __b, __c, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlashq_m_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vqrdmlashq_m_n_sv8hi (__a, __b, __c, __p);
}
10576 | ||
/* vqrdmlsdhq_m: merging-predicated saturating rounding doubling
   multiply-subtract dual.  Forwarders to the per-type GCC builtins.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlsdhq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqrdmlsdhq_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlsdhq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqrdmlsdhq_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlsdhq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqrdmlsdhq_m_sv8hi (__inactive, __a, __b, __p);
}
10597 | ||
/* vqrdmlsdhxq_m: as vqrdmlsdhq_m but with exchanged (x) lane pairing.
   Forwarders to the per-type GCC builtins.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlsdhxq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqrdmlsdhxq_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlsdhxq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqrdmlsdhxq_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlsdhxq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqrdmlsdhxq_m_sv8hi (__inactive, __a, __b, __p);
}
10618 | ||
/* Predicated (_m) scalar-operand (_n) vqrdmulhq intrinsics: vector by
   scalar __b, with __inactive providing results for predicated-off
   lanes.  Pure forwarding wrappers over the GCC MVE builtins.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmulhq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqrdmulhq_m_n_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmulhq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqrdmulhq_m_n_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmulhq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqrdmulhq_m_n_sv8hi (__inactive, __a, __b, __p);
}
10639 | ||
/* Predicated (_m) vector-by-vector vqrdmulhq intrinsics for signed
   8/32/16-bit elements; each simply calls the per-mode GCC builtin.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmulhq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqrdmulhq_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmulhq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqrdmulhq_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmulhq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqrdmulhq_m_sv8hi (__inactive, __a, __b, __p);
}
10660 | ||
/* Predicated (_m) vqrshlq intrinsics for all six element types.
   Note: the unsigned variants still take a *signed* shift vector __b,
   matching the ACLE signatures.  All are forwarding wrappers.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqrshlq_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqrshlq_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqrshlq_m_sv8hi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqrshlq_m_uv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqrshlq_m_uv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqrshlq_m_uv8hi (__inactive, __a, __b, __p);
}
10702 | ||
/* Predicated (_m) immediate-shift (_n) vqshlq intrinsics.  __imm must
   be a compile-time constant (declared `const int`; range checking is
   done by the builtin expander, not here).  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vqshlq_m_n_sv16qi (__inactive, __a, __imm, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vqshlq_m_n_sv4si (__inactive, __a, __imm, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vqshlq_m_n_sv8hi (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vqshlq_m_n_uv16qi (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vqshlq_m_n_uv4si (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vqshlq_m_n_uv8hi (__inactive, __a, __imm, __p);
}
10744 | ||
/* Predicated (_m) vector-shift vqshlq intrinsics.  As with vqrshlq,
   the unsigned data variants take signed per-lane shift amounts in
   __b, per the ACLE signatures.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqshlq_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqshlq_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqshlq_m_sv8hi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqshlq_m_uv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqshlq_m_uv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqshlq_m_uv8hi (__inactive, __a, __b, __p);
}
10786 | ||
/* Predicated (_m) scalar-operand (_n) vqsubq intrinsics for all six
   element types; direct forwarding to the GCC MVE builtins.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqsubq_m_n_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqsubq_m_n_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqsubq_m_n_sv8hi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqsubq_m_n_uv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqsubq_m_n_uv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqsubq_m_n_uv8hi (__inactive, __a, __b, __p);
}
10828 | ||
/* Predicated (_m) vector-by-vector vqsubq intrinsics for all six
   element types; direct forwarding to the GCC MVE builtins.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqsubq_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqsubq_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqsubq_m_sv8hi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqsubq_m_uv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqsubq_m_uv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqsubq_m_uv8hi (__inactive, __a, __b, __p);
}
10870 | ||
/* Predicated (_m) vrhaddq intrinsics for all six element types;
   direct forwarding wrappers over the GCC MVE builtins.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrhaddq_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrhaddq_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrhaddq_m_sv8hi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrhaddq_m_uv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrhaddq_m_uv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrhaddq_m_uv8hi (__inactive, __a, __b, __p);
}
10912 | ||
/* Predicated (_m) vrmulhq intrinsics for all six element types;
   direct forwarding wrappers over the GCC MVE builtins.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrmulhq_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrmulhq_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrmulhq_m_sv8hi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrmulhq_m_uv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrmulhq_m_uv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrmulhq_m_uv8hi (__inactive, __a, __b, __p);
}
10954 | ||
/* Predicated (_m) vrshlq intrinsics.  Unsigned data variants take
   signed per-lane shift vectors __b, matching the ACLE signatures.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrshlq_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrshlq_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrshlq_m_sv8hi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrshlq_m_uv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrshlq_m_uv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrshlq_m_uv8hi (__inactive, __a, __b, __p);
}
10996 | ||
/* Predicated (_m) immediate right-shift (_n) vrshrq intrinsics.
   __imm must be a compile-time constant; validity is enforced by the
   builtin expander.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vrshrq_m_n_sv16qi (__inactive, __a, __imm, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vrshrq_m_n_sv4si (__inactive, __a, __imm, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vrshrq_m_n_sv8hi (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vrshrq_m_n_uv16qi (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vrshrq_m_n_uv4si (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vrshrq_m_n_uv8hi (__inactive, __a, __imm, __p);
}
11038 | ||
/* Predicated (_m) immediate left-shift (_n) vshlq intrinsics;
   forwarding wrappers over the per-mode GCC builtins.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshlq_m_n_sv16qi (__inactive, __a, __imm, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshlq_m_n_sv4si (__inactive, __a, __imm, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshlq_m_n_sv8hi (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshlq_m_n_uv16qi (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshlq_m_n_uv4si (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshlq_m_n_uv8hi (__inactive, __a, __imm, __p);
}
11080 | ||
/* Predicated (_m) immediate right-shift (_n) vshrq intrinsics;
   forwarding wrappers over the per-mode GCC builtins.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshrq_m_n_sv16qi (__inactive, __a, __imm, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshrq_m_n_sv4si (__inactive, __a, __imm, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshrq_m_n_sv8hi (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshrq_m_n_uv16qi (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshrq_m_n_uv4si (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshrq_m_n_uv8hi (__inactive, __a, __imm, __p);
}
11122 | ||
/* Predicated (_m) immediate shift-left-insert (vsliq) intrinsics.
   Unlike most _m wrappers here these take no separate __inactive
   operand: the destination __a doubles as the merge source, matching
   the builtin's two-vector signature.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsliq_m_n_s8 (int8x16_t __a, int8x16_t __b, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vsliq_m_n_sv16qi (__a, __b, __imm, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsliq_m_n_s32 (int32x4_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vsliq_m_n_sv4si (__a, __b, __imm, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsliq_m_n_s16 (int16x8_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vsliq_m_n_sv8hi (__a, __b, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsliq_m_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vsliq_m_n_uv16qi (__a, __b, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsliq_m_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vsliq_m_n_uv4si (__a, __b, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsliq_m_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vsliq_m_n_uv8hi (__a, __b, __imm, __p);
}
11164 | ||
/* Predicated (_m) scalar-operand (_n) vsubq intrinsics for all six
   element types; direct forwarding to the GCC MVE builtins.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vsubq_m_n_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vsubq_m_n_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vsubq_m_n_sv8hi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vsubq_m_n_uv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vsubq_m_n_uv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vsubq_m_n_uv8hi (__inactive, __a, __b, __p);
}
11206 | ||
f2170a37 SP |
/* Predicated (_p) vmlaldavaq intrinsics: 64-bit scalar accumulator
   __a plus two vectors, reduced to a 64-bit result by the builtin.
   Only 32- and 16-bit element widths exist for this family.  */
__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaldavaq_p_s32 (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmlaldavaq_p_sv4si (__a, __b, __c, __p);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaldavaq_p_s16 (int64_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmlaldavaq_p_sv8hi (__a, __b, __c, __p);
}

__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaldavaq_p_u32 (uint64_t __a, uint32x4_t __b, uint32x4_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmlaldavaq_p_uv4si (__a, __b, __c, __p);
}

__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaldavaq_p_u16 (uint64_t __a, uint16x8_t __b, uint16x8_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmlaldavaq_p_uv8hi (__a, __b, __c, __p);
}
11234 | ||
11235 | __extension__ extern __inline int64_t | |
11236 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11237 | __arm_vmlaldavaxq_p_s32 (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p) | |
11238 | { | |
11239 | return __builtin_mve_vmlaldavaxq_p_sv4si (__a, __b, __c, __p); | |
11240 | } | |
11241 | ||
11242 | __extension__ extern __inline int64_t | |
11243 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11244 | __arm_vmlaldavaxq_p_s16 (int64_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p) | |
11245 | { | |
11246 | return __builtin_mve_vmlaldavaxq_p_sv8hi (__a, __b, __c, __p); | |
11247 | } | |
11248 | ||
11249 | __extension__ extern __inline uint64_t | |
11250 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11251 | __arm_vmlaldavaxq_p_u32 (uint64_t __a, uint32x4_t __b, uint32x4_t __c, mve_pred16_t __p) | |
11252 | { | |
11253 | return __builtin_mve_vmlaldavaxq_p_uv4si (__a, __b, __c, __p); | |
11254 | } | |
11255 | ||
11256 | __extension__ extern __inline uint64_t | |
11257 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11258 | __arm_vmlaldavaxq_p_u16 (uint64_t __a, uint16x8_t __b, uint16x8_t __c, mve_pred16_t __p) | |
11259 | { | |
11260 | return __builtin_mve_vmlaldavaxq_p_uv8hi (__a, __b, __c, __p); | |
11261 | } | |
11262 | ||
11263 | __extension__ extern __inline int64_t | |
11264 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11265 | __arm_vmlsldavaq_p_s32 (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p) | |
11266 | { | |
11267 | return __builtin_mve_vmlsldavaq_p_sv4si (__a, __b, __c, __p); | |
11268 | } | |
11269 | ||
11270 | __extension__ extern __inline int64_t | |
11271 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11272 | __arm_vmlsldavaq_p_s16 (int64_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p) | |
11273 | { | |
11274 | return __builtin_mve_vmlsldavaq_p_sv8hi (__a, __b, __c, __p); | |
11275 | } | |
11276 | ||
11277 | __extension__ extern __inline int64_t | |
11278 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11279 | __arm_vmlsldavaxq_p_s32 (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p) | |
11280 | { | |
11281 | return __builtin_mve_vmlsldavaxq_p_sv4si (__a, __b, __c, __p); | |
11282 | } | |
11283 | ||
11284 | __extension__ extern __inline int64_t | |
11285 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11286 | __arm_vmlsldavaxq_p_s16 (int64_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p) | |
11287 | { | |
11288 | return __builtin_mve_vmlsldavaxq_p_sv8hi (__a, __b, __c, __p); | |
11289 | } | |
11290 | ||
11291 | __extension__ extern __inline uint16x8_t | |
11292 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11293 | __arm_vmullbq_poly_m_p8 (uint16x8_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
11294 | { | |
11295 | return __builtin_mve_vmullbq_poly_m_pv16qi (__inactive, __a, __b, __p); | |
11296 | } | |
11297 | ||
11298 | __extension__ extern __inline uint32x4_t | |
11299 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11300 | __arm_vmullbq_poly_m_p16 (uint32x4_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
11301 | { | |
11302 | return __builtin_mve_vmullbq_poly_m_pv8hi (__inactive, __a, __b, __p); | |
11303 | } | |
11304 | ||
11305 | __extension__ extern __inline uint16x8_t | |
11306 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11307 | __arm_vmulltq_poly_m_p8 (uint16x8_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
11308 | { | |
11309 | return __builtin_mve_vmulltq_poly_m_pv16qi (__inactive, __a, __b, __p); | |
11310 | } | |
11311 | ||
11312 | __extension__ extern __inline uint32x4_t | |
11313 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11314 | __arm_vmulltq_poly_m_p16 (uint32x4_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
11315 | { | |
11316 | return __builtin_mve_vmulltq_poly_m_pv8hi (__inactive, __a, __b, __p); | |
11317 | } | |
11318 | ||
11319 | __extension__ extern __inline int64x2_t | |
11320 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11321 | __arm_vqdmullbq_m_n_s32 (int64x2_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p) | |
11322 | { | |
11323 | return __builtin_mve_vqdmullbq_m_n_sv4si (__inactive, __a, __b, __p); | |
11324 | } | |
11325 | ||
11326 | __extension__ extern __inline int32x4_t | |
11327 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11328 | __arm_vqdmullbq_m_n_s16 (int32x4_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p) | |
11329 | { | |
11330 | return __builtin_mve_vqdmullbq_m_n_sv8hi (__inactive, __a, __b, __p); | |
11331 | } | |
11332 | ||
11333 | __extension__ extern __inline int64x2_t | |
11334 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11335 | __arm_vqdmullbq_m_s32 (int64x2_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
11336 | { | |
11337 | return __builtin_mve_vqdmullbq_m_sv4si (__inactive, __a, __b, __p); | |
11338 | } | |
11339 | ||
11340 | __extension__ extern __inline int32x4_t | |
11341 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11342 | __arm_vqdmullbq_m_s16 (int32x4_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
11343 | { | |
11344 | return __builtin_mve_vqdmullbq_m_sv8hi (__inactive, __a, __b, __p); | |
11345 | } | |
11346 | ||
11347 | __extension__ extern __inline int64x2_t | |
11348 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11349 | __arm_vqdmulltq_m_n_s32 (int64x2_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p) | |
11350 | { | |
11351 | return __builtin_mve_vqdmulltq_m_n_sv4si (__inactive, __a, __b, __p); | |
11352 | } | |
11353 | ||
11354 | __extension__ extern __inline int32x4_t | |
11355 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11356 | __arm_vqdmulltq_m_n_s16 (int32x4_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p) | |
11357 | { | |
11358 | return __builtin_mve_vqdmulltq_m_n_sv8hi (__inactive, __a, __b, __p); | |
11359 | } | |
11360 | ||
11361 | __extension__ extern __inline int64x2_t | |
11362 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11363 | __arm_vqdmulltq_m_s32 (int64x2_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
11364 | { | |
11365 | return __builtin_mve_vqdmulltq_m_sv4si (__inactive, __a, __b, __p); | |
11366 | } | |
11367 | ||
11368 | __extension__ extern __inline int32x4_t | |
11369 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11370 | __arm_vqdmulltq_m_s16 (int32x4_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
11371 | { | |
11372 | return __builtin_mve_vqdmulltq_m_sv8hi (__inactive, __a, __b, __p); | |
11373 | } | |
11374 | ||
11375 | __extension__ extern __inline int16x8_t | |
11376 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11377 | __arm_vqrshrnbq_m_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p) | |
11378 | { | |
11379 | return __builtin_mve_vqrshrnbq_m_n_sv4si (__a, __b, __imm, __p); | |
11380 | } | |
11381 | ||
11382 | __extension__ extern __inline int8x16_t | |
11383 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11384 | __arm_vqrshrnbq_m_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p) | |
11385 | { | |
11386 | return __builtin_mve_vqrshrnbq_m_n_sv8hi (__a, __b, __imm, __p); | |
11387 | } | |
11388 | ||
11389 | __extension__ extern __inline uint16x8_t | |
11390 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11391 | __arm_vqrshrnbq_m_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p) | |
11392 | { | |
11393 | return __builtin_mve_vqrshrnbq_m_n_uv4si (__a, __b, __imm, __p); | |
11394 | } | |
11395 | ||
11396 | __extension__ extern __inline uint8x16_t | |
11397 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11398 | __arm_vqrshrnbq_m_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p) | |
11399 | { | |
11400 | return __builtin_mve_vqrshrnbq_m_n_uv8hi (__a, __b, __imm, __p); | |
11401 | } | |
11402 | ||
11403 | __extension__ extern __inline int16x8_t | |
11404 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11405 | __arm_vqrshrntq_m_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p) | |
11406 | { | |
11407 | return __builtin_mve_vqrshrntq_m_n_sv4si (__a, __b, __imm, __p); | |
11408 | } | |
11409 | ||
11410 | __extension__ extern __inline int8x16_t | |
11411 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11412 | __arm_vqrshrntq_m_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p) | |
11413 | { | |
11414 | return __builtin_mve_vqrshrntq_m_n_sv8hi (__a, __b, __imm, __p); | |
11415 | } | |
11416 | ||
11417 | __extension__ extern __inline uint16x8_t | |
11418 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11419 | __arm_vqrshrntq_m_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p) | |
11420 | { | |
11421 | return __builtin_mve_vqrshrntq_m_n_uv4si (__a, __b, __imm, __p); | |
11422 | } | |
11423 | ||
11424 | __extension__ extern __inline uint8x16_t | |
11425 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11426 | __arm_vqrshrntq_m_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p) | |
11427 | { | |
11428 | return __builtin_mve_vqrshrntq_m_n_uv8hi (__a, __b, __imm, __p); | |
11429 | } | |
11430 | ||
11431 | __extension__ extern __inline uint16x8_t | |
11432 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11433 | __arm_vqrshrunbq_m_n_s32 (uint16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p) | |
11434 | { | |
11435 | return __builtin_mve_vqrshrunbq_m_n_sv4si (__a, __b, __imm, __p); | |
11436 | } | |
11437 | ||
11438 | __extension__ extern __inline uint8x16_t | |
11439 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11440 | __arm_vqrshrunbq_m_n_s16 (uint8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p) | |
11441 | { | |
11442 | return __builtin_mve_vqrshrunbq_m_n_sv8hi (__a, __b, __imm, __p); | |
11443 | } | |
11444 | ||
11445 | __extension__ extern __inline uint16x8_t | |
11446 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11447 | __arm_vqrshruntq_m_n_s32 (uint16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p) | |
11448 | { | |
11449 | return __builtin_mve_vqrshruntq_m_n_sv4si (__a, __b, __imm, __p); | |
11450 | } | |
11451 | ||
11452 | __extension__ extern __inline uint8x16_t | |
11453 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11454 | __arm_vqrshruntq_m_n_s16 (uint8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p) | |
11455 | { | |
11456 | return __builtin_mve_vqrshruntq_m_n_sv8hi (__a, __b, __imm, __p); | |
11457 | } | |
11458 | ||
11459 | __extension__ extern __inline int16x8_t | |
11460 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11461 | __arm_vqshrnbq_m_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p) | |
11462 | { | |
11463 | return __builtin_mve_vqshrnbq_m_n_sv4si (__a, __b, __imm, __p); | |
11464 | } | |
11465 | ||
11466 | __extension__ extern __inline int8x16_t | |
11467 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11468 | __arm_vqshrnbq_m_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p) | |
11469 | { | |
11470 | return __builtin_mve_vqshrnbq_m_n_sv8hi (__a, __b, __imm, __p); | |
11471 | } | |
11472 | ||
11473 | __extension__ extern __inline uint16x8_t | |
11474 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11475 | __arm_vqshrnbq_m_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p) | |
11476 | { | |
11477 | return __builtin_mve_vqshrnbq_m_n_uv4si (__a, __b, __imm, __p); | |
11478 | } | |
11479 | ||
11480 | __extension__ extern __inline uint8x16_t | |
11481 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11482 | __arm_vqshrnbq_m_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p) | |
11483 | { | |
11484 | return __builtin_mve_vqshrnbq_m_n_uv8hi (__a, __b, __imm, __p); | |
11485 | } | |
11486 | ||
11487 | __extension__ extern __inline int16x8_t | |
11488 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11489 | __arm_vqshrntq_m_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p) | |
11490 | { | |
11491 | return __builtin_mve_vqshrntq_m_n_sv4si (__a, __b, __imm, __p); | |
11492 | } | |
11493 | ||
11494 | __extension__ extern __inline int8x16_t | |
11495 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11496 | __arm_vqshrntq_m_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p) | |
11497 | { | |
11498 | return __builtin_mve_vqshrntq_m_n_sv8hi (__a, __b, __imm, __p); | |
11499 | } | |
11500 | ||
11501 | __extension__ extern __inline uint16x8_t | |
11502 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11503 | __arm_vqshrntq_m_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p) | |
11504 | { | |
11505 | return __builtin_mve_vqshrntq_m_n_uv4si (__a, __b, __imm, __p); | |
11506 | } | |
11507 | ||
11508 | __extension__ extern __inline uint8x16_t | |
11509 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11510 | __arm_vqshrntq_m_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p) | |
11511 | { | |
11512 | return __builtin_mve_vqshrntq_m_n_uv8hi (__a, __b, __imm, __p); | |
11513 | } | |
11514 | ||
11515 | __extension__ extern __inline uint16x8_t | |
11516 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11517 | __arm_vqshrunbq_m_n_s32 (uint16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p) | |
11518 | { | |
11519 | return __builtin_mve_vqshrunbq_m_n_sv4si (__a, __b, __imm, __p); | |
11520 | } | |
11521 | ||
11522 | __extension__ extern __inline uint8x16_t | |
11523 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11524 | __arm_vqshrunbq_m_n_s16 (uint8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p) | |
11525 | { | |
11526 | return __builtin_mve_vqshrunbq_m_n_sv8hi (__a, __b, __imm, __p); | |
11527 | } | |
11528 | ||
11529 | __extension__ extern __inline uint16x8_t | |
11530 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11531 | __arm_vqshruntq_m_n_s32 (uint16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p) | |
11532 | { | |
11533 | return __builtin_mve_vqshruntq_m_n_sv4si (__a, __b, __imm, __p); | |
11534 | } | |
11535 | ||
11536 | __extension__ extern __inline uint8x16_t | |
11537 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11538 | __arm_vqshruntq_m_n_s16 (uint8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p) | |
11539 | { | |
11540 | return __builtin_mve_vqshruntq_m_n_sv8hi (__a, __b, __imm, __p); | |
11541 | } | |
11542 | ||
11543 | __extension__ extern __inline int64_t | |
11544 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11545 | __arm_vrmlaldavhaq_p_s32 (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p) | |
11546 | { | |
11547 | return __builtin_mve_vrmlaldavhaq_p_sv4si (__a, __b, __c, __p); | |
11548 | } | |
11549 | ||
11550 | __extension__ extern __inline uint64_t | |
11551 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11552 | __arm_vrmlaldavhaq_p_u32 (uint64_t __a, uint32x4_t __b, uint32x4_t __c, mve_pred16_t __p) | |
11553 | { | |
11554 | return __builtin_mve_vrmlaldavhaq_p_uv4si (__a, __b, __c, __p); | |
11555 | } | |
11556 | ||
11557 | __extension__ extern __inline int64_t | |
11558 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11559 | __arm_vrmlaldavhaxq_p_s32 (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p) | |
11560 | { | |
11561 | return __builtin_mve_vrmlaldavhaxq_p_sv4si (__a, __b, __c, __p); | |
11562 | } | |
11563 | ||
11564 | __extension__ extern __inline int64_t | |
11565 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11566 | __arm_vrmlsldavhaq_p_s32 (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p) | |
11567 | { | |
11568 | return __builtin_mve_vrmlsldavhaq_p_sv4si (__a, __b, __c, __p); | |
11569 | } | |
11570 | ||
11571 | __extension__ extern __inline int64_t | |
11572 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11573 | __arm_vrmlsldavhaxq_p_s32 (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p) | |
11574 | { | |
11575 | return __builtin_mve_vrmlsldavhaxq_p_sv4si (__a, __b, __c, __p); | |
11576 | } | |
11577 | ||
11578 | __extension__ extern __inline int16x8_t | |
11579 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11580 | __arm_vrshrnbq_m_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p) | |
11581 | { | |
11582 | return __builtin_mve_vrshrnbq_m_n_sv4si (__a, __b, __imm, __p); | |
11583 | } | |
11584 | ||
11585 | __extension__ extern __inline int8x16_t | |
11586 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11587 | __arm_vrshrnbq_m_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p) | |
11588 | { | |
11589 | return __builtin_mve_vrshrnbq_m_n_sv8hi (__a, __b, __imm, __p); | |
11590 | } | |
11591 | ||
11592 | __extension__ extern __inline uint16x8_t | |
11593 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11594 | __arm_vrshrnbq_m_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p) | |
11595 | { | |
11596 | return __builtin_mve_vrshrnbq_m_n_uv4si (__a, __b, __imm, __p); | |
11597 | } | |
11598 | ||
11599 | __extension__ extern __inline uint8x16_t | |
11600 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11601 | __arm_vrshrnbq_m_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p) | |
11602 | { | |
11603 | return __builtin_mve_vrshrnbq_m_n_uv8hi (__a, __b, __imm, __p); | |
11604 | } | |
11605 | ||
11606 | __extension__ extern __inline int16x8_t | |
11607 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11608 | __arm_vrshrntq_m_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p) | |
11609 | { | |
11610 | return __builtin_mve_vrshrntq_m_n_sv4si (__a, __b, __imm, __p); | |
11611 | } | |
11612 | ||
11613 | __extension__ extern __inline int8x16_t | |
11614 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11615 | __arm_vrshrntq_m_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p) | |
11616 | { | |
11617 | return __builtin_mve_vrshrntq_m_n_sv8hi (__a, __b, __imm, __p); | |
11618 | } | |
11619 | ||
11620 | __extension__ extern __inline uint16x8_t | |
11621 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11622 | __arm_vrshrntq_m_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p) | |
11623 | { | |
11624 | return __builtin_mve_vrshrntq_m_n_uv4si (__a, __b, __imm, __p); | |
11625 | } | |
11626 | ||
11627 | __extension__ extern __inline uint8x16_t | |
11628 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11629 | __arm_vrshrntq_m_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p) | |
11630 | { | |
11631 | return __builtin_mve_vrshrntq_m_n_uv8hi (__a, __b, __imm, __p); | |
11632 | } | |
11633 | ||
11634 | __extension__ extern __inline int16x8_t | |
11635 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11636 | __arm_vshllbq_m_n_s8 (int16x8_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p) | |
11637 | { | |
11638 | return __builtin_mve_vshllbq_m_n_sv16qi (__inactive, __a, __imm, __p); | |
11639 | } | |
11640 | ||
11641 | __extension__ extern __inline int32x4_t | |
11642 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11643 | __arm_vshllbq_m_n_s16 (int32x4_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p) | |
11644 | { | |
11645 | return __builtin_mve_vshllbq_m_n_sv8hi (__inactive, __a, __imm, __p); | |
11646 | } | |
11647 | ||
11648 | __extension__ extern __inline uint16x8_t | |
11649 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11650 | __arm_vshllbq_m_n_u8 (uint16x8_t __inactive, uint8x16_t __a, const int __imm, mve_pred16_t __p) | |
11651 | { | |
11652 | return __builtin_mve_vshllbq_m_n_uv16qi (__inactive, __a, __imm, __p); | |
11653 | } | |
11654 | ||
11655 | __extension__ extern __inline uint32x4_t | |
11656 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11657 | __arm_vshllbq_m_n_u16 (uint32x4_t __inactive, uint16x8_t __a, const int __imm, mve_pred16_t __p) | |
11658 | { | |
11659 | return __builtin_mve_vshllbq_m_n_uv8hi (__inactive, __a, __imm, __p); | |
11660 | } | |
11661 | ||
11662 | __extension__ extern __inline int16x8_t | |
11663 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11664 | __arm_vshlltq_m_n_s8 (int16x8_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p) | |
11665 | { | |
11666 | return __builtin_mve_vshlltq_m_n_sv16qi (__inactive, __a, __imm, __p); | |
11667 | } | |
11668 | ||
11669 | __extension__ extern __inline int32x4_t | |
11670 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11671 | __arm_vshlltq_m_n_s16 (int32x4_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p) | |
11672 | { | |
11673 | return __builtin_mve_vshlltq_m_n_sv8hi (__inactive, __a, __imm, __p); | |
11674 | } | |
11675 | ||
11676 | __extension__ extern __inline uint16x8_t | |
11677 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11678 | __arm_vshlltq_m_n_u8 (uint16x8_t __inactive, uint8x16_t __a, const int __imm, mve_pred16_t __p) | |
11679 | { | |
11680 | return __builtin_mve_vshlltq_m_n_uv16qi (__inactive, __a, __imm, __p); | |
11681 | } | |
11682 | ||
11683 | __extension__ extern __inline uint32x4_t | |
11684 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11685 | __arm_vshlltq_m_n_u16 (uint32x4_t __inactive, uint16x8_t __a, const int __imm, mve_pred16_t __p) | |
11686 | { | |
11687 | return __builtin_mve_vshlltq_m_n_uv8hi (__inactive, __a, __imm, __p); | |
11688 | } | |
11689 | ||
11690 | __extension__ extern __inline int16x8_t | |
11691 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11692 | __arm_vshrnbq_m_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p) | |
11693 | { | |
11694 | return __builtin_mve_vshrnbq_m_n_sv4si (__a, __b, __imm, __p); | |
11695 | } | |
11696 | ||
11697 | __extension__ extern __inline int8x16_t | |
11698 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11699 | __arm_vshrnbq_m_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p) | |
11700 | { | |
11701 | return __builtin_mve_vshrnbq_m_n_sv8hi (__a, __b, __imm, __p); | |
11702 | } | |
11703 | ||
11704 | __extension__ extern __inline uint16x8_t | |
11705 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11706 | __arm_vshrnbq_m_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p) | |
11707 | { | |
11708 | return __builtin_mve_vshrnbq_m_n_uv4si (__a, __b, __imm, __p); | |
11709 | } | |
11710 | ||
11711 | __extension__ extern __inline uint8x16_t | |
11712 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11713 | __arm_vshrnbq_m_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p) | |
11714 | { | |
11715 | return __builtin_mve_vshrnbq_m_n_uv8hi (__a, __b, __imm, __p); | |
11716 | } | |
11717 | ||
11718 | __extension__ extern __inline int16x8_t | |
11719 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11720 | __arm_vshrntq_m_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p) | |
11721 | { | |
11722 | return __builtin_mve_vshrntq_m_n_sv4si (__a, __b, __imm, __p); | |
11723 | } | |
11724 | ||
11725 | __extension__ extern __inline int8x16_t | |
11726 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11727 | __arm_vshrntq_m_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p) | |
11728 | { | |
11729 | return __builtin_mve_vshrntq_m_n_sv8hi (__a, __b, __imm, __p); | |
11730 | } | |
11731 | ||
11732 | __extension__ extern __inline uint16x8_t | |
11733 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11734 | __arm_vshrntq_m_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p) | |
11735 | { | |
11736 | return __builtin_mve_vshrntq_m_n_uv4si (__a, __b, __imm, __p); | |
11737 | } | |
11738 | ||
11739 | __extension__ extern __inline uint8x16_t | |
11740 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11741 | __arm_vshrntq_m_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p) | |
11742 | { | |
11743 | return __builtin_mve_vshrntq_m_n_uv8hi (__a, __b, __imm, __p); | |
11744 | } | |
11745 | ||
4ff68575 SP |
11746 | __extension__ extern __inline void |
11747 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11748 | __arm_vstrbq_scatter_offset_s8 (int8_t * __base, uint8x16_t __offset, int8x16_t __value) | |
11749 | { | |
11750 | __builtin_mve_vstrbq_scatter_offset_sv16qi ((__builtin_neon_qi *) __base, __offset, __value); | |
11751 | } | |
11752 | ||
11753 | __extension__ extern __inline void | |
11754 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11755 | __arm_vstrbq_scatter_offset_s32 (int8_t * __base, uint32x4_t __offset, int32x4_t __value) | |
11756 | { | |
11757 | __builtin_mve_vstrbq_scatter_offset_sv4si ((__builtin_neon_qi *) __base, __offset, __value); | |
11758 | } | |
11759 | ||
11760 | __extension__ extern __inline void | |
11761 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11762 | __arm_vstrbq_scatter_offset_s16 (int8_t * __base, uint16x8_t __offset, int16x8_t __value) | |
11763 | { | |
11764 | __builtin_mve_vstrbq_scatter_offset_sv8hi ((__builtin_neon_qi *) __base, __offset, __value); | |
11765 | } | |
11766 | ||
11767 | __extension__ extern __inline void | |
11768 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11769 | __arm_vstrbq_scatter_offset_u8 (uint8_t * __base, uint8x16_t __offset, uint8x16_t __value) | |
11770 | { | |
11771 | __builtin_mve_vstrbq_scatter_offset_uv16qi ((__builtin_neon_qi *) __base, __offset, __value); | |
11772 | } | |
11773 | ||
11774 | __extension__ extern __inline void | |
11775 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11776 | __arm_vstrbq_scatter_offset_u32 (uint8_t * __base, uint32x4_t __offset, uint32x4_t __value) | |
11777 | { | |
11778 | __builtin_mve_vstrbq_scatter_offset_uv4si ((__builtin_neon_qi *) __base, __offset, __value); | |
11779 | } | |
11780 | ||
11781 | __extension__ extern __inline void | |
11782 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11783 | __arm_vstrbq_scatter_offset_u16 (uint8_t * __base, uint16x8_t __offset, uint16x8_t __value) | |
11784 | { | |
11785 | __builtin_mve_vstrbq_scatter_offset_uv8hi ((__builtin_neon_qi *) __base, __offset, __value); | |
11786 | } | |
11787 | ||
11788 | __extension__ extern __inline void | |
11789 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11790 | __arm_vstrbq_s8 (int8_t * __addr, int8x16_t __value) | |
11791 | { | |
11792 | __builtin_mve_vstrbq_sv16qi ((__builtin_neon_qi *) __addr, __value); | |
11793 | } | |
11794 | ||
11795 | __extension__ extern __inline void | |
11796 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11797 | __arm_vstrbq_s32 (int8_t * __addr, int32x4_t __value) | |
11798 | { | |
11799 | __builtin_mve_vstrbq_sv4si ((__builtin_neon_qi *) __addr, __value); | |
11800 | } | |
11801 | ||
11802 | __extension__ extern __inline void | |
11803 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11804 | __arm_vstrbq_s16 (int8_t * __addr, int16x8_t __value) | |
11805 | { | |
11806 | __builtin_mve_vstrbq_sv8hi ((__builtin_neon_qi *) __addr, __value); | |
11807 | } | |
11808 | ||
11809 | __extension__ extern __inline void | |
11810 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11811 | __arm_vstrbq_u8 (uint8_t * __addr, uint8x16_t __value) | |
11812 | { | |
11813 | __builtin_mve_vstrbq_uv16qi ((__builtin_neon_qi *) __addr, __value); | |
11814 | } | |
11815 | ||
11816 | __extension__ extern __inline void | |
11817 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11818 | __arm_vstrbq_u32 (uint8_t * __addr, uint32x4_t __value) | |
11819 | { | |
11820 | __builtin_mve_vstrbq_uv4si ((__builtin_neon_qi *) __addr, __value); | |
11821 | } | |
11822 | ||
11823 | __extension__ extern __inline void | |
11824 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11825 | __arm_vstrbq_u16 (uint8_t * __addr, uint16x8_t __value) | |
11826 | { | |
11827 | __builtin_mve_vstrbq_uv8hi ((__builtin_neon_qi *) __addr, __value); | |
11828 | } | |
11829 | ||
11830 | __extension__ extern __inline void | |
11831 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11832 | __arm_vstrwq_scatter_base_s32 (uint32x4_t __addr, const int __offset, int32x4_t __value) | |
11833 | { | |
11834 | __builtin_mve_vstrwq_scatter_base_sv4si (__addr, __offset, __value); | |
11835 | } | |
11836 | ||
11837 | __extension__ extern __inline void | |
11838 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11839 | __arm_vstrwq_scatter_base_u32 (uint32x4_t __addr, const int __offset, uint32x4_t __value) | |
11840 | { | |
11841 | __builtin_mve_vstrwq_scatter_base_uv4si (__addr, __offset, __value); | |
11842 | } | |
535a8645 SP |
11843 | |
/* Unpredicated byte loads (vldrbq) and byte gather loads
   (vldrbq_gather_offset): each intrinsic is an always-inline wrapper that
   casts the scalar base pointer to the builtin's expected __builtin_neon_qi
   pointer type and dispatches to the mode-specific GCC builtin.  The _u16/
   _s16/_u32/_s32 variants load bytes and widen them into 16-/32-bit lanes
   (v8hi/v4si builtin modes).  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrbq_gather_offset_u8 (uint8_t const * __base, uint8x16_t __offset)
{
  return __builtin_mve_vldrbq_gather_offset_uv16qi ((__builtin_neon_qi *) __base, __offset);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrbq_gather_offset_s8 (int8_t const * __base, uint8x16_t __offset)
{
  return __builtin_mve_vldrbq_gather_offset_sv16qi ((__builtin_neon_qi *) __base, __offset);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrbq_s8 (int8_t const * __base)
{
  return __builtin_mve_vldrbq_sv16qi ((__builtin_neon_qi *) __base);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrbq_u8 (uint8_t const * __base)
{
  return __builtin_mve_vldrbq_uv16qi ((__builtin_neon_qi *) __base);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrbq_gather_offset_u16 (uint8_t const * __base, uint16x8_t __offset)
{
  return __builtin_mve_vldrbq_gather_offset_uv8hi ((__builtin_neon_qi *) __base, __offset);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrbq_gather_offset_s16 (int8_t const * __base, uint16x8_t __offset)
{
  return __builtin_mve_vldrbq_gather_offset_sv8hi ((__builtin_neon_qi *) __base, __offset);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrbq_s16 (int8_t const * __base)
{
  return __builtin_mve_vldrbq_sv8hi ((__builtin_neon_qi *) __base);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrbq_u16 (uint8_t const * __base)
{
  return __builtin_mve_vldrbq_uv8hi ((__builtin_neon_qi *) __base);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrbq_gather_offset_u32 (uint8_t const * __base, uint32x4_t __offset)
{
  return __builtin_mve_vldrbq_gather_offset_uv4si ((__builtin_neon_qi *) __base, __offset);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrbq_gather_offset_s32 (int8_t const * __base, uint32x4_t __offset)
{
  return __builtin_mve_vldrbq_gather_offset_sv4si ((__builtin_neon_qi *) __base, __offset);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrbq_s32 (int8_t const * __base)
{
  return __builtin_mve_vldrbq_sv4si ((__builtin_neon_qi *) __base);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrbq_u32 (uint8_t const * __base)
{
  return __builtin_mve_vldrbq_uv4si ((__builtin_neon_qi *) __base);
}

/* Word gather loads addressed by a vector of base addresses plus an
   immediate byte offset (vldrwq_gather_base); no scalar pointer involved,
   so the vector is passed straight through to the builtin.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrwq_gather_base_s32 (uint32x4_t __addr, const int __offset)
{
  return __builtin_mve_vldrwq_gather_base_sv4si (__addr, __offset);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrwq_gather_base_u32 (uint32x4_t __addr, const int __offset)
{
  return __builtin_mve_vldrwq_gather_base_uv4si (__addr, __offset);
}
11941 | ||
405e918c SP |
/* Predicated byte stores (vstrbq_p) and predicated byte scatter stores
   (vstrbq_scatter_offset_p): each wrapper forwards the predicate mask __p
   unchanged to the mode-specific builtin, after casting the destination
   pointer to the builtin's __builtin_neon_qi pointer type.  The 16-/32-bit
   element variants narrow each lane to a byte on store (v8hi/v4si modes).  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrbq_p_s8 (int8_t * __addr, int8x16_t __value, mve_pred16_t __p)
{
  __builtin_mve_vstrbq_p_sv16qi ((__builtin_neon_qi *) __addr, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrbq_p_s32 (int8_t * __addr, int32x4_t __value, mve_pred16_t __p)
{
  __builtin_mve_vstrbq_p_sv4si ((__builtin_neon_qi *) __addr, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrbq_p_s16 (int8_t * __addr, int16x8_t __value, mve_pred16_t __p)
{
  __builtin_mve_vstrbq_p_sv8hi ((__builtin_neon_qi *) __addr, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrbq_p_u8 (uint8_t * __addr, uint8x16_t __value, mve_pred16_t __p)
{
  __builtin_mve_vstrbq_p_uv16qi ((__builtin_neon_qi *) __addr, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrbq_p_u32 (uint8_t * __addr, uint32x4_t __value, mve_pred16_t __p)
{
  __builtin_mve_vstrbq_p_uv4si ((__builtin_neon_qi *) __addr, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrbq_p_u16 (uint8_t * __addr, uint16x8_t __value, mve_pred16_t __p)
{
  __builtin_mve_vstrbq_p_uv8hi ((__builtin_neon_qi *) __addr, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrbq_scatter_offset_p_s8 (int8_t * __base, uint8x16_t __offset, int8x16_t __value, mve_pred16_t __p)
{
  __builtin_mve_vstrbq_scatter_offset_p_sv16qi ((__builtin_neon_qi *) __base, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrbq_scatter_offset_p_s32 (int8_t * __base, uint32x4_t __offset, int32x4_t __value, mve_pred16_t __p)
{
  __builtin_mve_vstrbq_scatter_offset_p_sv4si ((__builtin_neon_qi *) __base, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrbq_scatter_offset_p_s16 (int8_t * __base, uint16x8_t __offset, int16x8_t __value, mve_pred16_t __p)
{
  __builtin_mve_vstrbq_scatter_offset_p_sv8hi ((__builtin_neon_qi *) __base, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrbq_scatter_offset_p_u8 (uint8_t * __base, uint8x16_t __offset, uint8x16_t __value, mve_pred16_t __p)
{
  __builtin_mve_vstrbq_scatter_offset_p_uv16qi ((__builtin_neon_qi *) __base, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrbq_scatter_offset_p_u32 (uint8_t * __base, uint32x4_t __offset, uint32x4_t __value, mve_pred16_t __p)
{
  __builtin_mve_vstrbq_scatter_offset_p_uv4si ((__builtin_neon_qi *) __base, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrbq_scatter_offset_p_u16 (uint8_t * __base, uint16x8_t __offset, uint16x8_t __value, mve_pred16_t __p)
{
  __builtin_mve_vstrbq_scatter_offset_p_uv8hi ((__builtin_neon_qi *) __base, __offset, __value, __p);
}

/* Predicated word scatter-base stores: vector of addresses plus immediate
   byte offset, masked by __p.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_base_p_s32 (uint32x4_t __addr, const int __offset, int32x4_t __value, mve_pred16_t __p)
{
  __builtin_mve_vstrwq_scatter_base_p_sv4si (__addr, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_base_p_u32 (uint32x4_t __addr, const int __offset, uint32x4_t __value, mve_pred16_t __p)
{
  __builtin_mve_vstrwq_scatter_base_p_uv4si (__addr, __offset, __value, __p);
}
429d607b SP |
12039 | |
12040 | __extension__ extern __inline int8x16_t | |
12041 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12042 | __arm_vldrbq_gather_offset_z_s8 (int8_t const * __base, uint8x16_t __offset, mve_pred16_t __p) | |
12043 | { | |
12044 | return __builtin_mve_vldrbq_gather_offset_z_sv16qi ((__builtin_neon_qi *) __base, __offset, __p); | |
12045 | } | |
12046 | ||
12047 | __extension__ extern __inline int32x4_t | |
12048 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12049 | __arm_vldrbq_gather_offset_z_s32 (int8_t const * __base, uint32x4_t __offset, mve_pred16_t __p) | |
12050 | { | |
12051 | return __builtin_mve_vldrbq_gather_offset_z_sv4si ((__builtin_neon_qi *) __base, __offset, __p); | |
12052 | } | |
12053 | ||
12054 | __extension__ extern __inline int16x8_t | |
12055 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12056 | __arm_vldrbq_gather_offset_z_s16 (int8_t const * __base, uint16x8_t __offset, mve_pred16_t __p) | |
12057 | { | |
12058 | return __builtin_mve_vldrbq_gather_offset_z_sv8hi ((__builtin_neon_qi *) __base, __offset, __p); | |
12059 | } | |
12060 | ||
12061 | __extension__ extern __inline uint8x16_t | |
12062 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12063 | __arm_vldrbq_gather_offset_z_u8 (uint8_t const * __base, uint8x16_t __offset, mve_pred16_t __p) | |
12064 | { | |
12065 | return __builtin_mve_vldrbq_gather_offset_z_uv16qi ((__builtin_neon_qi *) __base, __offset, __p); | |
12066 | } | |
12067 | ||
12068 | __extension__ extern __inline uint32x4_t | |
12069 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12070 | __arm_vldrbq_gather_offset_z_u32 (uint8_t const * __base, uint32x4_t __offset, mve_pred16_t __p) | |
12071 | { | |
12072 | return __builtin_mve_vldrbq_gather_offset_z_uv4si ((__builtin_neon_qi *) __base, __offset, __p); | |
12073 | } | |
12074 | ||
12075 | __extension__ extern __inline uint16x8_t | |
12076 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12077 | __arm_vldrbq_gather_offset_z_u16 (uint8_t const * __base, uint16x8_t __offset, mve_pred16_t __p) | |
12078 | { | |
12079 | return __builtin_mve_vldrbq_gather_offset_z_uv8hi ((__builtin_neon_qi *) __base, __offset, __p); | |
12080 | } | |
12081 | ||
12082 | __extension__ extern __inline int8x16_t | |
12083 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12084 | __arm_vldrbq_z_s8 (int8_t const * __base, mve_pred16_t __p) | |
12085 | { | |
12086 | return __builtin_mve_vldrbq_z_sv16qi ((__builtin_neon_qi *) __base, __p); | |
12087 | } | |
12088 | ||
12089 | __extension__ extern __inline int32x4_t | |
12090 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12091 | __arm_vldrbq_z_s32 (int8_t const * __base, mve_pred16_t __p) | |
12092 | { | |
12093 | return __builtin_mve_vldrbq_z_sv4si ((__builtin_neon_qi *) __base, __p); | |
12094 | } | |
12095 | ||
12096 | __extension__ extern __inline int16x8_t | |
12097 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12098 | __arm_vldrbq_z_s16 (int8_t const * __base, mve_pred16_t __p) | |
12099 | { | |
12100 | return __builtin_mve_vldrbq_z_sv8hi ((__builtin_neon_qi *) __base, __p); | |
12101 | } | |
12102 | ||
12103 | __extension__ extern __inline uint8x16_t | |
12104 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12105 | __arm_vldrbq_z_u8 (uint8_t const * __base, mve_pred16_t __p) | |
12106 | { | |
12107 | return __builtin_mve_vldrbq_z_uv16qi ((__builtin_neon_qi *) __base, __p); | |
12108 | } | |
12109 | ||
12110 | __extension__ extern __inline uint32x4_t | |
12111 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12112 | __arm_vldrbq_z_u32 (uint8_t const * __base, mve_pred16_t __p) | |
12113 | { | |
12114 | return __builtin_mve_vldrbq_z_uv4si ((__builtin_neon_qi *) __base, __p); | |
12115 | } | |
12116 | ||
12117 | __extension__ extern __inline uint16x8_t | |
12118 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12119 | __arm_vldrbq_z_u16 (uint8_t const * __base, mve_pred16_t __p) | |
12120 | { | |
12121 | return __builtin_mve_vldrbq_z_uv8hi ((__builtin_neon_qi *) __base, __p); | |
12122 | } | |
12123 | ||
12124 | __extension__ extern __inline int32x4_t | |
12125 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12126 | __arm_vldrwq_gather_base_z_s32 (uint32x4_t __addr, const int __offset, mve_pred16_t __p) | |
12127 | { | |
12128 | return __builtin_mve_vldrwq_gather_base_z_sv4si (__addr, __offset, __p); | |
12129 | } | |
12130 | ||
12131 | __extension__ extern __inline uint32x4_t | |
12132 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12133 | __arm_vldrwq_gather_base_z_u32 (uint32x4_t __addr, const int __offset, mve_pred16_t __p) | |
12134 | { | |
12135 | return __builtin_mve_vldrwq_gather_base_z_uv4si (__addr, __offset, __p); | |
12136 | } | |
12137 | ||
bf1e3d5a SP |
/* Contiguous vector loads (vld1q), halfword loads/gathers (vldrhq) and word
   loads (vldrwq), unpredicated and predicated (_z) forms.  All are thin
   always-inline wrappers: the scalar base pointer is cast to the builtin's
   __builtin_neon_{qi,hi,si} pointer type and the call is dispatched to the
   mode-specific GCC builtin.  The vldrhq _s32/_u32 variants widen halfwords
   into 32-bit lanes; "shifted_offset" variants take element (not byte)
   offsets per the ACLE naming.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vld1q_s8 (int8_t const * __base)
{
  return __builtin_mve_vld1q_sv16qi ((__builtin_neon_qi *) __base);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vld1q_s32 (int32_t const * __base)
{
  return __builtin_mve_vld1q_sv4si ((__builtin_neon_si *) __base);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vld1q_s16 (int16_t const * __base)
{
  return __builtin_mve_vld1q_sv8hi ((__builtin_neon_hi *) __base);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vld1q_u8 (uint8_t const * __base)
{
  return __builtin_mve_vld1q_uv16qi ((__builtin_neon_qi *) __base);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vld1q_u32 (uint32_t const * __base)
{
  return __builtin_mve_vld1q_uv4si ((__builtin_neon_si *) __base);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vld1q_u16 (uint16_t const * __base)
{
  return __builtin_mve_vld1q_uv8hi ((__builtin_neon_hi *) __base);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_gather_offset_s32 (int16_t const * __base, uint32x4_t __offset)
{
  return __builtin_mve_vldrhq_gather_offset_sv4si ((__builtin_neon_hi *) __base, __offset);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_gather_offset_s16 (int16_t const * __base, uint16x8_t __offset)
{
  return __builtin_mve_vldrhq_gather_offset_sv8hi ((__builtin_neon_hi *) __base, __offset);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_gather_offset_u32 (uint16_t const * __base, uint32x4_t __offset)
{
  return __builtin_mve_vldrhq_gather_offset_uv4si ((__builtin_neon_hi *) __base, __offset);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_gather_offset_u16 (uint16_t const * __base, uint16x8_t __offset)
{
  return __builtin_mve_vldrhq_gather_offset_uv8hi ((__builtin_neon_hi *) __base, __offset);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_gather_offset_z_s32 (int16_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
{
  return __builtin_mve_vldrhq_gather_offset_z_sv4si ((__builtin_neon_hi *) __base, __offset, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_gather_offset_z_s16 (int16_t const * __base, uint16x8_t __offset, mve_pred16_t __p)
{
  return __builtin_mve_vldrhq_gather_offset_z_sv8hi ((__builtin_neon_hi *) __base, __offset, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_gather_offset_z_u32 (uint16_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
{
  return __builtin_mve_vldrhq_gather_offset_z_uv4si ((__builtin_neon_hi *) __base, __offset, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_gather_offset_z_u16 (uint16_t const * __base, uint16x8_t __offset, mve_pred16_t __p)
{
  return __builtin_mve_vldrhq_gather_offset_z_uv8hi ((__builtin_neon_hi *) __base, __offset, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_gather_shifted_offset_s32 (int16_t const * __base, uint32x4_t __offset)
{
  return __builtin_mve_vldrhq_gather_shifted_offset_sv4si ((__builtin_neon_hi *) __base, __offset);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_gather_shifted_offset_s16 (int16_t const * __base, uint16x8_t __offset)
{
  return __builtin_mve_vldrhq_gather_shifted_offset_sv8hi ((__builtin_neon_hi *) __base, __offset);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_gather_shifted_offset_u32 (uint16_t const * __base, uint32x4_t __offset)
{
  return __builtin_mve_vldrhq_gather_shifted_offset_uv4si ((__builtin_neon_hi *) __base, __offset);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_gather_shifted_offset_u16 (uint16_t const * __base, uint16x8_t __offset)
{
  return __builtin_mve_vldrhq_gather_shifted_offset_uv8hi ((__builtin_neon_hi *) __base, __offset);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_gather_shifted_offset_z_s32 (int16_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
{
  return __builtin_mve_vldrhq_gather_shifted_offset_z_sv4si ((__builtin_neon_hi *) __base, __offset, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_gather_shifted_offset_z_s16 (int16_t const * __base, uint16x8_t __offset, mve_pred16_t __p)
{
  return __builtin_mve_vldrhq_gather_shifted_offset_z_sv8hi ((__builtin_neon_hi *) __base, __offset, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_gather_shifted_offset_z_u32 (uint16_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
{
  return __builtin_mve_vldrhq_gather_shifted_offset_z_uv4si ((__builtin_neon_hi *) __base, __offset, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_gather_shifted_offset_z_u16 (uint16_t const * __base, uint16x8_t __offset, mve_pred16_t __p)
{
  return __builtin_mve_vldrhq_gather_shifted_offset_z_uv8hi ((__builtin_neon_hi *) __base, __offset, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_s32 (int16_t const * __base)
{
  return __builtin_mve_vldrhq_sv4si ((__builtin_neon_hi *) __base);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_s16 (int16_t const * __base)
{
  return __builtin_mve_vldrhq_sv8hi ((__builtin_neon_hi *) __base);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_u32 (uint16_t const * __base)
{
  return __builtin_mve_vldrhq_uv4si ((__builtin_neon_hi *) __base);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_u16 (uint16_t const * __base)
{
  return __builtin_mve_vldrhq_uv8hi ((__builtin_neon_hi *) __base);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_z_s32 (int16_t const * __base, mve_pred16_t __p)
{
  return __builtin_mve_vldrhq_z_sv4si ((__builtin_neon_hi *) __base, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_z_s16 (int16_t const * __base, mve_pred16_t __p)
{
  return __builtin_mve_vldrhq_z_sv8hi ((__builtin_neon_hi *) __base, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_z_u32 (uint16_t const * __base, mve_pred16_t __p)
{
  return __builtin_mve_vldrhq_z_uv4si ((__builtin_neon_hi *) __base, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_z_u16 (uint16_t const * __base, mve_pred16_t __p)
{
  return __builtin_mve_vldrhq_z_uv8hi ((__builtin_neon_hi *) __base, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrwq_s32 (int32_t const * __base)
{
  return __builtin_mve_vldrwq_sv4si ((__builtin_neon_si *) __base);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrwq_u32 (uint32_t const * __base)
{
  return __builtin_mve_vldrwq_uv4si ((__builtin_neon_si *) __base);
}


__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrwq_z_s32 (int32_t const * __base, mve_pred16_t __p)
{
  return __builtin_mve_vldrwq_z_sv4si ((__builtin_neon_si *) __base, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrwq_z_u32 (uint32_t const * __base, mve_pred16_t __p)
{
  return __builtin_mve_vldrwq_z_uv4si ((__builtin_neon_si *) __base, __p);
}
12376 | ||
4cc23303 SP |
/* Doubleword gather loads (vldrdq, int64x2_t/uint64x2_t results) and word
   gather loads (vldrwq_gather_offset / _shifted_offset), with predicated
   (_z) forms.  As above, each intrinsic just casts the base pointer to the
   builtin's expected pointer type (__builtin_neon_di / __builtin_neon_si)
   and forwards all arguments -- including the predicate __p where present --
   to the mode-specific GCC builtin.  The gather_base forms take a vector of
   addresses plus an immediate byte offset and no scalar pointer.  */
__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrdq_gather_base_s64 (uint64x2_t __addr, const int __offset)
{
  return __builtin_mve_vldrdq_gather_base_sv2di (__addr, __offset);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrdq_gather_base_u64 (uint64x2_t __addr, const int __offset)
{
  return __builtin_mve_vldrdq_gather_base_uv2di (__addr, __offset);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrdq_gather_base_z_s64 (uint64x2_t __addr, const int __offset, mve_pred16_t __p)
{
  return __builtin_mve_vldrdq_gather_base_z_sv2di (__addr, __offset, __p);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrdq_gather_base_z_u64 (uint64x2_t __addr, const int __offset, mve_pred16_t __p)
{
  return __builtin_mve_vldrdq_gather_base_z_uv2di (__addr, __offset, __p);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrdq_gather_offset_s64 (int64_t const * __base, uint64x2_t __offset)
{
  return __builtin_mve_vldrdq_gather_offset_sv2di ((__builtin_neon_di *) __base, __offset);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrdq_gather_offset_u64 (uint64_t const * __base, uint64x2_t __offset)
{
  return __builtin_mve_vldrdq_gather_offset_uv2di ((__builtin_neon_di *) __base, __offset);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrdq_gather_offset_z_s64 (int64_t const * __base, uint64x2_t __offset, mve_pred16_t __p)
{
  return __builtin_mve_vldrdq_gather_offset_z_sv2di ((__builtin_neon_di *) __base, __offset, __p);
}


__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrdq_gather_offset_z_u64 (uint64_t const * __base, uint64x2_t __offset, mve_pred16_t __p)
{
  return __builtin_mve_vldrdq_gather_offset_z_uv2di ((__builtin_neon_di *) __base, __offset, __p);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrdq_gather_shifted_offset_s64 (int64_t const * __base, uint64x2_t __offset)
{
  return __builtin_mve_vldrdq_gather_shifted_offset_sv2di ((__builtin_neon_di *) __base, __offset);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrdq_gather_shifted_offset_u64 (uint64_t const * __base, uint64x2_t __offset)
{
  return __builtin_mve_vldrdq_gather_shifted_offset_uv2di ((__builtin_neon_di *) __base, __offset);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrdq_gather_shifted_offset_z_s64 (int64_t const * __base, uint64x2_t __offset, mve_pred16_t __p)
{
  return __builtin_mve_vldrdq_gather_shifted_offset_z_sv2di ((__builtin_neon_di *) __base, __offset, __p);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrdq_gather_shifted_offset_z_u64 (uint64_t const * __base, uint64x2_t __offset, mve_pred16_t __p)
{
  return __builtin_mve_vldrdq_gather_shifted_offset_z_uv2di ((__builtin_neon_di *) __base, __offset, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrwq_gather_offset_s32 (int32_t const * __base, uint32x4_t __offset)
{
  return __builtin_mve_vldrwq_gather_offset_sv4si ((__builtin_neon_si *) __base, __offset);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrwq_gather_offset_u32 (uint32_t const * __base, uint32x4_t __offset)
{
  return __builtin_mve_vldrwq_gather_offset_uv4si ((__builtin_neon_si *) __base, __offset);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrwq_gather_offset_z_s32 (int32_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
{
  return __builtin_mve_vldrwq_gather_offset_z_sv4si ((__builtin_neon_si *) __base, __offset, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrwq_gather_offset_z_u32 (uint32_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
{
  return __builtin_mve_vldrwq_gather_offset_z_uv4si ((__builtin_neon_si *) __base, __offset, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrwq_gather_shifted_offset_s32 (int32_t const * __base, uint32x4_t __offset)
{
  return __builtin_mve_vldrwq_gather_shifted_offset_sv4si ((__builtin_neon_si *) __base, __offset);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrwq_gather_shifted_offset_u32 (uint32_t const * __base, uint32x4_t __offset)
{
  return __builtin_mve_vldrwq_gather_shifted_offset_uv4si ((__builtin_neon_si *) __base, __offset);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrwq_gather_shifted_offset_z_s32 (int32_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
{
  return __builtin_mve_vldrwq_gather_shifted_offset_z_sv4si ((__builtin_neon_si *) __base, __offset, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrwq_gather_shifted_offset_z_u32 (uint32_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
{
  return __builtin_mve_vldrwq_gather_shifted_offset_z_uv4si ((__builtin_neon_si *) __base, __offset, __p);
}
12517 | ||
5cad47e0 SP |
12518 | __extension__ extern __inline void |
12519 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12520 | __arm_vst1q_s8 (int8_t * __addr, int8x16_t __value) | |
12521 | { | |
12522 | __builtin_mve_vst1q_sv16qi ((__builtin_neon_qi *) __addr, __value); | |
12523 | } | |
12524 | ||
12525 | __extension__ extern __inline void | |
12526 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12527 | __arm_vst1q_s32 (int32_t * __addr, int32x4_t __value) | |
12528 | { | |
12529 | __builtin_mve_vst1q_sv4si ((__builtin_neon_si *) __addr, __value); | |
12530 | } | |
12531 | ||
12532 | __extension__ extern __inline void | |
12533 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12534 | __arm_vst1q_s16 (int16_t * __addr, int16x8_t __value) | |
12535 | { | |
12536 | __builtin_mve_vst1q_sv8hi ((__builtin_neon_hi *) __addr, __value); | |
12537 | } | |
12538 | ||
12539 | __extension__ extern __inline void | |
12540 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12541 | __arm_vst1q_u8 (uint8_t * __addr, uint8x16_t __value) | |
12542 | { | |
12543 | __builtin_mve_vst1q_uv16qi ((__builtin_neon_qi *) __addr, __value); | |
12544 | } | |
12545 | ||
12546 | __extension__ extern __inline void | |
12547 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12548 | __arm_vst1q_u32 (uint32_t * __addr, uint32x4_t __value) | |
12549 | { | |
12550 | __builtin_mve_vst1q_uv4si ((__builtin_neon_si *) __addr, __value); | |
12551 | } | |
12552 | ||
12553 | __extension__ extern __inline void | |
12554 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12555 | __arm_vst1q_u16 (uint16_t * __addr, uint16x8_t __value) | |
12556 | { | |
12557 | __builtin_mve_vst1q_uv8hi ((__builtin_neon_hi *) __addr, __value); | |
12558 | } | |
12559 | ||
12560 | __extension__ extern __inline void | |
12561 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12562 | __arm_vstrhq_scatter_offset_s32 (int16_t * __base, uint32x4_t __offset, int32x4_t __value) | |
12563 | { | |
12564 | __builtin_mve_vstrhq_scatter_offset_sv4si ((__builtin_neon_hi *) __base, __offset, __value); | |
12565 | } | |
12566 | ||
12567 | __extension__ extern __inline void | |
12568 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12569 | __arm_vstrhq_scatter_offset_s16 (int16_t * __base, uint16x8_t __offset, int16x8_t __value) | |
12570 | { | |
12571 | __builtin_mve_vstrhq_scatter_offset_sv8hi ((__builtin_neon_hi *) __base, __offset, __value); | |
12572 | } | |
12573 | ||
12574 | __extension__ extern __inline void | |
12575 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12576 | __arm_vstrhq_scatter_offset_u32 (uint16_t * __base, uint32x4_t __offset, uint32x4_t __value) | |
12577 | { | |
12578 | __builtin_mve_vstrhq_scatter_offset_uv4si ((__builtin_neon_hi *) __base, __offset, __value); | |
12579 | } | |
12580 | ||
12581 | __extension__ extern __inline void | |
12582 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12583 | __arm_vstrhq_scatter_offset_u16 (uint16_t * __base, uint16x8_t __offset, uint16x8_t __value) | |
12584 | { | |
12585 | __builtin_mve_vstrhq_scatter_offset_uv8hi ((__builtin_neon_hi *) __base, __offset, __value); | |
12586 | } | |
12587 | ||
12588 | __extension__ extern __inline void | |
12589 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12590 | __arm_vstrhq_scatter_offset_p_s32 (int16_t * __base, uint32x4_t __offset, int32x4_t __value, mve_pred16_t __p) | |
12591 | { | |
12592 | __builtin_mve_vstrhq_scatter_offset_p_sv4si ((__builtin_neon_hi *) __base, __offset, __value, __p); | |
12593 | } | |
12594 | ||
12595 | __extension__ extern __inline void | |
12596 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12597 | __arm_vstrhq_scatter_offset_p_s16 (int16_t * __base, uint16x8_t __offset, int16x8_t __value, mve_pred16_t __p) | |
12598 | { | |
12599 | __builtin_mve_vstrhq_scatter_offset_p_sv8hi ((__builtin_neon_hi *) __base, __offset, __value, __p); | |
12600 | } | |
12601 | ||
12602 | __extension__ extern __inline void | |
12603 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12604 | __arm_vstrhq_scatter_offset_p_u32 (uint16_t * __base, uint32x4_t __offset, uint32x4_t __value, mve_pred16_t __p) | |
12605 | { | |
12606 | __builtin_mve_vstrhq_scatter_offset_p_uv4si ((__builtin_neon_hi *) __base, __offset, __value, __p); | |
12607 | } | |
12608 | ||
12609 | __extension__ extern __inline void | |
12610 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12611 | __arm_vstrhq_scatter_offset_p_u16 (uint16_t * __base, uint16x8_t __offset, uint16x8_t __value, mve_pred16_t __p) | |
12612 | { | |
12613 | __builtin_mve_vstrhq_scatter_offset_p_uv8hi ((__builtin_neon_hi *) __base, __offset, __value, __p); | |
12614 | } | |
12615 | ||
12616 | __extension__ extern __inline void | |
12617 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12618 | __arm_vstrhq_scatter_shifted_offset_s32 (int16_t * __base, uint32x4_t __offset, int32x4_t __value) | |
12619 | { | |
12620 | __builtin_mve_vstrhq_scatter_shifted_offset_sv4si ((__builtin_neon_hi *) __base, __offset, __value); | |
12621 | } | |
12622 | ||
12623 | __extension__ extern __inline void | |
12624 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12625 | __arm_vstrhq_scatter_shifted_offset_s16 (int16_t * __base, uint16x8_t __offset, int16x8_t __value) | |
12626 | { | |
12627 | __builtin_mve_vstrhq_scatter_shifted_offset_sv8hi ((__builtin_neon_hi *) __base, __offset, __value); | |
12628 | } | |
12629 | ||
12630 | __extension__ extern __inline void | |
12631 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12632 | __arm_vstrhq_scatter_shifted_offset_u32 (uint16_t * __base, uint32x4_t __offset, uint32x4_t __value) | |
12633 | { | |
12634 | __builtin_mve_vstrhq_scatter_shifted_offset_uv4si ((__builtin_neon_hi *) __base, __offset, __value); | |
12635 | } | |
12636 | ||
12637 | __extension__ extern __inline void | |
12638 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12639 | __arm_vstrhq_scatter_shifted_offset_u16 (uint16_t * __base, uint16x8_t __offset, uint16x8_t __value) | |
12640 | { | |
12641 | __builtin_mve_vstrhq_scatter_shifted_offset_uv8hi ((__builtin_neon_hi *) __base, __offset, __value); | |
12642 | } | |
12643 | ||
12644 | __extension__ extern __inline void | |
12645 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12646 | __arm_vstrhq_scatter_shifted_offset_p_s32 (int16_t * __base, uint32x4_t __offset, int32x4_t __value, mve_pred16_t __p) | |
12647 | { | |
12648 | __builtin_mve_vstrhq_scatter_shifted_offset_p_sv4si ((__builtin_neon_hi *) __base, __offset, __value, __p); | |
12649 | } | |
12650 | ||
12651 | __extension__ extern __inline void | |
12652 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12653 | __arm_vstrhq_scatter_shifted_offset_p_s16 (int16_t * __base, uint16x8_t __offset, int16x8_t __value, mve_pred16_t __p) | |
12654 | { | |
12655 | __builtin_mve_vstrhq_scatter_shifted_offset_p_sv8hi ((__builtin_neon_hi *) __base, __offset, __value, __p); | |
12656 | } | |
12657 | ||
12658 | __extension__ extern __inline void | |
12659 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12660 | __arm_vstrhq_scatter_shifted_offset_p_u32 (uint16_t * __base, uint32x4_t __offset, uint32x4_t __value, mve_pred16_t __p) | |
12661 | { | |
12662 | __builtin_mve_vstrhq_scatter_shifted_offset_p_uv4si ((__builtin_neon_hi *) __base, __offset, __value, __p); | |
12663 | } | |
12664 | ||
12665 | __extension__ extern __inline void | |
12666 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12667 | __arm_vstrhq_scatter_shifted_offset_p_u16 (uint16_t * __base, uint16x8_t __offset, uint16x8_t __value, mve_pred16_t __p) | |
12668 | { | |
12669 | __builtin_mve_vstrhq_scatter_shifted_offset_p_uv8hi ((__builtin_neon_hi *) __base, __offset, __value, __p); | |
12670 | } | |
12671 | ||
12672 | __extension__ extern __inline void | |
12673 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12674 | __arm_vstrhq_s32 (int16_t * __addr, int32x4_t __value) | |
12675 | { | |
12676 | __builtin_mve_vstrhq_sv4si ((__builtin_neon_hi *) __addr, __value); | |
12677 | } | |
12678 | ||
12679 | __extension__ extern __inline void | |
12680 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12681 | __arm_vstrhq_s16 (int16_t * __addr, int16x8_t __value) | |
12682 | { | |
12683 | __builtin_mve_vstrhq_sv8hi ((__builtin_neon_hi *) __addr, __value); | |
12684 | } | |
12685 | ||
12686 | __extension__ extern __inline void | |
12687 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12688 | __arm_vstrhq_u32 (uint16_t * __addr, uint32x4_t __value) | |
12689 | { | |
12690 | __builtin_mve_vstrhq_uv4si ((__builtin_neon_hi *) __addr, __value); | |
12691 | } | |
12692 | ||
12693 | __extension__ extern __inline void | |
12694 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12695 | __arm_vstrhq_u16 (uint16_t * __addr, uint16x8_t __value) | |
12696 | { | |
12697 | __builtin_mve_vstrhq_uv8hi ((__builtin_neon_hi *) __addr, __value); | |
12698 | } | |
12699 | ||
12700 | __extension__ extern __inline void | |
12701 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12702 | __arm_vstrhq_p_s32 (int16_t * __addr, int32x4_t __value, mve_pred16_t __p) | |
12703 | { | |
12704 | __builtin_mve_vstrhq_p_sv4si ((__builtin_neon_hi *) __addr, __value, __p); | |
12705 | } | |
12706 | ||
12707 | __extension__ extern __inline void | |
12708 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12709 | __arm_vstrhq_p_s16 (int16_t * __addr, int16x8_t __value, mve_pred16_t __p) | |
12710 | { | |
12711 | __builtin_mve_vstrhq_p_sv8hi ((__builtin_neon_hi *) __addr, __value, __p); | |
12712 | } | |
12713 | ||
12714 | __extension__ extern __inline void | |
12715 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12716 | __arm_vstrhq_p_u32 (uint16_t * __addr, uint32x4_t __value, mve_pred16_t __p) | |
12717 | { | |
12718 | __builtin_mve_vstrhq_p_uv4si ((__builtin_neon_hi *) __addr, __value, __p); | |
12719 | } | |
12720 | ||
12721 | __extension__ extern __inline void | |
12722 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12723 | __arm_vstrhq_p_u16 (uint16_t * __addr, uint16x8_t __value, mve_pred16_t __p) | |
12724 | { | |
12725 | __builtin_mve_vstrhq_p_uv8hi ((__builtin_neon_hi *) __addr, __value, __p); | |
12726 | } | |
12727 | ||
12728 | __extension__ extern __inline void | |
12729 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12730 | __arm_vstrwq_s32 (int32_t * __addr, int32x4_t __value) | |
12731 | { | |
12732 | __builtin_mve_vstrwq_sv4si ((__builtin_neon_si *) __addr, __value); | |
12733 | } | |
12734 | ||
12735 | __extension__ extern __inline void | |
12736 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12737 | __arm_vstrwq_u32 (uint32_t * __addr, uint32x4_t __value) | |
12738 | { | |
12739 | __builtin_mve_vstrwq_uv4si ((__builtin_neon_si *) __addr, __value); | |
12740 | } | |
12741 | ||
12742 | __extension__ extern __inline void | |
12743 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12744 | __arm_vstrwq_p_s32 (int32_t * __addr, int32x4_t __value, mve_pred16_t __p) | |
12745 | { | |
12746 | __builtin_mve_vstrwq_p_sv4si ((__builtin_neon_si *) __addr, __value, __p); | |
12747 | } | |
12748 | ||
12749 | __extension__ extern __inline void | |
12750 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12751 | __arm_vstrwq_p_u32 (uint32_t * __addr, uint32x4_t __value, mve_pred16_t __p) | |
12752 | { | |
12753 | __builtin_mve_vstrwq_p_uv4si ((__builtin_neon_si *) __addr, __value, __p); | |
12754 | } | |
12755 | ||
7a5fffa5 SP |
12756 | __extension__ extern __inline void |
12757 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12758 | __arm_vstrdq_scatter_base_p_s64 (uint64x2_t __addr, const int __offset, int64x2_t __value, mve_pred16_t __p) | |
12759 | { | |
12760 | __builtin_mve_vstrdq_scatter_base_p_sv2di (__addr, __offset, __value, __p); | |
12761 | } | |
12762 | ||
12763 | __extension__ extern __inline void | |
12764 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12765 | __arm_vstrdq_scatter_base_p_u64 (uint64x2_t __addr, const int __offset, uint64x2_t __value, mve_pred16_t __p) | |
12766 | { | |
12767 | __builtin_mve_vstrdq_scatter_base_p_uv2di (__addr, __offset, __value, __p); | |
12768 | } | |
12769 | ||
12770 | __extension__ extern __inline void | |
12771 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12772 | __arm_vstrdq_scatter_base_s64 (uint64x2_t __addr, const int __offset, int64x2_t __value) | |
12773 | { | |
12774 | __builtin_mve_vstrdq_scatter_base_sv2di (__addr, __offset, __value); | |
12775 | } | |
12776 | ||
12777 | __extension__ extern __inline void | |
12778 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12779 | __arm_vstrdq_scatter_base_u64 (uint64x2_t __addr, const int __offset, uint64x2_t __value) | |
12780 | { | |
12781 | __builtin_mve_vstrdq_scatter_base_uv2di (__addr, __offset, __value); | |
12782 | } | |
12783 | ||
12784 | __extension__ extern __inline void | |
12785 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12786 | __arm_vstrdq_scatter_offset_p_s64 (int64_t * __base, uint64x2_t __offset, int64x2_t __value, mve_pred16_t __p) | |
12787 | { | |
12788 | __builtin_mve_vstrdq_scatter_offset_p_sv2di (__base, __offset, __value, __p); | |
12789 | } | |
12790 | ||
12791 | __extension__ extern __inline void | |
12792 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12793 | __arm_vstrdq_scatter_offset_p_u64 (uint64_t * __base, uint64x2_t __offset, uint64x2_t __value, mve_pred16_t __p) | |
12794 | { | |
12795 | __builtin_mve_vstrdq_scatter_offset_p_uv2di (__base, __offset, __value, __p); | |
12796 | } | |
12797 | ||
12798 | __extension__ extern __inline void | |
12799 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12800 | __arm_vstrdq_scatter_offset_s64 (int64_t * __base, uint64x2_t __offset, int64x2_t __value) | |
12801 | { | |
12802 | __builtin_mve_vstrdq_scatter_offset_sv2di (__base, __offset, __value); | |
12803 | } | |
12804 | ||
12805 | __extension__ extern __inline void | |
12806 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12807 | __arm_vstrdq_scatter_offset_u64 (uint64_t * __base, uint64x2_t __offset, uint64x2_t __value) | |
12808 | { | |
12809 | __builtin_mve_vstrdq_scatter_offset_uv2di (__base, __offset, __value); | |
12810 | } | |
12811 | ||
12812 | __extension__ extern __inline void | |
12813 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12814 | __arm_vstrdq_scatter_shifted_offset_p_s64 (int64_t * __base, uint64x2_t __offset, int64x2_t __value, mve_pred16_t __p) | |
12815 | { | |
12816 | __builtin_mve_vstrdq_scatter_shifted_offset_p_sv2di (__base, __offset, __value, __p); | |
12817 | } | |
12818 | ||
12819 | __extension__ extern __inline void | |
12820 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12821 | __arm_vstrdq_scatter_shifted_offset_p_u64 (uint64_t * __base, uint64x2_t __offset, uint64x2_t __value, mve_pred16_t __p) | |
12822 | { | |
12823 | __builtin_mve_vstrdq_scatter_shifted_offset_p_uv2di (__base, __offset, __value, __p); | |
12824 | } | |
12825 | ||
12826 | __extension__ extern __inline void | |
12827 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12828 | __arm_vstrdq_scatter_shifted_offset_s64 (int64_t * __base, uint64x2_t __offset, int64x2_t __value) | |
12829 | { | |
12830 | __builtin_mve_vstrdq_scatter_shifted_offset_sv2di (__base, __offset, __value); | |
12831 | } | |
12832 | ||
12833 | __extension__ extern __inline void | |
12834 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12835 | __arm_vstrdq_scatter_shifted_offset_u64 (uint64_t * __base, uint64x2_t __offset, uint64x2_t __value) | |
12836 | { | |
12837 | __builtin_mve_vstrdq_scatter_shifted_offset_uv2di (__base, __offset, __value); | |
12838 | } | |
12839 | ||
12840 | __extension__ extern __inline void | |
12841 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12842 | __arm_vstrwq_scatter_offset_p_s32 (int32_t * __base, uint32x4_t __offset, int32x4_t __value, mve_pred16_t __p) | |
12843 | { | |
12844 | __builtin_mve_vstrwq_scatter_offset_p_sv4si ((__builtin_neon_si *) __base, __offset, __value, __p); | |
12845 | } | |
12846 | ||
12847 | __extension__ extern __inline void | |
12848 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12849 | __arm_vstrwq_scatter_offset_p_u32 (uint32_t * __base, uint32x4_t __offset, uint32x4_t __value, mve_pred16_t __p) | |
12850 | { | |
12851 | __builtin_mve_vstrwq_scatter_offset_p_uv4si ((__builtin_neon_si *) __base, __offset, __value, __p); | |
12852 | } | |
12853 | ||
12854 | __extension__ extern __inline void | |
12855 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12856 | __arm_vstrwq_scatter_offset_s32 (int32_t * __base, uint32x4_t __offset, int32x4_t __value) | |
12857 | { | |
12858 | __builtin_mve_vstrwq_scatter_offset_sv4si ((__builtin_neon_si *) __base, __offset, __value); | |
12859 | } | |
12860 | ||
12861 | __extension__ extern __inline void | |
12862 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12863 | __arm_vstrwq_scatter_offset_u32 (uint32_t * __base, uint32x4_t __offset, uint32x4_t __value) | |
12864 | { | |
12865 | __builtin_mve_vstrwq_scatter_offset_uv4si ((__builtin_neon_si *) __base, __offset, __value); | |
12866 | } | |
12867 | ||
12868 | __extension__ extern __inline void | |
12869 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12870 | __arm_vstrwq_scatter_shifted_offset_p_s32 (int32_t * __base, uint32x4_t __offset, int32x4_t __value, mve_pred16_t __p) | |
12871 | { | |
12872 | __builtin_mve_vstrwq_scatter_shifted_offset_p_sv4si ((__builtin_neon_si *) __base, __offset, __value, __p); | |
12873 | } | |
12874 | ||
12875 | __extension__ extern __inline void | |
12876 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12877 | __arm_vstrwq_scatter_shifted_offset_p_u32 (uint32_t * __base, uint32x4_t __offset, uint32x4_t __value, mve_pred16_t __p) | |
12878 | { | |
12879 | __builtin_mve_vstrwq_scatter_shifted_offset_p_uv4si ((__builtin_neon_si *) __base, __offset, __value, __p); | |
12880 | } | |
12881 | ||
12882 | __extension__ extern __inline void | |
12883 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12884 | __arm_vstrwq_scatter_shifted_offset_s32 (int32_t * __base, uint32x4_t __offset, int32x4_t __value) | |
12885 | { | |
12886 | __builtin_mve_vstrwq_scatter_shifted_offset_sv4si ((__builtin_neon_si *) __base, __offset, __value); | |
12887 | } | |
12888 | ||
12889 | __extension__ extern __inline void | |
12890 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12891 | __arm_vstrwq_scatter_shifted_offset_u32 (uint32_t * __base, uint32x4_t __offset, uint32x4_t __value) | |
12892 | { | |
12893 | __builtin_mve_vstrwq_scatter_shifted_offset_uv4si ((__builtin_neon_si *) __base, __offset, __value); | |
12894 | } | |
12895 | ||
3eff57aa SP |
12896 | __extension__ extern __inline int8x16_t |
12897 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12898 | __arm_vaddq_s8 (int8x16_t __a, int8x16_t __b) | |
12899 | { | |
12900 | return __a + __b; | |
12901 | } | |
12902 | ||
12903 | __extension__ extern __inline int16x8_t | |
12904 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12905 | __arm_vaddq_s16 (int16x8_t __a, int16x8_t __b) | |
12906 | { | |
12907 | return __a + __b; | |
12908 | } | |
12909 | ||
12910 | __extension__ extern __inline int32x4_t | |
12911 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12912 | __arm_vaddq_s32 (int32x4_t __a, int32x4_t __b) | |
12913 | { | |
12914 | return __a + __b; | |
12915 | } | |
12916 | ||
12917 | __extension__ extern __inline uint8x16_t | |
12918 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12919 | __arm_vaddq_u8 (uint8x16_t __a, uint8x16_t __b) | |
12920 | { | |
12921 | return __a + __b; | |
12922 | } | |
12923 | ||
12924 | __extension__ extern __inline uint16x8_t | |
12925 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12926 | __arm_vaddq_u16 (uint16x8_t __a, uint16x8_t __b) | |
12927 | { | |
12928 | return __a + __b; | |
12929 | } | |
12930 | ||
12931 | __extension__ extern __inline uint32x4_t | |
12932 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12933 | __arm_vaddq_u32 (uint32x4_t __a, uint32x4_t __b) | |
12934 | { | |
12935 | return __a + __b; | |
12936 | } | |
12937 | ||
85a94e87 SP |
12938 | __extension__ extern __inline uint8x16_t |
12939 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12940 | __arm_vuninitializedq_u8 (void) | |
12941 | { | |
12942 | uint8x16_t __uninit; | |
12943 | __asm__ ("": "=w"(__uninit)); | |
12944 | return __uninit; | |
12945 | } | |
12946 | ||
12947 | __extension__ extern __inline uint16x8_t | |
12948 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12949 | __arm_vuninitializedq_u16 (void) | |
12950 | { | |
12951 | uint16x8_t __uninit; | |
12952 | __asm__ ("": "=w"(__uninit)); | |
12953 | return __uninit; | |
12954 | } | |
12955 | ||
12956 | __extension__ extern __inline uint32x4_t | |
12957 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12958 | __arm_vuninitializedq_u32 (void) | |
12959 | { | |
12960 | uint32x4_t __uninit; | |
12961 | __asm__ ("": "=w"(__uninit)); | |
12962 | return __uninit; | |
12963 | } | |
12964 | ||
12965 | __extension__ extern __inline uint64x2_t | |
12966 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12967 | __arm_vuninitializedq_u64 (void) | |
12968 | { | |
12969 | uint64x2_t __uninit; | |
12970 | __asm__ ("": "=w"(__uninit)); | |
12971 | return __uninit; | |
12972 | } | |
12973 | ||
12974 | __extension__ extern __inline int8x16_t | |
12975 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12976 | __arm_vuninitializedq_s8 (void) | |
12977 | { | |
12978 | int8x16_t __uninit; | |
12979 | __asm__ ("": "=w"(__uninit)); | |
12980 | return __uninit; | |
12981 | } | |
12982 | ||
12983 | __extension__ extern __inline int16x8_t | |
12984 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12985 | __arm_vuninitializedq_s16 (void) | |
12986 | { | |
12987 | int16x8_t __uninit; | |
12988 | __asm__ ("": "=w"(__uninit)); | |
12989 | return __uninit; | |
12990 | } | |
12991 | ||
12992 | __extension__ extern __inline int32x4_t | |
12993 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12994 | __arm_vuninitializedq_s32 (void) | |
12995 | { | |
12996 | int32x4_t __uninit; | |
12997 | __asm__ ("": "=w"(__uninit)); | |
12998 | return __uninit; | |
12999 | } | |
13000 | ||
13001 | __extension__ extern __inline int64x2_t | |
13002 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13003 | __arm_vuninitializedq_s64 (void) | |
13004 | { | |
13005 | int64x2_t __uninit; | |
13006 | __asm__ ("": "=w"(__uninit)); | |
13007 | return __uninit; | |
13008 | } | |
13009 | ||
13010 | __extension__ extern __inline int16x8_t | |
13011 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13012 | __arm_vreinterpretq_s16_s32 (int32x4_t __a) | |
13013 | { | |
13014 | return (int16x8_t) __a; | |
13015 | } | |
13016 | ||
13017 | __extension__ extern __inline int16x8_t | |
13018 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13019 | __arm_vreinterpretq_s16_s64 (int64x2_t __a) | |
13020 | { | |
13021 | return (int16x8_t) __a; | |
13022 | } | |
13023 | ||
13024 | __extension__ extern __inline int16x8_t | |
13025 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13026 | __arm_vreinterpretq_s16_s8 (int8x16_t __a) | |
13027 | { | |
13028 | return (int16x8_t) __a; | |
13029 | } | |
13030 | ||
13031 | __extension__ extern __inline int16x8_t | |
13032 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13033 | __arm_vreinterpretq_s16_u16 (uint16x8_t __a) | |
13034 | { | |
13035 | return (int16x8_t) __a; | |
13036 | } | |
13037 | ||
13038 | __extension__ extern __inline int16x8_t | |
13039 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13040 | __arm_vreinterpretq_s16_u32 (uint32x4_t __a) | |
13041 | { | |
13042 | return (int16x8_t) __a; | |
13043 | } | |
13044 | ||
13045 | __extension__ extern __inline int16x8_t | |
13046 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13047 | __arm_vreinterpretq_s16_u64 (uint64x2_t __a) | |
13048 | { | |
13049 | return (int16x8_t) __a; | |
13050 | } | |
13051 | ||
13052 | __extension__ extern __inline int16x8_t | |
13053 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13054 | __arm_vreinterpretq_s16_u8 (uint8x16_t __a) | |
13055 | { | |
13056 | return (int16x8_t) __a; | |
13057 | } | |
13058 | ||
13059 | __extension__ extern __inline int32x4_t | |
13060 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13061 | __arm_vreinterpretq_s32_s16 (int16x8_t __a) | |
13062 | { | |
13063 | return (int32x4_t) __a; | |
13064 | } | |
13065 | ||
13066 | __extension__ extern __inline int32x4_t | |
13067 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13068 | __arm_vreinterpretq_s32_s64 (int64x2_t __a) | |
13069 | { | |
13070 | return (int32x4_t) __a; | |
13071 | } | |
13072 | ||
13073 | __extension__ extern __inline int32x4_t | |
13074 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13075 | __arm_vreinterpretq_s32_s8 (int8x16_t __a) | |
13076 | { | |
13077 | return (int32x4_t) __a; | |
13078 | } | |
13079 | ||
13080 | __extension__ extern __inline int32x4_t | |
13081 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13082 | __arm_vreinterpretq_s32_u16 (uint16x8_t __a) | |
13083 | { | |
13084 | return (int32x4_t) __a; | |
13085 | } | |
13086 | ||
13087 | __extension__ extern __inline int32x4_t | |
13088 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13089 | __arm_vreinterpretq_s32_u32 (uint32x4_t __a) | |
13090 | { | |
13091 | return (int32x4_t) __a; | |
13092 | } | |
13093 | ||
13094 | __extension__ extern __inline int32x4_t | |
13095 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13096 | __arm_vreinterpretq_s32_u64 (uint64x2_t __a) | |
13097 | { | |
13098 | return (int32x4_t) __a; | |
13099 | } | |
13100 | ||
13101 | __extension__ extern __inline int32x4_t | |
13102 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13103 | __arm_vreinterpretq_s32_u8 (uint8x16_t __a) | |
13104 | { | |
13105 | return (int32x4_t) __a; | |
13106 | } | |
13107 | ||
13108 | __extension__ extern __inline int64x2_t | |
13109 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13110 | __arm_vreinterpretq_s64_s16 (int16x8_t __a) | |
13111 | { | |
13112 | return (int64x2_t) __a; | |
13113 | } | |
13114 | ||
13115 | __extension__ extern __inline int64x2_t | |
13116 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13117 | __arm_vreinterpretq_s64_s32 (int32x4_t __a) | |
13118 | { | |
13119 | return (int64x2_t) __a; | |
13120 | } | |
13121 | ||
13122 | __extension__ extern __inline int64x2_t | |
13123 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13124 | __arm_vreinterpretq_s64_s8 (int8x16_t __a) | |
13125 | { | |
13126 | return (int64x2_t) __a; | |
13127 | } | |
13128 | ||
13129 | __extension__ extern __inline int64x2_t | |
13130 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13131 | __arm_vreinterpretq_s64_u16 (uint16x8_t __a) | |
13132 | { | |
13133 | return (int64x2_t) __a; | |
13134 | } | |
13135 | ||
13136 | __extension__ extern __inline int64x2_t | |
13137 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13138 | __arm_vreinterpretq_s64_u32 (uint32x4_t __a) | |
13139 | { | |
13140 | return (int64x2_t) __a; | |
13141 | } | |
13142 | ||
13143 | __extension__ extern __inline int64x2_t | |
13144 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13145 | __arm_vreinterpretq_s64_u64 (uint64x2_t __a) | |
13146 | { | |
13147 | return (int64x2_t) __a; | |
13148 | } | |
13149 | ||
13150 | __extension__ extern __inline int64x2_t | |
13151 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13152 | __arm_vreinterpretq_s64_u8 (uint8x16_t __a) | |
13153 | { | |
13154 | return (int64x2_t) __a; | |
13155 | } | |
13156 | ||
/* vreinterpretq_s8_*: reinterpret another 128-bit MVE vector type as
   int8x16_t.  These are plain vector casts: the 128-bit pattern is
   unchanged, only the lane typing differs, so no code is generated.  */

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vreinterpretq_s8_s16 (int16x8_t __a)
{
  return (int8x16_t) __a;
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vreinterpretq_s8_s32 (int32x4_t __a)
{
  return (int8x16_t) __a;
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vreinterpretq_s8_s64 (int64x2_t __a)
{
  return (int8x16_t) __a;
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vreinterpretq_s8_u16 (uint16x8_t __a)
{
  return (int8x16_t) __a;
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vreinterpretq_s8_u32 (uint32x4_t __a)
{
  return (int8x16_t) __a;
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vreinterpretq_s8_u64 (uint64x2_t __a)
{
  return (int8x16_t) __a;
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vreinterpretq_s8_u8 (uint8x16_t __a)
{
  return (int8x16_t) __a;
}
13205 | ||
/* vreinterpretq_u16_*: reinterpret another 128-bit MVE vector type as
   uint16x8_t.  Bit pattern is preserved; no lane-value conversion.  */

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vreinterpretq_u16_s16 (int16x8_t __a)
{
  return (uint16x8_t) __a;
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vreinterpretq_u16_s32 (int32x4_t __a)
{
  return (uint16x8_t) __a;
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vreinterpretq_u16_s64 (int64x2_t __a)
{
  return (uint16x8_t) __a;
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vreinterpretq_u16_s8 (int8x16_t __a)
{
  return (uint16x8_t) __a;
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vreinterpretq_u16_u32 (uint32x4_t __a)
{
  return (uint16x8_t) __a;
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vreinterpretq_u16_u64 (uint64x2_t __a)
{
  return (uint16x8_t) __a;
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vreinterpretq_u16_u8 (uint8x16_t __a)
{
  return (uint16x8_t) __a;
}
13254 | ||
13255 | ||
/* vreinterpretq_u32_*: reinterpret another 128-bit MVE vector type as
   uint32x4_t.  Bit pattern is preserved; no lane-value conversion.  */

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vreinterpretq_u32_s16 (int16x8_t __a)
{
  return (uint32x4_t) __a;
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vreinterpretq_u32_s32 (int32x4_t __a)
{
  return (uint32x4_t) __a;
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vreinterpretq_u32_s64 (int64x2_t __a)
{
  return (uint32x4_t) __a;
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vreinterpretq_u32_s8 (int8x16_t __a)
{
  return (uint32x4_t) __a;
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vreinterpretq_u32_u16 (uint16x8_t __a)
{
  return (uint32x4_t) __a;
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vreinterpretq_u32_u64 (uint64x2_t __a)
{
  return (uint32x4_t) __a;
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vreinterpretq_u32_u8 (uint8x16_t __a)
{
  return (uint32x4_t) __a;
}
13304 | ||
/* vreinterpretq_u64_*: reinterpret another 128-bit MVE vector type as
   uint64x2_t.  Bit pattern is preserved; no lane-value conversion.  */

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vreinterpretq_u64_s16 (int16x8_t __a)
{
  return (uint64x2_t) __a;
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vreinterpretq_u64_s32 (int32x4_t __a)
{
  return (uint64x2_t) __a;
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vreinterpretq_u64_s64 (int64x2_t __a)
{
  return (uint64x2_t) __a;
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vreinterpretq_u64_s8 (int8x16_t __a)
{
  return (uint64x2_t) __a;
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vreinterpretq_u64_u16 (uint16x8_t __a)
{
  return (uint64x2_t) __a;
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vreinterpretq_u64_u32 (uint32x4_t __a)
{
  return (uint64x2_t) __a;
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vreinterpretq_u64_u8 (uint8x16_t __a)
{
  return (uint64x2_t) __a;
}
13353 | ||
/* vreinterpretq_u8_*: reinterpret another 128-bit MVE vector type as
   uint8x16_t.  Bit pattern is preserved; no lane-value conversion.  */

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vreinterpretq_u8_s16 (int16x8_t __a)
{
  return (uint8x16_t) __a;
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vreinterpretq_u8_s32 (int32x4_t __a)
{
  return (uint8x16_t) __a;
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vreinterpretq_u8_s64 (int64x2_t __a)
{
  return (uint8x16_t) __a;
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vreinterpretq_u8_s8 (int8x16_t __a)
{
  return (uint8x16_t) __a;
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vreinterpretq_u8_u16 (uint16x8_t __a)
{
  return (uint8x16_t) __a;
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vreinterpretq_u8_u32 (uint32x4_t __a)
{
  return (uint8x16_t) __a;
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vreinterpretq_u8_u64 (uint64x2_t __a)
{
  return (uint8x16_t) __a;
}
13402 | ||
92f80065 SP |
/* vddupq_m_n_*: predicated decrementing vector duplicate (VDDUP).
   Active lanes (per predicate __p) receive the decrementing sequence
   starting at __a with step __imm; inactive lanes are taken from
   __inactive.  See the ACLE spec for the exact lane ordering and the
   legal values of the __imm immediate.  */

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_m_n_u8 (uint8x16_t __inactive, uint32_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vddupq_m_n_uv16qi (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_m_n_u32 (uint32x4_t __inactive, uint32_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vddupq_m_n_uv4si (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_m_n_u16 (uint16x8_t __inactive, uint32_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vddupq_m_n_uv8hi (__inactive, __a, __imm, __p);
}
13423 | ||
/* vddupq_m_wb_*: as vddupq_m_n_*, but the start value is read from *__a
   and written back.  The scalar is decremented by __imm for every lane
   produced (16, 8 or 4 lanes), so the next call continues the sequence.
   Note the writeback is unconditional, independent of the predicate.  */

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_m_wb_u8 (uint8x16_t __inactive, uint32_t * __a, const int __imm, mve_pred16_t __p)
{
  uint8x16_t __res = __builtin_mve_vddupq_m_n_uv16qi (__inactive, * __a, __imm, __p);
  *__a -= __imm * 16u;  /* 16 lanes consumed.  */
  return __res;
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_m_wb_u16 (uint16x8_t __inactive, uint32_t * __a, const int __imm, mve_pred16_t __p)
{
  uint16x8_t __res = __builtin_mve_vddupq_m_n_uv8hi (__inactive, *__a, __imm, __p);
  *__a -= __imm * 8u;  /* 8 lanes consumed.  */
  return __res;
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_m_wb_u32 (uint32x4_t __inactive, uint32_t * __a, const int __imm, mve_pred16_t __p)
{
  uint32x4_t __res = __builtin_mve_vddupq_m_n_uv4si (__inactive, *__a, __imm, __p);
  *__a -= __imm * 4u;  /* 4 lanes consumed.  */
  return __res;
}
13450 | ||
/* vddupq_n_*: unpredicated decrementing vector duplicate starting at __a
   with immediate step __imm.  */

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_n_u8 (uint32_t __a, const int __imm)
{
  return __builtin_mve_vddupq_n_uv16qi (__a, __imm);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_n_u32 (uint32_t __a, const int __imm)
{
  return __builtin_mve_vddupq_n_uv4si (__a, __imm);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_n_u16 (uint32_t __a, const int __imm)
{
  return __builtin_mve_vddupq_n_uv8hi (__a, __imm);
}
13471 | ||
/* vdwdupq_m_n_*: predicated decrementing-with-wrap vector duplicate
   (VDWDUP).  Like vddupq_m_n_*, with the sequence wrapping at __b
   (per the VDWDUP instruction semantics).  */

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_m_n_u8 (uint8x16_t __inactive, uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vdwdupq_m_n_uv16qi (__inactive, __a, __b, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_m_n_u32 (uint32x4_t __inactive, uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vdwdupq_m_n_uv4si (__inactive, __a, __b, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_m_n_u16 (uint16x8_t __inactive, uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vdwdupq_m_n_uv8hi (__inactive, __a, __b, __imm, __p);
}
13492 | ||
/* vdwdupq_m_wb_*: writeback form of vdwdupq_m_n_*.  Because of the wrap
   at __b the next start value cannot be computed with a simple multiply,
   so a dedicated _wb builtin computes it and the result is stored back
   through __a.  */

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_m_wb_u8 (uint8x16_t __inactive, uint32_t * __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  uint8x16_t __res = __builtin_mve_vdwdupq_m_n_uv16qi (__inactive, *__a, __b, __imm, __p);
  *__a = __builtin_mve_vdwdupq_m_wb_uv16qi (__inactive, *__a, __b, __imm, __p);
  return __res;
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_m_wb_u32 (uint32x4_t __inactive, uint32_t * __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  uint32x4_t __res = __builtin_mve_vdwdupq_m_n_uv4si (__inactive, *__a, __b, __imm, __p);
  *__a = __builtin_mve_vdwdupq_m_wb_uv4si (__inactive, *__a, __b, __imm, __p);
  return __res;
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_m_wb_u16 (uint16x8_t __inactive, uint32_t * __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  uint16x8_t __res = __builtin_mve_vdwdupq_m_n_uv8hi (__inactive, *__a, __b, __imm, __p);
  *__a = __builtin_mve_vdwdupq_m_wb_uv8hi (__inactive, *__a, __b, __imm, __p);
  return __res;
}
13519 | ||
/* vdwdupq_n_*: unpredicated decrementing-with-wrap vector duplicate,
   starting at __a, wrapping at __b, with immediate step __imm.  */

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_n_u8 (uint32_t __a, uint32_t __b, const int __imm)
{
  return __builtin_mve_vdwdupq_n_uv16qi (__a, __b, __imm);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_n_u32 (uint32_t __a, uint32_t __b, const int __imm)
{
  return __builtin_mve_vdwdupq_n_uv4si (__a, __b, __imm);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_n_u16 (uint32_t __a, uint32_t __b, const int __imm)
{
  return __builtin_mve_vdwdupq_n_uv8hi (__a, __b, __imm);
}
13540 | ||
/* vdwdupq_wb_*: unpredicated decrementing-with-wrap duplicate with
   writeback; the dedicated _wb builtin computes the (wrapped) next start
   value, which is stored back through __a.  */

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_wb_u8 (uint32_t * __a, uint32_t __b, const int __imm)
{
  uint8x16_t __res = __builtin_mve_vdwdupq_n_uv16qi (*__a, __b, __imm);
  *__a = __builtin_mve_vdwdupq_wb_uv16qi (*__a, __b, __imm);
  return __res;
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_wb_u32 (uint32_t * __a, uint32_t __b, const int __imm)
{
  uint32x4_t __res = __builtin_mve_vdwdupq_n_uv4si (*__a, __b, __imm);
  *__a = __builtin_mve_vdwdupq_wb_uv4si (*__a, __b, __imm);
  return __res;
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_wb_u16 (uint32_t * __a, uint32_t __b, const int __imm)
{
  uint16x8_t __res = __builtin_mve_vdwdupq_n_uv8hi (*__a, __b, __imm);
  *__a = __builtin_mve_vdwdupq_wb_uv8hi (*__a, __b, __imm);
  return __res;
}
13567 | ||
/* vidupq_m_n_*: predicated incrementing vector duplicate (VIDUP).
   Active lanes receive the incrementing sequence starting at __a with
   step __imm; inactive lanes come from __inactive.  */

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_m_n_u8 (uint8x16_t __inactive, uint32_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vidupq_m_n_uv16qi (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_m_n_u32 (uint32x4_t __inactive, uint32_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vidupq_m_n_uv4si (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_m_n_u16 (uint16x8_t __inactive, uint32_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vidupq_m_n_uv8hi (__inactive, __a, __imm, __p);
}
13588 | ||
/* vidupq_n_u8: unpredicated incrementing vector duplicate starting at
   __a with immediate step __imm.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_n_u8 (uint32_t __a, const int __imm)
{
  return __builtin_mve_vidupq_n_uv16qi (__a, __imm);
}
13595 | ||
/* vidupq_m_wb_*: as vidupq_m_n_*, but the start value is read from *__a
   and written back, incremented by __imm for every lane produced
   (16, 8 or 4 lanes).  The writeback is unconditional.  */

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_m_wb_u8 (uint8x16_t __inactive, uint32_t * __a, const int __imm, mve_pred16_t __p)
{
  uint8x16_t __res = __builtin_mve_vidupq_m_n_uv16qi (__inactive, *__a, __imm, __p);
  *__a += __imm * 16u;  /* 16 lanes consumed.  */
  return __res;
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_m_wb_u16 (uint16x8_t __inactive, uint32_t * __a, const int __imm, mve_pred16_t __p)
{
  uint16x8_t __res = __builtin_mve_vidupq_m_n_uv8hi (__inactive, *__a, __imm, __p);
  *__a += __imm * 8u;  /* 8 lanes consumed.  */
  return __res;
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_m_wb_u32 (uint32x4_t __inactive, uint32_t * __a, const int __imm, mve_pred16_t __p)
{
  uint32x4_t __res = __builtin_mve_vidupq_m_n_uv4si (__inactive, *__a, __imm, __p);
  *__a += __imm * 4u;  /* 4 lanes consumed.  */
  return __res;
}
13622 | ||
/* vidupq_n_u32/u16: unpredicated incrementing vector duplicate starting
   at __a with immediate step __imm.  */

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_n_u32 (uint32_t __a, const int __imm)
{
  return __builtin_mve_vidupq_n_uv4si (__a, __imm);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_n_u16 (uint32_t __a, const int __imm)
{
  return __builtin_mve_vidupq_n_uv8hi (__a, __imm);
}
13636 | ||
/* vidupq_wb_*: unpredicated incrementing duplicate with writeback; *__a
   is advanced by __imm times the lane count (16, 8 or 4).  */

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_wb_u8 (uint32_t * __a, const int __imm)
{
  uint8x16_t __res = __builtin_mve_vidupq_n_uv16qi (*__a, __imm);
  *__a += __imm * 16u;
  return __res;
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_wb_u16 (uint32_t * __a, const int __imm)
{
  uint16x8_t __res = __builtin_mve_vidupq_n_uv8hi (*__a, __imm);
  *__a += __imm * 8u;
  return __res;
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_wb_u32 (uint32_t * __a, const int __imm)
{
  uint32x4_t __res = __builtin_mve_vidupq_n_uv4si (*__a, __imm);
  *__a += __imm * 4u;
  return __res;
}
13663 | ||
/* vddupq_wb_*: unpredicated decrementing duplicate with writeback; *__a
   is decremented by __imm times the lane count (16, 8 or 4).  */

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_wb_u8 (uint32_t * __a, const int __imm)
{
  uint8x16_t __res = __builtin_mve_vddupq_n_uv16qi (*__a, __imm);
  *__a -= __imm * 16u;
  return __res;
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_wb_u16 (uint32_t * __a, const int __imm)
{
  uint16x8_t __res = __builtin_mve_vddupq_n_uv8hi (*__a, __imm);
  *__a -= __imm * 8u;
  return __res;
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_wb_u32 (uint32_t * __a, const int __imm)
{
  uint32x4_t __res = __builtin_mve_vddupq_n_uv4si (*__a, __imm);
  *__a -= __imm * 4u;
  return __res;
}
13690 | ||
/* viwdupq_m_n_*: predicated incrementing-with-wrap vector duplicate
   (VIWDUP).  Like vidupq_m_n_*, with the sequence wrapping at __b.  */

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_m_n_u8 (uint8x16_t __inactive, uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_viwdupq_m_n_uv16qi (__inactive, __a, __b, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_m_n_u32 (uint32x4_t __inactive, uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_viwdupq_m_n_uv4si (__inactive, __a, __b, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_m_n_u16 (uint16x8_t __inactive, uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_viwdupq_m_n_uv8hi (__inactive, __a, __b, __imm, __p);
}
13711 | ||
/* viwdupq_m_wb_*: writeback form of viwdupq_m_n_*; the wrapped next
   start value is computed by the dedicated _wb builtin and stored back
   through __a.  */

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_m_wb_u8 (uint8x16_t __inactive, uint32_t * __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  uint8x16_t __res = __builtin_mve_viwdupq_m_n_uv16qi (__inactive, *__a, __b, __imm, __p);
  *__a = __builtin_mve_viwdupq_m_wb_uv16qi (__inactive, *__a, __b, __imm, __p);
  return __res;
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_m_wb_u32 (uint32x4_t __inactive, uint32_t * __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  uint32x4_t __res = __builtin_mve_viwdupq_m_n_uv4si (__inactive, *__a, __b, __imm, __p);
  *__a = __builtin_mve_viwdupq_m_wb_uv4si (__inactive, *__a, __b, __imm, __p);
  return __res;
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_m_wb_u16 (uint16x8_t __inactive, uint32_t * __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  uint16x8_t __res = __builtin_mve_viwdupq_m_n_uv8hi (__inactive, *__a, __b, __imm, __p);
  *__a = __builtin_mve_viwdupq_m_wb_uv8hi (__inactive, *__a, __b, __imm, __p);
  return __res;
}
13738 | ||
/* viwdupq_n_*: unpredicated incrementing-with-wrap vector duplicate,
   starting at __a, wrapping at __b, with immediate step __imm.  */

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_n_u8 (uint32_t __a, uint32_t __b, const int __imm)
{
  return __builtin_mve_viwdupq_n_uv16qi (__a, __b, __imm);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_n_u32 (uint32_t __a, uint32_t __b, const int __imm)
{
  return __builtin_mve_viwdupq_n_uv4si (__a, __b, __imm);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_n_u16 (uint32_t __a, uint32_t __b, const int __imm)
{
  return __builtin_mve_viwdupq_n_uv8hi (__a, __b, __imm);
}
13759 | ||
/* viwdupq_wb_*: unpredicated incrementing-with-wrap duplicate with
   writeback; the wrapped next start value is computed by the _wb builtin
   and stored back through __a.  */

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_wb_u8 (uint32_t * __a, uint32_t __b, const int __imm)
{
  uint8x16_t __res = __builtin_mve_viwdupq_n_uv16qi (*__a, __b, __imm);
  *__a = __builtin_mve_viwdupq_wb_uv16qi (*__a, __b, __imm);
  return __res;
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_wb_u32 (uint32_t * __a, uint32_t __b, const int __imm)
{
  uint32x4_t __res = __builtin_mve_viwdupq_n_uv4si (*__a, __b, __imm);
  *__a = __builtin_mve_viwdupq_wb_uv4si (*__a, __b, __imm);
  return __res;
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_wb_u16 (uint32_t * __a, uint32_t __b, const int __imm)
{
  uint16x8_t __res = __builtin_mve_viwdupq_n_uv8hi (*__a, __b, __imm);
  *__a = __builtin_mve_viwdupq_wb_uv8hi (*__a, __b, __imm);
  return __res;
}
13786 | ||
41e1a7ff SP |
13787 | __extension__ extern __inline int64x2_t |
13788 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13789 | __arm_vldrdq_gather_base_wb_s64 (uint64x2_t * __addr, const int __offset) | |
13790 | { | |
13791 | int64x2_t | |
13792 | result = __builtin_mve_vldrdq_gather_base_wb_sv2di (*__addr, __offset); | |
13793 | __addr += __offset; | |
13794 | return result; | |
13795 | } | |
13796 | ||
13797 | __extension__ extern __inline uint64x2_t | |
13798 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13799 | __arm_vldrdq_gather_base_wb_u64 (uint64x2_t * __addr, const int __offset) | |
13800 | { | |
13801 | uint64x2_t | |
13802 | result = __builtin_mve_vldrdq_gather_base_wb_uv2di (*__addr, __offset); | |
13803 | __addr += __offset; | |
13804 | return result; | |
13805 | } | |
13806 | ||
13807 | __extension__ extern __inline int64x2_t | |
13808 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13809 | __arm_vldrdq_gather_base_wb_z_s64 (uint64x2_t * __addr, const int __offset, mve_pred16_t __p) | |
13810 | { | |
13811 | int64x2_t | |
13812 | result = __builtin_mve_vldrdq_gather_base_wb_z_sv2di (*__addr, __offset, __p); | |
13813 | __addr += __offset; | |
13814 | return result; | |
13815 | } | |
13816 | ||
13817 | __extension__ extern __inline uint64x2_t | |
13818 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13819 | __arm_vldrdq_gather_base_wb_z_u64 (uint64x2_t * __addr, const int __offset, mve_pred16_t __p) | |
13820 | { | |
13821 | uint64x2_t | |
13822 | result = __builtin_mve_vldrdq_gather_base_wb_z_uv2di (*__addr, __offset, __p); | |
13823 | __addr += __offset; | |
13824 | return result; | |
13825 | } | |
13826 | ||
13827 | __extension__ extern __inline int32x4_t | |
13828 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13829 | __arm_vldrwq_gather_base_wb_s32 (uint32x4_t * __addr, const int __offset) | |
13830 | { | |
13831 | int32x4_t | |
13832 | result = __builtin_mve_vldrwq_gather_base_wb_sv4si (*__addr, __offset); | |
13833 | __addr += __offset; | |
13834 | return result; | |
13835 | } | |
13836 | ||
13837 | __extension__ extern __inline uint32x4_t | |
13838 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13839 | __arm_vldrwq_gather_base_wb_u32 (uint32x4_t * __addr, const int __offset) | |
13840 | { | |
13841 | uint32x4_t | |
13842 | result = __builtin_mve_vldrwq_gather_base_wb_uv4si (*__addr, __offset); | |
13843 | __addr += __offset; | |
13844 | return result; | |
13845 | } | |
13846 | ||
13847 | __extension__ extern __inline int32x4_t | |
13848 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13849 | __arm_vldrwq_gather_base_wb_z_s32 (uint32x4_t * __addr, const int __offset, mve_pred16_t __p) | |
13850 | { | |
13851 | int32x4_t | |
13852 | result = __builtin_mve_vldrwq_gather_base_wb_z_sv4si (*__addr, __offset, __p); | |
13853 | __addr += __offset; | |
13854 | return result; | |
13855 | } | |
13856 | ||
13857 | __extension__ extern __inline uint32x4_t | |
13858 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13859 | __arm_vldrwq_gather_base_wb_z_u32 (uint32x4_t * __addr, const int __offset, mve_pred16_t __p) | |
13860 | { | |
13861 | uint32x4_t | |
13862 | result = __builtin_mve_vldrwq_gather_base_wb_z_uv4si (*__addr, __offset, __p); | |
13863 | __addr += __offset; | |
13864 | return result; | |
13865 | } | |
13866 | ||
13867 | __extension__ extern __inline void | |
13868 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13869 | __arm_vstrdq_scatter_base_wb_s64 (uint64x2_t * __addr, const int __offset, int64x2_t __value) | |
13870 | { | |
13871 | __builtin_mve_vstrdq_scatter_base_wb_sv2di (*__addr, __offset, __value); | |
13872 | __builtin_mve_vstrdq_scatter_base_wb_add_sv2di (*__addr, __offset, *__addr); | |
13873 | } | |
13874 | ||
13875 | __extension__ extern __inline void | |
13876 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13877 | __arm_vstrdq_scatter_base_wb_u64 (uint64x2_t * __addr, const int __offset, uint64x2_t __value) | |
13878 | { | |
13879 | __builtin_mve_vstrdq_scatter_base_wb_uv2di (*__addr, __offset, __value); | |
13880 | __builtin_mve_vstrdq_scatter_base_wb_add_uv2di (*__addr, __offset, *__addr); | |
13881 | } | |
13882 | ||
13883 | __extension__ extern __inline void | |
13884 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13885 | __arm_vstrdq_scatter_base_wb_p_s64 (uint64x2_t * __addr, const int __offset, int64x2_t __value, mve_pred16_t __p) | |
13886 | { | |
13887 | __builtin_mve_vstrdq_scatter_base_wb_p_sv2di (*__addr, __offset, __value, __p); | |
13888 | __builtin_mve_vstrdq_scatter_base_wb_p_add_sv2di (*__addr, __offset, *__addr, __p); | |
13889 | } | |
13890 | ||
13891 | __extension__ extern __inline void | |
13892 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13893 | __arm_vstrdq_scatter_base_wb_p_u64 (uint64x2_t * __addr, const int __offset, uint64x2_t __value, mve_pred16_t __p) | |
13894 | { | |
13895 | __builtin_mve_vstrdq_scatter_base_wb_p_uv2di (*__addr, __offset, __value, __p); | |
13896 | __builtin_mve_vstrdq_scatter_base_wb_p_add_uv2di (*__addr, __offset, *__addr, __p); | |
13897 | } | |
13898 | ||
13899 | __extension__ extern __inline void | |
13900 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13901 | __arm_vstrwq_scatter_base_wb_p_s32 (uint32x4_t * __addr, const int __offset, int32x4_t __value, mve_pred16_t __p) | |
13902 | { | |
13903 | __builtin_mve_vstrwq_scatter_base_wb_p_sv4si (*__addr, __offset, __value, __p); | |
13904 | __builtin_mve_vstrwq_scatter_base_wb_p_add_sv4si (*__addr, __offset, *__addr, __p); | |
13905 | } | |
13906 | ||
13907 | __extension__ extern __inline void | |
13908 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13909 | __arm_vstrwq_scatter_base_wb_p_u32 (uint32x4_t * __addr, const int __offset, uint32x4_t __value, mve_pred16_t __p) | |
13910 | { | |
13911 | __builtin_mve_vstrwq_scatter_base_wb_p_uv4si (*__addr, __offset, __value, __p); | |
13912 | __builtin_mve_vstrwq_scatter_base_wb_p_add_uv4si (*__addr, __offset, *__addr, __p); | |
13913 | } | |
13914 | ||
13915 | __extension__ extern __inline void | |
13916 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13917 | __arm_vstrwq_scatter_base_wb_s32 (uint32x4_t * __addr, const int __offset, int32x4_t __value) | |
13918 | { | |
13919 | __builtin_mve_vstrwq_scatter_base_wb_sv4si (*__addr, __offset, __value); | |
13920 | __builtin_mve_vstrwq_scatter_base_wb_add_sv4si (*__addr, __offset, *__addr); | |
13921 | } | |
13922 | ||
13923 | __extension__ extern __inline void | |
13924 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13925 | __arm_vstrwq_scatter_base_wb_u32 (uint32x4_t * __addr, const int __offset, uint32x4_t __value) | |
13926 | { | |
13927 | __builtin_mve_vstrwq_scatter_base_wb_uv4si (*__addr, __offset, __value); | |
13928 | __builtin_mve_vstrwq_scatter_base_wb_add_uv4si (*__addr, __offset, *__addr); | |
13929 | } | |
13930 | ||
261014a1 SP |
13931 | __extension__ extern __inline uint8x16_t |
13932 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13933 | __arm_vddupq_x_n_u8 (uint32_t __a, const int __imm, mve_pred16_t __p) | |
13934 | { | |
13935 | return __builtin_mve_vddupq_m_n_uv16qi (vuninitializedq_u8 (), __a, __imm, __p); | |
13936 | } | |
f9355dee | 13937 | |
261014a1 | 13938 | __extension__ extern __inline uint16x8_t |
f9355dee | 13939 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 13940 | __arm_vddupq_x_n_u16 (uint32_t __a, const int __imm, mve_pred16_t __p) |
f9355dee | 13941 | { |
261014a1 | 13942 | return __builtin_mve_vddupq_m_n_uv8hi (vuninitializedq_u16 (), __a, __imm, __p); |
f9355dee SP |
13943 | } |
13944 | ||
261014a1 | 13945 | __extension__ extern __inline uint32x4_t |
f9355dee | 13946 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 13947 | __arm_vddupq_x_n_u32 (uint32_t __a, const int __imm, mve_pred16_t __p) |
f9355dee | 13948 | { |
261014a1 | 13949 | return __builtin_mve_vddupq_m_n_uv4si (vuninitializedq_u32 (), __a, __imm, __p); |
f9355dee SP |
13950 | } |
13951 | ||
261014a1 | 13952 | __extension__ extern __inline uint8x16_t |
f9355dee | 13953 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 13954 | __arm_vddupq_x_wb_u8 (uint32_t *__a, const int __imm, mve_pred16_t __p) |
f9355dee | 13955 | { |
261014a1 SP |
13956 | uint8x16_t __arg1 = vuninitializedq_u8 (); |
13957 | uint8x16_t __res = __builtin_mve_vddupq_m_n_uv16qi (__arg1, * __a, __imm, __p); | |
13958 | *__a -= __imm * 16u; | |
13959 | return __res; | |
f9355dee SP |
13960 | } |
13961 | ||
261014a1 | 13962 | __extension__ extern __inline uint16x8_t |
f9355dee | 13963 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 13964 | __arm_vddupq_x_wb_u16 (uint32_t *__a, const int __imm, mve_pred16_t __p) |
f9355dee | 13965 | { |
261014a1 SP |
13966 | uint16x8_t __arg1 = vuninitializedq_u16 (); |
13967 | uint16x8_t __res = __builtin_mve_vddupq_m_n_uv8hi (__arg1, *__a, __imm, __p); | |
13968 | *__a -= __imm * 8u; | |
13969 | return __res; | |
f9355dee SP |
13970 | } |
13971 | ||
261014a1 | 13972 | __extension__ extern __inline uint32x4_t |
f9355dee | 13973 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 13974 | __arm_vddupq_x_wb_u32 (uint32_t *__a, const int __imm, mve_pred16_t __p) |
f9355dee | 13975 | { |
261014a1 SP |
13976 | uint32x4_t __arg1 = vuninitializedq_u32 (); |
13977 | uint32x4_t __res = __builtin_mve_vddupq_m_n_uv4si (__arg1, *__a, __imm, __p); | |
13978 | *__a -= __imm * 4u; | |
13979 | return __res; | |
f9355dee SP |
13980 | } |
13981 | ||
261014a1 | 13982 | __extension__ extern __inline uint8x16_t |
f9355dee | 13983 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 13984 | __arm_vdwdupq_x_n_u8 (uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p) |
f9355dee | 13985 | { |
261014a1 | 13986 | return __builtin_mve_vdwdupq_m_n_uv16qi (vuninitializedq_u8 (), __a, __b, __imm, __p); |
f9355dee SP |
13987 | } |
13988 | ||
261014a1 | 13989 | __extension__ extern __inline uint16x8_t |
f9355dee | 13990 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 13991 | __arm_vdwdupq_x_n_u16 (uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p) |
f9355dee | 13992 | { |
261014a1 | 13993 | return __builtin_mve_vdwdupq_m_n_uv8hi (vuninitializedq_u16 (), __a, __b, __imm, __p); |
f9355dee SP |
13994 | } |
13995 | ||
261014a1 | 13996 | __extension__ extern __inline uint32x4_t |
f9355dee | 13997 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 13998 | __arm_vdwdupq_x_n_u32 (uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p) |
f9355dee | 13999 | { |
261014a1 | 14000 | return __builtin_mve_vdwdupq_m_n_uv4si (vuninitializedq_u32 (), __a, __b, __imm, __p); |
f9355dee SP |
14001 | } |
14002 | ||
261014a1 | 14003 | __extension__ extern __inline uint8x16_t |
f9355dee | 14004 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14005 | __arm_vdwdupq_x_wb_u8 (uint32_t *__a, uint32_t __b, const int __imm, mve_pred16_t __p) |
f9355dee | 14006 | { |
261014a1 SP |
14007 | uint8x16_t __arg1 = vuninitializedq_u8 (); |
14008 | uint8x16_t __res = __builtin_mve_vdwdupq_m_n_uv16qi (__arg1, *__a, __b, __imm, __p); | |
14009 | *__a = __builtin_mve_vdwdupq_m_wb_uv16qi (__arg1, *__a, __b, __imm, __p); | |
14010 | return __res; | |
f9355dee SP |
14011 | } |
14012 | ||
261014a1 | 14013 | __extension__ extern __inline uint16x8_t |
f9355dee | 14014 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14015 | __arm_vdwdupq_x_wb_u16 (uint32_t *__a, uint32_t __b, const int __imm, mve_pred16_t __p) |
f9355dee | 14016 | { |
261014a1 SP |
14017 | uint16x8_t __arg1 = vuninitializedq_u16 (); |
14018 | uint16x8_t __res = __builtin_mve_vdwdupq_m_n_uv8hi (__arg1, *__a, __b, __imm, __p); | |
14019 | *__a = __builtin_mve_vdwdupq_m_wb_uv8hi (__arg1, *__a, __b, __imm, __p); | |
14020 | return __res; | |
f9355dee SP |
14021 | } |
14022 | ||
261014a1 | 14023 | __extension__ extern __inline uint32x4_t |
f9355dee | 14024 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14025 | __arm_vdwdupq_x_wb_u32 (uint32_t *__a, uint32_t __b, const int __imm, mve_pred16_t __p) |
f9355dee | 14026 | { |
261014a1 SP |
14027 | uint32x4_t __arg1 = vuninitializedq_u32 (); |
14028 | uint32x4_t __res = __builtin_mve_vdwdupq_m_n_uv4si (__arg1, *__a, __b, __imm, __p); | |
14029 | *__a = __builtin_mve_vdwdupq_m_wb_uv4si (__arg1, *__a, __b, __imm, __p); | |
14030 | return __res; | |
f9355dee SP |
14031 | } |
14032 | ||
261014a1 | 14033 | __extension__ extern __inline uint8x16_t |
f9355dee | 14034 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14035 | __arm_vidupq_x_n_u8 (uint32_t __a, const int __imm, mve_pred16_t __p) |
f9355dee | 14036 | { |
261014a1 | 14037 | return __builtin_mve_vidupq_m_n_uv16qi (vuninitializedq_u8 (), __a, __imm, __p); |
f9355dee SP |
14038 | } |
14039 | ||
261014a1 | 14040 | __extension__ extern __inline uint16x8_t |
f9355dee | 14041 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14042 | __arm_vidupq_x_n_u16 (uint32_t __a, const int __imm, mve_pred16_t __p) |
f9355dee | 14043 | { |
261014a1 | 14044 | return __builtin_mve_vidupq_m_n_uv8hi (vuninitializedq_u16 (), __a, __imm, __p); |
f9355dee SP |
14045 | } |
14046 | ||
261014a1 | 14047 | __extension__ extern __inline uint32x4_t |
f9355dee | 14048 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14049 | __arm_vidupq_x_n_u32 (uint32_t __a, const int __imm, mve_pred16_t __p) |
f9355dee | 14050 | { |
261014a1 | 14051 | return __builtin_mve_vidupq_m_n_uv4si (vuninitializedq_u32 (), __a, __imm, __p); |
f9355dee SP |
14052 | } |
14053 | ||
261014a1 | 14054 | __extension__ extern __inline uint8x16_t |
f9355dee | 14055 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14056 | __arm_vidupq_x_wb_u8 (uint32_t *__a, const int __imm, mve_pred16_t __p) |
f9355dee | 14057 | { |
261014a1 SP |
14058 | uint8x16_t __arg1 = vuninitializedq_u8 (); |
14059 | uint8x16_t __res = __builtin_mve_vidupq_m_n_uv16qi (__arg1, *__a, __imm, __p); | |
14060 | *__a += __imm * 16u; | |
14061 | return __res; | |
f9355dee SP |
14062 | } |
14063 | ||
261014a1 | 14064 | __extension__ extern __inline uint16x8_t |
f9355dee | 14065 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14066 | __arm_vidupq_x_wb_u16 (uint32_t *__a, const int __imm, mve_pred16_t __p) |
f9355dee | 14067 | { |
261014a1 SP |
14068 | uint16x8_t __arg1 = vuninitializedq_u16 (); |
14069 | uint16x8_t __res = __builtin_mve_vidupq_m_n_uv8hi (__arg1, *__a, __imm, __p); | |
14070 | *__a += __imm * 8u; | |
14071 | return __res; | |
f9355dee SP |
14072 | } |
14073 | ||
261014a1 | 14074 | __extension__ extern __inline uint32x4_t |
f9355dee | 14075 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14076 | __arm_vidupq_x_wb_u32 (uint32_t *__a, const int __imm, mve_pred16_t __p) |
f9355dee | 14077 | { |
261014a1 SP |
14078 | uint32x4_t __arg1 = vuninitializedq_u32 (); |
14079 | uint32x4_t __res = __builtin_mve_vidupq_m_n_uv4si (__arg1, *__a, __imm, __p); | |
14080 | *__a += __imm * 4u; | |
14081 | return __res; | |
f9355dee SP |
14082 | } |
14083 | ||
261014a1 | 14084 | __extension__ extern __inline uint8x16_t |
f9355dee | 14085 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14086 | __arm_viwdupq_x_n_u8 (uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p) |
f9355dee | 14087 | { |
261014a1 | 14088 | return __builtin_mve_viwdupq_m_n_uv16qi (vuninitializedq_u8 (), __a, __b, __imm, __p); |
f9355dee SP |
14089 | } |
14090 | ||
261014a1 | 14091 | __extension__ extern __inline uint16x8_t |
f9355dee | 14092 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14093 | __arm_viwdupq_x_n_u16 (uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p) |
f9355dee | 14094 | { |
261014a1 | 14095 | return __builtin_mve_viwdupq_m_n_uv8hi (vuninitializedq_u16 (), __a, __b, __imm, __p); |
f9355dee SP |
14096 | } |
14097 | ||
261014a1 | 14098 | __extension__ extern __inline uint32x4_t |
f9355dee | 14099 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14100 | __arm_viwdupq_x_n_u32 (uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p) |
f9355dee | 14101 | { |
261014a1 | 14102 | return __builtin_mve_viwdupq_m_n_uv4si (vuninitializedq_u32 (), __a, __b, __imm, __p); |
f9355dee SP |
14103 | } |
14104 | ||
261014a1 | 14105 | __extension__ extern __inline uint8x16_t |
f9355dee | 14106 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14107 | __arm_viwdupq_x_wb_u8 (uint32_t *__a, uint32_t __b, const int __imm, mve_pred16_t __p) |
f9355dee | 14108 | { |
261014a1 SP |
14109 | uint8x16_t __arg1 = vuninitializedq_u8 (); |
14110 | uint8x16_t __res = __builtin_mve_viwdupq_m_n_uv16qi (__arg1, *__a, __b, __imm, __p); | |
14111 | *__a = __builtin_mve_viwdupq_m_wb_uv16qi (__arg1, *__a, __b, __imm, __p); | |
14112 | return __res; | |
f9355dee SP |
14113 | } |
14114 | ||
261014a1 | 14115 | __extension__ extern __inline uint16x8_t |
f9355dee | 14116 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14117 | __arm_viwdupq_x_wb_u16 (uint32_t *__a, uint32_t __b, const int __imm, mve_pred16_t __p) |
f9355dee | 14118 | { |
261014a1 SP |
14119 | uint16x8_t __arg1 = vuninitializedq_u16 (); |
14120 | uint16x8_t __res = __builtin_mve_viwdupq_m_n_uv8hi (__arg1, *__a, __b, __imm, __p); | |
14121 | *__a = __builtin_mve_viwdupq_m_wb_uv8hi (__arg1, *__a, __b, __imm, __p); | |
14122 | return __res; | |
f9355dee SP |
14123 | } |
14124 | ||
261014a1 | 14125 | __extension__ extern __inline uint32x4_t |
f9355dee | 14126 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14127 | __arm_viwdupq_x_wb_u32 (uint32_t *__a, uint32_t __b, const int __imm, mve_pred16_t __p) |
f9355dee | 14128 | { |
261014a1 SP |
14129 | uint32x4_t __arg1 = vuninitializedq_u32 (); |
14130 | uint32x4_t __res = __builtin_mve_viwdupq_m_n_uv4si (__arg1, *__a, __b, __imm, __p); | |
14131 | *__a = __builtin_mve_viwdupq_m_wb_uv4si (__arg1, *__a, __b, __imm, __p); | |
14132 | return __res; | |
f9355dee SP |
14133 | } |
14134 | ||
261014a1 | 14135 | __extension__ extern __inline int8x16_t |
f9355dee | 14136 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14137 | __arm_vdupq_x_n_s8 (int8_t __a, mve_pred16_t __p) |
f9355dee | 14138 | { |
261014a1 | 14139 | return __builtin_mve_vdupq_m_n_sv16qi (vuninitializedq_s8 (), __a, __p); |
f9355dee SP |
14140 | } |
14141 | ||
261014a1 | 14142 | __extension__ extern __inline int16x8_t |
f9355dee | 14143 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14144 | __arm_vdupq_x_n_s16 (int16_t __a, mve_pred16_t __p) |
f9355dee | 14145 | { |
261014a1 | 14146 | return __builtin_mve_vdupq_m_n_sv8hi (vuninitializedq_s16 (), __a, __p); |
f9355dee SP |
14147 | } |
14148 | ||
261014a1 | 14149 | __extension__ extern __inline int32x4_t |
f9355dee | 14150 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14151 | __arm_vdupq_x_n_s32 (int32_t __a, mve_pred16_t __p) |
f9355dee | 14152 | { |
261014a1 | 14153 | return __builtin_mve_vdupq_m_n_sv4si (vuninitializedq_s32 (), __a, __p); |
f9355dee SP |
14154 | } |
14155 | ||
261014a1 | 14156 | __extension__ extern __inline uint8x16_t |
f9355dee | 14157 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14158 | __arm_vdupq_x_n_u8 (uint8_t __a, mve_pred16_t __p) |
f9355dee | 14159 | { |
261014a1 | 14160 | return __builtin_mve_vdupq_m_n_uv16qi (vuninitializedq_u8 (), __a, __p); |
f9355dee SP |
14161 | } |
14162 | ||
261014a1 | 14163 | __extension__ extern __inline uint16x8_t |
f9355dee | 14164 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14165 | __arm_vdupq_x_n_u16 (uint16_t __a, mve_pred16_t __p) |
f9355dee | 14166 | { |
261014a1 | 14167 | return __builtin_mve_vdupq_m_n_uv8hi (vuninitializedq_u16 (), __a, __p); |
f9355dee SP |
14168 | } |
14169 | ||
261014a1 | 14170 | __extension__ extern __inline uint32x4_t |
f9355dee | 14171 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14172 | __arm_vdupq_x_n_u32 (uint32_t __a, mve_pred16_t __p) |
f9355dee | 14173 | { |
261014a1 | 14174 | return __builtin_mve_vdupq_m_n_uv4si (vuninitializedq_u32 (), __a, __p); |
f9355dee SP |
14175 | } |
14176 | ||
261014a1 | 14177 | __extension__ extern __inline int8x16_t |
0dad5b33 | 14178 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14179 | __arm_vminq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) |
0dad5b33 | 14180 | { |
261014a1 | 14181 | return __builtin_mve_vminq_m_sv16qi (vuninitializedq_s8 (), __a, __b, __p); |
0dad5b33 SP |
14182 | } |
14183 | ||
261014a1 | 14184 | __extension__ extern __inline int16x8_t |
0dad5b33 | 14185 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14186 | __arm_vminq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) |
0dad5b33 | 14187 | { |
261014a1 | 14188 | return __builtin_mve_vminq_m_sv8hi (vuninitializedq_s16 (), __a, __b, __p); |
0dad5b33 SP |
14189 | } |
14190 | ||
261014a1 | 14191 | __extension__ extern __inline int32x4_t |
0dad5b33 | 14192 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14193 | __arm_vminq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) |
0dad5b33 | 14194 | { |
261014a1 | 14195 | return __builtin_mve_vminq_m_sv4si (vuninitializedq_s32 (), __a, __b, __p); |
e3678b44 SP |
14196 | } |
14197 | ||
261014a1 | 14198 | __extension__ extern __inline uint8x16_t |
e3678b44 | 14199 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14200 | __arm_vminq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) |
e3678b44 | 14201 | { |
261014a1 | 14202 | return __builtin_mve_vminq_m_uv16qi (vuninitializedq_u8 (), __a, __b, __p); |
0dad5b33 SP |
14203 | } |
14204 | ||
14205 | __extension__ extern __inline uint16x8_t | |
14206 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 14207 | __arm_vminq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) |
0dad5b33 | 14208 | { |
261014a1 | 14209 | return __builtin_mve_vminq_m_uv8hi (vuninitializedq_u16 (), __a, __b, __p); |
0dad5b33 SP |
14210 | } |
14211 | ||
e3678b44 | 14212 | __extension__ extern __inline uint32x4_t |
0dad5b33 | 14213 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14214 | __arm_vminq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) |
0dad5b33 | 14215 | { |
261014a1 | 14216 | return __builtin_mve_vminq_m_uv4si (vuninitializedq_u32 (), __a, __b, __p); |
e3678b44 SP |
14217 | } |
14218 | ||
261014a1 | 14219 | __extension__ extern __inline int8x16_t |
e3678b44 | 14220 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14221 | __arm_vmaxq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) |
e3678b44 | 14222 | { |
261014a1 | 14223 | return __builtin_mve_vmaxq_m_sv16qi (vuninitializedq_s8 (), __a, __b, __p); |
e3678b44 SP |
14224 | } |
14225 | ||
261014a1 | 14226 | __extension__ extern __inline int16x8_t |
e3678b44 | 14227 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14228 | __arm_vmaxq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) |
e3678b44 | 14229 | { |
261014a1 | 14230 | return __builtin_mve_vmaxq_m_sv8hi (vuninitializedq_s16 (), __a, __b, __p); |
0dad5b33 SP |
14231 | } |
14232 | ||
261014a1 | 14233 | __extension__ extern __inline int32x4_t |
0dad5b33 | 14234 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14235 | __arm_vmaxq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) |
0dad5b33 | 14236 | { |
261014a1 SP |
14237 | return __builtin_mve_vmaxq_m_sv4si (vuninitializedq_s32 (), __a, __b, __p); |
14238 | } | |
14239 | ||
14240 | __extension__ extern __inline uint8x16_t | |
14241 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
14242 | __arm_vmaxq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
14243 | { | |
14244 | return __builtin_mve_vmaxq_m_uv16qi (vuninitializedq_u8 (), __a, __b, __p); | |
e3678b44 SP |
14245 | } |
14246 | ||
14247 | __extension__ extern __inline uint16x8_t | |
14248 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 14249 | __arm_vmaxq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) |
e3678b44 | 14250 | { |
261014a1 | 14251 | return __builtin_mve_vmaxq_m_uv8hi (vuninitializedq_u16 (), __a, __b, __p); |
e3678b44 SP |
14252 | } |
14253 | ||
14254 | __extension__ extern __inline uint32x4_t | |
14255 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 14256 | __arm_vmaxq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) |
e3678b44 | 14257 | { |
261014a1 SP |
14258 | return __builtin_mve_vmaxq_m_uv4si (vuninitializedq_u32 (), __a, __b, __p); |
14259 | } | |
14260 | ||
14261 | __extension__ extern __inline int8x16_t | |
14262 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
14263 | __arm_vabdq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
14264 | { | |
14265 | return __builtin_mve_vabdq_m_sv16qi (vuninitializedq_s8 (), __a, __b, __p); | |
e3678b44 SP |
14266 | } |
14267 | ||
14268 | __extension__ extern __inline int16x8_t | |
14269 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 14270 | __arm_vabdq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) |
e3678b44 | 14271 | { |
261014a1 | 14272 | return __builtin_mve_vabdq_m_sv8hi (vuninitializedq_s16 (), __a, __b, __p); |
e3678b44 SP |
14273 | } |
14274 | ||
14275 | __extension__ extern __inline int32x4_t | |
14276 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 14277 | __arm_vabdq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) |
e3678b44 | 14278 | { |
261014a1 | 14279 | return __builtin_mve_vabdq_m_sv4si (vuninitializedq_s32 (), __a, __b, __p); |
e3678b44 SP |
14280 | } |
14281 | ||
261014a1 | 14282 | __extension__ extern __inline uint8x16_t |
e3678b44 | 14283 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14284 | __arm_vabdq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) |
e3678b44 | 14285 | { |
261014a1 | 14286 | return __builtin_mve_vabdq_m_uv16qi (vuninitializedq_u8 (), __a, __b, __p); |
e3678b44 SP |
14287 | } |
14288 | ||
261014a1 | 14289 | __extension__ extern __inline uint16x8_t |
e3678b44 | 14290 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14291 | __arm_vabdq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) |
e3678b44 | 14292 | { |
261014a1 | 14293 | return __builtin_mve_vabdq_m_uv8hi (vuninitializedq_u16 (), __a, __b, __p); |
e3678b44 SP |
14294 | } |
14295 | ||
261014a1 | 14296 | __extension__ extern __inline uint32x4_t |
e3678b44 | 14297 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14298 | __arm_vabdq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) |
e3678b44 | 14299 | { |
261014a1 | 14300 | return __builtin_mve_vabdq_m_uv4si (vuninitializedq_u32 (), __a, __b, __p); |
e3678b44 SP |
14301 | } |
14302 | ||
261014a1 | 14303 | __extension__ extern __inline int8x16_t |
e3678b44 | 14304 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14305 | __arm_vabsq_x_s8 (int8x16_t __a, mve_pred16_t __p) |
e3678b44 | 14306 | { |
261014a1 | 14307 | return __builtin_mve_vabsq_m_sv16qi (vuninitializedq_s8 (), __a, __p); |
e3678b44 SP |
14308 | } |
14309 | ||
14310 | __extension__ extern __inline int16x8_t | |
14311 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 14312 | __arm_vabsq_x_s16 (int16x8_t __a, mve_pred16_t __p) |
e3678b44 | 14313 | { |
261014a1 | 14314 | return __builtin_mve_vabsq_m_sv8hi (vuninitializedq_s16 (), __a, __p); |
e3678b44 SP |
14315 | } |
14316 | ||
14317 | __extension__ extern __inline int32x4_t | |
14318 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 14319 | __arm_vabsq_x_s32 (int32x4_t __a, mve_pred16_t __p) |
e3678b44 | 14320 | { |
261014a1 | 14321 | return __builtin_mve_vabsq_m_sv4si (vuninitializedq_s32 (), __a, __p); |
0dad5b33 SP |
14322 | } |
14323 | ||
261014a1 | 14324 | __extension__ extern __inline int8x16_t |
0dad5b33 | 14325 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14326 | __arm_vaddq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) |
0dad5b33 | 14327 | { |
261014a1 | 14328 | return __builtin_mve_vaddq_m_sv16qi (vuninitializedq_s8 (), __a, __b, __p); |
e3678b44 SP |
14329 | } |
14330 | ||
261014a1 | 14331 | __extension__ extern __inline int16x8_t |
e3678b44 | 14332 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14333 | __arm_vaddq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) |
e3678b44 | 14334 | { |
261014a1 | 14335 | return __builtin_mve_vaddq_m_sv8hi (vuninitializedq_s16 (), __a, __b, __p); |
0dad5b33 SP |
14336 | } |
14337 | ||
261014a1 | 14338 | __extension__ extern __inline int32x4_t |
0dad5b33 | 14339 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14340 | __arm_vaddq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) |
0dad5b33 | 14341 | { |
261014a1 | 14342 | return __builtin_mve_vaddq_m_sv4si (vuninitializedq_s32 (), __a, __b, __p); |
0dad5b33 SP |
14343 | } |
14344 | ||
261014a1 | 14345 | __extension__ extern __inline int8x16_t |
0dad5b33 | 14346 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14347 | __arm_vaddq_x_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p) |
0dad5b33 | 14348 | { |
261014a1 | 14349 | return __builtin_mve_vaddq_m_n_sv16qi (vuninitializedq_s8 (), __a, __b, __p); |
e3678b44 SP |
14350 | } |
14351 | ||
261014a1 | 14352 | __extension__ extern __inline int16x8_t |
e3678b44 | 14353 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14354 | __arm_vaddq_x_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p) |
e3678b44 | 14355 | { |
261014a1 | 14356 | return __builtin_mve_vaddq_m_n_sv8hi (vuninitializedq_s16 (), __a, __b, __p); |
0dad5b33 SP |
14357 | } |
14358 | ||
261014a1 | 14359 | __extension__ extern __inline int32x4_t |
0dad5b33 | 14360 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14361 | __arm_vaddq_x_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p) |
0dad5b33 | 14362 | { |
261014a1 | 14363 | return __builtin_mve_vaddq_m_n_sv4si (vuninitializedq_s32 (), __a, __b, __p); |
0dad5b33 SP |
14364 | } |
14365 | ||
261014a1 | 14366 | __extension__ extern __inline uint8x16_t |
e3678b44 | 14367 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14368 | __arm_vaddq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) |
e3678b44 | 14369 | { |
261014a1 | 14370 | return __builtin_mve_vaddq_m_uv16qi (vuninitializedq_u8 (), __a, __b, __p); |
e3678b44 | 14371 | } |
f9355dee | 14372 | |
261014a1 | 14373 | __extension__ extern __inline uint16x8_t |
e3678b44 | 14374 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14375 | __arm_vaddq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) |
e3678b44 | 14376 | { |
261014a1 | 14377 | return __builtin_mve_vaddq_m_uv8hi (vuninitializedq_u16 (), __a, __b, __p); |
e3678b44 SP |
14378 | } |
14379 | ||
261014a1 | 14380 | __extension__ extern __inline uint32x4_t |
e3678b44 | 14381 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14382 | __arm_vaddq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) |
e3678b44 | 14383 | { |
261014a1 | 14384 | return __builtin_mve_vaddq_m_uv4si (vuninitializedq_u32 (), __a, __b, __p); |
e3678b44 SP |
14385 | } |
14386 | ||
261014a1 | 14387 | __extension__ extern __inline uint8x16_t |
e3678b44 | 14388 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14389 | __arm_vaddq_x_n_u8 (uint8x16_t __a, uint8_t __b, mve_pred16_t __p) |
e3678b44 | 14390 | { |
261014a1 | 14391 | return __builtin_mve_vaddq_m_n_uv16qi (vuninitializedq_u8 (), __a, __b, __p); |
e3678b44 SP |
14392 | } |
14393 | ||
261014a1 | 14394 | __extension__ extern __inline uint16x8_t |
e3678b44 | 14395 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14396 | __arm_vaddq_x_n_u16 (uint16x8_t __a, uint16_t __b, mve_pred16_t __p) |
e3678b44 | 14397 | { |
261014a1 | 14398 | return __builtin_mve_vaddq_m_n_uv8hi (vuninitializedq_u16 (), __a, __b, __p); |
e3678b44 SP |
14399 | } |
14400 | ||
261014a1 | 14401 | __extension__ extern __inline uint32x4_t |
e3678b44 | 14402 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14403 | __arm_vaddq_x_n_u32 (uint32x4_t __a, uint32_t __b, mve_pred16_t __p) |
e3678b44 | 14404 | { |
261014a1 | 14405 | return __builtin_mve_vaddq_m_n_uv4si (vuninitializedq_u32 (), __a, __b, __p); |
e3678b44 SP |
14406 | } |
14407 | ||
261014a1 | 14408 | __extension__ extern __inline int8x16_t |
e3678b44 | 14409 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14410 | __arm_vclsq_x_s8 (int8x16_t __a, mve_pred16_t __p) |
e3678b44 | 14411 | { |
261014a1 | 14412 | return __builtin_mve_vclsq_m_sv16qi (vuninitializedq_s8 (), __a, __p); |
e3678b44 SP |
14413 | } |
14414 | ||
261014a1 | 14415 | __extension__ extern __inline int16x8_t |
e3678b44 | 14416 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14417 | __arm_vclsq_x_s16 (int16x8_t __a, mve_pred16_t __p) |
e3678b44 | 14418 | { |
261014a1 | 14419 | return __builtin_mve_vclsq_m_sv8hi (vuninitializedq_s16 (), __a, __p); |
e3678b44 SP |
14420 | } |
14421 | ||
261014a1 | 14422 | __extension__ extern __inline int32x4_t |
e3678b44 | 14423 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14424 | __arm_vclsq_x_s32 (int32x4_t __a, mve_pred16_t __p) |
e3678b44 | 14425 | { |
261014a1 | 14426 | return __builtin_mve_vclsq_m_sv4si (vuninitializedq_s32 (), __a, __p); |
e3678b44 SP |
14427 | } |
14428 | ||
261014a1 | 14429 | __extension__ extern __inline int8x16_t |
e3678b44 | 14430 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14431 | __arm_vclzq_x_s8 (int8x16_t __a, mve_pred16_t __p) |
e3678b44 | 14432 | { |
261014a1 | 14433 | return __builtin_mve_vclzq_m_sv16qi (vuninitializedq_s8 (), __a, __p); |
e3678b44 SP |
14434 | } |
14435 | ||
261014a1 | 14436 | __extension__ extern __inline int16x8_t |
e3678b44 | 14437 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14438 | __arm_vclzq_x_s16 (int16x8_t __a, mve_pred16_t __p) |
e3678b44 | 14439 | { |
261014a1 | 14440 | return __builtin_mve_vclzq_m_sv8hi (vuninitializedq_s16 (), __a, __p); |
e3678b44 SP |
14441 | } |
14442 | ||
261014a1 | 14443 | __extension__ extern __inline int32x4_t |
e3678b44 | 14444 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14445 | __arm_vclzq_x_s32 (int32x4_t __a, mve_pred16_t __p) |
e3678b44 | 14446 | { |
261014a1 | 14447 | return __builtin_mve_vclzq_m_sv4si (vuninitializedq_s32 (), __a, __p); |
e3678b44 SP |
14448 | } |
14449 | ||
261014a1 | 14450 | __extension__ extern __inline uint8x16_t |
e3678b44 | 14451 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14452 | __arm_vclzq_x_u8 (uint8x16_t __a, mve_pred16_t __p) |
e3678b44 | 14453 | { |
261014a1 | 14454 | return __builtin_mve_vclzq_m_uv16qi (vuninitializedq_u8 (), __a, __p); |
e3678b44 SP |
14455 | } |
14456 | ||
261014a1 | 14457 | __extension__ extern __inline uint16x8_t |
e3678b44 | 14458 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14459 | __arm_vclzq_x_u16 (uint16x8_t __a, mve_pred16_t __p) |
e3678b44 | 14460 | { |
261014a1 | 14461 | return __builtin_mve_vclzq_m_uv8hi (vuninitializedq_u16 (), __a, __p); |
e3678b44 SP |
14462 | } |
14463 | ||
261014a1 | 14464 | __extension__ extern __inline uint32x4_t |
e3678b44 | 14465 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14466 | __arm_vclzq_x_u32 (uint32x4_t __a, mve_pred16_t __p) |
e3678b44 | 14467 | { |
261014a1 | 14468 | return __builtin_mve_vclzq_m_uv4si (vuninitializedq_u32 (), __a, __p); |
e3678b44 SP |
14469 | } |
14470 | ||
/* Predicated "don't-care" (_x) vector negate, signed variants.
   Inactive lanes (predicate bit clear) receive the uninitialized
   vector supplied as the first builtin operand, i.e. their value is
   unspecified.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vnegq_x_s8 (int8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vnegq_m_sv16qi (vuninitializedq_s8 (), __a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vnegq_x_s16 (int16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vnegq_m_sv8hi (vuninitializedq_s16 (), __a, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vnegq_x_s32 (int32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vnegq_m_sv4si (vuninitializedq_s32 (), __a, __p);
}
14491 | ||
/* Predicated "don't-care" (_x) high-half multiply: each active lane
   gets the most-significant half of the widened product __a * __b.
   Inactive lanes are unspecified (uninitialized inactive operand).  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulhq_m_sv16qi (vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulhq_m_sv8hi (vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulhq_m_sv4si (vuninitializedq_s32 (), __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulhq_m_uv16qi (vuninitializedq_u8 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulhq_m_uv8hi (vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulhq_m_uv4si (vuninitializedq_u32 (), __a, __b, __p);
}
14533 | ||
/* Predicated "don't-care" (_x) polynomial multiply-long of the BOTTOM
   (even-numbered) lanes; the result is the double-width type.
   Inactive lanes are unspecified.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_poly_x_p8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmullbq_poly_m_pv16qi (vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_poly_x_p16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmullbq_poly_m_pv8hi (vuninitializedq_u32 (), __a, __b, __p);
}
14547 | ||
/* Predicated "don't-care" (_x) integer multiply-long of the BOTTOM
   (even-numbered) lanes; each result lane is double the input width
   (8->16, 16->32, 32->64).  Inactive lanes are unspecified.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_int_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmullbq_int_m_sv16qi (vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_int_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmullbq_int_m_sv8hi (vuninitializedq_s32 (), __a, __b, __p);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_int_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmullbq_int_m_sv4si (vuninitializedq_s64 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_int_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmullbq_int_m_uv16qi (vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_int_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmullbq_int_m_uv8hi (vuninitializedq_u32 (), __a, __b, __p);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_int_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmullbq_int_m_uv4si (vuninitializedq_u64 (), __a, __b, __p);
}
14589 | ||
/* Predicated "don't-care" (_x) polynomial multiply-long of the TOP
   (odd-numbered) lanes; the result is the double-width type.
   Inactive lanes are unspecified.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_poly_x_p8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulltq_poly_m_pv16qi (vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_poly_x_p16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulltq_poly_m_pv8hi (vuninitializedq_u32 (), __a, __b, __p);
}
14603 | ||
/* Predicated "don't-care" (_x) integer multiply-long of the TOP
   (odd-numbered) lanes; each result lane is double the input width
   (8->16, 16->32, 32->64).  Inactive lanes are unspecified.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulltq_int_m_sv16qi (vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulltq_int_m_sv8hi (vuninitializedq_s32 (), __a, __b, __p);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulltq_int_m_sv4si (vuninitializedq_s64 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulltq_int_m_uv16qi (vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulltq_int_m_uv8hi (vuninitializedq_u32 (), __a, __b, __p);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulltq_int_m_uv4si (vuninitializedq_u64 (), __a, __b, __p);
}
14645 | ||
/* Predicated "don't-care" (_x) element-wise multiply, signed variants.
   The _n forms multiply every lane of __a by the scalar __b.
   Inactive lanes are unspecified (uninitialized inactive operand).  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_sv16qi (vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_sv8hi (vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_sv4si (vuninitializedq_s32 (), __a, __b, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_x_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_n_sv16qi (vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_x_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_n_sv8hi (vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_x_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_n_sv4si (vuninitializedq_s32 (), __a, __b, __p);
}
14687 | ||
/* Predicated "don't-care" (_x) element-wise multiply, unsigned variants.
   The _n forms multiply every lane of __a by the scalar __b.
   Inactive lanes are unspecified (uninitialized inactive operand).  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_uv16qi (vuninitializedq_u8 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_uv8hi (vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_uv4si (vuninitializedq_u32 (), __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_x_n_u8 (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_n_uv16qi (vuninitializedq_u8 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_x_n_u16 (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_n_uv8hi (vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_x_n_u32 (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_n_uv4si (vuninitializedq_u32 (), __a, __b, __p);
}
14729 | ||
/* Predicated "don't-care" (_x) element-wise subtract, signed variants.
   The _n forms subtract the scalar __b from every lane of __a.
   Inactive lanes are unspecified (uninitialized inactive operand).  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vsubq_m_sv16qi (vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vsubq_m_sv8hi (vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vsubq_m_sv4si (vuninitializedq_s32 (), __a, __b, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_x_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vsubq_m_n_sv16qi (vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_x_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vsubq_m_n_sv8hi (vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_x_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vsubq_m_n_sv4si (vuninitializedq_s32 (), __a, __b, __p);
}
14771 | ||
/* Predicated "don't-care" (_x) element-wise subtract, unsigned variants.
   The _n forms subtract the scalar __b from every lane of __a.
   Inactive lanes are unspecified (uninitialized inactive operand).  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vsubq_m_uv16qi (vuninitializedq_u8 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vsubq_m_uv8hi (vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vsubq_m_uv4si (vuninitializedq_u32 (), __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_x_n_u8 (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vsubq_m_n_uv16qi (vuninitializedq_u8 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_x_n_u16 (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vsubq_m_n_uv8hi (vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_x_n_u32 (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vsubq_m_n_uv4si (vuninitializedq_u32 (), __a, __b, __p);
}
14813 | ||
/* Predicated "don't-care" (_x) complex add with 90-degree rotation of
   the second operand.  Inactive lanes are unspecified (uninitialized
   inactive operand).  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcaddq_rot90_m_sv16qi (vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcaddq_rot90_m_sv8hi (vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcaddq_rot90_m_sv4si (vuninitializedq_s32 (), __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcaddq_rot90_m_uv16qi (vuninitializedq_u8 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcaddq_rot90_m_uv8hi (vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcaddq_rot90_m_uv4si (vuninitializedq_u32 (), __a, __b, __p);
}
14855 | ||
/* Predicated "don't-care" (_x) complex add with 270-degree rotation of
   the second operand.  Inactive lanes are unspecified (uninitialized
   inactive operand).  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcaddq_rot270_m_sv16qi (vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcaddq_rot270_m_sv8hi (vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcaddq_rot270_m_sv4si (vuninitializedq_s32 (), __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcaddq_rot270_m_uv16qi (vuninitializedq_u8 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcaddq_rot270_m_uv8hi (vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcaddq_rot270_m_uv4si (vuninitializedq_u32 (), __a, __b, __p);
}
14897 | ||
/* Predicated "don't-care" (_x) halving add, vector-by-scalar forms:
   each active lane receives (lane + __b) >> 1 without intermediate
   overflow.  Inactive lanes are unspecified.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_x_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhaddq_m_n_sv16qi (vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_x_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhaddq_m_n_sv8hi (vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_x_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhaddq_m_n_sv4si (vuninitializedq_s32 (), __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_x_n_u8 (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhaddq_m_n_uv16qi (vuninitializedq_u8 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_x_n_u16 (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhaddq_m_n_uv8hi (vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_x_n_u32 (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhaddq_m_n_uv4si (vuninitializedq_u32 (), __a, __b, __p);
}
14939 | ||
/* Predicated "don't-care" (_x) halving add, vector-by-vector forms.
   Inactive lanes are unspecified (uninitialized inactive operand).  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhaddq_m_sv16qi (vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhaddq_m_sv8hi (vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhaddq_m_sv4si (vuninitializedq_s32 (), __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhaddq_m_uv16qi (vuninitializedq_u8 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhaddq_m_uv8hi (vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhaddq_m_uv4si (vuninitializedq_u32 (), __a, __b, __p);
}
14981 | ||
/* Predicated "don't-care" (_x) halving complex add with 90-degree
   rotation (signed only).  Inactive lanes are unspecified.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhcaddq_rot90_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhcaddq_rot90_m_sv16qi (vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhcaddq_rot90_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhcaddq_rot90_m_sv8hi (vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhcaddq_rot90_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhcaddq_rot90_m_sv4si (vuninitializedq_s32 (), __a, __b, __p);
}
15002 | ||
/* Predicated "don't-care" (_x) halving complex add with 270-degree
   rotation (signed only).  Inactive lanes are unspecified.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhcaddq_rot270_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhcaddq_rot270_m_sv16qi (vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhcaddq_rot270_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhcaddq_rot270_m_sv8hi (vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhcaddq_rot270_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhcaddq_rot270_m_sv4si (vuninitializedq_s32 (), __a, __b, __p);
}
15023 | ||
/* Predicated "don't-care" (_x) halving subtract, vector-by-scalar forms:
   each active lane receives (lane - __b) >> 1 without intermediate
   overflow.  Inactive lanes are unspecified.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_x_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhsubq_m_n_sv16qi (vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_x_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhsubq_m_n_sv8hi (vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_x_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhsubq_m_n_sv4si (vuninitializedq_s32 (), __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_x_n_u8 (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhsubq_m_n_uv16qi (vuninitializedq_u8 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_x_n_u16 (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhsubq_m_n_uv8hi (vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_x_n_u32 (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhsubq_m_n_uv4si (vuninitializedq_u32 (), __a, __b, __p);
}
15065 | ||
/* _x variants of vhsubq with a vector second operand.  As with all _x
   intrinsics here, the result in inactive lanes (predicate __p bit clear)
   is undefined, coming from vuninitializedq_*().  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhsubq_m_sv16qi (vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhsubq_m_sv8hi (vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhsubq_m_sv4si (vuninitializedq_s32 (), __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhsubq_m_uv16qi (vuninitializedq_u8 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhsubq_m_uv8hi (vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhsubq_m_uv4si (vuninitializedq_u32 (), __a, __b, __p);
}
15107 | ||
/* _x variants of vrhaddq (rounding halving add, per the ACLE name).
   Forwarded to the _m builtin with an uninitialized merge vector, so
   inactive lanes are undefined.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrhaddq_m_sv16qi (vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrhaddq_m_sv8hi (vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrhaddq_m_sv4si (vuninitializedq_s32 (), __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrhaddq_m_uv16qi (vuninitializedq_u8 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrhaddq_m_uv8hi (vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrhaddq_m_uv4si (vuninitializedq_u32 (), __a, __b, __p);
}
15149 | ||
/* _x variants of vrmulhq (rounding multiply returning high half, per the
   ACLE name).  Inactive lanes are undefined (uninitialized merge source).  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrmulhq_m_sv16qi (vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrmulhq_m_sv8hi (vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrmulhq_m_sv4si (vuninitializedq_s32 (), __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrmulhq_m_uv16qi (vuninitializedq_u8 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrmulhq_m_uv8hi (vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrmulhq_m_uv4si (vuninitializedq_u32 (), __a, __b, __p);
}
15191 | ||
/* _x variants of vandq (bitwise AND).  Active lanes hold __a & __b;
   inactive lanes (predicate bit clear in __p) are undefined.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vandq_m_sv16qi (vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vandq_m_sv8hi (vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vandq_m_sv4si (vuninitializedq_s32 (), __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vandq_m_uv16qi (vuninitializedq_u8 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vandq_m_uv8hi (vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vandq_m_uv4si (vuninitializedq_u32 (), __a, __b, __p);
}
15233 | ||
/* _x variants of vbicq (bitwise clear, i.e. __a AND NOT __b per the ACLE
   name).  Inactive lanes are undefined.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vbicq_m_sv16qi (vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vbicq_m_sv8hi (vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vbicq_m_sv4si (vuninitializedq_s32 (), __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vbicq_m_uv16qi (vuninitializedq_u8 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vbicq_m_uv8hi (vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vbicq_m_uv4si (vuninitializedq_u32 (), __a, __b, __p);
}
15275 | ||
/* _x variants of vbrsrq (bit-reverse shift right, per the ACLE name).
   Note __b is int32_t for every element width.  Inactive lanes are
   undefined.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq_x_n_s8 (int8x16_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vbrsrq_m_n_sv16qi (vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq_x_n_s16 (int16x8_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vbrsrq_m_n_sv8hi (vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq_x_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vbrsrq_m_n_sv4si (vuninitializedq_s32 (), __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq_x_n_u8 (uint8x16_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vbrsrq_m_n_uv16qi (vuninitializedq_u8 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq_x_n_u16 (uint16x8_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vbrsrq_m_n_uv8hi (vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq_x_n_u32 (uint32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vbrsrq_m_n_uv4si (vuninitializedq_u32 (), __a, __b, __p);
}
15317 | ||
/* _x variants of veorq (bitwise exclusive OR).  Active lanes hold
   __a ^ __b; inactive lanes are undefined.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_veorq_m_sv16qi (vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_veorq_m_sv8hi (vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_veorq_m_sv4si (vuninitializedq_s32 (), __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_veorq_m_uv16qi (vuninitializedq_u8 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_veorq_m_uv8hi (vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_veorq_m_uv4si (vuninitializedq_u32 (), __a, __b, __p);
}
15359 | ||
/* _x variants of vmovlbq (widening move of the bottom-half elements, per
   the ACLE name) — note each returns the next wider vector type.
   Inactive lanes are undefined.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovlbq_x_s8 (int8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vmovlbq_m_sv16qi (vuninitializedq_s16 (), __a, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovlbq_x_s16 (int16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vmovlbq_m_sv8hi (vuninitializedq_s32 (), __a, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovlbq_x_u8 (uint8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vmovlbq_m_uv16qi (vuninitializedq_u16 (), __a, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovlbq_x_u16 (uint16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vmovlbq_m_uv8hi (vuninitializedq_u32 (), __a, __p);
}
15387 | ||
/* _x variants of vmovltq (widening move of the top-half elements, per the
   ACLE name) — the widening counterpart of vmovlbq above.  Inactive lanes
   are undefined.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovltq_x_s8 (int8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vmovltq_m_sv16qi (vuninitializedq_s16 (), __a, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovltq_x_s16 (int16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vmovltq_m_sv8hi (vuninitializedq_s32 (), __a, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovltq_x_u8 (uint8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vmovltq_m_uv16qi (vuninitializedq_u16 (), __a, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovltq_x_u16 (uint16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vmovltq_m_uv8hi (vuninitializedq_u32 (), __a, __p);
}
15415 | ||
/* _x variants of vmvnq (bitwise NOT of each element of __a).  Inactive
   lanes are undefined.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_x_s8 (int8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vmvnq_m_sv16qi (vuninitializedq_s8 (), __a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_x_s16 (int16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vmvnq_m_sv8hi (vuninitializedq_s16 (), __a, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_x_s32 (int32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vmvnq_m_sv4si (vuninitializedq_s32 (), __a, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_x_u8 (uint8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vmvnq_m_uv16qi (vuninitializedq_u8 (), __a, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_x_u16 (uint16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vmvnq_m_uv8hi (vuninitializedq_u16 (), __a, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_x_u32 (uint32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vmvnq_m_uv4si (vuninitializedq_u32 (), __a, __p);
}
15457 | ||
/* _x variants of vmvnq with an immediate operand: each active lane is set
   to the bitwise NOT of __imm (which must be a constant-expression suitable
   for the underlying instruction; the builtin enforces the range).
   Only 16- and 32-bit element forms exist.  Inactive lanes are undefined.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_x_n_s16 (const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vmvnq_m_n_sv8hi (vuninitializedq_s16 (), __imm, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_x_n_s32 (const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vmvnq_m_n_sv4si (vuninitializedq_s32 (), __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_x_n_u16 (const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vmvnq_m_n_uv8hi (vuninitializedq_u16 (), __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_x_n_u32 (const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vmvnq_m_n_uv4si (vuninitializedq_u32 (), __imm, __p);
}
15485 | ||
/* _x variants of vornq (bitwise OR with complement: __a OR NOT __b, per
   the ACLE name).  Inactive lanes are undefined.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vornq_m_sv16qi (vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vornq_m_sv8hi (vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vornq_m_sv4si (vuninitializedq_s32 (), __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vornq_m_uv16qi (vuninitializedq_u8 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vornq_m_uv8hi (vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vornq_m_uv4si (vuninitializedq_u32 (), __a, __b, __p);
}
15527 | ||
/* _x variants of vorrq (bitwise inclusive OR).  Active lanes hold
   __a | __b; inactive lanes are undefined.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vorrq_m_sv16qi (vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vorrq_m_sv8hi (vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vorrq_m_sv4si (vuninitializedq_s32 (), __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vorrq_m_uv16qi (vuninitializedq_u8 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vorrq_m_uv8hi (vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vorrq_m_uv4si (vuninitializedq_u32 (), __a, __b, __p);
}
15569 | ||
/* _x variants of vrev16q (reverse the order of 8-bit elements within each
   16-bit container, per the ACLE name) — defined for 8-bit element types
   only.  Inactive lanes are undefined.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev16q_x_s8 (int8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrev16q_m_sv16qi (vuninitializedq_s8 (), __a, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev16q_x_u8 (uint8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrev16q_m_uv16qi (vuninitializedq_u8 (), __a, __p);
}
15583 | ||
/* _x variants of vrev32q (reverse elements within each 32-bit container,
   per the ACLE name) — defined for 8- and 16-bit element types.  Inactive
   lanes are undefined.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev32q_x_s8 (int8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrev32q_m_sv16qi (vuninitializedq_s8 (), __a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev32q_x_s16 (int16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrev32q_m_sv8hi (vuninitializedq_s16 (), __a, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev32q_x_u8 (uint8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrev32q_m_uv16qi (vuninitializedq_u8 (), __a, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev32q_x_u16 (uint16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrev32q_m_uv8hi (vuninitializedq_u16 (), __a, __p);
}
15611 | ||
/* _x variants of vrev64q (reverse elements within each 64-bit container,
   per the ACLE name) — defined for 8-, 16- and 32-bit element types.
   Inactive lanes are undefined.  (The u32 form follows below.)  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_x_s8 (int8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrev64q_m_sv16qi (vuninitializedq_s8 (), __a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_x_s16 (int16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrev64q_m_sv8hi (vuninitializedq_s16 (), __a, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_x_s32 (int32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrev64q_m_sv4si (vuninitializedq_s32 (), __a, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_x_u8 (uint8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrev64q_m_uv16qi (vuninitializedq_u8 (), __a, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_x_u16 (uint16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrev64q_m_uv8hi (vuninitializedq_u16 (), __a, __p);
}
15646 | ||
15647 | __extension__ extern __inline uint32x4_t | |
15648 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 15649 | __arm_vrev64q_x_u32 (uint32x4_t __a, mve_pred16_t __p) |
e3678b44 | 15650 | { |
261014a1 | 15651 | return __builtin_mve_vrev64q_m_uv4si (vuninitializedq_u32 (), __a, __p); |
e3678b44 SP |
15652 | } |
15653 | ||
/* vrshlq_x variants: "don't-care" predicated rounding shift-left by a
   per-lane signed shift vector __b.  Note that the unsigned variants
   still take a *signed* shift-count vector, matching the merging (_m)
   builtins they wrap; inactive lanes (predicate bit clear in __p) are
   unspecified because the merge source is vuninitializedq_*().  */

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrshlq_m_sv16qi (vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrshlq_m_sv8hi (vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrshlq_m_sv4si (vuninitializedq_s32 (), __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_x_u8 (uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrshlq_m_uv16qi (vuninitializedq_u8 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_x_u16 (uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrshlq_m_uv8hi (vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_x_u32 (uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrshlq_m_uv4si (vuninitializedq_u32 (), __a, __b, __p);
}
15695 | ||
/* vshllbq_x_n variants: "don't-care" predicated widening shift-left
   by immediate of the *bottom* (even-numbered) lanes; the result is
   the next wider element type (8->16, 16->32).  The uninitialized
   merge source makes predicate-clear lanes unspecified.  */

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshllbq_x_n_s8 (int8x16_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshllbq_m_n_sv16qi (vuninitializedq_s16 (), __a, __imm, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshllbq_x_n_s16 (int16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshllbq_m_n_sv8hi (vuninitializedq_s32 (), __a, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshllbq_x_n_u8 (uint8x16_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshllbq_m_n_uv16qi (vuninitializedq_u16 (), __a, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshllbq_x_n_u16 (uint16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshllbq_m_n_uv8hi (vuninitializedq_u32 (), __a, __imm, __p);
}
15723 | ||
/* vshlltq_x_n variants: same as vshllbq_x_n but operating on the
   *top* (odd-numbered) lanes — predicated widening shift-left by
   immediate, widening 8->16 and 16->32, with unspecified contents in
   lanes masked off by __p.  */

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlltq_x_n_s8 (int8x16_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshlltq_m_n_sv16qi (vuninitializedq_s16 (), __a, __imm, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlltq_x_n_s16 (int16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshlltq_m_n_sv8hi (vuninitializedq_s32 (), __a, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlltq_x_n_u8 (uint8x16_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshlltq_m_n_uv16qi (vuninitializedq_u16 (), __a, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlltq_x_n_u16 (uint16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshlltq_m_n_uv8hi (vuninitializedq_u32 (), __a, __imm, __p);
}
15751 | ||
/* vshlq_x variants: "don't-care" predicated shift-left by a per-lane
   signed shift vector __b (unsigned data still shifts by a signed
   count vector, as required by the underlying _m builtins).  Lanes
   with a clear predicate bit in __p get unspecified values.  */

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vshlq_m_sv16qi (vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vshlq_m_sv8hi (vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vshlq_m_sv4si (vuninitializedq_s32 (), __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_x_u8 (uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vshlq_m_uv16qi (vuninitializedq_u8 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_x_u16 (uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vshlq_m_uv8hi (vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_x_u32 (uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vshlq_m_uv4si (vuninitializedq_u32 (), __a, __b, __p);
}
15793 | ||
/* vshlq_x_n variants: "don't-care" predicated shift-left by an
   immediate count __imm (must be a compile-time constant — the
   builtin enforces the valid range).  Predicate-clear lanes are
   unspecified.  */

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_x_n_s8 (int8x16_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshlq_m_n_sv16qi (vuninitializedq_s8 (), __a, __imm, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_x_n_s16 (int16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshlq_m_n_sv8hi (vuninitializedq_s16 (), __a, __imm, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_x_n_s32 (int32x4_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshlq_m_n_sv4si (vuninitializedq_s32 (), __a, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_x_n_u8 (uint8x16_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshlq_m_n_uv16qi (vuninitializedq_u8 (), __a, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_x_n_u16 (uint16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshlq_m_n_uv8hi (vuninitializedq_u16 (), __a, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_x_n_u32 (uint32x4_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshlq_m_n_uv4si (vuninitializedq_u32 (), __a, __imm, __p);
}
15835 | ||
/* vrshrq_x_n variants: "don't-care" predicated rounding shift-right
   by an immediate count.  Inactive lanes (predicate bit clear) are
   unspecified, via the vuninitializedq_*() merge source.  */

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrq_x_n_s8 (int8x16_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vrshrq_m_n_sv16qi (vuninitializedq_s8 (), __a, __imm, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrq_x_n_s16 (int16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vrshrq_m_n_sv8hi (vuninitializedq_s16 (), __a, __imm, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrq_x_n_s32 (int32x4_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vrshrq_m_n_sv4si (vuninitializedq_s32 (), __a, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrq_x_n_u8 (uint8x16_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vrshrq_m_n_uv16qi (vuninitializedq_u8 (), __a, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrq_x_n_u16 (uint16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vrshrq_m_n_uv8hi (vuninitializedq_u16 (), __a, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrq_x_n_u32 (uint32x4_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vrshrq_m_n_uv4si (vuninitializedq_u32 (), __a, __imm, __p);
}
15877 | ||
/* vshrq_x_n variants: "don't-care" predicated (truncating)
   shift-right by an immediate count — the non-rounding counterpart of
   vrshrq_x_n above.  Predicate-clear lanes are unspecified.  */

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrq_x_n_s8 (int8x16_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshrq_m_n_sv16qi (vuninitializedq_s8 (), __a, __imm, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrq_x_n_s16 (int16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshrq_m_n_sv8hi (vuninitializedq_s16 (), __a, __imm, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrq_x_n_s32 (int32x4_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshrq_m_n_sv4si (vuninitializedq_s32 (), __a, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrq_x_n_u8 (uint8x16_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshrq_m_n_uv16qi (vuninitializedq_u8 (), __a, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrq_x_n_u16 (uint16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshrq_m_n_uv8hi (vuninitializedq_u16 (), __a, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrq_x_n_u32 (uint32x4_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshrq_m_n_uv4si (vuninitializedq_u32 (), __a, __imm, __p);
}
15919 | ||
15920 | #if (__ARM_FEATURE_MVE & 2) /* MVE Floating point. */ | |
15921 | ||
/* vst4q float variants: store a 4-vector tuple to memory via the
   VST4 builtins.  The union reinterprets the tuple struct as the
   opaque wide type (__builtin_neon_xi) that the builtin expects —
   a type-pun, not a value conversion.  */

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst4q_f16 (float16_t * __addr, float16x8x4_t __value)
{
  union { float16x8x4_t __i; __builtin_neon_xi __o; } __rv;
  __rv.__i = __value;
  __builtin_mve_vst4qv8hf (__addr, __rv.__o);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst4q_f32 (float32_t * __addr, float32x4x4_t __value)
{
  union { float32x4x4_t __i; __builtin_neon_xi __o; } __rv;
  __rv.__i = __value;
  __builtin_mve_vst4qv4sf (__addr, __rv.__o);
}
15939 | ||
/* Unpredicated float round-to-integral intrinsics, one f16 and one
   f32 variant per rounding flavour (vrndxq, vrndq, vrndpq, vrndnq,
   vrndmq, vrndaq — the suffix selects the rounding mode of the
   underlying VRINT instruction), followed by vrev64q for float
   vectors.  Each is a direct one-to-one wrapper of its builtin.  */

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndxq_f16 (float16x8_t __a)
{
  return __builtin_mve_vrndxq_fv8hf (__a);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndxq_f32 (float32x4_t __a)
{
  return __builtin_mve_vrndxq_fv4sf (__a);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndq_f16 (float16x8_t __a)
{
  return __builtin_mve_vrndq_fv8hf (__a);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndq_f32 (float32x4_t __a)
{
  return __builtin_mve_vrndq_fv4sf (__a);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndpq_f16 (float16x8_t __a)
{
  return __builtin_mve_vrndpq_fv8hf (__a);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndpq_f32 (float32x4_t __a)
{
  return __builtin_mve_vrndpq_fv4sf (__a);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndnq_f16 (float16x8_t __a)
{
  return __builtin_mve_vrndnq_fv8hf (__a);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndnq_f32 (float32x4_t __a)
{
  return __builtin_mve_vrndnq_fv4sf (__a);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndmq_f16 (float16x8_t __a)
{
  return __builtin_mve_vrndmq_fv8hf (__a);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndmq_f32 (float32x4_t __a)
{
  return __builtin_mve_vrndmq_fv4sf (__a);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndaq_f16 (float16x8_t __a)
{
  return __builtin_mve_vrndaq_fv8hf (__a);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndaq_f32 (float32x4_t __a)
{
  return __builtin_mve_vrndaq_fv4sf (__a);
}

/* vrev64q for float vectors.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_f16 (float16x8_t __a)
{
  return __builtin_mve_vrev64q_fv8hf (__a);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_f32 (float32x4_t __a)
{
  return __builtin_mve_vrev64q_fv4sf (__a);
}
16037 | ||
/* Remaining unpredicated float unary intrinsics: lane-wise negate
   (vnegq), scalar broadcast (vdupq_n), absolute value (vabsq), and
   vrev32q for f16 (no f32 form here — a 32-bit element cannot be
   reversed within a 32-bit container).  All direct builtin wrappers.  */

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vnegq_f16 (float16x8_t __a)
{
  return __builtin_mve_vnegq_fv8hf (__a);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vnegq_f32 (float32x4_t __a)
{
  return __builtin_mve_vnegq_fv4sf (__a);
}

/* Broadcast the scalar __a to every lane.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdupq_n_f16 (float16_t __a)
{
  return __builtin_mve_vdupq_n_fv8hf (__a);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdupq_n_f32 (float32_t __a)
{
  return __builtin_mve_vdupq_n_fv4sf (__a);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabsq_f16 (float16x8_t __a)
{
  return __builtin_mve_vabsq_fv8hf (__a);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabsq_f32 (float32x4_t __a)
{
  return __builtin_mve_vabsq_fv4sf (__a);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev32q_f16 (float16x8_t __a)
{
  return __builtin_mve_vrev32q_fv8hf (__a);
}
16086 | ||
/* Float conversion intrinsics.  vcvttq/vcvtbq widen the top/bottom
   f16 half-lanes of __a to an f32 vector; the plain vcvtq forms
   convert between same-width integer and float vectors using the
   current rounding behaviour of the underlying VCVT instruction.  */

/* Widen the "top" (odd-numbered) f16 lanes to f32.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvttq_f32_f16 (float16x8_t __a)
{
  return __builtin_mve_vcvttq_f32_f16v4sf (__a);
}

/* Widen the "bottom" (even-numbered) f16 lanes to f32.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtbq_f32_f16 (float16x8_t __a)
{
  return __builtin_mve_vcvtbq_f32_f16v4sf (__a);
}

/* Integer -> float conversions.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_f16_s16 (int16x8_t __a)
{
  return __builtin_mve_vcvtq_to_f_sv8hf (__a);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_f32_s32 (int32x4_t __a)
{
  return __builtin_mve_vcvtq_to_f_sv4sf (__a);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_f16_u16 (uint16x8_t __a)
{
  return __builtin_mve_vcvtq_to_f_uv8hf (__a);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_f32_u32 (uint32x4_t __a)
{
  return __builtin_mve_vcvtq_to_f_uv4sf (__a);
}

/* Float -> integer conversions.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_s16_f16 (float16x8_t __a)
{
  return __builtin_mve_vcvtq_from_f_sv8hi (__a);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_s32_f32 (float32x4_t __a)
{
  return __builtin_mve_vcvtq_from_f_sv4si (__a);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_u16_f16 (float16x8_t __a)
{
  return __builtin_mve_vcvtq_from_f_uv8hi (__a);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_u32_f32 (float32x4_t __a)
{
  return __builtin_mve_vcvtq_from_f_uv4si (__a);
}
16156 | ||
/* Float -> integer conversions with an explicit rounding mode, per
   the VCVT{A,N,P,M} instructions (the letter selects the rounding
   mode — see the ACLE MVE specification for the exact semantics).
   NOTE(review): this run has no vcvtnq_u32_f32 between
   vcvtnq_u16_f16 and vcvtmq_u16_f16 — verify it is defined elsewhere
   in the header and not accidentally omitted.  */

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtpq_u16_f16 (float16x8_t __a)
{
  return __builtin_mve_vcvtpq_uv8hi (__a);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtpq_u32_f32 (float32x4_t __a)
{
  return __builtin_mve_vcvtpq_uv4si (__a);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtnq_u16_f16 (float16x8_t __a)
{
  return __builtin_mve_vcvtnq_uv8hi (__a);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtmq_u16_f16 (float16x8_t __a)
{
  return __builtin_mve_vcvtmq_uv8hi (__a);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtmq_u32_f32 (float32x4_t __a)
{
  return __builtin_mve_vcvtmq_uv4si (__a);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtaq_u16_f16 (float16x8_t __a)
{
  return __builtin_mve_vcvtaq_uv8hi (__a);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtaq_u32_f32 (float32x4_t __a)
{
  return __builtin_mve_vcvtaq_uv4si (__a);
}

/* Signed-result forms.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtaq_s16_f16 (float16x8_t __a)
{
  return __builtin_mve_vcvtaq_sv8hi (__a);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtaq_s32_f32 (float32x4_t __a)
{
  return __builtin_mve_vcvtaq_sv4si (__a);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtnq_s16_f16 (float16x8_t __a)
{
  return __builtin_mve_vcvtnq_sv8hi (__a);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtnq_s32_f32 (float32x4_t __a)
{
  return __builtin_mve_vcvtnq_sv4si (__a);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtpq_s16_f16 (float16x8_t __a)
{
  return __builtin_mve_vcvtpq_sv8hi (__a);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtpq_s32_f32 (float32x4_t __a)
{
  return __builtin_mve_vcvtpq_sv4si (__a);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtmq_s16_f16 (float16x8_t __a)
{
  return __builtin_mve_vcvtmq_sv8hi (__a);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtmq_s32_f32 (float32x4_t __a)
{
  return __builtin_mve_vcvtmq_sv4si (__a);
}
16261 | ||
/* Non-predicated MVE float intrinsics: vector-by-scalar subtract,
   bit-reverse shift right (VBRSR), and fixed-point to floating-point
   conversion with an immediate number of fraction bits.  */

/* vsubq_n_f16: subtract the scalar __b from every f16 lane of __a.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_n_f16 (float16x8_t __a, float16_t __b)
{
  return __builtin_mve_vsubq_n_fv8hf (__a, __b);
}

/* vsubq_n_f32: subtract the scalar __b from every f32 lane of __a.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_n_f32 (float32x4_t __a, float32_t __b)
{
  return __builtin_mve_vsubq_n_fv4sf (__a, __b);
}

/* vbrsrq_n_f16: VBRSR — per-lane bit-reverse/shift operation controlled
   by the scalar __b (see ACLE for the exact bit-level semantics).  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq_n_f16 (float16x8_t __a, int32_t __b)
{
  return __builtin_mve_vbrsrq_n_fv8hf (__a, __b);
}

/* vbrsrq_n_f32: VBRSR on f32 lanes, controlled by the scalar __b.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq_n_f32 (float32x4_t __a, int32_t __b)
{
  return __builtin_mve_vbrsrq_n_fv4sf (__a, __b);
}

/* vcvtq_n_f16_s16: convert signed 16-bit fixed-point lanes to f16,
   treating __imm6 bits of each lane as fraction bits.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_n_f16_s16 (int16x8_t __a, const int __imm6)
{
  return __builtin_mve_vcvtq_n_to_f_sv8hf (__a, __imm6);
}

/* vcvtq_n_f32_s32: convert signed 32-bit fixed-point lanes to f32,
   with __imm6 fraction bits.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_n_f32_s32 (int32x4_t __a, const int __imm6)
{
  return __builtin_mve_vcvtq_n_to_f_sv4sf (__a, __imm6);
}

/* vcvtq_n_f16_u16: convert unsigned 16-bit fixed-point lanes to f16,
   with __imm6 fraction bits.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_n_f16_u16 (uint16x8_t __a, const int __imm6)
{
  return __builtin_mve_vcvtq_n_to_f_uv8hf (__a, __imm6);
}

/* vcvtq_n_f32_u32: convert unsigned 32-bit fixed-point lanes to f32,
   with __imm6 fraction bits.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_n_f32_u32 (uint32x4_t __a, const int __imm6)
{
  return __builtin_mve_vcvtq_n_to_f_uv4sf (__a, __imm6);
}
16317 | ||
/* vcreateq_f16: assemble a 128-bit f16 vector from two 64-bit scalars;
   __a supplies the low 64 bits, __b the high 64 bits.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcreateq_f16 (uint64_t __a, uint64_t __b)
{
  return __builtin_mve_vcreateq_fv8hf (__a, __b);
}

/* vcreateq_f32: assemble a 128-bit f32 vector; __a low half, __b high.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcreateq_f32 (uint64_t __a, uint64_t __b)
{
  return __builtin_mve_vcreateq_fv4sf (__a, __b);
}

/* vcvtq_n_s16_f16: convert f16 lanes to signed 16-bit fixed-point with
   __imm6 fraction bits.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_n_s16_f16 (float16x8_t __a, const int __imm6)
{
  return __builtin_mve_vcvtq_n_from_f_sv8hi (__a, __imm6);
}

/* vcvtq_n_s32_f32: convert f32 lanes to signed 32-bit fixed-point with
   __imm6 fraction bits.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_n_s32_f32 (float32x4_t __a, const int __imm6)
{
  return __builtin_mve_vcvtq_n_from_f_sv4si (__a, __imm6);
}

/* vcvtq_n_u16_f16: convert f16 lanes to unsigned 16-bit fixed-point
   with __imm6 fraction bits.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_n_u16_f16 (float16x8_t __a, const int __imm6)
{
  return __builtin_mve_vcvtq_n_from_f_uv8hi (__a, __imm6);
}

/* vcvtq_n_u32_f32: convert f32 lanes to unsigned 32-bit fixed-point
   with __imm6 fraction bits.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_n_u32_f32 (float32x4_t __a, const int __imm6)
{
  return __builtin_mve_vcvtq_n_from_f_uv4si (__a, __imm6);
}
16359 | ||
/* f16 vector comparisons.  Each produces a 16-bit MVE predicate mask
   (mve_pred16_t).  The _n variants compare every lane of __a against the
   scalar __b; the plain variants compare lane-for-lane.  */

/* vcmpneq_n_f16: per-lane __a[i] != __b.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_n_f16 (float16x8_t __a, float16_t __b)
{
  return __builtin_mve_vcmpneq_n_fv8hf (__a, __b);
}

/* vcmpneq_f16: per-lane __a[i] != __b[i].  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_f16 (float16x8_t __a, float16x8_t __b)
{
  return __builtin_mve_vcmpneq_fv8hf (__a, __b);
}

/* vcmpltq_n_f16: per-lane __a[i] < __b.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq_n_f16 (float16x8_t __a, float16_t __b)
{
  return __builtin_mve_vcmpltq_n_fv8hf (__a, __b);
}

/* vcmpltq_f16: per-lane __a[i] < __b[i].  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq_f16 (float16x8_t __a, float16x8_t __b)
{
  return __builtin_mve_vcmpltq_fv8hf (__a, __b);
}

/* vcmpleq_n_f16: per-lane __a[i] <= __b.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq_n_f16 (float16x8_t __a, float16_t __b)
{
  return __builtin_mve_vcmpleq_n_fv8hf (__a, __b);
}

/* vcmpleq_f16: per-lane __a[i] <= __b[i].  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq_f16 (float16x8_t __a, float16x8_t __b)
{
  return __builtin_mve_vcmpleq_fv8hf (__a, __b);
}

/* vcmpgtq_n_f16: per-lane __a[i] > __b.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq_n_f16 (float16x8_t __a, float16_t __b)
{
  return __builtin_mve_vcmpgtq_n_fv8hf (__a, __b);
}

/* vcmpgtq_f16: per-lane __a[i] > __b[i].  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq_f16 (float16x8_t __a, float16x8_t __b)
{
  return __builtin_mve_vcmpgtq_fv8hf (__a, __b);
}

/* vcmpgeq_n_f16: per-lane __a[i] >= __b.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq_n_f16 (float16x8_t __a, float16_t __b)
{
  return __builtin_mve_vcmpgeq_n_fv8hf (__a, __b);
}

/* vcmpgeq_f16: per-lane __a[i] >= __b[i].  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq_f16 (float16x8_t __a, float16x8_t __b)
{
  return __builtin_mve_vcmpgeq_fv8hf (__a, __b);
}

/* vcmpeqq_n_f16: per-lane __a[i] == __b.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_n_f16 (float16x8_t __a, float16_t __b)
{
  return __builtin_mve_vcmpeqq_n_fv8hf (__a, __b);
}

/* vcmpeqq_f16: per-lane __a[i] == __b[i].  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_f16 (float16x8_t __a, float16x8_t __b)
{
  return __builtin_mve_vcmpeqq_fv8hf (__a, __b);
}
16443 | ||
/* f16 two-operand arithmetic, logical, and complex-number operations.
   Bitwise ops (vorrq/vornq/veorq/vbicq/vandq) operate on the raw vector
   bit pattern.  The minnm/maxnm family follows IEEE 754 minNum/maxNum
   NaN handling; the *a* forms use absolute values, the *v* forms reduce
   across lanes into a scalar.  */

/* vsubq_f16: per-lane __a[i] - __b[i].  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_f16 (float16x8_t __a, float16x8_t __b)
{
  return __builtin_mve_vsubq_fv8hf (__a, __b);
}

/* vorrq_f16: bitwise OR of the two vectors.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_f16 (float16x8_t __a, float16x8_t __b)
{
  return __builtin_mve_vorrq_fv8hf (__a, __b);
}

/* vornq_f16: bitwise OR of __a with the complement of __b.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_f16 (float16x8_t __a, float16x8_t __b)
{
  return __builtin_mve_vornq_fv8hf (__a, __b);
}

/* vmulq_n_f16: multiply every lane of __a by the scalar __b.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_n_f16 (float16x8_t __a, float16_t __b)
{
  return __builtin_mve_vmulq_n_fv8hf (__a, __b);
}

/* vmulq_f16: per-lane __a[i] * __b[i].  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_f16 (float16x8_t __a, float16x8_t __b)
{
  return __builtin_mve_vmulq_fv8hf (__a, __b);
}

/* vminnmvq_f16: minimum of the scalar __a and all lanes of __b.  */
__extension__ extern __inline float16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmvq_f16 (float16_t __a, float16x8_t __b)
{
  return __builtin_mve_vminnmvq_fv8hf (__a, __b);
}

/* vminnmq_f16: per-lane minNum (__a[i], __b[i]).  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmq_f16 (float16x8_t __a, float16x8_t __b)
{
  return __builtin_mve_vminnmq_fv8hf (__a, __b);
}

/* vminnmavq_f16: minimum of the scalar __a and |__b[i]| across lanes.  */
__extension__ extern __inline float16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmavq_f16 (float16_t __a, float16x8_t __b)
{
  return __builtin_mve_vminnmavq_fv8hf (__a, __b);
}

/* vminnmaq_f16: per-lane minNum of absolute values.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmaq_f16 (float16x8_t __a, float16x8_t __b)
{
  return __builtin_mve_vminnmaq_fv8hf (__a, __b);
}

/* vmaxnmvq_f16: maximum of the scalar __a and all lanes of __b.  */
__extension__ extern __inline float16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmvq_f16 (float16_t __a, float16x8_t __b)
{
  return __builtin_mve_vmaxnmvq_fv8hf (__a, __b);
}

/* vmaxnmq_f16: per-lane maxNum (__a[i], __b[i]).  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmq_f16 (float16x8_t __a, float16x8_t __b)
{
  return __builtin_mve_vmaxnmq_fv8hf (__a, __b);
}

/* vmaxnmavq_f16: maximum of the scalar __a and |__b[i]| across lanes.  */
__extension__ extern __inline float16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmavq_f16 (float16_t __a, float16x8_t __b)
{
  return __builtin_mve_vmaxnmavq_fv8hf (__a, __b);
}

/* vmaxnmaq_f16: per-lane maxNum of absolute values.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmaq_f16 (float16x8_t __a, float16x8_t __b)
{
  return __builtin_mve_vmaxnmaq_fv8hf (__a, __b);
}

/* veorq_f16: bitwise exclusive OR of the two vectors.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq_f16 (float16x8_t __a, float16x8_t __b)
{
  return __builtin_mve_veorq_fv8hf (__a, __b);
}

/* vcmulq_rot90_f16: complex multiply with __b rotated by 90 degrees.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_rot90_f16 (float16x8_t __a, float16x8_t __b)
{
  return __builtin_mve_vcmulq_rot90_fv8hf (__a, __b);
}

/* vcmulq_rot270_f16: complex multiply with __b rotated by 270 degrees.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_rot270_f16 (float16x8_t __a, float16x8_t __b)
{
  return __builtin_mve_vcmulq_rot270_fv8hf (__a, __b);
}

/* vcmulq_rot180_f16: complex multiply with __b rotated by 180 degrees.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_rot180_f16 (float16x8_t __a, float16x8_t __b)
{
  return __builtin_mve_vcmulq_rot180_fv8hf (__a, __b);
}

/* vcmulq_f16: complex multiply, no rotation.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_f16 (float16x8_t __a, float16x8_t __b)
{
  return __builtin_mve_vcmulq_fv8hf (__a, __b);
}

/* vcaddq_rot90_f16: complex add with __b rotated by 90 degrees.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_f16 (float16x8_t __a, float16x8_t __b)
{
  return __builtin_mve_vcaddq_rot90_fv8hf (__a, __b);
}

/* vcaddq_rot270_f16: complex add with __b rotated by 270 degrees.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_f16 (float16x8_t __a, float16x8_t __b)
{
  return __builtin_mve_vcaddq_rot270_fv8hf (__a, __b);
}

/* vbicq_f16: bitwise AND of __a with the complement of __b (bit clear).  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_f16 (float16x8_t __a, float16x8_t __b)
{
  return __builtin_mve_vbicq_fv8hf (__a, __b);
}

/* vandq_f16: bitwise AND of the two vectors.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_f16 (float16x8_t __a, float16x8_t __b)
{
  return __builtin_mve_vandq_fv8hf (__a, __b);
}

/* vaddq_n_f16: add the scalar __b to every lane of __a.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_n_f16 (float16x8_t __a, float16_t __b)
{
  return __builtin_mve_vaddq_n_fv8hf (__a, __b);
}

/* vabdq_f16: per-lane absolute difference |__a[i] - __b[i]|.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq_f16 (float16x8_t __a, float16x8_t __b)
{
  return __builtin_mve_vabdq_fv8hf (__a, __b);
}
16611 | ||
/* f32 vector comparisons — same semantics as the f16 compare family:
   each returns an MVE predicate mask; _n variants compare against the
   scalar __b, plain variants compare lane-for-lane.  */

/* vcmpneq_n_f32: per-lane __a[i] != __b.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_n_f32 (float32x4_t __a, float32_t __b)
{
  return __builtin_mve_vcmpneq_n_fv4sf (__a, __b);
}

/* vcmpneq_f32: per-lane __a[i] != __b[i].  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_mve_vcmpneq_fv4sf (__a, __b);
}

/* vcmpltq_n_f32: per-lane __a[i] < __b.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq_n_f32 (float32x4_t __a, float32_t __b)
{
  return __builtin_mve_vcmpltq_n_fv4sf (__a, __b);
}

/* vcmpltq_f32: per-lane __a[i] < __b[i].  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_mve_vcmpltq_fv4sf (__a, __b);
}

/* vcmpleq_n_f32: per-lane __a[i] <= __b.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq_n_f32 (float32x4_t __a, float32_t __b)
{
  return __builtin_mve_vcmpleq_n_fv4sf (__a, __b);
}

/* vcmpleq_f32: per-lane __a[i] <= __b[i].  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_mve_vcmpleq_fv4sf (__a, __b);
}

/* vcmpgtq_n_f32: per-lane __a[i] > __b.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq_n_f32 (float32x4_t __a, float32_t __b)
{
  return __builtin_mve_vcmpgtq_n_fv4sf (__a, __b);
}

/* vcmpgtq_f32: per-lane __a[i] > __b[i].  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_mve_vcmpgtq_fv4sf (__a, __b);
}

/* vcmpgeq_n_f32: per-lane __a[i] >= __b.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq_n_f32 (float32x4_t __a, float32_t __b)
{
  return __builtin_mve_vcmpgeq_n_fv4sf (__a, __b);
}

/* vcmpgeq_f32: per-lane __a[i] >= __b[i].  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_mve_vcmpgeq_fv4sf (__a, __b);
}

/* vcmpeqq_n_f32: per-lane __a[i] == __b.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_n_f32 (float32x4_t __a, float32_t __b)
{
  return __builtin_mve_vcmpeqq_n_fv4sf (__a, __b);
}

/* vcmpeqq_f32: per-lane __a[i] == __b[i].  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_mve_vcmpeqq_fv4sf (__a, __b);
}
16695 | ||
/* f32 two-operand arithmetic, logical, and complex-number operations —
   the f32 counterparts of the f16 group above.  */

/* vsubq_f32: per-lane __a[i] - __b[i].  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_mve_vsubq_fv4sf (__a, __b);
}

/* vorrq_f32: bitwise OR of the two vectors.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_mve_vorrq_fv4sf (__a, __b);
}

/* vornq_f32: bitwise OR of __a with the complement of __b.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_mve_vornq_fv4sf (__a, __b);
}

/* vmulq_n_f32: multiply every lane of __a by the scalar __b.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_n_f32 (float32x4_t __a, float32_t __b)
{
  return __builtin_mve_vmulq_n_fv4sf (__a, __b);
}

/* vmulq_f32: per-lane __a[i] * __b[i].  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_mve_vmulq_fv4sf (__a, __b);
}

/* vminnmvq_f32: minimum of the scalar __a and all lanes of __b.  */
__extension__ extern __inline float32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmvq_f32 (float32_t __a, float32x4_t __b)
{
  return __builtin_mve_vminnmvq_fv4sf (__a, __b);
}

/* vminnmq_f32: per-lane minNum (__a[i], __b[i]).  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmq_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_mve_vminnmq_fv4sf (__a, __b);
}

/* vminnmavq_f32: minimum of the scalar __a and |__b[i]| across lanes.  */
__extension__ extern __inline float32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmavq_f32 (float32_t __a, float32x4_t __b)
{
  return __builtin_mve_vminnmavq_fv4sf (__a, __b);
}

/* vminnmaq_f32: per-lane minNum of absolute values.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmaq_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_mve_vminnmaq_fv4sf (__a, __b);
}

/* vmaxnmvq_f32: maximum of the scalar __a and all lanes of __b.  */
__extension__ extern __inline float32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmvq_f32 (float32_t __a, float32x4_t __b)
{
  return __builtin_mve_vmaxnmvq_fv4sf (__a, __b);
}

/* vmaxnmq_f32: per-lane maxNum (__a[i], __b[i]).  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmq_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_mve_vmaxnmq_fv4sf (__a, __b);
}

/* vmaxnmavq_f32: maximum of the scalar __a and |__b[i]| across lanes.  */
__extension__ extern __inline float32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmavq_f32 (float32_t __a, float32x4_t __b)
{
  return __builtin_mve_vmaxnmavq_fv4sf (__a, __b);
}

/* vmaxnmaq_f32: per-lane maxNum of absolute values.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmaq_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_mve_vmaxnmaq_fv4sf (__a, __b);
}

/* veorq_f32: bitwise exclusive OR of the two vectors.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_mve_veorq_fv4sf (__a, __b);
}

/* vcmulq_rot90_f32: complex multiply with __b rotated by 90 degrees.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_rot90_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_mve_vcmulq_rot90_fv4sf (__a, __b);
}

/* vcmulq_rot270_f32: complex multiply with __b rotated by 270 degrees.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_rot270_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_mve_vcmulq_rot270_fv4sf (__a, __b);
}

/* vcmulq_rot180_f32: complex multiply with __b rotated by 180 degrees.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_rot180_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_mve_vcmulq_rot180_fv4sf (__a, __b);
}

/* vcmulq_f32: complex multiply, no rotation.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_mve_vcmulq_fv4sf (__a, __b);
}

/* vcaddq_rot90_f32: complex add with __b rotated by 90 degrees.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_mve_vcaddq_rot90_fv4sf (__a, __b);
}

/* vcaddq_rot270_f32: complex add with __b rotated by 270 degrees.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_mve_vcaddq_rot270_fv4sf (__a, __b);
}

/* vbicq_f32: bitwise AND of __a with the complement of __b (bit clear).  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_mve_vbicq_fv4sf (__a, __b);
}

/* vandq_f32: bitwise AND of the two vectors.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_mve_vandq_fv4sf (__a, __b);
}

/* vaddq_n_f32: add the scalar __b to every lane of __a.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_n_f32 (float32x4_t __a, float32_t __b)
{
  return __builtin_mve_vaddq_n_fv4sf (__a, __b);
}

/* vabdq_f32: per-lane absolute difference |__a[i] - __b[i]|.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_mve_vabdq_fv4sf (__a, __b);
}
16863 | ||
/* vcvttq_f16_f32: narrow the f32 lanes of __b to f16, writing the top
   (odd-numbered) f16 lanes of __a; other lanes of __a are preserved.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvttq_f16_f32 (float16x8_t __a, float32x4_t __b)
{
  return __builtin_mve_vcvttq_f16_f32v8hf (__a, __b);
}

/* vcvtbq_f16_f32: narrow the f32 lanes of __b to f16, writing the bottom
   (even-numbered) f16 lanes of __a; other lanes of __a are preserved.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtbq_f16_f32 (float16x8_t __a, float32x4_t __b)
{
  return __builtin_mve_vcvtbq_f16_f32v8hf (__a, __b);
}

/* vcmpeqq_m_f16: predicated per-lane equality compare; only lanes
   enabled by __p participate.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_m_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpeqq_m_fv8hf (__a, __b, __p);
}

/* vcmpeqq_m_f32: predicated per-lane equality compare for f32.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_m_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpeqq_m_fv4sf (__a, __b, __p);
}
16891 | ||
/* Predicated conversions.  For _m intrinsics, lanes disabled by the
   predicate __p take the corresponding lane of __inactive.  vcvtaq
   rounds to nearest with ties away from zero; vcvtq uses the default
   (toward-zero) float-to-int conversion and exact int-to-float.  */

/* vcvtaq_m_s16_f16: predicated f16 -> s16 convert, round-to-nearest-away.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtaq_m_s16_f16 (int16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtaq_m_sv8hi (__inactive, __a, __p);
}

/* vcvtaq_m_u16_f16: predicated f16 -> u16 convert, round-to-nearest-away.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtaq_m_u16_f16 (uint16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtaq_m_uv8hi (__inactive, __a, __p);
}

/* vcvtaq_m_s32_f32: predicated f32 -> s32 convert, round-to-nearest-away.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtaq_m_s32_f32 (int32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtaq_m_sv4si (__inactive, __a, __p);
}

/* vcvtaq_m_u32_f32: predicated f32 -> u32 convert, round-to-nearest-away.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtaq_m_u32_f32 (uint32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtaq_m_uv4si (__inactive, __a, __p);
}

/* vcvtq_m_f16_s16: predicated s16 -> f16 convert.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_m_f16_s16 (float16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_to_f_sv8hf (__inactive, __a, __p);
}

/* vcvtq_m_f16_u16: predicated u16 -> f16 convert.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_m_f16_u16 (float16x8_t __inactive, uint16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_to_f_uv8hf (__inactive, __a, __p);
}

/* vcvtq_m_f32_s32: predicated s32 -> f32 convert.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_m_f32_s32 (float32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_to_f_sv4sf (__inactive, __a, __p);
}

/* vcvtq_m_f32_u32: predicated u32 -> f32 convert.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_m_f32_u32 (float32x4_t __inactive, uint32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_to_f_uv4sf (__inactive, __a, __p);
}
16947 | ||
16948 | ||
/* vcvtbq_m_f16_f32: predicated narrow of __b's f32 lanes into the bottom
   (even) f16 lanes of __a; disabled lanes of the result keep __a.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtbq_m_f16_f32 (float16x8_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcvtbq_m_f16_f32v8hf (__a, __b, __p);
}

/* vcvtbq_m_f32_f16: predicated widen of __a's bottom (even) f16 lanes
   to f32; disabled lanes take __inactive.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtbq_m_f32_f16 (float32x4_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtbq_m_f32_f16v4sf (__inactive, __a, __p);
}

/* vcvttq_m_f16_f32: predicated narrow of __b's f32 lanes into the top
   (odd) f16 lanes of __a.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvttq_m_f16_f32 (float16x8_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcvttq_m_f16_f32v8hf (__a, __b, __p);
}

/* vcvttq_m_f32_f16: predicated widen of __a's top (odd) f16 lanes to
   f32; disabled lanes take __inactive.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvttq_m_f32_f16 (float32x4_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvttq_m_f32_f16v4sf (__inactive, __a, __p);
}

/* vrev32q_m_f16: predicated reversal of the f16 elements within each
   32-bit container; disabled lanes take __inactive.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev32q_m_f16 (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrev32q_m_fv8hf (__inactive, __a, __p);
}
16983 | ||
/* f16 fused multiply-accumulate families.  vcmlaq is the complex
   multiply-accumulate (VCMLA) with __c rotated by 0/90/180/270 degrees
   before accumulation into __a.  vfmaq/vfmsq are fused per-lane
   multiply-add/subtract.  */

/* vcmlaq_f16: __a += complex(__b) * complex(__c), rotation 0.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmlaq_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c)
{
  return __builtin_mve_vcmlaq_fv8hf (__a, __b, __c);
}

/* vcmlaq_rot180_f16: complex multiply-accumulate, __c rotated 180.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmlaq_rot180_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c)
{
  return __builtin_mve_vcmlaq_rot180_fv8hf (__a, __b, __c);
}

/* vcmlaq_rot270_f16: complex multiply-accumulate, __c rotated 270.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmlaq_rot270_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c)
{
  return __builtin_mve_vcmlaq_rot270_fv8hf (__a, __b, __c);
}

/* vcmlaq_rot90_f16: complex multiply-accumulate, __c rotated 90.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmlaq_rot90_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c)
{
  return __builtin_mve_vcmlaq_rot90_fv8hf (__a, __b, __c);
}

/* vfmaq_f16: fused per-lane __a[i] + __b[i] * __c[i].  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vfmaq_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c)
{
  return __builtin_mve_vfmaq_fv8hf (__a, __b, __c);
}

/* vfmaq_n_f16: fused per-lane __a[i] + __b[i] * scalar __c.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vfmaq_n_f16 (float16x8_t __a, float16x8_t __b, float16_t __c)
{
  return __builtin_mve_vfmaq_n_fv8hf (__a, __b, __c);
}

/* vfmasq_n_f16: fused per-lane __a[i] * __b[i] + scalar __c.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vfmasq_n_f16 (float16x8_t __a, float16x8_t __b, float16_t __c)
{
  return __builtin_mve_vfmasq_n_fv8hf (__a, __b, __c);
}

/* vfmsq_f16: fused per-lane __a[i] - __b[i] * __c[i].  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vfmsq_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c)
{
  return __builtin_mve_vfmsq_fv8hf (__a, __b, __c);
}
17039 | ||
17040 | __extension__ extern __inline float16x8_t | |
17041 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17042 | __arm_vabsq_m_f16 (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p) | |
17043 | { | |
17044 | return __builtin_mve_vabsq_m_fv8hf (__inactive, __a, __p); | |
17045 | } | |
17046 | ||
17047 | __extension__ extern __inline int16x8_t | |
17048 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17049 | __arm_vcvtmq_m_s16_f16 (int16x8_t __inactive, float16x8_t __a, mve_pred16_t __p) | |
17050 | { | |
17051 | return __builtin_mve_vcvtmq_m_sv8hi (__inactive, __a, __p); | |
17052 | } | |
17053 | ||
17054 | __extension__ extern __inline int16x8_t | |
17055 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17056 | __arm_vcvtnq_m_s16_f16 (int16x8_t __inactive, float16x8_t __a, mve_pred16_t __p) | |
17057 | { | |
17058 | return __builtin_mve_vcvtnq_m_sv8hi (__inactive, __a, __p); | |
17059 | } | |
17060 | ||
17061 | __extension__ extern __inline int16x8_t | |
17062 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17063 | __arm_vcvtpq_m_s16_f16 (int16x8_t __inactive, float16x8_t __a, mve_pred16_t __p) | |
17064 | { | |
17065 | return __builtin_mve_vcvtpq_m_sv8hi (__inactive, __a, __p); | |
17066 | } | |
17067 | ||
17068 | __extension__ extern __inline int16x8_t | |
17069 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17070 | __arm_vcvtq_m_s16_f16 (int16x8_t __inactive, float16x8_t __a, mve_pred16_t __p) | |
17071 | { | |
17072 | return __builtin_mve_vcvtq_m_from_f_sv8hi (__inactive, __a, __p); | |
17073 | } | |
17074 | ||
17075 | __extension__ extern __inline float16x8_t | |
17076 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17077 | __arm_vdupq_m_n_f16 (float16x8_t __inactive, float16_t __a, mve_pred16_t __p) | |
17078 | { | |
17079 | return __builtin_mve_vdupq_m_n_fv8hf (__inactive, __a, __p); | |
17080 | } | |
17081 | ||
17082 | __extension__ extern __inline float16x8_t | |
17083 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17084 | __arm_vmaxnmaq_m_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p) | |
17085 | { | |
17086 | return __builtin_mve_vmaxnmaq_m_fv8hf (__a, __b, __p); | |
17087 | } | |
17088 | ||
17089 | __extension__ extern __inline float16_t | |
17090 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17091 | __arm_vmaxnmavq_p_f16 (float16_t __a, float16x8_t __b, mve_pred16_t __p) | |
17092 | { | |
17093 | return __builtin_mve_vmaxnmavq_p_fv8hf (__a, __b, __p); | |
17094 | } | |
17095 | ||
17096 | __extension__ extern __inline float16_t | |
17097 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17098 | __arm_vmaxnmvq_p_f16 (float16_t __a, float16x8_t __b, mve_pred16_t __p) | |
17099 | { | |
17100 | return __builtin_mve_vmaxnmvq_p_fv8hf (__a, __b, __p); | |
17101 | } | |
17102 | ||
17103 | __extension__ extern __inline float16x8_t | |
17104 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17105 | __arm_vminnmaq_m_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p) | |
17106 | { | |
17107 | return __builtin_mve_vminnmaq_m_fv8hf (__a, __b, __p); | |
17108 | } | |
17109 | ||
17110 | __extension__ extern __inline float16_t | |
17111 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17112 | __arm_vminnmavq_p_f16 (float16_t __a, float16x8_t __b, mve_pred16_t __p) | |
17113 | { | |
17114 | return __builtin_mve_vminnmavq_p_fv8hf (__a, __b, __p); | |
17115 | } | |
17116 | ||
17117 | __extension__ extern __inline float16_t | |
17118 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17119 | __arm_vminnmvq_p_f16 (float16_t __a, float16x8_t __b, mve_pred16_t __p) | |
17120 | { | |
17121 | return __builtin_mve_vminnmvq_p_fv8hf (__a, __b, __p); | |
17122 | } | |
17123 | ||
17124 | __extension__ extern __inline float16x8_t | |
17125 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17126 | __arm_vnegq_m_f16 (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p) | |
17127 | { | |
17128 | return __builtin_mve_vnegq_m_fv8hf (__inactive, __a, __p); | |
17129 | } | |
17130 | ||
17131 | __extension__ extern __inline float16x8_t | |
17132 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17133 | __arm_vpselq_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p) | |
17134 | { | |
17135 | return __builtin_mve_vpselq_fv8hf (__a, __b, __p); | |
17136 | } | |
17137 | ||
17138 | __extension__ extern __inline float16x8_t | |
17139 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17140 | __arm_vrev64q_m_f16 (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p) | |
17141 | { | |
17142 | return __builtin_mve_vrev64q_m_fv8hf (__inactive, __a, __p); | |
17143 | } | |
17144 | ||
17145 | __extension__ extern __inline float16x8_t | |
17146 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17147 | __arm_vrndaq_m_f16 (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p) | |
17148 | { | |
17149 | return __builtin_mve_vrndaq_m_fv8hf (__inactive, __a, __p); | |
17150 | } | |
17151 | ||
17152 | __extension__ extern __inline float16x8_t | |
17153 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17154 | __arm_vrndmq_m_f16 (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p) | |
17155 | { | |
17156 | return __builtin_mve_vrndmq_m_fv8hf (__inactive, __a, __p); | |
17157 | } | |
17158 | ||
17159 | __extension__ extern __inline float16x8_t | |
17160 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17161 | __arm_vrndnq_m_f16 (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p) | |
17162 | { | |
17163 | return __builtin_mve_vrndnq_m_fv8hf (__inactive, __a, __p); | |
17164 | } | |
17165 | ||
17166 | __extension__ extern __inline float16x8_t | |
17167 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17168 | __arm_vrndpq_m_f16 (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p) | |
17169 | { | |
17170 | return __builtin_mve_vrndpq_m_fv8hf (__inactive, __a, __p); | |
17171 | } | |
17172 | ||
17173 | __extension__ extern __inline float16x8_t | |
17174 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17175 | __arm_vrndq_m_f16 (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p) | |
17176 | { | |
17177 | return __builtin_mve_vrndq_m_fv8hf (__inactive, __a, __p); | |
17178 | } | |
17179 | ||
17180 | __extension__ extern __inline float16x8_t | |
17181 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17182 | __arm_vrndxq_m_f16 (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p) | |
17183 | { | |
17184 | return __builtin_mve_vrndxq_m_fv8hf (__inactive, __a, __p); | |
17185 | } | |
17186 | ||
17187 | __extension__ extern __inline mve_pred16_t | |
17188 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17189 | __arm_vcmpeqq_m_n_f16 (float16x8_t __a, float16_t __b, mve_pred16_t __p) | |
17190 | { | |
17191 | return __builtin_mve_vcmpeqq_m_n_fv8hf (__a, __b, __p); | |
17192 | } | |
17193 | ||
17194 | __extension__ extern __inline mve_pred16_t | |
17195 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17196 | __arm_vcmpgeq_m_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p) | |
17197 | { | |
17198 | return __builtin_mve_vcmpgeq_m_fv8hf (__a, __b, __p); | |
17199 | } | |
17200 | ||
17201 | __extension__ extern __inline mve_pred16_t | |
17202 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17203 | __arm_vcmpgeq_m_n_f16 (float16x8_t __a, float16_t __b, mve_pred16_t __p) | |
17204 | { | |
17205 | return __builtin_mve_vcmpgeq_m_n_fv8hf (__a, __b, __p); | |
17206 | } | |
17207 | ||
17208 | __extension__ extern __inline mve_pred16_t | |
17209 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17210 | __arm_vcmpgtq_m_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p) | |
17211 | { | |
17212 | return __builtin_mve_vcmpgtq_m_fv8hf (__a, __b, __p); | |
17213 | } | |
17214 | ||
17215 | __extension__ extern __inline mve_pred16_t | |
17216 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17217 | __arm_vcmpgtq_m_n_f16 (float16x8_t __a, float16_t __b, mve_pred16_t __p) | |
17218 | { | |
17219 | return __builtin_mve_vcmpgtq_m_n_fv8hf (__a, __b, __p); | |
17220 | } | |
17221 | ||
17222 | __extension__ extern __inline mve_pred16_t | |
17223 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17224 | __arm_vcmpleq_m_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p) | |
17225 | { | |
17226 | return __builtin_mve_vcmpleq_m_fv8hf (__a, __b, __p); | |
17227 | } | |
17228 | ||
17229 | __extension__ extern __inline mve_pred16_t | |
17230 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17231 | __arm_vcmpleq_m_n_f16 (float16x8_t __a, float16_t __b, mve_pred16_t __p) | |
17232 | { | |
17233 | return __builtin_mve_vcmpleq_m_n_fv8hf (__a, __b, __p); | |
17234 | } | |
17235 | ||
17236 | __extension__ extern __inline mve_pred16_t | |
17237 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17238 | __arm_vcmpltq_m_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p) | |
17239 | { | |
17240 | return __builtin_mve_vcmpltq_m_fv8hf (__a, __b, __p); | |
17241 | } | |
17242 | ||
17243 | __extension__ extern __inline mve_pred16_t | |
17244 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17245 | __arm_vcmpltq_m_n_f16 (float16x8_t __a, float16_t __b, mve_pred16_t __p) | |
17246 | { | |
17247 | return __builtin_mve_vcmpltq_m_n_fv8hf (__a, __b, __p); | |
17248 | } | |
17249 | ||
17250 | __extension__ extern __inline mve_pred16_t | |
17251 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17252 | __arm_vcmpneq_m_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p) | |
17253 | { | |
17254 | return __builtin_mve_vcmpneq_m_fv8hf (__a, __b, __p); | |
17255 | } | |
17256 | ||
17257 | __extension__ extern __inline mve_pred16_t | |
17258 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17259 | __arm_vcmpneq_m_n_f16 (float16x8_t __a, float16_t __b, mve_pred16_t __p) | |
17260 | { | |
17261 | return __builtin_mve_vcmpneq_m_n_fv8hf (__a, __b, __p); | |
17262 | } | |
17263 | ||
17264 | __extension__ extern __inline uint16x8_t | |
17265 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17266 | __arm_vcvtmq_m_u16_f16 (uint16x8_t __inactive, float16x8_t __a, mve_pred16_t __p) | |
17267 | { | |
17268 | return __builtin_mve_vcvtmq_m_uv8hi (__inactive, __a, __p); | |
17269 | } | |
17270 | ||
17271 | __extension__ extern __inline uint16x8_t | |
17272 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17273 | __arm_vcvtnq_m_u16_f16 (uint16x8_t __inactive, float16x8_t __a, mve_pred16_t __p) | |
17274 | { | |
17275 | return __builtin_mve_vcvtnq_m_uv8hi (__inactive, __a, __p); | |
17276 | } | |
17277 | ||
17278 | __extension__ extern __inline uint16x8_t | |
17279 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17280 | __arm_vcvtpq_m_u16_f16 (uint16x8_t __inactive, float16x8_t __a, mve_pred16_t __p) | |
17281 | { | |
17282 | return __builtin_mve_vcvtpq_m_uv8hi (__inactive, __a, __p); | |
17283 | } | |
17284 | ||
17285 | __extension__ extern __inline uint16x8_t | |
17286 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17287 | __arm_vcvtq_m_u16_f16 (uint16x8_t __inactive, float16x8_t __a, mve_pred16_t __p) | |
17288 | { | |
17289 | return __builtin_mve_vcvtq_m_from_f_uv8hi (__inactive, __a, __p); | |
17290 | } | |
17291 | ||
17292 | __extension__ extern __inline float32x4_t | |
17293 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17294 | __arm_vcmlaq_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c) | |
17295 | { | |
17296 | return __builtin_mve_vcmlaq_fv4sf (__a, __b, __c); | |
17297 | } | |
17298 | ||
17299 | __extension__ extern __inline float32x4_t | |
17300 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17301 | __arm_vcmlaq_rot180_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c) | |
17302 | { | |
17303 | return __builtin_mve_vcmlaq_rot180_fv4sf (__a, __b, __c); | |
17304 | } | |
17305 | ||
17306 | __extension__ extern __inline float32x4_t | |
17307 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17308 | __arm_vcmlaq_rot270_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c) | |
17309 | { | |
17310 | return __builtin_mve_vcmlaq_rot270_fv4sf (__a, __b, __c); | |
17311 | } | |
17312 | ||
17313 | __extension__ extern __inline float32x4_t | |
17314 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17315 | __arm_vcmlaq_rot90_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c) | |
17316 | { | |
17317 | return __builtin_mve_vcmlaq_rot90_fv4sf (__a, __b, __c); | |
17318 | } | |
17319 | ||
17320 | __extension__ extern __inline float32x4_t | |
17321 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17322 | __arm_vfmaq_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c) | |
17323 | { | |
17324 | return __builtin_mve_vfmaq_fv4sf (__a, __b, __c); | |
17325 | } | |
17326 | ||
17327 | __extension__ extern __inline float32x4_t | |
17328 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17329 | __arm_vfmaq_n_f32 (float32x4_t __a, float32x4_t __b, float32_t __c) | |
17330 | { | |
17331 | return __builtin_mve_vfmaq_n_fv4sf (__a, __b, __c); | |
17332 | } | |
17333 | ||
17334 | __extension__ extern __inline float32x4_t | |
17335 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17336 | __arm_vfmasq_n_f32 (float32x4_t __a, float32x4_t __b, float32_t __c) | |
17337 | { | |
17338 | return __builtin_mve_vfmasq_n_fv4sf (__a, __b, __c); | |
17339 | } | |
17340 | ||
17341 | __extension__ extern __inline float32x4_t | |
17342 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17343 | __arm_vfmsq_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c) | |
17344 | { | |
17345 | return __builtin_mve_vfmsq_fv4sf (__a, __b, __c); | |
17346 | } | |
17347 | ||
17348 | __extension__ extern __inline float32x4_t | |
17349 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17350 | __arm_vabsq_m_f32 (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p) | |
17351 | { | |
17352 | return __builtin_mve_vabsq_m_fv4sf (__inactive, __a, __p); | |
17353 | } | |
17354 | ||
17355 | __extension__ extern __inline int32x4_t | |
17356 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17357 | __arm_vcvtmq_m_s32_f32 (int32x4_t __inactive, float32x4_t __a, mve_pred16_t __p) | |
17358 | { | |
17359 | return __builtin_mve_vcvtmq_m_sv4si (__inactive, __a, __p); | |
17360 | } | |
17361 | ||
17362 | __extension__ extern __inline int32x4_t | |
17363 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17364 | __arm_vcvtnq_m_s32_f32 (int32x4_t __inactive, float32x4_t __a, mve_pred16_t __p) | |
17365 | { | |
17366 | return __builtin_mve_vcvtnq_m_sv4si (__inactive, __a, __p); | |
17367 | } | |
17368 | ||
17369 | __extension__ extern __inline int32x4_t | |
17370 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17371 | __arm_vcvtpq_m_s32_f32 (int32x4_t __inactive, float32x4_t __a, mve_pred16_t __p) | |
17372 | { | |
17373 | return __builtin_mve_vcvtpq_m_sv4si (__inactive, __a, __p); | |
17374 | } | |
17375 | ||
17376 | __extension__ extern __inline int32x4_t | |
17377 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17378 | __arm_vcvtq_m_s32_f32 (int32x4_t __inactive, float32x4_t __a, mve_pred16_t __p) | |
17379 | { | |
17380 | return __builtin_mve_vcvtq_m_from_f_sv4si (__inactive, __a, __p); | |
17381 | } | |
17382 | ||
17383 | __extension__ extern __inline float32x4_t | |
17384 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17385 | __arm_vdupq_m_n_f32 (float32x4_t __inactive, float32_t __a, mve_pred16_t __p) | |
17386 | { | |
17387 | return __builtin_mve_vdupq_m_n_fv4sf (__inactive, __a, __p); | |
17388 | } | |
17389 | ||
17390 | __extension__ extern __inline float32x4_t | |
17391 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17392 | __arm_vmaxnmaq_m_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p) | |
17393 | { | |
17394 | return __builtin_mve_vmaxnmaq_m_fv4sf (__a, __b, __p); | |
17395 | } | |
17396 | ||
17397 | __extension__ extern __inline float32_t | |
17398 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17399 | __arm_vmaxnmavq_p_f32 (float32_t __a, float32x4_t __b, mve_pred16_t __p) | |
17400 | { | |
17401 | return __builtin_mve_vmaxnmavq_p_fv4sf (__a, __b, __p); | |
17402 | } | |
17403 | ||
17404 | __extension__ extern __inline float32_t | |
17405 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17406 | __arm_vmaxnmvq_p_f32 (float32_t __a, float32x4_t __b, mve_pred16_t __p) | |
17407 | { | |
17408 | return __builtin_mve_vmaxnmvq_p_fv4sf (__a, __b, __p); | |
17409 | } | |
17410 | ||
17411 | __extension__ extern __inline float32x4_t | |
17412 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17413 | __arm_vminnmaq_m_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p) | |
17414 | { | |
17415 | return __builtin_mve_vminnmaq_m_fv4sf (__a, __b, __p); | |
17416 | } | |
17417 | ||
17418 | __extension__ extern __inline float32_t | |
17419 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17420 | __arm_vminnmavq_p_f32 (float32_t __a, float32x4_t __b, mve_pred16_t __p) | |
17421 | { | |
17422 | return __builtin_mve_vminnmavq_p_fv4sf (__a, __b, __p); | |
17423 | } | |
17424 | ||
17425 | __extension__ extern __inline float32_t | |
17426 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17427 | __arm_vminnmvq_p_f32 (float32_t __a, float32x4_t __b, mve_pred16_t __p) | |
17428 | { | |
17429 | return __builtin_mve_vminnmvq_p_fv4sf (__a, __b, __p); | |
17430 | } | |
17431 | ||
17432 | __extension__ extern __inline float32x4_t | |
17433 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17434 | __arm_vnegq_m_f32 (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p) | |
17435 | { | |
17436 | return __builtin_mve_vnegq_m_fv4sf (__inactive, __a, __p); | |
17437 | } | |
17438 | ||
17439 | __extension__ extern __inline float32x4_t | |
17440 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17441 | __arm_vpselq_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p) | |
17442 | { | |
17443 | return __builtin_mve_vpselq_fv4sf (__a, __b, __p); | |
17444 | } | |
17445 | ||
17446 | __extension__ extern __inline float32x4_t | |
17447 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17448 | __arm_vrev64q_m_f32 (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p) | |
17449 | { | |
17450 | return __builtin_mve_vrev64q_m_fv4sf (__inactive, __a, __p); | |
17451 | } | |
17452 | ||
17453 | __extension__ extern __inline float32x4_t | |
17454 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17455 | __arm_vrndaq_m_f32 (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p) | |
17456 | { | |
17457 | return __builtin_mve_vrndaq_m_fv4sf (__inactive, __a, __p); | |
17458 | } | |
17459 | ||
17460 | __extension__ extern __inline float32x4_t | |
17461 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17462 | __arm_vrndmq_m_f32 (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p) | |
17463 | { | |
17464 | return __builtin_mve_vrndmq_m_fv4sf (__inactive, __a, __p); | |
17465 | } | |
17466 | ||
17467 | __extension__ extern __inline float32x4_t | |
17468 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17469 | __arm_vrndnq_m_f32 (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p) | |
17470 | { | |
17471 | return __builtin_mve_vrndnq_m_fv4sf (__inactive, __a, __p); | |
17472 | } | |
17473 | ||
17474 | __extension__ extern __inline float32x4_t | |
17475 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17476 | __arm_vrndpq_m_f32 (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p) | |
17477 | { | |
17478 | return __builtin_mve_vrndpq_m_fv4sf (__inactive, __a, __p); | |
17479 | } | |
17480 | ||
17481 | __extension__ extern __inline float32x4_t | |
17482 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17483 | __arm_vrndq_m_f32 (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p) | |
17484 | { | |
17485 | return __builtin_mve_vrndq_m_fv4sf (__inactive, __a, __p); | |
17486 | } | |
17487 | ||
17488 | __extension__ extern __inline float32x4_t | |
17489 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17490 | __arm_vrndxq_m_f32 (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p) | |
17491 | { | |
17492 | return __builtin_mve_vrndxq_m_fv4sf (__inactive, __a, __p); | |
17493 | } | |
17494 | ||
17495 | __extension__ extern __inline mve_pred16_t | |
17496 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17497 | __arm_vcmpeqq_m_n_f32 (float32x4_t __a, float32_t __b, mve_pred16_t __p) | |
17498 | { | |
17499 | return __builtin_mve_vcmpeqq_m_n_fv4sf (__a, __b, __p); | |
17500 | } | |
17501 | ||
17502 | __extension__ extern __inline mve_pred16_t | |
17503 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17504 | __arm_vcmpgeq_m_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p) | |
17505 | { | |
17506 | return __builtin_mve_vcmpgeq_m_fv4sf (__a, __b, __p); | |
17507 | } | |
17508 | ||
17509 | __extension__ extern __inline mve_pred16_t | |
17510 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17511 | __arm_vcmpgeq_m_n_f32 (float32x4_t __a, float32_t __b, mve_pred16_t __p) | |
17512 | { | |
17513 | return __builtin_mve_vcmpgeq_m_n_fv4sf (__a, __b, __p); | |
17514 | } | |
17515 | ||
17516 | __extension__ extern __inline mve_pred16_t | |
17517 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17518 | __arm_vcmpgtq_m_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p) | |
17519 | { | |
17520 | return __builtin_mve_vcmpgtq_m_fv4sf (__a, __b, __p); | |
17521 | } | |
17522 | ||
17523 | __extension__ extern __inline mve_pred16_t | |
17524 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17525 | __arm_vcmpgtq_m_n_f32 (float32x4_t __a, float32_t __b, mve_pred16_t __p) | |
17526 | { | |
17527 | return __builtin_mve_vcmpgtq_m_n_fv4sf (__a, __b, __p); | |
17528 | } | |
17529 | ||
17530 | __extension__ extern __inline mve_pred16_t | |
17531 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17532 | __arm_vcmpleq_m_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p) | |
17533 | { | |
17534 | return __builtin_mve_vcmpleq_m_fv4sf (__a, __b, __p); | |
17535 | } | |
17536 | ||
17537 | __extension__ extern __inline mve_pred16_t | |
17538 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17539 | __arm_vcmpleq_m_n_f32 (float32x4_t __a, float32_t __b, mve_pred16_t __p) | |
17540 | { | |
17541 | return __builtin_mve_vcmpleq_m_n_fv4sf (__a, __b, __p); | |
17542 | } | |
17543 | ||
17544 | __extension__ extern __inline mve_pred16_t | |
17545 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17546 | __arm_vcmpltq_m_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p) | |
17547 | { | |
17548 | return __builtin_mve_vcmpltq_m_fv4sf (__a, __b, __p); | |
17549 | } | |
17550 | ||
17551 | __extension__ extern __inline mve_pred16_t | |
17552 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17553 | __arm_vcmpltq_m_n_f32 (float32x4_t __a, float32_t __b, mve_pred16_t __p) | |
17554 | { | |
17555 | return __builtin_mve_vcmpltq_m_n_fv4sf (__a, __b, __p); | |
17556 | } | |
17557 | ||
17558 | __extension__ extern __inline mve_pred16_t | |
17559 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17560 | __arm_vcmpneq_m_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p) | |
17561 | { | |
17562 | return __builtin_mve_vcmpneq_m_fv4sf (__a, __b, __p); | |
17563 | } | |
17564 | ||
17565 | __extension__ extern __inline mve_pred16_t | |
17566 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17567 | __arm_vcmpneq_m_n_f32 (float32x4_t __a, float32_t __b, mve_pred16_t __p) | |
17568 | { | |
17569 | return __builtin_mve_vcmpneq_m_n_fv4sf (__a, __b, __p); | |
17570 | } | |
17571 | ||
17572 | __extension__ extern __inline uint32x4_t | |
17573 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17574 | __arm_vcvtmq_m_u32_f32 (uint32x4_t __inactive, float32x4_t __a, mve_pred16_t __p) | |
17575 | { | |
17576 | return __builtin_mve_vcvtmq_m_uv4si (__inactive, __a, __p); | |
17577 | } | |
17578 | ||
17579 | __extension__ extern __inline uint32x4_t | |
17580 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17581 | __arm_vcvtnq_m_u32_f32 (uint32x4_t __inactive, float32x4_t __a, mve_pred16_t __p) | |
17582 | { | |
17583 | return __builtin_mve_vcvtnq_m_uv4si (__inactive, __a, __p); | |
17584 | } | |
17585 | ||
17586 | __extension__ extern __inline uint32x4_t | |
17587 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17588 | __arm_vcvtpq_m_u32_f32 (uint32x4_t __inactive, float32x4_t __a, mve_pred16_t __p) | |
17589 | { | |
17590 | return __builtin_mve_vcvtpq_m_uv4si (__inactive, __a, __p); | |
17591 | } | |
17592 | ||
17593 | __extension__ extern __inline uint32x4_t | |
17594 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17595 | __arm_vcvtq_m_u32_f32 (uint32x4_t __inactive, float32x4_t __a, mve_pred16_t __p) | |
17596 | { | |
17597 | return __builtin_mve_vcvtq_m_from_f_uv4si (__inactive, __a, __p); | |
17598 | } | |
17599 | ||
17600 | __extension__ extern __inline float16x8_t | |
17601 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17602 | __arm_vcvtq_m_n_f16_u16 (float16x8_t __inactive, uint16x8_t __a, const int __imm6, mve_pred16_t __p) | |
17603 | { | |
17604 | return __builtin_mve_vcvtq_m_n_to_f_uv8hf (__inactive, __a, __imm6, __p); | |
17605 | } | |
17606 | ||
17607 | __extension__ extern __inline float16x8_t | |
17608 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17609 | __arm_vcvtq_m_n_f16_s16 (float16x8_t __inactive, int16x8_t __a, const int __imm6, mve_pred16_t __p) | |
17610 | { | |
17611 | return __builtin_mve_vcvtq_m_n_to_f_sv8hf (__inactive, __a, __imm6, __p); | |
17612 | } | |
17613 | ||
17614 | __extension__ extern __inline float32x4_t | |
17615 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17616 | __arm_vcvtq_m_n_f32_u32 (float32x4_t __inactive, uint32x4_t __a, const int __imm6, mve_pred16_t __p) | |
17617 | { | |
17618 | return __builtin_mve_vcvtq_m_n_to_f_uv4sf (__inactive, __a, __imm6, __p); | |
17619 | } | |
17620 | ||
17621 | __extension__ extern __inline float32x4_t | |
17622 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17623 | __arm_vcvtq_m_n_f32_s32 (float32x4_t __inactive, int32x4_t __a, const int __imm6, mve_pred16_t __p) | |
17624 | { | |
17625 | return __builtin_mve_vcvtq_m_n_to_f_sv4sf (__inactive, __a, __imm6, __p); | |
17626 | } | |
17627 | ||
17628 | __extension__ extern __inline float32x4_t | |
17629 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17630 | __arm_vabdq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p) | |
17631 | { | |
17632 | return __builtin_mve_vabdq_m_fv4sf (__inactive, __a, __b, __p); | |
17633 | } | |
17634 | ||
17635 | __extension__ extern __inline float16x8_t | |
17636 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17637 | __arm_vabdq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p) | |
17638 | { | |
17639 | return __builtin_mve_vabdq_m_fv8hf (__inactive, __a, __b, __p); | |
17640 | } | |
17641 | ||
17642 | __extension__ extern __inline float32x4_t | |
17643 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17644 | __arm_vaddq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p) | |
17645 | { | |
17646 | return __builtin_mve_vaddq_m_fv4sf (__inactive, __a, __b, __p); | |
17647 | } | |
17648 | ||
17649 | __extension__ extern __inline float16x8_t | |
17650 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17651 | __arm_vaddq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p) | |
17652 | { | |
17653 | return __builtin_mve_vaddq_m_fv8hf (__inactive, __a, __b, __p); | |
17654 | } | |
17655 | ||
17656 | __extension__ extern __inline float32x4_t | |
17657 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17658 | __arm_vaddq_m_n_f32 (float32x4_t __inactive, float32x4_t __a, float32_t __b, mve_pred16_t __p) | |
17659 | { | |
17660 | return __builtin_mve_vaddq_m_n_fv4sf (__inactive, __a, __b, __p); | |
17661 | } | |
17662 | ||
17663 | __extension__ extern __inline float16x8_t | |
17664 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17665 | __arm_vaddq_m_n_f16 (float16x8_t __inactive, float16x8_t __a, float16_t __b, mve_pred16_t __p) | |
17666 | { | |
17667 | return __builtin_mve_vaddq_m_n_fv8hf (__inactive, __a, __b, __p); | |
17668 | } | |
17669 | ||
17670 | __extension__ extern __inline float32x4_t | |
17671 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17672 | __arm_vandq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p) | |
17673 | { | |
17674 | return __builtin_mve_vandq_m_fv4sf (__inactive, __a, __b, __p); | |
17675 | } | |
17676 | ||
17677 | __extension__ extern __inline float16x8_t | |
17678 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17679 | __arm_vandq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p) | |
17680 | { | |
17681 | return __builtin_mve_vandq_m_fv8hf (__inactive, __a, __b, __p); | |
17682 | } | |
17683 | ||
17684 | __extension__ extern __inline float32x4_t | |
17685 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17686 | __arm_vbicq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p) | |
17687 | { | |
17688 | return __builtin_mve_vbicq_m_fv4sf (__inactive, __a, __b, __p); | |
17689 | } | |
17690 | ||
17691 | __extension__ extern __inline float16x8_t | |
17692 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17693 | __arm_vbicq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p) | |
17694 | { | |
17695 | return __builtin_mve_vbicq_m_fv8hf (__inactive, __a, __b, __p); | |
17696 | } | |
17697 | ||
17698 | __extension__ extern __inline float32x4_t | |
17699 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17700 | __arm_vbrsrq_m_n_f32 (float32x4_t __inactive, float32x4_t __a, int32_t __b, mve_pred16_t __p) | |
17701 | { | |
17702 | return __builtin_mve_vbrsrq_m_n_fv4sf (__inactive, __a, __b, __p); | |
17703 | } | |
17704 | ||
17705 | __extension__ extern __inline float16x8_t | |
17706 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17707 | __arm_vbrsrq_m_n_f16 (float16x8_t __inactive, float16x8_t __a, int32_t __b, mve_pred16_t __p) | |
17708 | { | |
17709 | return __builtin_mve_vbrsrq_m_n_fv8hf (__inactive, __a, __b, __p); | |
17710 | } | |
17711 | ||
17712 | __extension__ extern __inline float32x4_t | |
17713 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17714 | __arm_vcaddq_rot270_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p) | |
17715 | { | |
17716 | return __builtin_mve_vcaddq_rot270_m_fv4sf (__inactive, __a, __b, __p); | |
17717 | } | |
17718 | ||
17719 | __extension__ extern __inline float16x8_t | |
17720 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17721 | __arm_vcaddq_rot270_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p) | |
17722 | { | |
17723 | return __builtin_mve_vcaddq_rot270_m_fv8hf (__inactive, __a, __b, __p); | |
17724 | } | |
17725 | ||
17726 | __extension__ extern __inline float32x4_t | |
17727 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17728 | __arm_vcaddq_rot90_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p) | |
17729 | { | |
17730 | return __builtin_mve_vcaddq_rot90_m_fv4sf (__inactive, __a, __b, __p); | |
17731 | } | |
17732 | ||
17733 | __extension__ extern __inline float16x8_t | |
17734 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17735 | __arm_vcaddq_rot90_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p) | |
17736 | { | |
17737 | return __builtin_mve_vcaddq_rot90_m_fv8hf (__inactive, __a, __b, __p); | |
17738 | } | |
17739 | ||
17740 | __extension__ extern __inline float32x4_t | |
17741 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17742 | __arm_vcmlaq_m_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c, mve_pred16_t __p) | |
17743 | { | |
17744 | return __builtin_mve_vcmlaq_m_fv4sf (__a, __b, __c, __p); | |
17745 | } | |
17746 | ||
17747 | __extension__ extern __inline float16x8_t | |
17748 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17749 | __arm_vcmlaq_m_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c, mve_pred16_t __p) | |
17750 | { | |
17751 | return __builtin_mve_vcmlaq_m_fv8hf (__a, __b, __c, __p); | |
17752 | } | |
17753 | ||
17754 | __extension__ extern __inline float32x4_t | |
17755 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17756 | __arm_vcmlaq_rot180_m_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c, mve_pred16_t __p) | |
17757 | { | |
17758 | return __builtin_mve_vcmlaq_rot180_m_fv4sf (__a, __b, __c, __p); | |
17759 | } | |
17760 | ||
17761 | __extension__ extern __inline float16x8_t | |
17762 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17763 | __arm_vcmlaq_rot180_m_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c, mve_pred16_t __p) | |
17764 | { | |
17765 | return __builtin_mve_vcmlaq_rot180_m_fv8hf (__a, __b, __c, __p); | |
17766 | } | |
17767 | ||
17768 | __extension__ extern __inline float32x4_t | |
17769 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17770 | __arm_vcmlaq_rot270_m_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c, mve_pred16_t __p) | |
17771 | { | |
17772 | return __builtin_mve_vcmlaq_rot270_m_fv4sf (__a, __b, __c, __p); | |
17773 | } | |
17774 | ||
17775 | __extension__ extern __inline float16x8_t | |
17776 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17777 | __arm_vcmlaq_rot270_m_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c, mve_pred16_t __p) | |
17778 | { | |
17779 | return __builtin_mve_vcmlaq_rot270_m_fv8hf (__a, __b, __c, __p); | |
17780 | } | |
17781 | ||
17782 | __extension__ extern __inline float32x4_t | |
17783 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17784 | __arm_vcmlaq_rot90_m_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c, mve_pred16_t __p) | |
17785 | { | |
17786 | return __builtin_mve_vcmlaq_rot90_m_fv4sf (__a, __b, __c, __p); | |
17787 | } | |
17788 | ||
17789 | __extension__ extern __inline float16x8_t | |
17790 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17791 | __arm_vcmlaq_rot90_m_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c, mve_pred16_t __p) | |
17792 | { | |
17793 | return __builtin_mve_vcmlaq_rot90_m_fv8hf (__a, __b, __c, __p); | |
17794 | } | |
17795 | ||
17796 | __extension__ extern __inline float32x4_t | |
17797 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17798 | __arm_vcmulq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p) | |
17799 | { | |
17800 | return __builtin_mve_vcmulq_m_fv4sf (__inactive, __a, __b, __p); | |
17801 | } | |
17802 | ||
17803 | __extension__ extern __inline float16x8_t | |
17804 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17805 | __arm_vcmulq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p) | |
17806 | { | |
17807 | return __builtin_mve_vcmulq_m_fv8hf (__inactive, __a, __b, __p); | |
17808 | } | |
17809 | ||
17810 | __extension__ extern __inline float32x4_t | |
17811 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17812 | __arm_vcmulq_rot180_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p) | |
17813 | { | |
17814 | return __builtin_mve_vcmulq_rot180_m_fv4sf (__inactive, __a, __b, __p); | |
17815 | } | |
17816 | ||
17817 | __extension__ extern __inline float16x8_t | |
17818 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17819 | __arm_vcmulq_rot180_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p) | |
17820 | { | |
17821 | return __builtin_mve_vcmulq_rot180_m_fv8hf (__inactive, __a, __b, __p); | |
17822 | } | |
17823 | ||
17824 | __extension__ extern __inline float32x4_t | |
17825 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17826 | __arm_vcmulq_rot270_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p) | |
17827 | { | |
17828 | return __builtin_mve_vcmulq_rot270_m_fv4sf (__inactive, __a, __b, __p); | |
17829 | } | |
17830 | ||
17831 | __extension__ extern __inline float16x8_t | |
17832 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17833 | __arm_vcmulq_rot270_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p) | |
17834 | { | |
17835 | return __builtin_mve_vcmulq_rot270_m_fv8hf (__inactive, __a, __b, __p); | |
17836 | } | |
17837 | ||
17838 | __extension__ extern __inline float32x4_t | |
17839 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17840 | __arm_vcmulq_rot90_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p) | |
17841 | { | |
17842 | return __builtin_mve_vcmulq_rot90_m_fv4sf (__inactive, __a, __b, __p); | |
17843 | } | |
17844 | ||
17845 | __extension__ extern __inline float16x8_t | |
17846 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17847 | __arm_vcmulq_rot90_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p) | |
17848 | { | |
17849 | return __builtin_mve_vcmulq_rot90_m_fv8hf (__inactive, __a, __b, __p); | |
17850 | } | |
17851 | ||
17852 | __extension__ extern __inline int32x4_t | |
17853 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17854 | __arm_vcvtq_m_n_s32_f32 (int32x4_t __inactive, float32x4_t __a, const int __imm6, mve_pred16_t __p) | |
17855 | { | |
17856 | return __builtin_mve_vcvtq_m_n_from_f_sv4si (__inactive, __a, __imm6, __p); | |
17857 | } | |
17858 | ||
17859 | __extension__ extern __inline int16x8_t | |
17860 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17861 | __arm_vcvtq_m_n_s16_f16 (int16x8_t __inactive, float16x8_t __a, const int __imm6, mve_pred16_t __p) | |
17862 | { | |
17863 | return __builtin_mve_vcvtq_m_n_from_f_sv8hi (__inactive, __a, __imm6, __p); | |
17864 | } | |
17865 | ||
17866 | __extension__ extern __inline uint32x4_t | |
17867 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17868 | __arm_vcvtq_m_n_u32_f32 (uint32x4_t __inactive, float32x4_t __a, const int __imm6, mve_pred16_t __p) | |
17869 | { | |
17870 | return __builtin_mve_vcvtq_m_n_from_f_uv4si (__inactive, __a, __imm6, __p); | |
17871 | } | |
17872 | ||
17873 | __extension__ extern __inline uint16x8_t | |
17874 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17875 | __arm_vcvtq_m_n_u16_f16 (uint16x8_t __inactive, float16x8_t __a, const int __imm6, mve_pred16_t __p) | |
17876 | { | |
17877 | return __builtin_mve_vcvtq_m_n_from_f_uv8hi (__inactive, __a, __imm6, __p); | |
17878 | } | |
17879 | ||
17880 | __extension__ extern __inline float32x4_t | |
17881 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17882 | __arm_veorq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p) | |
17883 | { | |
17884 | return __builtin_mve_veorq_m_fv4sf (__inactive, __a, __b, __p); | |
17885 | } | |
17886 | ||
17887 | __extension__ extern __inline float16x8_t | |
17888 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17889 | __arm_veorq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p) | |
17890 | { | |
17891 | return __builtin_mve_veorq_m_fv8hf (__inactive, __a, __b, __p); | |
17892 | } | |
17893 | ||
17894 | __extension__ extern __inline float32x4_t | |
17895 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17896 | __arm_vfmaq_m_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c, mve_pred16_t __p) | |
17897 | { | |
17898 | return __builtin_mve_vfmaq_m_fv4sf (__a, __b, __c, __p); | |
17899 | } | |
17900 | ||
17901 | __extension__ extern __inline float16x8_t | |
17902 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17903 | __arm_vfmaq_m_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c, mve_pred16_t __p) | |
17904 | { | |
17905 | return __builtin_mve_vfmaq_m_fv8hf (__a, __b, __c, __p); | |
17906 | } | |
17907 | ||
17908 | __extension__ extern __inline float32x4_t | |
17909 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17910 | __arm_vfmaq_m_n_f32 (float32x4_t __a, float32x4_t __b, float32_t __c, mve_pred16_t __p) | |
17911 | { | |
17912 | return __builtin_mve_vfmaq_m_n_fv4sf (__a, __b, __c, __p); | |
17913 | } | |
17914 | ||
17915 | __extension__ extern __inline float16x8_t | |
17916 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17917 | __arm_vfmaq_m_n_f16 (float16x8_t __a, float16x8_t __b, float16_t __c, mve_pred16_t __p) | |
17918 | { | |
17919 | return __builtin_mve_vfmaq_m_n_fv8hf (__a, __b, __c, __p); | |
17920 | } | |
17921 | ||
17922 | __extension__ extern __inline float32x4_t | |
17923 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17924 | __arm_vfmasq_m_n_f32 (float32x4_t __a, float32x4_t __b, float32_t __c, mve_pred16_t __p) | |
17925 | { | |
17926 | return __builtin_mve_vfmasq_m_n_fv4sf (__a, __b, __c, __p); | |
17927 | } | |
17928 | ||
17929 | __extension__ extern __inline float16x8_t | |
17930 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17931 | __arm_vfmasq_m_n_f16 (float16x8_t __a, float16x8_t __b, float16_t __c, mve_pred16_t __p) | |
17932 | { | |
17933 | return __builtin_mve_vfmasq_m_n_fv8hf (__a, __b, __c, __p); | |
17934 | } | |
17935 | ||
17936 | __extension__ extern __inline float32x4_t | |
17937 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17938 | __arm_vfmsq_m_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c, mve_pred16_t __p) | |
17939 | { | |
17940 | return __builtin_mve_vfmsq_m_fv4sf (__a, __b, __c, __p); | |
17941 | } | |
17942 | ||
17943 | __extension__ extern __inline float16x8_t | |
17944 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17945 | __arm_vfmsq_m_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c, mve_pred16_t __p) | |
17946 | { | |
17947 | return __builtin_mve_vfmsq_m_fv8hf (__a, __b, __c, __p); | |
17948 | } | |
17949 | ||
17950 | __extension__ extern __inline float32x4_t | |
17951 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17952 | __arm_vmaxnmq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p) | |
17953 | { | |
17954 | return __builtin_mve_vmaxnmq_m_fv4sf (__inactive, __a, __b, __p); | |
17955 | } | |
17956 | ||
17957 | __extension__ extern __inline float16x8_t | |
17958 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17959 | __arm_vmaxnmq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p) | |
17960 | { | |
17961 | return __builtin_mve_vmaxnmq_m_fv8hf (__inactive, __a, __b, __p); | |
17962 | } | |
17963 | ||
17964 | __extension__ extern __inline float32x4_t | |
17965 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17966 | __arm_vminnmq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p) | |
17967 | { | |
17968 | return __builtin_mve_vminnmq_m_fv4sf (__inactive, __a, __b, __p); | |
17969 | } | |
17970 | ||
17971 | __extension__ extern __inline float16x8_t | |
17972 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17973 | __arm_vminnmq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p) | |
17974 | { | |
17975 | return __builtin_mve_vminnmq_m_fv8hf (__inactive, __a, __b, __p); | |
17976 | } | |
17977 | ||
17978 | __extension__ extern __inline float32x4_t | |
17979 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17980 | __arm_vmulq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p) | |
17981 | { | |
17982 | return __builtin_mve_vmulq_m_fv4sf (__inactive, __a, __b, __p); | |
17983 | } | |
17984 | ||
17985 | __extension__ extern __inline float16x8_t | |
17986 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17987 | __arm_vmulq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p) | |
17988 | { | |
17989 | return __builtin_mve_vmulq_m_fv8hf (__inactive, __a, __b, __p); | |
17990 | } | |
17991 | ||
17992 | __extension__ extern __inline float32x4_t | |
17993 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17994 | __arm_vmulq_m_n_f32 (float32x4_t __inactive, float32x4_t __a, float32_t __b, mve_pred16_t __p) | |
17995 | { | |
17996 | return __builtin_mve_vmulq_m_n_fv4sf (__inactive, __a, __b, __p); | |
17997 | } | |
17998 | ||
17999 | __extension__ extern __inline float16x8_t | |
18000 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18001 | __arm_vmulq_m_n_f16 (float16x8_t __inactive, float16x8_t __a, float16_t __b, mve_pred16_t __p) | |
18002 | { | |
18003 | return __builtin_mve_vmulq_m_n_fv8hf (__inactive, __a, __b, __p); | |
18004 | } | |
18005 | ||
18006 | __extension__ extern __inline float32x4_t | |
18007 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18008 | __arm_vornq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p) | |
18009 | { | |
18010 | return __builtin_mve_vornq_m_fv4sf (__inactive, __a, __b, __p); | |
18011 | } | |
18012 | ||
18013 | __extension__ extern __inline float16x8_t | |
18014 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18015 | __arm_vornq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p) | |
18016 | { | |
18017 | return __builtin_mve_vornq_m_fv8hf (__inactive, __a, __b, __p); | |
18018 | } | |
18019 | ||
18020 | __extension__ extern __inline float32x4_t | |
18021 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18022 | __arm_vorrq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p) | |
18023 | { | |
18024 | return __builtin_mve_vorrq_m_fv4sf (__inactive, __a, __b, __p); | |
18025 | } | |
18026 | ||
18027 | __extension__ extern __inline float16x8_t | |
18028 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18029 | __arm_vorrq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p) | |
18030 | { | |
18031 | return __builtin_mve_vorrq_m_fv8hf (__inactive, __a, __b, __p); | |
18032 | } | |
18033 | ||
18034 | __extension__ extern __inline float32x4_t | |
18035 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18036 | __arm_vsubq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p) | |
18037 | { | |
18038 | return __builtin_mve_vsubq_m_fv4sf (__inactive, __a, __b, __p); | |
18039 | } | |
18040 | ||
18041 | __extension__ extern __inline float16x8_t | |
18042 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18043 | __arm_vsubq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p) | |
18044 | { | |
18045 | return __builtin_mve_vsubq_m_fv8hf (__inactive, __a, __b, __p); | |
18046 | } | |
18047 | ||
18048 | __extension__ extern __inline float32x4_t | |
18049 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18050 | __arm_vsubq_m_n_f32 (float32x4_t __inactive, float32x4_t __a, float32_t __b, mve_pred16_t __p) | |
18051 | { | |
18052 | return __builtin_mve_vsubq_m_n_fv4sf (__inactive, __a, __b, __p); | |
18053 | } | |
18054 | ||
18055 | __extension__ extern __inline float16x8_t | |
18056 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18057 | __arm_vsubq_m_n_f16 (float16x8_t __inactive, float16x8_t __a, float16_t __b, mve_pred16_t __p) | |
18058 | { | |
18059 | return __builtin_mve_vsubq_m_n_fv8hf (__inactive, __a, __b, __p); | |
18060 | } | |
18061 | ||
18062 | __extension__ extern __inline float32x4_t | |
18063 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18064 | __arm_vld1q_f32 (float32_t const * __base) | |
18065 | { | |
18066 | return __builtin_mve_vld1q_fv4sf((__builtin_neon_si *) __base); | |
18067 | } | |
18068 | ||
18069 | __extension__ extern __inline float16x8_t | |
18070 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18071 | __arm_vld1q_f16 (float16_t const * __base) | |
18072 | { | |
18073 | return __builtin_mve_vld1q_fv8hf((__builtin_neon_hi *) __base); | |
18074 | } | |
18075 | ||
18076 | __extension__ extern __inline float32x4_t | |
18077 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18078 | __arm_vldrwq_f32 (float32_t const * __base) | |
18079 | { | |
18080 | return __builtin_mve_vldrwq_fv4sf((__builtin_neon_si *) __base); | |
18081 | } | |
18082 | ||
18083 | __extension__ extern __inline float32x4_t | |
18084 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18085 | __arm_vldrwq_z_f32 (float32_t const * __base, mve_pred16_t __p) | |
18086 | { | |
18087 | return __builtin_mve_vldrwq_z_fv4sf((__builtin_neon_si *) __base, __p); | |
18088 | } | |
18089 | ||
18090 | __extension__ extern __inline float16x8_t | |
18091 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18092 | __arm_vldrhq_z_f16 (float16_t const * __base, mve_pred16_t __p) | |
18093 | { | |
18094 | return __builtin_mve_vldrhq_z_fv8hf((__builtin_neon_hi *) __base, __p); | |
18095 | } | |
18096 | ||
18097 | __extension__ extern __inline float16x8_t | |
18098 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18099 | __arm_vldrhq_f16 (float16_t const * __base) | |
18100 | { | |
18101 | return __builtin_mve_vldrhq_fv8hf((__builtin_neon_hi *) __base); | |
18102 | } | |
18103 | ||
18104 | __extension__ extern __inline float16x8_t | |
18105 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18106 | __arm_vldrhq_gather_offset_f16 (float16_t const * __base, uint16x8_t __offset) | |
18107 | { | |
18108 | return __builtin_mve_vldrhq_gather_offset_fv8hf((__builtin_neon_hi *) __base, __offset); | |
18109 | } | |
18110 | ||
18111 | __extension__ extern __inline float16x8_t | |
18112 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18113 | __arm_vldrhq_gather_offset_z_f16 (float16_t const * __base, uint16x8_t __offset, mve_pred16_t __p) | |
18114 | { | |
18115 | return __builtin_mve_vldrhq_gather_offset_z_fv8hf((__builtin_neon_hi *) __base, __offset, __p); | |
18116 | } | |
18117 | ||
18118 | __extension__ extern __inline float16x8_t | |
18119 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18120 | __arm_vldrhq_gather_shifted_offset_f16 (float16_t const * __base, uint16x8_t __offset) | |
18121 | { | |
18122 | return __builtin_mve_vldrhq_gather_shifted_offset_fv8hf (__base, __offset); | |
18123 | } | |
18124 | ||
18125 | __extension__ extern __inline float16x8_t | |
18126 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18127 | __arm_vldrhq_gather_shifted_offset_z_f16 (float16_t const * __base, uint16x8_t __offset, mve_pred16_t __p) | |
18128 | { | |
18129 | return __builtin_mve_vldrhq_gather_shifted_offset_z_fv8hf (__base, __offset, __p); | |
18130 | } | |
18131 | ||
18132 | __extension__ extern __inline float32x4_t | |
18133 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18134 | __arm_vldrwq_gather_base_f32 (uint32x4_t __addr, const int __offset) | |
18135 | { | |
18136 | return __builtin_mve_vldrwq_gather_base_fv4sf (__addr, __offset); | |
18137 | } | |
18138 | ||
18139 | __extension__ extern __inline float32x4_t | |
18140 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18141 | __arm_vldrwq_gather_base_z_f32 (uint32x4_t __addr, const int __offset, mve_pred16_t __p) | |
18142 | { | |
18143 | return __builtin_mve_vldrwq_gather_base_z_fv4sf (__addr, __offset, __p); | |
18144 | } | |
18145 | ||
18146 | __extension__ extern __inline float32x4_t | |
18147 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18148 | __arm_vldrwq_gather_offset_f32 (float32_t const * __base, uint32x4_t __offset) | |
18149 | { | |
18150 | return __builtin_mve_vldrwq_gather_offset_fv4sf((__builtin_neon_si *) __base, __offset); | |
18151 | } | |
18152 | ||
18153 | __extension__ extern __inline float32x4_t | |
18154 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18155 | __arm_vldrwq_gather_offset_z_f32 (float32_t const * __base, uint32x4_t __offset, mve_pred16_t __p) | |
18156 | { | |
18157 | return __builtin_mve_vldrwq_gather_offset_z_fv4sf((__builtin_neon_si *) __base, __offset, __p); | |
18158 | } | |
18159 | ||
18160 | __extension__ extern __inline float32x4_t | |
18161 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18162 | __arm_vldrwq_gather_shifted_offset_f32 (float32_t const * __base, uint32x4_t __offset) | |
18163 | { | |
18164 | return __builtin_mve_vldrwq_gather_shifted_offset_fv4sf (__base, __offset); | |
18165 | } | |
18166 | ||
18167 | __extension__ extern __inline float32x4_t | |
18168 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18169 | __arm_vldrwq_gather_shifted_offset_z_f32 (float32_t const * __base, uint32x4_t __offset, mve_pred16_t __p) | |
18170 | { | |
18171 | return __builtin_mve_vldrwq_gather_shifted_offset_z_fv4sf (__base, __offset, __p); | |
18172 | } | |
18173 | ||
18174 | __extension__ extern __inline void | |
18175 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18176 | __arm_vstrwq_p_f32 (float32_t * __addr, float32x4_t __value, mve_pred16_t __p) | |
18177 | { | |
18178 | __builtin_mve_vstrwq_p_fv4sf (__addr, __value, __p); | |
18179 | } | |
18180 | ||
18181 | __extension__ extern __inline void | |
18182 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18183 | __arm_vstrwq_f32 (float32_t * __addr, float32x4_t __value) | |
18184 | { | |
18185 | __builtin_mve_vstrwq_fv4sf (__addr, __value); | |
18186 | } | |
18187 | ||
18188 | __extension__ extern __inline void | |
18189 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18190 | __arm_vst1q_f32 (float32_t * __addr, float32x4_t __value) | |
18191 | { | |
18192 | __builtin_mve_vst1q_fv4sf (__addr, __value); | |
18193 | } | |
18194 | ||
18195 | __extension__ extern __inline void | |
18196 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18197 | __arm_vst1q_f16 (float16_t * __addr, float16x8_t __value) | |
18198 | { | |
18199 | __builtin_mve_vst1q_fv8hf (__addr, __value); | |
18200 | } | |
18201 | ||
18202 | __extension__ extern __inline void | |
18203 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18204 | __arm_vstrhq_f16 (float16_t * __addr, float16x8_t __value) | |
18205 | { | |
18206 | __builtin_mve_vstrhq_fv8hf (__addr, __value); | |
18207 | } | |
18208 | ||
18209 | __extension__ extern __inline void | |
18210 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18211 | __arm_vstrhq_p_f16 (float16_t * __addr, float16x8_t __value, mve_pred16_t __p) | |
18212 | { | |
18213 | __builtin_mve_vstrhq_p_fv8hf (__addr, __value, __p); | |
18214 | } | |
18215 | ||
18216 | __extension__ extern __inline void | |
18217 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18218 | __arm_vstrhq_scatter_offset_f16 (float16_t * __base, uint16x8_t __offset, float16x8_t __value) | |
18219 | { | |
18220 | __builtin_mve_vstrhq_scatter_offset_fv8hf (__base, __offset, __value); | |
18221 | } | |
18222 | ||
18223 | __extension__ extern __inline void | |
18224 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18225 | __arm_vstrhq_scatter_offset_p_f16 (float16_t * __base, uint16x8_t __offset, float16x8_t __value, mve_pred16_t __p) | |
18226 | { | |
18227 | __builtin_mve_vstrhq_scatter_offset_p_fv8hf (__base, __offset, __value, __p); | |
18228 | } | |
18229 | ||
18230 | __extension__ extern __inline void | |
18231 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18232 | __arm_vstrhq_scatter_shifted_offset_f16 (float16_t * __base, uint16x8_t __offset, float16x8_t __value) | |
18233 | { | |
18234 | __builtin_mve_vstrhq_scatter_shifted_offset_fv8hf (__base, __offset, __value); | |
18235 | } | |
18236 | ||
18237 | __extension__ extern __inline void | |
18238 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18239 | __arm_vstrhq_scatter_shifted_offset_p_f16 (float16_t * __base, uint16x8_t __offset, float16x8_t __value, mve_pred16_t __p) | |
18240 | { | |
18241 | __builtin_mve_vstrhq_scatter_shifted_offset_p_fv8hf (__base, __offset, __value, __p); | |
18242 | } | |
18243 | ||
18244 | __extension__ extern __inline void | |
18245 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18246 | __arm_vstrwq_scatter_base_f32 (uint32x4_t __addr, const int __offset, float32x4_t __value) | |
18247 | { | |
18248 | __builtin_mve_vstrwq_scatter_base_fv4sf (__addr, __offset, __value); | |
18249 | } | |
18250 | ||
18251 | __extension__ extern __inline void | |
18252 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18253 | __arm_vstrwq_scatter_base_p_f32 (uint32x4_t __addr, const int __offset, float32x4_t __value, mve_pred16_t __p) | |
18254 | { | |
18255 | __builtin_mve_vstrwq_scatter_base_p_fv4sf (__addr, __offset, __value, __p); | |
18256 | } | |
18257 | ||
18258 | __extension__ extern __inline void | |
18259 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18260 | __arm_vstrwq_scatter_offset_f32 (float32_t * __base, uint32x4_t __offset, float32x4_t __value) | |
18261 | { | |
18262 | __builtin_mve_vstrwq_scatter_offset_fv4sf (__base, __offset, __value); | |
18263 | } | |
18264 | ||
18265 | __extension__ extern __inline void | |
18266 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18267 | __arm_vstrwq_scatter_offset_p_f32 (float32_t * __base, uint32x4_t __offset, float32x4_t __value, mve_pred16_t __p) | |
18268 | { | |
18269 | __builtin_mve_vstrwq_scatter_offset_p_fv4sf (__base, __offset, __value, __p); | |
18270 | } | |
18271 | ||
18272 | __extension__ extern __inline void | |
18273 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18274 | __arm_vstrwq_scatter_shifted_offset_f32 (float32_t * __base, uint32x4_t __offset, float32x4_t __value) | |
18275 | { | |
18276 | __builtin_mve_vstrwq_scatter_shifted_offset_fv4sf (__base, __offset, __value); | |
18277 | } | |
18278 | ||
18279 | __extension__ extern __inline void | |
18280 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18281 | __arm_vstrwq_scatter_shifted_offset_p_f32 (float32_t * __base, uint32x4_t __offset, float32x4_t __value, mve_pred16_t __p) | |
18282 | { | |
18283 | __builtin_mve_vstrwq_scatter_shifted_offset_p_fv4sf (__base, __offset, __value, __p); | |
18284 | } | |
18285 | ||
18286 | __extension__ extern __inline float16x8_t | |
18287 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18288 | __arm_vaddq_f16 (float16x8_t __a, float16x8_t __b) | |
18289 | { | |
18290 | return __a + __b; | |
18291 | } | |
18292 | ||
18293 | __extension__ extern __inline float32x4_t | |
18294 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18295 | __arm_vaddq_f32 (float32x4_t __a, float32x4_t __b) | |
18296 | { | |
18297 | return __a + __b; | |
18298 | } | |
18299 | ||
18300 | __extension__ extern __inline float16x8_t | |
18301 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18302 | __arm_vuninitializedq_f16 (void) | |
18303 | { | |
18304 | float16x8_t __uninit; | |
18305 | __asm__ ("": "=w" (__uninit)); | |
18306 | return __uninit; | |
18307 | } | |
18308 | ||
18309 | __extension__ extern __inline float32x4_t | |
18310 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18311 | __arm_vuninitializedq_f32 (void) | |
18312 | { | |
18313 | float32x4_t __uninit; | |
18314 | __asm__ ("": "=w" (__uninit)); | |
18315 | return __uninit; | |
18316 | } | |
18317 | ||
18318 | __extension__ extern __inline int32x4_t | |
18319 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18320 | __arm_vreinterpretq_s32_f16 (float16x8_t __a) | |
18321 | { | |
18322 | return (int32x4_t) __a; | |
18323 | } | |
18324 | ||
18325 | __extension__ extern __inline int32x4_t | |
18326 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18327 | __arm_vreinterpretq_s32_f32 (float32x4_t __a) | |
18328 | { | |
18329 | return (int32x4_t) __a; | |
18330 | } | |
18331 | ||
18332 | __extension__ extern __inline int16x8_t | |
18333 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18334 | __arm_vreinterpretq_s16_f16 (float16x8_t __a) | |
18335 | { | |
18336 | return (int16x8_t) __a; | |
18337 | } | |
18338 | ||
18339 | __extension__ extern __inline int16x8_t | |
18340 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18341 | __arm_vreinterpretq_s16_f32 (float32x4_t __a) | |
18342 | { | |
18343 | return (int16x8_t) __a; | |
18344 | } | |
18345 | ||
18346 | __extension__ extern __inline int64x2_t | |
18347 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18348 | __arm_vreinterpretq_s64_f16 (float16x8_t __a) | |
18349 | { | |
18350 | return (int64x2_t) __a; | |
18351 | } | |
18352 | ||
18353 | __extension__ extern __inline int64x2_t | |
18354 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18355 | __arm_vreinterpretq_s64_f32 (float32x4_t __a) | |
18356 | { | |
18357 | return (int64x2_t) __a; | |
18358 | } | |
18359 | ||
18360 | __extension__ extern __inline int8x16_t | |
18361 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18362 | __arm_vreinterpretq_s8_f16 (float16x8_t __a) | |
18363 | { | |
18364 | return (int8x16_t) __a; | |
18365 | } | |
18366 | ||
18367 | __extension__ extern __inline int8x16_t | |
18368 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18369 | __arm_vreinterpretq_s8_f32 (float32x4_t __a) | |
18370 | { | |
18371 | return (int8x16_t) __a; | |
18372 | } | |
18373 | ||
18374 | __extension__ extern __inline uint16x8_t | |
18375 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18376 | __arm_vreinterpretq_u16_f16 (float16x8_t __a) | |
18377 | { | |
18378 | return (uint16x8_t) __a; | |
18379 | } | |
18380 | ||
18381 | __extension__ extern __inline uint16x8_t | |
18382 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18383 | __arm_vreinterpretq_u16_f32 (float32x4_t __a) | |
18384 | { | |
18385 | return (uint16x8_t) __a; | |
18386 | } | |
18387 | ||
18388 | __extension__ extern __inline uint32x4_t | |
18389 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18390 | __arm_vreinterpretq_u32_f16 (float16x8_t __a) | |
18391 | { | |
18392 | return (uint32x4_t) __a; | |
18393 | } | |
18394 | ||
18395 | __extension__ extern __inline uint32x4_t | |
18396 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18397 | __arm_vreinterpretq_u32_f32 (float32x4_t __a) | |
18398 | { | |
18399 | return (uint32x4_t) __a; | |
18400 | } | |
18401 | ||
18402 | __extension__ extern __inline uint64x2_t | |
18403 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18404 | __arm_vreinterpretq_u64_f16 (float16x8_t __a) | |
18405 | { | |
18406 | return (uint64x2_t) __a; | |
18407 | } | |
18408 | ||
18409 | __extension__ extern __inline uint64x2_t | |
18410 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18411 | __arm_vreinterpretq_u64_f32 (float32x4_t __a) | |
18412 | { | |
18413 | return (uint64x2_t) __a; | |
18414 | } | |
18415 | ||
18416 | __extension__ extern __inline uint8x16_t | |
18417 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18418 | __arm_vreinterpretq_u8_f16 (float16x8_t __a) | |
18419 | { | |
18420 | return (uint8x16_t) __a; | |
18421 | } | |
18422 | ||
18423 | __extension__ extern __inline uint8x16_t | |
18424 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18425 | __arm_vreinterpretq_u8_f32 (float32x4_t __a) | |
18426 | { | |
18427 | return (uint8x16_t) __a; | |
18428 | } | |
18429 | ||
18430 | __extension__ extern __inline float16x8_t | |
18431 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18432 | __arm_vreinterpretq_f16_f32 (float32x4_t __a) | |
18433 | { | |
18434 | return (float16x8_t) __a; | |
18435 | } | |
18436 | ||
18437 | __extension__ extern __inline float16x8_t | |
18438 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18439 | __arm_vreinterpretq_f16_s16 (int16x8_t __a) | |
18440 | { | |
18441 | return (float16x8_t) __a; | |
18442 | } | |
18443 | ||
18444 | __extension__ extern __inline float16x8_t | |
18445 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18446 | __arm_vreinterpretq_f16_s32 (int32x4_t __a) | |
18447 | { | |
18448 | return (float16x8_t) __a; | |
18449 | } | |
18450 | ||
18451 | __extension__ extern __inline float16x8_t | |
18452 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18453 | __arm_vreinterpretq_f16_s64 (int64x2_t __a) | |
18454 | { | |
18455 | return (float16x8_t) __a; | |
18456 | } | |
18457 | ||
18458 | __extension__ extern __inline float16x8_t | |
18459 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18460 | __arm_vreinterpretq_f16_s8 (int8x16_t __a) | |
18461 | { | |
18462 | return (float16x8_t) __a; | |
18463 | } | |
18464 | ||
18465 | __extension__ extern __inline float16x8_t | |
18466 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18467 | __arm_vreinterpretq_f16_u16 (uint16x8_t __a) | |
18468 | { | |
18469 | return (float16x8_t) __a; | |
18470 | } | |
18471 | ||
18472 | __extension__ extern __inline float16x8_t | |
18473 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18474 | __arm_vreinterpretq_f16_u32 (uint32x4_t __a) | |
18475 | { | |
18476 | return (float16x8_t) __a; | |
18477 | } | |
18478 | ||
18479 | __extension__ extern __inline float16x8_t | |
18480 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18481 | __arm_vreinterpretq_f16_u64 (uint64x2_t __a) | |
532e9e24 | 18482 | { |
261014a1 | 18483 | return (float16x8_t) __a; |
532e9e24 SP |
18484 | } |
18485 | ||
18486 | __extension__ extern __inline float16x8_t | |
18487 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 18488 | __arm_vreinterpretq_f16_u8 (uint8x16_t __a) |
532e9e24 | 18489 | { |
261014a1 | 18490 | return (float16x8_t) __a; |
532e9e24 SP |
18491 | } |
18492 | ||
18493 | __extension__ extern __inline float32x4_t | |
18494 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 18495 | __arm_vreinterpretq_f32_f16 (float16x8_t __a) |
532e9e24 | 18496 | { |
261014a1 | 18497 | return (float32x4_t) __a; |
532e9e24 SP |
18498 | } |
18499 | ||
261014a1 | 18500 | __extension__ extern __inline float32x4_t |
532e9e24 | 18501 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 18502 | __arm_vreinterpretq_f32_s16 (int16x8_t __a) |
532e9e24 | 18503 | { |
261014a1 | 18504 | return (float32x4_t) __a; |
532e9e24 SP |
18505 | } |
18506 | ||
18507 | __extension__ extern __inline float32x4_t | |
18508 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 18509 | __arm_vreinterpretq_f32_s32 (int32x4_t __a) |
532e9e24 | 18510 | { |
261014a1 | 18511 | return (float32x4_t) __a; |
532e9e24 SP |
18512 | } |
18513 | ||
261014a1 | 18514 | __extension__ extern __inline float32x4_t |
532e9e24 | 18515 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 18516 | __arm_vreinterpretq_f32_s64 (int64x2_t __a) |
532e9e24 | 18517 | { |
261014a1 | 18518 | return (float32x4_t) __a; |
532e9e24 SP |
18519 | } |
18520 | ||
18521 | __extension__ extern __inline float32x4_t | |
18522 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 18523 | __arm_vreinterpretq_f32_s8 (int8x16_t __a) |
532e9e24 | 18524 | { |
261014a1 | 18525 | return (float32x4_t) __a; |
532e9e24 SP |
18526 | } |
18527 | ||
261014a1 | 18528 | __extension__ extern __inline float32x4_t |
532e9e24 | 18529 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 18530 | __arm_vreinterpretq_f32_u16 (uint16x8_t __a) |
532e9e24 | 18531 | { |
261014a1 | 18532 | return (float32x4_t) __a; |
532e9e24 SP |
18533 | } |
18534 | ||
261014a1 | 18535 | __extension__ extern __inline float32x4_t |
532e9e24 | 18536 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 18537 | __arm_vreinterpretq_f32_u32 (uint32x4_t __a) |
532e9e24 | 18538 | { |
261014a1 | 18539 | return (float32x4_t) __a; |
532e9e24 SP |
18540 | } |
18541 | ||
261014a1 | 18542 | __extension__ extern __inline float32x4_t |
532e9e24 | 18543 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 18544 | __arm_vreinterpretq_f32_u64 (uint64x2_t __a) |
532e9e24 | 18545 | { |
261014a1 | 18546 | return (float32x4_t) __a; |
532e9e24 SP |
18547 | } |
18548 | ||
261014a1 | 18549 | __extension__ extern __inline float32x4_t |
532e9e24 | 18550 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 18551 | __arm_vreinterpretq_f32_u8 (uint8x16_t __a) |
532e9e24 | 18552 | { |
261014a1 | 18553 | return (float32x4_t) __a; |
532e9e24 SP |
18554 | } |
18555 | ||
261014a1 | 18556 | __extension__ extern __inline float32x4_t |
532e9e24 | 18557 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 18558 | __arm_vldrwq_gather_base_wb_f32 (uint32x4_t * __addr, const int __offset) |
532e9e24 | 18559 | { |
261014a1 SP |
18560 | float32x4_t |
18561 | result = __builtin_mve_vldrwq_gather_base_wb_fv4sf (*__addr, __offset); | |
18562 | __addr += __offset; | |
18563 | return result; | |
532e9e24 SP |
18564 | } |
18565 | ||
18566 | __extension__ extern __inline float32x4_t | |
18567 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 18568 | __arm_vldrwq_gather_base_wb_z_f32 (uint32x4_t * __addr, const int __offset, mve_pred16_t __p) |
532e9e24 | 18569 | { |
261014a1 SP |
18570 | float32x4_t |
18571 | result = __builtin_mve_vldrwq_gather_base_wb_z_fv4sf (*__addr, __offset, __p); | |
18572 | __addr += __offset; | |
18573 | return result; | |
532e9e24 SP |
18574 | } |
18575 | ||
261014a1 | 18576 | __extension__ extern __inline void |
532e9e24 | 18577 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 18578 | __arm_vstrwq_scatter_base_wb_f32 (uint32x4_t * __addr, const int __offset, float32x4_t __value) |
532e9e24 | 18579 | { |
261014a1 SP |
18580 | __builtin_mve_vstrwq_scatter_base_wb_fv4sf (*__addr, __offset, __value); |
18581 | __builtin_mve_vstrwq_scatter_base_wb_add_fv4sf (*__addr, __offset, *__addr); | |
532e9e24 SP |
18582 | } |
18583 | ||
261014a1 | 18584 | __extension__ extern __inline void |
532e9e24 | 18585 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 18586 | __arm_vstrwq_scatter_base_wb_p_f32 (uint32x4_t * __addr, const int __offset, float32x4_t __value, mve_pred16_t __p) |
532e9e24 | 18587 | { |
261014a1 SP |
18588 | __builtin_mve_vstrwq_scatter_base_wb_p_fv4sf (*__addr, __offset, __value, __p); |
18589 | __builtin_mve_vstrwq_scatter_base_wb_p_add_fv4sf (*__addr, __offset, *__addr, __p); | |
532e9e24 SP |
18590 | } |
18591 | ||
18592 | __extension__ extern __inline float16x8_t | |
18593 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 18594 | __arm_vdupq_x_n_f16 (float16_t __a, mve_pred16_t __p) |
532e9e24 | 18595 | { |
261014a1 | 18596 | return __builtin_mve_vdupq_m_n_fv8hf (vuninitializedq_f16 (), __a, __p); |
532e9e24 SP |
18597 | } |
18598 | ||
18599 | __extension__ extern __inline float32x4_t | |
18600 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 18601 | __arm_vdupq_x_n_f32 (float32_t __a, mve_pred16_t __p) |
532e9e24 | 18602 | { |
261014a1 | 18603 | return __builtin_mve_vdupq_m_n_fv4sf (vuninitializedq_f32 (), __a, __p); |
532e9e24 SP |
18604 | } |
18605 | ||
18606 | __extension__ extern __inline float16x8_t | |
18607 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 18608 | __arm_vminnmq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p) |
532e9e24 | 18609 | { |
261014a1 | 18610 | return __builtin_mve_vminnmq_m_fv8hf (vuninitializedq_f16 (), __a, __b, __p); |
532e9e24 SP |
18611 | } |
18612 | ||
18613 | __extension__ extern __inline float32x4_t | |
18614 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 18615 | __arm_vminnmq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p) |
532e9e24 | 18616 | { |
261014a1 | 18617 | return __builtin_mve_vminnmq_m_fv4sf (vuninitializedq_f32 (), __a, __b, __p); |
532e9e24 SP |
18618 | } |
18619 | ||
18620 | __extension__ extern __inline float16x8_t | |
18621 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 18622 | __arm_vmaxnmq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p) |
532e9e24 | 18623 | { |
261014a1 | 18624 | return __builtin_mve_vmaxnmq_m_fv8hf (vuninitializedq_f16 (), __a, __b, __p); |
532e9e24 SP |
18625 | } |
18626 | ||
18627 | __extension__ extern __inline float32x4_t | |
18628 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 18629 | __arm_vmaxnmq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p) |
532e9e24 | 18630 | { |
261014a1 | 18631 | return __builtin_mve_vmaxnmq_m_fv4sf (vuninitializedq_f32 (), __a, __b, __p); |
532e9e24 SP |
18632 | } |
18633 | ||
18634 | __extension__ extern __inline float16x8_t | |
18635 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 18636 | __arm_vabdq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p) |
532e9e24 | 18637 | { |
261014a1 | 18638 | return __builtin_mve_vabdq_m_fv8hf (vuninitializedq_f16 (), __a, __b, __p); |
532e9e24 SP |
18639 | } |
18640 | ||
18641 | __extension__ extern __inline float32x4_t | |
18642 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 18643 | __arm_vabdq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p) |
532e9e24 | 18644 | { |
261014a1 | 18645 | return __builtin_mve_vabdq_m_fv4sf (vuninitializedq_f32 (), __a, __b, __p); |
532e9e24 SP |
18646 | } |
18647 | ||
18648 | __extension__ extern __inline float16x8_t | |
18649 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 18650 | __arm_vabsq_x_f16 (float16x8_t __a, mve_pred16_t __p) |
532e9e24 | 18651 | { |
261014a1 | 18652 | return __builtin_mve_vabsq_m_fv8hf (vuninitializedq_f16 (), __a, __p); |
532e9e24 SP |
18653 | } |
18654 | ||
18655 | __extension__ extern __inline float32x4_t | |
18656 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 18657 | __arm_vabsq_x_f32 (float32x4_t __a, mve_pred16_t __p) |
532e9e24 | 18658 | { |
261014a1 | 18659 | return __builtin_mve_vabsq_m_fv4sf (vuninitializedq_f32 (), __a, __p); |
532e9e24 SP |
18660 | } |
18661 | ||
18662 | __extension__ extern __inline float16x8_t | |
18663 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 18664 | __arm_vaddq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p) |
532e9e24 | 18665 | { |
261014a1 | 18666 | return __builtin_mve_vaddq_m_fv8hf (vuninitializedq_f16 (), __a, __b, __p); |
532e9e24 SP |
18667 | } |
18668 | ||
18669 | __extension__ extern __inline float32x4_t | |
18670 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 18671 | __arm_vaddq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p) |
532e9e24 | 18672 | { |
261014a1 | 18673 | return __builtin_mve_vaddq_m_fv4sf (vuninitializedq_f32 (), __a, __b, __p); |
532e9e24 SP |
18674 | } |
18675 | ||
18676 | __extension__ extern __inline float16x8_t | |
18677 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 18678 | __arm_vaddq_x_n_f16 (float16x8_t __a, float16_t __b, mve_pred16_t __p) |
532e9e24 | 18679 | { |
261014a1 | 18680 | return __builtin_mve_vaddq_m_n_fv8hf (vuninitializedq_f16 (), __a, __b, __p); |
532e9e24 SP |
18681 | } |
18682 | ||
18683 | __extension__ extern __inline float32x4_t | |
18684 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 18685 | __arm_vaddq_x_n_f32 (float32x4_t __a, float32_t __b, mve_pred16_t __p) |
532e9e24 | 18686 | { |
261014a1 | 18687 | return __builtin_mve_vaddq_m_n_fv4sf (vuninitializedq_f32 (), __a, __b, __p); |
532e9e24 SP |
18688 | } |
18689 | ||
18690 | __extension__ extern __inline float16x8_t | |
18691 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 18692 | __arm_vnegq_x_f16 (float16x8_t __a, mve_pred16_t __p) |
532e9e24 | 18693 | { |
261014a1 | 18694 | return __builtin_mve_vnegq_m_fv8hf (vuninitializedq_f16 (), __a, __p); |
532e9e24 SP |
18695 | } |
18696 | ||
18697 | __extension__ extern __inline float32x4_t | |
18698 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 18699 | __arm_vnegq_x_f32 (float32x4_t __a, mve_pred16_t __p) |
532e9e24 | 18700 | { |
261014a1 | 18701 | return __builtin_mve_vnegq_m_fv4sf (vuninitializedq_f32 (), __a, __p); |
532e9e24 SP |
18702 | } |
18703 | ||
18704 | __extension__ extern __inline float16x8_t | |
18705 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 18706 | __arm_vmulq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p) |
532e9e24 | 18707 | { |
261014a1 | 18708 | return __builtin_mve_vmulq_m_fv8hf (vuninitializedq_f16 (), __a, __b, __p); |
532e9e24 SP |
18709 | } |
18710 | ||
18711 | __extension__ extern __inline float32x4_t | |
18712 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 18713 | __arm_vmulq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p) |
532e9e24 | 18714 | { |
261014a1 | 18715 | return __builtin_mve_vmulq_m_fv4sf (vuninitializedq_f32 (), __a, __b, __p); |
532e9e24 SP |
18716 | } |
18717 | ||
18718 | __extension__ extern __inline float16x8_t | |
18719 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 18720 | __arm_vmulq_x_n_f16 (float16x8_t __a, float16_t __b, mve_pred16_t __p) |
532e9e24 | 18721 | { |
261014a1 | 18722 | return __builtin_mve_vmulq_m_n_fv8hf (vuninitializedq_f16 (), __a, __b, __p); |
532e9e24 SP |
18723 | } |
18724 | ||
18725 | __extension__ extern __inline float32x4_t | |
18726 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 18727 | __arm_vmulq_x_n_f32 (float32x4_t __a, float32_t __b, mve_pred16_t __p) |
532e9e24 | 18728 | { |
261014a1 | 18729 | return __builtin_mve_vmulq_m_n_fv4sf (vuninitializedq_f32 (), __a, __b, __p); |
532e9e24 SP |
18730 | } |
18731 | ||
18732 | __extension__ extern __inline float16x8_t | |
18733 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 18734 | __arm_vsubq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p) |
532e9e24 | 18735 | { |
261014a1 | 18736 | return __builtin_mve_vsubq_m_fv8hf (vuninitializedq_f16 (), __a, __b, __p); |
532e9e24 SP |
18737 | } |
18738 | ||
18739 | __extension__ extern __inline float32x4_t | |
18740 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 18741 | __arm_vsubq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p) |
532e9e24 | 18742 | { |
261014a1 | 18743 | return __builtin_mve_vsubq_m_fv4sf (vuninitializedq_f32 (), __a, __b, __p); |
532e9e24 SP |
18744 | } |
18745 | ||
18746 | __extension__ extern __inline float16x8_t | |
18747 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 18748 | __arm_vsubq_x_n_f16 (float16x8_t __a, float16_t __b, mve_pred16_t __p) |
532e9e24 | 18749 | { |
261014a1 | 18750 | return __builtin_mve_vsubq_m_n_fv8hf (vuninitializedq_f16 (), __a, __b, __p); |
532e9e24 | 18751 | } |
429d607b | 18752 | |
bf1e3d5a SP |
18753 | __extension__ extern __inline float32x4_t |
18754 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 18755 | __arm_vsubq_x_n_f32 (float32x4_t __a, float32_t __b, mve_pred16_t __p) |
bf1e3d5a | 18756 | { |
261014a1 | 18757 | return __builtin_mve_vsubq_m_n_fv4sf (vuninitializedq_f32 (), __a, __b, __p); |
bf1e3d5a SP |
18758 | } |
18759 | ||
18760 | __extension__ extern __inline float16x8_t | |
18761 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 18762 | __arm_vcaddq_rot90_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p) |
bf1e3d5a | 18763 | { |
261014a1 | 18764 | return __builtin_mve_vcaddq_rot90_m_fv8hf (vuninitializedq_f16 (), __a, __b, __p); |
bf1e3d5a SP |
18765 | } |
18766 | ||
18767 | __extension__ extern __inline float32x4_t | |
18768 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 18769 | __arm_vcaddq_rot90_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p) |
bf1e3d5a | 18770 | { |
261014a1 | 18771 | return __builtin_mve_vcaddq_rot90_m_fv4sf (vuninitializedq_f32 (), __a, __b, __p); |
bf1e3d5a SP |
18772 | } |
18773 | ||
261014a1 | 18774 | __extension__ extern __inline float16x8_t |
bf1e3d5a | 18775 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 18776 | __arm_vcaddq_rot270_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p) |
bf1e3d5a | 18777 | { |
261014a1 | 18778 | return __builtin_mve_vcaddq_rot270_m_fv8hf (vuninitializedq_f16 (), __a, __b, __p); |
bf1e3d5a SP |
18779 | } |
18780 | ||
261014a1 | 18781 | __extension__ extern __inline float32x4_t |
bf1e3d5a | 18782 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 18783 | __arm_vcaddq_rot270_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p) |
bf1e3d5a | 18784 | { |
261014a1 | 18785 | return __builtin_mve_vcaddq_rot270_m_fv4sf (vuninitializedq_f32 (), __a, __b, __p); |
bf1e3d5a SP |
18786 | } |
18787 | ||
18788 | __extension__ extern __inline float16x8_t | |
18789 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 18790 | __arm_vcmulq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p) |
bf1e3d5a | 18791 | { |
261014a1 | 18792 | return __builtin_mve_vcmulq_m_fv8hf (vuninitializedq_f16 (), __a, __b, __p); |
bf1e3d5a | 18793 | } |
4cc23303 | 18794 | |
261014a1 | 18795 | __extension__ extern __inline float32x4_t |
4cc23303 | 18796 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 18797 | __arm_vcmulq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p) |
4cc23303 | 18798 | { |
261014a1 | 18799 | return __builtin_mve_vcmulq_m_fv4sf (vuninitializedq_f32 (), __a, __b, __p); |
4cc23303 SP |
18800 | } |
18801 | ||
18802 | __extension__ extern __inline float16x8_t | |
18803 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 18804 | __arm_vcmulq_rot90_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p) |
4cc23303 | 18805 | { |
261014a1 | 18806 | return __builtin_mve_vcmulq_rot90_m_fv8hf (vuninitializedq_f16 (), __a, __b, __p); |
4cc23303 SP |
18807 | } |
18808 | ||
261014a1 | 18809 | __extension__ extern __inline float32x4_t |
4cc23303 | 18810 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 18811 | __arm_vcmulq_rot90_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p) |
4cc23303 | 18812 | { |
261014a1 | 18813 | return __builtin_mve_vcmulq_rot90_m_fv4sf (vuninitializedq_f32 (), __a, __b, __p); |
4cc23303 SP |
18814 | } |
18815 | ||
18816 | __extension__ extern __inline float16x8_t | |
18817 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 18818 | __arm_vcmulq_rot180_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p) |
4cc23303 | 18819 | { |
261014a1 | 18820 | return __builtin_mve_vcmulq_rot180_m_fv8hf (vuninitializedq_f16 (), __a, __b, __p); |
4cc23303 SP |
18821 | } |
18822 | ||
18823 | __extension__ extern __inline float32x4_t | |
18824 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 18825 | __arm_vcmulq_rot180_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p) |
4cc23303 | 18826 | { |
261014a1 | 18827 | return __builtin_mve_vcmulq_rot180_m_fv4sf (vuninitializedq_f32 (), __a, __b, __p); |
4cc23303 SP |
18828 | } |
18829 | ||
261014a1 | 18830 | __extension__ extern __inline float16x8_t |
4cc23303 | 18831 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 18832 | __arm_vcmulq_rot270_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p) |
4cc23303 | 18833 | { |
261014a1 | 18834 | return __builtin_mve_vcmulq_rot270_m_fv8hf (vuninitializedq_f16 (), __a, __b, __p); |
4cc23303 SP |
18835 | } |
18836 | ||
18837 | __extension__ extern __inline float32x4_t | |
18838 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 18839 | __arm_vcmulq_rot270_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p) |
4cc23303 | 18840 | { |
261014a1 | 18841 | return __builtin_mve_vcmulq_rot270_m_fv4sf (vuninitializedq_f32 (), __a, __b, __p); |
4cc23303 SP |
18842 | } |
18843 | ||
261014a1 | 18844 | __extension__ extern __inline int16x8_t |
4cc23303 | 18845 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 18846 | __arm_vcvtaq_x_s16_f16 (float16x8_t __a, mve_pred16_t __p) |
4cc23303 | 18847 | { |
261014a1 | 18848 | return __builtin_mve_vcvtaq_m_sv8hi (vuninitializedq_s16 (), __a, __p); |
4cc23303 SP |
18849 | } |
18850 | ||
261014a1 | 18851 | __extension__ extern __inline int32x4_t |
4cc23303 | 18852 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 18853 | __arm_vcvtaq_x_s32_f32 (float32x4_t __a, mve_pred16_t __p) |
4cc23303 | 18854 | { |
261014a1 | 18855 | return __builtin_mve_vcvtaq_m_sv4si (vuninitializedq_s32 (), __a, __p); |
4cc23303 SP |
18856 | } |
18857 | ||
261014a1 | 18858 | __extension__ extern __inline uint16x8_t |
4cc23303 | 18859 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 18860 | __arm_vcvtaq_x_u16_f16 (float16x8_t __a, mve_pred16_t __p) |
4cc23303 | 18861 | { |
261014a1 | 18862 | return __builtin_mve_vcvtaq_m_uv8hi (vuninitializedq_u16 (), __a, __p); |
4cc23303 SP |
18863 | } |
18864 | ||
261014a1 | 18865 | __extension__ extern __inline uint32x4_t |
5cad47e0 | 18866 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 18867 | __arm_vcvtaq_x_u32_f32 (float32x4_t __a, mve_pred16_t __p) |
5cad47e0 | 18868 | { |
261014a1 | 18869 | return __builtin_mve_vcvtaq_m_uv4si (vuninitializedq_u32 (), __a, __p); |
5cad47e0 SP |
18870 | } |
18871 | ||
261014a1 | 18872 | __extension__ extern __inline int16x8_t |
5cad47e0 | 18873 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 18874 | __arm_vcvtnq_x_s16_f16 (float16x8_t __a, mve_pred16_t __p) |
5cad47e0 | 18875 | { |
261014a1 | 18876 | return __builtin_mve_vcvtnq_m_sv8hi (vuninitializedq_s16 (), __a, __p); |
5cad47e0 SP |
18877 | } |
18878 | ||
261014a1 | 18879 | __extension__ extern __inline int32x4_t |
5cad47e0 | 18880 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 18881 | __arm_vcvtnq_x_s32_f32 (float32x4_t __a, mve_pred16_t __p) |
5cad47e0 | 18882 | { |
261014a1 | 18883 | return __builtin_mve_vcvtnq_m_sv4si (vuninitializedq_s32 (), __a, __p); |
5cad47e0 SP |
18884 | } |
18885 | ||
261014a1 | 18886 | __extension__ extern __inline uint16x8_t |
5cad47e0 | 18887 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 18888 | __arm_vcvtnq_x_u16_f16 (float16x8_t __a, mve_pred16_t __p) |
5cad47e0 | 18889 | { |
261014a1 | 18890 | return __builtin_mve_vcvtnq_m_uv8hi (vuninitializedq_u16 (), __a, __p); |
5cad47e0 SP |
18891 | } |
18892 | ||
261014a1 | 18893 | __extension__ extern __inline uint32x4_t |
5cad47e0 | 18894 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 18895 | __arm_vcvtnq_x_u32_f32 (float32x4_t __a, mve_pred16_t __p) |
5cad47e0 | 18896 | { |
261014a1 | 18897 | return __builtin_mve_vcvtnq_m_uv4si (vuninitializedq_u32 (), __a, __p); |
5cad47e0 SP |
18898 | } |
18899 | ||
261014a1 | 18900 | __extension__ extern __inline int16x8_t |
5cad47e0 | 18901 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 18902 | __arm_vcvtpq_x_s16_f16 (float16x8_t __a, mve_pred16_t __p) |
5cad47e0 | 18903 | { |
261014a1 | 18904 | return __builtin_mve_vcvtpq_m_sv8hi (vuninitializedq_s16 (), __a, __p); |
5cad47e0 SP |
18905 | } |
18906 | ||
261014a1 | 18907 | __extension__ extern __inline int32x4_t |
7a5fffa5 | 18908 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 18909 | __arm_vcvtpq_x_s32_f32 (float32x4_t __a, mve_pred16_t __p) |
7a5fffa5 | 18910 | { |
261014a1 | 18911 | return __builtin_mve_vcvtpq_m_sv4si (vuninitializedq_s32 (), __a, __p); |
7a5fffa5 SP |
18912 | } |
18913 | ||
261014a1 | 18914 | __extension__ extern __inline uint16x8_t |
7a5fffa5 | 18915 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 18916 | __arm_vcvtpq_x_u16_f16 (float16x8_t __a, mve_pred16_t __p) |
7a5fffa5 | 18917 | { |
261014a1 | 18918 | return __builtin_mve_vcvtpq_m_uv8hi (vuninitializedq_u16 (), __a, __p); |
7a5fffa5 SP |
18919 | } |
18920 | ||
261014a1 | 18921 | __extension__ extern __inline uint32x4_t |
7a5fffa5 | 18922 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 18923 | __arm_vcvtpq_x_u32_f32 (float32x4_t __a, mve_pred16_t __p) |
7a5fffa5 | 18924 | { |
261014a1 | 18925 | return __builtin_mve_vcvtpq_m_uv4si (vuninitializedq_u32 (), __a, __p); |
7a5fffa5 SP |
18926 | } |
18927 | ||
261014a1 | 18928 | __extension__ extern __inline int16x8_t |
7a5fffa5 | 18929 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 18930 | __arm_vcvtmq_x_s16_f16 (float16x8_t __a, mve_pred16_t __p) |
7a5fffa5 | 18931 | { |
261014a1 | 18932 | return __builtin_mve_vcvtmq_m_sv8hi (vuninitializedq_s16 (), __a, __p); |
7a5fffa5 SP |
18933 | } |
18934 | ||
261014a1 | 18935 | __extension__ extern __inline int32x4_t |
7a5fffa5 | 18936 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 18937 | __arm_vcvtmq_x_s32_f32 (float32x4_t __a, mve_pred16_t __p) |
7a5fffa5 | 18938 | { |
261014a1 | 18939 | return __builtin_mve_vcvtmq_m_sv4si (vuninitializedq_s32 (), __a, __p); |
7a5fffa5 SP |
18940 | } |
18941 | ||
261014a1 | 18942 | __extension__ extern __inline uint16x8_t |
7a5fffa5 | 18943 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 18944 | __arm_vcvtmq_x_u16_f16 (float16x8_t __a, mve_pred16_t __p) |
7a5fffa5 | 18945 | { |
261014a1 | 18946 | return __builtin_mve_vcvtmq_m_uv8hi (vuninitializedq_u16 (), __a, __p); |
7a5fffa5 SP |
18947 | } |
18948 | ||
261014a1 | 18949 | __extension__ extern __inline uint32x4_t |
7a5fffa5 | 18950 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 18951 | __arm_vcvtmq_x_u32_f32 (float32x4_t __a, mve_pred16_t __p) |
7a5fffa5 | 18952 | { |
261014a1 | 18953 | return __builtin_mve_vcvtmq_m_uv4si (vuninitializedq_u32 (), __a, __p); |
7a5fffa5 SP |
18954 | } |
18955 | ||
261014a1 | 18956 | __extension__ extern __inline float32x4_t |
7a5fffa5 | 18957 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 18958 | __arm_vcvtbq_x_f32_f16 (float16x8_t __a, mve_pred16_t __p) |
7a5fffa5 | 18959 | { |
261014a1 | 18960 | return __builtin_mve_vcvtbq_m_f32_f16v4sf (vuninitializedq_f32 (), __a, __p); |
7a5fffa5 SP |
18961 | } |
18962 | ||
261014a1 | 18963 | __extension__ extern __inline float32x4_t |
7a5fffa5 | 18964 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 18965 | __arm_vcvttq_x_f32_f16 (float16x8_t __a, mve_pred16_t __p) |
7a5fffa5 | 18966 | { |
261014a1 | 18967 | return __builtin_mve_vcvttq_m_f32_f16v4sf (vuninitializedq_f32 (), __a, __p); |
7a5fffa5 SP |
18968 | } |
18969 | ||
/* Predicated "_x" integer-to-float conversions (VCVT).  Each wraps the
   corresponding __builtin_mve_vcvtq_m_to_f_* builtin; the uninitialized
   first argument means inactive lanes (per __p) are undefined.  */

/* u16 -> f16.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_x_f16_u16 (uint16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_to_f_uv8hf (vuninitializedq_f16 (), __a, __p);
}

/* s16 -> f16.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_x_f16_s16 (int16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_to_f_sv8hf (vuninitializedq_f16 (), __a, __p);
}

/* s32 -> f32.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_x_f32_s32 (int32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_to_f_sv4sf (vuninitializedq_f32 (), __a, __p);
}

/* u32 -> f32.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_x_f32_u32 (uint32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_to_f_uv4sf (vuninitializedq_f32 (), __a, __p);
}
18997 | ||
/* Predicated "_x" fixed-point-to-float conversions (VCVT with a fractional
   bit count).  __imm6 is the compile-time number of fractional bits passed
   through to the builtin; inactive lanes (per __p) are undefined.  */

/* s16 -> f16, __imm6 fractional bits.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_x_n_f16_s16 (int16x8_t __a, const int __imm6, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_n_to_f_sv8hf (vuninitializedq_f16 (), __a, __imm6, __p);
}

/* u16 -> f16, __imm6 fractional bits.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_x_n_f16_u16 (uint16x8_t __a, const int __imm6, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_n_to_f_uv8hf (vuninitializedq_f16 (), __a, __imm6, __p);
}

/* s32 -> f32, __imm6 fractional bits.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_x_n_f32_s32 (int32x4_t __a, const int __imm6, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_n_to_f_sv4sf (vuninitializedq_f32 (), __a, __imm6, __p);
}

/* u32 -> f32, __imm6 fractional bits.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_x_n_f32_u32 (uint32x4_t __a, const int __imm6, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_n_to_f_uv4sf (vuninitializedq_f32 (), __a, __imm6, __p);
}
19025 | ||
/* Predicated "_x" float-to-integer conversions (VCVT).  Inactive lanes
   (per __p) are undefined, hence the vuninitializedq_* inactive argument.  */

/* f16 -> s16.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_x_s16_f16 (float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_from_f_sv8hi (vuninitializedq_s16 (), __a, __p);
}

/* f32 -> s32.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_x_s32_f32 (float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_from_f_sv4si (vuninitializedq_s32 (), __a, __p);
}

/* f16 -> u16.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_x_u16_f16 (float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_from_f_uv8hi (vuninitializedq_u16 (), __a, __p);
}

/* f32 -> u32.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_x_u32_f32 (float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_from_f_uv4si (vuninitializedq_u32 (), __a, __p);
}
19053 | ||
/* Predicated "_x" float-to-fixed-point conversions (VCVT with a fractional
   bit count __imm6).  Inactive lanes (per __p) are undefined.  */

/* f16 -> s16, __imm6 fractional bits.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_x_n_s16_f16 (float16x8_t __a, const int __imm6, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_n_from_f_sv8hi (vuninitializedq_s16 (), __a, __imm6, __p);
}

/* f32 -> s32, __imm6 fractional bits.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_x_n_s32_f32 (float32x4_t __a, const int __imm6, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_n_from_f_sv4si (vuninitializedq_s32 (), __a, __imm6, __p);
}

/* f16 -> u16, __imm6 fractional bits.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_x_n_u16_f16 (float16x8_t __a, const int __imm6, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_n_from_f_uv8hi (vuninitializedq_u16 (), __a, __imm6, __p);
}

/* f32 -> u32, __imm6 fractional bits.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_x_n_u32_f32 (float32x4_t __a, const int __imm6, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_n_from_f_uv4si (vuninitializedq_u32 (), __a, __imm6, __p);
}
19081 | ||
/* Predicated "_x" round-to-integral operations on float vectors.  Each
   wraps the corresponding __builtin_mve_vrnd*q_m builtin with an
   uninitialized inactive vector, so lanes not selected by __p are
   undefined.  Variant suffixes follow the VRINT instructions:
     vrnd   -> VRINTZ (toward zero)
     vrndn  -> VRINTN (to nearest, ties to even)
     vrndm  -> VRINTM (toward minus infinity)
     vrndp  -> VRINTP (toward plus infinity)
     vrnda  -> VRINTA (to nearest, ties away from zero)
     vrndx  -> VRINTX (current rounding mode, signals inexact)  */

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndq_x_f16 (float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrndq_m_fv8hf (vuninitializedq_f16 (), __a, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndq_x_f32 (float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrndq_m_fv4sf (vuninitializedq_f32 (), __a, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndnq_x_f16 (float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrndnq_m_fv8hf (vuninitializedq_f16 (), __a, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndnq_x_f32 (float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrndnq_m_fv4sf (vuninitializedq_f32 (), __a, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndmq_x_f16 (float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrndmq_m_fv8hf (vuninitializedq_f16 (), __a, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndmq_x_f32 (float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrndmq_m_fv4sf (vuninitializedq_f32 (), __a, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndpq_x_f16 (float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrndpq_m_fv8hf (vuninitializedq_f16 (), __a, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndpq_x_f32 (float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrndpq_m_fv4sf (vuninitializedq_f32 (), __a, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndaq_x_f16 (float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrndaq_m_fv8hf (vuninitializedq_f16 (), __a, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndaq_x_f32 (float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrndaq_m_fv4sf (vuninitializedq_f32 (), __a, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndxq_x_f16 (float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrndxq_m_fv8hf (vuninitializedq_f16 (), __a, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndxq_x_f32 (float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrndxq_m_fv4sf (vuninitializedq_f32 (), __a, __p);
}
19165 | ||
/* Predicated "_x" bitwise operations on float vectors (operate on the raw
   bit patterns): vand = AND, vbic = AND-NOT (bit clear), veor = XOR,
   vorn = OR-NOT, vorr = OR.  vbrsrq_n wraps the VBRSR builtin with scalar
   operand __b.  All pass an uninitialized inactive vector, so lanes not
   selected by __p are undefined.  */

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vandq_m_fv8hf (vuninitializedq_f16 (), __a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vandq_m_fv4sf (vuninitializedq_f32 (), __a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vbicq_m_fv8hf (vuninitializedq_f16 (), __a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vbicq_m_fv4sf (vuninitializedq_f32 (), __a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq_x_n_f16 (float16x8_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vbrsrq_m_n_fv8hf (vuninitializedq_f16 (), __a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq_x_n_f32 (float32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vbrsrq_m_n_fv4sf (vuninitializedq_f32 (), __a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_veorq_m_fv8hf (vuninitializedq_f16 (), __a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_veorq_m_fv4sf (vuninitializedq_f32 (), __a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vornq_m_fv8hf (vuninitializedq_f16 (), __a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vornq_m_fv4sf (vuninitializedq_f32 (), __a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vorrq_m_fv8hf (vuninitializedq_f16 (), __a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vorrq_m_fv4sf (vuninitializedq_f32 (), __a, __b, __p);
}
19249 | ||
/* Predicated "_x" element-reversal operations (VREV32/VREV64): reverse the
   order of elements within each 32-bit / 64-bit container.  Inactive lanes
   (per __p) are undefined.  Note there is no vrev32q_x_f32 — f32 elements
   already fill a 32-bit container.  */

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev32q_x_f16 (float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrev32q_m_fv8hf (vuninitializedq_f16 (), __a, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_x_f16 (float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrev64q_m_fv8hf (vuninitializedq_f16 (), __a, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_x_f32 (float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrev64q_m_fv4sf (vuninitializedq_f32 (), __a, __p);
}
19270 | ||
e3678b44 SP |
19271 | #endif |
19272 | ||
/* Type tags used by the polymorphic-intrinsic dispatch machinery below.
   __ARM_mve_typeid maps an argument's C type to one of these enumerators,
   and the overload macros select an implementation via _Generic on
   "int (*)[tag]" array-pointer types.  The first tag starts at 1 and the
   enumerator order is part of that scheme — do not reorder or renumber.  */
enum {
  __ARM_mve_type_float16_t = 1,
  __ARM_mve_type_float16_t_ptr,
  __ARM_mve_type_float16_t_const_ptr,
  __ARM_mve_type_float16x8_t,
  __ARM_mve_type_float16x8x2_t,
  __ARM_mve_type_float16x8x4_t,
  __ARM_mve_type_float32_t,
  __ARM_mve_type_float32_t_ptr,
  __ARM_mve_type_float32_t_const_ptr,
  __ARM_mve_type_float32x4_t,
  __ARM_mve_type_float32x4x2_t,
  __ARM_mve_type_float32x4x4_t,
  __ARM_mve_type_int16_t,
  __ARM_mve_type_int16_t_ptr,
  __ARM_mve_type_int16_t_const_ptr,
  __ARM_mve_type_int16x8_t,
  __ARM_mve_type_int16x8x2_t,
  __ARM_mve_type_int16x8x4_t,
  __ARM_mve_type_int32_t,
  __ARM_mve_type_int32_t_ptr,
  __ARM_mve_type_int32_t_const_ptr,
  __ARM_mve_type_int32x4_t,
  __ARM_mve_type_int32x4x2_t,
  __ARM_mve_type_int32x4x4_t,
  __ARM_mve_type_int64_t,
  __ARM_mve_type_int64_t_ptr,
  __ARM_mve_type_int64_t_const_ptr,
  __ARM_mve_type_int64x2_t,
  __ARM_mve_type_int8_t,
  __ARM_mve_type_int8_t_ptr,
  __ARM_mve_type_int8_t_const_ptr,
  __ARM_mve_type_int8x16_t,
  __ARM_mve_type_int8x16x2_t,
  __ARM_mve_type_int8x16x4_t,
  __ARM_mve_type_uint16_t,
  __ARM_mve_type_uint16_t_ptr,
  __ARM_mve_type_uint16_t_const_ptr,
  __ARM_mve_type_uint16x8_t,
  __ARM_mve_type_uint16x8x2_t,
  __ARM_mve_type_uint16x8x4_t,
  __ARM_mve_type_uint32_t,
  __ARM_mve_type_uint32_t_ptr,
  __ARM_mve_type_uint32_t_const_ptr,
  __ARM_mve_type_uint32x4_t,
  __ARM_mve_type_uint32x4x2_t,
  __ARM_mve_type_uint32x4x4_t,
  __ARM_mve_type_uint64_t,
  __ARM_mve_type_uint64_t_ptr,
  __ARM_mve_type_uint64_t_const_ptr,
  __ARM_mve_type_uint64x2_t,
  __ARM_mve_type_uint8_t,
  __ARM_mve_type_uint8_t_ptr,
  __ARM_mve_type_uint8_t_const_ptr,
  __ARM_mve_type_uint8x16_t,
  __ARM_mve_type_uint8x16x2_t,
  __ARM_mve_type_uint8x16x4_t,
  __ARM_mve_unsupported_type
};
19332 | ||
e3678b44 SP |
/* __ARM_mve_typeid (x): map the static type of X to an __ARM_mve_type_*
   enumerator using C11 _Generic.  Two variants are provided: when the MVE
   floating-point extension is available the float16/float32 scalar, pointer
   and vector types are included; otherwise only the integer types are.
   The nested default _Generic handles plain integer expressions (literals,
   arithmetic results) whose type did not match an exact <stdint.h> typedef
   association above it.  */
#if (__ARM_FEATURE_MVE & 2) /* MVE Floating point.  */
#define __ARM_mve_typeid(x) _Generic(x, \
  float16_t: __ARM_mve_type_float16_t, \
  float16_t *: __ARM_mve_type_float16_t_ptr, \
  float16_t const *: __ARM_mve_type_float16_t_const_ptr, \
  float16x8_t: __ARM_mve_type_float16x8_t, \
  float16x8x2_t: __ARM_mve_type_float16x8x2_t, \
  float16x8x4_t: __ARM_mve_type_float16x8x4_t, \
  float32_t: __ARM_mve_type_float32_t, \
  float32_t *: __ARM_mve_type_float32_t_ptr, \
  float32_t const *: __ARM_mve_type_float32_t_const_ptr, \
  float32x4_t: __ARM_mve_type_float32x4_t, \
  float32x4x2_t: __ARM_mve_type_float32x4x2_t, \
  float32x4x4_t: __ARM_mve_type_float32x4x4_t, \
  int16_t: __ARM_mve_type_int16_t, \
  int16_t *: __ARM_mve_type_int16_t_ptr, \
  int16_t const *: __ARM_mve_type_int16_t_const_ptr, \
  int16x8_t: __ARM_mve_type_int16x8_t, \
  int16x8x2_t: __ARM_mve_type_int16x8x2_t, \
  int16x8x4_t: __ARM_mve_type_int16x8x4_t, \
  int32_t: __ARM_mve_type_int32_t, \
  int32_t *: __ARM_mve_type_int32_t_ptr, \
  int32_t const *: __ARM_mve_type_int32_t_const_ptr, \
  int32x4_t: __ARM_mve_type_int32x4_t, \
  int32x4x2_t: __ARM_mve_type_int32x4x2_t, \
  int32x4x4_t: __ARM_mve_type_int32x4x4_t, \
  int64_t: __ARM_mve_type_int64_t, \
  int64_t *: __ARM_mve_type_int64_t_ptr, \
  int64_t const *: __ARM_mve_type_int64_t_const_ptr, \
  int64x2_t: __ARM_mve_type_int64x2_t, \
  int8_t: __ARM_mve_type_int8_t, \
  int8_t *: __ARM_mve_type_int8_t_ptr, \
  int8_t const *: __ARM_mve_type_int8_t_const_ptr, \
  int8x16_t: __ARM_mve_type_int8x16_t, \
  int8x16x2_t: __ARM_mve_type_int8x16x2_t, \
  int8x16x4_t: __ARM_mve_type_int8x16x4_t, \
  uint16_t: __ARM_mve_type_uint16_t, \
  uint16_t *: __ARM_mve_type_uint16_t_ptr, \
  uint16_t const *: __ARM_mve_type_uint16_t_const_ptr, \
  uint16x8_t: __ARM_mve_type_uint16x8_t, \
  uint16x8x2_t: __ARM_mve_type_uint16x8x2_t, \
  uint16x8x4_t: __ARM_mve_type_uint16x8x4_t, \
  uint32_t: __ARM_mve_type_uint32_t, \
  uint32_t *: __ARM_mve_type_uint32_t_ptr, \
  uint32_t const *: __ARM_mve_type_uint32_t_const_ptr, \
  uint32x4_t: __ARM_mve_type_uint32x4_t, \
  uint32x4x2_t: __ARM_mve_type_uint32x4x2_t, \
  uint32x4x4_t: __ARM_mve_type_uint32x4x4_t, \
  uint64_t: __ARM_mve_type_uint64_t, \
  uint64_t *: __ARM_mve_type_uint64_t_ptr, \
  uint64_t const *: __ARM_mve_type_uint64_t_const_ptr, \
  uint64x2_t: __ARM_mve_type_uint64x2_t, \
  uint8_t: __ARM_mve_type_uint8_t, \
  uint8_t *: __ARM_mve_type_uint8_t_ptr, \
  uint8_t const *: __ARM_mve_type_uint8_t_const_ptr, \
  uint8x16_t: __ARM_mve_type_uint8x16_t, \
  uint8x16x2_t: __ARM_mve_type_uint8x16x2_t, \
  uint8x16x4_t: __ARM_mve_type_uint8x16x4_t, \
  default: _Generic(x, \
	signed char: __ARM_mve_type_int8_t, \
	short: __ARM_mve_type_int16_t, \
	int: __ARM_mve_type_int32_t, \
	long: __ARM_mve_type_int32_t, \
	long long: __ARM_mve_type_int64_t, \
	unsigned char: __ARM_mve_type_uint8_t, \
	unsigned short: __ARM_mve_type_uint16_t, \
	unsigned int: __ARM_mve_type_uint32_t, \
	unsigned long: __ARM_mve_type_uint32_t, \
	unsigned long long: __ARM_mve_type_uint64_t, \
	default: __ARM_mve_unsupported_type))
#else
/* Integer-only variant: identical, minus the float16/float32 entries.  */
#define __ARM_mve_typeid(x) _Generic(x, \
  int16_t: __ARM_mve_type_int16_t, \
  int16_t *: __ARM_mve_type_int16_t_ptr, \
  int16_t const *: __ARM_mve_type_int16_t_const_ptr, \
  int16x8_t: __ARM_mve_type_int16x8_t, \
  int16x8x2_t: __ARM_mve_type_int16x8x2_t, \
  int16x8x4_t: __ARM_mve_type_int16x8x4_t, \
  int32_t: __ARM_mve_type_int32_t, \
  int32_t *: __ARM_mve_type_int32_t_ptr, \
  int32_t const *: __ARM_mve_type_int32_t_const_ptr, \
  int32x4_t: __ARM_mve_type_int32x4_t, \
  int32x4x2_t: __ARM_mve_type_int32x4x2_t, \
  int32x4x4_t: __ARM_mve_type_int32x4x4_t, \
  int64_t: __ARM_mve_type_int64_t, \
  int64_t *: __ARM_mve_type_int64_t_ptr, \
  int64_t const *: __ARM_mve_type_int64_t_const_ptr, \
  int64x2_t: __ARM_mve_type_int64x2_t, \
  int8_t: __ARM_mve_type_int8_t, \
  int8_t *: __ARM_mve_type_int8_t_ptr, \
  int8_t const *: __ARM_mve_type_int8_t_const_ptr, \
  int8x16_t: __ARM_mve_type_int8x16_t, \
  int8x16x2_t: __ARM_mve_type_int8x16x2_t, \
  int8x16x4_t: __ARM_mve_type_int8x16x4_t, \
  uint16_t: __ARM_mve_type_uint16_t, \
  uint16_t *: __ARM_mve_type_uint16_t_ptr, \
  uint16_t const *: __ARM_mve_type_uint16_t_const_ptr, \
  uint16x8_t: __ARM_mve_type_uint16x8_t, \
  uint16x8x2_t: __ARM_mve_type_uint16x8x2_t, \
  uint16x8x4_t: __ARM_mve_type_uint16x8x4_t, \
  uint32_t: __ARM_mve_type_uint32_t, \
  uint32_t *: __ARM_mve_type_uint32_t_ptr, \
  uint32_t const *: __ARM_mve_type_uint32_t_const_ptr, \
  uint32x4_t: __ARM_mve_type_uint32x4_t, \
  uint32x4x2_t: __ARM_mve_type_uint32x4x2_t, \
  uint32x4x4_t: __ARM_mve_type_uint32x4x4_t, \
  uint64_t: __ARM_mve_type_uint64_t, \
  uint64_t *: __ARM_mve_type_uint64_t_ptr, \
  uint64_t const *: __ARM_mve_type_uint64_t_const_ptr, \
  uint64x2_t: __ARM_mve_type_uint64x2_t, \
  uint8_t: __ARM_mve_type_uint8_t, \
  uint8_t *: __ARM_mve_type_uint8_t_ptr, \
  uint8_t const *: __ARM_mve_type_uint8_t_const_ptr, \
  uint8x16_t: __ARM_mve_type_uint8x16_t, \
  uint8x16x2_t: __ARM_mve_type_uint8x16x2_t, \
  uint8x16x4_t: __ARM_mve_type_uint8x16x4_t, \
  default: _Generic(x, \
	signed char: __ARM_mve_type_int8_t, \
	short: __ARM_mve_type_int16_t, \
	int: __ARM_mve_type_int32_t, \
	long: __ARM_mve_type_int32_t, \
	long long: __ARM_mve_type_int64_t, \
	unsigned char: __ARM_mve_type_uint8_t, \
	unsigned short: __ARM_mve_type_uint16_t, \
	unsigned int: __ARM_mve_type_uint32_t, \
	unsigned long: __ARM_mve_type_uint32_t, \
	unsigned long long: __ARM_mve_type_uint64_t, \
	default: __ARM_mve_unsupported_type))
#endif /* MVE Floating point.  */
19462 | ||
/* Argument coercion for the polymorphic macros: if PARAM already has TYPE
   it is passed through unchanged; otherwise the default branch yields
   *(type *)__ARM_undef.  NOTE(review): __ARM_undef is declared but never
   defined, so a mismatched argument appears to be diagnosed via the
   undefined reference / invalid selection rather than converted silently —
   confirm against the GCC implementation.  __ARM_mve_coerce1 additionally
   accepts a const-qualified TYPE.  */
extern void *__ARM_undef;
#define __ARM_mve_coerce(param, type) \
    _Generic(param, type: param, default: *(type *)__ARM_undef)
#define __ARM_mve_coerce1(param, type) \
    _Generic(param, type: param, const type: param, default: *(type *)__ARM_undef)
19468 | ||
#if (__ARM_FEATURE_MVE & 2) /* MVE Floating point.  */

/* Polymorphic vst4q: dispatch on the (pointer, 4-vector-struct) argument
   pair to the matching __arm_vst4q_<type> implementation.  The _Generic
   selector is a pointer-to-2D-array type built from the two type ids, so
   both arguments participate in overload resolution at once.  */
#define vst4q(p0,p1) __arm_vst4q(p0,p1)
#define __arm_vst4q(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16x4_t]: __arm_vst4q_s8 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, int8x16x4_t)), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8x4_t]: __arm_vst4q_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, int16x8x4_t)), \
  int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4x4_t]: __arm_vst4q_s32 (__ARM_mve_coerce(__p0, int32_t *), __ARM_mve_coerce(__p1, int32x4x4_t)), \
  int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16x4_t]: __arm_vst4q_u8 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16x4_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8x4_t]: __arm_vst4q_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8x4_t)), \
  int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4x4_t]: __arm_vst4q_u32 (__ARM_mve_coerce(__p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4x4_t)), \
  int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_float16x8x4_t]: __arm_vst4q_f16 (__ARM_mve_coerce(__p0, float16_t *), __ARM_mve_coerce(__p1, float16x8x4_t)), \
  int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4x4_t]: __arm_vst4q_f32 (__ARM_mve_coerce(__p0, float32_t *), __ARM_mve_coerce(__p1, float32x4x4_t)));})
19483 | ||
/* Polymorphic single-argument rounding intrinsics: dispatch on the vector
   type of p0 (f16 or f32) to the matching __arm_vrnd*q_<type> function.  */
#define vrndxq(p0) __arm_vrndxq(p0)
#define __arm_vrndxq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndxq_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndxq_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})

#define vrndq(p0) __arm_vrndq(p0)
#define __arm_vrndq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndq_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndq_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})

#define vrndpq(p0) __arm_vrndpq(p0)
#define __arm_vrndpq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndpq_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndpq_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})

#define vrndnq(p0) __arm_vrndnq(p0)
#define __arm_vrndnq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndnq_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndnq_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})

#define vrndmq(p0) __arm_vrndmq(p0)
#define __arm_vrndmq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndmq_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndmq_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})

#define vrndaq(p0) __arm_vrndaq(p0)
#define __arm_vrndaq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndaq_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndaq_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
19519 | ||
/* Polymorphic element-reverse / negate / duplicate / absolute-value
   intrinsics.  vrev64q covers all integer and float vector types; vnegq
   and vabsq only signed/float types (no unsigned variants exist).  */
#define vrev64q(p0) __arm_vrev64q(p0)
#define __arm_vrev64q(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev64q_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vrev64q_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vrev64q_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev64q_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrev64q_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrev64q_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vrev64q_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vrev64q_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})

#define vnegq(p0) __arm_vnegq(p0)
#define __arm_vnegq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vnegq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vnegq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vnegq_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vnegq_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vnegq_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})

/* NOTE(review): vdupq_n is a "_n" (scalar-source) variant, yet these
   selections dispatch on and coerce *vector* types — looks suspect;
   confirm against a current arm_mve.h before relying on it.  Kept as-is.  */
#define vdupq_n(p0) __arm_vdupq_n(p0)
#define __arm_vdupq_n(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vdupq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vdupq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})

#define vabsq(p0) __arm_vabsq(p0)
#define __arm_vabsq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vabsq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vabsq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vabsq_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vabsq_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vabsq_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
19555 | ||
/* Polymorphic vrev32q: dispatches only element types narrower than 32 bits
   (s8/s16/u8/u16/f16) -- reversing within 32-bit containers is meaningless
   for 32-bit elements, so those types are intentionally absent.  */
#define vrev32q(p0) __arm_vrev32q(p0)
#define __arm_vrev32q(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev32q_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vrev32q_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev32q_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrev32q_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vrev32q_f16 (__ARM_mve_coerce(__p0, float16x8_t)));})
19564 | ||
/* Polymorphic vcvtbq_f32: single association, f16 vector in, mapping to
   __arm_vcvtbq_f32_f16 (f16 -> f32 widening conversion; the "b" variant
   per the intrinsic's name).  The _Generic still provides type checking
   and a clear error for unsupported argument types.  */
#define vcvtbq_f32(p0) __arm_vcvtbq_f32(p0)
#define __arm_vcvtbq_f32(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vcvtbq_f32_f16 (__ARM_mve_coerce(__p0, float16x8_t)));})
19569 | ||
/* Polymorphic vcvttq_f32: single association, f16 vector in, mapping to
   __arm_vcvttq_f32_f16 (the "t" counterpart of vcvtbq_f32 above).  */
#define vcvttq_f32(p0) __arm_vcvttq_f32(p0)
#define __arm_vcvttq_f32(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vcvttq_f32_f16 (__ARM_mve_coerce(__p0, float16x8_t)));})
19574 | ||
/* Polymorphic vrev16q: only 8-bit element types (s8/u8) are valid --
   reversal happens within 16-bit containers, so wider elements are
   intentionally rejected by the _Generic.  */
#define vrev16q(p0) __arm_vrev16q(p0)
#define __arm_vrev16q(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev16q_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev16q_u8 (__ARM_mve_coerce(__p0, uint8x16_t)));})
19580 | ||
/* Polymorphic vqabsq (saturating absolute value): signed-integer vectors
   only, per the underlying __arm_vqabsq_s* intrinsics.  */
#define vqabsq(p0) __arm_vqabsq(p0)
#define __arm_vqabsq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vqabsq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vqabsq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vqabsq_s32 (__ARM_mve_coerce(__p0, int32x4_t)));})
19587 | ||
/* Polymorphic vqnegq (saturating negate): signed-integer vectors only,
   mirroring vqabsq above.  */
#define vqnegq(p0) __arm_vqnegq(p0)
#define __arm_vqnegq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vqnegq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vqnegq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vqnegq_s32 (__ARM_mve_coerce(__p0, int32x4_t)));})
19594 | ||
/* Polymorphic vmvnq (bitwise NOT): all six integer vector types; no
   floating-point variants exist for this operation.  */
#define vmvnq(p0) __arm_vmvnq(p0)
#define __arm_vmvnq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vmvnq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vmvnq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vmvnq_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmvnq_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmvnq_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vmvnq_u32 (__ARM_mve_coerce(__p0, uint32x4_t)));})
19604 | ||
/* Polymorphic vmovlbq (widening move, bottom half): accepts only 8- and
   16-bit element vectors since the result doubles the element width.  */
#define vmovlbq(p0) __arm_vmovlbq(p0)
#define __arm_vmovlbq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vmovlbq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vmovlbq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmovlbq_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmovlbq_u16 (__ARM_mve_coerce(__p0, uint16x8_t)));})
19612 | ||
/* Polymorphic vmovltq (widening move, top half): same dispatch set as
   vmovlbq above.  */
#define vmovltq(p0) __arm_vmovltq(p0)
#define __arm_vmovltq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vmovltq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vmovltq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmovltq_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmovltq_u16 (__ARM_mve_coerce(__p0, uint16x8_t)));})
19620 | ||
/* Polymorphic vclzq (count leading zeros): all six integer vector types.  */
#define vclzq(p0) __arm_vclzq(p0)
#define __arm_vclzq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vclzq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vclzq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vclzq_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vclzq_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vclzq_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vclzq_u32 (__ARM_mve_coerce(__p0, uint32x4_t)));})
19630 | ||
/* Polymorphic vclsq (count leading sign bits): signed vectors only --
   the operation is undefined for unsigned element types.  */
#define vclsq(p0) __arm_vclsq(p0)
#define __arm_vclsq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vclsq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vclsq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vclsq_s32 (__ARM_mve_coerce(__p0, int32x4_t)));})
19637 | ||
/* Polymorphic vcvtq: integer-to-float conversion keyed on the *source*
   integer type; 16-bit lanes produce f16, 32-bit lanes produce f32.  */
#define vcvtq(p0) __arm_vcvtq(p0)
#define __arm_vcvtq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vcvtq_f16_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vcvtq_f32_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vcvtq_f16_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vcvtq_f32_u32 (__ARM_mve_coerce(__p0, uint32x4_t)));})
19645 | ||
/* Polymorphic vsubq_n (vector - scalar): two-key _Generic over the vector
   operand's type and the scalar operand's type; both arguments are
   snapshotted so each is evaluated exactly once.
   NOTE(review): vsubq_n/__arm_vsubq_n are defined a second time further
   down this file with the same association set in a different order --
   that duplicate should be removed (macro redefinition).  */
#define vsubq_n(p0,p1) __arm_vsubq_n(p0,p1)
#define __arm_vsubq_n(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vsubq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vsubq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vsubq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vsubq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vsubq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vsubq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vsubq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vsubq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)));})
19658 | ||
/* Polymorphic vshlq (vector shift left): the shift-count vector is always
   the *signed* vector of the same element width, even for unsigned data
   operands -- hence the [uintN][intN] pairings.  */
#define vshlq(p0,p1) __arm_vshlq(p0,p1)
#define __arm_vshlq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vshlq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vshlq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vshlq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vshlq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vshlq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vshlq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
19669 | ||
/* Polymorphic vshrq (shift right by immediate): dispatches only on the
   vector operand.  p1 is deliberately passed through *without* being
   snapshotted into a temporary -- the _n intrinsics require an
   integer-constant-expression immediate, which a __typeof temporary
   would no longer be.  */
#define vshrq(p0,p1) __arm_vshrq(p0,p1)
#define __arm_vshrq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vshrq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vshrq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vshrq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshrq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshrq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshrq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
19679 | ||
/* Polymorphic vcvtq_n (integer to float, fixed-point scaling): like vshrq,
   the immediate p1 is passed un-snapshotted so it stays a constant
   expression for the underlying intrinsic.  */
#define vcvtq_n(p0,p1) __arm_vcvtq_n(p0,p1)
#define __arm_vcvtq_n(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vcvtq_n_f16_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vcvtq_n_f32_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vcvtq_n_f16_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vcvtq_n_f32_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
19687 | ||
/* Duplicate definition of vsubq_n/__arm_vsubq_n removed: both macros are
   already defined earlier in this file with the identical set of _Generic
   associations (only the association order differed).  Redefining an
   object-like or function-like macro with a replacement list that is not
   token-identical violates C11 6.10.3p2 and triggers
   -Wmacro-redefinition/-Werror builds, so the later copy is dropped and
   the first definition remains authoritative.  */
19700 | ||
/* Polymorphic vsubq (element-wise vector - vector): two-key _Generic over
   both operand types; integer and floating-point variants.  */
#define vsubq(p0,p1) __arm_vsubq(p0,p1)
#define __arm_vsubq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsubq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsubq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsubq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsubq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsubq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsubq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vsubq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vsubq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
19713 | ||
/* Polymorphic vorrq (bitwise OR): all integer vector types plus f16/f32
   (bitwise operation on the float bit patterns via the _f variants).  */
#define vorrq(p0,p1) __arm_vorrq(p0,p1)
#define __arm_vorrq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vorrq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vorrq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vorrq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vorrq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vorrq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vorrq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vorrq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vorrq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
19726 | ||
/* Polymorphic vabdq (absolute difference): all six integer types plus
   f16/f32.  */
#define vabdq(p0,p1) __arm_vabdq(p0,p1)
#define __arm_vabdq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vabdq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vabdq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vabdq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vabdq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vabdq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vabdq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vabdq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vabdq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
19739 | ||
/* Polymorphic vaddq: two-key _Generic dispatch covering both the
   vector+vector forms (__arm_vaddq_<t>) and the vector+scalar forms
   (__arm_vaddq_n_<t>) for all integer and floating-point element types.
   Both arguments are snapshotted into __p0/__p1 so each macro argument is
   evaluated exactly once.

   Fix: the float16x8_t/float32x4_t vector+vector associations previously
   used the raw macro parameters p0/p1 instead of the __p0/__p1 snapshots,
   which re-evaluated the arguments (duplicating any side effects) and was
   inconsistent with every other association in this macro.  */
#define vaddq(p0,p1) __arm_vaddq(p0,p1)
#define __arm_vaddq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vaddq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vaddq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vaddq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vaddq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vaddq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vaddq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vaddq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vaddq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vaddq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vaddq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vaddq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vaddq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vaddq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vaddq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vaddq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vaddq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32_t)));})
19760 | ||
/* Polymorphic vandq (bitwise AND): integer types plus the _f variants for
   f16/f32 operands.  */
#define vandq(p0,p1) __arm_vandq(p0,p1)
#define __arm_vandq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vandq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vandq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vandq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vandq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vandq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vandq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vandq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vandq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
19773 | ||
/* Polymorphic vbicq (bit clear, i.e. p0 AND NOT p1): integer types plus
   the _f variants for f16/f32 operands.  */
#define vbicq(p0,p1) __arm_vbicq(p0,p1)
#define __arm_vbicq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vbicq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vbicq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vbicq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vbicq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vbicq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vbicq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vbicq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vbicq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
19786 | ||
/* Polymorphic vornq (OR NOT, i.e. p0 OR NOT p1): integer types plus the
   _f variants for f16/f32 operands.  */
#define vornq(p0,p1) __arm_vornq(p0,p1)
#define __arm_vornq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vornq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vornq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vornq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vornq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vornq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vornq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vornq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vornq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
19799 | ||
/* Polymorphic vmulq_n (vector * scalar): two-key dispatch pairing each
   vector type with its matching scalar element type.  */
#define vmulq_n(p0,p1) __arm_vmulq_n(p0,p1)
#define __arm_vmulq_n(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vmulq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vmulq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vmulq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vmulq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vmulq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vmulq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vmulq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vmulq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32_t)));})
19812 | ||
/* Polymorphic vmulq (element-wise vector * vector): integer and
   floating-point variants.  */
#define vmulq(p0,p1) __arm_vmulq(p0,p1)
#define __arm_vmulq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmulq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmulq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
19825 | ||
/* Polymorphic vcaddq_rot270 (complex add with 270-degree rotation, per the
   intrinsic's name): integer and floating-point variants.  */
#define vcaddq_rot270(p0,p1) __arm_vcaddq_rot270(p0,p1)
#define __arm_vcaddq_rot270(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot270_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot270_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot270_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot270_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot270_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot270_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcaddq_rot270_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcaddq_rot270_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
19838 | ||
/* Polymorphic vcmpeqq (equality compare, producing a predicate): the
   scalar-second-operand associations (dispatching to the _n variants)
   are listed before the vector-vector associations; _Generic selection
   is unordered, so each [vector][scalar] vs [vector][vector] key pair
   is distinct and unambiguous.  */
#define vcmpeqq(p0,p1) __arm_vcmpeqq(p0,p1)
#define __arm_vcmpeqq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpeqq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpeqq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpeqq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vcmpeqq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vcmpeqq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vcmpeqq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vcmpeqq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vcmpeqq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpeqq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpeqq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpeqq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpeqq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpeqq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpeqq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpeqq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpeqq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
19859 | ||
/* Polymorphic vcaddq_rot90 (complex add with 90-degree rotation):
   dispatches on the vector element type via _Generic.  */
#define vcaddq_rot90(p0,p1) __arm_vcaddq_rot90(p0,p1)
#define __arm_vcaddq_rot90(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot90_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot90_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot90_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot90_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot90_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot90_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcaddq_rot90_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcaddq_rot90_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
f9355dee | 19872 | |
e3678b44 SP |
/* Polymorphic vcmpgeq_n (vector compare greater-or-equal against a scalar):
   signed-integer and float variants only; unsigned uses vcmpcsq.  */
#define vcmpgeq_n(p0,p1) __arm_vcmpgeq_n(p0,p1)
#define __arm_vcmpgeq_n(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpgeq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpgeq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpgeq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vcmpgeq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vcmpgeq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32_t)));})
f9355dee | 19882 | |
e3678b44 SP |
/* Polymorphic vcmpeqq_m (predicated vector compare equal): p2 is the
   mve_pred16_t predicate and is passed through unmodified.  */
#define vcmpeqq_m(p0,p1,p2) __arm_vcmpeqq_m(p0,p1,p2)
#define __arm_vcmpeqq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpeqq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpeqq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpeqq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpeqq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpeqq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpeqq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpeqq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpeqq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpeqq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vcmpeqq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vcmpeqq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vcmpeqq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpeqq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpeqq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vcmpeqq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vcmpeqq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32_t), p2));})
f9355dee | 19903 | |
e3678b44 SP |
/* Polymorphic vcmpgtq (vector compare greater-than): vector/vector forms
   for signed-integer and float element types.  */
#define vcmpgtq(p0,p1) __arm_vcmpgtq(p0,p1)
#define __arm_vcmpgtq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpgtq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpgtq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpgtq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpgtq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpgtq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
f9355dee | 19913 | |
e3678b44 SP |
/* Polymorphic vcmpleq (vector compare less-or-equal): vector/vector and
   vector/scalar (_n) forms for signed-integer and float element types.  */
#define vcmpleq(p0,p1) __arm_vcmpleq(p0,p1)
#define __arm_vcmpleq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpleq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpleq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpleq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpleq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpleq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpleq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpleq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpleq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vcmpleq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vcmpleq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32_t)));})
19928 | ||
/* Polymorphic vcmpltq (vector compare less-than): vector/vector and
   vector/scalar (_n) forms for signed-integer and float element types.  */
#define vcmpltq(p0,p1) __arm_vcmpltq(p0,p1)
#define __arm_vcmpltq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpltq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpltq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpltq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpltq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpltq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpltq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpltq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpltq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vcmpltq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vcmpltq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32_t)));})
19943 | ||
/* Polymorphic vcmpneq (vector compare not-equal): vector/scalar (_n) and
   vector/vector forms for all integer and float element types.  */
#define vcmpneq(p0,p1) __arm_vcmpneq(p0,p1)
#define __arm_vcmpneq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpneq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpneq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpneq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vcmpneq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vcmpneq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vcmpneq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vcmpneq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vcmpneq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpneq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpneq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpneq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpneq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpneq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpneq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpneq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpneq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
19964 | ||
/* Polymorphic vcmulq (complex multiply): float-only operation.  */
#define vcmulq(p0,p1) __arm_vcmulq(p0,p1)
#define __arm_vcmulq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
19971 | ||
/* Polymorphic vcmulq_rot180 (complex multiply, 180-degree rotation):
   float-only operation.  */
#define vcmulq_rot180(p0,p1) __arm_vcmulq_rot180(p0,p1)
#define __arm_vcmulq_rot180(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_rot180_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_rot180_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
f9355dee | 19978 | |
e3678b44 SP |
/* Polymorphic vcmulq_rot270 (complex multiply, 270-degree rotation):
   float-only operation.  */
#define vcmulq_rot270(p0,p1) __arm_vcmulq_rot270(p0,p1)
#define __arm_vcmulq_rot270(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_rot270_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_rot270_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
f9355dee | 19985 | |
e3678b44 SP |
/* Polymorphic vcmulq_rot90 (complex multiply, 90-degree rotation):
   float-only operation.  */
#define vcmulq_rot90(p0,p1) __arm_vcmulq_rot90(p0,p1)
#define __arm_vcmulq_rot90(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_rot90_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_rot90_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
f9355dee | 19992 | |
e3678b44 SP |
/* Polymorphic veorq (bitwise exclusive-OR): all integer and float
   vector element types.  */
#define veorq(p0,p1) __arm_veorq(p0,p1)
#define __arm_veorq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_veorq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_veorq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_veorq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_veorq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_veorq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_veorq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_veorq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_veorq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
f9355dee | 20005 | |
e3678b44 SP |
/* Polymorphic vmaxnmaq (floating-point maximum of absolute values,
   accumulating into the first operand): float-only operation.  */
#define vmaxnmaq(p0,p1) __arm_vmaxnmaq(p0,p1)
#define __arm_vmaxnmaq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmaxnmaq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmaxnmaq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
f9355dee | 20012 | |
e3678b44 SP |
/* Polymorphic vmaxnmavq: first operand is a float scalar, second a float
   vector (across-vector reduction form).  */
#define vmaxnmavq(p0,p1) __arm_vmaxnmavq(p0,p1)
#define __arm_vmaxnmavq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16_t][__ARM_mve_type_float16x8_t]: __arm_vmaxnmavq_f16 (__ARM_mve_coerce(__p0, float16_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32_t][__ARM_mve_type_float32x4_t]: __arm_vmaxnmavq_f32 (__ARM_mve_coerce(__p0, float32_t), __ARM_mve_coerce(__p1, float32x4_t)));})
f9355dee | 20019 | |
e3678b44 SP |
/* Polymorphic vmaxnmq (floating-point maxNum): float-only operation.  */
#define vmaxnmq(p0,p1) __arm_vmaxnmq(p0,p1)
#define __arm_vmaxnmq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmaxnmq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmaxnmq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
f9355dee | 20026 | |
e3678b44 SP |
/* Polymorphic vmaxnmvq: first operand is a float scalar, second a float
   vector (across-vector reduction form).  */
#define vmaxnmvq(p0,p1) __arm_vmaxnmvq(p0,p1)
#define __arm_vmaxnmvq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16_t][__ARM_mve_type_float16x8_t]: __arm_vmaxnmvq_f16 (__ARM_mve_coerce(__p0, float16_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32_t][__ARM_mve_type_float32x4_t]: __arm_vmaxnmvq_f32 (__ARM_mve_coerce(__p0, float32_t), __ARM_mve_coerce(__p1, float32x4_t)));})
f9355dee | 20033 | |
e3678b44 SP |
/* A byte-identical duplicate of the vmaxnmvq / __arm_vmaxnmvq definition
   appeared here.  Identical macro redefinition is permitted by the C
   standard (benign redefinition), but the copy is redundant and has been
   removed.  */
f9355dee | 20040 | |
e3678b44 SP |
/* Polymorphic vminnmaq (floating-point minimum of absolute values,
   accumulating into the first operand): float-only operation.  */
#define vminnmaq(p0,p1) __arm_vminnmaq(p0,p1)
#define __arm_vminnmaq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vminnmaq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vminnmaq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
f9355dee | 20047 | |
e3678b44 SP |
/* Polymorphic vminnmavq: first operand is a float scalar, second a float
   vector (across-vector reduction form).  */
#define vminnmavq(p0,p1) __arm_vminnmavq(p0,p1)
#define __arm_vminnmavq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16_t][__ARM_mve_type_float16x8_t]: __arm_vminnmavq_f16 (__ARM_mve_coerce(__p0, float16_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32_t][__ARM_mve_type_float32x4_t]: __arm_vminnmavq_f32 (__ARM_mve_coerce(__p0, float32_t), __ARM_mve_coerce(__p1, float32x4_t)));})
f9355dee | 20054 | |
e3678b44 SP |
/* Polymorphic vbrsrq (bit-reverse shift right): dispatches on the vector
   operand only; p1 is the scalar shift argument, passed unmodified.  */
#define vbrsrq(p0,p1) __arm_vbrsrq(p0,p1)
#define __arm_vbrsrq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vbrsrq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vbrsrq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vbrsrq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vbrsrq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vbrsrq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vbrsrq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1), \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vbrsrq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), p1), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vbrsrq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), p1));})
f9355dee | 20066 | |
e3678b44 SP |
/* Polymorphic vminnmq (floating-point minNum): float-only operation.  */
#define vminnmq(p0,p1) __arm_vminnmq(p0,p1)
#define __arm_vminnmq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vminnmq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vminnmq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
f9355dee | 20073 | |
e3678b44 SP |
/* Polymorphic vsubq (vector subtract): vector/scalar (_n) and
   vector/vector forms for all integer and float element types.  */
#define vsubq(p0,p1) __arm_vsubq(p0,p1)
#define __arm_vsubq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vsubq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vsubq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vsubq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vsubq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vsubq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vsubq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vsubq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vsubq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsubq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsubq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsubq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsubq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsubq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsubq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vsubq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vsubq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
f9355dee | 20094 | |
e3678b44 SP |
/* Polymorphic vminnmvq: first operand is a float scalar, second a float
   vector (across-vector reduction form).  */
#define vminnmvq(p0,p1) __arm_vminnmvq(p0,p1)
#define __arm_vminnmvq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16_t][__ARM_mve_type_float16x8_t]: __arm_vminnmvq_f16 (__ARM_mve_coerce(__p0, float16_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32_t][__ARM_mve_type_float32x4_t]: __arm_vminnmvq_f32 (__ARM_mve_coerce(__p0, float32_t), __ARM_mve_coerce(__p1, float32x4_t)));})
f9355dee | 20101 | |
e3678b44 SP |
/* Polymorphic vcmpgeq (vector compare greater-or-equal): vector/vector
   and vector/scalar (_n) forms for signed-integer and float types.  */
#define vcmpgeq(p0,p1) __arm_vcmpgeq(p0,p1)
#define __arm_vcmpgeq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpgeq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpgeq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpgeq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpgeq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpgeq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpgeq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpgeq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpgeq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vcmpgeq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vcmpgeq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32_t)));})
f9355dee | 20116 | |
e3678b44 SP |
/* Polymorphic vshlq_r (vector shift left by register): integer element
   types only; p1 is the scalar shift count, passed unmodified.  */
#define vshlq_r(p0,p1) __arm_vshlq_r(p0,p1)
#define __arm_vshlq_r(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlq_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlq_r_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlq_r_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlq_r_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlq_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlq_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
f9355dee | 20126 | |
e3678b44 SP |
/* Polymorphic vshlq_n (vector shift left by immediate): integer element
   types only; p1 is the immediate shift count, passed unmodified.  */
#define vshlq_n(p0,p1) __arm_vshlq_n(p0,p1)
#define __arm_vshlq_n(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
f9355dee | 20136 | |
e3678b44 SP |
/* vshlltq: polymorphic entry point.  Only the narrow (8- and 16-bit
   element) vector types are accepted, matching the widening intrinsics.  */
#define vshlltq(p0,p1) __arm_vshlltq(p0,p1)
#define __arm_vshlltq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlltq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlltq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlltq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlltq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1));})
f9355dee | 20144 | |
e3678b44 SP |
/* vshllbq: polymorphic entry point.  As with vshlltq, only narrow
   (8- and 16-bit element) vector types have type-specific variants.  */
#define vshllbq(p0,p1) __arm_vshllbq(p0,p1)
#define __arm_vshllbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vshllbq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vshllbq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshllbq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshllbq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1));})
f9355dee | 20152 | |
e3678b44 SP |
/* vrshrq: polymorphic entry point covering all six integer vector types;
   p1 (immediate shift count) is forwarded unchanged.  */
#define vrshrq(p0,p1) __arm_vrshrq(p0,p1)
#define __arm_vrshrq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vrshrq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vrshrq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vrshrq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrshrq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrshrq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrshrq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
f9355dee | 20162 | |
e3678b44 SP |
/* NOTE(review): a second, token-identical definition of vrshrq and
   __arm_vrshrq appeared here, immediately after the first one above.
   Redefining a macro with an identical replacement list is permitted
   (C11 6.10.3p2) but serves no purpose, so the duplicate is removed.  */
f9355dee | 20172 | |
e3678b44 SP |
/* vrshlq: polymorphic entry point keyed on BOTH argument types.  A scalar
   int32_t second argument selects the _n (shift-by-register-scalar)
   variants; a signed vector second argument selects the per-element
   vector-shift variants.  */
#define vrshlq(p0,p1) __arm_vrshlq(p0,p1)
#define __arm_vrshlq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int32_t]: __arm_vrshlq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32_t]: __arm_vrshlq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vrshlq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int32_t]: __arm_vrshlq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32_t]: __arm_vrshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32_t]: __arm_vrshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshlq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshlq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
f9355dee | 20189 | |
e3678b44 SP |
/* vrmulhq: polymorphic entry point; both operands must share the same
   integer vector type.  */
#define vrmulhq(p0,p1) __arm_vrmulhq(p0,p1)
#define __arm_vrmulhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrmulhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrmulhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrmulhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrmulhq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrmulhq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrmulhq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
f9355dee | 20200 | |
e3678b44 SP |
/* vrhaddq: polymorphic entry point; both operands must share the same
   integer vector type.  */
#define vrhaddq(p0,p1) __arm_vrhaddq(p0,p1)
#define __arm_vrhaddq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrhaddq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrhaddq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrhaddq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrhaddq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrhaddq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrhaddq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
20211 | ||
/* vqsubq: polymorphic entry point keyed on both argument types.  A scalar
   second argument of matching element type selects the _n variants;
   a vector second argument of the same type selects the vector variants.  */
#define vqsubq(p0,p1) __arm_vqsubq(p0,p1)
#define __arm_vqsubq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vqsubq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vqsubq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vqsubq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vqsubq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vqsubq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vqsubq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqsubq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqsubq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqsubq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vqsubq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vqsubq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vqsubq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
20228 | ||
/* vqshluq: polymorphic entry point; only signed vector inputs have
   type-specific variants (the result types are defined elsewhere).  */
#define vqshluq(p0,p1) __arm_vqshluq(p0,p1)
#define __arm_vqshluq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vqshluq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vqshluq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vqshluq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1));})
f9355dee | 20235 | |
e3678b44 SP |
/* vqshlq: polymorphic entry point; the second operand is always a signed
   vector of matching width, even for unsigned first operands.  */
#define vqshlq(p0,p1) __arm_vqshlq(p0,p1)
#define __arm_vqshlq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqshlq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqshlq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqshlq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqshlq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqshlq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqshlq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
20246 | ||
/* vqshlq_r: polymorphic entry point over all six integer vector types;
   p1 (shift amount) is forwarded unchanged.  */
#define vqshlq_r(p0,p1) __arm_vqshlq_r(p0,p1)
#define __arm_vqshlq_r(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vqshlq_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vqshlq_r_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vqshlq_r_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vqshlq_r_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vqshlq_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vqshlq_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
f9355dee | 20256 | |
e3678b44 SP |
/* vqshlq_n: polymorphic entry point over all six integer vector types;
   p1 (immediate shift count) is forwarded unchanged.  */
#define vqshlq_n(p0,p1) __arm_vqshlq_n(p0,p1)
#define __arm_vqshlq_n(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vqshlq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vqshlq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vqshlq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vqshlq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vqshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vqshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
20266 | ||
/* vqrshlq: polymorphic entry point keyed on both argument types.  A signed
   vector second argument selects the per-element variants; a scalar
   int32_t selects the _n (shift-by-register-scalar) variants.  */
#define vqrshlq(p0,p1) __arm_vqrshlq(p0,p1)
#define __arm_vqrshlq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrshlq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrshlq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrshlq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrshlq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrshlq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrshlq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int32_t]: __arm_vqrshlq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32_t]: __arm_vqrshlq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vqrshlq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int32_t]: __arm_vqrshlq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32_t]: __arm_vqrshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32_t]: __arm_vqrshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32_t)));})
f9355dee | 20283 | |
e3678b44 SP |
/* vqrdmulhq: polymorphic entry point; signed types only.  Vector/vector
   pairs select the vector variants, vector/scalar pairs the _n variants.  */
#define vqrdmulhq(p0,p1) __arm_vqrdmulhq(p0,p1)
#define __arm_vqrdmulhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmulhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmulhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmulhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vqrdmulhq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vqrdmulhq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vqrdmulhq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)));})
f9355dee | 20294 | |
e3678b44 SP |
/* vmlaldavxq: polymorphic entry point; only signed 16- and 32-bit
   element vector pairs have type-specific variants.  */
#define vmlaldavxq(p0,p1) __arm_vmlaldavxq(p0,p1)
#define __arm_vmlaldavxq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlaldavxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlaldavxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
f9355dee | 20301 | |
e3678b44 SP |
/* vqmovuntq: polymorphic entry point; pairs an unsigned narrow destination
   vector with a signed wide source vector.  */
#define vqmovuntq(p0,p1) __arm_vqmovuntq(p0,p1)
#define __arm_vqmovuntq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovuntq_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovuntq_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t)));})
20308 | ||
/* vqmovntq: polymorphic entry point; pairs a narrow destination vector
   with a same-signedness wide source vector.  */
#define vqmovntq(p0,p1) __arm_vqmovntq(p0,p1)
#define __arm_vqmovntq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovntq_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovntq_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqmovntq_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqmovntq_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
20317 | ||
/* vqmovnbq: polymorphic entry point; same type pairing scheme as
   vqmovntq above.  */
#define vqmovnbq(p0,p1) __arm_vqmovnbq(p0,p1)
#define __arm_vqmovnbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovnbq_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovnbq_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqmovnbq_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqmovnbq_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
f9355dee | 20326 | |
e3678b44 SP |
/* vqdmulltq: polymorphic entry point; signed 16- and 32-bit element
   types only, with scalar (_n) and vector second-operand forms.  */
#define vqdmulltq(p0,p1) __arm_vqdmulltq(p0,p1)
#define __arm_vqdmulltq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vqdmulltq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vqdmulltq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmulltq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmulltq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
f9355dee | 20335 | |
e3678b44 SP |
/* vqmovunbq: polymorphic entry point; same unsigned-narrow/signed-wide
   pairing scheme as vqmovuntq.  */
#define vqmovunbq(p0,p1) __arm_vqmovunbq(p0,p1)
#define __arm_vqmovunbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovunbq_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovunbq_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t)));})
f9355dee | 20342 | |
e3678b44 SP |
/* vqdmullbq: polymorphic entry point; same type scheme as vqdmulltq.  */
#define vqdmullbq(p0,p1) __arm_vqdmullbq(p0,p1)
#define __arm_vqdmullbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vqdmullbq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vqdmullbq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmullbq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmullbq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
f9355dee | 20351 | |
e3678b44 SP |
/* vqdmulhq: polymorphic entry point; signed types only, with scalar (_n)
   and vector second-operand forms.  */
#define vqdmulhq(p0,p1) __arm_vqdmulhq(p0,p1)
#define __arm_vqdmulhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vqdmulhq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vqdmulhq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vqdmulhq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmulhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmulhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmulhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
f9355dee | 20362 | |
e3678b44 SP |
/* vqaddq: polymorphic entry point keyed on both argument types; scalar
   second arguments of matching element type select the _n variants,
   same-type vector pairs select the vector variants.  */
#define vqaddq(p0,p1) __arm_vqaddq(p0,p1)
#define __arm_vqaddq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vqaddq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vqaddq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vqaddq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vqaddq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vqaddq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vqaddq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqaddq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqaddq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqaddq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vqaddq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vqaddq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vqaddq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
f9355dee | 20379 | |
e3678b44 SP |
/* vmulltq_poly: polymorphic entry point; polynomial types are carried in
   unsigned 8/16-bit element vectors, mapped to _p8/_p16 variants.  */
#define vmulltq_poly(p0,p1) __arm_vmulltq_poly(p0,p1)
#define __arm_vmulltq_poly(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulltq_poly_p8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulltq_poly_p16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)));})
f9355dee | 20386 | |
e3678b44 SP |
/* vmullbq_poly: polymorphic entry point; same type scheme as
   vmulltq_poly.  */
#define vmullbq_poly(p0,p1) __arm_vmullbq_poly(p0,p1)
#define __arm_vmullbq_poly(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmullbq_poly_p8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmullbq_poly_p16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)));})
f9355dee | 20393 | |
e3678b44 SP |
/* vmulltq_int: polymorphic entry point; both operands must share the
   same integer vector type.  */
#define vmulltq_int(p0,p1) __arm_vmulltq_int(p0,p1)
#define __arm_vmulltq_int(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulltq_int_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulltq_int_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulltq_int_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulltq_int_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulltq_int_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulltq_int_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
f9355dee | 20404 | |
e3678b44 SP |
/* vhaddq: polymorphic entry point keyed on both argument types; scalar
   second arguments of matching element type select the _n variants,
   same-type vector pairs select the vector variants.  */
#define vhaddq(p0,p1) __arm_vhaddq(p0,p1)
#define __arm_vhaddq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vhaddq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vhaddq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vhaddq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vhaddq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vhaddq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vhaddq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhaddq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhaddq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhaddq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vhaddq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vhaddq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vhaddq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
f9355dee | 20421 | |
e3678b44 SP |
20422 | #define vhcaddq_rot270(p0,p1) __arm_vhcaddq_rot270(p0,p1) |
20423 | #define __arm_vhcaddq_rot270(p0,p1) ({ __typeof(p0) __p0 = (p0); \ | |
f9355dee SP |
20424 | __typeof(p1) __p1 = (p1); \ |
20425 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
e3678b44 SP |
20426 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhcaddq_rot270_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \ |
20427 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhcaddq_rot270_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ | |
20428 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhcaddq_rot270_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));}) | |
f9355dee | 20429 | |
e3678b44 SP |
20430 | #define vhcaddq_rot90(p0,p1) __arm_vhcaddq_rot90(p0,p1) |
20431 | #define __arm_vhcaddq_rot90(p0,p1) ({ __typeof(p0) __p0 = (p0); \ | |
f9355dee SP |
20432 | __typeof(p1) __p1 = (p1); \ |
20433 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
e3678b44 SP |
20434 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhcaddq_rot90_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \ |
20435 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhcaddq_rot90_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ | |
20436 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhcaddq_rot90_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));}) | |
f9355dee | 20437 | |
e3678b44 SP |
20438 | #define vhsubq(p0,p1) __arm_vhsubq(p0,p1) |
20439 | #define __arm_vhsubq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ | |
f9355dee SP |
20440 | __typeof(p1) __p1 = (p1); \ |
20441 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
e3678b44 SP |
20442 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vhsubq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \ |
20443 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vhsubq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \ | |
20444 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vhsubq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \ | |
20445 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vhsubq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \ | |
20446 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vhsubq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \ | |
20447 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vhsubq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \ | |
20448 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhsubq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \ | |
20449 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhsubq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ | |
20450 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhsubq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \ | |
20451 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vhsubq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \ | |
20452 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vhsubq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ | |
20453 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vhsubq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));}) | |
f9355dee | 20454 | |
e3678b44 SP |
20455 | #define vminq(p0,p1) __arm_vminq(p0,p1) |
20456 | #define __arm_vminq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ | |
f9355dee SP |
20457 | __typeof(p1) __p1 = (p1); \ |
20458 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
e3678b44 SP |
20459 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vminq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \ |
20460 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vminq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ | |
20461 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vminq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \ | |
20462 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vminq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \ | |
20463 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vminq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ | |
20464 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vminq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));}) | |
f9355dee | 20465 | |
e3678b44 SP |
20466 | #define vminaq(p0,p1) __arm_vminaq(p0,p1) |
20467 | #define __arm_vminaq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ | |
f9355dee SP |
20468 | __typeof(p1) __p1 = (p1); \ |
20469 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
e3678b44 SP |
20470 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vminaq_s8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \ |
20471 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vminaq_s16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ | |
20472 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vminaq_s32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));}) | |
f9355dee | 20473 | |
e3678b44 SP |
20474 | #define vmaxq(p0,p1) __arm_vmaxq(p0,p1) |
20475 | #define __arm_vmaxq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ | |
f9355dee SP |
20476 | __typeof(p1) __p1 = (p1); \ |
20477 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
e3678b44 SP |
20478 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmaxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \ |
20479 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmaxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ | |
20480 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmaxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \ | |
20481 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmaxq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \ | |
20482 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmaxq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ | |
20483 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmaxq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));}) | |
f9355dee | 20484 | |
e3678b44 SP |
20485 | #define vmaxaq(p0,p1) __arm_vmaxaq(p0,p1) |
20486 | #define __arm_vmaxaq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ | |
f9355dee SP |
20487 | __typeof(p1) __p1 = (p1); \ |
20488 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
e3678b44 SP |
20489 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmaxaq_s8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \ |
20490 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmaxaq_s16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ | |
20491 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmaxaq_s32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));}) | |
f9355dee | 20492 | |
e3678b44 SP |
20493 | #define vmovntq(p0,p1) __arm_vmovntq(p0,p1) |
20494 | #define __arm_vmovntq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ | |
f9355dee SP |
20495 | __typeof(p1) __p1 = (p1); \ |
20496 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
e3678b44 SP |
20497 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vmovntq_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t)), \ |
20498 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vmovntq_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t)), \ | |
20499 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vmovntq_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ | |
20500 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vmovntq_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t)));}) | |
f9355dee | 20501 | |
e3678b44 SP |
20502 | #define vmovnbq(p0,p1) __arm_vmovnbq(p0,p1) |
20503 | #define __arm_vmovnbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ | |
f9355dee SP |
20504 | __typeof(p1) __p1 = (p1); \ |
20505 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
e3678b44 SP |
20506 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vmovnbq_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t)), \ |
20507 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vmovnbq_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t)), \ | |
20508 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vmovnbq_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ | |
20509 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vmovnbq_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t)));}) | |
f9355dee | 20510 | |
e3678b44 SP |
20511 | #define vmulhq(p0,p1) __arm_vmulhq(p0,p1) |
20512 | #define __arm_vmulhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ | |
f9355dee SP |
20513 | __typeof(p1) __p1 = (p1); \ |
20514 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
e3678b44 SP |
20515 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \ |
20516 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ | |
20517 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \ | |
20518 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulhq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \ | |
20519 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulhq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ | |
20520 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulhq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));}) | |
f9355dee | 20521 | |
e3678b44 SP |
20522 | #define vmullbq_int(p0,p1) __arm_vmullbq_int(p0,p1) |
20523 | #define __arm_vmullbq_int(p0,p1) ({ __typeof(p0) __p0 = (p0); \ | |
f9355dee SP |
20524 | __typeof(p1) __p1 = (p1); \ |
20525 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
e3678b44 SP |
20526 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmullbq_int_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \ |
20527 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmullbq_int_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ | |
20528 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmullbq_int_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \ | |
20529 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmullbq_int_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \ | |
20530 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmullbq_int_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ | |
20531 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmullbq_int_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));}) | |
f9355dee | 20532 | |
e3678b44 SP |
20533 | #define vcmpgtq(p0,p1) __arm_vcmpgtq(p0,p1) |
20534 | #define __arm_vcmpgtq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ | |
f9355dee SP |
20535 | __typeof(p1) __p1 = (p1); \ |
20536 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
e3678b44 SP |
20537 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpgtq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \ |
20538 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpgtq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ | |
20539 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpgtq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \ | |
20540 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpgtq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \ | |
20541 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpgtq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \ | |
20542 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpgtq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \ | |
20543 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpgtq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \ | |
20544 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpgtq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)), \ | |
20545 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vcmpgtq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16_t)), \ | |
20546 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vcmpgtq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32_t)));}) | |
f9355dee | 20547 | |
e3678b44 SP |
20548 | #define vbicq_m_n(p0,p1,p2) __arm_vbicq_m_n(p0,p1,p2) |
20549 | #define __arm_vbicq_m_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
20550 | _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ | |
20551 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vbicq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \ | |
20552 | int (*)[__ARM_mve_type_int32x4_t]: __arm_vbicq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \ | |
20553 | int (*)[__ARM_mve_type_uint16x8_t]: __arm_vbicq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \ | |
20554 | int (*)[__ARM_mve_type_uint32x4_t]: __arm_vbicq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));}) | |
20555 | ||
20556 | #define vqrshrnbq(p0,p1,p2) __arm_vqrshrnbq(p0,p1,p2) | |
20557 | #define __arm_vqrshrnbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
f9355dee SP |
20558 | __typeof(p1) __p1 = (p1); \ |
20559 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
e3678b44 SP |
20560 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshrnbq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ |
20561 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshrnbq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ | |
20562 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqrshrnbq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ | |
20563 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqrshrnbq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) | |
f9355dee | 20564 | |
e3678b44 SP |
20565 | #define vqrshrunbq(p0,p1,p2) __arm_vqrshrunbq(p0,p1,p2) |
20566 | #define __arm_vqrshrunbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
f9355dee SP |
20567 | __typeof(p1) __p1 = (p1); \ |
20568 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
e3678b44 SP |
20569 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshrunbq_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ |
20570 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshrunbq_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));}) | |
f9355dee | 20571 | |
e3678b44 SP |
20572 | #define vshlcq(p0,p1,p2) __arm_vshlcq(p0,p1,p2) |
20573 | #define __arm_vshlcq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
20574 | _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ | |
20575 | int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlcq_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \ | |
20576 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlcq_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \ | |
20577 | int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlcq_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \ | |
20578 | int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlcq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \ | |
20579 | int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlcq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \ | |
20580 | int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlcq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));}) | |
20581 | ||
20582 | #define vclsq_m(p0,p1,p2) __arm_vclsq_m(p0,p1,p2) | |
20583 | #define __arm_vclsq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
f9355dee SP |
20584 | __typeof(p1) __p1 = (p1); \ |
20585 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
e3678b44 SP |
20586 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vclsq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \ |
20587 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vclsq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
20588 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vclsq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));}) | |
f9355dee | 20589 | |
e3678b44 SP |
20590 | #define vclzq_m(p0,p1,p2) __arm_vclzq_m(p0,p1,p2) |
20591 | #define __arm_vclzq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
f9355dee SP |
20592 | __typeof(p1) __p1 = (p1); \ |
20593 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
e3678b44 SP |
20594 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vclzq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \ |
20595 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vclzq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
20596 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vclzq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ | |
20597 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vclzq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \ | |
20598 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vclzq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ | |
20599 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vclzq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) | |
f9355dee | 20600 | |
e3678b44 SP |
20601 | #define vmaxaq_m(p0,p1,p2) __arm_vmaxaq_m(p0,p1,p2) |
20602 | #define __arm_vmaxaq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
f9355dee SP |
20603 | __typeof(p1) __p1 = (p1); \ |
20604 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
e3678b44 SP |
20605 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmaxaq_m_s8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \ |
20606 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmaxaq_m_s16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
20607 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmaxaq_m_s32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));}) | |
f9355dee | 20608 | |
e3678b44 SP |
20609 | #define vminaq_m(p0,p1,p2) __arm_vminaq_m(p0,p1,p2) |
20610 | #define __arm_vminaq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
f9355dee SP |
20611 | __typeof(p1) __p1 = (p1); \ |
20612 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
e3678b44 SP |
20613 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vminaq_m_s8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \ |
20614 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vminaq_m_s16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
20615 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vminaq_m_s32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));}) | |
f9355dee | 20616 | |
e3678b44 SP |
20617 | #define vmlaq(p0,p1,p2) __arm_vmlaq(p0,p1,p2) |
20618 | #define __arm_vmlaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
f9355dee | 20619 | __typeof(p1) __p1 = (p1); \ |
e3678b44 SP |
20620 | __typeof(p2) __p2 = (p2); \ |
20621 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
20622 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vmlaq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t)), \ | |
20623 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vmlaq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t)), \ | |
20624 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vmlaq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t)), \ | |
20625 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vmlaq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t)), \ | |
20626 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vmlaq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t)), \ | |
20627 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vmlaq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t)));}) | |
f9355dee | 20628 | |
e3678b44 SP |
20629 | #define vsriq(p0,p1,p2) __arm_vsriq(p0,p1,p2) |
20630 | #define __arm_vsriq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
20631 | __typeof(p1) __p1 = (p1); \ | |
20632 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
20633 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsriq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \ | |
20634 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsriq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
20635 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsriq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ | |
20636 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsriq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \ | |
20637 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsriq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ | |
20638 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsriq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) | |
f9355dee | 20639 | |
e3678b44 SP |
20640 | #define vsliq(p0,p1,p2) __arm_vsliq(p0,p1,p2) |
20641 | #define __arm_vsliq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
20642 | __typeof(p1) __p1 = (p1); \ | |
20643 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
20644 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsliq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \ | |
20645 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsliq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
20646 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsliq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ | |
20647 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsliq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \ | |
20648 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsliq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ | |
20649 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsliq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) | |
f9355dee | 20650 | |
e3678b44 SP |
20651 | #define vshlq_m_r(p0,p1,p2) __arm_vshlq_m_r(p0,p1,p2) |
20652 | #define __arm_vshlq_m_r(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
f9355dee | 20653 | _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ |
e3678b44 SP |
20654 | int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlq_m_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \ |
20655 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlq_m_r_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \ | |
20656 | int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlq_m_r_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \ | |
20657 | int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlq_m_r_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \ | |
20658 | int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlq_m_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \ | |
20659 | int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlq_m_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));}) | |
14782c81 | 20660 | |
e3678b44 SP |
20661 | #define vrshlq_m_n(p0,p1,p2) __arm_vrshlq_m_n(p0,p1,p2) |
20662 | #define __arm_vrshlq_m_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
20663 | __typeof(p1) __p1 = (p1); \ | |
f9355dee | 20664 | _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ |
e3678b44 SP |
20665 | int (*)[__ARM_mve_type_int8x16_t]: __arm_vrshlq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \ |
20666 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vrshlq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \ | |
20667 | int (*)[__ARM_mve_type_int32x4_t]: __arm_vrshlq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \ | |
20668 | int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrshlq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \ | |
20669 | int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrshlq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \ | |
20670 | int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrshlq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __p1, p2));}) | |
14782c81 | 20671 | |
e3678b44 SP |
20672 | #define vqshlq_m_r(p0,p1,p2) __arm_vqshlq_m_r(p0,p1,p2) |
20673 | #define __arm_vqshlq_m_r(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
f9355dee | 20674 | _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ |
e3678b44 SP |
20675 | int (*)[__ARM_mve_type_int8x16_t]: __arm_vqshlq_m_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \ |
20676 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vqshlq_m_r_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \ | |
20677 | int (*)[__ARM_mve_type_int32x4_t]: __arm_vqshlq_m_r_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \ | |
20678 | int (*)[__ARM_mve_type_uint8x16_t]: __arm_vqshlq_m_r_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \ | |
20679 | int (*)[__ARM_mve_type_uint16x8_t]: __arm_vqshlq_m_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \ | |
20680 | int (*)[__ARM_mve_type_uint32x4_t]: __arm_vqshlq_m_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));}) | |
14782c81 | 20681 | |
e3678b44 SP |
20682 | #define vqrshlq_m_n(p0,p1,p2) __arm_vqrshlq_m_n(p0,p1,p2) |
20683 | #define __arm_vqrshlq_m_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
f9355dee | 20684 | _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ |
e3678b44 SP |
20685 | int (*)[__ARM_mve_type_int8x16_t]: __arm_vqrshlq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \ |
20686 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vqrshlq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \ | |
20687 | int (*)[__ARM_mve_type_int32x4_t]: __arm_vqrshlq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \ | |
20688 | int (*)[__ARM_mve_type_uint8x16_t]: __arm_vqrshlq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \ | |
20689 | int (*)[__ARM_mve_type_uint16x8_t]: __arm_vqrshlq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \ | |
20690 | int (*)[__ARM_mve_type_uint32x4_t]: __arm_vqrshlq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));}) | |
f9355dee | 20691 | |
e3678b44 SP |
20692 | #define vqrdmlsdhxq(p0,p1,p2) __arm_vqrdmlsdhxq(p0,p1,p2) |
20693 | #define __arm_vqrdmlsdhxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
14782c81 | 20694 | __typeof(p1) __p1 = (p1); \ |
e3678b44 SP |
20695 | __typeof(p2) __p2 = (p2); \ |
20696 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
20697 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmlsdhxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \ | |
20698 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmlsdhxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \ | |
20699 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmlsdhxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));}) | |
14782c81 | 20700 | |
e3678b44 SP |
20701 | #define vqrdmlsdhq(p0,p1,p2) __arm_vqrdmlsdhq(p0,p1,p2) |
20702 | #define __arm_vqrdmlsdhq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
f9355dee | 20703 | __typeof(p1) __p1 = (p1); \ |
e3678b44 SP |
20704 | __typeof(p2) __p2 = (p2); \ |
20705 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
20706 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmlsdhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \ | |
20707 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmlsdhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \ | |
20708 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmlsdhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));}) | |
f9355dee | 20709 | |
e3678b44 SP |
20710 | #define vqrdmlashq(p0,p1,p2) __arm_vqrdmlashq(p0,p1,p2) |
20711 | #define __arm_vqrdmlashq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
f9355dee | 20712 | __typeof(p1) __p1 = (p1); \ |
e3678b44 SP |
20713 | __typeof(p2) __p2 = (p2); \ |
20714 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
20715 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vqrdmlashq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t)), \ | |
20716 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vqrdmlashq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t)), \ | |
20717 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vqrdmlashq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t)), \ | |
20718 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vqrdmlashq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t)), \ | |
20719 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vqrdmlashq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t)), \ | |
20720 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vqrdmlashq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t)));}) | |
f9355dee | 20721 | |
e3678b44 SP |
20722 | #define vqrdmlahq(p0,p1,p2) __arm_vqrdmlahq(p0,p1,p2) |
20723 | #define __arm_vqrdmlahq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
f9355dee | 20724 | __typeof(p1) __p1 = (p1); \ |
e3678b44 SP |
20725 | __typeof(p2) __p2 = (p2); \ |
20726 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
20727 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vqrdmlahq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t)), \ | |
20728 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vqrdmlahq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t)), \ | |
20729 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vqrdmlahq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t)), \ | |
20730 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vqrdmlahq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t)), \ | |
20731 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vqrdmlahq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t)), \ | |
20732 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vqrdmlahq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t)));}) | |
f9355dee | 20733 | |
e3678b44 SP |
20734 | #define vmlasq(p0,p1,p2) __arm_vmlasq(p0,p1,p2) |
20735 | #define __arm_vmlasq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
20736 | __typeof(p1) __p1 = (p1); \ | |
20737 | __typeof(p2) __p2 = (p2); \ | |
20738 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
20739 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vmlasq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t)), \ | |
20740 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vmlasq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t)), \ | |
20741 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vmlasq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t)), \ | |
20742 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vmlasq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t)), \ | |
20743 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vmlasq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t)), \ | |
20744 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vmlasq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t)));}) | |
a50f6abf | 20745 | |
e3678b44 SP |
20746 | #define vqdmlahq(p0,p1,p2) __arm_vqdmlahq(p0,p1,p2) |
20747 | #define __arm_vqdmlahq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
f9355dee | 20748 | __typeof(p1) __p1 = (p1); \ |
e3678b44 SP |
20749 | __typeof(p2) __p2 = (p2); \ |
20750 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
20751 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vqdmlahq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t)), \ | |
20752 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vqdmlahq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t)), \ | |
20753 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vqdmlahq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t)), \ | |
20754 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vqdmlahq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t)), \ | |
20755 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vqdmlahq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t)), \ | |
20756 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vqdmlahq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t)));}) | |
f9355dee | 20757 | |
e3678b44 SP |
20758 | #define vqrdmladhxq(p0,p1,p2) __arm_vqrdmladhxq(p0,p1,p2) |
20759 | #define __arm_vqrdmladhxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
20760 | __typeof(p1) __p1 = (p1); \ | |
20761 | __typeof(p2) __p2 = (p2); \ | |
20762 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
20763 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmladhxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \ | |
20764 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmladhxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \ | |
20765 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmladhxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));}) | |
f9355dee | 20766 | |
e3678b44 SP |
20767 | #define vqrdmladhq(p0,p1,p2) __arm_vqrdmladhq(p0,p1,p2) |
20768 | #define __arm_vqrdmladhq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
20769 | __typeof(p1) __p1 = (p1); \ | |
20770 | __typeof(p2) __p2 = (p2); \ | |
20771 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
20772 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmladhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \ | |
20773 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmladhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \ | |
20774 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmladhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));}) | |
a50f6abf | 20775 | |
e3678b44 SP |
20776 | #define vqnegq_m(p0,p1,p2) __arm_vqnegq_m(p0,p1,p2) |
20777 | #define __arm_vqnegq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
f9355dee SP |
20778 | __typeof(p1) __p1 = (p1); \ |
20779 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
e3678b44 SP |
20780 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqnegq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \ |
20781 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqnegq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
20782 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqnegq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));}) | |
20783 | ||
20784 | #define vqdmlsdhxq(p0,p1,p2) __arm_vqdmlsdhxq(p0,p1,p2) | |
20785 | #define __arm_vqdmlsdhxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
20786 | __typeof(p1) __p1 = (p1); \ | |
20787 | __typeof(p2) __p2 = (p2); \ | |
20788 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
20789 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmlsdhxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \ | |
20790 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmlsdhxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \ | |
20791 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmlsdhxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));}) | |
20792 | ||
20793 | #define vqdmlsdhq(p0,p1,p2) __arm_vqdmlsdhq(p0,p1,p2) | |
20794 | #define __arm_vqdmlsdhq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
20795 | __typeof(p1) __p1 = (p1); \ | |
20796 | __typeof(p2) __p2 = (p2); \ | |
20797 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
20798 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmlsdhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \ | |
20799 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmlsdhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \ | |
20800 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmlsdhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));}) | |
20801 | ||
20802 | #define vqdmladhxq(p0,p1,p2) __arm_vqdmladhxq(p0,p1,p2) | |
20803 | #define __arm_vqdmladhxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
20804 | __typeof(p1) __p1 = (p1); \ | |
20805 | __typeof(p2) __p2 = (p2); \ | |
20806 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
20807 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmladhxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \ | |
20808 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmladhxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \ | |
20809 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmladhxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));}) | |
20810 | ||
20811 | #define vqdmladhq(p0,p1,p2) __arm_vqdmladhq(p0,p1,p2) | |
20812 | #define __arm_vqdmladhq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
20813 | __typeof(p1) __p1 = (p1); \ | |
20814 | __typeof(p2) __p2 = (p2); \ | |
20815 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
20816 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmladhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \ | |
20817 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmladhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \ | |
20818 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmladhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));}) | |
a50f6abf | 20819 | |
e3678b44 SP |
20820 | #define vmovlbq_m(p0,p1,p2) __arm_vmovlbq_m(p0,p1,p2) |
20821 | #define __arm_vmovlbq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
f9355dee SP |
20822 | __typeof(p1) __p1 = (p1); \ |
20823 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
e3678b44 SP |
20824 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int8x16_t]: __arm_vmovlbq_m_s8 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \ |
20825 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t]: __arm_vmovlbq_m_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
20826 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint8x16_t]: __arm_vmovlbq_m_u8 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \ | |
20827 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t]: __arm_vmovlbq_m_u16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), p2));}) | |
a50f6abf | 20828 | |
e3678b44 SP |
20829 | #define vmovnbq_m(p0,p1,p2) __arm_vmovnbq_m(p0,p1,p2) |
20830 | #define __arm_vmovnbq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
f9355dee SP |
20831 | __typeof(p1) __p1 = (p1); \ |
20832 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
e3678b44 SP |
20833 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vmovnbq_m_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ |
20834 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vmovnbq_m_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ | |
20835 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vmovnbq_m_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ | |
20836 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vmovnbq_m_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) | |
a50f6abf | 20837 | |
e3678b44 SP |
20838 | #define vmovntq_m(p0,p1,p2) __arm_vmovntq_m(p0,p1,p2) |
20839 | #define __arm_vmovntq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
f9355dee SP |
20840 | __typeof(p1) __p1 = (p1); \ |
20841 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
e3678b44 SP |
20842 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vmovntq_m_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ |
20843 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vmovntq_m_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ | |
20844 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vmovntq_m_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ | |
20845 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vmovntq_m_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) | |
a50f6abf | 20846 | |
e3678b44 SP |
20847 | #define vmovltq_m(p0,p1,p2) __arm_vmovltq_m(p0,p1,p2) |
20848 | #define __arm_vmovltq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
f9355dee SP |
20849 | __typeof(p1) __p1 = (p1); \ |
20850 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
e3678b44 SP |
20851 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int8x16_t]: __arm_vmovltq_m_s8 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \ |
20852 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t]: __arm_vmovltq_m_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
20853 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint8x16_t]: __arm_vmovltq_m_u8 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \ | |
20854 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t]: __arm_vmovltq_m_u16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), p2));}) | |
a50f6abf | 20855 | |
e3678b44 SP |
20856 | #define vshrntq(p0,p1,p2) __arm_vshrntq(p0,p1,p2) |
20857 | #define __arm_vshrntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
f9355dee SP |
20858 | __typeof(p1) __p1 = (p1); \ |
20859 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
e3678b44 SP |
20860 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vshrntq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ |
20861 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vshrntq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ | |
20862 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vshrntq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ | |
20863 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vshrntq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) | |
a50f6abf | 20864 | |
e3678b44 SP |
20865 | #define vshrnbq(p0,p1,p2) __arm_vshrnbq(p0,p1,p2) |
20866 | #define __arm_vshrnbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
f9355dee SP |
20867 | __typeof(p1) __p1 = (p1); \ |
20868 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
e3678b44 SP |
20869 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vshrnbq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ |
20870 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vshrnbq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ | |
20871 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vshrnbq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ | |
20872 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vshrnbq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) | |
a50f6abf | 20873 | |
e3678b44 SP |
20874 | #define vrshrntq(p0,p1,p2) __arm_vrshrntq(p0,p1,p2) |
20875 | #define __arm_vrshrntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
f9355dee SP |
20876 | __typeof(p1) __p1 = (p1); \ |
20877 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
e3678b44 SP |
20878 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vrshrntq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ |
20879 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vrshrntq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ | |
20880 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vrshrntq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ | |
20881 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vrshrntq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) | |
a50f6abf | 20882 | |
e3678b44 SP |
20883 | #define vcvtaq_m(p0,p1,p2) __arm_vcvtaq_m(p0,p1,p2) |
20884 | #define __arm_vcvtaq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
f9355dee SP |
20885 | __typeof(p1) __p1 = (p1); \ |
20886 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
e3678b44 SP |
20887 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtaq_m_s16_f16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \ |
20888 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtaq_m_s32_f32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2), \ | |
20889 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtaq_m_u16_f16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \ | |
20890 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtaq_m_u32_f32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));}) | |
a50f6abf | 20891 | |
e3678b44 SP |
20892 | #define vcvtq_m(p0,p1,p2) __arm_vcvtq_m(p0,p1,p2) |
20893 | #define __arm_vcvtq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
f9355dee SP |
20894 | __typeof(p1) __p1 = (p1); \ |
20895 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
e3678b44 SP |
20896 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcvtq_m_f16_s16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ |
20897 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcvtq_m_f32_s32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ | |
20898 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcvtq_m_f16_u16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ | |
20899 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcvtq_m_f32_u32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \ | |
20900 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtq_m_s16_f16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \ | |
20901 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtq_m_s32_f32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2), \ | |
20902 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtq_m_u16_f16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \ | |
20903 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtq_m_u32_f32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));}) | |
a50f6abf | 20904 | |
db5db9d2 SP |
20905 | #define vcvtq_m_n(p0,p1,p2,p3) __arm_vcvtq_m_n(p0,p1,p2,p3) |
20906 | #define __arm_vcvtq_m_n(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ | |
20907 | __typeof(p1) __p1 = (p1); \ | |
20908 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
532e9e24 SP |
20909 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtq_m_n_s16_f16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2, p3), \ |
20910 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtq_m_n_s32_f32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2, p3), \ | |
20911 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtq_m_n_u16_f16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2, p3), \ | |
20912 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtq_m_n_u32_f32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2, p3), \ | |
db5db9d2 SP |
20913 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcvtq_m_n_f16_s16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \ |
20914 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcvtq_m_n_f32_s32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \ | |
20915 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcvtq_m_n_f16_u16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \ | |
20916 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcvtq_m_n_f32_u32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));}) | |
20917 | ||
e3678b44 SP |
20918 | #define vabsq_m(p0,p1,p2) __arm_vabsq_m(p0,p1,p2) |
20919 | #define __arm_vabsq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
f9355dee SP |
20920 | __typeof(p1) __p1 = (p1); \ |
20921 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
e3678b44 SP |
20922 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vabsq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \ |
20923 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vabsq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
20924 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vabsq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ | |
20925 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vabsq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \ | |
20926 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vabsq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));}) | |
6df4618c | 20927 | |
e3678b44 SP |
20928 | #define vcmlaq(p0,p1,p2) __arm_vcmlaq(p0,p1,p2) |
20929 | #define __arm_vcmlaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
20930 | __typeof(p1) __p1 = (p1); \ | |
20931 | __typeof(p2) __p2 = (p2); \ | |
20932 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
20933 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmlaq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t)), \ | |
20934 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmlaq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t)));}) | |
20935 | ||
20936 | #define vcmlaq_rot180(p0,p1,p2) __arm_vcmlaq_rot180(p0,p1,p2) | |
20937 | #define __arm_vcmlaq_rot180(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
20938 | __typeof(p1) __p1 = (p1); \ | |
20939 | __typeof(p2) __p2 = (p2); \ | |
20940 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
20941 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmlaq_rot180_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t)), \ | |
20942 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmlaq_rot180_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t)));}) | |
20943 | ||
20944 | #define vcmlaq_rot270(p0,p1,p2) __arm_vcmlaq_rot270(p0,p1,p2) | |
20945 | #define __arm_vcmlaq_rot270(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
20946 | __typeof(p1) __p1 = (p1); \ | |
20947 | __typeof(p2) __p2 = (p2); \ | |
20948 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
20949 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmlaq_rot270_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t)), \ | |
20950 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmlaq_rot270_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t)));}) | |
20951 | ||
20952 | #define vcmlaq_rot90(p0,p1,p2) __arm_vcmlaq_rot90(p0,p1,p2) | |
20953 | #define __arm_vcmlaq_rot90(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
20954 | __typeof(p1) __p1 = (p1); \ | |
20955 | __typeof(p2) __p2 = (p2); \ | |
20956 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
20957 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmlaq_rot90_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t)), \ | |
20958 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmlaq_rot90_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t)));}) | |
20959 | ||
e3678b44 SP |
/* Predicated round macros: each wrapper uses C11 _Generic on the types of
   p0 and p1 to dispatch to the f16/f32-suffixed intrinsic; p2 is passed
   through unchanged.  (Blame-gutter text from the extraction removed.)  */
#define vrndxq_m(p0,p1,p2) __arm_vrndxq_m(p0,p1,p2)
#define __arm_vrndxq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vrndxq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vrndxq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})

#define vrndq_m(p0,p1,p2) __arm_vrndq_m(p0,p1,p2)
#define __arm_vrndq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vrndq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vrndq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})

#define vrndpq_m(p0,p1,p2) __arm_vrndpq_m(p0,p1,p2)
#define __arm_vrndpq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vrndpq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vrndpq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
6df4618c | 20980 | |
e3678b44 SP |
/* Predicated compare macros: _Generic dispatch on (vector, vector) or
   (vector, scalar) operand types; the scalar forms map to the _n_
   intrinsics.  (Blame-gutter text from the extraction removed.)  */
#define vcmpgtq_m(p0,p1,p2) __arm_vcmpgtq_m(p0,p1,p2)
#define __arm_vcmpgtq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpgtq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpgtq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpgtq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpgtq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpgtq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpgtq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vcmpgtq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vcmpgtq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpgtq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpgtq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})

#define vcmpleq_m(p0,p1,p2) __arm_vcmpleq_m(p0,p1,p2)
#define __arm_vcmpleq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpleq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpleq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpleq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpleq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpleq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpleq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpleq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpleq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vcmpleq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vcmpleq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32_t), p2));})

#define vcmpltq_m(p0,p1,p2) __arm_vcmpltq_m(p0,p1,p2)
#define __arm_vcmpltq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpltq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpltq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpltq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpltq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpltq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpltq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpltq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpltq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vcmpltq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vcmpltq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32_t), p2));})
6df4618c | 21025 | |
e3678b44 SP |
/* Predicated not-equal compare: _Generic dispatch over signed, unsigned
   and float vector/scalar operand combinations.  (Blame-gutter text from
   the extraction removed.)  */
#define vcmpneq_m(p0,p1,p2) __arm_vcmpneq_m(p0,p1,p2)
#define __arm_vcmpneq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpneq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpneq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpneq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpneq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpneq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpneq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpneq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpneq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpneq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpneq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpneq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vcmpneq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vcmpneq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vcmpneq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vcmpneq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vcmpneq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32_t), p2));})
a50f6abf | 21046 | |
e3678b44 SP |
/* Predicated f16<->f32 convert (bottom/top half): dispatch picks the
   direction from the destination/source vector types.  (Blame-gutter
   text from the extraction removed.)  */
#define vcvtbq_m(p0,p1,p2) __arm_vcvtbq_m(p0,p1,p2)
#define __arm_vcvtbq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float16x8_t]: __arm_vcvtbq_m_f32_f16 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float32x4_t]: __arm_vcvtbq_m_f16_f32 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})

#define vcvttq_m(p0,p1,p2) __arm_vcvttq_m(p0,p1,p2)
#define __arm_vcvttq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float16x8_t]: __arm_vcvttq_m_f32_f16 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float32x4_t]: __arm_vcvttq_m_f16_f32 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
4be8cf77 | 21060 | |
e3678b44 SP |
/* Predicated float-to-integer converts: dispatch on the (integer
   destination, float source) vector type pair.  (Blame-gutter text from
   the extraction removed.)  */
#define vcvtmq_m(p0,p1,p2) __arm_vcvtmq_m(p0,p1,p2)
#define __arm_vcvtmq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtmq_m_s16_f16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtmq_m_s32_f32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtmq_m_u16_f16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtmq_m_u32_f32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})

#define vcvtnq_m(p0,p1,p2) __arm_vcvtnq_m(p0,p1,p2)
#define __arm_vcvtnq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtnq_m_s16_f16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtnq_m_s32_f32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtnq_m_u16_f16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtnq_m_u32_f32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})

#define vcvtpq_m(p0,p1,p2) __arm_vcvtpq_m(p0,p1,p2)
#define __arm_vcvtpq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtpq_m_s16_f16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtpq_m_s32_f32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtpq_m_u16_f16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtpq_m_u32_f32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
21087 | ||
/* Unpredicated less-than compare: vector/scalar operands select the _n_
   intrinsics, vector/vector operands the plain ones.  (Blame-gutter
   text from the extraction removed.)  */
#define vcmpltq(p0,p1) __arm_vcmpltq(p0,p1)
#define __arm_vcmpltq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpltq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpltq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpltq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vcmpltq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vcmpltq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpltq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpltq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpltq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpltq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpltq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
21102 | ||
/* Predicated scalar duplicate: the scalar operand p1 selects the _n_
   intrinsic for each element type.  (Blame-gutter text from the
   extraction removed.)  */
#define vdupq_m(p0,p1,p2) __arm_vdupq_m(p0,p1,p2)
#define __arm_vdupq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vdupq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vdupq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vdupq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vdupq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vdupq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vdupq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vdupq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vdupq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32_t), p2));})
21115 | ||
e3678b44 SP |
/* Fused multiply-accumulate family: three-operand _Generic dispatch; a
   scalar third (or second) operand selects the _n_ variants.
   (Blame-gutter text from the extraction removed.)  */
#define vfmaq(p0,p1,p2) __arm_vfmaq(p0,p1,p2)
#define __arm_vfmaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vfmaq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vfmaq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vfmaq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vfmaq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t)));})

#define vfmsq(p0,p1,p2) __arm_vfmsq(p0,p1,p2)
#define __arm_vfmsq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vfmsq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vfmsq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t)));})

#define vfmasq(p0,p1,p2) __arm_vfmasq(p0,p1,p2)
#define __arm_vfmasq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vfmasq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vfmasq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32_t)));})
21141 | ||
e3678b44 SP |
/* Predicated maxnm family (vector result): f16/f32 dispatch.
   (Blame-gutter text from the extraction removed.)  */
#define vmaxnmaq_m(p0,p1,p2) __arm_vmaxnmaq_m(p0,p1,p2)
#define __arm_vmaxnmaq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmaxnmaq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmaxnmaq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})

#define vmaxnmavq_m(p0,p1,p2) __arm_vmaxnmavq_m(p0,p1,p2)
#define __arm_vmaxnmavq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmaxnmavq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmaxnmavq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})

#define vmaxnmvq_m(p0,p1,p2) __arm_vmaxnmvq_m(p0,p1,p2)
#define __arm_vmaxnmvq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmaxnmvq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmaxnmvq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
8165795c | 21162 | |
e3678b44 SP |
/* Predicated maxnm across-vector forms: first operand is the scalar
   accumulator (float16_t/float32_t), second the vector.  (Blame-gutter
   text from the extraction removed.)  */
#define vmaxnmavq_p(p0,p1,p2) __arm_vmaxnmavq_p(p0,p1,p2)
#define __arm_vmaxnmavq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16_t][__ARM_mve_type_float16x8_t]: __arm_vmaxnmavq_p_f16 (__ARM_mve_coerce(__p0, float16_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32_t][__ARM_mve_type_float32x4_t]: __arm_vmaxnmavq_p_f32 (__ARM_mve_coerce(__p0, float32_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})

#define vmaxnmvq_p(p0,p1,p2) __arm_vmaxnmvq_p(p0,p1,p2)
#define __arm_vmaxnmvq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16_t][__ARM_mve_type_float16x8_t]: __arm_vmaxnmvq_p_f16 (__ARM_mve_coerce(__p0, float16_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32_t][__ARM_mve_type_float32x4_t]: __arm_vmaxnmvq_p_f32 (__ARM_mve_coerce(__p0, float32_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
8165795c | 21176 | |
e3678b44 SP |
/* Predicated minnm family, mirroring the maxnm wrappers above.
   (Blame-gutter text from the extraction removed.)  */
#define vminnmaq_m(p0,p1,p2) __arm_vminnmaq_m(p0,p1,p2)
#define __arm_vminnmaq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vminnmaq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vminnmaq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})

#define vminnmavq_p(p0,p1,p2) __arm_vminnmavq_p(p0,p1,p2)
#define __arm_vminnmavq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16_t][__ARM_mve_type_float16x8_t]: __arm_vminnmavq_p_f16 (__ARM_mve_coerce(__p0, float16_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32_t][__ARM_mve_type_float32x4_t]: __arm_vminnmavq_p_f32 (__ARM_mve_coerce(__p0, float32_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})

#define vminnmvq_p(p0,p1,p2) __arm_vminnmvq_p(p0,p1,p2)
#define __arm_vminnmvq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16_t][__ARM_mve_type_float16x8_t]: __arm_vminnmvq_p_f16 (__ARM_mve_coerce(__p0, float16_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32_t][__ARM_mve_type_float32x4_t]: __arm_vminnmvq_p_f32 (__ARM_mve_coerce(__p0, float32_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
8165795c | 21197 | |
e3678b44 SP |
/* Predicated round: _Generic dispatch on the f16/f32 vector types.
   Consistency fix: the original declared __typeof(p2) __p2 = (p2); but
   passed p2 to the f16 branch and __p2 to the f32 branch.  Every sibling
   vrnd*q_m wrapper passes p2 straight through, so the stray copy is
   dropped and both branches now use p2.  (Blame-gutter text from the
   extraction also removed.)  */
#define vrndnq_m(p0,p1,p2) __arm_vrndnq_m(p0,p1,p2)
#define __arm_vrndnq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vrndnq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vrndnq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
8165795c | 21205 | |
e3678b44 SP |
/* Predicated round wrappers (f16/f32 dispatch).  (Blame-gutter text
   from the extraction removed.)  */
#define vrndaq_m(p0,p1,p2) __arm_vrndaq_m(p0,p1,p2)
#define __arm_vrndaq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vrndaq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vrndaq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})

#define vrndmq_m(p0,p1,p2) __arm_vrndmq_m(p0,p1,p2)
#define __arm_vrndmq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vrndmq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vrndmq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
8165795c | 21219 | |
e3678b44 SP |
/* Predicated 64-bit-group reverse: dispatch over all integer and float
   vector element types.  (Blame-gutter text from the extraction
   removed.)  */
#define vrev64q_m(p0,p1,p2) __arm_vrev64q_m(p0,p1,p2)
#define __arm_vrev64q_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrev64q_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrev64q_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrev64q_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrev64q_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrev64q_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrev64q_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vrev64q_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vrev64q_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
8165795c | 21232 | |
e3678b44 SP |
/* Predicated 32-bit-group reverse: only 8- and 16-bit element types
   (plus f16) are valid, so no 32-bit cases appear.  (Blame-gutter text
   from the extraction removed.)  */
#define vrev32q_m(p0,p1,p2) __arm_vrev32q_m(p0,p1,p2)
#define __arm_vrev32q_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrev32q_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrev32q_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrev32q_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrev32q_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vrev32q_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2));})
8165795c | 21242 | |
e3678b44 SP |
/* Predicated select: dispatch over every vector type, including the
   64-bit-element s64/u64 variants.  (Blame-gutter text from the
   extraction removed.)  */
#define vpselq(p0,p1,p2) __arm_vpselq(p0,p1,p2)
#define __arm_vpselq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vpselq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vpselq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vpselq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_int64x2_t][__ARM_mve_type_int64x2_t]: __arm_vpselq_s64 (__ARM_mve_coerce(__p0, int64x2_t), __ARM_mve_coerce(__p1, int64x2_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vpselq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vpselq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vpselq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
  int (*)[__ARM_mve_type_uint64x2_t][__ARM_mve_type_uint64x2_t]: __arm_vpselq_u64 (__ARM_mve_coerce(__p0, uint64x2_t), __ARM_mve_coerce(__p1, uint64x2_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vpselq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vpselq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})

/* Polymorphic vcmpgeq (compare greater-or-equal): vector/vector forms
   for signed and float element types, plus vector/scalar "_n" forms
   when p1 has a scalar type.  No unsigned variants are listed here.  */
#define vcmpgeq(p0,p1) __arm_vcmpgeq(p0,p1)
#define __arm_vcmpgeq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpgeq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpgeq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpgeq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpgeq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpgeq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpgeq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpgeq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpgeq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vcmpgeq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vcmpgeq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32_t)));})

/* Polymorphic vcmpeqq_m (predicated compare-equal): vector/vector and
   vector/scalar "_n" forms for s/u 8/16/32 and f16/f32.  p2 is passed
   through unmodified (the predicate operand).  */
#define vcmpeqq_m(p0,p1,p2) __arm_vcmpeqq_m(p0,p1,p2)
#define __arm_vcmpeqq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpeqq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpeqq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpeqq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpeqq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpeqq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpeqq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpeqq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpeqq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpeqq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vcmpeqq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vcmpeqq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vcmpeqq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpeqq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpeqq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vcmpeqq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vcmpeqq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32_t), p2));})

/* Polymorphic vcmpgtq (compare greater-than): mirrors vcmpgeq above —
   signed and float element types only, with "_n" vector/scalar forms.  */
#define vcmpgtq(p0,p1) __arm_vcmpgtq(p0,p1)
#define __arm_vcmpgtq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpgtq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpgtq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpgtq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpgtq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpgtq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpgtq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpgtq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpgtq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vcmpgtq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vcmpgtq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32_t)));})

/* Polymorphic vrshrnbq (rounding shift-right narrow, bottom): p0 is the
   narrow destination vector, p1 the wide source; dispatch pairs each
   narrow type with its double-width counterpart.  p2 (the shift
   immediate) is passed through unmodified.  */
#define vrshrnbq(p0,p1,p2) __arm_vrshrnbq(p0,p1,p2)
#define __arm_vrshrnbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vrshrnbq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vrshrnbq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vrshrnbq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vrshrnbq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})

/* Polymorphic vrev16q_m: byte-reversal within 16-bit halves exists only
   for 8-bit element vectors, hence just the s8/u8 cases.  */
#define vrev16q_m(p0,p1,p2) __arm_vrev16q_m(p0,p1,p2)
#define __arm_vrev16q_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrev16q_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrev16q_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2));})

/* Polymorphic vqshruntq (saturating shift-right unsigned narrow, top):
   signed wide source (p1) narrows into an unsigned destination (p0);
   p2 is the shift immediate, passed through unmodified.  */
#define vqshruntq(p0,p1,p2) __arm_vqshruntq(p0,p1,p2)
#define __arm_vqshruntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshruntq_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshruntq_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})

/* Polymorphic vqshrunbq_n (saturating shift-right unsigned narrow,
   bottom).  NOTE(review): the user-facing name carries an "_n" suffix
   here, unlike the sibling vqshruntq/vqshrnbq macros which use the bare
   name — looks inconsistent with the ACLE polymorphic naming scheme;
   confirm against the spec before relying on it.  */
#define vqshrunbq_n(p0,p1,p2) __arm_vqshrunbq_n(p0,p1,p2)
#define __arm_vqshrunbq_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshrunbq_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshrunbq_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})

/* Polymorphic vqshrnbq (saturating shift-right narrow, bottom): narrow
   destination p0 paired with double-width source p1, signed and
   unsigned; p2 is the shift immediate.  */
#define vqshrnbq(p0,p1,p2) __arm_vqshrnbq(p0,p1,p2)
#define __arm_vqshrnbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshrnbq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshrnbq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqshrnbq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqshrnbq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})

/* Polymorphic vqshrntq (saturating shift-right narrow, top): same
   narrow/wide type pairing as vqshrnbq above.  */
#define vqshrntq(p0,p1,p2) __arm_vqshrntq(p0,p1,p2)
#define __arm_vqshrntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshrntq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshrntq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqshrntq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqshrntq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})

/* Polymorphic vqrshruntq (saturating rounding shift-right unsigned
   narrow, top): signed wide source into unsigned narrow destination.
   NOTE(review): an identical definition of this pair appears again
   later in this header; only one is needed.  */
#define vqrshruntq(p0,p1,p2) __arm_vqrshruntq(p0,p1,p2)
#define __arm_vqrshruntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshruntq_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshruntq_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})

/* Polymorphic vqmovnbq_m (predicated saturating move narrow, bottom):
   narrow destination p0, double-width source p1, predicate p2 passed
   through unmodified.  */
#define vqmovnbq_m(p0,p1,p2) __arm_vqmovnbq_m(p0,p1,p2)
#define __arm_vqmovnbq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovnbq_m_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovnbq_m_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqmovnbq_m_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqmovnbq_m_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})

/* Polymorphic vqmovntq_m (predicated saturating move narrow, top):
   same type pairing as vqmovnbq_m above.  */
#define vqmovntq_m(p0,p1,p2) __arm_vqmovntq_m(p0,p1,p2)
#define __arm_vqmovntq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovntq_m_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovntq_m_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqmovntq_m_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqmovntq_m_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})

/* Polymorphic vqmovunbq_m (predicated saturating move unsigned narrow,
   bottom): signed wide source into unsigned narrow destination.  */
#define vqmovunbq_m(p0,p1,p2) __arm_vqmovunbq_m(p0,p1,p2)
#define __arm_vqmovunbq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovunbq_m_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovunbq_m_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})

/* Polymorphic vqmovuntq_m (predicated saturating move unsigned narrow,
   top): same signed-to-unsigned narrowing pairing as vqmovunbq_m.  */
#define vqmovuntq_m(p0,p1,p2) __arm_vqmovuntq_m(p0,p1,p2)
#define __arm_vqmovuntq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovuntq_m_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovuntq_m_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})

/* Polymorphic vqrshrntq (saturating rounding shift-right narrow, top):
   narrow destination p0, double-width source p1, shift immediate p2.  */
#define vqrshrntq(p0,p1,p2) __arm_vqrshrntq(p0,p1,p2)
#define __arm_vqrshrntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshrntq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshrntq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqrshrntq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqrshrntq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})

/* NOTE(review): a second, token-identical definition of vqrshruntq and
   __arm_vqrshruntq stood here; the exact same pair is already defined
   earlier in this header.  Redefining a macro with an identical token
   sequence is legal C (C11 6.10.3p2) but redundant, and it risks the
   two copies silently diverging if only one is edited later, so the
   duplicate has been removed.  */

/* Polymorphic vnegq_m (predicated negate): signed integer and float
   element types only — negation of unsigned vectors is not provided.  */
#define vnegq_m(p0,p1,p2) __arm_vnegq_m(p0,p1,p2)
#define __arm_vnegq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vnegq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vnegq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vnegq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vnegq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vnegq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})

/* Polymorphic vcmpgeq_m (predicated compare greater-or-equal): signed
   and float, vector/vector and vector/scalar "_n" forms; predicate p2
   is passed through unmodified.  Note the float "_n" cases are listed
   before the float vector/vector cases — order is irrelevant to
   _Generic selection.  */
#define vcmpgeq_m(p0,p1,p2) __arm_vcmpgeq_m(p0,p1,p2)
#define __arm_vcmpgeq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpgeq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpgeq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpgeq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpgeq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpgeq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpgeq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vcmpgeq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vcmpgeq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpgeq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpgeq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})

/* Polymorphic vabdq_m (predicated absolute difference): three typed
   operands (inactive p0, sources p1/p2) dispatched on all three
   typeids; predicate p3 is passed through unmodified.  */
#define vabdq_m(p0,p1,p2,p3) __arm_vabdq_m(p0,p1,p2,p3)
#define __arm_vabdq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vabdq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vabdq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vabdq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vabdq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vabdq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vabdq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vabdq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vabdq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})

/* Polymorphic vaddq_m (predicated add): three-operand dispatch with
   both vector/vector/vector and vector/vector/scalar ("_n") forms for
   all integer and float element types; predicate p3 is passed
   through unmodified.  */
#define vaddq_m(p0,p1,p2,p3) __arm_vaddq_m(p0,p1,p2,p3)
#define __arm_vaddq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vaddq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vaddq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vaddq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vaddq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vaddq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vaddq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vaddq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vaddq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vaddq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vaddq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vaddq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vaddq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vaddq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vaddq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vaddq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vaddq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32_t), p3));})

/* Polymorphic vandq_m (predicated bitwise AND): vector-only dispatch
   (no "_n" scalar forms); predicate p3 is passed through unmodified.  */
#define vandq_m(p0,p1,p2,p3) __arm_vandq_m(p0,p1,p2,p3)
#define __arm_vandq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vandq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vandq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vandq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vandq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vandq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vandq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vandq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vandq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})

/* Polymorphic vbicq_m (predicated bit-clear, AND-NOT): same vector-only
   dispatch shape as vandq_m; predicate p3 passed through unmodified.  */
#define vbicq_m(p0,p1,p2,p3) __arm_vbicq_m(p0,p1,p2,p3)
#define __arm_vbicq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vbicq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vbicq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vbicq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vbicq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vbicq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vbicq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vbicq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vbicq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
21500 | ||
/* Polymorphic vbrsrq_m: dispatches on the first two vector operand
   types only; the scalar p2 and p3 are forwarded unchanged to the
   _n-suffixed (vector,scalar) variants.  */
#define vbrsrq_m(p0,p1,p2,p3) __arm_vbrsrq_m(p0,p1,p2,p3)
#define __arm_vbrsrq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vbrsrq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vbrsrq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vbrsrq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vbrsrq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vbrsrq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vbrsrq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vbrsrq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vbrsrq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2, p3));})
21513 | ||
/* Polymorphic vcaddq_rot270_m: _Generic on the three vector operand
   types selects the matching __arm_vcaddq_rot270_m_<suffix> variant
   (all integer widths/signs plus f16/f32); p3 forwarded unchanged.  */
#define vcaddq_rot270_m(p0,p1,p2,p3) __arm_vcaddq_rot270_m(p0,p1,p2,p3)
#define __arm_vcaddq_rot270_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot270_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot270_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot270_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot270_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot270_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot270_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcaddq_rot270_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcaddq_rot270_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
21527 | ||
/* Polymorphic vcaddq_rot90_m: same dispatch shape as
   vcaddq_rot270_m — three vector operands select the
   __arm_vcaddq_rot90_m_<suffix> variant; p3 forwarded unchanged.  */
#define vcaddq_rot90_m(p0,p1,p2,p3) __arm_vcaddq_rot90_m(p0,p1,p2,p3)
#define __arm_vcaddq_rot90_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot90_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot90_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot90_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot90_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot90_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot90_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcaddq_rot90_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcaddq_rot90_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
21541 | ||
/* Polymorphic vcmlaq_m: floating-point only — dispatches to the f16
   or f32 variant from the three vector operand types; p3 forwarded
   unchanged.  */
#define vcmlaq_m(p0,p1,p2,p3) __arm_vcmlaq_m(p0,p1,p2,p3)
#define __arm_vcmlaq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmlaq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmlaq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
21549 | ||
/* Polymorphic vcmlaq_rot180_m: f16/f32 only, same dispatch shape as
   vcmlaq_m.  */
#define vcmlaq_rot180_m(p0,p1,p2,p3) __arm_vcmlaq_rot180_m(p0,p1,p2,p3)
#define __arm_vcmlaq_rot180_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmlaq_rot180_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmlaq_rot180_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
21557 | ||
/* Polymorphic vcmlaq_rot270_m: f16/f32 only, same dispatch shape as
   vcmlaq_m.  */
#define vcmlaq_rot270_m(p0,p1,p2,p3) __arm_vcmlaq_rot270_m(p0,p1,p2,p3)
#define __arm_vcmlaq_rot270_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmlaq_rot270_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmlaq_rot270_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
21565 | ||
/* Polymorphic vcmlaq_rot90_m: f16/f32 only, same dispatch shape as
   vcmlaq_m.  */
#define vcmlaq_rot90_m(p0,p1,p2,p3) __arm_vcmlaq_rot90_m(p0,p1,p2,p3)
#define __arm_vcmlaq_rot90_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmlaq_rot90_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmlaq_rot90_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
8165795c | 21573 | |
532e9e24 SP |
/* Polymorphic vcmulq_m: f16/f32 only — three vector operands select
   the __arm_vcmulq_m_<suffix> variant; p3 forwarded unchanged.  */
#define vcmulq_m(p0,p1,p2,p3) __arm_vcmulq_m(p0,p1,p2,p3)
#define __arm_vcmulq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
8165795c | 21581 | |
532e9e24 SP |
/* Polymorphic vcmulq_rot180_m: f16/f32 only, same dispatch shape as
   vcmulq_m.  */
#define vcmulq_rot180_m(p0,p1,p2,p3) __arm_vcmulq_rot180_m(p0,p1,p2,p3)
#define __arm_vcmulq_rot180_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_rot180_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_rot180_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
8165795c | 21589 | |
532e9e24 SP |
/* Polymorphic vcmulq_rot270_m: f16/f32 only, same dispatch shape as
   vcmulq_m.  */
#define vcmulq_rot270_m(p0,p1,p2,p3) __arm_vcmulq_rot270_m(p0,p1,p2,p3)
#define __arm_vcmulq_rot270_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_rot270_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_rot270_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
8165795c | 21597 | |
532e9e24 SP |
/* Polymorphic vcmulq_rot90_m: f16/f32 only, same dispatch shape as
   vcmulq_m.  Formatting normalized to match the sibling macros: the
   stray space inside the _Generic selector array and the missing
   space before the call parentheses are fixed (whitespace only, no
   behavior change).  */
#define vcmulq_rot90_m(p0,p1,p2,p3) __arm_vcmulq_rot90_m(p0,p1,p2,p3)
#define __arm_vcmulq_rot90_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_rot90_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_rot90_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
33203b4c | 21605 | |
532e9e24 SP |
/* Polymorphic veorq_m: three vector operands select the matching
   __arm_veorq_m_{s8,s16,s32,u8,u16,u32,f16,f32} variant; p3
   forwarded unchanged.  */
#define veorq_m(p0,p1,p2,p3) __arm_veorq_m(p0,p1,p2,p3)
#define __arm_veorq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_veorq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_veorq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_veorq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_veorq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_veorq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_veorq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_veorq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_veorq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
33203b4c | 21619 | |
532e9e24 SP |
/* Polymorphic vfmaq_m: f16/f32 only; the third operand may be a
   vector (dispatches to __arm_vfmaq_m_fXX) or a scalar
   float16_t/float32_t (dispatches to the _n variants).  p3 forwarded
   unchanged.  */
#define vfmaq_m(p0,p1,p2,p3) __arm_vfmaq_m(p0,p1,p2,p3)
#define __arm_vfmaq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vfmaq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vfmaq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vfmaq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vfmaq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32_t), p3));})
0dad5b33 | 21629 | |
532e9e24 SP |
/* Polymorphic vfmasq_m: f16/f32 only; the third operand is always a
   scalar float16_t/float32_t — only _n variants exist.  p3 forwarded
   unchanged.  */
#define vfmasq_m(p0,p1,p2,p3) __arm_vfmasq_m(p0,p1,p2,p3)
#define __arm_vfmasq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vfmasq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vfmasq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32_t), p3));})
8165795c | 21637 | |
532e9e24 SP |
/* Polymorphic vfmsq_m: f16/f32 only, all-vector operands — no _n
   (scalar) variants.  p3 forwarded unchanged.  */
#define vfmsq_m(p0,p1,p2,p3) __arm_vfmsq_m(p0,p1,p2,p3)
#define __arm_vfmsq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vfmsq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vfmsq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
8165795c | 21645 | |
532e9e24 SP |
/* Polymorphic vmaxnmq_m: f16/f32 only — three vector operands select
   the f16 or f32 variant; p3 forwarded unchanged.  */
#define vmaxnmq_m(p0,p1,p2,p3) __arm_vmaxnmq_m(p0,p1,p2,p3)
#define __arm_vmaxnmq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmaxnmq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmaxnmq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
0dad5b33 | 21653 | |
532e9e24 SP |
/* Polymorphic vminnmq_m: f16/f32 only, same dispatch shape as
   vmaxnmq_m.  */
#define vminnmq_m(p0,p1,p2,p3) __arm_vminnmq_m(p0,p1,p2,p3)
#define __arm_vminnmq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vminnmq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vminnmq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
0dad5b33 | 21661 | |
532e9e24 SP |
/* Polymorphic vmulq_m: covers all integer and float element types.
   A vector third operand selects the vector-vector variant; a scalar
   third operand (int8_t..uint32_t, float16_t/float32_t) selects the
   corresponding _n variant.  p3 forwarded unchanged.  */
#define vmulq_m(p0,p1,p2,p3) __arm_vmulq_m(p0,p1,p2,p3)
#define __arm_vmulq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmulq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmulq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vmulq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vmulq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vmulq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vmulq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vmulq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vmulq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vmulq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vmulq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32_t), p3));})
0dad5b33 | 21683 | |
532e9e24 SP |
/* Polymorphic vornq_m: three vector operands select the matching
   __arm_vornq_m_{s8,s16,s32,u8,u16,u32,f16,f32} variant; p3
   forwarded unchanged.  */
#define vornq_m(p0,p1,p2,p3) __arm_vornq_m(p0,p1,p2,p3)
#define __arm_vornq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vornq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vornq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vornq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vornq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vornq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vornq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vornq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vornq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
0dad5b33 | 21697 | |
532e9e24 SP |
/* Polymorphic vsubq_m: covers all integer and float element types,
   with both vector-vector and vector-scalar (_n) variants, mirroring
   vmulq_m.  p3 forwarded unchanged.  */
#define vsubq_m(p0,p1,p2,p3) __arm_vsubq_m(p0,p1,p2,p3)
#define __arm_vsubq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsubq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsubq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsubq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsubq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsubq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsubq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vsubq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vsubq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vsubq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vsubq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vsubq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vsubq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vsubq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vsubq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vsubq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vsubq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32_t), p3));})
e3678b44 | 21719 | |
532e9e24 SP |
/* Polymorphic vorrq_m: three vector operands select the matching
   __arm_vorrq_m_{s8,s16,s32,u8,u16,u32,f16,f32} variant; p3
   forwarded unchanged.  */
#define vorrq_m(p0,p1,p2,p3) __arm_vorrq_m(p0,p1,p2,p3)
#define __arm_vorrq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vorrq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vorrq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vorrq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vorrq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vorrq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vorrq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vorrq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vorrq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
0dad5b33 | 21733 | |
bf1e3d5a SP |
/* Overloaded contiguous vector load: dispatches on the element pointer
   type (const-qualified) to the matching __arm_vld1q_* intrinsic.  */
#define vld1q(p0) __arm_vld1q(p0)
#define __arm_vld1q(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8_t_const_ptr]: __arm_vld1q_s8 (__ARM_mve_coerce(__p0, int8_t const *)), \
  int (*)[__ARM_mve_type_int16_t_const_ptr]: __arm_vld1q_s16 (__ARM_mve_coerce(__p0, int16_t const *)), \
  int (*)[__ARM_mve_type_int32_t_const_ptr]: __arm_vld1q_s32 (__ARM_mve_coerce(__p0, int32_t const *)), \
  int (*)[__ARM_mve_type_uint8_t_const_ptr]: __arm_vld1q_u8 (__ARM_mve_coerce(__p0, uint8_t const *)), \
  int (*)[__ARM_mve_type_uint16_t_const_ptr]: __arm_vld1q_u16 (__ARM_mve_coerce(__p0, uint16_t const *)), \
  int (*)[__ARM_mve_type_uint32_t_const_ptr]: __arm_vld1q_u32 (__ARM_mve_coerce(__p0, uint32_t const *)), \
  int (*)[__ARM_mve_type_float16_t_const_ptr]: __arm_vld1q_f16 (__ARM_mve_coerce(__p0, float16_t const *)), \
  int (*)[__ARM_mve_type_float32_t_const_ptr]: __arm_vld1q_f32 (__ARM_mve_coerce(__p0, float32_t const *)));})
21745 | ||
4cc23303 SP |
/* Halfword gather load with byte offsets: dispatches on both the base
   pointer type and the offset vector type (uint16x8_t widens to a
   16-bit result, uint32x4_t to a 32-bit result).  */
#define vldrhq_gather_offset(p0,p1) __arm_vldrhq_gather_offset(p0,p1)
#define __arm_vldrhq_gather_offset(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16_t_const_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_s16 (__ARM_mve_coerce(__p0, int16_t const *), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_int16_t_const_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_offset_s32 (__ARM_mve_coerce(__p0, int16_t const *), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_uint16_t_const_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_u16 (__ARM_mve_coerce(__p0, uint16_t const *), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint16_t_const_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_offset_u32 (__ARM_mve_coerce(__p0, uint16_t const *), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16_t_const_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_f16 (__ARM_mve_coerce(__p0, float16_t const *), __ARM_mve_coerce(__p1, uint16x8_t)));})
21755 | ||
/* Predicated (zeroing) variant of vldrhq_gather_offset; p2 is the
   mve_pred16_t predicate and is forwarded unwrapped since it is used
   exactly once in the selected branch.  */
#define vldrhq_gather_offset_z(p0,p1,p2) __arm_vldrhq_gather_offset_z(p0,p1,p2)
#define __arm_vldrhq_gather_offset_z(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16_t_const_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_z_s16 (__ARM_mve_coerce(__p0, int16_t const *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_int16_t_const_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_offset_z_s32 (__ARM_mve_coerce(__p0, int16_t const *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
  int (*)[__ARM_mve_type_uint16_t_const_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_z_u16 (__ARM_mve_coerce(__p0, uint16_t const *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16_t_const_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_offset_z_u32 (__ARM_mve_coerce(__p0, uint16_t const *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
  int (*)[__ARM_mve_type_float16_t_const_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_z_f16 (__ARM_mve_coerce(__p0, float16_t const *), __ARM_mve_coerce(__p1, uint16x8_t), p2));})
21765 | ||
/* Halfword gather load with element-scaled (shifted) offsets;
   dispatch mirrors vldrhq_gather_offset.  */
#define vldrhq_gather_shifted_offset(p0,p1) __arm_vldrhq_gather_shifted_offset(p0,p1)
#define __arm_vldrhq_gather_shifted_offset(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16_t_const_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_s16 (__ARM_mve_coerce(__p0, int16_t const *), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_int16_t_const_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_s32 (__ARM_mve_coerce(__p0, int16_t const *), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_uint16_t_const_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_u16 (__ARM_mve_coerce(__p0, uint16_t const *), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint16_t_const_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_u32 (__ARM_mve_coerce(__p0, uint16_t const *), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16_t_const_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_f16 (__ARM_mve_coerce(__p0, float16_t const *), __ARM_mve_coerce(__p1, uint16x8_t)));})
21775 | ||
/* Predicated (zeroing) variant of vldrhq_gather_shifted_offset.  */
#define vldrhq_gather_shifted_offset_z(p0,p1,p2) __arm_vldrhq_gather_shifted_offset_z(p0,p1,p2)
#define __arm_vldrhq_gather_shifted_offset_z(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16_t_const_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_z_s16 (__ARM_mve_coerce(__p0, int16_t const *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_int16_t_const_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_z_s32 (__ARM_mve_coerce(__p0, int16_t const *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
  int (*)[__ARM_mve_type_uint16_t_const_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_z_u16 (__ARM_mve_coerce(__p0, uint16_t const *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16_t_const_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_z_u32 (__ARM_mve_coerce(__p0, uint16_t const *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
  int (*)[__ARM_mve_type_float16_t_const_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_z_f16 (__ARM_mve_coerce(__p0, float16_t const *), __ARM_mve_coerce(__p1, uint16x8_t), p2));})
21785 | ||
/* Word gather load with byte offsets: dispatches on the base pointer
   element type only; the offset vector p1 is forwarded unwrapped
   (used once in the selected branch).  */
#define vldrwq_gather_offset(p0,p1) __arm_vldrwq_gather_offset(p0,p1)
#define __arm_vldrwq_gather_offset(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int32_t_const_ptr]: __arm_vldrwq_gather_offset_s32 (__ARM_mve_coerce(__p0, int32_t const *), p1), \
  int (*)[__ARM_mve_type_uint32_t_const_ptr]: __arm_vldrwq_gather_offset_u32 (__ARM_mve_coerce(__p0, uint32_t const *), p1), \
  int (*)[__ARM_mve_type_float32_t_const_ptr]: __arm_vldrwq_gather_offset_f32 (__ARM_mve_coerce(__p0, float32_t const *), p1));})
21792 | ||
/* Predicated (zeroing) variant of vldrwq_gather_offset.  */
#define vldrwq_gather_offset_z(p0,p1,p2) __arm_vldrwq_gather_offset_z(p0,p1,p2)
#define __arm_vldrwq_gather_offset_z(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int32_t_const_ptr]: __arm_vldrwq_gather_offset_z_s32 (__ARM_mve_coerce(__p0, int32_t const *), p1, p2), \
  int (*)[__ARM_mve_type_uint32_t_const_ptr]: __arm_vldrwq_gather_offset_z_u32 (__ARM_mve_coerce(__p0, uint32_t const *), p1, p2), \
  int (*)[__ARM_mve_type_float32_t_const_ptr]: __arm_vldrwq_gather_offset_z_f32 (__ARM_mve_coerce(__p0, float32_t const *), p1, p2));})
21799 | ||
/* Word gather load with element-scaled (shifted) offsets.  */
#define vldrwq_gather_shifted_offset(p0,p1) __arm_vldrwq_gather_shifted_offset(p0,p1)
#define __arm_vldrwq_gather_shifted_offset(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int32_t_const_ptr]: __arm_vldrwq_gather_shifted_offset_s32 (__ARM_mve_coerce(__p0, int32_t const *), p1), \
  int (*)[__ARM_mve_type_uint32_t_const_ptr]: __arm_vldrwq_gather_shifted_offset_u32 (__ARM_mve_coerce(__p0, uint32_t const *), p1), \
  int (*)[__ARM_mve_type_float32_t_const_ptr]: __arm_vldrwq_gather_shifted_offset_f32 (__ARM_mve_coerce(__p0, float32_t const *), p1));})
21806 | ||
/* Predicated (zeroing) variant of vldrwq_gather_shifted_offset.  */
#define vldrwq_gather_shifted_offset_z(p0,p1,p2) __arm_vldrwq_gather_shifted_offset_z(p0,p1,p2)
#define __arm_vldrwq_gather_shifted_offset_z(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int32_t_const_ptr]: __arm_vldrwq_gather_shifted_offset_z_s32 (__ARM_mve_coerce(__p0, int32_t const *), p1, p2), \
  int (*)[__ARM_mve_type_uint32_t_const_ptr]: __arm_vldrwq_gather_shifted_offset_z_u32 (__ARM_mve_coerce(__p0, uint32_t const *), p1, p2), \
  int (*)[__ARM_mve_type_float32_t_const_ptr]: __arm_vldrwq_gather_shifted_offset_z_f32 (__ARM_mve_coerce(__p0, float32_t const *), p1, p2));})
21813 | ||
5cad47e0 SP |
/* Overloaded contiguous vector store: dispatches on the destination
   pointer type and the value vector type.  */
#define vst1q(p0,p1) __arm_vst1q(p0,p1)
#define __arm_vst1q(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16_t]: __arm_vst1q_s8 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vst1q_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vst1q_s32 (__ARM_mve_coerce(__p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vst1q_u8 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vst1q_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vst1q_u32 (__ARM_mve_coerce(__p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_float16x8_t]: __arm_vst1q_f16 (__ARM_mve_coerce(__p0, float16_t *), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vst1q_f32 (__ARM_mve_coerce(__p0, float32_t *), __ARM_mve_coerce(__p1, float32x4_t)));})
21826 | ||
/* Halfword store: 32-bit source vectors are narrowed to 16-bit
   elements in memory by the _s32/_u32 variants.  */
#define vstrhq(p0,p1) __arm_vstrhq(p0,p1)
#define __arm_vstrhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vstrhq_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrhq_s32 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_u32 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_float16x8_t]: __arm_vstrhq_f16 (__ARM_mve_coerce(__p0, float16_t *), __ARM_mve_coerce(__p1, float16x8_t)));})
21836 | ||
/* Predicated halfword store; p2 is the predicate, forwarded as-is.  */
#define vstrhq_p(p0,p1,p2) __arm_vstrhq_p(p0,p1,p2)
#define __arm_vstrhq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vstrhq_p_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrhq_p_s32 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_p_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_p_u32 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
  int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_float16x8_t]: __arm_vstrhq_p_f16 (__ARM_mve_coerce(__p0, float16_t *), __ARM_mve_coerce(__p1, float16x8_t), p2));})
21846 | ||
/* Predicated halfword scatter store with byte offsets: dispatches on
   base pointer, offset vector, and value vector types.  */
#define vstrhq_scatter_offset_p(p0,p1,p2,p3) __arm_vstrhq_scatter_offset_p(p0,p1,p2,p3)
#define __arm_vstrhq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_offset_p_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_offset_p_s32 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_offset_p_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_offset_p_u32 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vstrhq_scatter_offset_p_f16 (__ARM_mve_coerce(__p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3));})
21857 | ||
/* Halfword scatter store with byte offsets (unpredicated).  */
#define vstrhq_scatter_offset(p0,p1,p2) __arm_vstrhq_scatter_offset(p0,p1,p2)
#define __arm_vstrhq_scatter_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_offset_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_offset_s32 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_offset_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_offset_u32 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vstrhq_scatter_offset_f16 (__ARM_mve_coerce(__p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, float16x8_t)));})
21868 | ||
/* Predicated halfword scatter store with element-scaled offsets.  */
#define vstrhq_scatter_shifted_offset_p(p0,p1,p2,p3) __arm_vstrhq_scatter_shifted_offset_p(p0,p1,p2,p3)
#define __arm_vstrhq_scatter_shifted_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_shifted_offset_p_s32 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_shifted_offset_p_u32 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_f16 (__ARM_mve_coerce(__p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3));})
21879 | ||
/* Halfword scatter store with element-scaled offsets (unpredicated).  */
#define vstrhq_scatter_shifted_offset(p0,p1,p2) __arm_vstrhq_scatter_shifted_offset(p0,p1,p2)
#define __arm_vstrhq_scatter_shifted_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_shifted_offset_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_shifted_offset_s32 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_shifted_offset_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_shifted_offset_u32 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vstrhq_scatter_shifted_offset_f16 (__ARM_mve_coerce(__p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, float16x8_t)));})
21890 | ||
/* Predicated word store.  */
#define vstrwq_p(p0,p1,p2) __arm_vstrwq_p(p0,p1,p2)
#define __arm_vstrwq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_p_s32 (__ARM_mve_coerce(__p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_p_u32 (__ARM_mve_coerce(__p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
  int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_p_f32 (__ARM_mve_coerce(__p0, float32_t *), __ARM_mve_coerce(__p1, float32x4_t), p2));})
21898 | ||
/* Word store (unpredicated).  */
#define vstrwq(p0,p1) __arm_vstrwq(p0,p1)
#define __arm_vstrwq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_s32 (__ARM_mve_coerce(__p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_u32 (__ARM_mve_coerce(__p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_f32 (__ARM_mve_coerce(__p0, float32_t *), __ARM_mve_coerce(__p1, float32x4_t)));})
21906 | ||
7a5fffa5 SP |
/* NOTE(review): the next four macro pairs are token-identical
   redefinitions of vstrhq_scatter_offset[_p] and
   vstrhq_scatter_shifted_offset[_p] defined earlier in this file.
   Identical redefinition is permitted (C11 6.10.3p2) and therefore
   harmless, but the duplicates are redundant and are candidates for
   removal in a cleanup.  */
#define vstrhq_scatter_offset(p0,p1,p2) __arm_vstrhq_scatter_offset(p0,p1,p2)
#define __arm_vstrhq_scatter_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_offset_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_offset_s32 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_offset_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_offset_u32 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vstrhq_scatter_offset_f16 (__ARM_mve_coerce(__p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, float16x8_t)));})

#define vstrhq_scatter_offset_p(p0,p1,p2,p3) __arm_vstrhq_scatter_offset_p(p0,p1,p2,p3)
#define __arm_vstrhq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_offset_p_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_offset_p_s32 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_offset_p_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_offset_p_u32 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vstrhq_scatter_offset_p_f16 (__ARM_mve_coerce(__p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3));})

#define vstrhq_scatter_shifted_offset(p0,p1,p2) __arm_vstrhq_scatter_shifted_offset(p0,p1,p2)
#define __arm_vstrhq_scatter_shifted_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_shifted_offset_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_shifted_offset_s32 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_shifted_offset_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_shifted_offset_u32 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vstrhq_scatter_shifted_offset_f16 (__ARM_mve_coerce(__p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, float16x8_t)));})

#define vstrhq_scatter_shifted_offset_p(p0,p1,p2,p3) __arm_vstrhq_scatter_shifted_offset_p(p0,p1,p2,p3)
#define __arm_vstrhq_scatter_shifted_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_shifted_offset_p_s32 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_shifted_offset_p_u32 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_f16 (__ARM_mve_coerce(__p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3));})
21950 | ||
/* Word scatter store to vector base + immediate offset: dispatches on
   the value vector type only.  p0 (base address vector) and p1 are
   forwarded without __typeof capture — NOTE(review): p1 appears to be
   a compile-time immediate required by the underlying intrinsic;
   confirm against the ACLE specification.  */
#define vstrwq_scatter_base(p0,p1,p2) __arm_vstrwq_scatter_base(p0,p1,p2)
#define __arm_vstrwq_scatter_base(p0,p1,p2) ({ __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_base_s32 (p0, p1, __ARM_mve_coerce(__p2, int32x4_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_base_u32 (p0, p1, __ARM_mve_coerce(__p2, uint32x4_t)), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_base_f32 (p0, p1, __ARM_mve_coerce(__p2, float32x4_t)));})
21957 | ||
/* Predicated variant of vstrwq_scatter_base; p3 is the predicate.  */
#define vstrwq_scatter_base_p(p0,p1,p2,p3) __arm_vstrwq_scatter_base_p(p0,p1,p2,p3)
#define __arm_vstrwq_scatter_base_p(p0,p1,p2,p3) ({ __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_base_p_s32(p0, p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_base_p_u32(p0, p1, __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_base_p_f32(p0, p1, __ARM_mve_coerce(__p2, float32x4_t), p3));})
21964 | ||
/* Word scatter store with byte offsets: dispatches on the base pointer
   and value vector types; the captured offset vector __p1 is passed
   through directly.  */
#define vstrwq_scatter_offset(p0,p1,p2) __arm_vstrwq_scatter_offset(p0,p1,p2)
#define __arm_vstrwq_scatter_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_offset_s32 (__ARM_mve_coerce(__p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t)), \
  int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_offset_u32 (__ARM_mve_coerce(__p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t)), \
  int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_offset_f32 (__ARM_mve_coerce(__p0, float32_t *), __p1, __ARM_mve_coerce(__p2, float32x4_t)));})
21973 | ||
/* Predicated word scatter store with byte offsets.  */
#define vstrwq_scatter_offset_p(p0,p1,p2,p3) __arm_vstrwq_scatter_offset_p(p0,p1,p2,p3)
#define __arm_vstrwq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_offset_p_s32 (__ARM_mve_coerce(__p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_offset_p_u32 (__ARM_mve_coerce(__p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_offset_p_f32 (__ARM_mve_coerce(__p0, float32_t *), __p1, __ARM_mve_coerce(__p2, float32x4_t), p3));})
21982 | ||
/* NOTE(review): a second, textually identical definition of
   vstrwq_scatter_offset_p / __arm_vstrwq_scatter_offset_p stood here.
   It duplicated the definition given earlier in this file; although a
   benign redefinition, keeping two copies in sync is a maintenance
   hazard, so the redundant copy was removed.  */
21991 | ||
/* NOTE(review): a second, textually identical definition of
   vstrwq_scatter_offset / __arm_vstrwq_scatter_offset stood here.
   It duplicated the definition given earlier in this file; the
   redundant copy was removed (the earlier one remains authoritative).  */
22000 | ||
/* Scatter-store of 32-bit elements through base pointer P0 with shifted
   offsets P1: dispatch on the (pointer type, value-vector type) pair.  */
#define vstrwq_scatter_shifted_offset(p0,p1,p2) __arm_vstrwq_scatter_shifted_offset(p0,p1,p2)
#define __arm_vstrwq_scatter_shifted_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic ((int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \
    int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_shifted_offset_s32 (__ARM_mve_coerce(__p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t)), \
    int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_shifted_offset_u32 (__ARM_mve_coerce(__p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t)), \
    int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_shifted_offset_f32 (__ARM_mve_coerce(__p0, float32_t *), __p1, __ARM_mve_coerce(__p2, float32x4_t)));})
22009 | ||
/* Predicated variant of vstrwq_scatter_shifted_offset (P3 is the predicate):
   dispatch on the (pointer type, value-vector type) pair.  */
#define vstrwq_scatter_shifted_offset_p(p0,p1,p2,p3) __arm_vstrwq_scatter_shifted_offset_p(p0,p1,p2,p3)
#define __arm_vstrwq_scatter_shifted_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic ((int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \
    int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_shifted_offset_p_s32 (__ARM_mve_coerce(__p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \
    int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_shifted_offset_p_u32 (__ARM_mve_coerce(__p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t), p3), \
    int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_shifted_offset_p_f32 (__ARM_mve_coerce(__p0, float32_t *), __p1, __ARM_mve_coerce(__p2, float32x4_t), p3));})
22018 | ||
/* NOTE(review): a second, textually identical definition of
   vstrwq_scatter_shifted_offset_p / __arm_vstrwq_scatter_shifted_offset_p
   stood here.  It duplicated the definition given earlier in this file;
   the redundant copy was removed.  */
22027 | ||
/* NOTE(review): a second, textually identical definition of
   vstrwq_scatter_shifted_offset / __arm_vstrwq_scatter_shifted_offset
   stood here.  It duplicated the definition given earlier in this file;
   the redundant copy was removed.  */
22036 | ||
85a94e87 SP |
/* Return an uninitialized vector whose type matches that of P0
   (P0 is only inspected for its type, never evaluated for its value
   beyond the initial __typeof capture).  */
#define vuninitializedq(p0) __arm_vuninitializedq(p0)
#define __arm_vuninitializedq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic ((int (*)[__ARM_mve_typeid(__p0)])0, \
    int (*)[__ARM_mve_type_int8x16_t]: __arm_vuninitializedq_s8 (), \
    int (*)[__ARM_mve_type_int16x8_t]: __arm_vuninitializedq_s16 (), \
    int (*)[__ARM_mve_type_int32x4_t]: __arm_vuninitializedq_s32 (), \
    int (*)[__ARM_mve_type_int64x2_t]: __arm_vuninitializedq_s64 (), \
    int (*)[__ARM_mve_type_uint8x16_t]: __arm_vuninitializedq_u8 (), \
    int (*)[__ARM_mve_type_uint16x8_t]: __arm_vuninitializedq_u16 (), \
    int (*)[__ARM_mve_type_uint32x4_t]: __arm_vuninitializedq_u32 (), \
    int (*)[__ARM_mve_type_uint64x2_t]: __arm_vuninitializedq_u64 (), \
    int (*)[__ARM_mve_type_float16x8_t]: __arm_vuninitializedq_f16 (), \
    int (*)[__ARM_mve_type_float32x4_t]: __arm_vuninitializedq_f32 ());})
22050 | ||
/* Reinterpret the bits of vector P0 as float16x8_t; dispatch on the type
   of P0 (associations listed in canonical s/u/f order).  */
#define vreinterpretq_f16(p0) __arm_vreinterpretq_f16(p0)
#define __arm_vreinterpretq_f16(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic ((int (*)[__ARM_mve_typeid(__p0)])0, \
    int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_f16_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
    int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_f16_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
    int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_f16_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
    int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_f16_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
    int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_f16_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
    int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_f16_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
    int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_f16_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
    int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_f16_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \
    int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_f16_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
22063 | ||
/* Reinterpret the bits of vector P0 as float32x4_t; dispatch on the type
   of P0 (associations listed in canonical s/u/f order).  */
#define vreinterpretq_f32(p0) __arm_vreinterpretq_f32(p0)
#define __arm_vreinterpretq_f32(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic ((int (*)[__ARM_mve_typeid(__p0)])0, \
    int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_f32_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
    int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_f32_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
    int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_f32_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
    int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_f32_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
    int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_f32_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
    int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_f32_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
    int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_f32_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
    int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_f32_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \
    int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_f32_f16 (__ARM_mve_coerce(__p0, float16x8_t)));})
22076 | ||
/* Reinterpret the bits of vector P0 as int16x8_t; dispatch on the type
   of P0.  Association order normalized to s/u/f (order is irrelevant to
   _Generic selection).  */
#define vreinterpretq_s16(p0) __arm_vreinterpretq_s16(p0)
#define __arm_vreinterpretq_s16(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic ((int (*)[__ARM_mve_typeid(__p0)])0, \
    int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_s16_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
    int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_s16_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
    int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_s16_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
    int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_s16_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
    int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_s16_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
    int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_s16_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
    int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_s16_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \
    int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_s16_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
    int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_s16_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
22089 | ||
/* Reinterpret the bits of vector P0 as int32x4_t; dispatch on the type
   of P0.  Association order normalized to s/u/f.  */
#define vreinterpretq_s32(p0) __arm_vreinterpretq_s32(p0)
#define __arm_vreinterpretq_s32(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic ((int (*)[__ARM_mve_typeid(__p0)])0, \
    int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_s32_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
    int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_s32_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
    int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_s32_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
    int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_s32_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
    int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_s32_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
    int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_s32_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
    int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_s32_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \
    int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_s32_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
    int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_s32_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
22102 | ||
/* Reinterpret the bits of vector P0 as int64x2_t; dispatch on the type
   of P0.  Association order normalized to s/u/f.  */
#define vreinterpretq_s64(p0) __arm_vreinterpretq_s64(p0)
#define __arm_vreinterpretq_s64(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic ((int (*)[__ARM_mve_typeid(__p0)])0, \
    int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_s64_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
    int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_s64_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
    int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_s64_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
    int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_s64_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
    int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_s64_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
    int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_s64_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
    int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_s64_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \
    int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_s64_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
    int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_s64_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
22115 | ||
/* Reinterpret the bits of vector P0 as int8x16_t; dispatch on the type
   of P0.  Association order normalized to s/u/f.  */
#define vreinterpretq_s8(p0) __arm_vreinterpretq_s8(p0)
#define __arm_vreinterpretq_s8(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic ((int (*)[__ARM_mve_typeid(__p0)])0, \
    int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_s8_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
    int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_s8_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
    int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_s8_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
    int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_s8_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
    int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_s8_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
    int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_s8_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
    int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_s8_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \
    int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_s8_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
    int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_s8_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
22128 | ||
/* Reinterpret the bits of vector P0 as uint16x8_t; dispatch on the type
   of P0.  Association order normalized to s/u/f.  */
#define vreinterpretq_u16(p0) __arm_vreinterpretq_u16(p0)
#define __arm_vreinterpretq_u16(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic ((int (*)[__ARM_mve_typeid(__p0)])0, \
    int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_u16_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
    int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_u16_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
    int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_u16_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
    int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_u16_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
    int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_u16_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
    int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_u16_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
    int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_u16_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \
    int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_u16_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
    int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_u16_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
22141 | ||
/* Reinterpret the bits of vector P0 as uint32x4_t; dispatch on the type
   of P0.  Association order normalized to s/u/f.  */
#define vreinterpretq_u32(p0) __arm_vreinterpretq_u32(p0)
#define __arm_vreinterpretq_u32(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic ((int (*)[__ARM_mve_typeid(__p0)])0, \
    int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_u32_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
    int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_u32_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
    int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_u32_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
    int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_u32_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
    int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_u32_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
    int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_u32_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
    int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_u32_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \
    int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_u32_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
    int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_u32_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
22154 | ||
/* Reinterpret the bits of vector P0 as uint64x2_t; dispatch on the type
   of P0.  Association order normalized to s/u/f.  */
#define vreinterpretq_u64(p0) __arm_vreinterpretq_u64(p0)
#define __arm_vreinterpretq_u64(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic ((int (*)[__ARM_mve_typeid(__p0)])0, \
    int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_u64_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
    int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_u64_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
    int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_u64_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
    int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_u64_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
    int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_u64_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
    int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_u64_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
    int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_u64_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
    int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_u64_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
    int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_u64_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
22167 | ||
/* Reinterpret the bits of vector P0 as uint8x16_t; dispatch on the type
   of P0.  Association order normalized to s/u/f.  */
#define vreinterpretq_u8(p0) __arm_vreinterpretq_u8(p0)
#define __arm_vreinterpretq_u8(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic ((int (*)[__ARM_mve_typeid(__p0)])0, \
    int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_u8_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
    int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_u8_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
    int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_u8_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
    int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_u8_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
    int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_u8_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
    int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_u8_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
    int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_u8_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \
    int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_u8_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
    int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_u8_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
22180 | ||
41e1a7ff SP |
/* Write-back variant of vstrwq_scatter_base: select the s32/u32/f32 worker
   from the element type of the value vector P2.  */
#define vstrwq_scatter_base_wb(p0,p1,p2) __arm_vstrwq_scatter_base_wb(p0,p1,p2)
#define __arm_vstrwq_scatter_base_wb(p0,p1,p2) ({ __typeof(p2) __p2 = (p2); \
  _Generic ((int (*)[__ARM_mve_typeid(__p2)])0, \
    int (*)[__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_base_wb_s32 (p0, p1, __ARM_mve_coerce(__p2, int32x4_t)), \
    int (*)[__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_base_wb_u32 (p0, p1, __ARM_mve_coerce(__p2, uint32x4_t)), \
    int (*)[__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_base_wb_f32 (p0, p1, __ARM_mve_coerce(__p2, float32x4_t)));})
22187 | ||
/* Predicated write-back variant of vstrwq_scatter_base (P3 is the
   predicate): select the s32/u32/f32 worker from the type of P2.  */
#define vstrwq_scatter_base_wb_p(p0,p1,p2,p3) __arm_vstrwq_scatter_base_wb_p(p0,p1,p2,p3)
#define __arm_vstrwq_scatter_base_wb_p(p0,p1,p2,p3) ({ __typeof(p2) __p2 = (p2); \
  _Generic ((int (*)[__ARM_mve_typeid(__p2)])0, \
    int (*)[__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_base_wb_p_s32 (p0, p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \
    int (*)[__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_base_wb_p_u32 (p0, p1, __ARM_mve_coerce(__p2, uint32x4_t), p3), \
    int (*)[__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_base_wb_p_f32 (p0, p1, __ARM_mve_coerce(__p2, float32x4_t), p3));})
22194 | ||
261014a1 SP |
/* Predicated-with-undefined-inactive (_x) absolute difference: dispatch on
   the types of the two operand vectors P1 and P2 (P3 is the predicate).  */
#define vabdq_x(p1,p2,p3) __arm_vabdq_x(p1,p2,p3)
#define __arm_vabdq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic ((int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
    int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vabdq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
    int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vabdq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
    int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vabdq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
    int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vabdq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
    int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vabdq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
    int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vabdq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
    int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vabdq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
    int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vabdq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
22207 | ||
/* Predicated-with-undefined-inactive (_x) absolute value: dispatch on the
   type of P1 (signed and float vectors only; P2 is the predicate).  */
#define vabsq_x(p1,p2) __arm_vabsq_x(p1,p2)
#define __arm_vabsq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
  _Generic ((int (*)[__ARM_mve_typeid(__p1)])0, \
    int (*)[__ARM_mve_type_int8x16_t]: __arm_vabsq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
    int (*)[__ARM_mve_type_int16x8_t]: __arm_vabsq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
    int (*)[__ARM_mve_type_int32x4_t]: __arm_vabsq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2), \
    int (*)[__ARM_mve_type_float16x8_t]: __arm_vabsq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \
    int (*)[__ARM_mve_type_float32x4_t]: __arm_vabsq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));})
22216 | ||
/* Predicated-with-undefined-inactive (_x) addition: dispatch on the
   (P1, P2) type pair.  Vector+vector pairs map to the plain workers,
   vector+scalar pairs to the _n_ workers; P3 is the predicate.  */
#define vaddq_x(p1,p2,p3) __arm_vaddq_x(p1,p2,p3)
#define __arm_vaddq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic ((int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
    int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vaddq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
    int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vaddq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
    int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vaddq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
    int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vaddq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
    int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vaddq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
    int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vaddq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
    int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vaddq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
    int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vaddq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
    int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vaddq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
    int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vaddq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
    int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vaddq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
    int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vaddq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3), \
    int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vaddq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
    int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vaddq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3), \
    int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vaddq_x_n_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16_t), p3), \
    int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vaddq_x_n_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32_t), p3));})
22237 | ||
/* Predicated-with-undefined-inactive (_x) bitwise AND: dispatch on the
   (P1, P2) type pair; P3 is the predicate.  */
#define vandq_x(p1,p2,p3) __arm_vandq_x(p1,p2,p3)
#define __arm_vandq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic ((int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
    int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vandq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
    int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vandq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
    int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vandq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
    int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vandq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
    int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vandq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
    int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vandq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
    int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vandq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
    int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vandq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
22250 | ||
/* Predicated-with-undefined-inactive (_x) bit clear (P1 AND NOT P2):
   dispatch on the (P1, P2) type pair; P3 is the predicate.  */
#define vbicq_x(p1,p2,p3) __arm_vbicq_x(p1,p2,p3)
#define __arm_vbicq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic ((int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
    int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vbicq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
    int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vbicq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
    int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vbicq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
    int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vbicq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
    int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vbicq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
    int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vbicq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
    int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vbicq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
    int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vbicq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
22263 | ||
/* Predicated-with-undefined-inactive (_x) bit-reverse-shift-right: dispatch
   on the type of P1; P2 is the scalar operand, P3 the predicate.  */
#define vbrsrq_x(p1,p2,p3) __arm_vbrsrq_x(p1,p2,p3)
#define __arm_vbrsrq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  _Generic ((int (*)[__ARM_mve_typeid(__p1)])0, \
    int (*)[__ARM_mve_type_int8x16_t]: __arm_vbrsrq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
    int (*)[__ARM_mve_type_int16x8_t]: __arm_vbrsrq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
    int (*)[__ARM_mve_type_int32x4_t]: __arm_vbrsrq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
    int (*)[__ARM_mve_type_uint8x16_t]: __arm_vbrsrq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
    int (*)[__ARM_mve_type_uint16x8_t]: __arm_vbrsrq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
    int (*)[__ARM_mve_type_uint32x4_t]: __arm_vbrsrq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2, p3), \
    int (*)[__ARM_mve_type_float16x8_t]: __arm_vbrsrq_x_n_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2, p3), \
    int (*)[__ARM_mve_type_float32x4_t]: __arm_vbrsrq_x_n_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2, p3));})
22275 | ||
/* Predicated-with-undefined-inactive (_x) complex add with 270-degree
   rotation: dispatch on the (P1, P2) type pair; P3 is the predicate.  */
#define vcaddq_rot270_x(p1,p2,p3) __arm_vcaddq_rot270_x(p1,p2,p3)
#define __arm_vcaddq_rot270_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic ((int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
    int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot270_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
    int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot270_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
    int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot270_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
    int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot270_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
    int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot270_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
    int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot270_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
    int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcaddq_rot270_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
    int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcaddq_rot270_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
22288 | ||
22289 | #define vcaddq_rot90_x(p1,p2,p3) __arm_vcaddq_rot90_x(p1,p2,p3) | |
22290 | #define __arm_vcaddq_rot90_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ | |
22291 | __typeof(p2) __p2 = (p2); \ | |
22292 | _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
22293 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot90_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \ | |
22294 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot90_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ | |
22295 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot90_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ | |
22296 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot90_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ | |
22297 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot90_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ | |
22298 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot90_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \ | |
22299 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcaddq_rot90_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \ | |
22300 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcaddq_rot90_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));}) | |
22301 | ||
22302 | #define vcmulq_rot180_x(p1,p2,p3) __arm_vcmulq_rot180_x(p1,p2,p3) | |
22303 | #define __arm_vcmulq_rot180_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ | |
22304 | __typeof(p2) __p2 = (p2); \ | |
22305 | _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
22306 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_rot180_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \ | |
22307 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_rot180_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));}) | |
22308 | ||
22309 | #define vcmulq_rot270_x(p1,p2,p3) __arm_vcmulq_rot270_x(p1,p2,p3) | |
22310 | #define __arm_vcmulq_rot270_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ | |
22311 | __typeof(p2) __p2 = (p2); \ | |
22312 | _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
22313 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_rot270_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \ | |
22314 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_rot270_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));}) | |
22315 | ||
22316 | #define vcmulq_x(p1,p2,p3) __arm_vcmulq_x(p1,p2,p3) | |
22317 | #define __arm_vcmulq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ | |
22318 | __typeof(p2) __p2 = (p2); \ | |
22319 | _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
22320 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \ | |
22321 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));}) | |
22322 | ||
22323 | #define vcvtq_x(p1,p2) __arm_vcvtq_x(p1,p2) | |
22324 | #define __arm_vcvtq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \ | |
22325 | _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ | |
22326 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vcvtq_x_f16_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
22327 | int (*)[__ARM_mve_type_int32x4_t]: __arm_vcvtq_x_f32_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2), \ | |
22328 | int (*)[__ARM_mve_type_uint16x8_t]: __arm_vcvtq_x_f16_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2), \ | |
22329 | int (*)[__ARM_mve_type_uint32x4_t]: __arm_vcvtq_x_f32_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2));}) | |
22330 | ||
22331 | #define vcvtq_x_n(p1,p2,p3) __arm_vcvtq_x_n(p1,p2,p3) | |
22332 | #define __arm_vcvtq_x_n(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ | |
22333 | _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ | |
22334 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vcvtq_x_n_f16_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2, p3), \ | |
22335 | int (*)[__ARM_mve_type_int32x4_t]: __arm_vcvtq_x_n_f32_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2, p3), \ | |
22336 | int (*)[__ARM_mve_type_uint16x8_t]: __arm_vcvtq_x_n_f16_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \ | |
22337 | int (*)[__ARM_mve_type_uint32x4_t]: __arm_vcvtq_x_n_f32_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2, p3));}) | |
22338 | ||
22339 | #define veorq_x(p1,p2,p3) __arm_veorq_x(p1,p2,p3) | |
22340 | #define __arm_veorq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ | |
22341 | __typeof(p2) __p2 = (p2); \ | |
22342 | _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
22343 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_veorq_x_s8(__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \ | |
22344 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_veorq_x_s16(__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ | |
22345 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_veorq_x_s32(__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ | |
22346 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_veorq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ | |
22347 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_veorq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ | |
22348 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_veorq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \ | |
22349 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_veorq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \ | |
22350 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_veorq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));}) | |
22351 | ||
22352 | #define vmaxnmq_x(p1,p2,p3) __arm_vmaxnmq_x(p1,p2,p3) | |
22353 | #define __arm_vmaxnmq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ | |
22354 | __typeof(p2) __p2 = (p2); \ | |
22355 | _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
22356 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmaxnmq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \ | |
22357 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmaxnmq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));}) | |
22358 | ||
22359 | #define vminnmq_x(p1,p2,p3) __arm_vminnmq_x(p1,p2,p3) | |
22360 | #define __arm_vminnmq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ | |
22361 | __typeof(p2) __p2 = (p2); \ | |
22362 | _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
22363 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vminnmq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \ | |
22364 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vminnmq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));}) | |
22365 | ||
22366 | #define vmulq_x(p1,p2,p3) __arm_vmulq_x(p1,p2,p3) | |
22367 | #define __arm_vmulq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ | |
22368 | __typeof(p2) __p2 = (p2); \ | |
22369 | _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
22370 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \ | |
22371 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ | |
22372 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ | |
22373 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vmulq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \ | |
22374 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vmulq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \ | |
22375 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vmulq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \ | |
22376 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ | |
22377 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ | |
22378 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \ | |
22379 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vmulq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \ | |
22380 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vmulq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \ | |
22381 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vmulq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3), \ | |
22382 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmulq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \ | |
22383 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmulq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3), \ | |
22384 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vmulq_x_n_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16_t), p3), \ | |
22385 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vmulq_x_n_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32_t), p3));}) | |
22386 | ||
22387 | #define vnegq_x(p1,p2) __arm_vnegq_x(p1,p2) | |
22388 | #define __arm_vnegq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \ | |
22389 | _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ | |
22390 | int (*)[__ARM_mve_type_int8x16_t]: __arm_vnegq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \ | |
22391 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vnegq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
22392 | int (*)[__ARM_mve_type_int32x4_t]: __arm_vnegq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2), \ | |
22393 | int (*)[__ARM_mve_type_float16x8_t]: __arm_vnegq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \ | |
22394 | int (*)[__ARM_mve_type_float32x4_t]: __arm_vnegq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));}) | |
22395 | ||
22396 | #define vornq_x(p1,p2,p3) __arm_vornq_x(p1,p2,p3) | |
22397 | #define __arm_vornq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ | |
22398 | __typeof(p2) __p2 = (p2); \ | |
22399 | _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
22400 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vornq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \ | |
22401 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vornq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ | |
22402 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vornq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ | |
22403 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vornq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ | |
22404 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vornq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ | |
22405 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vornq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \ | |
22406 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vornq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \ | |
22407 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vornq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));}) | |
22408 | ||
22409 | #define vorrq_x(p1,p2,p3) __arm_vorrq_x(p1,p2,p3) | |
22410 | #define __arm_vorrq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ | |
22411 | __typeof(p2) __p2 = (p2); \ | |
22412 | _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
22413 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vorrq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \ | |
22414 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vorrq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ | |
22415 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vorrq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ | |
22416 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vorrq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ | |
22417 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vorrq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ | |
22418 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vorrq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \ | |
22419 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vorrq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \ | |
22420 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vorrq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));}) | |
22421 | ||
22422 | #define vrev32q_x(p1,p2) __arm_vrev32q_x(p1,p2) | |
22423 | #define __arm_vrev32q_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \ | |
22424 | _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ | |
22425 | int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev32q_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \ | |
22426 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vrev32q_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
22427 | int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev32q_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2), \ | |
22428 | int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrev32q_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2), \ | |
22429 | int (*)[__ARM_mve_type_float16x8_t]: __arm_vrev32q_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2));}) | |
22430 | ||
22431 | #define vrev64q_x(p1,p2) __arm_vrev64q_x(p1,p2) | |
22432 | #define __arm_vrev64q_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \ | |
22433 | _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ | |
22434 | int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev64q_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \ | |
22435 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vrev64q_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
22436 | int (*)[__ARM_mve_type_int32x4_t]: __arm_vrev64q_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2), \ | |
22437 | int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev64q_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2), \ | |
22438 | int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrev64q_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2), \ | |
22439 | int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrev64q_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2), \ | |
22440 | int (*)[__ARM_mve_type_float16x8_t]: __arm_vrev64q_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \ | |
22441 | int (*)[__ARM_mve_type_float32x4_t]: __arm_vrev64q_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));}) | |
22442 | ||
22443 | #define vrndaq_x(p1,p2) __arm_vrndaq_x(p1,p2) | |
22444 | #define __arm_vrndaq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \ | |
22445 | _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ | |
22446 | int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndaq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \ | |
22447 | int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndaq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));}) | |
22448 | ||
22449 | #define vrndmq_x(p1,p2) __arm_vrndmq_x(p1,p2) | |
22450 | #define __arm_vrndmq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \ | |
22451 | _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ | |
22452 | int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndmq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \ | |
22453 | int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndmq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));}) | |
22454 | ||
22455 | #define vrndnq_x(p1,p2) __arm_vrndnq_x(p1,p2) | |
22456 | #define __arm_vrndnq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \ | |
22457 | _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ | |
22458 | int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndnq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \ | |
22459 | int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndnq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));}) | |
22460 | ||
22461 | #define vrndpq_x(p1,p2) __arm_vrndpq_x(p1,p2) | |
22462 | #define __arm_vrndpq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \ | |
22463 | _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ | |
22464 | int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndpq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \ | |
22465 | int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndpq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));}) | |
22466 | ||
22467 | #define vrndq_x(p1,p2) __arm_vrndq_x(p1,p2) | |
22468 | #define __arm_vrndq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \ | |
22469 | _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ | |
22470 | int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \ | |
22471 | int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));}) | |
22472 | ||
22473 | #define vrndxq_x(p1,p2) __arm_vrndxq_x(p1,p2) | |
22474 | #define __arm_vrndxq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \ | |
22475 | _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ | |
22476 | int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndxq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \ | |
22477 | int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndxq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));}) | |
22478 | ||
22479 | #define vsubq_x(p1,p2,p3) __arm_vsubq_x(p1,p2,p3) | |
22480 | #define __arm_vsubq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ | |
22481 | __typeof(p2) __p2 = (p2); \ | |
22482 | _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
22483 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vsubq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \ | |
22484 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vsubq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3), \ | |
22485 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vsubq_x_n_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16_t), p3), \ | |
22486 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vsubq_x_n_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32_t), p3));}) | |
22487 | ||
22488 | #define vcmulq_rot90_x(p1,p2,p3) __arm_vcmulq_rot90_x(p1,p2,p3) | |
22489 | #define __arm_vcmulq_rot90_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ | |
22490 | __typeof(p2) __p2 = (p2); \ | |
22491 | _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
22492 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_rot90_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \ | |
22493 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_rot90_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));}) | |
22494 | ||
e3678b44 | 22495 | #else /* MVE Integer. */ |
14782c81 | 22496 | |
41e1a7ff SP |
22497 | #define vstrwq_scatter_base_wb(p0,p1,p2) __arm_vstrwq_scatter_base_wb(p0,p1,p2) |
22498 | #define __arm_vstrwq_scatter_base_wb(p0,p1,p2) ({ __typeof(p2) __p2 = (p2); \ | |
22499 | _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \ | |
22500 | int (*)[__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_base_wb_s32 (p0, p1, __ARM_mve_coerce(__p2, int32x4_t)), \ | |
22501 | int (*)[__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_base_wb_u32 (p0, p1, __ARM_mve_coerce(__p2, uint32x4_t)));}) | |
22502 | ||
22503 | #define vstrwq_scatter_base_wb_p(p0,p1,p2,p3) __arm_vstrwq_scatter_base_wb_p(p0,p1,p2,p3) | |
22504 | #define __arm_vstrwq_scatter_base_wb_p(p0,p1,p2,p3) ({ __typeof(p2) __p2 = (p2); \ | |
22505 | _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \ | |
22506 | int (*)[__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_base_wb_p_s32 (p0, p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \ | |
22507 | int (*)[__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_base_wb_p_u32 (p0, p1, __ARM_mve_coerce(__p2, uint32x4_t), p3));}) | |
22508 | ||
14782c81 SP |
22509 | #define vst4q(p0,p1) __arm_vst4q(p0,p1) |
22510 | #define __arm_vst4q(p0,p1) ({ __typeof(p0) __p0 = (p0); \ | |
22511 | __typeof(p1) __p1 = (p1); \ | |
22512 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
22513 | int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16x4_t]: __arm_vst4q_s8 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, int8x16x4_t)), \ | |
22514 | int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8x4_t]: __arm_vst4q_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, int16x8x4_t)), \ | |
22515 | int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4x4_t]: __arm_vst4q_s32 (__ARM_mve_coerce(__p0, int32_t *), __ARM_mve_coerce(__p1, int32x4x4_t)), \ | |
22516 | int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16x4_t]: __arm_vst4q_u8 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16x4_t)), \ | |
22517 | int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8x4_t]: __arm_vst4q_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8x4_t)), \ | |
22518 | int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4x4_t]: __arm_vst4q_u32 (__ARM_mve_coerce(__p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4x4_t)));}) | |
22519 | ||
6df4618c SP |
22520 | #define vabsq(p0) __arm_vabsq(p0) |
22521 | #define __arm_vabsq(p0) ({ __typeof(p0) __p0 = (p0); \ | |
22522 | _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ | |
22523 | int (*)[__ARM_mve_type_int8x16_t]: __arm_vabsq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \ | |
22524 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vabsq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \ | |
22525 | int (*)[__ARM_mve_type_int32x4_t]: __arm_vabsq_s32 (__ARM_mve_coerce(__p0, int32x4_t)));}) | |
22526 | ||
22527 | #define vclsq(p0) __arm_vclsq(p0) | |
22528 | #define __arm_vclsq(p0) ({ __typeof(p0) __p0 = (p0); \ | |
22529 | _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ | |
22530 | int (*)[__ARM_mve_type_int8x16_t]: __arm_vclsq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \ | |
22531 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vclsq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \ | |
22532 | int (*)[__ARM_mve_type_int32x4_t]: __arm_vclsq_s32 (__ARM_mve_coerce(__p0, int32x4_t)));}) | |
22533 | ||
22534 | #define vclzq(p0) __arm_vclzq(p0) | |
22535 | #define __arm_vclzq(p0) ({ __typeof(p0) __p0 = (p0); \ | |
22536 | _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ | |
22537 | int (*)[__ARM_mve_type_int8x16_t]: __arm_vclzq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \ | |
22538 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vclzq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \ | |
22539 | int (*)[__ARM_mve_type_int32x4_t]: __arm_vclzq_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \ | |
22540 | int (*)[__ARM_mve_type_uint8x16_t]: __arm_vclzq_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \ | |
22541 | int (*)[__ARM_mve_type_uint16x8_t]: __arm_vclzq_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \ | |
22542 | int (*)[__ARM_mve_type_uint32x4_t]: __arm_vclzq_u32 (__ARM_mve_coerce(__p0, uint32x4_t)));}) | |
22543 | ||
22544 | #define vnegq(p0) __arm_vnegq(p0) | |
22545 | #define __arm_vnegq(p0) ({ __typeof(p0) __p0 = (p0); \ | |
22546 | _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ | |
22547 | int (*)[__ARM_mve_type_int8x16_t]: __arm_vnegq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \ | |
22548 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vnegq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \ | |
22549 | int (*)[__ARM_mve_type_int32x4_t]: __arm_vnegq_s32 (__ARM_mve_coerce(__p0, int32x4_t)));}) | |
22550 | ||
22551 | #define vaddlvq(p0) __arm_vaddlvq(p0) | |
22552 | #define __arm_vaddlvq(p0) ({ __typeof(p0) __p0 = (p0); \ | |
22553 | _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ | |
22554 | int (*)[__ARM_mve_type_int32x4_t]: __arm_vaddlvq_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \ | |
22555 | int (*)[__ARM_mve_type_uint32x4_t]: __arm_vaddlvq_u32 (__ARM_mve_coerce(__p0, uint32x4_t)));}) | |
22556 | ||
22557 | #define vaddvq(p0) __arm_vaddvq(p0) | |
22558 | #define __arm_vaddvq(p0) ({ __typeof(p0) __p0 = (p0); \ | |
22559 | _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ | |
22560 | int (*)[__ARM_mve_type_int8x16_t]: __arm_vaddvq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \ | |
22561 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vaddvq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \ | |
22562 | int (*)[__ARM_mve_type_int32x4_t]: __arm_vaddvq_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \ | |
22563 | int (*)[__ARM_mve_type_uint8x16_t]: __arm_vaddvq_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \ | |
22564 | int (*)[__ARM_mve_type_uint16x8_t]: __arm_vaddvq_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \ | |
22565 | int (*)[__ARM_mve_type_uint32x4_t]: __arm_vaddvq_u32 (__ARM_mve_coerce(__p0, uint32x4_t)));}) | |
22566 | ||
22567 | #define vmovlbq(p0) __arm_vmovlbq(p0) | |
22568 | #define __arm_vmovlbq(p0) ({ __typeof(p0) __p0 = (p0); \ | |
22569 | _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ | |
22570 | int (*)[__ARM_mve_type_int8x16_t]: __arm_vmovlbq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \ | |
22571 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vmovlbq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \ | |
22572 | int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmovlbq_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \ | |
22573 | int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmovlbq_u16 (__ARM_mve_coerce(__p0, uint16x8_t)));}) | |
22574 | ||
22575 | #define vmovltq(p0) __arm_vmovltq(p0) | |
22576 | #define __arm_vmovltq(p0) ({ __typeof(p0) __p0 = (p0); \ | |
22577 | _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ | |
22578 | int (*)[__ARM_mve_type_int8x16_t]: __arm_vmovltq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \ | |
22579 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vmovltq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \ | |
22580 | int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmovltq_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \ | |
22581 | int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmovltq_u16 (__ARM_mve_coerce(__p0, uint16x8_t)));}) | |
22582 | ||
22583 | #define vmvnq(p0) __arm_vmvnq(p0) | |
22584 | #define __arm_vmvnq(p0) ({ __typeof(p0) __p0 = (p0); \ | |
22585 | _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ | |
22586 | int (*)[__ARM_mve_type_int8x16_t]: __arm_vmvnq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \ | |
22587 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vmvnq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \ | |
22588 | int (*)[__ARM_mve_type_int32x4_t]: __arm_vmvnq_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \ | |
22589 | int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmvnq_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \ | |
22590 | int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmvnq_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \ | |
22591 | int (*)[__ARM_mve_type_uint32x4_t]: __arm_vmvnq_u32 (__ARM_mve_coerce(__p0, uint32x4_t)));}) | |
22592 | ||
22593 | #define vrev16q(p0) __arm_vrev16q(p0) | |
22594 | #define __arm_vrev16q(p0) ({ __typeof(p0) __p0 = (p0); \ | |
22595 | _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ | |
22596 | int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev16q_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \ | |
22597 | int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev16q_u8 (__ARM_mve_coerce(__p0, uint8x16_t)));}) | |
22598 | ||
22599 | #define vrev32q(p0) __arm_vrev32q(p0) | |
22600 | #define __arm_vrev32q(p0) ({ __typeof(p0) __p0 = (p0); \ | |
22601 | _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ | |
22602 | int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev32q_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \ | |
22603 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vrev32q_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \ | |
22604 | int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev32q_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \ | |
22605 | int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrev32q_u16 (__ARM_mve_coerce(__p0, uint16x8_t)));}) | |
22606 | ||
5db0eb95 SP |
22607 | #define vrev64q(p0) __arm_vrev64q(p0) |
22608 | #define __arm_vrev64q(p0) ({ __typeof(p0) __p0 = (p0); \ | |
22609 | _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ | |
22610 | int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev64q_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \ | |
22611 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vrev64q_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \ | |
22612 | int (*)[__ARM_mve_type_int32x4_t]: __arm_vrev64q_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \ | |
22613 | int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev64q_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \ | |
22614 | int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrev64q_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \ | |
22615 | int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrev64q_u32 (__ARM_mve_coerce(__p0, uint32x4_t)));}) | |
22616 | ||
6df4618c SP |
22617 | #define vqabsq(p0) __arm_vqabsq(p0) |
22618 | #define __arm_vqabsq(p0) ({ __typeof(p0) __p0 = (p0); \ | |
22619 | _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ | |
22620 | int (*)[__ARM_mve_type_int8x16_t]: __arm_vqabsq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \ | |
22621 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vqabsq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \ | |
22622 | int (*)[__ARM_mve_type_int32x4_t]: __arm_vqabsq_s32 (__ARM_mve_coerce(__p0, int32x4_t)));}) | |
22623 | ||
22624 | #define vqnegq(p0) __arm_vqnegq(p0) | |
22625 | #define __arm_vqnegq(p0) ({ __typeof(p0) __p0 = (p0); \ | |
22626 | _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ | |
22627 | int (*)[__ARM_mve_type_int8x16_t]: __arm_vqnegq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \ | |
22628 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vqnegq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \ | |
22629 | int (*)[__ARM_mve_type_int32x4_t]: __arm_vqnegq_s32 (__ARM_mve_coerce(__p0, int32x4_t)));}) | |
22630 | ||
f166a8cd SP |
22631 | #define vshrq(p0,p1) __arm_vshrq(p0,p1) |
22632 | #define __arm_vshrq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ | |
22633 | _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ | |
22634 | int (*)[__ARM_mve_type_int8x16_t]: __arm_vshrq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \ | |
22635 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vshrq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \ | |
22636 | int (*)[__ARM_mve_type_int32x4_t]: __arm_vshrq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \ | |
22637 | int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshrq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \ | |
22638 | int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshrq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \ | |
22639 | int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshrq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));}) | |
22640 | ||
d71dba7b SP |
22641 | #define vaddlvq_p(p0,p1) __arm_vaddlvq_p(p0,p1) |
22642 | #define __arm_vaddlvq_p(p0,p1) ({ __typeof(p0) __p0 = (p0); \ | |
22643 | _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ | |
22644 | int (*)[__ARM_mve_type_int32x4_t]: __arm_vaddlvq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \ | |
22645 | int (*)[__ARM_mve_type_uint32x4_t]: __arm_vaddlvq_p_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));}) | |
22646 | ||
22647 | #define vcmpneq(p0,p1) __arm_vcmpneq(p0,p1) | |
22648 | #define __arm_vcmpneq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ | |
22649 | __typeof(p1) __p1 = (p1); \ | |
22650 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
22651 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpneq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \ | |
22652 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpneq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ | |
22653 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpneq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \ | |
22654 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpneq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \ | |
22655 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpneq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ | |
22656 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpneq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));}) | |
22657 | ||
22658 | #define vshlq(p0,p1) __arm_vshlq(p0,p1) | |
22659 | #define __arm_vshlq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ | |
22660 | __typeof(p1) __p1 = (p1); \ | |
22661 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
22662 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vshlq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \ | |
22663 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vshlq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ | |
22664 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vshlq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \ | |
22665 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vshlq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \ | |
22666 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vshlq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ | |
22667 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vshlq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));}) | |
22668 | ||
33203b4c SP |
22669 | #define vsubq(p0,p1) __arm_vsubq(p0,p1) |
22670 | #define __arm_vsubq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ | |
22671 | __typeof(p1) __p1 = (p1); \ | |
22672 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
22673 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsubq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \ | |
22674 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsubq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ | |
22675 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsubq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \ | |
22676 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsubq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \ | |
22677 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsubq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ | |
f9355dee | 22678 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsubq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \ |
33203b4c SP |
22679 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vsubq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \ |
22680 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vsubq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \ | |
22681 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vsubq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \ | |
22682 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vsubq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \ | |
22683 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vsubq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \ | |
22684 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vsubq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)));}) | |
22685 | ||
22686 | #define vshlq_r(p0,p1) __arm_vshlq_r(p0,p1) | |
22687 | #define __arm_vshlq_r(p0,p1) ({ __typeof(p0) __p0 = (p0); \ | |
22688 | _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ | |
22689 | int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlq_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \ | |
22690 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlq_r_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \ | |
22691 | int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlq_r_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \ | |
22692 | int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlq_r_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \ | |
22693 | int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlq_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \ | |
22694 | int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlq_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));}) | |
22695 | ||
33203b4c SP |
22696 | #define vrshlq(p0,p1) __arm_vrshlq(p0,p1) |
22697 | #define __arm_vrshlq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ | |
33203b4c SP |
22698 | __typeof(p1) __p1 = (p1); \ |
22699 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
22700 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int32_t]: __arm_vrshlq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int32_t)), \ | |
22701 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32_t]: __arm_vrshlq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32_t)), \ | |
22702 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vrshlq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \ | |
22703 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int32_t]: __arm_vrshlq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int32_t)), \ | |
22704 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32_t]: __arm_vrshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32_t)), \ | |
f9355dee | 22705 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32_t]: __arm_vrshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32_t)), \ |
33203b4c SP |
22706 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshlq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \ |
22707 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ | |
22708 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \ | |
22709 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshlq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \ | |
22710 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ | |
22711 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));}) | |
22712 | ||
22713 | #define vrmulhq(p0,p1) __arm_vrmulhq(p0,p1) | |
22714 | #define __arm_vrmulhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ | |
22715 | __typeof(p1) __p1 = (p1); \ | |
22716 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
22717 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrmulhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \ | |
22718 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrmulhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ | |
22719 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrmulhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \ | |
22720 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrmulhq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \ | |
22721 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrmulhq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ | |
22722 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrmulhq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));}) | |
22723 | ||
22724 | #define vrhaddq(p0,p1) __arm_vrhaddq(p0,p1) | |
22725 | #define __arm_vrhaddq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ | |
22726 | __typeof(p1) __p1 = (p1); \ | |
22727 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
22728 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrhaddq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \ | |
22729 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrhaddq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ | |
22730 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrhaddq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \ | |
22731 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrhaddq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \ | |
22732 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrhaddq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ | |
22733 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrhaddq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));}) | |
22734 | ||
f9355dee SP |
22735 | #define vqsubq(p0,p1) __arm_vqsubq(p0,p1) |
22736 | #define __arm_vqsubq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ | |
33203b4c SP |
22737 | __typeof(p1) __p1 = (p1); \ |
22738 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
22739 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vqsubq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \ | |
22740 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vqsubq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \ | |
22741 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vqsubq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \ | |
22742 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vqsubq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \ | |
22743 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vqsubq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \ | |
f9355dee | 22744 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vqsubq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \ |
33203b4c SP |
22745 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqsubq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \ |
22746 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqsubq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ | |
22747 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqsubq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \ | |
22748 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vqsubq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \ | |
22749 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vqsubq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ | |
22750 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vqsubq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));}) | |
22751 | ||
22752 | #define vqshlq(p0,p1) __arm_vqshlq(p0,p1) | |
22753 | #define __arm_vqshlq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ | |
22754 | __typeof(p1) __p1 = (p1); \ | |
22755 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
22756 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqshlq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \ | |
22757 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqshlq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ | |
22758 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqshlq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \ | |
22759 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqshlq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \ | |
22760 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqshlq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ | |
22761 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqshlq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));}) | |
22762 | ||
22763 | #define vqshlq_r(p0,p1) __arm_vqshlq_r(p0,p1) | |
22764 | #define __arm_vqshlq_r(p0,p1) ({ __typeof(p0) __p0 = (p0); \ | |
22765 | _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ | |
22766 | int (*)[__ARM_mve_type_int8x16_t]: __arm_vqshlq_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \ | |
22767 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vqshlq_r_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \ | |
22768 | int (*)[__ARM_mve_type_int32x4_t]: __arm_vqshlq_r_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \ | |
22769 | int (*)[__ARM_mve_type_uint8x16_t]: __arm_vqshlq_r_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \ | |
22770 | int (*)[__ARM_mve_type_uint16x8_t]: __arm_vqshlq_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \ | |
22771 | int (*)[__ARM_mve_type_uint32x4_t]: __arm_vqshlq_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));}) | |
22772 | ||
22773 | #define vqshluq(p0,p1) __arm_vqshluq(p0,p1) | |
22774 | #define __arm_vqshluq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ | |
22775 | _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ | |
22776 | int (*)[__ARM_mve_type_int8x16_t]: __arm_vqshluq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \ | |
22777 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vqshluq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \ | |
22778 | int (*)[__ARM_mve_type_int32x4_t]: __arm_vqshluq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1));}) | |
22779 | ||
f9355dee SP |
22780 | #define vrshrq(p0,p1) __arm_vrshrq(p0,p1) |
22781 | #define __arm_vrshrq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ | |
33203b4c SP |
22782 | _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ |
22783 | int (*)[__ARM_mve_type_int8x16_t]: __arm_vrshrq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \ | |
22784 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vrshrq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \ | |
22785 | int (*)[__ARM_mve_type_int32x4_t]: __arm_vrshrq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \ | |
22786 | int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrshrq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \ | |
22787 | int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrshrq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \ | |
22788 | int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrshrq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));}) | |
22789 | ||
22790 | #define vshlq_n(p0,p1) __arm_vshlq_n(p0,p1) | |
22791 | #define __arm_vshlq_n(p0,p1) ({ __typeof(p0) __p0 = (p0); \ | |
22792 | _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ | |
22793 | int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \ | |
22794 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \ | |
22795 | int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \ | |
22796 | int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \ | |
22797 | int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \ | |
22798 | int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));}) | |
22799 | ||
33203b4c SP |
22800 | #define vqshlq_n(p0,p1) __arm_vqshlq_n(p0,p1) |
22801 | #define __arm_vqshlq_n(p0,p1) ({ __typeof(p0) __p0 = (p0); \ | |
22802 | _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ | |
22803 | int (*)[__ARM_mve_type_int8x16_t]: __arm_vqshlq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \ | |
22804 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vqshlq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \ | |
22805 | int (*)[__ARM_mve_type_int32x4_t]: __arm_vqshlq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \ | |
22806 | int (*)[__ARM_mve_type_uint8x16_t]: __arm_vqshlq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \ | |
22807 | int (*)[__ARM_mve_type_uint16x8_t]: __arm_vqshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \ | |
22808 | int (*)[__ARM_mve_type_uint32x4_t]: __arm_vqshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));}) | |
22809 | ||
33203b4c SP |
22810 | #define vqrshlq(p0,p1) __arm_vqrshlq(p0,p1) |
22811 | #define __arm_vqrshlq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ | |
22812 | __typeof(p1) __p1 = (p1); \ | |
22813 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
22814 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrshlq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \ | |
22815 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrshlq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ | |
22816 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrshlq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \ | |
22817 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrshlq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \ | |
22818 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrshlq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ | |
f9355dee SP |
22819 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrshlq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \ |
22820 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int32_t]: __arm_vqrshlq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int32_t)), \ | |
22821 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32_t]: __arm_vqrshlq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32_t)), \ | |
22822 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vqrshlq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \ | |
22823 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int32_t]: __arm_vqrshlq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int32_t)), \ | |
22824 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32_t]: __arm_vqrshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32_t)), \ | |
22825 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32_t]: __arm_vqrshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32_t)));}) | |
33203b4c SP |
22826 | |
22827 | #define vqrdmulhq(p0,p1) __arm_vqrdmulhq(p0,p1) | |
22828 | #define __arm_vqrdmulhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ | |
22829 | __typeof(p1) __p1 = (p1); \ | |
22830 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
22831 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmulhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \ | |
22832 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmulhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ | |
f9355dee SP |
22833 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmulhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \ |
22834 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vqrdmulhq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \ | |
22835 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vqrdmulhq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \ | |
22836 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vqrdmulhq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)));}) | |
33203b4c SP |
22837 | |
22838 | #define vqdmulhq(p0,p1) __arm_vqdmulhq(p0,p1) | |
22839 | #define __arm_vqdmulhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ | |
22840 | __typeof(p1) __p1 = (p1); \ | |
22841 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
f9355dee SP |
22842 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vqdmulhq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \ |
22843 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vqdmulhq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \ | |
22844 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vqdmulhq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \ | |
33203b4c SP |
22845 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmulhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \ |
22846 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmulhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ | |
22847 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmulhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));}) | |
22848 | ||
f9355dee SP |
22849 | #define vqaddq(p0,p1) __arm_vqaddq(p0,p1) |
22850 | #define __arm_vqaddq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ | |
33203b4c SP |
22851 | __typeof(p1) __p1 = (p1); \ |
22852 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
22853 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vqaddq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \ | |
22854 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vqaddq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \ | |
22855 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vqaddq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \ | |
22856 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vqaddq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \ | |
22857 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vqaddq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \ | |
f9355dee | 22858 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vqaddq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \ |
33203b4c SP |
22859 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqaddq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \ |
22860 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqaddq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ | |
22861 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqaddq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \ | |
22862 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vqaddq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \ | |
22863 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vqaddq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ | |
22864 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vqaddq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));}) | |
22865 | ||
33203b4c SP |
22866 | #define vorrq(p0,p1) __arm_vorrq(p0,p1) |
22867 | #define __arm_vorrq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ | |
22868 | __typeof(p1) __p1 = (p1); \ | |
22869 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
22870 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vorrq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \ | |
22871 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vorrq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ | |
22872 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vorrq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \ | |
22873 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vorrq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \ | |
22874 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vorrq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ | |
22875 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vorrq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));}) | |
22876 | ||
22877 | #define vornq(p0,p1) __arm_vornq(p0,p1) | |
22878 | #define __arm_vornq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ | |
22879 | __typeof(p1) __p1 = (p1); \ | |
22880 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
22881 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vornq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \ | |
22882 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vornq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ | |
22883 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vornq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \ | |
22884 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vornq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \ | |
22885 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vornq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ | |
22886 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vornq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));}) | |
22887 | ||
22888 | #define vmulq_n(p0,p1) __arm_vmulq_n(p0,p1) | |
22889 | #define __arm_vmulq_n(p0,p1) ({ __typeof(p0) __p0 = (p0); \ | |
22890 | __typeof(p1) __p1 = (p1); \ | |
22891 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
22892 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vmulq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \ | |
22893 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vmulq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \ | |
22894 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vmulq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \ | |
22895 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vmulq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \ | |
22896 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vmulq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \ | |
22897 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vmulq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)));}) | |
22898 | ||
22899 | #define vmulq(p0,p1) __arm_vmulq(p0,p1) | |
22900 | #define __arm_vmulq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ | |
22901 | __typeof(p1) __p1 = (p1); \ | |
22902 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
22903 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \ | |
22904 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ | |
22905 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \ | |
22906 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \ | |
22907 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ | |
22908 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));}) | |
22909 | ||
/* Type-generic vmulltq_int: dispatches to the matching signed/unsigned
   variant based on the MVE vector types of both operands.  */
#define vmulltq_int(p0,p1) __arm_vmulltq_int(p0,p1)
#define __arm_vmulltq_int(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulltq_int_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulltq_int_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulltq_int_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulltq_int_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulltq_int_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulltq_int_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
22920 | ||
/* Type-generic vmullbq_int: dispatches to the matching signed/unsigned
   variant based on the MVE vector types of both operands.  */
#define vmullbq_int(p0,p1) __arm_vmullbq_int(p0,p1)
#define __arm_vmullbq_int(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmullbq_int_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmullbq_int_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmullbq_int_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmullbq_int_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmullbq_int_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmullbq_int_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
22931 | ||
/* Type-generic vmulhq: dispatches to the matching signed/unsigned
   variant based on the MVE vector types of both operands.  */
#define vmulhq(p0,p1) __arm_vmulhq(p0,p1)
#define __arm_vmulhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulhq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulhq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulhq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
22942 | ||
/* Type-generic vminq: dispatches to the matching signed/unsigned
   variant based on the MVE vector types of both operands.  */
#define vminq(p0,p1) __arm_vminq(p0,p1)
#define __arm_vminq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vminq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vminq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vminq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vminq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vminq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vminq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
22953 | ||
/* Type-generic vminaq: first operand is an unsigned vector, second a signed
   vector of matching width; only signed variants exist.  */
#define vminaq(p0,p1) __arm_vminaq(p0,p1)
#define __arm_vminaq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vminaq_s8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vminaq_s16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vminaq_s32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
22961 | ||
/* Type-generic vmaxq: dispatches to the matching signed/unsigned
   variant based on the MVE vector types of both operands.  */
#define vmaxq(p0,p1) __arm_vmaxq(p0,p1)
#define __arm_vmaxq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmaxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmaxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmaxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmaxq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmaxq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmaxq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
22972 | ||
/* Type-generic vmaxaq: first operand is an unsigned vector, second a signed
   vector of matching width; only signed variants exist.  */
#define vmaxaq(p0,p1) __arm_vmaxaq(p0,p1)
#define __arm_vmaxaq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmaxaq_s8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmaxaq_s16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmaxaq_s32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
22980 | ||
/* Type-generic vhsubq: accepts either vector-scalar (maps to the _n_
   variants) or vector-vector operands, signed or unsigned.  */
#define vhsubq(p0,p1) __arm_vhsubq(p0,p1)
#define __arm_vhsubq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vhsubq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vhsubq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vhsubq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vhsubq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vhsubq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vhsubq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhsubq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhsubq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhsubq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vhsubq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vhsubq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vhsubq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
22997 | ||
/* Type-generic vhcaddq_rot90: only signed vector variants exist.  */
#define vhcaddq_rot90(p0,p1) __arm_vhcaddq_rot90(p0,p1)
#define __arm_vhcaddq_rot90(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhcaddq_rot90_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhcaddq_rot90_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhcaddq_rot90_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
23005 | ||
/* Type-generic vhcaddq_rot270: only signed vector variants exist.  */
#define vhcaddq_rot270(p0,p1) __arm_vhcaddq_rot270(p0,p1)
#define __arm_vhcaddq_rot270(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhcaddq_rot270_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhcaddq_rot270_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhcaddq_rot270_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
23013 | ||
/* Type-generic vhaddq: accepts either vector-scalar (maps to the _n_
   variants) or vector-vector operands, signed or unsigned.  */
#define vhaddq(p0,p1) __arm_vhaddq(p0,p1)
#define __arm_vhaddq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vhaddq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vhaddq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vhaddq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vhaddq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vhaddq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vhaddq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhaddq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhaddq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhaddq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vhaddq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vhaddq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vhaddq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
23030 | ||
/* Type-generic veorq: dispatches to the matching signed/unsigned
   variant based on the MVE vector types of both operands.  */
#define veorq(p0,p1) __arm_veorq(p0,p1)
#define __arm_veorq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_veorq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_veorq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_veorq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_veorq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_veorq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_veorq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
23041 | ||
/* Type-generic vcaddq_rot90: dispatches to the matching signed/unsigned
   variant based on the MVE vector types of both operands.  */
#define vcaddq_rot90(p0,p1) __arm_vcaddq_rot90(p0,p1)
#define __arm_vcaddq_rot90(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot90_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot90_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot90_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot90_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot90_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot90_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
23052 | ||
/* Type-generic vcaddq_rot270: dispatches to the matching signed/unsigned
   variant based on the MVE vector types of both operands.  */
#define vcaddq_rot270(p0,p1) __arm_vcaddq_rot270(p0,p1)
#define __arm_vcaddq_rot270(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot270_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot270_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot270_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot270_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot270_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot270_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
23063 | ||
/* Type-generic vbrsrq: dispatches on the first (vector) operand only;
   the second operand p1 is passed through unchanged.  */
#define vbrsrq(p0,p1) __arm_vbrsrq(p0,p1)
#define __arm_vbrsrq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vbrsrq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vbrsrq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vbrsrq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vbrsrq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vbrsrq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vbrsrq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
23073 | ||
/* Type-generic vbicq: dispatches to the matching signed/unsigned
   variant based on the MVE vector types of both operands.  */
#define vbicq(p0,p1) __arm_vbicq(p0,p1)
#define __arm_vbicq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vbicq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vbicq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vbicq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vbicq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vbicq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vbicq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
23084 | ||
/* Type-generic vaddq: accepts either vector-vector operands or
   vector-scalar operands (which map to the _n_ variants).  */
#define vaddq(p0,p1) __arm_vaddq(p0,p1)
#define __arm_vaddq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vaddq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vaddq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vaddq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vaddq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vaddq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vaddq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vaddq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vaddq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vaddq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vaddq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vaddq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vaddq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)));})
23101 | ||
/* Type-generic vandq: dispatches to the matching signed/unsigned
   variant based on the MVE vector types of both operands.  */
#define vandq(p0,p1) __arm_vandq(p0,p1)
#define __arm_vandq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vandq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vandq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vandq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vandq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vandq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vandq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
23112 | ||
/* Type-generic vabdq: dispatches to the matching signed/unsigned
   variant based on the MVE vector types of both operands.  */
#define vabdq(p0,p1) __arm_vabdq(p0,p1)
#define __arm_vabdq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vabdq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vabdq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vabdq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vabdq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vabdq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vabdq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
23123 | ||
/* Type-generic vaddvaq: first operand is a 32-bit scalar accumulator,
   second an MVE vector; signedness of both must match.  */
#define vaddvaq(p0,p1) __arm_vaddvaq(p0,p1)
#define __arm_vaddvaq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int32_t][__ARM_mve_type_int8x16_t]: __arm_vaddvaq_s8 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int32_t][__ARM_mve_type_int16x8_t]: __arm_vaddvaq_s16 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32_t][__ARM_mve_type_int32x4_t]: __arm_vaddvaq_s32 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint32_t][__ARM_mve_type_uint8x16_t]: __arm_vaddvaq_u8 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint32_t][__ARM_mve_type_uint16x8_t]: __arm_vaddvaq_u16 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32_t][__ARM_mve_type_uint32x4_t]: __arm_vaddvaq_u32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
23134 | ||
/* Type-generic vaddvq_p: dispatches on the vector operand only; the
   predicate p1 is passed through unchanged.  */
#define vaddvq_p(p0,p1) __arm_vaddvq_p(p0,p1)
#define __arm_vaddvq_p(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vaddvq_p_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vaddvq_p_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vaddvq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vaddvq_p_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vaddvq_p_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vaddvq_p_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
23144 | ||
/* Type-generic vcmpcsq: unsigned only; accepts vector-vector or
   vector-scalar (the _n_ variants) operand forms.  */
#define vcmpcsq(p0,p1) __arm_vcmpcsq(p0,p1)
#define __arm_vcmpcsq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpcsq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpcsq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpcsq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vcmpcsq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vcmpcsq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vcmpcsq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)));})
23155 | ||
/* Type-generic vcmpeqq: accepts vector-vector or vector-scalar (the _n_
   variants) operand forms, signed or unsigned.  */
#define vcmpeqq(p0,p1) __arm_vcmpeqq(p0,p1)
#define __arm_vcmpeqq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpeqq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpeqq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpeqq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpeqq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpeqq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpeqq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpeqq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpeqq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpeqq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vcmpeqq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vcmpeqq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vcmpeqq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)));})
23172 | ||
/* Type-generic vmlsdavxq: only signed vector variants exist.  */
#define vmlsdavxq(p0,p1) __arm_vmlsdavxq(p0,p1)
#define __arm_vmlsdavxq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmlsdavxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsdavxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsdavxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
23180 | ||
/* Type-generic vmlsdavq: only signed vector variants exist.  */
#define vmlsdavq(p0,p1) __arm_vmlsdavq(p0,p1)
#define __arm_vmlsdavq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmlsdavq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsdavq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsdavq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
23188 | ||
/* Type-generic vmladavxq: dispatches to the matching signed/unsigned
   variant based on the MVE vector types of both operands.  */
#define vmladavxq(p0,p1) __arm_vmladavxq(p0,p1)
#define __arm_vmladavxq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmladavxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmladavxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmladavxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmladavxq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmladavxq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmladavxq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
23199 | ||
/* Overloaded 'vmladavq': dispatch on signedness and element width of the
   two vector operands.  */
#define vmladavq(p0,p1) __arm_vmladavq(p0,p1)
#define __arm_vmladavq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmladavq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmladavq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmladavq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmladavq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmladavq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmladavq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)));})
23210 | ||
/* Overloaded 'vminvq': first operand is a scalar accumulator whose type
   must match the vector's element type; dispatch on that pairing.  */
#define vminvq(p0,p1) __arm_vminvq(p0,p1)
#define __arm_vminvq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint32_t][__ARM_mve_type_uint32x4_t]: __arm_vminvq_u32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_uint16_t][__ARM_mve_type_uint16x8_t]: __arm_vminvq_u16 (__ARM_mve_coerce(__p0, uint16_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint8_t][__ARM_mve_type_uint8x16_t]: __arm_vminvq_u8 (__ARM_mve_coerce(__p0, uint8_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_int32_t][__ARM_mve_type_int32x4_t]: __arm_vminvq_s32 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_int16_t][__ARM_mve_type_int16x8_t]: __arm_vminvq_s16 (__ARM_mve_coerce(__p0, int16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int8_t][__ARM_mve_type_int8x16_t]: __arm_vminvq_s8 (__ARM_mve_coerce(__p0, int8_t), __ARM_mve_coerce(__p1, int8x16_t)));})
23221 | ||
/* Overloaded 'vminavq': unsigned scalar accumulator combined with a
   signed vector; dispatch on the vector's element width.  */
#define vminavq(p0,p1) __arm_vminavq(p0,p1)
#define __arm_vminavq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint32_t][__ARM_mve_type_int32x4_t]: __arm_vminavq_s32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint16_t][__ARM_mve_type_int16x8_t]: __arm_vminavq_s16 (__ARM_mve_coerce(__p0, uint16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_uint8_t][__ARM_mve_type_int8x16_t]: __arm_vminavq_s8 (__ARM_mve_coerce(__p0, uint8_t), __ARM_mve_coerce(__p1, int8x16_t)));})
23229 | ||
/* Overloaded 'vmaxvq': scalar accumulator + vector; dispatch on the
   matching scalar/vector element type pairing.  */
#define vmaxvq(p0,p1) __arm_vmaxvq(p0,p1)
#define __arm_vmaxvq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint32_t][__ARM_mve_type_uint32x4_t]: __arm_vmaxvq_u32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_uint16_t][__ARM_mve_type_uint16x8_t]: __arm_vmaxvq_u16 (__ARM_mve_coerce(__p0, uint16_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint8_t][__ARM_mve_type_uint8x16_t]: __arm_vmaxvq_u8 (__ARM_mve_coerce(__p0, uint8_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_int32_t][__ARM_mve_type_int32x4_t]: __arm_vmaxvq_s32 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_int16_t][__ARM_mve_type_int16x8_t]: __arm_vmaxvq_s16 (__ARM_mve_coerce(__p0, int16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int8_t][__ARM_mve_type_int8x16_t]: __arm_vmaxvq_s8 (__ARM_mve_coerce(__p0, int8_t), __ARM_mve_coerce(__p1, int8x16_t)));})
23240 | ||
/* Overloaded 'vmaxavq': unsigned scalar accumulator combined with a
   signed vector; dispatch on the vector's element width.  */
#define vmaxavq(p0,p1) __arm_vmaxavq(p0,p1)
#define __arm_vmaxavq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint32_t][__ARM_mve_type_int32x4_t]: __arm_vmaxavq_s32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint16_t][__ARM_mve_type_int16x8_t]: __arm_vmaxavq_s16 (__ARM_mve_coerce(__p0, uint16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_uint8_t][__ARM_mve_type_int8x16_t]: __arm_vmaxavq_s8 (__ARM_mve_coerce(__p0, uint8_t), __ARM_mve_coerce(__p1, int8x16_t)));})
23248 | ||
/* Overloaded 'vcmpneq': compare-not-equal for vector/vector and
   vector/scalar (_n) operands of every 8/16/32-bit integer element type;
   dispatch is via _Generic on the __ARM_mve_typeid of both operands.
   NOTE(review): a second, token-identical definition of this macro pair
   appears further down in this file; the two must remain token-identical
   (C identical-redefinition rule) or be deduplicated.  */
#define vcmpneq(p0,p1) __arm_vcmpneq(p0,p1)
#define __arm_vcmpneq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpneq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpneq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpneq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpneq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpneq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpneq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpneq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpneq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpneq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vcmpneq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vcmpneq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vcmpneq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)));})
23265 | ||
f9355dee SP |
23266 | |
/* Overloaded 'vqmovntq': half-width first operand, full-width second
   operand; dispatch on that narrowing type pairing.  */
#define vqmovntq(p0,p1) __arm_vqmovntq(p0,p1)
#define __arm_vqmovntq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqmovntq_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqmovntq_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovntq_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovntq_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t)));})
23275 | ||
/* Overloaded 'vqmovnbq': half-width first operand, full-width second
   operand; dispatch on that narrowing type pairing.  */
#define vqmovnbq(p0,p1) __arm_vqmovnbq(p0,p1)
#define __arm_vqmovnbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqmovnbq_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqmovnbq_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovnbq_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovnbq_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t)));})
23284 | ||
/* Overloaded 'vmulltq_poly': polynomial multiply (top); the p8/p16
   variants are carried in unsigned vector types.  */
#define vmulltq_poly(p0,p1) __arm_vmulltq_poly(p0,p1)
#define __arm_vmulltq_poly(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulltq_poly_p16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulltq_poly_p8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)));})
23291 | ||
/* Overloaded 'vmullbq_poly': polynomial multiply (bottom); the p8/p16
   variants are carried in unsigned vector types.  */
#define vmullbq_poly(p0,p1) __arm_vmullbq_poly(p0,p1)
#define __arm_vmullbq_poly(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmullbq_poly_p16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmullbq_poly_p8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)));})
23298 | ||
/* Overloaded 'vmovntq': half-width first operand, full-width second
   operand; dispatch on that narrowing type pairing.  */
#define vmovntq(p0,p1) __arm_vmovntq(p0,p1)
#define __arm_vmovntq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vmovntq_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vmovntq_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vmovntq_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vmovntq_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t)));})
23307 | ||
/* Overloaded 'vmovnbq': half-width first operand, full-width second
   operand; dispatch on that narrowing type pairing.  */
#define vmovnbq(p0,p1) __arm_vmovnbq(p0,p1)
#define __arm_vmovnbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vmovnbq_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vmovnbq_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vmovnbq_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vmovnbq_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t)));})
23316 | ||
/* Overloaded 'vmlaldavxq': signed 16/32-bit element widths only.  */
#define vmlaldavxq(p0,p1) __arm_vmlaldavxq(p0,p1)
#define __arm_vmlaldavxq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlaldavxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlaldavxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)));})
23323 | ||
/* Overloaded 'vqmovuntq': unsigned half-width first operand paired with
   a signed full-width second operand.  */
#define vqmovuntq(p0,p1) __arm_vqmovuntq(p0,p1)
#define __arm_vqmovuntq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovuntq_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovuntq_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t)));})
23330 | ||
/* Overloaded 'vshlltq': dispatch on the (narrow) vector type of p0 only.
   p1 is passed through unmodified so it stays a constant expression for
   the immediate operand check.  */
#define vshlltq(p0,p1) __arm_vshlltq(p0,p1)
#define __arm_vshlltq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlltq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlltq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlltq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlltq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1));})
23338 | ||
/* Overloaded 'vshllbq': dispatch on the (narrow) vector type of p0 only.
   p1 is passed through unmodified so it stays a constant expression for
   the immediate operand check.  */
#define vshllbq(p0,p1) __arm_vshllbq(p0,p1)
#define __arm_vshllbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshllbq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshllbq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vshllbq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vshllbq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1));})
23346 | ||
/* Overloaded 'vmlaldavq': signed and unsigned 16/32-bit element widths.  */
#define vmlaldavq(p0,p1) __arm_vmlaldavq(p0,p1)
#define __arm_vmlaldavq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmlaldavq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmlaldavq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlaldavq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlaldavq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)));})
23355 | ||
/* Overloaded 'vqmovunbq': unsigned half-width first operand paired with
   a signed full-width second operand.  */
#define vqmovunbq(p0,p1) __arm_vqmovunbq(p0,p1)
#define __arm_vqmovunbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovunbq_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovunbq_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t)));})
23362 | ||
/* Overloaded 'vqdmulltq': vector/vector form when both operands are
   vectors, _n (vector/scalar) form when p1 is a matching scalar.  */
#define vqdmulltq(p0,p1) __arm_vqdmulltq(p0,p1)
#define __arm_vqdmulltq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmulltq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmulltq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vqdmulltq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vqdmulltq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)));})
23371 | ||
/* Overloaded 'vqdmullbq': vector/vector form when both operands are
   vectors, _n (vector/scalar) form when p1 is a matching scalar.  */
#define vqdmullbq(p0,p1) __arm_vqdmullbq(p0,p1)
#define __arm_vqdmullbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmullbq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmullbq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vqdmullbq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vqdmullbq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)));})
23380 | ||
/* Overloaded 'vcmpgeq_n': signed vector compared against a matching
   scalar; dispatch on the element width.  */
#define vcmpgeq_n(p0,p1) __arm_vcmpgeq_n(p0,p1)
#define __arm_vcmpgeq_n(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpgeq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpgeq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpgeq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)));})
23388 | ||
33203b4c SP |
/* Overloaded 'vcmpgeq': signed vector/vector or vector/scalar (_n)
   greater-or-equal comparison; dispatch on both operand types.  */
#define vcmpgeq(p0,p1) __arm_vcmpgeq(p0,p1)
#define __arm_vcmpgeq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpgeq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpgeq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpgeq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpgeq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpgeq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpgeq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)));})
23399 | ||
/* Overloaded 'vcmpgtq': signed vector/vector or vector/scalar (_n)
   greater-than comparison; dispatch on both operand types.  */
#define vcmpgtq(p0,p1) __arm_vcmpgtq(p0,p1)
#define __arm_vcmpgtq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpgtq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpgtq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpgtq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpgtq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpgtq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpgtq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)));})
23410 | ||
/* Overloaded 'vcmphiq': unsigned higher comparison, vector/vector or
   vector/scalar (_n); dispatch on both operand types.  */
#define vcmphiq(p0,p1) __arm_vcmphiq(p0,p1)
#define __arm_vcmphiq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vcmphiq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vcmphiq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vcmphiq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmphiq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmphiq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmphiq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)));})
23421 | ||
/* Overloaded 'vcmpleq': signed vector/vector or vector/scalar (_n)
   less-or-equal comparison; dispatch on both operand types.  */
#define vcmpleq(p0,p1) __arm_vcmpleq(p0,p1)
#define __arm_vcmpleq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpleq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpleq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpleq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpleq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpleq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpleq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)));})
23432 | ||
/* Overloaded 'vcmpltq': signed vector/vector or vector/scalar (_n)
   less-than comparison; dispatch on both operand types.  */
#define vcmpltq(p0,p1) __arm_vcmpltq(p0,p1)
#define __arm_vcmpltq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpltq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpltq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpltq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpltq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpltq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpltq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)));})
23443 | ||
8165795c SP |
/* Overloaded 'vcmpneq_m' (predicated compare-not-equal): vector/vector
   selections grouped first, then the vector/scalar (_n) selections.
   p2 (the predicate) is forwarded unmodified.  */
#define vcmpneq_m(p0,p1,p2) __arm_vcmpneq_m(p0,p1,p2)
#define __arm_vcmpneq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpneq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpneq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpneq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpneq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpneq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpneq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpneq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpneq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpneq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vcmpneq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vcmpneq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vcmpneq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t), p2));})
23460 | ||
/* A second, token-for-token identical definition of the 'vcmpneq' /
   '__arm_vcmpneq' polymorphic macro pair previously appeared here (a
   merge artifact); the definition earlier in this file already covers
   every selection it listed, so the redundant redefinition has been
   removed.  */
23477 | ||
f9355dee SP |
/* Overloaded 'vaddlvaq': 64-bit scalar accumulator plus a 32-bit element
   vector; dispatch on the matching signedness pairing.  */
#define vaddlvaq(p0,p1) __arm_vaddlvaq(p0,p1)
#define __arm_vaddlvaq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint64_t][__ARM_mve_type_uint32x4_t]: __arm_vaddlvaq_u32 (__ARM_mve_coerce(__p0, uint64_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_int64_t][__ARM_mve_type_int32x4_t]: __arm_vaddlvaq_s32 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int32x4_t)));})
23484 | ||
/* Overloaded 'vrmlaldavhq': 32-bit element vectors only; dispatch on
   signedness.  */
#define vrmlaldavhq(p0,p1) __arm_vrmlaldavhq(p0,p1)
#define __arm_vrmlaldavhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrmlaldavhq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrmlaldavhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
23491 | ||
23492 | #define vrmlaldavhxq(p0,p1) __arm_vrmlaldavhxq(p0,p1) | |
23493 | #define __arm_vrmlaldavhxq(p0,p1) __arm_vrmlaldavhxq_s32(p0,p1) | |
23494 | ||
23495 | #define vrmlsldavhq(p0,p1) __arm_vrmlsldavhq(p0,p1) | |
23496 | #define __arm_vrmlsldavhq(p0,p1) __arm_vrmlsldavhq_s32(p0,p1) | |
23497 | ||
23498 | #define vrmlsldavhq(p0,p1) __arm_vrmlsldavhq(p0,p1) | |
23499 | #define __arm_vrmlsldavhq(p0,p1) __arm_vrmlsldavhq_s32(p0,p1) | |
23500 | ||
23501 | #define vrmlsldavhxq(p0,p1) __arm_vrmlsldavhxq(p0,p1) | |
23502 | #define __arm_vrmlsldavhxq(p0,p1) __arm_vrmlsldavhxq_s32(p0,p1) | |
23503 | ||
23504 | #define vmlsldavxq(p0,p1) __arm_vmlsldavxq(p0,p1) | |
23505 | #define __arm_vmlsldavxq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ | |
23506 | __typeof(p1) __p1 = (p1); \ | |
23507 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
23508 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsldavxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ | |
23509 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsldavxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));}) | |
23510 | ||
23511 | #define vmlsldavq(p0,p1) __arm_vmlsldavq(p0,p1) | |
23512 | #define __arm_vmlsldavq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ | |
23513 | __typeof(p1) __p1 = (p1); \ | |
23514 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
23515 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsldavq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ | |
23516 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsldavq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));}) | |
23517 | ||
0dad5b33 SP |
/* Absolute-difference-and-accumulate: scalar accumulator __p0 is passed
   through unchanged, so _Generic dispatches on the two vector operands
   only.  */
#define vabavq(p0,p1,p2) __arm_vabavq(p0,p1,p2)
#define __arm_vabavq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vabavq_s8 (__p0, __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vabavq_s16 (__p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vabavq_s32 (__p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vabavq_u8 (__p0, __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vabavq_u16 (__p0, __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vabavq_u32 (__p0, __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));})

/* Shift-left-with-carry: dispatch on the vector operand only; the shift
   count p1 and carry pointer p2 are forwarded as written.  */
#define vshlcq(p0,p1,p2) __arm_vshlcq(p0,p1,p2)
#define __arm_vshlcq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlcq_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlcq_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlcq_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlcq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlcq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlcq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})

/* Rounding multiply-accumulate long with 64-bit scalar accumulator;
   dispatch on the full (accumulator, vector, vector) type triple.  */
#define vrmlaldavhaq(p0,p1,p2) __arm_vrmlaldavhaq(p0,p1,p2)
#define __arm_vrmlaldavhaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int64_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrmlaldavhaq_s32 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
  int (*)[__ARM_mve_type_uint64_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrmlaldavhaq_u32 (__ARM_mve_coerce(__p0, uint64_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));})
23547 | ||
/* Predicated compare-equal: both vector/vector and vector/scalar forms
   are resolved by the same _Generic table (scalar second operand selects
   the _n_ variants); p2 is the predicate mask.  */
#define vcmpeqq_m(p0,p1,p2) __arm_vcmpeqq_m(p0,p1,p2)
#define __arm_vcmpeqq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpeqq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpeqq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpeqq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpeqq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpeqq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpeqq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpeqq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpeqq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpeqq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vcmpeqq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vcmpeqq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vcmpeqq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t), p2));})

/* Predicated bit-clear with immediate; 16/32-bit elements only.  */
#define vbicq_m_n(p0,p1,p2) __arm_vbicq_m_n(p0,p1,p2)
#define __arm_vbicq_m_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vbicq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vbicq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vbicq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vbicq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})

/* Saturating rounding shift-right-narrow (bottom); narrows p1 into the
   bottom half of p0 by immediate p2.  */
#define vqrshrnbq(p0,p1,p2) __arm_vqrshrnbq(p0,p1,p2)
#define __arm_vqrshrnbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshrnbq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshrnbq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqrshrnbq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqrshrnbq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})

/* Saturating rounding shift-right-narrow unsigned (bottom): signed
   source, unsigned destination.  */
#define vqrshrunbq(p0,p1,p2) __arm_vqrshrunbq(p0,p1,p2)
#define __arm_vqrshrunbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshrunbq_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshrunbq_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
23588 | ||
8165795c SP |
/* Saturating rounding doubling multiply-subtract dual; signed only.  */
#define vqrdmlsdhq(p0,p1,p2) __arm_vqrdmlsdhq(p0,p1,p2)
#define __arm_vqrdmlsdhq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmlsdhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmlsdhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmlsdhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})

/* Exchanged-pair variant of the above.  */
#define vqrdmlsdhxq(p0,p1,p2) __arm_vqrdmlsdhxq(p0,p1,p2)
#define __arm_vqrdmlsdhxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmlsdhxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmlsdhxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmlsdhxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})

/* Predicated saturating rounding shift-left by register; dispatch on the
   vector operand, shift p1 and predicate p2 forwarded unchanged.  */
#define vqrshlq_m_n(p0,p1,p2) __arm_vqrshlq_m_n(p0,p1,p2)
#define __arm_vqrshlq_m_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vqrshlq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vqrshlq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vqrshlq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vqrshlq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vqrshlq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vqrshlq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})

/* Predicated saturating shift-left by register.  */
#define vqshlq_m_r(p0,p1,p2) __arm_vqshlq_m_r(p0,p1,p2)
#define __arm_vqshlq_m_r(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vqshlq_m_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vqshlq_m_r_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vqshlq_m_r_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vqshlq_m_r_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vqshlq_m_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vqshlq_m_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})

/* Predicated reverse of elements within 64-bit blocks.  */
#define vrev64q_m(p0,p1,p2) __arm_vrev64q_m(p0,p1,p2)
#define __arm_vrev64q_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrev64q_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrev64q_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrev64q_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrev64q_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrev64q_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrev64q_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
23637 | ||
/* Predicated rounding shift-left by register.  The shift operand is
   captured once as __p1; previously only the u32 branch used __p1 while
   the other five branches expanded raw p1 — now all branches use the
   captured __p1 consistently (only the selected _Generic branch is
   evaluated, so behavior is unchanged for well-formed calls).  */
#define vrshlq_m_n(p0,p1,p2) __arm_vrshlq_m_n(p0,p1,p2)
#define __arm_vrshlq_m_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vrshlq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __p1, p2), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vrshlq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __p1, p2), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vrshlq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __p1, p2), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrshlq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __p1, p2), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrshlq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __p1, p2), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrshlq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __p1, p2));})
23648 | ||
/* Predicated shift-left by register; dispatch on the vector operand.  */
#define vshlq_m_r(p0,p1,p2) __arm_vshlq_m_r(p0,p1,p2)
#define __arm_vshlq_m_r(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlq_m_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlq_m_r_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlq_m_r_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlq_m_r_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlq_m_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlq_m_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})

/* Shift-left-and-insert by immediate p2.  */
#define vsliq(p0,p1,p2) __arm_vsliq(p0,p1,p2)
#define __arm_vsliq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsliq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsliq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsliq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsliq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsliq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsliq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})

/* Shift-right-and-insert by immediate p2.  */
#define vsriq(p0,p1,p2) __arm_vsriq(p0,p1,p2)
#define __arm_vsriq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsriq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsriq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsriq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsriq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsriq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsriq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
23680 | ||
/* Saturating rounding doubling multiply-accumulate with scalar (variant
   with accumulator as third multiply operand).  */
#define vqrdmlashq(p0,p1,p2) __arm_vqrdmlashq(p0,p1,p2)
#define __arm_vqrdmlashq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vqrdmlashq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vqrdmlashq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vqrdmlashq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vqrdmlashq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vqrdmlashq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vqrdmlashq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t)));})

/* Saturating rounding doubling multiply-accumulate with scalar.  */
#define vqrdmlahq(p0,p1,p2) __arm_vqrdmlahq(p0,p1,p2)
#define __arm_vqrdmlahq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vqrdmlahq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vqrdmlahq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vqrdmlahq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vqrdmlahq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vqrdmlahq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vqrdmlahq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t)));})

/* Saturating rounding doubling multiply-add dual (exchanged); signed
   only.  */
#define vqrdmladhxq(p0,p1,p2) __arm_vqrdmladhxq(p0,p1,p2)
#define __arm_vqrdmladhxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmladhxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmladhxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmladhxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})

/* Saturating rounding doubling multiply-add dual; signed only.  */
#define vqrdmladhq(p0,p1,p2) __arm_vqrdmladhq(p0,p1,p2)
#define __arm_vqrdmladhq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmladhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmladhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmladhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
23722 | ||
/* Predicated saturating negate; signed only.  */
#define vqnegq_m(p0,p1,p2) __arm_vqnegq_m(p0,p1,p2)
#define __arm_vqnegq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqnegq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqnegq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqnegq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})

/* Saturating doubling multiply-subtract dual (exchanged); signed only.  */
#define vqdmlsdhxq(p0,p1,p2) __arm_vqdmlsdhxq(p0,p1,p2)
#define __arm_vqdmlsdhxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmlsdhxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmlsdhxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmlsdhxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})

/* Predicated absolute value; signed only.  */
#define vabsq_m(p0,p1,p2) __arm_vabsq_m(p0,p1,p2)
#define __arm_vabsq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vabsq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vabsq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vabsq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})

/* Predicated count-leading-sign-bits; signed only.  */
#define vclsq_m(p0,p1,p2) __arm_vclsq_m(p0,p1,p2)
#define __arm_vclsq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vclsq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vclsq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vclsq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
23755 | ||
23756 | #define vclzq_m(p0,p1,p2) __arm_vclzq_m(p0,p1,p2) | |
23757 | #define __arm_vclzq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
23758 | __typeof(p1) __p1 = (p1); \ | |
23759 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
23760 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vclzq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \ | |
23761 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vclzq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
23762 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vclzq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ | |
23763 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vclzq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \ | |
23764 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vclzq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ | |
23765 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vclzq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) | |
23766 | ||
23767 | #define vcmpgeq_m(p0,p1,p2) __arm_vcmpgeq_m(p0,p1,p2) | |
23768 | #define __arm_vcmpgeq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
23769 | __typeof(p1) __p1 = (p1); \ | |
23770 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
23771 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpgeq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \ | |
23772 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpgeq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
23773 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpgeq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));}) | |
23774 | ||
23775 | #define vcmpgeq_m_n(p0,p1,p2) __arm_vcmpgeq_m_n(p0,p1,p2) | |
23776 | #define __arm_vcmpgeq_m_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
23777 | __typeof(p1) __p1 = (p1); \ | |
23778 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
23779 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpgeq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t), p2), \ | |
23780 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpgeq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t), p2), \ | |
23781 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpgeq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t), p2));}) | |
23782 | ||
/* vdupq_m: predicated duplicate of a scalar into a vector.
   Dispatches on the __ARM_mve_typeid of p0 (vector) and p1 (scalar)
   to the _n_{s8,s16,s32,u8,u16,u32} variant; p2 is forwarded
   unchanged.  */
#define vdupq_m(p0,p1,p2) __arm_vdupq_m(p0,p1,p2)
#define __arm_vdupq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vdupq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vdupq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vdupq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vdupq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vdupq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vdupq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t), p2));})
23793 | ||
/* vmaxaq_m: predicated max-of-absolute accumulate.
   Note the mixed signedness: p0 is the unsigned vector, p1 the signed
   vector of the same element width; dispatch selects the s8/s16/s32
   variant accordingly, with p2 forwarded unchanged.  */
#define vmaxaq_m(p0,p1,p2) __arm_vmaxaq_m(p0,p1,p2)
#define __arm_vmaxaq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmaxaq_m_s8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmaxaq_m_s16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmaxaq_m_s32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
23801 | ||
/* vmlaq: multiply-accumulate with a scalar (vector * scalar + vector).
   Three-key dispatch on the __ARM_mve_typeid of p0 and p1 (vectors)
   and p2 (scalar) to the _n_{s8,s16,s32,u8,u16,u32} variant.  */
#define vmlaq(p0,p1,p2) __arm_vmlaq(p0,p1,p2)
#define __arm_vmlaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vmlaq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vmlaq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vmlaq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vmlaq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vmlaq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vmlaq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t)));})
23813 | ||
/* vmlasq: multiply-accumulate-scalar (vector * vector + scalar).
   Same dispatch shape as vmlaq: keyed on the typeids of p0, p1
   (vectors) and p2 (scalar), selecting _n_{s8,s16,s32,u8,u16,u32}.  */
#define vmlasq(p0,p1,p2) __arm_vmlasq(p0,p1,p2)
#define __arm_vmlasq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vmlasq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vmlasq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vmlasq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vmlasq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vmlasq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vmlasq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t)));})
23825 | ||
8165795c SP |
/* vnegq_m: predicated negate.  Dispatches on the typeids of p0 and p1
   (both signed vectors of the same shape) to the s8/s16/s32 variant;
   p2 is forwarded unchanged.  Signed-only table: negation has no
   unsigned form here.  */
#define vnegq_m(p0,p1,p2) __arm_vnegq_m(p0,p1,p2)
#define __arm_vnegq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vnegq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vnegq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vnegq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
23833 | ||
/* vpselq: predicated lane select between two vectors.
   Widest dispatch table in this group -- covers all eight integer
   shapes including the 64-bit s64/u64 vectors; p2 is forwarded
   unchanged.  */
#define vpselq(p0,p1,p2) __arm_vpselq(p0,p1,p2)
#define __arm_vpselq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vpselq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vpselq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vpselq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_int64x2_t][__ARM_mve_type_int64x2_t]: __arm_vpselq_s64 (__ARM_mve_coerce(__p0, int64x2_t), __ARM_mve_coerce(__p1, int64x2_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vpselq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vpselq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vpselq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
  int (*)[__ARM_mve_type_uint64x2_t][__ARM_mve_type_uint64x2_t]: __arm_vpselq_u64 (__ARM_mve_coerce(__p0, uint64x2_t), __ARM_mve_coerce(__p1, uint64x2_t), p2));})
23846 | ||
/* vqdmlahq: saturating doubling multiply-accumulate with a scalar.
   Keyed on the typeids of p0, p1 (vectors) and p2 (scalar).
   NOTE(review): the ACLE MVE specification defines vqdmlah for signed
   element types only; the unsigned u8/u16/u32 cases below look
   non-standard and were dropped from later GCC releases -- confirm
   against the current ACLE before relying on them.  */
#define vqdmlahq(p0,p1,p2) __arm_vqdmlahq(p0,p1,p2)
#define __arm_vqdmlahq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vqdmlahq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vqdmlahq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vqdmlahq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vqdmlahq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vqdmlahq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vqdmlahq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t)));})
23858 | ||
/* vqdmlsdhq: saturating doubling multiply-subtract, dual-halving form.
   All three operands are signed vectors of the same shape; dispatch
   selects the s8/s16/s32 variant.  Signed-only by design.  */
#define vqdmlsdhq(p0,p1,p2) __arm_vqdmlsdhq(p0,p1,p2)
#define __arm_vqdmlsdhq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmlsdhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmlsdhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmlsdhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
23867 | ||
/* vqdmladhxq: saturating doubling multiply-add, dual-halving exchanged
   form.  Three signed vector operands of identical shape; dispatch
   selects the s8/s16/s32 variant.  */
#define vqdmladhxq(p0,p1,p2) __arm_vqdmladhxq(p0,p1,p2)
#define __arm_vqdmladhxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmladhxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmladhxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmladhxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
23876 | ||
/* vqdmladhq: saturating doubling multiply-add, dual-halving form.
   Three signed vector operands of identical shape; dispatch selects
   the s8/s16/s32 variant.  */
#define vqdmladhq(p0,p1,p2) __arm_vqdmladhq(p0,p1,p2)
#define __arm_vqdmladhq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmladhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmladhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmladhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
23885 | ||
8165795c SP |
/* vminaq_m: predicated min-of-absolute accumulate.
   Mixed signedness like vmaxaq_m: p0 is the unsigned vector, p1 the
   signed vector of matching element width; p2 forwarded unchanged.  */
#define vminaq_m(p0,p1,p2) __arm_vminaq_m(p0,p1,p2)
#define __arm_vminaq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vminaq_m_s8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vminaq_m_s16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vminaq_m_s32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
23893 | ||
/* vrmlaldavhaq: rounding multiply-accumulate-long across vector, with
   64-bit scalar accumulator.  p0 is the int64_t/uint64_t accumulator,
   p1 and p2 the 32-bit element vectors; only the x4 shapes exist.  */
#define vrmlaldavhaq(p0,p1,p2) __arm_vrmlaldavhaq(p0,p1,p2)
#define __arm_vrmlaldavhaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int64_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrmlaldavhaq_s32 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
  int (*)[__ARM_mve_type_uint64_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrmlaldavhaq_u32 (__ARM_mve_coerce(__p0, uint64_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));})
23901 | ||
/* vmlsdavxq_p: predicated multiply-subtract across vector, exchanged
   form.  Two signed vector operands; p2 (predicate) forwarded
   unchanged.  Signed-only table.  */
#define vmlsdavxq_p(p0,p1,p2) __arm_vmlsdavxq_p(p0,p1,p2)
#define __arm_vmlsdavxq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmlsdavxq_p_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsdavxq_p_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsdavxq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
23909 | ||
/* vmlsdavq_p: predicated multiply-subtract across vector.
   Two signed vector operands; p2 (predicate) forwarded unchanged.
   Signed-only table.  */
#define vmlsdavq_p(p0,p1,p2) __arm_vmlsdavq_p(p0,p1,p2)
#define __arm_vmlsdavq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmlsdavq_p_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsdavq_p_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsdavq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
23917 | ||
/* vmlsdavaxq: multiply-subtract across vector with accumulator,
   exchanged form.  _Generic is keyed on p1/p2 only; the accumulator
   __p0 is passed through UNcoerced (contrast vmladavaxq below, which
   coerces its accumulator to int32_t) -- intentional per the original
   source, so any conversion of p0 is left to the callee's prototype.  */
#define vmlsdavaxq(p0,p1,p2) __arm_vmlsdavaxq(p0,p1,p2)
#define __arm_vmlsdavaxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmlsdavaxq_s8(__p0, __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsdavaxq_s16(__p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsdavaxq_s32(__p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
23926 | ||
/* vmlsdavaq: multiply-subtract across vector with accumulator.
   Like vmlsdavaxq: _Generic keys on p1/p2 only and the accumulator
   __p0 is forwarded without __ARM_mve_coerce.  Signed-only table.  */
#define vmlsdavaq(p0,p1,p2) __arm_vmlsdavaq(p0,p1,p2)
#define __arm_vmlsdavaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmlsdavaq_s8(__p0, __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsdavaq_s16(__p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsdavaq_s32(__p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
23935 | ||
/* vaddvaq_p: predicated add-across-vector with 32-bit scalar
   accumulator.  p0 is always coerced to int32_t/uint32_t regardless of
   the vector element width; p1 selects the element variant; p2 is
   forwarded unchanged.  */
#define vaddvaq_p(p0,p1,p2) __arm_vaddvaq_p(p0,p1,p2)
#define __arm_vaddvaq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int32_t][__ARM_mve_type_int8x16_t]: __arm_vaddvaq_p_s8 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int32_t][__ARM_mve_type_int16x8_t]: __arm_vaddvaq_p_s16 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32_t][__ARM_mve_type_int32x4_t]: __arm_vaddvaq_p_s32 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint32_t][__ARM_mve_type_uint8x16_t]: __arm_vaddvaq_p_u8 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint32_t][__ARM_mve_type_uint16x8_t]: __arm_vaddvaq_p_u16 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32_t][__ARM_mve_type_uint32x4_t]: __arm_vaddvaq_p_u32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
23946 | ||
/* vcmpcsq_m_n: predicated unsigned compare carry-set (>=).
   NOTE(review): despite the _n name, this table also dispatches the
   vector-vector __arm_vcmpcsq_m_u* cases in addition to the scalar
   _n_ cases, so vector second operands resolve here too -- confirm
   this duplication of the vcmpcsq_m table below is intentional.  */
#define vcmpcsq_m_n(p0,p1,p2) __arm_vcmpcsq_m_n(p0,p1,p2)
#define __arm_vcmpcsq_m_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpcsq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpcsq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpcsq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vcmpcsq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vcmpcsq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vcmpcsq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t), p2));})
23957 | ||
/* vcmpcsq_m: predicated unsigned compare carry-set (>=),
   vector-vector form only (u8/u16/u32); p2 forwarded unchanged.  */
#define vcmpcsq_m(p0,p1,p2) __arm_vcmpcsq_m(p0,p1,p2)
#define __arm_vcmpcsq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpcsq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpcsq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpcsq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
23965 | ||
/* vmladavxq_p: predicated multiply-add across vector, exchanged form.
   Two signed vector operands; p2 (predicate) forwarded unchanged.
   Signed-only table.  */
#define vmladavxq_p(p0,p1,p2) __arm_vmladavxq_p(p0,p1,p2)
#define __arm_vmladavxq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmladavxq_p_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmladavxq_p_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmladavxq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
23973 | ||
/* vmladavq_p: predicated multiply-add across vector.
   Two same-typed vector operands, signed and unsigned variants for
   all three element widths; p2 forwarded unchanged.  */
#define vmladavq_p(p0,p1,p2) __arm_vmladavq_p(p0,p1,p2)
#define __arm_vmladavq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmladavq_p_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmladavq_p_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmladavq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmladavq_p_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmladavq_p_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmladavq_p_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
23984 | ||
/* vmladavaxq: multiply-add across vector with 32-bit accumulator,
   exchanged form.  Unlike vmlsdavaxq above, the accumulator p0 IS
   coerced (to int32_t/uint32_t).
   NOTE(review): the ACLE MVE spec defines the exchanged (x) variants
   for signed types only; the u8/u16/u32 cases below may be
   non-standard -- confirm against current ACLE.  */
#define vmladavaxq(p0,p1,p2) __arm_vmladavaxq(p0,p1,p2)
#define __arm_vmladavaxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmladavaxq_s8 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
  int (*)[__ARM_mve_type_int32_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmladavaxq_s16 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int32_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmladavaxq_s32 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
  int (*)[__ARM_mve_type_uint32_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmladavaxq_u8 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint32_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmladavaxq_u16 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmladavaxq_u32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));})
23996 | ||
/* vmladavaq: multiply-add across vector with 32-bit accumulator.
   p0 is coerced to int32_t/uint32_t; p1 and p2 select the element
   variant (s/u x 8/16/32-bit).  */
#define vmladavaq(p0,p1,p2) __arm_vmladavaq(p0,p1,p2)
#define __arm_vmladavaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmladavaq_s8 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
  int (*)[__ARM_mve_type_int32_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmladavaq_s16 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int32_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmladavaq_s32 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
  int (*)[__ARM_mve_type_uint32_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmladavaq_u8 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint32_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmladavaq_u16 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmladavaq_u32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));})
24008 | ||
/* vminvq_p: predicated minimum across vector.
   p0 is a scalar of the vector's element type (coerced to the exact
   element width), p1 the vector; p2 forwarded unchanged.  */
#define vminvq_p(p0,p1,p2) __arm_vminvq_p(p0,p1,p2)
#define __arm_vminvq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8_t][__ARM_mve_type_int8x16_t]: __arm_vminvq_p_s8 (__ARM_mve_coerce(__p0, int8_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16_t][__ARM_mve_type_int16x8_t]: __arm_vminvq_p_s16 (__ARM_mve_coerce(__p0, int16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32_t][__ARM_mve_type_int32x4_t]: __arm_vminvq_p_s32 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8_t][__ARM_mve_type_uint8x16_t]: __arm_vminvq_p_u8 (__ARM_mve_coerce(__p0, uint8_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16_t][__ARM_mve_type_uint16x8_t]: __arm_vminvq_p_u16 (__ARM_mve_coerce(__p0, uint16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32_t][__ARM_mve_type_uint32x4_t]: __arm_vminvq_p_u32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
24019 | ||
/* vminavq_p: predicated minimum-of-absolute across vector.
   Mixed signedness: p0 is an UNSIGNED scalar of the element width,
   p1 the signed vector; p2 forwarded unchanged.  */
#define vminavq_p(p0,p1,p2) __arm_vminavq_p(p0,p1,p2)
#define __arm_vminavq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8_t][__ARM_mve_type_int8x16_t]: __arm_vminavq_p_s8 (__ARM_mve_coerce(__p0, uint8_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16_t][__ARM_mve_type_int16x8_t]: __arm_vminavq_p_s16 (__ARM_mve_coerce(__p0, uint16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32_t][__ARM_mve_type_int32x4_t]: __arm_vminavq_p_s32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
24027 | ||
/* vmaxvq_p: predicated maximum across vector.
   p0 is a scalar of the vector's element type, p1 the vector; p2
   forwarded unchanged.  Same table shape as vminvq_p.  */
#define vmaxvq_p(p0,p1,p2) __arm_vmaxvq_p(p0,p1,p2)
#define __arm_vmaxvq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8_t][__ARM_mve_type_int8x16_t]: __arm_vmaxvq_p_s8 (__ARM_mve_coerce(__p0, int8_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16_t][__ARM_mve_type_int16x8_t]: __arm_vmaxvq_p_s16 (__ARM_mve_coerce(__p0, int16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32_t][__ARM_mve_type_int32x4_t]: __arm_vmaxvq_p_s32 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8_t][__ARM_mve_type_uint8x16_t]: __arm_vmaxvq_p_u8 (__ARM_mve_coerce(__p0, uint8_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16_t][__ARM_mve_type_uint16x8_t]: __arm_vmaxvq_p_u16 (__ARM_mve_coerce(__p0, uint16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32_t][__ARM_mve_type_uint32x4_t]: __arm_vmaxvq_p_u32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
24038 | ||
/* vmaxavq_p: predicated maximum-of-absolute across vector.
   Mixed signedness like vminavq_p: p0 is an unsigned scalar, p1 the
   signed vector; p2 forwarded unchanged.  */
#define vmaxavq_p(p0,p1,p2) __arm_vmaxavq_p(p0,p1,p2)
#define __arm_vmaxavq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8_t][__ARM_mve_type_int8x16_t]: __arm_vmaxavq_p_s8 (__ARM_mve_coerce(__p0, uint8_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16_t][__ARM_mve_type_int16x8_t]: __arm_vmaxavq_p_s16 (__ARM_mve_coerce(__p0, uint16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32_t][__ARM_mve_type_int32x4_t]: __arm_vmaxavq_p_s32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
24046 | ||
/* vcmpltq_m: predicated signed compare-less-than.
   Combined table: vector-vector cases first, then vector-scalar (_n_)
   cases, so a scalar second argument resolves to the _n_ intrinsic
   automatically; p2 forwarded unchanged.  */
#define vcmpltq_m(p0,p1,p2) __arm_vcmpltq_m(p0,p1,p2)
#define __arm_vcmpltq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpltq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpltq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpltq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpltq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpltq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpltq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t), p2));})
24057 | ||
/* vcmpleq_m: predicated signed compare-less-or-equal.
   Combined vector-vector and vector-scalar (_n_) table, same layout
   as vcmpltq_m; p2 forwarded unchanged.  */
#define vcmpleq_m(p0,p1,p2) __arm_vcmpleq_m(p0,p1,p2)
#define __arm_vcmpleq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpleq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpleq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpleq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpleq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpleq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpleq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t), p2));})
24068 | ||
24069 | #define vcmphiq_m(p0,p1,p2) __arm_vcmphiq_m(p0,p1,p2) | |
24070 | #define __arm_vcmphiq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
24071 | __typeof(p1) __p1 = (p1); \ | |
24072 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
24073 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vcmphiq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t), p2), \ | |
24074 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vcmphiq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t), p2), \ | |
24075 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vcmphiq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t), p2), \ | |
24076 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmphiq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \ | |
24077 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmphiq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ | |
24078 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmphiq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) | |
24079 | ||
24080 | #define vcmpgtq_m(p0,p1,p2) __arm_vcmpgtq_m(p0,p1,p2) | |
24081 | #define __arm_vcmpgtq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
24082 | __typeof(p1) __p1 = (p1); \ | |
24083 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
24084 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpgtq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \ | |
24085 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpgtq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
24086 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpgtq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ | |
24087 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpgtq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t), p2), \ | |
24088 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpgtq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t), p2), \ | |
24089 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpgtq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t), p2));}) | |
24090 | ||
24091 | #define vshrntq(p0,p1,p2) __arm_vshrntq(p0,p1,p2) |
24092 | #define __arm_vshrntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
24093 | __typeof(p1) __p1 = (p1); \ | |
24094 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
24095 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vshrntq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
24096 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vshrntq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ | |
24097 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vshrntq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ | |
24098 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vshrntq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) | |
24099 | ||
24100 | #define vrshrntq(p0,p1,p2) __arm_vrshrntq(p0,p1,p2) | |
24101 | #define __arm_vrshrntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
24102 | __typeof(p1) __p1 = (p1); \ | |
24103 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
24104 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vrshrntq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
24105 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vrshrntq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ | |
24106 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vrshrntq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ | |
24107 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vrshrntq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) | |
24108 | ||
24109 | #define vmovlbq_m(p0,p1,p2) __arm_vmovlbq_m(p0,p1,p2) | |
24110 | #define __arm_vmovlbq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
24111 | __typeof(p1) __p1 = (p1); \ | |
24112 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
24113 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int8x16_t]: __arm_vmovlbq_m_s8 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \ | |
24114 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t]: __arm_vmovlbq_m_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
24115 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint8x16_t]: __arm_vmovlbq_m_u8 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \ | |
24116 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t]: __arm_vmovlbq_m_u16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), p2));}) | |
24117 | ||
24118 | #define vmovnbq_m(p0,p1,p2) __arm_vmovnbq_m(p0,p1,p2) | |
24119 | #define __arm_vmovnbq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
24120 | __typeof(p1) __p1 = (p1); \ | |
24121 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
24122 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vmovnbq_m_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
24123 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vmovnbq_m_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ | |
24124 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vmovnbq_m_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ | |
24125 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vmovnbq_m_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) | |
24126 | ||
24127 | #define vmovntq_m(p0,p1,p2) __arm_vmovntq_m(p0,p1,p2) | |
24128 | #define __arm_vmovntq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
24129 | __typeof(p1) __p1 = (p1); \ | |
24130 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
24131 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vmovntq_m_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
24132 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vmovntq_m_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ | |
24133 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vmovntq_m_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ | |
24134 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vmovntq_m_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) | |
24135 | ||
24136 | #define vshrnbq(p0,p1,p2) __arm_vshrnbq(p0,p1,p2) | |
24137 | #define __arm_vshrnbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
24138 | __typeof(p1) __p1 = (p1); \ | |
24139 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
24140 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vshrnbq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
24141 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vshrnbq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ | |
24142 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vshrnbq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ | |
24143 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vshrnbq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) | |
24144 | ||
24145 | #define vrshrnbq(p0,p1,p2) __arm_vrshrnbq(p0,p1,p2) | |
24146 | #define __arm_vrshrnbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
24147 | __typeof(p1) __p1 = (p1); \ | |
24148 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
24149 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vrshrnbq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
24150 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vrshrnbq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ | |
24151 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vrshrnbq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ | |
24152 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vrshrnbq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) | |
24153 | ||
24154 | #define vrev32q_m(p0,p1,p2) __arm_vrev32q_m(p0,p1,p2) | |
24155 | #define __arm_vrev32q_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
24156 | __typeof(p1) __p1 = (p1); \ | |
24157 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
24158 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrev32q_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \ | |
24159 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrev32q_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
24160 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrev32q_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \ | |
24161 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrev32q_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2));}) | |
24162 | ||
24163 | #define vqshruntq(p0,p1,p2) __arm_vqshruntq(p0,p1,p2) | |
24164 | #define __arm_vqshruntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
24165 | __typeof(p1) __p1 = (p1); \ | |
24166 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
24167 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshruntq_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
24168 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshruntq_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));}) | |
24169 | ||
24170 | #define vrev16q_m(p0,p1,p2) __arm_vrev16q_m(p0,p1,p2) | |
24171 | #define __arm_vrev16q_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
24172 | __typeof(p1) __p1 = (p1); \ | |
24173 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
24174 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrev16q_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \ | |
24175 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrev16q_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2));}) | |
24176 | ||
24177 | #define vqshrntq(p0,p1,p2) __arm_vqshrntq(p0,p1,p2) | |
24178 | #define __arm_vqshrntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
24179 | __typeof(p1) __p1 = (p1); \ | |
24180 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
24181 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshrntq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
24182 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshrntq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ | |
24183 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqshrntq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ | |
24184 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqshrntq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) | |
24185 | ||
24186 | #define vqrshruntq(p0,p1,p2) __arm_vqrshruntq(p0,p1,p2) | |
24187 | #define __arm_vqrshruntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
24188 | __typeof(p1) __p1 = (p1); \ | |
24189 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
24190 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshruntq_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
24191 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshruntq_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));}) | |
24192 | ||
24193 | #define vqrshrntq(p0,p1,p2) __arm_vqrshrntq(p0,p1,p2) | |
24194 | #define __arm_vqrshrntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
24195 | __typeof(p1) __p1 = (p1); \ | |
24196 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
24197 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshrntq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
24198 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshrntq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ | |
24199 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqrshrntq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ | |
24200 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqrshrntq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) | |
24201 | ||
24202 | #define vqshrnbq(p0,p1,p2) __arm_vqshrnbq(p0,p1,p2) | |
24203 | #define __arm_vqshrnbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
24204 | __typeof(p1) __p1 = (p1); \ | |
24205 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
24206 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshrnbq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
24207 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshrnbq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ | |
24208 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqshrnbq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ | |
24209 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqshrnbq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) | |
24210 | ||
24211 | #define vqmovuntq_m(p0,p1,p2) __arm_vqmovuntq_m(p0,p1,p2) | |
24212 | #define __arm_vqmovuntq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
24213 | __typeof(p1) __p1 = (p1); \ | |
24214 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
24215 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovuntq_m_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
24216 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovuntq_m_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));}) | |
24217 | ||
24218 | #define vqmovntq_m(p0,p1,p2) __arm_vqmovntq_m(p0,p1,p2) | |
24219 | #define __arm_vqmovntq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
24220 | __typeof(p1) __p1 = (p1); \ | |
24221 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
24222 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovntq_m_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
24223 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovntq_m_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ | |
24224 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqmovntq_m_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ | |
24225 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqmovntq_m_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) | |
24226 | ||
24227 | #define vqmovnbq_m(p0,p1,p2) __arm_vqmovnbq_m(p0,p1,p2) | |
24228 | #define __arm_vqmovnbq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
24229 | __typeof(p1) __p1 = (p1); \ | |
24230 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
24231 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovnbq_m_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
24232 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovnbq_m_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ | |
24233 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqmovnbq_m_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ | |
24234 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqmovnbq_m_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) | |
24235 | ||
24236 | #define vmovltq_m(p0,p1,p2) __arm_vmovltq_m(p0,p1,p2) | |
24237 | #define __arm_vmovltq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
24238 | __typeof(p1) __p1 = (p1); \ | |
24239 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
24240 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int8x16_t]: __arm_vmovltq_m_s8 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \ | |
24241 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t]: __arm_vmovltq_m_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
24242 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint8x16_t]: __arm_vmovltq_m_u8 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \ | |
24243 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t]: __arm_vmovltq_m_u16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), p2));}) | |
24244 | ||
24245 | #define vqmovunbq_m(p0,p1,p2) __arm_vqmovunbq_m(p0,p1,p2) | |
24246 | #define __arm_vqmovunbq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
24247 | __typeof(p1) __p1 = (p1); \ | |
24248 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
24249 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovunbq_m_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
24250 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovunbq_m_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));}) | |
24251 | ||
24252 | #define vaddlvaq_p(p0,p1,p2) __arm_vaddlvaq_p(p0,p1,p2) | |
24253 | #define __arm_vaddlvaq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
24254 | __typeof(p1) __p1 = (p1); \ | |
24255 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
24256 | int (*)[__ARM_mve_type_int64_t][__ARM_mve_type_int32x4_t]: __arm_vaddlvaq_p_s32 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ | |
24257 | int (*)[__ARM_mve_type_uint64_t][__ARM_mve_type_uint32x4_t]: __arm_vaddlvaq_p_u32 (__ARM_mve_coerce(__p0, uint64_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) | |
24258 | ||
24259 | #define vmlaldavaq(p0,p1,p2) __arm_vmlaldavaq(p0,p1,p2) | |
24260 | #define __arm_vmlaldavaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
24261 | __typeof(p1) __p1 = (p1); \ | |
24262 | __typeof(p2) __p2 = (p2); \ | |
24263 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
24264 | int (*)[__ARM_mve_type_int64_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlaldavaq_s16 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \ | |
24265 | int (*)[__ARM_mve_type_int64_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlaldavaq_s32 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \ | |
24266 | int (*)[__ARM_mve_type_uint64_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmlaldavaq_u16 (__ARM_mve_coerce(__p0, uint64_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \ | |
24267 | int (*)[__ARM_mve_type_uint64_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmlaldavaq_u32 (__ARM_mve_coerce(__p0, uint64_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));}) | |
24268 | ||
24269 | #define vmlaldavaxq(p0,p1,p2) __arm_vmlaldavaxq(p0,p1,p2) | |
24270 | #define __arm_vmlaldavaxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
24271 | __typeof(p1) __p1 = (p1); \ | |
24272 | __typeof(p2) __p2 = (p2); \ | |
24273 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
24274 | int (*)[__ARM_mve_type_int64_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlaldavaxq_s16 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \ | |
24275 | int (*)[__ARM_mve_type_int64_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlaldavaxq_s32 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));}) | |
24276 | ||
24277 | #define vmlaldavq_p(p0,p1,p2) __arm_vmlaldavq_p(p0,p1,p2) | |
24278 | #define __arm_vmlaldavq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
24279 | __typeof(p1) __p1 = (p1); \ | |
24280 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
24281 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlaldavq_p_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
24282 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlaldavq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ | |
24283 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmlaldavq_p_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ | |
24284 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmlaldavq_p_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) | |
24285 | ||
24286 | #define vmlaldavxq_p(p0,p1,p2) __arm_vmlaldavxq_p(p0,p1,p2) | |
24287 | #define __arm_vmlaldavxq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
24288 | __typeof(p1) __p1 = (p1); \ | |
24289 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
24290 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlaldavxq_p_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
24291 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlaldavxq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));}) | |
24292 | ||
24293 | #define vmlsldavaq(p0,p1,p2) __arm_vmlsldavaq(p0,p1,p2) | |
24294 | #define __arm_vmlsldavaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
24295 | __typeof(p1) __p1 = (p1); \ | |
24296 | __typeof(p2) __p2 = (p2); \ | |
24297 | _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
24298 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsldavaq_s16(__p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \ | |
24299 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsldavaq_s32(__p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));}) | |
24300 | ||
24301 | #define vmlsldavaxq(p0,p1,p2) __arm_vmlsldavaxq(p0,p1,p2) | |
24302 | #define __arm_vmlsldavaxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
24303 | __typeof(p1) __p1 = (p1); \ | |
24304 | __typeof(p2) __p2 = (p2); \ | |
24305 | _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
24306 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsldavaxq_s16(__p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \ | |
24307 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsldavaxq_s32(__p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));}) | |
24308 | ||
24309 | #define vmlsldavq_p(p0,p1,p2) __arm_vmlsldavq_p(p0,p1,p2) | |
24310 | #define __arm_vmlsldavq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
24311 | __typeof(p1) __p1 = (p1); \ | |
24312 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
24313 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsldavq_p_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
24314 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsldavq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));}) | |
24315 | ||
24316 | #define vmlsldavxq_p(p0,p1,p2) __arm_vmlsldavxq_p(p0,p1,p2) | |
24317 | #define __arm_vmlsldavxq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
24318 | __typeof(p1) __p1 = (p1); \ | |
24319 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
24320 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsldavxq_p_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
24321 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsldavxq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));}) | |
24322 | ||
24323 | #define vrmlaldavhaxq(p0,p1,p2) __arm_vrmlaldavhaxq(p0,p1,p2) | |
24324 | #define __arm_vrmlaldavhaxq(p0,p1,p2) __arm_vrmlaldavhaxq_s32(p0,p1,p2) | |
24325 | ||
24326 | #define vrmlaldavhq_p(p0,p1,p2) __arm_vrmlaldavhq_p(p0,p1,p2) | |
24327 | #define __arm_vrmlaldavhq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
24328 | __typeof(p1) __p1 = (p1); \ | |
24329 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
24330 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrmlaldavhq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ | |
24331 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrmlaldavhq_p_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) | |
24332 | ||
24333 | #define vrmlaldavhxq_p(p0,p1,p2) __arm_vrmlaldavhxq_p(p0,p1,p2) | |
24334 | #define __arm_vrmlaldavhxq_p(p0,p1,p2) __arm_vrmlaldavhxq_p_s32(p0,p1,p2) | |
24335 | ||
24336 | #define vrmlsldavhaq(p0,p1,p2) __arm_vrmlsldavhaq(p0,p1,p2) | |
24337 | #define __arm_vrmlsldavhaq(p0,p1,p2) __arm_vrmlsldavhaq_s32(p0,p1,p2) | |
24338 | ||
24339 | #define vrmlsldavhaxq(p0,p1,p2) __arm_vrmlsldavhaxq(p0,p1,p2) | |
24340 | #define __arm_vrmlsldavhaxq(p0,p1,p2) __arm_vrmlsldavhaxq_s32(p0,p1,p2) | |
24341 | ||
24342 | #define vrmlsldavhq_p(p0,p1,p2) __arm_vrmlsldavhq_p(p0,p1,p2) | |
24343 | #define __arm_vrmlsldavhq_p(p0,p1,p2) __arm_vrmlsldavhq_p_s32(p0,p1,p2) | |
24344 | ||
24345 | #define vrmlsldavhxq_p(p0,p1,p2) __arm_vrmlsldavhxq_p(p0,p1,p2) | |
24346 | #define __arm_vrmlsldavhxq_p(p0,p1,p2) __arm_vrmlsldavhxq_p_s32(p0,p1,p2) | |
24347 | ||
24348 | #define vsubq_m(p0,p1,p2,p3) __arm_vsubq_m(p0,p1,p2,p3) |
24349 | #define __arm_vsubq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ | |
24350 | __typeof(p1) __p1 = (p1); \ | |
24351 | __typeof(p2) __p2 = (p2); \ | |
24352 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
8eb3b6b9 SP |
24353 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vsubq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \ |
24354 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vsubq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \ | |
24355 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vsubq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \ | |
24356 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vsubq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \ | |
24357 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vsubq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \ | |
24358 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vsubq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3), \ | |
db5db9d2 SP |
24359 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsubq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \ |
24360 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsubq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ | |
24361 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsubq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ | |
24362 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsubq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ | |
24363 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsubq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ | |
24364 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsubq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) | |
24365 | ||
24366 | #define vabavq_p(p0,p1,p2,p3) __arm_vabavq_p(p0,p1,p2,p3) | |
24367 | #define __arm_vabavq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ | |
24368 | __typeof(p1) __p1 = (p1); \ | |
24369 | __typeof(p2) __p2 = (p2); \ | |
24370 | _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
24371 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vabavq_p_s8(__p0, __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \ | |
24372 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vabavq_p_s16(__p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ | |
24373 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vabavq_p_s32(__p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ | |
24374 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vabavq_p_u8(__p0, __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ | |
24375 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vabavq_p_u16(__p0, __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ | |
24376 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vabavq_p_u32(__p0, __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) | |
24377 | ||
8eb3b6b9 SP |
24378 | #define vabdq_m(p0,p1,p2,p3) __arm_vabdq_m(p0,p1,p2,p3) |
24379 | #define __arm_vabdq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ | |
24380 | __typeof(p1) __p1 = (p1); \ | |
24381 | __typeof(p2) __p2 = (p2); \ | |
24382 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
24383 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vabdq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \ | |
24384 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vabdq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ | |
24385 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vabdq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ | |
24386 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vabdq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ | |
24387 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vabdq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ | |
24388 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vabdq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) | |
24389 | ||
24390 | #define vandq_m(p0,p1,p2,p3) __arm_vandq_m(p0,p1,p2,p3) | |
24391 | #define __arm_vandq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ | |
24392 | __typeof(p1) __p1 = (p1); \ | |
24393 | __typeof(p2) __p2 = (p2); \ | |
24394 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
24395 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vandq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \ | |
24396 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vandq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ | |
24397 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vandq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ | |
24398 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vandq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ | |
24399 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vandq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ | |
24400 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vandq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) | |
24401 | ||
24402 | #define vbicq_m(p0,p1,p2,p3) __arm_vbicq_m(p0,p1,p2,p3) | |
24403 | #define __arm_vbicq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ | |
24404 | __typeof(p1) __p1 = (p1); \ | |
24405 | __typeof(p2) __p2 = (p2); \ | |
24406 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
24407 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vbicq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \ | |
24408 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vbicq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ | |
24409 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vbicq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ | |
24410 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vbicq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ | |
24411 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vbicq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ | |
24412 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vbicq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) | |
24413 | ||
532e9e24 SP |
24414 | #define vbrsrq_m(p0,p1,p2,p3) __arm_vbrsrq_m(p0,p1,p2,p3) |
24415 | #define __arm_vbrsrq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ | |
8eb3b6b9 SP |
24416 | __typeof(p1) __p1 = (p1); \ |
24417 | __typeof(p2) __p2 = (p2); \ | |
532e9e24 SP |
24418 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ |
24419 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vbrsrq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __p2, p3), \ | |
24420 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vbrsrq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __p2, p3), \ | |
24421 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vbrsrq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __p2, p3), \ | |
24422 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vbrsrq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __p2, p3), \ | |
24423 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vbrsrq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __p2, p3), \ | |
24424 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vbrsrq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __p2, p3));}) | |
8eb3b6b9 | 24425 | |
532e9e24 SP |
24426 | #define vcaddq_rot270_m(p0,p1,p2,p3) __arm_vcaddq_rot270_m(p0,p1,p2,p3) |
24427 | #define __arm_vcaddq_rot270_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ | |
8eb3b6b9 SP |
24428 | __typeof(p1) __p1 = (p1); \ |
24429 | __typeof(p2) __p2 = (p2); \ | |
24430 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
532e9e24 SP |
24431 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot270_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \ |
24432 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot270_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ | |
24433 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot270_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ | |
24434 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot270_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ | |
24435 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot270_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ | |
24436 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot270_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) | |
8eb3b6b9 | 24437 | |
532e9e24 SP |
24438 | #define vcaddq_rot90_m(p0,p1,p2,p3) __arm_vcaddq_rot90_m(p0,p1,p2,p3) |
24439 | #define __arm_vcaddq_rot90_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ | |
8eb3b6b9 SP |
24440 | __typeof(p1) __p1 = (p1); \ |
24441 | __typeof(p2) __p2 = (p2); \ | |
24442 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
532e9e24 SP |
24443 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot90_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \ |
24444 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot90_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ | |
24445 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot90_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ | |
24446 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot90_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ | |
24447 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot90_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ | |
24448 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot90_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) | |
8eb3b6b9 | 24449 | |
532e9e24 SP |
24450 | #define veorq_m(p0,p1,p2,p3) __arm_veorq_m(p0,p1,p2,p3) |
24451 | #define __arm_veorq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ | |
8eb3b6b9 SP |
24452 | __typeof(p1) __p1 = (p1); \ |
24453 | __typeof(p2) __p2 = (p2); \ | |
24454 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
532e9e24 SP |
24455 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_veorq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \ |
24456 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_veorq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ | |
24457 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_veorq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ | |
24458 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_veorq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ | |
24459 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_veorq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ | |
24460 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_veorq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) | |
8eb3b6b9 | 24461 | |
532e9e24 SP |
24462 | #define vmladavaq_p(p0,p1,p2,p3) __arm_vmladavaq_p(p0,p1,p2,p3) |
24463 | #define __arm_vmladavaq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ | |
8eb3b6b9 SP |
24464 | __typeof(p1) __p1 = (p1); \ |
24465 | __typeof(p2) __p2 = (p2); \ | |
24466 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
532e9e24 SP |
24467 | int (*)[__ARM_mve_type_int32_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmladavaq_p_s8 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \ |
24468 | int (*)[__ARM_mve_type_int32_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmladavaq_p_s16 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ | |
24469 | int (*)[__ARM_mve_type_int32_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmladavaq_p_s32 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ | |
24470 | int (*)[__ARM_mve_type_uint32_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmladavaq_p_u8 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ | |
24471 | int (*)[__ARM_mve_type_uint32_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmladavaq_p_u16 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ | |
24472 | int (*)[__ARM_mve_type_uint32_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmladavaq_p_u32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) | |
8eb3b6b9 SP |
24473 | |
24474 | #define vornq_m(p0,p1,p2,p3) __arm_vornq_m(p0,p1,p2,p3) | |
24475 | #define __arm_vornq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ | |
24476 | __typeof(p1) __p1 = (p1); \ | |
24477 | __typeof(p2) __p2 = (p2); \ | |
24478 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
24479 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vornq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \ | |
24480 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vornq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ | |
24481 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vornq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ | |
24482 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vornq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ | |
24483 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vornq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ | |
24484 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vornq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) | |
24485 | ||
24486 | #define vorrq_m(p0,p1,p2,p3) __arm_vorrq_m(p0,p1,p2,p3) | |
24487 | #define __arm_vorrq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ | |
24488 | __typeof(p1) __p1 = (p1); \ | |
24489 | __typeof(p2) __p2 = (p2); \ | |
24490 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
24491 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vorrq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \ | |
24492 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vorrq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ | |
24493 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vorrq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ | |
24494 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vorrq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ | |
24495 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vorrq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ | |
24496 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vorrq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) | |
24497 | ||
532e9e24 SP |
24498 | #define vaddq_m(p0,p1,p2,p3) __arm_vaddq_m(p0,p1,p2,p3) |
24499 | #define __arm_vaddq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ | |
8eb3b6b9 SP |
24500 | __typeof(p1) __p1 = (p1); \ |
24501 | __typeof(p2) __p2 = (p2); \ | |
24502 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
532e9e24 SP |
24503 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vaddq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \ |
24504 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vaddq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \ | |
24505 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vaddq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \ | |
24506 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vaddq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \ | |
24507 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vaddq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \ | |
24508 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vaddq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3), \ | |
24509 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vaddq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \ | |
24510 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vaddq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ | |
24511 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vaddq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ | |
24512 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vaddq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ | |
24513 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vaddq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ | |
24514 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vaddq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) | |
24515 | ||
24516 | #define vmulq_m(p0,p1,p2,p3) __arm_vmulq_m(p0,p1,p2,p3) | |
24517 | #define __arm_vmulq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ | |
24518 | __typeof(p1) __p1 = (p1); \ | |
24519 | __typeof(p2) __p2 = (p2); \ | |
24520 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
24521 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vmulq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \ | |
24522 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vmulq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \ | |
24523 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vmulq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \ | |
24524 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vmulq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \ | |
24525 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vmulq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \ | |
24526 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vmulq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3), \ | |
24527 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \ | |
24528 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ | |
24529 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ | |
24530 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ | |
24531 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ | |
24532 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) | |
24533 | ||
4ff68575 SP |
24534 | #define vstrbq(p0,p1) __arm_vstrbq(p0,p1) |
24535 | #define __arm_vstrbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ | |
24536 | __typeof(p1) __p1 = (p1); \ | |
24537 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
24538 | int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16_t]: __arm_vstrbq_s8 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, int8x16_t)), \ | |
24539 | int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vstrbq_s16 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, int16x8_t)), \ | |
24540 | int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrbq_s32 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, int32x4_t)), \ | |
24541 | int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vstrbq_u8 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t)), \ | |
24542 | int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vstrbq_u16 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \ | |
24543 | int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrbq_u32 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint32x4_t)));}) | |
24544 | ||
24545 | #define vstrbq_scatter_offset(p0,p1,p2) __arm_vstrbq_scatter_offset(p0,p1,p2) | |
24546 | #define __arm_vstrbq_scatter_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
24547 | __typeof(p1) __p1 = (p1); \ | |
24548 | __typeof(p2) __p2 = (p2); \ | |
24549 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
24550 | int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vstrbq_scatter_offset_s8 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \ | |
24551 | int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrbq_scatter_offset_s16 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \ | |
24552 | int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrbq_scatter_offset_s32 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \ | |
24553 | int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vstrbq_scatter_offset_u8 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t)), \ | |
24554 | int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrbq_scatter_offset_u16 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \ | |
24555 | int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrbq_scatter_offset_u32 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));}) | |
24556 | ||
24557 | #define vstrwq_scatter_base(p0,p1,p2) __arm_vstrwq_scatter_base(p0,p1,p2) | |
24558 | #define __arm_vstrwq_scatter_base(p0,p1,p2) ({ __typeof(p2) __p2 = (p2); \ | |
24559 | _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \ | |
24560 | int (*)[__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_base_s32(p0, p1, __ARM_mve_coerce(__p2, int32x4_t)), \ | |
24561 | int (*)[__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_base_u32(p0, p1, __ARM_mve_coerce(__p2, uint32x4_t)));}) | |
24562 | ||
535a8645 SP |
24563 | #define vldrbq_gather_offset(p0,p1) __arm_vldrbq_gather_offset(p0,p1) |
24564 | #define __arm_vldrbq_gather_offset(p0,p1) ({ __typeof(p0) __p0 = (p0); \ | |
24565 | __typeof(p1) __p1 = (p1); \ | |
24566 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
24567 | int (*)[__ARM_mve_type_int8_t_const_ptr][__ARM_mve_type_uint8x16_t]: __arm_vldrbq_gather_offset_s8 (__ARM_mve_coerce(__p0, int8_t const *), __ARM_mve_coerce(__p1, uint8x16_t)), \ | |
24568 | int (*)[__ARM_mve_type_int8_t_const_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrbq_gather_offset_s16 (__ARM_mve_coerce(__p0, int8_t const *), __ARM_mve_coerce(__p1, uint16x8_t)), \ | |
24569 | int (*)[__ARM_mve_type_int8_t_const_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrbq_gather_offset_s32 (__ARM_mve_coerce(__p0, int8_t const *), __ARM_mve_coerce(__p1, uint32x4_t)), \ | |
24570 | int (*)[__ARM_mve_type_uint8_t_const_ptr][__ARM_mve_type_uint8x16_t]: __arm_vldrbq_gather_offset_u8 (__ARM_mve_coerce(__p0, uint8_t const *), __ARM_mve_coerce(__p1, uint8x16_t)), \ | |
24571 | int (*)[__ARM_mve_type_uint8_t_const_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrbq_gather_offset_u16 (__ARM_mve_coerce(__p0, uint8_t const *), __ARM_mve_coerce(__p1, uint16x8_t)), \ | |
24572 | int (*)[__ARM_mve_type_uint8_t_const_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrbq_gather_offset_u32 (__ARM_mve_coerce(__p0, uint8_t const *), __ARM_mve_coerce(__p1, uint32x4_t)));}) | |
24573 | ||
24574 | #define vstrbq_p(p0,p1,p2) __arm_vstrbq_p(p0,p1,p2) | |
24575 | #define __arm_vstrbq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
24576 | __typeof(p1) __p1 = (p1); \ | |
24577 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
24578 | int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16_t]: __arm_vstrbq_p_s8 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, int8x16_t), p2), \ | |
24579 | int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vstrbq_p_s16 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
24580 | int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrbq_p_s32 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, int32x4_t), p2), \ | |
24581 | int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vstrbq_p_u8 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t), p2), \ | |
24582 | int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vstrbq_p_u16 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ | |
24583 | int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrbq_p_u32 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) | |
24584 | ||
/* Polymorphic predicated byte scatter store: dispatches via _Generic on the
   types of the base pointer P0, the offset vector P1 and the value vector P2
   to the matching type-specific intrinsic; P3 is the predicate.  */
#define vstrbq_scatter_offset_p(p0,p1,p2,p3) __arm_vstrbq_scatter_offset_p(p0,p1,p2,p3)
#define __arm_vstrbq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vstrbq_scatter_offset_p_s8 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrbq_scatter_offset_p_s16 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrbq_scatter_offset_p_s32 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vstrbq_scatter_offset_p_u8 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrbq_scatter_offset_p_u16 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrbq_scatter_offset_p_u32 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
24596 | ||
/* Polymorphic predicated word scatter store with vector base: dispatches
   only on the value vector P2 (signed/unsigned 32-bit); P0 is the base
   address vector, P1 the immediate offset, P3 the predicate.  */
#define vstrwq_scatter_base_p(p0,p1,p2,p3) __arm_vstrwq_scatter_base_p(p0,p1,p2,p3)
#define __arm_vstrwq_scatter_base_p(p0,p1,p2,p3) ({ __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_base_p_s32 (p0, p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_base_p_u32 (p0, p1, __ARM_mve_coerce(__p2, uint32x4_t), p3));})
24602 | ||
bf1e3d5a SP |
/* Polymorphic contiguous load: dispatches via _Generic on the (const)
   element-pointer type of P0 to the matching type-specific vld1q intrinsic.  */
#define vld1q(p0) __arm_vld1q(p0)
#define __arm_vld1q(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8_t_const_ptr]: __arm_vld1q_s8 (__ARM_mve_coerce(__p0, int8_t const *)), \
  int (*)[__ARM_mve_type_int16_t_const_ptr]: __arm_vld1q_s16 (__ARM_mve_coerce(__p0, int16_t const *)), \
  int (*)[__ARM_mve_type_int32_t_const_ptr]: __arm_vld1q_s32 (__ARM_mve_coerce(__p0, int32_t const *)), \
  int (*)[__ARM_mve_type_uint8_t_const_ptr]: __arm_vld1q_u8 (__ARM_mve_coerce(__p0, uint8_t const *)), \
  int (*)[__ARM_mve_type_uint16_t_const_ptr]: __arm_vld1q_u16 (__ARM_mve_coerce(__p0, uint16_t const *)), \
  int (*)[__ARM_mve_type_uint32_t_const_ptr]: __arm_vld1q_u32 (__ARM_mve_coerce(__p0, uint32_t const *)));})
24612 | ||
/* Polymorphic halfword gather load with offsets: dispatches on the base
   pointer type P0 and offset vector type P1.  */
#define vldrhq_gather_offset(p0,p1) __arm_vldrhq_gather_offset(p0,p1)
#define __arm_vldrhq_gather_offset(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16_t_const_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_s16 (__ARM_mve_coerce(__p0, int16_t const *), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_int16_t_const_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_offset_s32 (__ARM_mve_coerce(__p0, int16_t const *), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_uint16_t_const_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_u16 (__ARM_mve_coerce(__p0, uint16_t const *), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint16_t_const_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_offset_u32 (__ARM_mve_coerce(__p0, uint16_t const *), __ARM_mve_coerce(__p1, uint32x4_t)));})

/* Zero-predicated variant of the above; P2 is the predicate.  */
#define vldrhq_gather_offset_z(p0,p1,p2) __arm_vldrhq_gather_offset_z(p0,p1,p2)
#define __arm_vldrhq_gather_offset_z(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16_t_const_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_z_s16 (__ARM_mve_coerce(__p0, int16_t const *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_int16_t_const_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_offset_z_s32 (__ARM_mve_coerce(__p0, int16_t const *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
  int (*)[__ARM_mve_type_uint16_t_const_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_z_u16 (__ARM_mve_coerce(__p0, uint16_t const *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16_t_const_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_offset_z_u32 (__ARM_mve_coerce(__p0, uint16_t const *), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
24630 | ||
/* Polymorphic halfword gather load with shifted offsets: dispatches on the
   base pointer type P0 and offset vector type P1.  */
#define vldrhq_gather_shifted_offset(p0,p1) __arm_vldrhq_gather_shifted_offset(p0,p1)
#define __arm_vldrhq_gather_shifted_offset(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16_t_const_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_s16 (__ARM_mve_coerce(__p0, int16_t const *), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_int16_t_const_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_s32 (__ARM_mve_coerce(__p0, int16_t const *), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_uint16_t_const_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_u16 (__ARM_mve_coerce(__p0, uint16_t const *), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint16_t_const_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_u32 (__ARM_mve_coerce(__p0, uint16_t const *), __ARM_mve_coerce(__p1, uint32x4_t)));})

/* Zero-predicated variant of the above; P2 is the predicate.  */
#define vldrhq_gather_shifted_offset_z(p0,p1,p2) __arm_vldrhq_gather_shifted_offset_z(p0,p1,p2)
#define __arm_vldrhq_gather_shifted_offset_z(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16_t_const_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_z_s16 (__ARM_mve_coerce(__p0, int16_t const *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_int16_t_const_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_z_s32 (__ARM_mve_coerce(__p0, int16_t const *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
  int (*)[__ARM_mve_type_uint16_t_const_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_z_u16 (__ARM_mve_coerce(__p0, uint16_t const *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16_t_const_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_z_u32 (__ARM_mve_coerce(__p0, uint16_t const *), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
24648 | ||
4cc23303 SP |
/* Polymorphic word gather loads: dispatch only on the base pointer type P0
   (signed/unsigned 32-bit); P1 is the offset vector, P2 (in the _z forms)
   the predicate.  */
#define vldrwq_gather_offset(p0,p1) __arm_vldrwq_gather_offset(p0,p1)
#define __arm_vldrwq_gather_offset(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int32_t_const_ptr]: __arm_vldrwq_gather_offset_s32 (__ARM_mve_coerce(__p0, int32_t const *), p1), \
  int (*)[__ARM_mve_type_uint32_t_const_ptr]: __arm_vldrwq_gather_offset_u32 (__ARM_mve_coerce(__p0, uint32_t const *), p1));})

#define vldrwq_gather_offset_z(p0,p1,p2) __arm_vldrwq_gather_offset_z(p0,p1,p2)
#define __arm_vldrwq_gather_offset_z(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int32_t_const_ptr]: __arm_vldrwq_gather_offset_z_s32 (__ARM_mve_coerce(__p0, int32_t const *), p1, p2), \
  int (*)[__ARM_mve_type_uint32_t_const_ptr]: __arm_vldrwq_gather_offset_z_u32 (__ARM_mve_coerce(__p0, uint32_t const *), p1, p2));})

#define vldrwq_gather_shifted_offset(p0,p1) __arm_vldrwq_gather_shifted_offset(p0,p1)
#define __arm_vldrwq_gather_shifted_offset(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int32_t_const_ptr]: __arm_vldrwq_gather_shifted_offset_s32 (__ARM_mve_coerce(__p0, int32_t const *), p1), \
  int (*)[__ARM_mve_type_uint32_t_const_ptr]: __arm_vldrwq_gather_shifted_offset_u32 (__ARM_mve_coerce(__p0, uint32_t const *), p1));})

#define vldrwq_gather_shifted_offset_z(p0,p1,p2) __arm_vldrwq_gather_shifted_offset_z(p0,p1,p2)
#define __arm_vldrwq_gather_shifted_offset_z(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int32_t_const_ptr]: __arm_vldrwq_gather_shifted_offset_z_s32 (__ARM_mve_coerce(__p0, int32_t const *), p1, p2), \
  int (*)[__ARM_mve_type_uint32_t_const_ptr]: __arm_vldrwq_gather_shifted_offset_z_u32 (__ARM_mve_coerce(__p0, uint32_t const *), p1, p2));})
24672 | ||
5cad47e0 SP |
/* Polymorphic contiguous store: dispatches on the pointer type P0 and the
   value vector type P1 to the matching type-specific vst1q intrinsic.  */
#define vst1q(p0,p1) __arm_vst1q(p0,p1)
#define __arm_vst1q(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16_t]: __arm_vst1q_s8 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vst1q_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vst1q_s32 (__ARM_mve_coerce(__p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vst1q_u8 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vst1q_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vst1q_u32 (__ARM_mve_coerce(__p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t)));})
24683 | ||
/* Polymorphic halfword contiguous store (and predicated form vstrhq_p):
   dispatches on the pointer type P0 and value vector type P1; the 32-bit
   arms narrow each element to 16 bits via the _s32/_u32 intrinsics.  */
#define vstrhq(p0,p1) __arm_vstrhq(p0,p1)
#define __arm_vstrhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vstrhq_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrhq_s32 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_u32 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t)));})

/* Predicated variant; P2 is the predicate.  */
#define vstrhq_p(p0,p1,p2) __arm_vstrhq_p(p0,p1,p2)
#define __arm_vstrhq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vstrhq_p_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrhq_p_s32 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_p_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_p_u32 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
24701 | ||
/* Polymorphic halfword scatter stores (plain, predicated, and shifted-offset
   forms): dispatch on base pointer P0, offset vector P1 and value vector P2;
   P3 (in the _p forms) is the predicate.
   NOTE(review): these four macros are also defined again further down in
   this file with the same bodies.  */
#define vstrhq_scatter_offset_p(p0,p1,p2,p3) __arm_vstrhq_scatter_offset_p(p0,p1,p2,p3)
#define __arm_vstrhq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_offset_p_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_offset_p_s32 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_offset_p_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_offset_p_u32 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})

#define vstrhq_scatter_offset(p0,p1,p2) __arm_vstrhq_scatter_offset(p0,p1,p2)
#define __arm_vstrhq_scatter_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_offset_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_offset_s32 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_offset_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_offset_u32 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));})

#define vstrhq_scatter_shifted_offset_p(p0,p1,p2,p3) __arm_vstrhq_scatter_shifted_offset_p(p0,p1,p2,p3)
#define __arm_vstrhq_scatter_shifted_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_shifted_offset_p_s32 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_shifted_offset_p_u32 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})

#define vstrhq_scatter_shifted_offset(p0,p1,p2) __arm_vstrhq_scatter_shifted_offset(p0,p1,p2)
#define __arm_vstrhq_scatter_shifted_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_shifted_offset_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_shifted_offset_s32 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_shifted_offset_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_shifted_offset_u32 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));})
24741 | ||
24742 | ||
/* Polymorphic word contiguous store (and predicated form vstrwq_p):
   dispatches on pointer type P0 and value vector type P1.  */
#define vstrwq(p0,p1) __arm_vstrwq(p0,p1)
#define __arm_vstrwq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_s32 (__ARM_mve_coerce(__p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_u32 (__ARM_mve_coerce(__p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t)));})

/* Predicated variant; P2 is the predicate.  */
#define vstrwq_p(p0,p1,p2) __arm_vstrwq_p(p0,p1,p2)
#define __arm_vstrwq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_p_s32 (__ARM_mve_coerce(__p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_p_u32 (__ARM_mve_coerce(__p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
24756 | ||
7a5fffa5 SP |
/* Polymorphic doubleword scatter store with vector base (predicated and
   plain): dispatches only on the 64-bit value vector P2; P0 is the base
   address vector, P1 the immediate offset, P3 the predicate.  */
#define vstrdq_scatter_base_p(p0,p1,p2,p3) __arm_vstrdq_scatter_base_p(p0,p1,p2,p3)
#define __arm_vstrdq_scatter_base_p(p0,p1,p2,p3) ({ __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_base_p_s64 (p0, p1, __ARM_mve_coerce(__p2, int64x2_t), p3), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_base_p_u64 (p0, p1, __ARM_mve_coerce(__p2, uint64x2_t), p3));})

#define vstrdq_scatter_base(p0,p1,p2) __arm_vstrdq_scatter_base(p0,p1,p2)
#define __arm_vstrdq_scatter_base(p0,p1,p2) ({ __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_base_s64 (p0, p1, __ARM_mve_coerce(__p2, int64x2_t)), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_base_u64 (p0, p1, __ARM_mve_coerce(__p2, uint64x2_t)));})
24768 | ||
/* Polymorphic doubleword scatter store with offsets (predicated and plain):
   dispatches on base pointer P0 and value vector P2; the offset vector P1
   is passed through unchecked.  */
#define vstrdq_scatter_offset_p(p0,p1,p2,p3) __arm_vstrdq_scatter_offset_p(p0,p1,p2,p3)
#define __arm_vstrdq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int64_t_ptr][__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_offset_p_s64 (__ARM_mve_coerce(__p0, int64_t *), __p1, __ARM_mve_coerce(__p2, int64x2_t), p3), \
  int (*)[__ARM_mve_type_uint64_t_ptr][__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_offset_p_u64 (__ARM_mve_coerce(__p0, uint64_t *), __p1, __ARM_mve_coerce(__p2, uint64x2_t), p3));})

#define vstrdq_scatter_offset(p0,p1,p2) __arm_vstrdq_scatter_offset(p0,p1,p2)
#define __arm_vstrdq_scatter_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int64_t_ptr][__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_offset_s64 (__ARM_mve_coerce(__p0, int64_t *), __p1, __ARM_mve_coerce(__p2, int64x2_t)), \
  int (*)[__ARM_mve_type_uint64_t_ptr][__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_offset_u64 (__ARM_mve_coerce(__p0, uint64_t *), __p1, __ARM_mve_coerce(__p2, uint64x2_t)));})
24784 | ||
/* Polymorphic doubleword scatter store with shifted offsets (predicated and
   plain): dispatches on base pointer P0 and value vector P2.  */
#define vstrdq_scatter_shifted_offset_p(p0,p1,p2,p3) __arm_vstrdq_scatter_shifted_offset_p(p0,p1,p2,p3)
#define __arm_vstrdq_scatter_shifted_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int64_t_ptr][__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_shifted_offset_p_s64 (__ARM_mve_coerce(__p0, int64_t *), __p1, __ARM_mve_coerce(__p2, int64x2_t), p3), \
  int (*)[__ARM_mve_type_uint64_t_ptr][__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_shifted_offset_p_u64 (__ARM_mve_coerce(__p0, uint64_t *), __p1, __ARM_mve_coerce(__p2, uint64x2_t), p3));})

#define vstrdq_scatter_shifted_offset(p0,p1,p2) __arm_vstrdq_scatter_shifted_offset(p0,p1,p2)
#define __arm_vstrdq_scatter_shifted_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int64_t_ptr][__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_shifted_offset_s64 (__ARM_mve_coerce(__p0, int64_t *), __p1, __ARM_mve_coerce(__p2, int64x2_t)), \
  int (*)[__ARM_mve_type_uint64_t_ptr][__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_shifted_offset_u64 (__ARM_mve_coerce(__p0, uint64_t *), __p1, __ARM_mve_coerce(__p2, uint64x2_t)));})
24800 | ||
/* NOTE(review): the four vstrhq_scatter_* macros below re-define macros that
   already appear earlier in this file with the same replacement lists, which
   C permits as a benign redefinition (C11 6.10.3p2).  They could be removed
   in a cleanup; kept here unchanged.  */
#define vstrhq_scatter_offset(p0,p1,p2) __arm_vstrhq_scatter_offset(p0,p1,p2)
#define __arm_vstrhq_scatter_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_offset_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_offset_s32 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_offset_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_offset_u32 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));})

#define vstrhq_scatter_offset_p(p0,p1,p2,p3) __arm_vstrhq_scatter_offset_p(p0,p1,p2,p3)
#define __arm_vstrhq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_offset_p_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_offset_p_s32 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_offset_p_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_offset_p_u32 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})

#define vstrhq_scatter_shifted_offset(p0,p1,p2) __arm_vstrhq_scatter_shifted_offset(p0,p1,p2)
#define __arm_vstrhq_scatter_shifted_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_shifted_offset_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_shifted_offset_s32 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_shifted_offset_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_shifted_offset_u32 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));})

#define vstrhq_scatter_shifted_offset_p(p0,p1,p2,p3) __arm_vstrhq_scatter_shifted_offset_p(p0,p1,p2,p3)
#define __arm_vstrhq_scatter_shifted_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_shifted_offset_p_s32 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_shifted_offset_p_u32 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
24840 | ||
/* Polymorphic word scatter store with offsets: dispatches on base pointer P0
   and value vector P2; the offset vector P1 is passed through unchecked.
   NOTE(review): this macro is defined again further down in this file.  */
#define vstrwq_scatter_offset(p0,p1,p2) __arm_vstrwq_scatter_offset(p0,p1,p2)
#define __arm_vstrwq_scatter_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_offset_s32 (__ARM_mve_coerce(__p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t)), \
  int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_offset_u32 (__ARM_mve_coerce(__p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t)));})
24848 | ||
/* Polymorphic predicated word scatter store with offsets: dispatches on base
   pointer P0 and value vector P2; P1 is the offset vector, P3 the predicate.
   Fix: the unsigned (_u32) arm previously omitted the trailing predicate
   argument P3, so the call expanded with one argument too few; it now
   forwards P3 exactly like the signed arm does (and like the other
   definition of this macro later in this file).  */
#define vstrwq_scatter_offset_p(p0,p1,p2,p3) __arm_vstrwq_scatter_offset_p(p0,p1,p2,p3)
#define __arm_vstrwq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_offset_p_s32 (__ARM_mve_coerce(__p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_offset_p_u32 (__ARM_mve_coerce(__p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t), p3));})
24856 | ||
/* Polymorphic predicated word scatter store with offsets: dispatches on base
   pointer P0 and value vector P2; P1 is the offset vector, P3 the predicate.
   NOTE(review): vstrwq_scatter_offset_p is also defined earlier in this
   file; this later definition (with P3 forwarded in both arms) is the one
   in effect from here on.  */
#define vstrwq_scatter_offset_p(p0,p1,p2,p3) __arm_vstrwq_scatter_offset_p(p0,p1,p2,p3)
#define __arm_vstrwq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_offset_p_s32 (__ARM_mve_coerce(__p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_offset_p_u32 (__ARM_mve_coerce(__p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t), p3));})
24864 | ||
/* NOTE(review): benign re-definition of vstrwq_scatter_offset (identical
   replacement list to the earlier definition; C11 6.10.3p2).  */
#define vstrwq_scatter_offset(p0,p1,p2) __arm_vstrwq_scatter_offset(p0,p1,p2)
#define __arm_vstrwq_scatter_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_offset_s32 (__ARM_mve_coerce(__p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t)), \
  int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_offset_u32 (__ARM_mve_coerce(__p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t)));})
24872 | ||
24873 | #define vstrwq_scatter_shifted_offset(p0,p1,p2) __arm_vstrwq_scatter_shifted_offset(p0,p1,p2) | |
24874 | #define __arm_vstrwq_scatter_shifted_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
24875 | __typeof(p1) __p1 = (p1); \ | |
24876 | __typeof(p2) __p2 = (p2); \ | |
24877 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \ | |
24878 | int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_shifted_offset_s32 (__ARM_mve_coerce(__p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t)), \ | |
24879 | int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_shifted_offset_u32 (__ARM_mve_coerce(__p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t)));}) | |
24880 | ||
/* Predicated scatter-store of 32-bit elements at shifted offsets: as the
   unpredicated form, with the predicate p3 passed through unchanged.  */
#define vstrwq_scatter_shifted_offset_p(p0,p1,p2,p3) __arm_vstrwq_scatter_shifted_offset_p(p0,p1,p2,p3)
#define __arm_vstrwq_scatter_shifted_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_shifted_offset_p_s32 (__ARM_mve_coerce(__p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_shifted_offset_p_u32 (__ARM_mve_coerce(__p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t), p3));})
24888 | ||
/* Return an uninitialized vector of the same type as p0: dispatch on the
   type of p0 only; the argument's value is not used.  */
#define vuninitializedq(p0) __arm_vuninitializedq(p0)
#define __arm_vuninitializedq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vuninitializedq_s8 (), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vuninitializedq_s16 (), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vuninitializedq_s32 (), \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vuninitializedq_s64 (), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vuninitializedq_u8 (), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vuninitializedq_u16 (), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vuninitializedq_u32 (), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vuninitializedq_u64 ());})
24900 | ||
/* Reinterpret any integer vector as int16x8_t: dispatch on the source
   vector type (every integer vector type except int16x8_t itself).  */
#define vreinterpretq_s16(p0) __arm_vreinterpretq_s16(p0)
#define __arm_vreinterpretq_s16(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_s16_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_s16_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_s16_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_s16_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_s16_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_s16_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_s16_u64 (__ARM_mve_coerce(__p0, uint64x2_t)));})
24911 | ||
/* Reinterpret any integer vector as int32x4_t: dispatch on the source
   vector type.  */
#define vreinterpretq_s32(p0) __arm_vreinterpretq_s32(p0)
#define __arm_vreinterpretq_s32(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_s32_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_s32_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_s32_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_s32_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_s32_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_s32_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_s32_u64 (__ARM_mve_coerce(__p0, uint64x2_t)));})
24922 | ||
/* Reinterpret any integer vector as int64x2_t: dispatch on the source
   vector type.  */
#define vreinterpretq_s64(p0) __arm_vreinterpretq_s64(p0)
#define __arm_vreinterpretq_s64(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_s64_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_s64_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_s64_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_s64_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_s64_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_s64_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_s64_u64 (__ARM_mve_coerce(__p0, uint64x2_t)));})
24933 | ||
/* Reinterpret any integer vector as int8x16_t: dispatch on the source
   vector type.  */
#define vreinterpretq_s8(p0) __arm_vreinterpretq_s8(p0)
#define __arm_vreinterpretq_s8(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_s8_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_s8_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_s8_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_s8_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_s8_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_s8_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_s8_u64 (__ARM_mve_coerce(__p0, uint64x2_t)));})
24944 | ||
/* Reinterpret any integer vector as uint16x8_t: dispatch on the source
   vector type.  */
#define vreinterpretq_u16(p0) __arm_vreinterpretq_u16(p0)
#define __arm_vreinterpretq_u16(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_u16_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_u16_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_u16_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_u16_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_u16_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_u16_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_u16_u64 (__ARM_mve_coerce(__p0, uint64x2_t)));})
24955 | ||
/* Reinterpret any integer vector as uint32x4_t: dispatch on the source
   vector type.  */
#define vreinterpretq_u32(p0) __arm_vreinterpretq_u32(p0)
#define __arm_vreinterpretq_u32(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_u32_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_u32_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_u32_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_u32_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_u32_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_u32_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_u32_u64 (__ARM_mve_coerce(__p0, uint64x2_t)));})
24966 | ||
/* Reinterpret any integer vector as uint64x2_t: dispatch on the source
   vector type.  */
#define vreinterpretq_u64(p0) __arm_vreinterpretq_u64(p0)
#define __arm_vreinterpretq_u64(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_u64_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_u64_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_u64_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_u64_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_u64_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_u64_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_u64_s64 (__ARM_mve_coerce(__p0, int64x2_t)));})
24977 | ||
/* Reinterpret any integer vector as uint8x16_t: dispatch on the source
   vector type.  */
#define vreinterpretq_u8(p0) __arm_vreinterpretq_u8(p0)
#define __arm_vreinterpretq_u8(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_u8_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_u8_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_u8_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_u8_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_u8_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_u8_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_u8_u64 (__ARM_mve_coerce(__p0, uint64x2_t)));})
24988 | ||
/* Predicated (don't-care false lanes) absolute value: dispatch on the
   signed vector type of p1; p2 is the predicate.  */
#define vabsq_x(p1,p2) __arm_vabsq_x(p1,p2)
#define __arm_vabsq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vabsq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vabsq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vabsq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2));})
24995 | ||
/* Predicated addition: dispatch on the (p1, p2) type pair.  Vector+vector
   pairs pick the plain variants; vector+scalar pairs pick the _n_
   (broadcast-scalar) variants.  p3 is the predicate.  */
#define vaddq_x(p1,p2,p3) __arm_vaddq_x(p1,p2,p3)
#define __arm_vaddq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vaddq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vaddq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vaddq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vaddq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vaddq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vaddq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vaddq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vaddq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vaddq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vaddq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vaddq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vaddq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3));})
25012 | ||
/* Predicated complex add with 270-degree rotation: dispatch on the
   (p1, p2) vector type pair; p3 is the predicate.  */
#define vcaddq_rot270_x(p1,p2,p3) __arm_vcaddq_rot270_x(p1,p2,p3)
#define __arm_vcaddq_rot270_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot270_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot270_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot270_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot270_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot270_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot270_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
25023 | ||
/* Predicated complex add with 90-degree rotation: dispatch on the
   (p1, p2) vector type pair; p3 is the predicate.  */
#define vcaddq_rot90_x(p1,p2,p3) __arm_vcaddq_rot90_x(p1,p2,p3)
#define __arm_vcaddq_rot90_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot90_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot90_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot90_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot90_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot90_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot90_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
25034 | ||
/* Predicated bitwise exclusive-OR: dispatch on the (p1, p2) vector type
   pair; p3 is the predicate.  */
#define veorq_x(p1,p2,p3) __arm_veorq_x(p1,p2,p3)
#define __arm_veorq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_veorq_x_s8(__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_veorq_x_s16(__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_veorq_x_s32(__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_veorq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_veorq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_veorq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
25045 | ||
/* Predicated element-wise maximum: dispatch on the (p1, p2) vector type
   pair; p3 is the predicate.  */
#define vmaxq_x(p1,p2,p3) __arm_vmaxq_x(p1,p2,p3)
#define __arm_vmaxq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmaxq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmaxq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmaxq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmaxq_x_u8( __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmaxq_x_u16( __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmaxq_x_u32( __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
25056 | ||
/* Predicated element-wise minimum: dispatch on the (p1, p2) vector type
   pair; p3 is the predicate.  */
#define vminq_x(p1,p2,p3) __arm_vminq_x(p1,p2,p3)
#define __arm_vminq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vminq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vminq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vminq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vminq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vminq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vminq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
25067 | ||
/* Predicated widening move of the bottom (even-numbered) elements:
   dispatch on the 8- or 16-bit source vector type of p1; p2 is the
   predicate.  */
#define vmovlbq_x(p1,p2) __arm_vmovlbq_x(p1,p2)
#define __arm_vmovlbq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vmovlbq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vmovlbq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmovlbq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmovlbq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2));})
25075 | ||
/* Predicated widening move of the top (odd-numbered) elements: dispatch
   on the 8- or 16-bit source vector type of p1; p2 is the predicate.  */
#define vmovltq_x(p1,p2) __arm_vmovltq_x(p1,p2)
#define __arm_vmovltq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vmovltq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vmovltq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmovltq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmovltq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2));})
25083 | ||
/* Predicated multiply returning high half: dispatch on the (p1, p2)
   vector type pair; p3 is the predicate.  */
#define vmulhq_x(p1,p2,p3) __arm_vmulhq_x(p1,p2,p3)
#define __arm_vmulhq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulhq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulhq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulhq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulhq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulhq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulhq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
25094 | ||
/* Predicated widening integer multiply of bottom elements: dispatch on
   the (p1, p2) vector type pair; p3 is the predicate.  */
#define vmullbq_int_x(p1,p2,p3) __arm_vmullbq_int_x(p1,p2,p3)
#define __arm_vmullbq_int_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmullbq_int_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmullbq_int_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmullbq_int_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmullbq_int_x_u8( __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmullbq_int_x_u16( __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmullbq_int_x_u32( __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
25105 | ||
/* Predicated widening polynomial multiply of bottom elements: dispatch
   on the unsigned vector type pair (carrying p8/p16 polynomial data);
   p3 is the predicate.  */
#define vmullbq_poly_x(p1,p2,p3) __arm_vmullbq_poly_x(p1,p2,p3)
#define __arm_vmullbq_poly_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmullbq_poly_x_p8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmullbq_poly_x_p16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3));})
25112 | ||
/* Predicated widening integer multiply of top elements: dispatch on the
   (p1, p2) vector type pair; p3 is the predicate.  */
#define vmulltq_int_x(p1,p2,p3) __arm_vmulltq_int_x(p1,p2,p3)
#define __arm_vmulltq_int_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulltq_int_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulltq_int_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulltq_int_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulltq_int_x_u8( __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulltq_int_x_u16( __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulltq_int_x_u32( __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
25123 | ||
/* Predicated widening polynomial multiply of top elements: dispatch on
   the unsigned vector type pair (carrying p8/p16 polynomial data); p3 is
   the predicate.  */
#define vmulltq_poly_x(p1,p2,p3) __arm_vmulltq_poly_x(p1,p2,p3)
#define __arm_vmulltq_poly_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulltq_poly_x_p8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulltq_poly_x_p16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3));})
25130 | ||
/* Predicated multiply: dispatch on the (p1, p2) type pair.  Vector+vector
   pairs pick the plain variants; vector+scalar pairs pick the _n_
   (broadcast-scalar) variants.  p3 is the predicate.  */
#define vmulq_x(p1,p2,p3) __arm_vmulq_x(p1,p2,p3)
#define __arm_vmulq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vmulq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vmulq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vmulq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vmulq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vmulq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vmulq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3));})
25147 | ||
/* Predicated negation: dispatch on the signed vector type of p1; p2 is
   the predicate.  */
#define vnegq_x(p1,p2) __arm_vnegq_x(p1,p2)
#define __arm_vnegq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vnegq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vnegq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vnegq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2));})
25154 | ||
/* Predicated bitwise OR-NOT (p1 | ~p2): dispatch on the (p1, p2) vector
   type pair; p3 is the predicate.  */
#define vornq_x(p1,p2,p3) __arm_vornq_x(p1,p2,p3)
#define __arm_vornq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vornq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vornq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vornq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vornq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vornq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vornq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
25165 | ||
/* Predicated bitwise inclusive-OR: dispatch on the (p1, p2) vector type
   pair; p3 is the predicate.  */
#define vorrq_x(p1,p2,p3) __arm_vorrq_x(p1,p2,p3)
#define __arm_vorrq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vorrq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vorrq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vorrq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vorrq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vorrq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vorrq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
25176 | ||
/* Predicated element reversal within 32-bit halves: dispatch on the 8- or
   16-bit vector type of p1; p2 is the predicate.  */
#define vrev32q_x(p1,p2) __arm_vrev32q_x(p1,p2)
#define __arm_vrev32q_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev32q_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vrev32q_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev32q_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrev32q_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2));})
25184 | ||
/* Predicated element reversal within 64-bit halves: dispatch on the 8-,
   16- or 32-bit vector type of p1; p2 is the predicate.  */
#define vrev64q_x(p1,p2) __arm_vrev64q_x(p1,p2)
#define __arm_vrev64q_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev64q_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vrev64q_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vrev64q_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev64q_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrev64q_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrev64q_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2));})
25194 | ||
/* Predicated-"x" absolute difference of two like-typed vectors.  */
#define vabdq_x(p1,p2,p3) __arm_vabdq_x(p1,p2,p3)
#define __arm_vabdq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vabdq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vabdq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vabdq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vabdq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vabdq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vabdq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})

/* Predicated-"x" bitwise AND of two like-typed vectors.  */
#define vandq_x(p1,p2,p3) __arm_vandq_x(p1,p2,p3)
#define __arm_vandq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vandq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vandq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vandq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vandq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vandq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vandq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})

/* Predicated-"x" bit clear (p1 AND NOT p2) of two like-typed vectors.  */
#define vbicq_x(p1,p2,p3) __arm_vbicq_x(p1,p2,p3)
#define __arm_vbicq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vbicq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vbicq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vbicq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vbicq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vbicq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vbicq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
25227 | ||
/* Predicated-"x" bit-reverse-and-shift-right; p2 is the scalar shift/bit
   count (_n form), p3 the predicate.  */
#define vbrsrq_x(p1,p2,p3) __arm_vbrsrq_x(p1,p2,p3)
#define __arm_vbrsrq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vbrsrq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vbrsrq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vbrsrq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vbrsrq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vbrsrq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vbrsrq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
25237 | ||
429d607b SP |
25238 | #endif /* MVE Integer. */ |
25239 | ||
261014a1 SP |
/* Predicated-"x" bitwise NOT of one vector operand.  */
#define vmvnq_x(p1,p2) __arm_vmvnq_x(p1,p2)
#define __arm_vmvnq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vmvnq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vmvnq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vmvnq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmvnq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmvnq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vmvnq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2));})

/* Predicated-"x" byte reverse within each 16-bit container; only 8-bit
   element types are meaningful here.  */
#define vrev16q_x(p1,p2) __arm_vrev16q_x(p1,p2)
#define __arm_vrev16q_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev16q_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev16q_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2));})
25255 | ||
/* Predicated-"x" rounding halving add of two like-typed vectors.  */
#define vrhaddq_x(p1,p2,p3) __arm_vrhaddq_x(p1,p2,p3)
#define __arm_vrhaddq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrhaddq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrhaddq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrhaddq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrhaddq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrhaddq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrhaddq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
25266 | ||
/* Predicated-"x" vector shift left by a signed per-lane shift vector.
   Per the ACLE, vshlq_x takes exactly three arguments: (value, shift,
   predicate).  The wrapper previously declared four parameters
   (p0,p1,p2,p3) and forwarded all four to __arm_vshlq_x, which accepts only
   three, so every expansion of vshlq_x failed to preprocess ("macro passed
   4 arguments, but takes just 3").  Fixed to forward exactly p1,p2,p3.
   Note the shift operand is always a *signed* vector, even for unsigned
   values (negative shifts shift right), hence the uint/int pairings below.  */
#define vshlq_x(p1,p2,p3) __arm_vshlq_x(p1,p2,p3)
#define __arm_vshlq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vshlq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vshlq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vshlq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vshlq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vshlq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vshlq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
25277 | ||
/* Predicated-"x" rounding multiply returning high half, like-typed vectors.  */
#define vrmulhq_x(p1,p2,p3) __arm_vrmulhq_x(p1,p2,p3)
#define __arm_vrmulhq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrmulhq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrmulhq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrmulhq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrmulhq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrmulhq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrmulhq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})

/* Predicated-"x" rounding shift left; the shift operand is a signed vector
   even for unsigned values (negative counts shift right).  */
#define vrshlq_x(p1,p2,p3) __arm_vrshlq_x(p1,p2,p3)
#define __arm_vrshlq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshlq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshlq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
25299 | ||
/* Predicated-"x" rounding shift right by immediate p2 (_n form).  */
#define vrshrq_x(p1,p2,p3) __arm_vrshrq_x(p1,p2,p3)
#define __arm_vrshrq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vrshrq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vrshrq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vrshrq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrshrq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrshrq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrshrq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
25309 | ||
/* Predicated-"x" long shift left of the bottom halves; widening, so only
   8- and 16-bit element types are accepted.  */
#define vshllbq_x(p1,p2,p3) __arm_vshllbq_x(p1,p2,p3)
#define __arm_vshllbq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vshllbq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vshllbq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshllbq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshllbq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2, p3));})

/* Predicated-"x" long shift left of the top halves; widening, so only
   8- and 16-bit element types are accepted.  */
#define vshlltq_x(p1,p2,p3) __arm_vshlltq_x(p1,p2,p3)
#define __arm_vshlltq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlltq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlltq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlltq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlltq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2, p3));})
25325 | ||
/* Predicated-"x" shift left by immediate p2 (_n form).  */
#define vshlq_x_n(p1,p2,p3) __arm_vshlq_x_n(p1,p2,p3)
#define __arm_vshlq_x_n(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
25335 | ||
/* Predicated-"x" decrementing-with-wrap vector generation.  Each macro
   dispatches on whether p1 is a uint32_t start value (_n form) or a
   uint32_t * that is also written back with the next start value (_wb
   form).  p2 is the wrap limit, p3 the step, p4 the predicate.  */
#define vdwdupq_x_u8(p1,p2,p3,p4) __arm_vdwdupq_x_u8(p1,p2,p3,p4)
#define __arm_vdwdupq_x_u8(p1,p2,p3,p4) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint32_t]: __arm_vdwdupq_x_n_u8 (__ARM_mve_coerce(__p1, uint32_t), p2, p3, p4), \
  int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_x_wb_u8 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4));})

#define vdwdupq_x_u16(p1,p2,p3,p4) __arm_vdwdupq_x_u16(p1,p2,p3,p4)
#define __arm_vdwdupq_x_u16(p1,p2,p3,p4) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint32_t]: __arm_vdwdupq_x_n_u16 (__ARM_mve_coerce(__p1, uint32_t), p2, p3, p4), \
  int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_x_wb_u16 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4));})

#define vdwdupq_x_u32(p1,p2,p3,p4) __arm_vdwdupq_x_u32(p1,p2,p3,p4)
#define __arm_vdwdupq_x_u32(p1,p2,p3,p4) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint32_t]: __arm_vdwdupq_x_n_u32 (__ARM_mve_coerce(__p1, uint32_t), p2, p3, p4), \
  int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_x_wb_u32 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4));})

/* Predicated-"x" incrementing-with-wrap vector generation; same _n/_wb
   dispatch as the vdwdupq_x family above.  */
#define viwdupq_x_u8(p1,p2,p3,p4) __arm_viwdupq_x_u8(p1,p2,p3,p4)
#define __arm_viwdupq_x_u8(p1,p2,p3,p4) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint32_t]: __arm_viwdupq_x_n_u8 (__ARM_mve_coerce(__p1, uint32_t), p2, p3, p4), \
  int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_x_wb_u8 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4));})

#define viwdupq_x_u16(p1,p2,p3,p4) __arm_viwdupq_x_u16(p1,p2,p3,p4)
#define __arm_viwdupq_x_u16(p1,p2,p3,p4) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint32_t]: __arm_viwdupq_x_n_u16 (__ARM_mve_coerce(__p1, uint32_t), p2, p3, p4), \
  int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_x_wb_u16 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4));})

#define viwdupq_x_u32(p1,p2,p3,p4) __arm_viwdupq_x_u32(p1,p2,p3,p4)
#define __arm_viwdupq_x_u32(p1,p2,p3,p4) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint32_t]: __arm_viwdupq_x_n_u32 (__ARM_mve_coerce(__p1, uint32_t), p2, p3, p4), \
  int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_x_wb_u32 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4));})
25371 | ||
/* Predicated-"x" incrementing (vidupq) / decrementing (vddupq) vector
   generation without wrap.  Dispatch on p1: uint32_t start value (_n form)
   or uint32_t * written back with the next start value (_wb form).
   p2 is the step, p3 the predicate.  */
#define vidupq_x_u8(p1,p2,p3) __arm_vidupq_x_u8(p1,p2,p3)
#define __arm_vidupq_x_u8(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint32_t]: __arm_vidupq_x_n_u8 (__ARM_mve_coerce(__p1, uint32_t), p2, p3), \
  int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_x_wb_u8 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3));})

#define vddupq_x_u8(p1,p2,p3) __arm_vddupq_x_u8(p1,p2,p3)
#define __arm_vddupq_x_u8(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint32_t]: __arm_vddupq_x_n_u8 (__ARM_mve_coerce(__p1, uint32_t), p2, p3), \
  int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_x_wb_u8 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3));})

#define vidupq_x_u16(p1,p2,p3) __arm_vidupq_x_u16(p1,p2,p3)
#define __arm_vidupq_x_u16(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint32_t]: __arm_vidupq_x_n_u16 (__ARM_mve_coerce(__p1, uint32_t), p2, p3), \
  int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_x_wb_u16 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3));})

#define vddupq_x_u16(p1,p2,p3) __arm_vddupq_x_u16(p1,p2,p3)
#define __arm_vddupq_x_u16(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint32_t]: __arm_vddupq_x_n_u16 (__ARM_mve_coerce(__p1, uint32_t), p2, p3), \
  int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_x_wb_u16 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3));})

#define vidupq_x_u32(p1,p2,p3) __arm_vidupq_x_u32(p1,p2,p3)
#define __arm_vidupq_x_u32(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint32_t]: __arm_vidupq_x_n_u32 (__ARM_mve_coerce(__p1, uint32_t), p2, p3), \
  int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_x_wb_u32 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3));})

#define vddupq_x_u32(p1,p2,p3) __arm_vddupq_x_u32(p1,p2,p3)
#define __arm_vddupq_x_u32(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint32_t]: __arm_vddupq_x_n_u32 (__ARM_mve_coerce(__p1, uint32_t), p2, p3), \
  int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_x_wb_u32 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3));})
25407 | ||
/* Predicated-"x" halving add.  The public wrapper is defined here; its
   implementation macro __arm_vhaddq_x appears below, after vshrq_x.  */
#define vhaddq_x(p1,p2,p3) __arm_vhaddq_x(p1,p2,p3)
/* Predicated-"x" shift right by immediate p2 (_n form).  */
#define vshrq_x(p1,p2,p3) __arm_vshrq_x(p1,p2,p3)
#define __arm_vshrq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vshrq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vshrq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vshrq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshrq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshrq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshrq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})

/* Implementation of vhaddq_x: dispatches vector+scalar to the _n variants
   and vector+vector to the plain variants.  */
#define __arm_vhaddq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vhaddq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vhaddq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vhaddq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vhaddq_x_n_u8( __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vhaddq_x_n_u16( __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vhaddq_x_n_u32( __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhaddq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhaddq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhaddq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vhaddq_x_u8( __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vhaddq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vhaddq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
25434 | ||
/* Predicated-"x" halving complex add with 270-degree rotation; signed
   element types only.  */
#define vhcaddq_rot270_x(p1,p2,p3) __arm_vhcaddq_rot270_x(p1,p2,p3)
#define __arm_vhcaddq_rot270_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhcaddq_rot270_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhcaddq_rot270_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhcaddq_rot270_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})

/* Predicated-"x" halving complex add with 90-degree rotation; signed
   element types only.  */
#define vhcaddq_rot90_x(p1,p2,p3) __arm_vhcaddq_rot90_x(p1,p2,p3)
#define __arm_vhcaddq_rot90_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhcaddq_rot90_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhcaddq_rot90_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhcaddq_rot90_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
25450 | ||
/* Predicated-"x" halving subtract; vector+scalar maps to the _n variants,
   vector+vector to the plain variants.  */
#define vhsubq_x(p1,p2,p3) __arm_vhsubq_x(p1,p2,p3)
#define __arm_vhsubq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vhsubq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vhsubq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vhsubq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vhsubq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vhsubq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vhsubq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhsubq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhsubq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhsubq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vhsubq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vhsubq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vhsubq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
25467 | ||
/* Predicated-"x" count leading sign bits; signed element types only.  */
#define vclsq_x(p1,p2) __arm_vclsq_x(p1,p2)
#define __arm_vclsq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vclsq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vclsq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vclsq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2));})

/* Predicated-"x" count leading zero bits; all integer element types.  */
#define vclzq_x(p1,p2) __arm_vclzq_x(p1,p2)
#define __arm_vclzq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vclzq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vclzq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vclzq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vclzq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vclzq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vclzq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2));})
25484 | ||
/* Add with carry across lanes; p2 receives the carry-out flag.  Only
   32-bit element types exist for this operation.  */
#define vadciq(p0,p1,p2) __arm_vadciq(p0,p1,p2)
#define __arm_vadciq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vadciq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vadciq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
25491 | ||
41e1a7ff SP |
/* 64-bit scatter store to base + offsets with write-back of the base
   vector; _p form takes a trailing predicate.  Dispatch is on the type of
   the data vector p2 (signed or unsigned 64-bit).  */
#define vstrdq_scatter_base_wb_p(p0,p1,p2,p3) __arm_vstrdq_scatter_base_wb_p(p0,p1,p2,p3)
#define __arm_vstrdq_scatter_base_wb_p(p0,p1,p2,p3) ({ __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_base_wb_p_s64 (p0, p1, __ARM_mve_coerce(__p2, int64x2_t), p3), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_base_wb_p_u64 (p0, p1, __ARM_mve_coerce(__p2, uint64x2_t), p3));})

#define vstrdq_scatter_base_wb(p0,p1,p2) __arm_vstrdq_scatter_base_wb(p0,p1,p2)
#define __arm_vstrdq_scatter_base_wb(p0,p1,p2) ({ __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_base_wb_s64 (p0, p1, __ARM_mve_coerce(__p2, int64x2_t)), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_base_wb_u64 (p0, p1, __ARM_mve_coerce(__p2, uint64x2_t)));})
25503 | ||
4cc23303 SP |
/* 64-bit gather loads from base pointer + per-lane offsets.  Dispatch is on
   the pointed-to type of p0; _z forms take a trailing predicate and zero
   inactive lanes; _shifted forms scale the offsets by the element size.  */
#define vldrdq_gather_offset(p0,p1) __arm_vldrdq_gather_offset(p0,p1)
#define __arm_vldrdq_gather_offset(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int64_t_const_ptr]: __arm_vldrdq_gather_offset_s64 (__ARM_mve_coerce(__p0, int64_t const *), p1), \
  int (*)[__ARM_mve_type_uint64_t_const_ptr]: __arm_vldrdq_gather_offset_u64 (__ARM_mve_coerce(__p0, uint64_t const *), p1));})

#define vldrdq_gather_offset_z(p0,p1,p2) __arm_vldrdq_gather_offset_z(p0,p1,p2)
#define __arm_vldrdq_gather_offset_z(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int64_t_const_ptr]: __arm_vldrdq_gather_offset_z_s64 (__ARM_mve_coerce(__p0, int64_t const *), p1, p2), \
  int (*)[__ARM_mve_type_uint64_t_const_ptr]: __arm_vldrdq_gather_offset_z_u64 (__ARM_mve_coerce(__p0, uint64_t const *), p1, p2));})

#define vldrdq_gather_shifted_offset(p0,p1) __arm_vldrdq_gather_shifted_offset(p0,p1)
#define __arm_vldrdq_gather_shifted_offset(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int64_t_const_ptr]: __arm_vldrdq_gather_shifted_offset_s64 (__ARM_mve_coerce(__p0, int64_t const *), p1), \
  int (*)[__ARM_mve_type_uint64_t_const_ptr]: __arm_vldrdq_gather_shifted_offset_u64 (__ARM_mve_coerce(__p0, uint64_t const *), p1));})

#define vldrdq_gather_shifted_offset_z(p0,p1,p2) __arm_vldrdq_gather_shifted_offset_z(p0,p1,p2)
#define __arm_vldrdq_gather_shifted_offset_z(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int64_t_const_ptr]: __arm_vldrdq_gather_shifted_offset_z_s64 (__ARM_mve_coerce(__p0, int64_t const *), p1, p2), \
  int (*)[__ARM_mve_type_uint64_t_const_ptr]: __arm_vldrdq_gather_shifted_offset_z_u64 (__ARM_mve_coerce(__p0, uint64_t const *), p1, p2));})
25527 | ||
25528 | ||
/* Predicated byte gather load with zeroing: dispatches on both the pointer
   type P0 (signedness of the loaded bytes) and the unsigned offset vector
   P1 (which fixes the widened result width: 8/16/32-bit lanes).  */
#define vldrbq_gather_offset_z(p0,p1,p2) __arm_vldrbq_gather_offset_z(p0,p1,p2)
#define __arm_vldrbq_gather_offset_z(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8_t_const_ptr][__ARM_mve_type_uint8x16_t]: __arm_vldrbq_gather_offset_z_s8 (__ARM_mve_coerce(__p0, int8_t const *), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_int8_t_const_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrbq_gather_offset_z_s16 (__ARM_mve_coerce(__p0, int8_t const *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_int8_t_const_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrbq_gather_offset_z_s32 (__ARM_mve_coerce(__p0, int8_t const *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8_t_const_ptr][__ARM_mve_type_uint8x16_t]: __arm_vldrbq_gather_offset_z_u8 (__ARM_mve_coerce(__p0, uint8_t const *), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint8_t_const_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrbq_gather_offset_z_u16 (__ARM_mve_coerce(__p0, uint8_t const *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint8_t_const_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrbq_gather_offset_z_u32 (__ARM_mve_coerce(__p0, uint8_t const *), __ARM_mve_coerce(__p1, uint32x4_t), p2));})

/* Predicated saturating rounding doubling multiply-accumulate with a scalar
   (signed types only): dispatches on accumulator, vector and scalar types.  */
#define vqrdmlahq_m(p0,p1,p2,p3) __arm_vqrdmlahq_m(p0,p1,p2,p3)
#define __arm_vqrdmlahq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vqrdmlahq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vqrdmlahq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vqrdmlahq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3));})

/* As vqrdmlahq_m but with the multiply-subtract-high ordering ("mlash").  */
#define vqrdmlashq_m(p0,p1,p2,p3) __arm_vqrdmlashq_m(p0,p1,p2,p3)
#define __arm_vqrdmlashq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vqrdmlashq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vqrdmlashq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vqrdmlashq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3));})

/* Predicated saturating rounding shift-left by a signed vector; the shift
   vector (p2) is always signed even for unsigned data.  */
#define vqrshlq_m(p0,p1,p2,p3) __arm_vqrshlq_m(p0,p1,p2,p3)
#define __arm_vqrshlq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrshlq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrshlq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrshlq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrshlq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrshlq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrshlq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})

/* Predicated saturating shift-left by an immediate (p2).  */
#define vqshlq_m_n(p0,p1,p2,p3) __arm_vqshlq_m_n(p0,p1,p2,p3)
#define __arm_vqshlq_m_n(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqshlq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqshlq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqshlq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vqshlq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vqshlq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vqshlq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})

/* Predicated saturating shift-left by a signed shift vector (p2).  */
#define vqshlq_m(p0,p1,p2,p3) __arm_vqshlq_m(p0,p1,p2,p3)
#define __arm_vqshlq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqshlq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqshlq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqshlq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqshlq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqshlq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqshlq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})

/* Predicated rounding halving add: dispatches over all six signed/unsigned
   8/16/32-bit vector combinations.  */
#define vrhaddq_m(p0,p1,p2,p3) __arm_vrhaddq_m(p0,p1,p2,p3)
#define __arm_vrhaddq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrhaddq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrhaddq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrhaddq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrhaddq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrhaddq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrhaddq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})

/* Predicated rounding multiply returning the high half of the product.  */
#define vrmulhq_m(p0,p1,p2,p3) __arm_vrmulhq_m(p0,p1,p2,p3)
#define __arm_vrmulhq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrmulhq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrmulhq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrmulhq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrmulhq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrmulhq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrmulhq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})

/* Predicated rounding shift-left by a signed shift vector (p2 is signed
   even for unsigned data vectors).  */
#define vrshlq_m(p0,p1,p2,p3) __arm_vrshlq_m(p0,p1,p2,p3)
#define __arm_vrshlq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshlq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshlq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})

/* Predicated rounding shift-right by an immediate (p2).  */
#define vrshrq_m(p0,p1,p2,p3) __arm_vrshrq_m(p0,p1,p2,p3)
#define __arm_vrshrq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshrq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshrq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshrq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrshrq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrshrq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrshrq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})

/* Predicated shift-right by an immediate (p2).  */
#define vshrq_m(p0,p1,p2,p3) __arm_vshrq_m(p0,p1,p2,p3)
#define __arm_vshrq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vshrq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vshrq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vshrq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vshrq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vshrq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vshrq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})

/* Predicated shift-left-and-insert by an immediate (p2).  */
#define vsliq_m(p0,p1,p2,p3) __arm_vsliq_m(p0,p1,p2,p3)
#define __arm_vsliq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsliq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsliq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsliq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsliq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsliq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsliq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})

/* Predicated saturating subtract: the vector-scalar ("_n") cases are listed
   before the vector-vector cases; dispatch picks the one matching P2.  */
#define vqsubq_m(p0,p1,p2,p3) __arm_vqsubq_m(p0,p1,p2,p3)
#define __arm_vqsubq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vqsubq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vqsubq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vqsubq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vqsubq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vqsubq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vqsubq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqsubq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqsubq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqsubq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vqsubq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vqsubq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vqsubq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})

/* Predicated saturating rounding doubling multiply returning the high half;
   signed only, with vector-vector and vector-scalar ("_n") forms.  */
#define vqrdmulhq_m(p0,p1,p2,p3) __arm_vqrdmulhq_m(p0,p1,p2,p3)
#define __arm_vqrdmulhq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmulhq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmulhq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmulhq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vqrdmulhq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vqrdmulhq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vqrdmulhq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3));})

/* Predicated saturating rounding doubling multiply-subtract dual (exchanged
   operand order, "x" variant); signed types only.  */
#define vqrdmlsdhxq_m(p0,p1,p2,p3) __arm_vqrdmlsdhxq_m(p0,p1,p2,p3)
#define __arm_vqrdmlsdhxq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmlsdhxq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmlsdhxq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmlsdhxq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})

/* Non-exchanged variant of vqrdmlsdhxq_m.  */
#define vqrdmlsdhq_m(p0,p1,p2,p3) __arm_vqrdmlsdhq_m(p0,p1,p2,p3)
#define __arm_vqrdmlsdhq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmlsdhq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmlsdhq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmlsdhq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})

/* Predicated widening shift-left of the bottom ("b") halves: the
   accumulator P0 is twice the element width of the input vector P1.  */
#define vshllbq_m(p0,p1,p2,p3) __arm_vshllbq_m(p0,p1,p2,p3)
#define __arm_vshllbq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int8x16_t]: __arm_vshllbq_m_n_s8 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t]: __arm_vshllbq_m_n_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint8x16_t]: __arm_vshllbq_m_n_u8 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t]: __arm_vshllbq_m_n_u16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3));})

/* Predicated narrowing shift-right writing the top ("t") halves of P0.  */
#define vshrntq_m(p0,p1,p2,p3) __arm_vshrntq_m(p0,p1,p2,p3)
#define __arm_vshrntq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vshrntq_m_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vshrntq_m_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vshrntq_m_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vshrntq_m_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})

/* Predicated narrowing shift-right writing the bottom ("b") halves of P0.  */
#define vshrnbq_m(p0,p1,p2,p3) __arm_vshrnbq_m(p0,p1,p2,p3)
#define __arm_vshrnbq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vshrnbq_m_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vshrnbq_m_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vshrnbq_m_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vshrnbq_m_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})

/* Predicated widening shift-left of the top ("t") halves.  */
#define vshlltq_m(p0,p1,p2,p3) __arm_vshlltq_m(p0,p1,p2,p3)
#define __arm_vshlltq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int8x16_t]: __arm_vshlltq_m_n_s8 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t]: __arm_vshlltq_m_n_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint8x16_t]: __arm_vshlltq_m_n_u8 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t]: __arm_vshlltq_m_n_u16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3));})

25746 | #define vrshrntq_m(p0,p1,p2,p3) __arm_vrshrntq_m(p0,p1,p2,p3) | |
25747 | #define __arm_vrshrntq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ | |
25748 | __typeof(p1) __p1 = (p1); \ | |
25749 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
25750 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vrshrntq_m_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \ | |
25751 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vrshrntq_m_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \ | |
25752 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vrshrntq_m_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \ | |
25753 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vrshrntq_m_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));}) | |
25754 | ||
/* vqshruntq_m / vqshrunbq_m: signed-input, unsigned-result overloads only, so
   each dispatch table has just the two (uNxM result, sNxM input) pairs.  */
#define vqshruntq_m(p0,p1,p2,p3) __arm_vqshruntq_m(p0,p1,p2,p3)
#define __arm_vqshruntq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshruntq_m_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshruntq_m_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3));})

#define vqshrunbq_m(p0,p1,p2,p3) __arm_vqshrunbq_m(p0,p1,p2,p3)
#define __arm_vqshrunbq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshrunbq_m_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshrunbq_m_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3));})

/* vqrshrnbq_m / vqrshrntq_m: four overloads each (s16/s32/u16/u32), keyed on
   the (narrow result, wide input) vector-type pair.  */
#define vqrshrnbq_m(p0,p1,p2,p3) __arm_vqrshrnbq_m(p0,p1,p2,p3)
#define __arm_vqrshrnbq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshrnbq_m_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshrnbq_m_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqrshrnbq_m_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqrshrnbq_m_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})

#define vqrshrntq_m(p0,p1,p2,p3) __arm_vqrshrntq_m(p0,p1,p2,p3)
#define __arm_vqrshrntq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshrntq_m_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshrntq_m_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqrshrntq_m_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqrshrntq_m_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})

/* vqrshrunbq_m / vqrshruntq_m: unsigned-result variants, signed inputs only.  */
#define vqrshrunbq_m(p0,p1,p2,p3) __arm_vqrshrunbq_m(p0,p1,p2,p3)
#define __arm_vqrshrunbq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshrunbq_m_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshrunbq_m_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3));})

#define vqrshruntq_m(p0,p1,p2,p3) __arm_vqrshruntq_m(p0,p1,p2,p3)
#define __arm_vqrshruntq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshruntq_m_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshruntq_m_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3));})

/* vqshrnbq_m / vqshrntq_m / vrshrnbq_m: standard four-way (s16/s32/u16/u32)
   narrowing-shift dispatch keyed on the (result, input) vector-type pair.  */
#define vqshrnbq_m(p0,p1,p2,p3) __arm_vqshrnbq_m(p0,p1,p2,p3)
#define __arm_vqshrnbq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshrnbq_m_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshrnbq_m_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqshrnbq_m_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqshrnbq_m_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})

#define vqshrntq_m(p0,p1,p2,p3) __arm_vqshrntq_m(p0,p1,p2,p3)
#define __arm_vqshrntq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshrntq_m_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshrntq_m_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqshrntq_m_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqshrntq_m_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})

#define vrshrnbq_m(p0,p1,p2,p3) __arm_vrshrnbq_m(p0,p1,p2,p3)
#define __arm_vrshrnbq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vrshrnbq_m_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vrshrnbq_m_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vrshrnbq_m_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vrshrnbq_m_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})

/* vmlaldavaq_p / vmlaldavaxq_p: three-operand dispatch; the scalar
   accumulator (p0, 64-bit) participates in overload selection together with
   the two vector operands.  */
#define vmlaldavaq_p(p0,p1,p2,p3) __arm_vmlaldavaq_p(p0,p1,p2,p3)
#define __arm_vmlaldavaq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int64_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlaldavaq_p_s16 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int64_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlaldavaq_p_s32 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint64_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmlaldavaq_p_u16 (__ARM_mve_coerce(__p0, uint64_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint64_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmlaldavaq_p_u32 (__ARM_mve_coerce(__p0, uint64_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})

#define vmlaldavaxq_p(p0,p1,p2,p3) __arm_vmlaldavaxq_p(p0,p1,p2,p3)
#define __arm_vmlaldavaxq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int64_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlaldavaxq_p_s16 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int64_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlaldavaxq_p_s32 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint64_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmlaldavaxq_p_u16 (__ARM_mve_coerce(__p0, uint64_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint64_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmlaldavaxq_p_u32 (__ARM_mve_coerce(__p0, uint64_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})

/* vmlsldavaq_p / vmlsldavaxq_p: signed-only overloads; unlike the vmlaldava*
   macros above, selection keys only on the two vector operands and the
   accumulator __p0 is forwarded without coercion.  */
#define vmlsldavaq_p(p0,p1,p2,p3) __arm_vmlsldavaq_p(p0,p1,p2,p3)
#define __arm_vmlsldavaq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsldavaq_p_s16(__p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsldavaq_p_s32(__p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})

#define vmlsldavaxq_p(p0,p1,p2,p3) __arm_vmlsldavaxq_p(p0,p1,p2,p3)
#define __arm_vmlsldavaxq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsldavaxq_p_s16(__p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsldavaxq_p_s32(__p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})

/* vrmlaldavhaq_p: s32/u32 dispatch on (accumulator, vector, vector) types.  */
#define vrmlaldavhaq_p(p0,p1,p2,p3) __arm_vrmlaldavhaq_p(p0,p1,p2,p3)
#define __arm_vrmlaldavhaq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int64_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrmlaldavhaq_p_s32 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint64_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrmlaldavhaq_p_u32 (__ARM_mve_coerce(__p0, uint64_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})

/* The following three intrinsics have a single (s32) overload, so no
   _Generic dispatch is needed — the wrapper forwards directly.  */
#define vrmlaldavhaxq_p(p0,p1,p2,p3) __arm_vrmlaldavhaxq_p(p0,p1,p2,p3)
#define __arm_vrmlaldavhaxq_p(p0,p1,p2,p3) __arm_vrmlaldavhaxq_p_s32(p0,p1,p2,p3)

#define vrmlsldavhaq_p(p0,p1,p2,p3) __arm_vrmlsldavhaq_p(p0,p1,p2,p3)
#define __arm_vrmlsldavhaq_p(p0,p1,p2,p3) __arm_vrmlsldavhaq_p_s32(p0,p1,p2,p3)

#define vrmlsldavhaxq_p(p0,p1,p2,p3) __arm_vrmlsldavhaxq_p(p0,p1,p2,p3)
#define __arm_vrmlsldavhaxq_p(p0,p1,p2,p3) __arm_vrmlsldavhaxq_p_s32(p0,p1,p2,p3)

/* vqdmladhq_m / vqdmladhxq_m / vqdmlsdhq_m / vqdmlsdhxq_m: signed-only,
   three matching vector operands (s8/s16/s32) select the overload.  */
#define vqdmladhq_m(p0,p1,p2,p3) __arm_vqdmladhq_m(p0,p1,p2,p3)
#define __arm_vqdmladhq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmladhq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmladhq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmladhq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})

#define vqdmladhxq_m(p0,p1,p2,p3) __arm_vqdmladhxq_m(p0,p1,p2,p3)
#define __arm_vqdmladhxq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmladhxq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmladhxq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmladhxq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})

#define vqdmlsdhq_m(p0,p1,p2,p3) __arm_vqdmlsdhq_m(p0,p1,p2,p3)
#define __arm_vqdmlsdhq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmlsdhq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmlsdhq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmlsdhq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})

#define vqdmlsdhxq_m(p0,p1,p2,p3) __arm_vqdmlsdhxq_m(p0,p1,p2,p3)
#define __arm_vqdmlsdhxq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmlsdhxq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmlsdhxq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmlsdhxq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})

/* vqabsq_m: signed-only (s8/s16/s32) dispatch on (inactive, input) types.  */
#define vqabsq_m(p0,p1,p2) __arm_vqabsq_m(p0,p1,p2)
#define __arm_vqabsq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqabsq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqabsq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqabsq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})

/* vmvnq_m: ten overloads — six vector/vector forms plus four vector/scalar
   (_n) forms where the scalar second operand is matched as plain int via
   __ARM_mve_coerce1.  */
#define vmvnq_m(p0,p1,p2) __arm_vmvnq_m(p0,p1,p2)
#define __arm_vmvnq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmvnq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmvnq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmvnq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmvnq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmvnq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmvnq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32_t]: __arm_vmvnq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce1(__p1, int), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vmvnq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce1(__p1, int), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32_t]: __arm_vmvnq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce1(__p1, int), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32_t]: __arm_vmvnq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce1(__p1, int), p2));})

/* vorrq_m_n: dispatch on the destination vector type only; the immediate p1
   and predicate p2 are forwarded as-is.  */
#define vorrq_m_n(p0,p1,p2) __arm_vorrq_m_n(p0,p1,p2)
#define __arm_vorrq_m_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vorrq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vorrq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vorrq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vorrq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})

/* vqshrunbq (unpredicated): unsigned-result, signed-input overloads.  */
#define vqshrunbq(p0,p1,p2) __arm_vqshrunbq(p0,p1,p2)
#define __arm_vqshrunbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshrunbq_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshrunbq_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})

/* vqshluq_m: unsigned result from signed input (s8/s16/s32 overloads).  */
#define vqshluq_m(p0,p1,p2,p3) __arm_vqshluq_m(p0,p1,p2,p3)
#define __arm_vqshluq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqshluq_m_n_s8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqshluq_m_n_s16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqshluq_m_n_s32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3));})

/* vshlq_m: vector shift by a signed vector of per-lane shift counts (note the
   unsigned overloads still take a signed shift vector __p2).  */
#define vshlq_m(p0,p1,p2,p3) __arm_vshlq_m(p0,p1,p2,p3)
#define __arm_vshlq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vshlq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vshlq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vshlq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vshlq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vshlq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vshlq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})

/* vshlq_m_n: shift by immediate p2; dispatch on (inactive, input) types.  */
#define vshlq_m_n(p0,p1,p2,p3) __arm_vshlq_m_n(p0,p1,p2,p3)
#define __arm_vshlq_m_n(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vshlq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vshlq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vshlq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vshlq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vshlq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vshlq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})

/* vshlq_m_r: shift by a scalar register value; dispatch on p0 only.  */
#define vshlq_m_r(p0,p1,p2) __arm_vshlq_m_r(p0,p1,p2)
#define __arm_vshlq_m_r(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlq_m_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlq_m_r_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlq_m_r_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlq_m_r_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlq_m_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlq_m_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})

/* vsriq_m: six overloads (s8..u32) on matching (inactive, input) vectors.  */
#define vsriq_m(p0,p1,p2,p3) __arm_vsriq_m(p0,p1,p2,p3)
#define __arm_vsriq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsriq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsriq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsriq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsriq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsriq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsriq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})

/* vhaddq_m: twelve overloads — six vector/scalar (_n) forms listed first,
   then six vector/vector forms, all keyed on the three operand types.  */
#define vhaddq_m(p0,p1,p2,p3) __arm_vhaddq_m(p0,p1,p2,p3)
#define __arm_vhaddq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vhaddq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vhaddq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vhaddq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vhaddq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vhaddq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vhaddq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhaddq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhaddq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhaddq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vhaddq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vhaddq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vhaddq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})

/* vhcaddq_rot270_m / vhcaddq_rot90_m: signed-only (s8/s16/s32) three-vector
   dispatch.  */
#define vhcaddq_rot270_m(p0,p1,p2,p3) __arm_vhcaddq_rot270_m(p0,p1,p2,p3)
#define __arm_vhcaddq_rot270_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhcaddq_rot270_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhcaddq_rot270_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhcaddq_rot270_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})

#define vhcaddq_rot90_m(p0,p1,p2,p3) __arm_vhcaddq_rot90_m(p0,p1,p2,p3)
#define __arm_vhcaddq_rot90_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhcaddq_rot90_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhcaddq_rot90_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhcaddq_rot90_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})

/* Predicated halving subtract.  Vector/vector operand combinations map to
   __arm_vhsubq_m_<type>; vector/scalar combinations (third type-id is a
   scalar) map to the _n_ variants.  */
#define vhsubq_m(p0,p1,p2,p3) __arm_vhsubq_m(p0,p1,p2,p3)
#define __arm_vhsubq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhsubq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhsubq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhsubq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vhsubq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vhsubq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vhsubq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vhsubq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vhsubq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vhsubq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vhsubq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vhsubq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vhsubq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3));})
f2170a37 | 26060 | |
532e9e24 SP |
/* Predicated maximum.  _Generic dispatch over the six integer vector type
   combinations (s8/s16/s32/u8/u16/u32); vector operands only.  */
#define vmaxq_m(p0,p1,p2,p3) __arm_vmaxq_m(p0,p1,p2,p3)
#define __arm_vmaxq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmaxq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmaxq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmaxq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmaxq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmaxq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmaxq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
f2170a37 | 26072 | |
532e9e24 SP |
/* Predicated minimum.  Same six-way integer vector dispatch as vmaxq_m.  */
#define vminq_m(p0,p1,p2,p3) __arm_vminq_m(p0,p1,p2,p3)
#define __arm_vminq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vminq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vminq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vminq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vminq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vminq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vminq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
f2170a37 | 26084 | |
532e9e24 SP |
/* Predicated multiply-accumulate (vector * scalar + vector).  All cases
   take a scalar third operand, so only the _n_ intrinsics are dispatched.  */
#define vmlaq_m(p0,p1,p2,p3) __arm_vmlaq_m(p0,p1,p2,p3)
#define __arm_vmlaq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vmlaq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vmlaq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vmlaq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vmlaq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vmlaq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vmlaq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3));})
f2170a37 | 26096 | |
532e9e24 SP |
/* Predicated multiply-accumulate-scalar (vector * vector + scalar).  Scalar
   third operand in every case, so dispatch is to the _n_ intrinsics only.  */
#define vmlasq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vmlasq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vmlasq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vmlasq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vmlasq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vmlasq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vmlasq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3));})
f2170a37 | 26108 | |
532e9e24 SP |
/* Predicated multiply returning high half.  Six-way integer vector dispatch
   (s8/s16/s32/u8/u16/u32), vector operands only.  */
#define vmulhq_m(p0,p1,p2,p3) __arm_vmulhq_m(p0,p1,p2,p3)
#define __arm_vmulhq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulhq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulhq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulhq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulhq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulhq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulhq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
f2170a37 | 26120 | |
532e9e24 SP |
/* Predicated widening multiply (bottom halves).  Note the inactive-value
   operand p0 is the double-width result type (e.g. int16x8_t for the s8
   case), while p1/p2 are the narrow source vectors.  */
#define vmullbq_int_m(p0,p1,p2,p3) __arm_vmullbq_int_m(p0,p1,p2,p3)
#define __arm_vmullbq_int_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmullbq_int_m_s8 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmullbq_int_m_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int64x2_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmullbq_int_m_s32 (__ARM_mve_coerce(__p0, int64x2_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmullbq_int_m_u8 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmullbq_int_m_u16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint64x2_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmullbq_int_m_u32 (__ARM_mve_coerce(__p0, uint64x2_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
f2170a37 | 26132 | |
532e9e24 SP |
/* Predicated widening multiply (top halves).  Same double-width p0 /
   narrow p1,p2 dispatch pattern as vmullbq_int_m.  */
#define vmulltq_int_m(p0,p1,p2,p3) __arm_vmulltq_int_m(p0,p1,p2,p3)
#define __arm_vmulltq_int_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulltq_int_m_s8 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulltq_int_m_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int64x2_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulltq_int_m_s32 (__ARM_mve_coerce(__p0, int64x2_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulltq_int_m_u8 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulltq_int_m_u16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint64x2_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulltq_int_m_u32 (__ARM_mve_coerce(__p0, uint64x2_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
f2170a37 | 26144 | |
532e9e24 SP |
/* Predicated widening polynomial multiply (top halves).  Only the p8 and
   p16 polynomial element sizes exist; operands are unsigned vectors with a
   double-width p0.  */
#define vmulltq_poly_m(p0,p1,p2,p3) __arm_vmulltq_poly_m(p0,p1,p2,p3)
#define __arm_vmulltq_poly_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulltq_poly_m_p8 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulltq_poly_m_p16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3));})
e3678b44 | 26152 | |
532e9e24 SP |
/* Predicated saturating add.  Vector/scalar combinations (scalar third
   type-id) dispatch to the _n_ intrinsics, vector/vector combinations to
   the plain type-specific intrinsics.  */
#define vqaddq_m(p0,p1,p2,p3) __arm_vqaddq_m(p0,p1,p2,p3)
#define __arm_vqaddq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vqaddq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vqaddq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vqaddq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vqaddq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vqaddq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vqaddq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqaddq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqaddq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqaddq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vqaddq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vqaddq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vqaddq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
8eb3b6b9 | 26170 | |
532e9e24 SP |
/* Predicated saturating doubling multiply-accumulate with scalar.  Signed
   types only, scalar third operand (_n_ variants).  */
#define vqdmlahq_m(p0,p1,p2,p3) __arm_vqdmlahq_m(p0,p1,p2,p3)
#define __arm_vqdmlahq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vqdmlahq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vqdmlahq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vqdmlahq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3));})
8eb3b6b9 | 26179 | |
532e9e24 SP |
/* Predicated saturating doubling multiply returning high half.  Signed
   only; scalar third operand selects the _n_ variant, vector selects the
   plain variant.  */
#define vqdmulhq_m(p0,p1,p2,p3) __arm_vqdmulhq_m(p0,p1,p2,p3)
#define __arm_vqdmulhq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vqdmulhq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vqdmulhq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vqdmulhq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmulhq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmulhq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmulhq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
8eb3b6b9 | 26191 | |
532e9e24 SP |
/* Predicated saturating doubling widening multiply (bottom halves).
   Signed 16/32-bit sources only; p0 is the double-width inactive value.
   Vector/vector cases first, then vector/scalar (_n_) cases.  */
#define vqdmullbq_m(p0,p1,p2,p3) __arm_vqdmullbq_m(p0,p1,p2,p3)
#define __arm_vqdmullbq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmullbq_m_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int64x2_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmullbq_m_s32 (__ARM_mve_coerce(__p0, int64x2_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vqdmullbq_m_n_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
  int (*)[__ARM_mve_type_int64x2_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vqdmullbq_m_n_s32 (__ARM_mve_coerce(__p0, int64x2_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3));})
8eb3b6b9 | 26201 | |
532e9e24 SP |
/* Predicated saturating doubling widening multiply (top halves).  Same
   shape as vqdmullbq_m; here the _n_ (scalar) cases are listed first.  */
#define vqdmulltq_m(p0,p1,p2,p3) __arm_vqdmulltq_m(p0,p1,p2,p3)
#define __arm_vqdmulltq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vqdmulltq_m_n_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
  int (*)[__ARM_mve_type_int64x2_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vqdmulltq_m_n_s32 (__ARM_mve_coerce(__p0, int64x2_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmulltq_m_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int64x2_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmulltq_m_s32 (__ARM_mve_coerce(__p0, int64x2_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
e3678b44 | 26211 | |
532e9e24 SP |
/* Predicated saturating rounding doubling multiply-add dual accumulate.
   Signed vector operands only (s8/s16/s32).  */
#define vqrdmladhq_m(p0,p1,p2,p3) __arm_vqrdmladhq_m(p0,p1,p2,p3)
#define __arm_vqrdmladhq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmladhq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmladhq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmladhq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
e3678b44 | 26220 | |
532e9e24 SP |
/* Exchange variant of vqrdmladhq_m; identical signed-only dispatch.  */
#define vqrdmladhxq_m(p0,p1,p2,p3) __arm_vqrdmladhxq_m(p0,p1,p2,p3)
#define __arm_vqrdmladhxq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmladhxq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmladhxq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmladhxq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
14782c81 | 26229 | |
532e9e24 SP |
/* Predicated multiply-subtract dual accumulate across (exchange).  The
   scalar accumulator p0 is passed through uncoerced; _Generic keys only on
   the type-ids of the two vector operands p1/p2.  */
#define vmlsdavaxq_p(p0,p1,p2,p3) __arm_vmlsdavaxq_p(p0,p1,p2,p3)
#define __arm_vmlsdavaxq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmlsdavaxq_p_s8 (__p0, __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsdavaxq_p_s16 (__p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsdavaxq_p_s32 (__p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
db5db9d2 | 26238 | |
532e9e24 SP |
/* Predicated multiply-subtract dual accumulate across.  Same two-operand
   type-id dispatch as vmlsdavaxq_p; accumulator p0 passed through.  */
#define vmlsdavaq_p(p0,p1,p2,p3) __arm_vmlsdavaq_p(p0,p1,p2,p3)
#define __arm_vmlsdavaq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmlsdavaq_p_s8(__p0, __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsdavaq_p_s16(__p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsdavaq_p_s32(__p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
db5db9d2 | 26247 | |
532e9e24 SP |
/* Predicated multiply-add dual accumulate across (exchange).  Unlike the
   vmlsdav* dispatchers above, this one also keys on p0's type-id and
   coerces the accumulator to int32_t.  */
#define vmladavaxq_p(p0,p1,p2,p3) __arm_vmladavaxq_p(p0,p1,p2,p3)
#define __arm_vmladavaxq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmladavaxq_p_s8 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int32_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmladavaxq_p_s16 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmladavaxq_p_s32 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
8eb3b6b9 | 26256 | |
532e9e24 SP |
/* Predicated widening polynomial multiply (bottom halves).  p8 and p16
   element sizes; double-width p0, narrow unsigned p1/p2.  */
#define vmullbq_poly_m(p0,p1,p2,p3) __arm_vmullbq_poly_m(p0,p1,p2,p3)
#define __arm_vmullbq_poly_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmullbq_poly_m_p8 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmullbq_poly_m_p16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3));})
db5db9d2 | 26264 | |
535a8645 SP |
/* Polymorphic vldrbq_gather_offset: gather-load of bytes from BASE + the
   per-lane offsets in P1.  The pointee type of the base pointer P0
   (int8_t const * vs uint8_t const *) selects signed or unsigned
   variants, and the element width of the offset vector selects the
   result element size (_s8/_s16/_s32 or _u8/_u16/_u32).  */
#define vldrbq_gather_offset(p0,p1) __arm_vldrbq_gather_offset(p0,p1)
#define __arm_vldrbq_gather_offset(p0,p1) ({ __typeof(p0) __base = (p0); \
  __typeof(p1) __offs = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__base)][__ARM_mve_typeid(__offs)])0, \
  int (*)[__ARM_mve_type_int8_t_const_ptr][__ARM_mve_type_uint8x16_t]: __arm_vldrbq_gather_offset_s8 (__ARM_mve_coerce(__base, int8_t const *), __ARM_mve_coerce(__offs, uint8x16_t)), \
  int (*)[__ARM_mve_type_int8_t_const_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrbq_gather_offset_s16 (__ARM_mve_coerce(__base, int8_t const *), __ARM_mve_coerce(__offs, uint16x8_t)), \
  int (*)[__ARM_mve_type_int8_t_const_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrbq_gather_offset_s32 (__ARM_mve_coerce(__base, int8_t const *), __ARM_mve_coerce(__offs, uint32x4_t)), \
  int (*)[__ARM_mve_type_uint8_t_const_ptr][__ARM_mve_type_uint8x16_t]: __arm_vldrbq_gather_offset_u8 (__ARM_mve_coerce(__base, uint8_t const *), __ARM_mve_coerce(__offs, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint8_t_const_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrbq_gather_offset_u16 (__ARM_mve_coerce(__base, uint8_t const *), __ARM_mve_coerce(__offs, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint8_t_const_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrbq_gather_offset_u32 (__ARM_mve_coerce(__base, uint8_t const *), __ARM_mve_coerce(__offs, uint32x4_t)));})
26275 | ||
92f80065 SP |
/* Polymorphic predicated vidupq (incrementing vector dup).  Dispatches
   on the type of the inactive vector P0 and of the start value P1: a
   uint32_t start selects the immediate (_n) form, a uint32_t * start
   selects the write-back (_wb) form.  P2 is the increment immediate and
   P3 the predicate, both passed through unchanged.
   The additional __ARM_mve_type_int_n / __ARM_mve_type_int32_t arms
   accept a plain int start value (in particular literal constants such
   as vidupq_m (inactive, 0, 4, p)) by casting it to uint32_t, which
   previously failed _Generic selection with no matching association
   (same class of defect as GCC PR target/96795).  NOTE(review): relies
   on the __ARM_mve_type_int_n enumerator declared earlier in this
   header.  */
#define vidupq_m(p0,p1,p2,p3) __arm_vidupq_m(p0,p1,p2,p3)
#define __arm_vidupq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint32_t]: __arm_vidupq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint32_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32_t]: __arm_vidupq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32_t), p2, p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vidupq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t), p2, p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vidupq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), (uint32_t) __p1, p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vidupq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), (uint32_t) __p1, p2, p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vidupq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), (uint32_t) __p1, p2, p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int32_t]: __arm_vidupq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), (uint32_t) __p1, p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32_t]: __arm_vidupq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), (uint32_t) __p1, p2, p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32_t]: __arm_vidupq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), (uint32_t) __p1, p2, p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_m_wb_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_m_wb_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_m_wb_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3));})
26286 | ||
/* Polymorphic predicated vddupq (decrementing vector dup).  Dispatches
   on the type of the inactive vector P0 and of the start value P1: a
   uint32_t start selects the immediate (_n) form, a uint32_t * start
   selects the write-back (_wb) form.  P2 is the decrement immediate and
   P3 the predicate.
   The additional __ARM_mve_type_int_n / __ARM_mve_type_int32_t arms
   accept a plain int start value (e.g. a literal constant) by casting
   it to uint32_t, which previously failed _Generic selection (same
   class of defect as GCC PR target/96795).  */
#define vddupq_m(p0,p1,p2,p3) __arm_vddupq_m(p0,p1,p2,p3)
#define __arm_vddupq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint32_t]: __arm_vddupq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint32_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32_t]: __arm_vddupq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32_t), p2, p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vddupq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t), p2, p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vddupq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), (uint32_t) __p1, p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vddupq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), (uint32_t) __p1, p2, p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vddupq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), (uint32_t) __p1, p2, p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int32_t]: __arm_vddupq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), (uint32_t) __p1, p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32_t]: __arm_vddupq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), (uint32_t) __p1, p2, p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32_t]: __arm_vddupq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), (uint32_t) __p1, p2, p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_m_wb_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_m_wb_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_m_wb_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3));})
26297 | ||
/* Polymorphic vidupq returning uint16x8_t: a uint32_t start value P0
   selects the immediate (_n) form, a uint32_t * start pointer selects
   the write-back (_wb) form; P1 is the increment immediate.
   The __ARM_mve_type_int_n / __ARM_mve_type_int32_t arms accept a plain
   int start value (e.g. vidupq_u16 (0, 4)) by casting it to uint32_t,
   which previously failed _Generic selection (cf. GCC PR target/96795).  */
#define vidupq_u16(p0,p1) __arm_vidupq_u16(p0,p1)
#define __arm_vidupq_u16(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_uint32_t]: __arm_vidupq_n_u16 (__ARM_mve_coerce(__p0, uint32_t), p1), \
  int (*)[__ARM_mve_type_int_n]: __arm_vidupq_n_u16 ((uint32_t) __p0, p1), \
  int (*)[__ARM_mve_type_int32_t]: __arm_vidupq_n_u16 ((uint32_t) __p0, p1), \
  int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_wb_u16 (__ARM_mve_coerce(__p0, uint32_t *), p1));})
26303 | ||
/* Polymorphic vidupq returning uint32x4_t: a uint32_t start value P0
   selects the immediate (_n) form, a uint32_t * start pointer selects
   the write-back (_wb) form; P1 is the increment immediate.
   The __ARM_mve_type_int_n / __ARM_mve_type_int32_t arms accept a plain
   int start value (e.g. a literal constant) by casting it to uint32_t,
   which previously failed _Generic selection (cf. GCC PR target/96795).  */
#define vidupq_u32(p0,p1) __arm_vidupq_u32(p0,p1)
#define __arm_vidupq_u32(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_uint32_t]: __arm_vidupq_n_u32 (__ARM_mve_coerce(__p0, uint32_t), p1), \
  int (*)[__ARM_mve_type_int_n]: __arm_vidupq_n_u32 ((uint32_t) __p0, p1), \
  int (*)[__ARM_mve_type_int32_t]: __arm_vidupq_n_u32 ((uint32_t) __p0, p1), \
  int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_wb_u32 (__ARM_mve_coerce(__p0, uint32_t *), p1));})
26309 | ||
/* Polymorphic vidupq returning uint8x16_t: a uint32_t start value P0
   selects the immediate (_n) form, a uint32_t * start pointer selects
   the write-back (_wb) form; P1 is the increment immediate.
   The __ARM_mve_type_int_n / __ARM_mve_type_int32_t arms accept a plain
   int start value (e.g. a literal constant) by casting it to uint32_t,
   which previously failed _Generic selection (cf. GCC PR target/96795).  */
#define vidupq_u8(p0,p1) __arm_vidupq_u8(p0,p1)
#define __arm_vidupq_u8(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_uint32_t]: __arm_vidupq_n_u8 (__ARM_mve_coerce(__p0, uint32_t), p1), \
  int (*)[__ARM_mve_type_int_n]: __arm_vidupq_n_u8 ((uint32_t) __p0, p1), \
  int (*)[__ARM_mve_type_int32_t]: __arm_vidupq_n_u8 ((uint32_t) __p0, p1), \
  int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_wb_u8 (__ARM_mve_coerce(__p0, uint32_t *), p1));})
26315 | ||
/* Polymorphic vddupq returning uint16x8_t: a uint32_t start value P0
   selects the immediate (_n) form, a uint32_t * start pointer selects
   the write-back (_wb) form; P1 is the decrement immediate.
   The __ARM_mve_type_int_n / __ARM_mve_type_int32_t arms accept a plain
   int start value (e.g. a literal constant) by casting it to uint32_t,
   which previously failed _Generic selection (cf. GCC PR target/96795).  */
#define vddupq_u16(p0,p1) __arm_vddupq_u16(p0,p1)
#define __arm_vddupq_u16(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_uint32_t]: __arm_vddupq_n_u16 (__ARM_mve_coerce(__p0, uint32_t), p1), \
  int (*)[__ARM_mve_type_int_n]: __arm_vddupq_n_u16 ((uint32_t) __p0, p1), \
  int (*)[__ARM_mve_type_int32_t]: __arm_vddupq_n_u16 ((uint32_t) __p0, p1), \
  int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_wb_u16 (__ARM_mve_coerce(__p0, uint32_t *), p1));})
26321 | ||
/* Polymorphic vddupq returning uint32x4_t: a uint32_t start value P0
   selects the immediate (_n) form, a uint32_t * start pointer selects
   the write-back (_wb) form; P1 is the decrement immediate.
   The __ARM_mve_type_int_n / __ARM_mve_type_int32_t arms accept a plain
   int start value (e.g. a literal constant) by casting it to uint32_t,
   which previously failed _Generic selection (cf. GCC PR target/96795).  */
#define vddupq_u32(p0,p1) __arm_vddupq_u32(p0,p1)
#define __arm_vddupq_u32(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_uint32_t]: __arm_vddupq_n_u32 (__ARM_mve_coerce(__p0, uint32_t), p1), \
  int (*)[__ARM_mve_type_int_n]: __arm_vddupq_n_u32 ((uint32_t) __p0, p1), \
  int (*)[__ARM_mve_type_int32_t]: __arm_vddupq_n_u32 ((uint32_t) __p0, p1), \
  int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_wb_u32 (__ARM_mve_coerce(__p0, uint32_t *), p1));})
26327 | ||
/* Polymorphic vddupq returning uint8x16_t: a uint32_t start value P0
   selects the immediate (_n) form, a uint32_t * start pointer selects
   the write-back (_wb) form; P1 is the decrement immediate.
   The __ARM_mve_type_int_n / __ARM_mve_type_int32_t arms accept a plain
   int start value (e.g. a literal constant) by casting it to uint32_t,
   which previously failed _Generic selection (cf. GCC PR target/96795).  */
#define vddupq_u8(p0,p1) __arm_vddupq_u8(p0,p1)
#define __arm_vddupq_u8(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_uint32_t]: __arm_vddupq_n_u8 (__ARM_mve_coerce(__p0, uint32_t), p1), \
  int (*)[__ARM_mve_type_int_n]: __arm_vddupq_n_u8 ((uint32_t) __p0, p1), \
  int (*)[__ARM_mve_type_int32_t]: __arm_vddupq_n_u8 ((uint32_t) __p0, p1), \
  int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_wb_u8 (__ARM_mve_coerce(__p0, uint32_t *), p1));})
26333 | ||
/* Polymorphic predicated viwdupq (incrementing-with-wrap vector dup).
   Dispatches on the type of the inactive vector P0 and of the start
   value P1: a uint32_t start selects the immediate (_n) form, a
   uint32_t * start selects the write-back (_wb) form.  P2 is the wrap
   limit, P3 the increment immediate, P4 the predicate.
   The additional __ARM_mve_type_int_n / __ARM_mve_type_int32_t arms
   accept a plain int start value (e.g. a literal constant) by casting
   it to uint32_t, which previously failed _Generic selection (same
   class of defect as GCC PR target/96795).  */
#define viwdupq_m(p0,p1,p2,p3,p4) __arm_viwdupq_m(p0,p1,p2,p3,p4)
#define __arm_viwdupq_m(p0,p1,p2,p3,p4) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint32_t]: __arm_viwdupq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint32_t), p2, p3, p4), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32_t]: __arm_viwdupq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32_t), p2, p3, p4), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_viwdupq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t), p2, p3, p4), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_viwdupq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), (uint32_t) __p1, p2, p3, p4), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_viwdupq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), (uint32_t) __p1, p2, p3, p4), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_viwdupq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), (uint32_t) __p1, p2, p3, p4), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int32_t]: __arm_viwdupq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), (uint32_t) __p1, p2, p3, p4), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32_t]: __arm_viwdupq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), (uint32_t) __p1, p2, p3, p4), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32_t]: __arm_viwdupq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), (uint32_t) __p1, p2, p3, p4), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_m_wb_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_m_wb_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_m_wb_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4));})
26344 | ||
/* Polymorphic viwdupq returning uint16x8_t: a uint32_t start value P0
   selects the immediate (_n) form, a uint32_t * start pointer selects
   the write-back (_wb) form; P1 is the wrap limit, P2 the increment.
   The __ARM_mve_type_int_n / __ARM_mve_type_int32_t arms accept a plain
   int start value (e.g. a literal constant) by casting it to uint32_t,
   which previously failed _Generic selection (cf. GCC PR target/96795).  */
#define viwdupq_u16(p0,p1,p2) __arm_viwdupq_u16(p0,p1,p2)
#define __arm_viwdupq_u16(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_uint32_t]: __arm_viwdupq_n_u16 (__ARM_mve_coerce(__p0, uint32_t), p1, p2), \
  int (*)[__ARM_mve_type_int_n]: __arm_viwdupq_n_u16 ((uint32_t) __p0, p1, p2), \
  int (*)[__ARM_mve_type_int32_t]: __arm_viwdupq_n_u16 ((uint32_t) __p0, p1, p2), \
  int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_wb_u16 (__ARM_mve_coerce(__p0, uint32_t *), p1, p2));})
26350 | ||
/* Polymorphic viwdupq returning uint32x4_t: a uint32_t start value P0
   selects the immediate (_n) form, a uint32_t * start pointer selects
   the write-back (_wb) form; P1 is the wrap limit, P2 the increment.
   The __ARM_mve_type_int_n / __ARM_mve_type_int32_t arms accept a plain
   int start value (e.g. a literal constant) by casting it to uint32_t,
   which previously failed _Generic selection (cf. GCC PR target/96795).  */
#define viwdupq_u32(p0,p1,p2) __arm_viwdupq_u32(p0,p1,p2)
#define __arm_viwdupq_u32(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_uint32_t]: __arm_viwdupq_n_u32 (__ARM_mve_coerce(__p0, uint32_t), p1, p2), \
  int (*)[__ARM_mve_type_int_n]: __arm_viwdupq_n_u32 ((uint32_t) __p0, p1, p2), \
  int (*)[__ARM_mve_type_int32_t]: __arm_viwdupq_n_u32 ((uint32_t) __p0, p1, p2), \
  int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_wb_u32 (__ARM_mve_coerce(__p0, uint32_t *), p1, p2));})
26356 | ||
/* Polymorphic viwdupq returning uint8x16_t: a uint32_t start value P0
   selects the immediate (_n) form, a uint32_t * start pointer selects
   the write-back (_wb) form; P1 is the wrap limit, P2 the increment.
   The __ARM_mve_type_int_n / __ARM_mve_type_int32_t arms accept a plain
   int start value (e.g. a literal constant) by casting it to uint32_t,
   which previously failed _Generic selection (cf. GCC PR target/96795).  */
#define viwdupq_u8(p0,p1,p2) __arm_viwdupq_u8(p0,p1,p2)
#define __arm_viwdupq_u8(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_uint32_t]: __arm_viwdupq_n_u8 (__ARM_mve_coerce(__p0, uint32_t), p1, p2), \
  int (*)[__ARM_mve_type_int_n]: __arm_viwdupq_n_u8 ((uint32_t) __p0, p1, p2), \
  int (*)[__ARM_mve_type_int32_t]: __arm_viwdupq_n_u8 ((uint32_t) __p0, p1, p2), \
  int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_wb_u8 (__ARM_mve_coerce(__p0, uint32_t *), p1, p2));})
26362 | ||
/* Polymorphic predicated vdwdupq (decrementing-with-wrap vector dup).
   Dispatches on the type of the inactive vector P0 and of the start
   value P1: a uint32_t start selects the immediate (_n) form, a
   uint32_t * start selects the write-back (_wb) form.  P2 is the wrap
   limit, P3 the decrement immediate, P4 the predicate.
   The additional __ARM_mve_type_int_n / __ARM_mve_type_int32_t arms
   accept a plain int start value (e.g. a literal constant) by casting
   it to uint32_t, which previously failed _Generic selection (same
   class of defect as GCC PR target/96795).  */
#define vdwdupq_m(p0,p1,p2,p3,p4) __arm_vdwdupq_m(p0,p1,p2,p3,p4)
#define __arm_vdwdupq_m(p0,p1,p2,p3,p4) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint32_t]: __arm_vdwdupq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint32_t), p2, p3, p4), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32_t]: __arm_vdwdupq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32_t), p2, p3, p4), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vdwdupq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t), p2, p3, p4), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vdwdupq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), (uint32_t) __p1, p2, p3, p4), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vdwdupq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), (uint32_t) __p1, p2, p3, p4), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vdwdupq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), (uint32_t) __p1, p2, p3, p4), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int32_t]: __arm_vdwdupq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), (uint32_t) __p1, p2, p3, p4), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32_t]: __arm_vdwdupq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), (uint32_t) __p1, p2, p3, p4), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32_t]: __arm_vdwdupq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), (uint32_t) __p1, p2, p3, p4), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_m_wb_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_m_wb_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_m_wb_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4));})
26373 | ||
/* Polymorphic vdwdupq returning uint16x8_t: a uint32_t start value P0
   selects the immediate (_n) form, a uint32_t * start pointer selects
   the write-back (_wb) form; P1 is the wrap limit, P2 the decrement.
   The __ARM_mve_type_int_n / __ARM_mve_type_int32_t arms accept a plain
   int start value (e.g. a literal constant) by casting it to uint32_t,
   which previously failed _Generic selection (cf. GCC PR target/96795).  */
#define vdwdupq_u16(p0,p1,p2) __arm_vdwdupq_u16(p0,p1,p2)
#define __arm_vdwdupq_u16(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_uint32_t]: __arm_vdwdupq_n_u16 (__ARM_mve_coerce(__p0, uint32_t), p1, p2), \
  int (*)[__ARM_mve_type_int_n]: __arm_vdwdupq_n_u16 ((uint32_t) __p0, p1, p2), \
  int (*)[__ARM_mve_type_int32_t]: __arm_vdwdupq_n_u16 ((uint32_t) __p0, p1, p2), \
  int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_wb_u16 (__ARM_mve_coerce(__p0, uint32_t *), p1, p2));})
26379 | ||
/* Polymorphic vdwdupq returning uint32x4_t: a uint32_t start value P0
   selects the immediate (_n) form, a uint32_t * start pointer selects
   the write-back (_wb) form; P1 is the wrap limit, P2 the decrement.
   The __ARM_mve_type_int_n / __ARM_mve_type_int32_t arms accept a plain
   int start value (e.g. a literal constant) by casting it to uint32_t,
   which previously failed _Generic selection (cf. GCC PR target/96795).  */
#define vdwdupq_u32(p0,p1,p2) __arm_vdwdupq_u32(p0,p1,p2)
#define __arm_vdwdupq_u32(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_uint32_t]: __arm_vdwdupq_n_u32 (__ARM_mve_coerce(__p0, uint32_t), p1, p2), \
  int (*)[__ARM_mve_type_int_n]: __arm_vdwdupq_n_u32 ((uint32_t) __p0, p1, p2), \
  int (*)[__ARM_mve_type_int32_t]: __arm_vdwdupq_n_u32 ((uint32_t) __p0, p1, p2), \
  int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_wb_u32 (__ARM_mve_coerce(__p0, uint32_t *), p1, p2));})
26385 | ||
/* Polymorphic vdwdupq returning uint8x16_t: a uint32_t start value P0
   selects the immediate (_n) form, a uint32_t * start pointer selects
   the write-back (_wb) form; P1 is the wrap limit, P2 the decrement.
   The __ARM_mve_type_int_n / __ARM_mve_type_int32_t arms accept a plain
   int start value (e.g. a literal constant) by casting it to uint32_t,
   which previously failed _Generic selection (cf. GCC PR target/96795).  */
#define vdwdupq_u8(p0,p1,p2) __arm_vdwdupq_u8(p0,p1,p2)
#define __arm_vdwdupq_u8(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_uint32_t]: __arm_vdwdupq_n_u8 (__ARM_mve_coerce(__p0, uint32_t), p1, p2), \
  int (*)[__ARM_mve_type_int_n]: __arm_vdwdupq_n_u8 ((uint32_t) __p0, p1, p2), \
  int (*)[__ARM_mve_type_int32_t]: __arm_vdwdupq_n_u8 ((uint32_t) __p0, p1, p2), \
  int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_wb_u8 (__ARM_mve_coerce(__p0, uint32_t *), p1, p2));})
26391 | ||
63c8f7d6 SP |
26392 | #ifdef __cplusplus |
26393 | } | |
26394 | #endif | |
26395 | ||
26396 | #endif /* _GCC_ARM_MVE_H. */ |