]>
Commit | Line | Data |
---|---|---|
63c8f7d6 SP |
1 | /* Arm MVE intrinsics include file. |
2 | ||
7adcbafe | 3 | Copyright (C) 2019-2022 Free Software Foundation, Inc. |
63c8f7d6 SP |
4 | Contributed by Arm. |
5 | ||
6 | This file is part of GCC. | |
7 | ||
8 | GCC is free software; you can redistribute it and/or modify it | |
9 | under the terms of the GNU General Public License as published | |
10 | by the Free Software Foundation; either version 3, or (at your | |
11 | option) any later version. | |
12 | ||
13 | GCC is distributed in the hope that it will be useful, but WITHOUT | |
14 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY | |
15 | or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public | |
16 | License for more details. | |
17 | ||
18 | You should have received a copy of the GNU General Public License | |
19 | along with GCC; see the file COPYING3. If not see | |
20 | <http://www.gnu.org/licenses/>. */ | |
21 | ||
22 | #ifndef _GCC_ARM_MVE_H | |
23 | #define _GCC_ARM_MVE_H | |
24 | ||
85244449 SP |
25 | #if __ARM_BIG_ENDIAN |
26 | #error "MVE intrinsics are not supported in Big-Endian mode." | |
3b6e79ae | 27 | #elif !__ARM_FEATURE_MVE |
63c8f7d6 | 28 | #error "MVE feature not supported" |
3b6e79ae | 29 | #else |
63c8f7d6 SP |
30 | |
31 | #include <stdint.h> | |
32 | #ifndef __cplusplus | |
33 | #include <stdbool.h> | |
34 | #endif | |
78bf9163 | 35 | #include "arm_mve_types.h" |
14782c81 SP |
36 | |
37 | #ifndef __ARM_MVE_PRESERVE_USER_NAMESPACE | |
6a90680b ASDV |
38 | #define vst4q(__addr, __value) __arm_vst4q(__addr, __value) |
39 | #define vdupq_n(__a) __arm_vdupq_n(__a) | |
40 | #define vabsq(__a) __arm_vabsq(__a) | |
41 | #define vclsq(__a) __arm_vclsq(__a) | |
42 | #define vclzq(__a) __arm_vclzq(__a) | |
43 | #define vnegq(__a) __arm_vnegq(__a) | |
44 | #define vaddlvq(__a) __arm_vaddlvq(__a) | |
45 | #define vaddvq(__a) __arm_vaddvq(__a) | |
46 | #define vmovlbq(__a) __arm_vmovlbq(__a) | |
47 | #define vmovltq(__a) __arm_vmovltq(__a) | |
48 | #define vmvnq(__a) __arm_vmvnq(__a) | |
49 | #define vrev16q(__a) __arm_vrev16q(__a) | |
50 | #define vrev32q(__a) __arm_vrev32q(__a) | |
51 | #define vrev64q(__a) __arm_vrev64q(__a) | |
52 | #define vqabsq(__a) __arm_vqabsq(__a) | |
53 | #define vqnegq(__a) __arm_vqnegq(__a) | |
54 | #define vshrq(__a, __imm) __arm_vshrq(__a, __imm) | |
55 | #define vaddlvq_p(__a, __p) __arm_vaddlvq_p(__a, __p) | |
56 | #define vcmpneq(__a, __b) __arm_vcmpneq(__a, __b) | |
57 | #define vshlq(__a, __b) __arm_vshlq(__a, __b) | |
58 | #define vsubq(__a, __b) __arm_vsubq(__a, __b) | |
59 | #define vrmulhq(__a, __b) __arm_vrmulhq(__a, __b) | |
60 | #define vrhaddq(__a, __b) __arm_vrhaddq(__a, __b) | |
61 | #define vqsubq(__a, __b) __arm_vqsubq(__a, __b) | |
62 | #define vqaddq(__a, __b) __arm_vqaddq(__a, __b) | |
63 | #define vorrq(__a, __b) __arm_vorrq(__a, __b) | |
64 | #define vornq(__a, __b) __arm_vornq(__a, __b) | |
65 | #define vmulq(__a, __b) __arm_vmulq(__a, __b) | |
66 | #define vmulltq_int(__a, __b) __arm_vmulltq_int(__a, __b) | |
67 | #define vmullbq_int(__a, __b) __arm_vmullbq_int(__a, __b) | |
68 | #define vmulhq(__a, __b) __arm_vmulhq(__a, __b) | |
69 | #define vmladavq(__a, __b) __arm_vmladavq(__a, __b) | |
70 | #define vminvq(__a, __b) __arm_vminvq(__a, __b) | |
71 | #define vminq(__a, __b) __arm_vminq(__a, __b) | |
72 | #define vmaxvq(__a, __b) __arm_vmaxvq(__a, __b) | |
73 | #define vmaxq(__a, __b) __arm_vmaxq(__a, __b) | |
74 | #define vhsubq(__a, __b) __arm_vhsubq(__a, __b) | |
75 | #define vhaddq(__a, __b) __arm_vhaddq(__a, __b) | |
76 | #define veorq(__a, __b) __arm_veorq(__a, __b) | |
77 | #define vcmphiq(__a, __b) __arm_vcmphiq(__a, __b) | |
78 | #define vcmpeqq(__a, __b) __arm_vcmpeqq(__a, __b) | |
79 | #define vcmpcsq(__a, __b) __arm_vcmpcsq(__a, __b) | |
80 | #define vcaddq_rot90(__a, __b) __arm_vcaddq_rot90(__a, __b) | |
81 | #define vcaddq_rot270(__a, __b) __arm_vcaddq_rot270(__a, __b) | |
82 | #define vbicq(__a, __b) __arm_vbicq(__a, __b) | |
83 | #define vandq(__a, __b) __arm_vandq(__a, __b) | |
84 | #define vaddvq_p(__a, __p) __arm_vaddvq_p(__a, __p) | |
85 | #define vaddvaq(__a, __b) __arm_vaddvaq(__a, __b) | |
86 | #define vaddq(__a, __b) __arm_vaddq(__a, __b) | |
87 | #define vabdq(__a, __b) __arm_vabdq(__a, __b) | |
88 | #define vshlq_r(__a, __b) __arm_vshlq_r(__a, __b) | |
89 | #define vrshlq(__a, __b) __arm_vrshlq(__a, __b) | |
90 | #define vqshlq(__a, __b) __arm_vqshlq(__a, __b) | |
91 | #define vqshlq_r(__a, __b) __arm_vqshlq_r(__a, __b) | |
92 | #define vqrshlq(__a, __b) __arm_vqrshlq(__a, __b) | |
93 | #define vminavq(__a, __b) __arm_vminavq(__a, __b) | |
94 | #define vminaq(__a, __b) __arm_vminaq(__a, __b) | |
95 | #define vmaxavq(__a, __b) __arm_vmaxavq(__a, __b) | |
96 | #define vmaxaq(__a, __b) __arm_vmaxaq(__a, __b) | |
97 | #define vbrsrq(__a, __b) __arm_vbrsrq(__a, __b) | |
98 | #define vshlq_n(__a, __imm) __arm_vshlq_n(__a, __imm) | |
99 | #define vrshrq(__a, __imm) __arm_vrshrq(__a, __imm) | |
100 | #define vqshlq_n(__a, __imm) __arm_vqshlq_n(__a, __imm) | |
101 | #define vcmpltq(__a, __b) __arm_vcmpltq(__a, __b) | |
102 | #define vcmpleq(__a, __b) __arm_vcmpleq(__a, __b) | |
103 | #define vcmpgtq(__a, __b) __arm_vcmpgtq(__a, __b) | |
104 | #define vcmpgeq(__a, __b) __arm_vcmpgeq(__a, __b) | |
105 | #define vqshluq(__a, __imm) __arm_vqshluq(__a, __imm) | |
106 | #define vqrdmulhq(__a, __b) __arm_vqrdmulhq(__a, __b) | |
107 | #define vqdmulhq(__a, __b) __arm_vqdmulhq(__a, __b) | |
108 | #define vmlsdavxq(__a, __b) __arm_vmlsdavxq(__a, __b) | |
109 | #define vmlsdavq(__a, __b) __arm_vmlsdavq(__a, __b) | |
110 | #define vmladavxq(__a, __b) __arm_vmladavxq(__a, __b) | |
111 | #define vhcaddq_rot90(__a, __b) __arm_vhcaddq_rot90(__a, __b) | |
112 | #define vhcaddq_rot270(__a, __b) __arm_vhcaddq_rot270(__a, __b) | |
113 | #define vqmovntq(__a, __b) __arm_vqmovntq(__a, __b) | |
114 | #define vqmovnbq(__a, __b) __arm_vqmovnbq(__a, __b) | |
115 | #define vmulltq_poly(__a, __b) __arm_vmulltq_poly(__a, __b) | |
116 | #define vmullbq_poly(__a, __b) __arm_vmullbq_poly(__a, __b) | |
117 | #define vmovntq(__a, __b) __arm_vmovntq(__a, __b) | |
118 | #define vmovnbq(__a, __b) __arm_vmovnbq(__a, __b) | |
119 | #define vmlaldavq(__a, __b) __arm_vmlaldavq(__a, __b) | |
120 | #define vqmovuntq(__a, __b) __arm_vqmovuntq(__a, __b) | |
121 | #define vqmovunbq(__a, __b) __arm_vqmovunbq(__a, __b) | |
122 | #define vshlltq(__a, __imm) __arm_vshlltq(__a, __imm) | |
123 | #define vshllbq(__a, __imm) __arm_vshllbq(__a, __imm) | |
124 | #define vqdmulltq(__a, __b) __arm_vqdmulltq(__a, __b) | |
125 | #define vqdmullbq(__a, __b) __arm_vqdmullbq(__a, __b) | |
126 | #define vmlsldavxq(__a, __b) __arm_vmlsldavxq(__a, __b) | |
127 | #define vmlsldavq(__a, __b) __arm_vmlsldavq(__a, __b) | |
128 | #define vmlaldavxq(__a, __b) __arm_vmlaldavxq(__a, __b) | |
129 | #define vrmlaldavhq(__a, __b) __arm_vrmlaldavhq(__a, __b) | |
130 | #define vaddlvaq(__a, __b) __arm_vaddlvaq(__a, __b) | |
131 | #define vrmlsldavhxq(__a, __b) __arm_vrmlsldavhxq(__a, __b) | |
132 | #define vrmlsldavhq(__a, __b) __arm_vrmlsldavhq(__a, __b) | |
133 | #define vrmlaldavhxq(__a, __b) __arm_vrmlaldavhxq(__a, __b) | |
134 | #define vabavq(__a, __b, __c) __arm_vabavq(__a, __b, __c) | |
135 | #define vbicq_m_n(__a, __imm, __p) __arm_vbicq_m_n(__a, __imm, __p) | |
136 | #define vqrshrnbq(__a, __b, __imm) __arm_vqrshrnbq(__a, __b, __imm) | |
137 | #define vqrshrunbq(__a, __b, __imm) __arm_vqrshrunbq(__a, __b, __imm) | |
138 | #define vrmlaldavhaq(__a, __b, __c) __arm_vrmlaldavhaq(__a, __b, __c) | |
139 | #define vshlcq(__a, __b, __imm) __arm_vshlcq(__a, __b, __imm) | |
140 | #define vpselq(__a, __b, __p) __arm_vpselq(__a, __b, __p) | |
141 | #define vrev64q_m(__inactive, __a, __p) __arm_vrev64q_m(__inactive, __a, __p) | |
142 | #define vqrdmlashq(__a, __b, __c) __arm_vqrdmlashq(__a, __b, __c) | |
143 | #define vqrdmlahq(__a, __b, __c) __arm_vqrdmlahq(__a, __b, __c) | |
afb198ee | 144 | #define vqdmlashq(__a, __b, __c) __arm_vqdmlashq(__a, __b, __c) |
6a90680b ASDV |
145 | #define vqdmlahq(__a, __b, __c) __arm_vqdmlahq(__a, __b, __c) |
146 | #define vmvnq_m(__inactive, __a, __p) __arm_vmvnq_m(__inactive, __a, __p) | |
147 | #define vmlasq(__a, __b, __c) __arm_vmlasq(__a, __b, __c) | |
148 | #define vmlaq(__a, __b, __c) __arm_vmlaq(__a, __b, __c) | |
149 | #define vmladavq_p(__a, __b, __p) __arm_vmladavq_p(__a, __b, __p) | |
150 | #define vmladavaq(__a, __b, __c) __arm_vmladavaq(__a, __b, __c) | |
151 | #define vminvq_p(__a, __b, __p) __arm_vminvq_p(__a, __b, __p) | |
152 | #define vmaxvq_p(__a, __b, __p) __arm_vmaxvq_p(__a, __b, __p) | |
153 | #define vdupq_m(__inactive, __a, __p) __arm_vdupq_m(__inactive, __a, __p) | |
154 | #define vcmpneq_m(__a, __b, __p) __arm_vcmpneq_m(__a, __b, __p) | |
155 | #define vcmphiq_m(__a, __b, __p) __arm_vcmphiq_m(__a, __b, __p) | |
156 | #define vcmpeqq_m(__a, __b, __p) __arm_vcmpeqq_m(__a, __b, __p) | |
157 | #define vcmpcsq_m(__a, __b, __p) __arm_vcmpcsq_m(__a, __b, __p) | |
158 | #define vcmpcsq_m_n(__a, __b, __p) __arm_vcmpcsq_m_n(__a, __b, __p) | |
159 | #define vclzq_m(__inactive, __a, __p) __arm_vclzq_m(__inactive, __a, __p) | |
160 | #define vaddvaq_p(__a, __b, __p) __arm_vaddvaq_p(__a, __b, __p) | |
161 | #define vsriq(__a, __b, __imm) __arm_vsriq(__a, __b, __imm) | |
162 | #define vsliq(__a, __b, __imm) __arm_vsliq(__a, __b, __imm) | |
163 | #define vshlq_m_r(__a, __b, __p) __arm_vshlq_m_r(__a, __b, __p) | |
164 | #define vrshlq_m_n(__a, __b, __p) __arm_vrshlq_m_n(__a, __b, __p) | |
165 | #define vqshlq_m_r(__a, __b, __p) __arm_vqshlq_m_r(__a, __b, __p) | |
166 | #define vqrshlq_m_n(__a, __b, __p) __arm_vqrshlq_m_n(__a, __b, __p) | |
167 | #define vminavq_p(__a, __b, __p) __arm_vminavq_p(__a, __b, __p) | |
168 | #define vminaq_m(__a, __b, __p) __arm_vminaq_m(__a, __b, __p) | |
169 | #define vmaxavq_p(__a, __b, __p) __arm_vmaxavq_p(__a, __b, __p) | |
170 | #define vmaxaq_m(__a, __b, __p) __arm_vmaxaq_m(__a, __b, __p) | |
171 | #define vcmpltq_m(__a, __b, __p) __arm_vcmpltq_m(__a, __b, __p) | |
172 | #define vcmpleq_m(__a, __b, __p) __arm_vcmpleq_m(__a, __b, __p) | |
173 | #define vcmpgtq_m(__a, __b, __p) __arm_vcmpgtq_m(__a, __b, __p) | |
174 | #define vcmpgeq_m(__a, __b, __p) __arm_vcmpgeq_m(__a, __b, __p) | |
175 | #define vqnegq_m(__inactive, __a, __p) __arm_vqnegq_m(__inactive, __a, __p) | |
176 | #define vqabsq_m(__inactive, __a, __p) __arm_vqabsq_m(__inactive, __a, __p) | |
177 | #define vnegq_m(__inactive, __a, __p) __arm_vnegq_m(__inactive, __a, __p) | |
178 | #define vmlsdavxq_p(__a, __b, __p) __arm_vmlsdavxq_p(__a, __b, __p) | |
179 | #define vmlsdavq_p(__a, __b, __p) __arm_vmlsdavq_p(__a, __b, __p) | |
180 | #define vmladavxq_p(__a, __b, __p) __arm_vmladavxq_p(__a, __b, __p) | |
181 | #define vclsq_m(__inactive, __a, __p) __arm_vclsq_m(__inactive, __a, __p) | |
182 | #define vabsq_m(__inactive, __a, __p) __arm_vabsq_m(__inactive, __a, __p) | |
183 | #define vqrdmlsdhxq(__inactive, __a, __b) __arm_vqrdmlsdhxq(__inactive, __a, __b) | |
184 | #define vqrdmlsdhq(__inactive, __a, __b) __arm_vqrdmlsdhq(__inactive, __a, __b) | |
185 | #define vqrdmladhxq(__inactive, __a, __b) __arm_vqrdmladhxq(__inactive, __a, __b) | |
186 | #define vqrdmladhq(__inactive, __a, __b) __arm_vqrdmladhq(__inactive, __a, __b) | |
187 | #define vqdmlsdhxq(__inactive, __a, __b) __arm_vqdmlsdhxq(__inactive, __a, __b) | |
188 | #define vqdmlsdhq(__inactive, __a, __b) __arm_vqdmlsdhq(__inactive, __a, __b) | |
189 | #define vqdmladhxq(__inactive, __a, __b) __arm_vqdmladhxq(__inactive, __a, __b) | |
190 | #define vqdmladhq(__inactive, __a, __b) __arm_vqdmladhq(__inactive, __a, __b) | |
191 | #define vmlsdavaxq(__a, __b, __c) __arm_vmlsdavaxq(__a, __b, __c) | |
192 | #define vmlsdavaq(__a, __b, __c) __arm_vmlsdavaq(__a, __b, __c) | |
193 | #define vmladavaxq(__a, __b, __c) __arm_vmladavaxq(__a, __b, __c) | |
194 | #define vrmlaldavhaxq(__a, __b, __c) __arm_vrmlaldavhaxq(__a, __b, __c) | |
195 | #define vrmlsldavhaq(__a, __b, __c) __arm_vrmlsldavhaq(__a, __b, __c) | |
196 | #define vrmlsldavhaxq(__a, __b, __c) __arm_vrmlsldavhaxq(__a, __b, __c) | |
197 | #define vaddlvaq_p(__a, __b, __p) __arm_vaddlvaq_p(__a, __b, __p) | |
198 | #define vrev16q_m(__inactive, __a, __p) __arm_vrev16q_m(__inactive, __a, __p) | |
199 | #define vrmlaldavhq_p(__a, __b, __p) __arm_vrmlaldavhq_p(__a, __b, __p) | |
200 | #define vrmlaldavhxq_p(__a, __b, __p) __arm_vrmlaldavhxq_p(__a, __b, __p) | |
201 | #define vrmlsldavhq_p(__a, __b, __p) __arm_vrmlsldavhq_p(__a, __b, __p) | |
202 | #define vrmlsldavhxq_p(__a, __b, __p) __arm_vrmlsldavhxq_p(__a, __b, __p) | |
203 | #define vorrq_m_n(__a, __imm, __p) __arm_vorrq_m_n(__a, __imm, __p) | |
204 | #define vqrshrntq(__a, __b, __imm) __arm_vqrshrntq(__a, __b, __imm) | |
205 | #define vqshrnbq(__a, __b, __imm) __arm_vqshrnbq(__a, __b, __imm) | |
206 | #define vqshrntq(__a, __b, __imm) __arm_vqshrntq(__a, __b, __imm) | |
207 | #define vrshrnbq(__a, __b, __imm) __arm_vrshrnbq(__a, __b, __imm) | |
208 | #define vrshrntq(__a, __b, __imm) __arm_vrshrntq(__a, __b, __imm) | |
209 | #define vshrnbq(__a, __b, __imm) __arm_vshrnbq(__a, __b, __imm) | |
210 | #define vshrntq(__a, __b, __imm) __arm_vshrntq(__a, __b, __imm) | |
211 | #define vmlaldavaq(__a, __b, __c) __arm_vmlaldavaq(__a, __b, __c) | |
212 | #define vmlaldavaxq(__a, __b, __c) __arm_vmlaldavaxq(__a, __b, __c) | |
213 | #define vmlsldavaq(__a, __b, __c) __arm_vmlsldavaq(__a, __b, __c) | |
214 | #define vmlsldavaxq(__a, __b, __c) __arm_vmlsldavaxq(__a, __b, __c) | |
215 | #define vmlaldavq_p(__a, __b, __p) __arm_vmlaldavq_p(__a, __b, __p) | |
216 | #define vmlaldavxq_p(__a, __b, __p) __arm_vmlaldavxq_p(__a, __b, __p) | |
217 | #define vmlsldavq_p(__a, __b, __p) __arm_vmlsldavq_p(__a, __b, __p) | |
218 | #define vmlsldavxq_p(__a, __b, __p) __arm_vmlsldavxq_p(__a, __b, __p) | |
219 | #define vmovlbq_m(__inactive, __a, __p) __arm_vmovlbq_m(__inactive, __a, __p) | |
220 | #define vmovltq_m(__inactive, __a, __p) __arm_vmovltq_m(__inactive, __a, __p) | |
221 | #define vmovnbq_m(__a, __b, __p) __arm_vmovnbq_m(__a, __b, __p) | |
222 | #define vmovntq_m(__a, __b, __p) __arm_vmovntq_m(__a, __b, __p) | |
223 | #define vqmovnbq_m(__a, __b, __p) __arm_vqmovnbq_m(__a, __b, __p) | |
224 | #define vqmovntq_m(__a, __b, __p) __arm_vqmovntq_m(__a, __b, __p) | |
225 | #define vrev32q_m(__inactive, __a, __p) __arm_vrev32q_m(__inactive, __a, __p) | |
226 | #define vqrshruntq(__a, __b, __imm) __arm_vqrshruntq(__a, __b, __imm) | |
227 | #define vqshrunbq(__a, __b, __imm) __arm_vqshrunbq(__a, __b, __imm) | |
228 | #define vqshruntq(__a, __b, __imm) __arm_vqshruntq(__a, __b, __imm) | |
229 | #define vqmovunbq_m(__a, __b, __p) __arm_vqmovunbq_m(__a, __b, __p) | |
230 | #define vqmovuntq_m(__a, __b, __p) __arm_vqmovuntq_m(__a, __b, __p) | |
231 | #define vsriq_m(__a, __b, __imm, __p) __arm_vsriq_m(__a, __b, __imm, __p) | |
232 | #define vsubq_m(__inactive, __a, __b, __p) __arm_vsubq_m(__inactive, __a, __b, __p) | |
233 | #define vqshluq_m(__inactive, __a, __imm, __p) __arm_vqshluq_m(__inactive, __a, __imm, __p) | |
234 | #define vabavq_p(__a, __b, __c, __p) __arm_vabavq_p(__a, __b, __c, __p) | |
235 | #define vshlq_m(__inactive, __a, __b, __p) __arm_vshlq_m(__inactive, __a, __b, __p) | |
236 | #define vabdq_m(__inactive, __a, __b, __p) __arm_vabdq_m(__inactive, __a, __b, __p) | |
237 | #define vaddq_m(__inactive, __a, __b, __p) __arm_vaddq_m(__inactive, __a, __b, __p) | |
238 | #define vandq_m(__inactive, __a, __b, __p) __arm_vandq_m(__inactive, __a, __b, __p) | |
239 | #define vbicq_m(__inactive, __a, __b, __p) __arm_vbicq_m(__inactive, __a, __b, __p) | |
240 | #define vbrsrq_m(__inactive, __a, __b, __p) __arm_vbrsrq_m(__inactive, __a, __b, __p) | |
241 | #define vcaddq_rot270_m(__inactive, __a, __b, __p) __arm_vcaddq_rot270_m(__inactive, __a, __b, __p) | |
242 | #define vcaddq_rot90_m(__inactive, __a, __b, __p) __arm_vcaddq_rot90_m(__inactive, __a, __b, __p) | |
243 | #define veorq_m(__inactive, __a, __b, __p) __arm_veorq_m(__inactive, __a, __b, __p) | |
244 | #define vhaddq_m(__inactive, __a, __b, __p) __arm_vhaddq_m(__inactive, __a, __b, __p) | |
245 | #define vhcaddq_rot270_m(__inactive, __a, __b, __p) __arm_vhcaddq_rot270_m(__inactive, __a, __b, __p) | |
246 | #define vhcaddq_rot90_m(__inactive, __a, __b, __p) __arm_vhcaddq_rot90_m(__inactive, __a, __b, __p) | |
247 | #define vhsubq_m(__inactive, __a, __b, __p) __arm_vhsubq_m(__inactive, __a, __b, __p) | |
248 | #define vmaxq_m(__inactive, __a, __b, __p) __arm_vmaxq_m(__inactive, __a, __b, __p) | |
249 | #define vminq_m(__inactive, __a, __b, __p) __arm_vminq_m(__inactive, __a, __b, __p) | |
250 | #define vmladavaq_p(__a, __b, __c, __p) __arm_vmladavaq_p(__a, __b, __c, __p) | |
251 | #define vmladavaxq_p(__a, __b, __c, __p) __arm_vmladavaxq_p(__a, __b, __c, __p) | |
252 | #define vmlaq_m(__a, __b, __c, __p) __arm_vmlaq_m(__a, __b, __c, __p) | |
253 | #define vmlasq_m(__a, __b, __c, __p) __arm_vmlasq_m(__a, __b, __c, __p) | |
254 | #define vmlsdavaq_p(__a, __b, __c, __p) __arm_vmlsdavaq_p(__a, __b, __c, __p) | |
255 | #define vmlsdavaxq_p(__a, __b, __c, __p) __arm_vmlsdavaxq_p(__a, __b, __c, __p) | |
256 | #define vmulhq_m(__inactive, __a, __b, __p) __arm_vmulhq_m(__inactive, __a, __b, __p) | |
257 | #define vmullbq_int_m(__inactive, __a, __b, __p) __arm_vmullbq_int_m(__inactive, __a, __b, __p) | |
258 | #define vmulltq_int_m(__inactive, __a, __b, __p) __arm_vmulltq_int_m(__inactive, __a, __b, __p) | |
259 | #define vmulq_m(__inactive, __a, __b, __p) __arm_vmulq_m(__inactive, __a, __b, __p) | |
260 | #define vornq_m(__inactive, __a, __b, __p) __arm_vornq_m(__inactive, __a, __b, __p) | |
261 | #define vorrq_m(__inactive, __a, __b, __p) __arm_vorrq_m(__inactive, __a, __b, __p) | |
262 | #define vqaddq_m(__inactive, __a, __b, __p) __arm_vqaddq_m(__inactive, __a, __b, __p) | |
263 | #define vqdmladhq_m(__inactive, __a, __b, __p) __arm_vqdmladhq_m(__inactive, __a, __b, __p) | |
afb198ee | 264 | #define vqdmlashq_m(__a, __b, __c, __p) __arm_vqdmlashq_m(__a, __b, __c, __p) |
6a90680b ASDV |
265 | #define vqdmladhxq_m(__inactive, __a, __b, __p) __arm_vqdmladhxq_m(__inactive, __a, __b, __p) |
266 | #define vqdmlahq_m(__a, __b, __c, __p) __arm_vqdmlahq_m(__a, __b, __c, __p) | |
267 | #define vqdmlsdhq_m(__inactive, __a, __b, __p) __arm_vqdmlsdhq_m(__inactive, __a, __b, __p) | |
268 | #define vqdmlsdhxq_m(__inactive, __a, __b, __p) __arm_vqdmlsdhxq_m(__inactive, __a, __b, __p) | |
269 | #define vqdmulhq_m(__inactive, __a, __b, __p) __arm_vqdmulhq_m(__inactive, __a, __b, __p) | |
270 | #define vqrdmladhq_m(__inactive, __a, __b, __p) __arm_vqrdmladhq_m(__inactive, __a, __b, __p) | |
271 | #define vqrdmladhxq_m(__inactive, __a, __b, __p) __arm_vqrdmladhxq_m(__inactive, __a, __b, __p) | |
272 | #define vqrdmlahq_m(__a, __b, __c, __p) __arm_vqrdmlahq_m(__a, __b, __c, __p) | |
273 | #define vqrdmlashq_m(__a, __b, __c, __p) __arm_vqrdmlashq_m(__a, __b, __c, __p) | |
274 | #define vqrdmlsdhq_m(__inactive, __a, __b, __p) __arm_vqrdmlsdhq_m(__inactive, __a, __b, __p) | |
275 | #define vqrdmlsdhxq_m(__inactive, __a, __b, __p) __arm_vqrdmlsdhxq_m(__inactive, __a, __b, __p) | |
276 | #define vqrdmulhq_m(__inactive, __a, __b, __p) __arm_vqrdmulhq_m(__inactive, __a, __b, __p) | |
277 | #define vqrshlq_m(__inactive, __a, __b, __p) __arm_vqrshlq_m(__inactive, __a, __b, __p) | |
278 | #define vqshlq_m_n(__inactive, __a, __imm, __p) __arm_vqshlq_m_n(__inactive, __a, __imm, __p) | |
279 | #define vqshlq_m(__inactive, __a, __b, __p) __arm_vqshlq_m(__inactive, __a, __b, __p) | |
280 | #define vqsubq_m(__inactive, __a, __b, __p) __arm_vqsubq_m(__inactive, __a, __b, __p) | |
281 | #define vrhaddq_m(__inactive, __a, __b, __p) __arm_vrhaddq_m(__inactive, __a, __b, __p) | |
282 | #define vrmulhq_m(__inactive, __a, __b, __p) __arm_vrmulhq_m(__inactive, __a, __b, __p) | |
283 | #define vrshlq_m(__inactive, __a, __b, __p) __arm_vrshlq_m(__inactive, __a, __b, __p) | |
284 | #define vrshrq_m(__inactive, __a, __imm, __p) __arm_vrshrq_m(__inactive, __a, __imm, __p) | |
285 | #define vshlq_m_n(__inactive, __a, __imm, __p) __arm_vshlq_m_n(__inactive, __a, __imm, __p) | |
286 | #define vshrq_m(__inactive, __a, __imm, __p) __arm_vshrq_m(__inactive, __a, __imm, __p) | |
287 | #define vsliq_m(__a, __b, __imm, __p) __arm_vsliq_m(__a, __b, __imm, __p) | |
288 | #define vmlaldavaq_p(__a, __b, __c, __p) __arm_vmlaldavaq_p(__a, __b, __c, __p) | |
289 | #define vmlaldavaxq_p(__a, __b, __c, __p) __arm_vmlaldavaxq_p(__a, __b, __c, __p) | |
290 | #define vmlsldavaq_p(__a, __b, __c, __p) __arm_vmlsldavaq_p(__a, __b, __c, __p) | |
291 | #define vmlsldavaxq_p(__a, __b, __c, __p) __arm_vmlsldavaxq_p(__a, __b, __c, __p) | |
292 | #define vmullbq_poly_m(__inactive, __a, __b, __p) __arm_vmullbq_poly_m(__inactive, __a, __b, __p) | |
293 | #define vmulltq_poly_m(__inactive, __a, __b, __p) __arm_vmulltq_poly_m(__inactive, __a, __b, __p) | |
294 | #define vqdmullbq_m(__inactive, __a, __b, __p) __arm_vqdmullbq_m(__inactive, __a, __b, __p) | |
295 | #define vqdmulltq_m(__inactive, __a, __b, __p) __arm_vqdmulltq_m(__inactive, __a, __b, __p) | |
296 | #define vqrshrnbq_m(__a, __b, __imm, __p) __arm_vqrshrnbq_m(__a, __b, __imm, __p) | |
297 | #define vqrshrntq_m(__a, __b, __imm, __p) __arm_vqrshrntq_m(__a, __b, __imm, __p) | |
298 | #define vqrshrunbq_m(__a, __b, __imm, __p) __arm_vqrshrunbq_m(__a, __b, __imm, __p) | |
299 | #define vqrshruntq_m(__a, __b, __imm, __p) __arm_vqrshruntq_m(__a, __b, __imm, __p) | |
300 | #define vqshrnbq_m(__a, __b, __imm, __p) __arm_vqshrnbq_m(__a, __b, __imm, __p) | |
301 | #define vqshrntq_m(__a, __b, __imm, __p) __arm_vqshrntq_m(__a, __b, __imm, __p) | |
302 | #define vqshrunbq_m(__a, __b, __imm, __p) __arm_vqshrunbq_m(__a, __b, __imm, __p) | |
303 | #define vqshruntq_m(__a, __b, __imm, __p) __arm_vqshruntq_m(__a, __b, __imm, __p) | |
304 | #define vrmlaldavhaq_p(__a, __b, __c, __p) __arm_vrmlaldavhaq_p(__a, __b, __c, __p) | |
305 | #define vrmlaldavhaxq_p(__a, __b, __c, __p) __arm_vrmlaldavhaxq_p(__a, __b, __c, __p) | |
306 | #define vrmlsldavhaq_p(__a, __b, __c, __p) __arm_vrmlsldavhaq_p(__a, __b, __c, __p) | |
307 | #define vrmlsldavhaxq_p(__a, __b, __c, __p) __arm_vrmlsldavhaxq_p(__a, __b, __c, __p) | |
308 | #define vrshrnbq_m(__a, __b, __imm, __p) __arm_vrshrnbq_m(__a, __b, __imm, __p) | |
309 | #define vrshrntq_m(__a, __b, __imm, __p) __arm_vrshrntq_m(__a, __b, __imm, __p) | |
310 | #define vshllbq_m(__inactive, __a, __imm, __p) __arm_vshllbq_m(__inactive, __a, __imm, __p) | |
311 | #define vshlltq_m(__inactive, __a, __imm, __p) __arm_vshlltq_m(__inactive, __a, __imm, __p) | |
312 | #define vshrnbq_m(__a, __b, __imm, __p) __arm_vshrnbq_m(__a, __b, __imm, __p) | |
313 | #define vshrntq_m(__a, __b, __imm, __p) __arm_vshrntq_m(__a, __b, __imm, __p) | |
314 | #define vstrbq_scatter_offset(__base, __offset, __value) __arm_vstrbq_scatter_offset(__base, __offset, __value) | |
315 | #define vstrbq(__addr, __value) __arm_vstrbq(__addr, __value) | |
316 | #define vstrwq_scatter_base(__addr, __offset, __value) __arm_vstrwq_scatter_base(__addr, __offset, __value) | |
317 | #define vldrbq_gather_offset(__base, __offset) __arm_vldrbq_gather_offset(__base, __offset) | |
318 | #define vstrbq_p(__addr, __value, __p) __arm_vstrbq_p(__addr, __value, __p) | |
319 | #define vstrbq_scatter_offset_p(__base, __offset, __value, __p) __arm_vstrbq_scatter_offset_p(__base, __offset, __value, __p) | |
320 | #define vstrwq_scatter_base_p(__addr, __offset, __value, __p) __arm_vstrwq_scatter_base_p(__addr, __offset, __value, __p) | |
321 | #define vldrbq_gather_offset_z(__base, __offset, __p) __arm_vldrbq_gather_offset_z(__base, __offset, __p) | |
322 | #define vld1q(__base) __arm_vld1q(__base) | |
323 | #define vldrhq_gather_offset(__base, __offset) __arm_vldrhq_gather_offset(__base, __offset) | |
324 | #define vldrhq_gather_offset_z(__base, __offset, __p) __arm_vldrhq_gather_offset_z(__base, __offset, __p) | |
325 | #define vldrhq_gather_shifted_offset(__base, __offset) __arm_vldrhq_gather_shifted_offset(__base, __offset) | |
326 | #define vldrhq_gather_shifted_offset_z(__base, __offset, __p) __arm_vldrhq_gather_shifted_offset_z(__base, __offset, __p) | |
327 | #define vldrdq_gather_offset(__base, __offset) __arm_vldrdq_gather_offset(__base, __offset) | |
328 | #define vldrdq_gather_offset_z(__base, __offset, __p) __arm_vldrdq_gather_offset_z(__base, __offset, __p) | |
329 | #define vldrdq_gather_shifted_offset(__base, __offset) __arm_vldrdq_gather_shifted_offset(__base, __offset) | |
330 | #define vldrdq_gather_shifted_offset_z(__base, __offset, __p) __arm_vldrdq_gather_shifted_offset_z(__base, __offset, __p) | |
331 | #define vldrwq_gather_offset(__base, __offset) __arm_vldrwq_gather_offset(__base, __offset) | |
332 | #define vldrwq_gather_offset_z(__base, __offset, __p) __arm_vldrwq_gather_offset_z(__base, __offset, __p) | |
333 | #define vldrwq_gather_shifted_offset(__base, __offset) __arm_vldrwq_gather_shifted_offset(__base, __offset) | |
334 | #define vldrwq_gather_shifted_offset_z(__base, __offset, __p) __arm_vldrwq_gather_shifted_offset_z(__base, __offset, __p) | |
335 | #define vst1q(__addr, __value) __arm_vst1q(__addr, __value) | |
336 | #define vstrhq_scatter_offset(__base, __offset, __value) __arm_vstrhq_scatter_offset(__base, __offset, __value) | |
337 | #define vstrhq_scatter_offset_p(__base, __offset, __value, __p) __arm_vstrhq_scatter_offset_p(__base, __offset, __value, __p) | |
338 | #define vstrhq_scatter_shifted_offset(__base, __offset, __value) __arm_vstrhq_scatter_shifted_offset(__base, __offset, __value) | |
339 | #define vstrhq_scatter_shifted_offset_p(__base, __offset, __value, __p) __arm_vstrhq_scatter_shifted_offset_p(__base, __offset, __value, __p) | |
340 | #define vstrhq(__addr, __value) __arm_vstrhq(__addr, __value) | |
341 | #define vstrhq_p(__addr, __value, __p) __arm_vstrhq_p(__addr, __value, __p) | |
342 | #define vstrwq(__addr, __value) __arm_vstrwq(__addr, __value) | |
343 | #define vstrwq_p(__addr, __value, __p) __arm_vstrwq_p(__addr, __value, __p) | |
344 | #define vstrdq_scatter_base_p(__addr, __offset, __value, __p) __arm_vstrdq_scatter_base_p(__addr, __offset, __value, __p) | |
345 | #define vstrdq_scatter_base(__addr, __offset, __value) __arm_vstrdq_scatter_base(__addr, __offset, __value) | |
346 | #define vstrdq_scatter_offset_p(__base, __offset, __value, __p) __arm_vstrdq_scatter_offset_p(__base, __offset, __value, __p) | |
347 | #define vstrdq_scatter_offset(__base, __offset, __value) __arm_vstrdq_scatter_offset(__base, __offset, __value) | |
348 | #define vstrdq_scatter_shifted_offset_p(__base, __offset, __value, __p) __arm_vstrdq_scatter_shifted_offset_p(__base, __offset, __value, __p) | |
349 | #define vstrdq_scatter_shifted_offset(__base, __offset, __value) __arm_vstrdq_scatter_shifted_offset(__base, __offset, __value) | |
350 | #define vstrwq_scatter_offset_p(__base, __offset, __value, __p) __arm_vstrwq_scatter_offset_p(__base, __offset, __value, __p) | |
351 | #define vstrwq_scatter_offset(__base, __offset, __value) __arm_vstrwq_scatter_offset(__base, __offset, __value) | |
352 | #define vstrwq_scatter_shifted_offset_p(__base, __offset, __value, __p) __arm_vstrwq_scatter_shifted_offset_p(__base, __offset, __value, __p) | |
353 | #define vstrwq_scatter_shifted_offset(__base, __offset, __value) __arm_vstrwq_scatter_shifted_offset(__base, __offset, __value) | |
354 | #define vuninitializedq(__v) __arm_vuninitializedq(__v) | |
355 | #define vreinterpretq_s16(__a) __arm_vreinterpretq_s16(__a) | |
356 | #define vreinterpretq_s32(__a) __arm_vreinterpretq_s32(__a) | |
357 | #define vreinterpretq_s64(__a) __arm_vreinterpretq_s64(__a) | |
358 | #define vreinterpretq_s8(__a) __arm_vreinterpretq_s8(__a) | |
359 | #define vreinterpretq_u16(__a) __arm_vreinterpretq_u16(__a) | |
360 | #define vreinterpretq_u32(__a) __arm_vreinterpretq_u32(__a) | |
361 | #define vreinterpretq_u64(__a) __arm_vreinterpretq_u64(__a) | |
362 | #define vreinterpretq_u8(__a) __arm_vreinterpretq_u8(__a) | |
363 | #define vddupq_m(__inactive, __a, __imm, __p) __arm_vddupq_m(__inactive, __a, __imm, __p) | |
364 | #define vddupq_u8(__a, __imm) __arm_vddupq_u8(__a, __imm) | |
365 | #define vddupq_u32(__a, __imm) __arm_vddupq_u32(__a, __imm) | |
366 | #define vddupq_u16(__a, __imm) __arm_vddupq_u16(__a, __imm) | |
367 | #define vdwdupq_m(__inactive, __a, __b, __imm, __p) __arm_vdwdupq_m(__inactive, __a, __b, __imm, __p) | |
368 | #define vdwdupq_u8(__a, __b, __imm) __arm_vdwdupq_u8(__a, __b, __imm) | |
369 | #define vdwdupq_u32(__a, __b, __imm) __arm_vdwdupq_u32(__a, __b, __imm) | |
370 | #define vdwdupq_u16(__a, __b, __imm) __arm_vdwdupq_u16(__a, __b, __imm) | |
371 | #define vidupq_m(__inactive, __a, __imm, __p) __arm_vidupq_m(__inactive, __a, __imm, __p) | |
372 | #define vidupq_u8(__a, __imm) __arm_vidupq_u8(__a, __imm) | |
373 | #define vidupq_u32(__a, __imm) __arm_vidupq_u32(__a, __imm) | |
374 | #define vidupq_u16(__a, __imm) __arm_vidupq_u16(__a, __imm) | |
375 | #define viwdupq_m(__inactive, __a, __b, __imm, __p) __arm_viwdupq_m(__inactive, __a, __b, __imm, __p) | |
376 | #define viwdupq_u8(__a, __b, __imm) __arm_viwdupq_u8(__a, __b, __imm) | |
377 | #define viwdupq_u32(__a, __b, __imm) __arm_viwdupq_u32(__a, __b, __imm) | |
378 | #define viwdupq_u16(__a, __b, __imm) __arm_viwdupq_u16(__a, __b, __imm) | |
379 | #define vstrdq_scatter_base_wb(__addr, __offset, __value) __arm_vstrdq_scatter_base_wb(__addr, __offset, __value) | |
380 | #define vstrdq_scatter_base_wb_p(__addr, __offset, __value, __p) __arm_vstrdq_scatter_base_wb_p(__addr, __offset, __value, __p) | |
381 | #define vstrwq_scatter_base_wb_p(__addr, __offset, __value, __p) __arm_vstrwq_scatter_base_wb_p(__addr, __offset, __value, __p) | |
382 | #define vstrwq_scatter_base_wb(__addr, __offset, __value) __arm_vstrwq_scatter_base_wb(__addr, __offset, __value) | |
383 | #define vddupq_x_u8(__a, __imm, __p) __arm_vddupq_x_u8(__a, __imm, __p) | |
384 | #define vddupq_x_u16(__a, __imm, __p) __arm_vddupq_x_u16(__a, __imm, __p) | |
385 | #define vddupq_x_u32(__a, __imm, __p) __arm_vddupq_x_u32(__a, __imm, __p) | |
386 | #define vdwdupq_x_u8(__a, __b, __imm, __p) __arm_vdwdupq_x_u8(__a, __b, __imm, __p) | |
387 | #define vdwdupq_x_u16(__a, __b, __imm, __p) __arm_vdwdupq_x_u16(__a, __b, __imm, __p) | |
388 | #define vdwdupq_x_u32(__a, __b, __imm, __p) __arm_vdwdupq_x_u32(__a, __b, __imm, __p) | |
389 | #define vidupq_x_u8(__a, __imm, __p) __arm_vidupq_x_u8(__a, __imm, __p) | |
390 | #define vidupq_x_u16(__a, __imm, __p) __arm_vidupq_x_u16(__a, __imm, __p) | |
391 | #define vidupq_x_u32(__a, __imm, __p) __arm_vidupq_x_u32(__a, __imm, __p) | |
392 | #define viwdupq_x_u8(__a, __b, __imm, __p) __arm_viwdupq_x_u8(__a, __b, __imm, __p) | |
393 | #define viwdupq_x_u16(__a, __b, __imm, __p) __arm_viwdupq_x_u16(__a, __b, __imm, __p) | |
394 | #define viwdupq_x_u32(__a, __b, __imm, __p) __arm_viwdupq_x_u32(__a, __b, __imm, __p) | |
395 | #define vminq_x(__a, __b, __p) __arm_vminq_x(__a, __b, __p) | |
396 | #define vmaxq_x(__a, __b, __p) __arm_vmaxq_x(__a, __b, __p) | |
397 | #define vabdq_x(__a, __b, __p) __arm_vabdq_x(__a, __b, __p) | |
398 | #define vabsq_x(__a, __p) __arm_vabsq_x(__a, __p) | |
399 | #define vaddq_x(__a, __b, __p) __arm_vaddq_x(__a, __b, __p) | |
400 | #define vclsq_x(__a, __p) __arm_vclsq_x(__a, __p) | |
401 | #define vclzq_x(__a, __p) __arm_vclzq_x(__a, __p) | |
402 | #define vnegq_x(__a, __p) __arm_vnegq_x(__a, __p) | |
403 | #define vmulhq_x(__a, __b, __p) __arm_vmulhq_x(__a, __b, __p) | |
404 | #define vmullbq_poly_x(__a, __b, __p) __arm_vmullbq_poly_x(__a, __b, __p) | |
405 | #define vmullbq_int_x(__a, __b, __p) __arm_vmullbq_int_x(__a, __b, __p) | |
406 | #define vmulltq_poly_x(__a, __b, __p) __arm_vmulltq_poly_x(__a, __b, __p) | |
407 | #define vmulltq_int_x(__a, __b, __p) __arm_vmulltq_int_x(__a, __b, __p) | |
408 | #define vmulq_x(__a, __b, __p) __arm_vmulq_x(__a, __b, __p) | |
409 | #define vsubq_x(__a, __b, __p) __arm_vsubq_x(__a, __b, __p) | |
410 | #define vcaddq_rot90_x(__a, __b, __p) __arm_vcaddq_rot90_x(__a, __b, __p) | |
411 | #define vcaddq_rot270_x(__a, __b, __p) __arm_vcaddq_rot270_x(__a, __b, __p) | |
412 | #define vhaddq_x(__a, __b, __p) __arm_vhaddq_x(__a, __b, __p) | |
413 | #define vhcaddq_rot90_x(__a, __b, __p) __arm_vhcaddq_rot90_x(__a, __b, __p) | |
414 | #define vhcaddq_rot270_x(__a, __b, __p) __arm_vhcaddq_rot270_x(__a, __b, __p) | |
415 | #define vhsubq_x(__a, __b, __p) __arm_vhsubq_x(__a, __b, __p) | |
416 | #define vrhaddq_x(__a, __b, __p) __arm_vrhaddq_x(__a, __b, __p) | |
417 | #define vrmulhq_x(__a, __b, __p) __arm_vrmulhq_x(__a, __b, __p) | |
418 | #define vandq_x(__a, __b, __p) __arm_vandq_x(__a, __b, __p) | |
419 | #define vbicq_x(__a, __b, __p) __arm_vbicq_x(__a, __b, __p) | |
420 | #define vbrsrq_x(__a, __b, __p) __arm_vbrsrq_x(__a, __b, __p) | |
421 | #define veorq_x(__a, __b, __p) __arm_veorq_x(__a, __b, __p) | |
422 | #define vmovlbq_x(__a, __p) __arm_vmovlbq_x(__a, __p) | |
423 | #define vmovltq_x(__a, __p) __arm_vmovltq_x(__a, __p) | |
424 | #define vmvnq_x(__a, __p) __arm_vmvnq_x(__a, __p) | |
425 | #define vornq_x(__a, __b, __p) __arm_vornq_x(__a, __b, __p) | |
426 | #define vorrq_x(__a, __b, __p) __arm_vorrq_x(__a, __b, __p) | |
427 | #define vrev16q_x(__a, __p) __arm_vrev16q_x(__a, __p) | |
428 | #define vrev32q_x(__a, __p) __arm_vrev32q_x(__a, __p) | |
429 | #define vrev64q_x(__a, __p) __arm_vrev64q_x(__a, __p) | |
430 | #define vrshlq_x(__a, __b, __p) __arm_vrshlq_x(__a, __b, __p) | |
431 | #define vshllbq_x(__a, __imm, __p) __arm_vshllbq_x(__a, __imm, __p) | |
432 | #define vshlltq_x(__a, __imm, __p) __arm_vshlltq_x(__a, __imm, __p) | |
433 | #define vshlq_x(__a, __b, __p) __arm_vshlq_x(__a, __b, __p) | |
434 | #define vshlq_x_n(__a, __imm, __p) __arm_vshlq_x_n(__a, __imm, __p) | |
435 | #define vrshrq_x(__a, __imm, __p) __arm_vrshrq_x(__a, __imm, __p) | |
436 | #define vshrq_x(__a, __imm, __p) __arm_vshrq_x(__a, __imm, __p) | |
437 | #define vadciq(__a, __b, __carry_out) __arm_vadciq(__a, __b, __carry_out) | |
438 | #define vadciq_m(__inactive, __a, __b, __carry_out, __p) __arm_vadciq_m(__inactive, __a, __b, __carry_out, __p) | |
439 | #define vadcq(__a, __b, __carry) __arm_vadcq(__a, __b, __carry) | |
440 | #define vadcq_m(__inactive, __a, __b, __carry, __p) __arm_vadcq_m(__inactive, __a, __b, __carry, __p) | |
441 | #define vsbciq(__a, __b, __carry_out) __arm_vsbciq(__a, __b, __carry_out) | |
442 | #define vsbciq_m(__inactive, __a, __b, __carry_out, __p) __arm_vsbciq_m(__inactive, __a, __b, __carry_out, __p) | |
443 | #define vsbcq(__a, __b, __carry) __arm_vsbcq(__a, __b, __carry) | |
444 | #define vsbcq_m(__inactive, __a, __b, __carry, __p) __arm_vsbcq_m(__inactive, __a, __b, __carry, __p) | |
445 | #define vst1q_p(__addr, __value, __p) __arm_vst1q_p(__addr, __value, __p) | |
446 | #define vst2q(__addr, __value) __arm_vst2q(__addr, __value) | |
447 | #define vld1q_z(__base, __p) __arm_vld1q_z(__base, __p) | |
448 | #define vld2q(__addr) __arm_vld2q(__addr) | |
449 | #define vld4q(__addr) __arm_vld4q(__addr) | |
450 | #define vsetq_lane(__a, __b, __idx) __arm_vsetq_lane(__a, __b, __idx) | |
451 | #define vgetq_lane(__a, __idx) __arm_vgetq_lane(__a, __idx) | |
452 | #define vshlcq_m(__a, __b, __imm, __p) __arm_vshlcq_m(__a, __b, __imm, __p) | |
453 | #define vrndxq(__a) __arm_vrndxq(__a) | |
454 | #define vrndq(__a) __arm_vrndq(__a) | |
455 | #define vrndpq(__a) __arm_vrndpq(__a) | |
456 | #define vrndnq(__a) __arm_vrndnq(__a) | |
457 | #define vrndmq(__a) __arm_vrndmq(__a) | |
458 | #define vrndaq(__a) __arm_vrndaq(__a) | |
459 | #define vcvttq_f32(__a) __arm_vcvttq_f32(__a) | |
460 | #define vcvtbq_f32(__a) __arm_vcvtbq_f32(__a) | |
461 | #define vcvtq(__a) __arm_vcvtq(__a) | |
462 | #define vcvtq_n(__a, __imm6) __arm_vcvtq_n(__a, __imm6) | |
463 | #define vminnmvq(__a, __b) __arm_vminnmvq(__a, __b) | |
464 | #define vminnmq(__a, __b) __arm_vminnmq(__a, __b) | |
465 | #define vminnmavq(__a, __b) __arm_vminnmavq(__a, __b) | |
466 | #define vminnmaq(__a, __b) __arm_vminnmaq(__a, __b) | |
467 | #define vmaxnmvq(__a, __b) __arm_vmaxnmvq(__a, __b) | |
468 | #define vmaxnmq(__a, __b) __arm_vmaxnmq(__a, __b) | |
469 | #define vmaxnmavq(__a, __b) __arm_vmaxnmavq(__a, __b) | |
470 | #define vmaxnmaq(__a, __b) __arm_vmaxnmaq(__a, __b) | |
471 | #define vcmulq_rot90(__a, __b) __arm_vcmulq_rot90(__a, __b) | |
472 | #define vcmulq_rot270(__a, __b) __arm_vcmulq_rot270(__a, __b) | |
473 | #define vcmulq_rot180(__a, __b) __arm_vcmulq_rot180(__a, __b) | |
474 | #define vcmulq(__a, __b) __arm_vcmulq(__a, __b) | |
475 | #define vcvtaq_m(__inactive, __a, __p) __arm_vcvtaq_m(__inactive, __a, __p) | |
476 | #define vcvtq_m(__inactive, __a, __p) __arm_vcvtq_m(__inactive, __a, __p) | |
477 | #define vcvtbq_m(__a, __b, __p) __arm_vcvtbq_m(__a, __b, __p) | |
478 | #define vcvttq_m(__a, __b, __p) __arm_vcvttq_m(__a, __b, __p) | |
479 | #define vcmlaq(__a, __b, __c) __arm_vcmlaq(__a, __b, __c) | |
480 | #define vcmlaq_rot180(__a, __b, __c) __arm_vcmlaq_rot180(__a, __b, __c) | |
481 | #define vcmlaq_rot270(__a, __b, __c) __arm_vcmlaq_rot270(__a, __b, __c) | |
482 | #define vcmlaq_rot90(__a, __b, __c) __arm_vcmlaq_rot90(__a, __b, __c) | |
483 | #define vfmaq(__a, __b, __c) __arm_vfmaq(__a, __b, __c) | |
484 | #define vfmasq(__a, __b, __c) __arm_vfmasq(__a, __b, __c) | |
485 | #define vfmsq(__a, __b, __c) __arm_vfmsq(__a, __b, __c) | |
486 | #define vcvtmq_m(__inactive, __a, __p) __arm_vcvtmq_m(__inactive, __a, __p) | |
487 | #define vcvtnq_m(__inactive, __a, __p) __arm_vcvtnq_m(__inactive, __a, __p) | |
488 | #define vcvtpq_m(__inactive, __a, __p) __arm_vcvtpq_m(__inactive, __a, __p) | |
489 | #define vmaxnmaq_m(__a, __b, __p) __arm_vmaxnmaq_m(__a, __b, __p) | |
490 | #define vmaxnmavq_p(__a, __b, __p) __arm_vmaxnmavq_p(__a, __b, __p) | |
491 | #define vmaxnmvq_p(__a, __b, __p) __arm_vmaxnmvq_p(__a, __b, __p) | |
492 | #define vminnmaq_m(__a, __b, __p) __arm_vminnmaq_m(__a, __b, __p) | |
493 | #define vminnmavq_p(__a, __b, __p) __arm_vminnmavq_p(__a, __b, __p) | |
494 | #define vminnmvq_p(__a, __b, __p) __arm_vminnmvq_p(__a, __b, __p) | |
495 | #define vrndaq_m(__inactive, __a, __p) __arm_vrndaq_m(__inactive, __a, __p) | |
496 | #define vrndmq_m(__inactive, __a, __p) __arm_vrndmq_m(__inactive, __a, __p) | |
497 | #define vrndnq_m(__inactive, __a, __p) __arm_vrndnq_m(__inactive, __a, __p) | |
498 | #define vrndpq_m(__inactive, __a, __p) __arm_vrndpq_m(__inactive, __a, __p) | |
499 | #define vrndq_m(__inactive, __a, __p) __arm_vrndq_m(__inactive, __a, __p) | |
500 | #define vrndxq_m(__inactive, __a, __p) __arm_vrndxq_m(__inactive, __a, __p) | |
501 | #define vcvtq_m_n(__inactive, __a, __imm6, __p) __arm_vcvtq_m_n(__inactive, __a, __imm6, __p) | |
502 | #define vcmlaq_m(__a, __b, __c, __p) __arm_vcmlaq_m(__a, __b, __c, __p) | |
503 | #define vcmlaq_rot180_m(__a, __b, __c, __p) __arm_vcmlaq_rot180_m(__a, __b, __c, __p) | |
504 | #define vcmlaq_rot270_m(__a, __b, __c, __p) __arm_vcmlaq_rot270_m(__a, __b, __c, __p) | |
505 | #define vcmlaq_rot90_m(__a, __b, __c, __p) __arm_vcmlaq_rot90_m(__a, __b, __c, __p) | |
506 | #define vcmulq_m(__inactive, __a, __b, __p) __arm_vcmulq_m(__inactive, __a, __b, __p) | |
507 | #define vcmulq_rot180_m(__inactive, __a, __b, __p) __arm_vcmulq_rot180_m(__inactive, __a, __b, __p) | |
508 | #define vcmulq_rot270_m(__inactive, __a, __b, __p) __arm_vcmulq_rot270_m(__inactive, __a, __b, __p) | |
509 | #define vcmulq_rot90_m(__inactive, __a, __b, __p) __arm_vcmulq_rot90_m(__inactive, __a, __b, __p) | |
510 | #define vfmaq_m(__a, __b, __c, __p) __arm_vfmaq_m(__a, __b, __c, __p) | |
511 | #define vfmasq_m(__a, __b, __c, __p) __arm_vfmasq_m(__a, __b, __c, __p) | |
512 | #define vfmsq_m(__a, __b, __c, __p) __arm_vfmsq_m(__a, __b, __c, __p) | |
513 | #define vmaxnmq_m(__inactive, __a, __b, __p) __arm_vmaxnmq_m(__inactive, __a, __b, __p) | |
514 | #define vminnmq_m(__inactive, __a, __b, __p) __arm_vminnmq_m(__inactive, __a, __b, __p) | |
515 | #define vreinterpretq_f16(__a) __arm_vreinterpretq_f16(__a) | |
516 | #define vreinterpretq_f32(__a) __arm_vreinterpretq_f32(__a) | |
517 | #define vminnmq_x(__a, __b, __p) __arm_vminnmq_x(__a, __b, __p) | |
518 | #define vmaxnmq_x(__a, __b, __p) __arm_vmaxnmq_x(__a, __b, __p) | |
519 | #define vcmulq_x(__a, __b, __p) __arm_vcmulq_x(__a, __b, __p) | |
520 | #define vcmulq_rot90_x(__a, __b, __p) __arm_vcmulq_rot90_x(__a, __b, __p) | |
521 | #define vcmulq_rot180_x(__a, __b, __p) __arm_vcmulq_rot180_x(__a, __b, __p) | |
522 | #define vcmulq_rot270_x(__a, __b, __p) __arm_vcmulq_rot270_x(__a, __b, __p) | |
523 | #define vcvtq_x(__a, __p) __arm_vcvtq_x(__a, __p) | |
524 | #define vcvtq_x_n(__a, __imm6, __p) __arm_vcvtq_x_n(__a, __imm6, __p) | |
525 | #define vrndq_x(__a, __p) __arm_vrndq_x(__a, __p) | |
526 | #define vrndnq_x(__a, __p) __arm_vrndnq_x(__a, __p) | |
527 | #define vrndmq_x(__a, __p) __arm_vrndmq_x(__a, __p) | |
528 | #define vrndpq_x(__a, __p) __arm_vrndpq_x(__a, __p) | |
529 | #define vrndaq_x(__a, __p) __arm_vrndaq_x(__a, __p) | |
530 | #define vrndxq_x(__a, __p) __arm_vrndxq_x(__a, __p) | |
531 | ||
532 | ||
14782c81 SP |
533 | #define vst4q_s8( __addr, __value) __arm_vst4q_s8( __addr, __value) |
534 | #define vst4q_s16( __addr, __value) __arm_vst4q_s16( __addr, __value) | |
535 | #define vst4q_s32( __addr, __value) __arm_vst4q_s32( __addr, __value) | |
536 | #define vst4q_u8( __addr, __value) __arm_vst4q_u8( __addr, __value) | |
537 | #define vst4q_u16( __addr, __value) __arm_vst4q_u16( __addr, __value) | |
538 | #define vst4q_u32( __addr, __value) __arm_vst4q_u32( __addr, __value) | |
539 | #define vst4q_f16( __addr, __value) __arm_vst4q_f16( __addr, __value) | |
540 | #define vst4q_f32( __addr, __value) __arm_vst4q_f32( __addr, __value) | |
a50f6abf SP |
541 | #define vrndxq_f16(__a) __arm_vrndxq_f16(__a) |
542 | #define vrndxq_f32(__a) __arm_vrndxq_f32(__a) | |
543 | #define vrndq_f16(__a) __arm_vrndq_f16(__a) | |
544 | #define vrndq_f32(__a) __arm_vrndq_f32(__a) | |
545 | #define vrndpq_f16(__a) __arm_vrndpq_f16(__a) | |
546 | #define vrndpq_f32(__a) __arm_vrndpq_f32(__a) | |
547 | #define vrndnq_f16(__a) __arm_vrndnq_f16(__a) | |
548 | #define vrndnq_f32(__a) __arm_vrndnq_f32(__a) | |
549 | #define vrndmq_f16(__a) __arm_vrndmq_f16(__a) | |
550 | #define vrndmq_f32(__a) __arm_vrndmq_f32(__a) | |
551 | #define vrndaq_f16(__a) __arm_vrndaq_f16(__a) | |
552 | #define vrndaq_f32(__a) __arm_vrndaq_f32(__a) | |
553 | #define vrev64q_f16(__a) __arm_vrev64q_f16(__a) | |
554 | #define vrev64q_f32(__a) __arm_vrev64q_f32(__a) | |
555 | #define vnegq_f16(__a) __arm_vnegq_f16(__a) | |
556 | #define vnegq_f32(__a) __arm_vnegq_f32(__a) | |
557 | #define vdupq_n_f16(__a) __arm_vdupq_n_f16(__a) | |
558 | #define vdupq_n_f32(__a) __arm_vdupq_n_f32(__a) | |
559 | #define vabsq_f16(__a) __arm_vabsq_f16(__a) | |
560 | #define vabsq_f32(__a) __arm_vabsq_f32(__a) | |
561 | #define vrev32q_f16(__a) __arm_vrev32q_f16(__a) | |
562 | #define vcvttq_f32_f16(__a) __arm_vcvttq_f32_f16(__a) | |
563 | #define vcvtbq_f32_f16(__a) __arm_vcvtbq_f32_f16(__a) | |
564 | #define vcvtq_f16_s16(__a) __arm_vcvtq_f16_s16(__a) | |
565 | #define vcvtq_f32_s32(__a) __arm_vcvtq_f32_s32(__a) | |
566 | #define vcvtq_f16_u16(__a) __arm_vcvtq_f16_u16(__a) | |
567 | #define vcvtq_f32_u32(__a) __arm_vcvtq_f32_u32(__a) | |
6df4618c SP |
568 | #define vdupq_n_s8(__a) __arm_vdupq_n_s8(__a) |
569 | #define vdupq_n_s16(__a) __arm_vdupq_n_s16(__a) | |
570 | #define vdupq_n_s32(__a) __arm_vdupq_n_s32(__a) | |
571 | #define vabsq_s8(__a) __arm_vabsq_s8(__a) | |
572 | #define vabsq_s16(__a) __arm_vabsq_s16(__a) | |
573 | #define vabsq_s32(__a) __arm_vabsq_s32(__a) | |
574 | #define vclsq_s8(__a) __arm_vclsq_s8(__a) | |
575 | #define vclsq_s16(__a) __arm_vclsq_s16(__a) | |
576 | #define vclsq_s32(__a) __arm_vclsq_s32(__a) | |
577 | #define vclzq_s8(__a) __arm_vclzq_s8(__a) | |
578 | #define vclzq_s16(__a) __arm_vclzq_s16(__a) | |
579 | #define vclzq_s32(__a) __arm_vclzq_s32(__a) | |
580 | #define vnegq_s8(__a) __arm_vnegq_s8(__a) | |
581 | #define vnegq_s16(__a) __arm_vnegq_s16(__a) | |
582 | #define vnegq_s32(__a) __arm_vnegq_s32(__a) | |
583 | #define vaddlvq_s32(__a) __arm_vaddlvq_s32(__a) | |
584 | #define vaddvq_s8(__a) __arm_vaddvq_s8(__a) | |
585 | #define vaddvq_s16(__a) __arm_vaddvq_s16(__a) | |
586 | #define vaddvq_s32(__a) __arm_vaddvq_s32(__a) | |
587 | #define vmovlbq_s8(__a) __arm_vmovlbq_s8(__a) | |
588 | #define vmovlbq_s16(__a) __arm_vmovlbq_s16(__a) | |
589 | #define vmovltq_s8(__a) __arm_vmovltq_s8(__a) | |
590 | #define vmovltq_s16(__a) __arm_vmovltq_s16(__a) | |
591 | #define vmvnq_s8(__a) __arm_vmvnq_s8(__a) | |
592 | #define vmvnq_s16(__a) __arm_vmvnq_s16(__a) | |
593 | #define vmvnq_s32(__a) __arm_vmvnq_s32(__a) | |
5db0eb95 SP |
594 | #define vmvnq_n_s16( __imm) __arm_vmvnq_n_s16( __imm) |
595 | #define vmvnq_n_s32( __imm) __arm_vmvnq_n_s32( __imm) | |
6df4618c SP |
596 | #define vrev16q_s8(__a) __arm_vrev16q_s8(__a) |
597 | #define vrev32q_s8(__a) __arm_vrev32q_s8(__a) | |
598 | #define vrev32q_s16(__a) __arm_vrev32q_s16(__a) | |
5db0eb95 SP |
599 | #define vrev64q_s8(__a) __arm_vrev64q_s8(__a) |
600 | #define vrev64q_s16(__a) __arm_vrev64q_s16(__a) | |
601 | #define vrev64q_s32(__a) __arm_vrev64q_s32(__a) | |
6df4618c SP |
602 | #define vqabsq_s8(__a) __arm_vqabsq_s8(__a) |
603 | #define vqabsq_s16(__a) __arm_vqabsq_s16(__a) | |
604 | #define vqabsq_s32(__a) __arm_vqabsq_s32(__a) | |
605 | #define vqnegq_s8(__a) __arm_vqnegq_s8(__a) | |
606 | #define vqnegq_s16(__a) __arm_vqnegq_s16(__a) | |
607 | #define vqnegq_s32(__a) __arm_vqnegq_s32(__a) | |
608 | #define vcvtaq_s16_f16(__a) __arm_vcvtaq_s16_f16(__a) | |
609 | #define vcvtaq_s32_f32(__a) __arm_vcvtaq_s32_f32(__a) | |
610 | #define vcvtnq_s16_f16(__a) __arm_vcvtnq_s16_f16(__a) | |
611 | #define vcvtnq_s32_f32(__a) __arm_vcvtnq_s32_f32(__a) | |
612 | #define vcvtpq_s16_f16(__a) __arm_vcvtpq_s16_f16(__a) | |
613 | #define vcvtpq_s32_f32(__a) __arm_vcvtpq_s32_f32(__a) | |
614 | #define vcvtmq_s16_f16(__a) __arm_vcvtmq_s16_f16(__a) | |
615 | #define vcvtmq_s32_f32(__a) __arm_vcvtmq_s32_f32(__a) | |
5db0eb95 SP |
616 | #define vcvtq_s16_f16(__a) __arm_vcvtq_s16_f16(__a) |
617 | #define vcvtq_s32_f32(__a) __arm_vcvtq_s32_f32(__a) | |
618 | #define vrev64q_u8(__a) __arm_vrev64q_u8(__a) | |
619 | #define vrev64q_u16(__a) __arm_vrev64q_u16(__a) | |
620 | #define vrev64q_u32(__a) __arm_vrev64q_u32(__a) | |
6df4618c SP |
621 | #define vmvnq_u8(__a) __arm_vmvnq_u8(__a) |
622 | #define vmvnq_u16(__a) __arm_vmvnq_u16(__a) | |
623 | #define vmvnq_u32(__a) __arm_vmvnq_u32(__a) | |
624 | #define vdupq_n_u8(__a) __arm_vdupq_n_u8(__a) | |
625 | #define vdupq_n_u16(__a) __arm_vdupq_n_u16(__a) | |
626 | #define vdupq_n_u32(__a) __arm_vdupq_n_u32(__a) | |
627 | #define vclzq_u8(__a) __arm_vclzq_u8(__a) | |
628 | #define vclzq_u16(__a) __arm_vclzq_u16(__a) | |
629 | #define vclzq_u32(__a) __arm_vclzq_u32(__a) | |
630 | #define vaddvq_u8(__a) __arm_vaddvq_u8(__a) | |
631 | #define vaddvq_u16(__a) __arm_vaddvq_u16(__a) | |
632 | #define vaddvq_u32(__a) __arm_vaddvq_u32(__a) | |
633 | #define vrev32q_u8(__a) __arm_vrev32q_u8(__a) | |
634 | #define vrev32q_u16(__a) __arm_vrev32q_u16(__a) | |
635 | #define vmovltq_u8(__a) __arm_vmovltq_u8(__a) | |
636 | #define vmovltq_u16(__a) __arm_vmovltq_u16(__a) | |
637 | #define vmovlbq_u8(__a) __arm_vmovlbq_u8(__a) | |
638 | #define vmovlbq_u16(__a) __arm_vmovlbq_u16(__a) | |
5db0eb95 SP |
639 | #define vmvnq_n_u16( __imm) __arm_vmvnq_n_u16( __imm) |
640 | #define vmvnq_n_u32( __imm) __arm_vmvnq_n_u32( __imm) | |
6df4618c SP |
641 | #define vrev16q_u8(__a) __arm_vrev16q_u8(__a) |
642 | #define vaddlvq_u32(__a) __arm_vaddlvq_u32(__a) | |
5db0eb95 SP |
643 | #define vcvtq_u16_f16(__a) __arm_vcvtq_u16_f16(__a) |
644 | #define vcvtq_u32_f32(__a) __arm_vcvtq_u32_f32(__a) | |
6df4618c SP |
645 | #define vcvtpq_u16_f16(__a) __arm_vcvtpq_u16_f16(__a) |
646 | #define vcvtpq_u32_f32(__a) __arm_vcvtpq_u32_f32(__a) | |
647 | #define vcvtnq_u16_f16(__a) __arm_vcvtnq_u16_f16(__a) | |
5a448362 | 648 | #define vcvtnq_u32_f32(__a) __arm_vcvtnq_u32_f32(__a) |
6df4618c SP |
649 | #define vcvtmq_u16_f16(__a) __arm_vcvtmq_u16_f16(__a) |
650 | #define vcvtmq_u32_f32(__a) __arm_vcvtmq_u32_f32(__a) | |
651 | #define vcvtaq_u16_f16(__a) __arm_vcvtaq_u16_f16(__a) | |
652 | #define vcvtaq_u32_f32(__a) __arm_vcvtaq_u32_f32(__a) | |
a475f153 SP |
653 | #define vctp16q(__a) __arm_vctp16q(__a) |
654 | #define vctp32q(__a) __arm_vctp32q(__a) | |
655 | #define vctp64q(__a) __arm_vctp64q(__a) | |
656 | #define vctp8q(__a) __arm_vctp8q(__a) | |
657 | #define vpnot(__a) __arm_vpnot(__a) | |
4be8cf77 SP |
658 | #define vsubq_n_f16(__a, __b) __arm_vsubq_n_f16(__a, __b) |
659 | #define vsubq_n_f32(__a, __b) __arm_vsubq_n_f32(__a, __b) | |
660 | #define vbrsrq_n_f16(__a, __b) __arm_vbrsrq_n_f16(__a, __b) | |
661 | #define vbrsrq_n_f32(__a, __b) __arm_vbrsrq_n_f32(__a, __b) | |
662 | #define vcvtq_n_f16_s16(__a, __imm6) __arm_vcvtq_n_f16_s16(__a, __imm6) | |
663 | #define vcvtq_n_f32_s32(__a, __imm6) __arm_vcvtq_n_f32_s32(__a, __imm6) | |
664 | #define vcvtq_n_f16_u16(__a, __imm6) __arm_vcvtq_n_f16_u16(__a, __imm6) | |
665 | #define vcvtq_n_f32_u32(__a, __imm6) __arm_vcvtq_n_f32_u32(__a, __imm6) | |
666 | #define vcreateq_f16(__a, __b) __arm_vcreateq_f16(__a, __b) | |
667 | #define vcreateq_f32(__a, __b) __arm_vcreateq_f32(__a, __b) | |
f166a8cd SP |
668 | #define vcvtq_n_s16_f16(__a, __imm6) __arm_vcvtq_n_s16_f16(__a, __imm6) |
669 | #define vcvtq_n_s32_f32(__a, __imm6) __arm_vcvtq_n_s32_f32(__a, __imm6) | |
670 | #define vcvtq_n_u16_f16(__a, __imm6) __arm_vcvtq_n_u16_f16(__a, __imm6) | |
671 | #define vcvtq_n_u32_f32(__a, __imm6) __arm_vcvtq_n_u32_f32(__a, __imm6) | |
672 | #define vcreateq_u8(__a, __b) __arm_vcreateq_u8(__a, __b) | |
673 | #define vcreateq_u16(__a, __b) __arm_vcreateq_u16(__a, __b) | |
674 | #define vcreateq_u32(__a, __b) __arm_vcreateq_u32(__a, __b) | |
675 | #define vcreateq_u64(__a, __b) __arm_vcreateq_u64(__a, __b) | |
676 | #define vcreateq_s8(__a, __b) __arm_vcreateq_s8(__a, __b) | |
677 | #define vcreateq_s16(__a, __b) __arm_vcreateq_s16(__a, __b) | |
678 | #define vcreateq_s32(__a, __b) __arm_vcreateq_s32(__a, __b) | |
679 | #define vcreateq_s64(__a, __b) __arm_vcreateq_s64(__a, __b) | |
680 | #define vshrq_n_s8(__a, __imm) __arm_vshrq_n_s8(__a, __imm) | |
681 | #define vshrq_n_s16(__a, __imm) __arm_vshrq_n_s16(__a, __imm) | |
682 | #define vshrq_n_s32(__a, __imm) __arm_vshrq_n_s32(__a, __imm) | |
683 | #define vshrq_n_u8(__a, __imm) __arm_vshrq_n_u8(__a, __imm) | |
684 | #define vshrq_n_u16(__a, __imm) __arm_vshrq_n_u16(__a, __imm) | |
685 | #define vshrq_n_u32(__a, __imm) __arm_vshrq_n_u32(__a, __imm) | |
d71dba7b SP |
686 | #define vaddlvq_p_s32(__a, __p) __arm_vaddlvq_p_s32(__a, __p) |
687 | #define vaddlvq_p_u32(__a, __p) __arm_vaddlvq_p_u32(__a, __p) | |
688 | #define vcmpneq_s8(__a, __b) __arm_vcmpneq_s8(__a, __b) | |
689 | #define vcmpneq_s16(__a, __b) __arm_vcmpneq_s16(__a, __b) | |
690 | #define vcmpneq_s32(__a, __b) __arm_vcmpneq_s32(__a, __b) | |
691 | #define vcmpneq_u8(__a, __b) __arm_vcmpneq_u8(__a, __b) | |
692 | #define vcmpneq_u16(__a, __b) __arm_vcmpneq_u16(__a, __b) | |
693 | #define vcmpneq_u32(__a, __b) __arm_vcmpneq_u32(__a, __b) | |
694 | #define vshlq_s8(__a, __b) __arm_vshlq_s8(__a, __b) | |
695 | #define vshlq_s16(__a, __b) __arm_vshlq_s16(__a, __b) | |
696 | #define vshlq_s32(__a, __b) __arm_vshlq_s32(__a, __b) | |
697 | #define vshlq_u8(__a, __b) __arm_vshlq_u8(__a, __b) | |
698 | #define vshlq_u16(__a, __b) __arm_vshlq_u16(__a, __b) | |
699 | #define vshlq_u32(__a, __b) __arm_vshlq_u32(__a, __b) | |
33203b4c SP |
700 | #define vsubq_u8(__a, __b) __arm_vsubq_u8(__a, __b) |
701 | #define vsubq_n_u8(__a, __b) __arm_vsubq_n_u8(__a, __b) | |
702 | #define vrmulhq_u8(__a, __b) __arm_vrmulhq_u8(__a, __b) | |
703 | #define vrhaddq_u8(__a, __b) __arm_vrhaddq_u8(__a, __b) | |
704 | #define vqsubq_u8(__a, __b) __arm_vqsubq_u8(__a, __b) | |
705 | #define vqsubq_n_u8(__a, __b) __arm_vqsubq_n_u8(__a, __b) | |
706 | #define vqaddq_u8(__a, __b) __arm_vqaddq_u8(__a, __b) | |
707 | #define vqaddq_n_u8(__a, __b) __arm_vqaddq_n_u8(__a, __b) | |
708 | #define vorrq_u8(__a, __b) __arm_vorrq_u8(__a, __b) | |
709 | #define vornq_u8(__a, __b) __arm_vornq_u8(__a, __b) | |
710 | #define vmulq_u8(__a, __b) __arm_vmulq_u8(__a, __b) | |
711 | #define vmulq_n_u8(__a, __b) __arm_vmulq_n_u8(__a, __b) | |
712 | #define vmulltq_int_u8(__a, __b) __arm_vmulltq_int_u8(__a, __b) | |
713 | #define vmullbq_int_u8(__a, __b) __arm_vmullbq_int_u8(__a, __b) | |
714 | #define vmulhq_u8(__a, __b) __arm_vmulhq_u8(__a, __b) | |
715 | #define vmladavq_u8(__a, __b) __arm_vmladavq_u8(__a, __b) | |
716 | #define vminvq_u8(__a, __b) __arm_vminvq_u8(__a, __b) | |
717 | #define vminq_u8(__a, __b) __arm_vminq_u8(__a, __b) | |
718 | #define vmaxvq_u8(__a, __b) __arm_vmaxvq_u8(__a, __b) | |
719 | #define vmaxq_u8(__a, __b) __arm_vmaxq_u8(__a, __b) | |
720 | #define vhsubq_u8(__a, __b) __arm_vhsubq_u8(__a, __b) | |
721 | #define vhsubq_n_u8(__a, __b) __arm_vhsubq_n_u8(__a, __b) | |
722 | #define vhaddq_u8(__a, __b) __arm_vhaddq_u8(__a, __b) | |
723 | #define vhaddq_n_u8(__a, __b) __arm_vhaddq_n_u8(__a, __b) | |
724 | #define veorq_u8(__a, __b) __arm_veorq_u8(__a, __b) | |
725 | #define vcmpneq_n_u8(__a, __b) __arm_vcmpneq_n_u8(__a, __b) | |
726 | #define vcmphiq_u8(__a, __b) __arm_vcmphiq_u8(__a, __b) | |
727 | #define vcmphiq_n_u8(__a, __b) __arm_vcmphiq_n_u8(__a, __b) | |
728 | #define vcmpeqq_u8(__a, __b) __arm_vcmpeqq_u8(__a, __b) | |
729 | #define vcmpeqq_n_u8(__a, __b) __arm_vcmpeqq_n_u8(__a, __b) | |
730 | #define vcmpcsq_u8(__a, __b) __arm_vcmpcsq_u8(__a, __b) | |
731 | #define vcmpcsq_n_u8(__a, __b) __arm_vcmpcsq_n_u8(__a, __b) | |
732 | #define vcaddq_rot90_u8(__a, __b) __arm_vcaddq_rot90_u8(__a, __b) | |
733 | #define vcaddq_rot270_u8(__a, __b) __arm_vcaddq_rot270_u8(__a, __b) | |
734 | #define vbicq_u8(__a, __b) __arm_vbicq_u8(__a, __b) | |
735 | #define vandq_u8(__a, __b) __arm_vandq_u8(__a, __b) | |
736 | #define vaddvq_p_u8(__a, __p) __arm_vaddvq_p_u8(__a, __p) | |
737 | #define vaddvaq_u8(__a, __b) __arm_vaddvaq_u8(__a, __b) | |
738 | #define vaddq_n_u8(__a, __b) __arm_vaddq_n_u8(__a, __b) | |
739 | #define vabdq_u8(__a, __b) __arm_vabdq_u8(__a, __b) | |
740 | #define vshlq_r_u8(__a, __b) __arm_vshlq_r_u8(__a, __b) | |
741 | #define vrshlq_u8(__a, __b) __arm_vrshlq_u8(__a, __b) | |
742 | #define vrshlq_n_u8(__a, __b) __arm_vrshlq_n_u8(__a, __b) | |
743 | #define vqshlq_u8(__a, __b) __arm_vqshlq_u8(__a, __b) | |
744 | #define vqshlq_r_u8(__a, __b) __arm_vqshlq_r_u8(__a, __b) | |
745 | #define vqrshlq_u8(__a, __b) __arm_vqrshlq_u8(__a, __b) | |
746 | #define vqrshlq_n_u8(__a, __b) __arm_vqrshlq_n_u8(__a, __b) | |
747 | #define vminavq_s8(__a, __b) __arm_vminavq_s8(__a, __b) | |
748 | #define vminaq_s8(__a, __b) __arm_vminaq_s8(__a, __b) | |
749 | #define vmaxavq_s8(__a, __b) __arm_vmaxavq_s8(__a, __b) | |
750 | #define vmaxaq_s8(__a, __b) __arm_vmaxaq_s8(__a, __b) | |
751 | #define vbrsrq_n_u8(__a, __b) __arm_vbrsrq_n_u8(__a, __b) | |
752 | #define vshlq_n_u8(__a, __imm) __arm_vshlq_n_u8(__a, __imm) | |
753 | #define vrshrq_n_u8(__a, __imm) __arm_vrshrq_n_u8(__a, __imm) | |
754 | #define vqshlq_n_u8(__a, __imm) __arm_vqshlq_n_u8(__a, __imm) | |
755 | #define vcmpneq_n_s8(__a, __b) __arm_vcmpneq_n_s8(__a, __b) | |
756 | #define vcmpltq_s8(__a, __b) __arm_vcmpltq_s8(__a, __b) | |
757 | #define vcmpltq_n_s8(__a, __b) __arm_vcmpltq_n_s8(__a, __b) | |
758 | #define vcmpleq_s8(__a, __b) __arm_vcmpleq_s8(__a, __b) | |
759 | #define vcmpleq_n_s8(__a, __b) __arm_vcmpleq_n_s8(__a, __b) | |
760 | #define vcmpgtq_s8(__a, __b) __arm_vcmpgtq_s8(__a, __b) | |
761 | #define vcmpgtq_n_s8(__a, __b) __arm_vcmpgtq_n_s8(__a, __b) | |
762 | #define vcmpgeq_s8(__a, __b) __arm_vcmpgeq_s8(__a, __b) | |
763 | #define vcmpgeq_n_s8(__a, __b) __arm_vcmpgeq_n_s8(__a, __b) | |
764 | #define vcmpeqq_s8(__a, __b) __arm_vcmpeqq_s8(__a, __b) | |
765 | #define vcmpeqq_n_s8(__a, __b) __arm_vcmpeqq_n_s8(__a, __b) | |
766 | #define vqshluq_n_s8(__a, __imm) __arm_vqshluq_n_s8(__a, __imm) | |
767 | #define vaddvq_p_s8(__a, __p) __arm_vaddvq_p_s8(__a, __p) | |
768 | #define vsubq_s8(__a, __b) __arm_vsubq_s8(__a, __b) | |
769 | #define vsubq_n_s8(__a, __b) __arm_vsubq_n_s8(__a, __b) | |
770 | #define vshlq_r_s8(__a, __b) __arm_vshlq_r_s8(__a, __b) | |
771 | #define vrshlq_s8(__a, __b) __arm_vrshlq_s8(__a, __b) | |
772 | #define vrshlq_n_s8(__a, __b) __arm_vrshlq_n_s8(__a, __b) | |
773 | #define vrmulhq_s8(__a, __b) __arm_vrmulhq_s8(__a, __b) | |
774 | #define vrhaddq_s8(__a, __b) __arm_vrhaddq_s8(__a, __b) | |
775 | #define vqsubq_s8(__a, __b) __arm_vqsubq_s8(__a, __b) | |
776 | #define vqsubq_n_s8(__a, __b) __arm_vqsubq_n_s8(__a, __b) | |
777 | #define vqshlq_s8(__a, __b) __arm_vqshlq_s8(__a, __b) | |
778 | #define vqshlq_r_s8(__a, __b) __arm_vqshlq_r_s8(__a, __b) | |
779 | #define vqrshlq_s8(__a, __b) __arm_vqrshlq_s8(__a, __b) | |
780 | #define vqrshlq_n_s8(__a, __b) __arm_vqrshlq_n_s8(__a, __b) | |
781 | #define vqrdmulhq_s8(__a, __b) __arm_vqrdmulhq_s8(__a, __b) | |
782 | #define vqrdmulhq_n_s8(__a, __b) __arm_vqrdmulhq_n_s8(__a, __b) | |
783 | #define vqdmulhq_s8(__a, __b) __arm_vqdmulhq_s8(__a, __b) | |
784 | #define vqdmulhq_n_s8(__a, __b) __arm_vqdmulhq_n_s8(__a, __b) | |
785 | #define vqaddq_s8(__a, __b) __arm_vqaddq_s8(__a, __b) | |
786 | #define vqaddq_n_s8(__a, __b) __arm_vqaddq_n_s8(__a, __b) | |
787 | #define vorrq_s8(__a, __b) __arm_vorrq_s8(__a, __b) | |
788 | #define vornq_s8(__a, __b) __arm_vornq_s8(__a, __b) | |
789 | #define vmulq_s8(__a, __b) __arm_vmulq_s8(__a, __b) | |
790 | #define vmulq_n_s8(__a, __b) __arm_vmulq_n_s8(__a, __b) | |
791 | #define vmulltq_int_s8(__a, __b) __arm_vmulltq_int_s8(__a, __b) | |
792 | #define vmullbq_int_s8(__a, __b) __arm_vmullbq_int_s8(__a, __b) | |
793 | #define vmulhq_s8(__a, __b) __arm_vmulhq_s8(__a, __b) | |
794 | #define vmlsdavxq_s8(__a, __b) __arm_vmlsdavxq_s8(__a, __b) | |
795 | #define vmlsdavq_s8(__a, __b) __arm_vmlsdavq_s8(__a, __b) | |
796 | #define vmladavxq_s8(__a, __b) __arm_vmladavxq_s8(__a, __b) | |
797 | #define vmladavq_s8(__a, __b) __arm_vmladavq_s8(__a, __b) | |
798 | #define vminvq_s8(__a, __b) __arm_vminvq_s8(__a, __b) | |
799 | #define vminq_s8(__a, __b) __arm_vminq_s8(__a, __b) | |
800 | #define vmaxvq_s8(__a, __b) __arm_vmaxvq_s8(__a, __b) | |
801 | #define vmaxq_s8(__a, __b) __arm_vmaxq_s8(__a, __b) | |
802 | #define vhsubq_s8(__a, __b) __arm_vhsubq_s8(__a, __b) | |
803 | #define vhsubq_n_s8(__a, __b) __arm_vhsubq_n_s8(__a, __b) | |
804 | #define vhcaddq_rot90_s8(__a, __b) __arm_vhcaddq_rot90_s8(__a, __b) | |
805 | #define vhcaddq_rot270_s8(__a, __b) __arm_vhcaddq_rot270_s8(__a, __b) | |
806 | #define vhaddq_s8(__a, __b) __arm_vhaddq_s8(__a, __b) | |
807 | #define vhaddq_n_s8(__a, __b) __arm_vhaddq_n_s8(__a, __b) | |
808 | #define veorq_s8(__a, __b) __arm_veorq_s8(__a, __b) | |
809 | #define vcaddq_rot90_s8(__a, __b) __arm_vcaddq_rot90_s8(__a, __b) | |
810 | #define vcaddq_rot270_s8(__a, __b) __arm_vcaddq_rot270_s8(__a, __b) | |
811 | #define vbrsrq_n_s8(__a, __b) __arm_vbrsrq_n_s8(__a, __b) | |
812 | #define vbicq_s8(__a, __b) __arm_vbicq_s8(__a, __b) | |
813 | #define vandq_s8(__a, __b) __arm_vandq_s8(__a, __b) | |
814 | #define vaddvaq_s8(__a, __b) __arm_vaddvaq_s8(__a, __b) | |
815 | #define vaddq_n_s8(__a, __b) __arm_vaddq_n_s8(__a, __b) | |
816 | #define vabdq_s8(__a, __b) __arm_vabdq_s8(__a, __b) | |
817 | #define vshlq_n_s8(__a, __imm) __arm_vshlq_n_s8(__a, __imm) | |
818 | #define vrshrq_n_s8(__a, __imm) __arm_vrshrq_n_s8(__a, __imm) | |
819 | #define vqshlq_n_s8(__a, __imm) __arm_vqshlq_n_s8(__a, __imm) | |
820 | #define vsubq_u16(__a, __b) __arm_vsubq_u16(__a, __b) | |
821 | #define vsubq_n_u16(__a, __b) __arm_vsubq_n_u16(__a, __b) | |
822 | #define vrmulhq_u16(__a, __b) __arm_vrmulhq_u16(__a, __b) | |
823 | #define vrhaddq_u16(__a, __b) __arm_vrhaddq_u16(__a, __b) | |
824 | #define vqsubq_u16(__a, __b) __arm_vqsubq_u16(__a, __b) | |
825 | #define vqsubq_n_u16(__a, __b) __arm_vqsubq_n_u16(__a, __b) | |
826 | #define vqaddq_u16(__a, __b) __arm_vqaddq_u16(__a, __b) | |
827 | #define vqaddq_n_u16(__a, __b) __arm_vqaddq_n_u16(__a, __b) | |
828 | #define vorrq_u16(__a, __b) __arm_vorrq_u16(__a, __b) | |
829 | #define vornq_u16(__a, __b) __arm_vornq_u16(__a, __b) | |
830 | #define vmulq_u16(__a, __b) __arm_vmulq_u16(__a, __b) | |
831 | #define vmulq_n_u16(__a, __b) __arm_vmulq_n_u16(__a, __b) | |
832 | #define vmulltq_int_u16(__a, __b) __arm_vmulltq_int_u16(__a, __b) | |
833 | #define vmullbq_int_u16(__a, __b) __arm_vmullbq_int_u16(__a, __b) | |
834 | #define vmulhq_u16(__a, __b) __arm_vmulhq_u16(__a, __b) | |
835 | #define vmladavq_u16(__a, __b) __arm_vmladavq_u16(__a, __b) | |
836 | #define vminvq_u16(__a, __b) __arm_vminvq_u16(__a, __b) | |
837 | #define vminq_u16(__a, __b) __arm_vminq_u16(__a, __b) | |
838 | #define vmaxvq_u16(__a, __b) __arm_vmaxvq_u16(__a, __b) | |
839 | #define vmaxq_u16(__a, __b) __arm_vmaxq_u16(__a, __b) | |
840 | #define vhsubq_u16(__a, __b) __arm_vhsubq_u16(__a, __b) | |
841 | #define vhsubq_n_u16(__a, __b) __arm_vhsubq_n_u16(__a, __b) | |
842 | #define vhaddq_u16(__a, __b) __arm_vhaddq_u16(__a, __b) | |
843 | #define vhaddq_n_u16(__a, __b) __arm_vhaddq_n_u16(__a, __b) | |
844 | #define veorq_u16(__a, __b) __arm_veorq_u16(__a, __b) | |
845 | #define vcmpneq_n_u16(__a, __b) __arm_vcmpneq_n_u16(__a, __b) | |
846 | #define vcmphiq_u16(__a, __b) __arm_vcmphiq_u16(__a, __b) | |
847 | #define vcmphiq_n_u16(__a, __b) __arm_vcmphiq_n_u16(__a, __b) | |
848 | #define vcmpeqq_u16(__a, __b) __arm_vcmpeqq_u16(__a, __b) | |
849 | #define vcmpeqq_n_u16(__a, __b) __arm_vcmpeqq_n_u16(__a, __b) | |
850 | #define vcmpcsq_u16(__a, __b) __arm_vcmpcsq_u16(__a, __b) | |
851 | #define vcmpcsq_n_u16(__a, __b) __arm_vcmpcsq_n_u16(__a, __b) | |
852 | #define vcaddq_rot90_u16(__a, __b) __arm_vcaddq_rot90_u16(__a, __b) | |
/* User-namespace aliases for the MVE intrinsics: each polymorphic/typed
   intrinsic name (e.g. vaddq_n_u16) expands to its implementation-reserved
   __arm_-prefixed counterpart.  These aliases are only emitted when the user
   has not defined __ARM_MVE_PRESERVE_USER_NAMESPACE (guard opened earlier in
   this header).  The macros add no behavior of their own: argument lists are
   forwarded verbatim.  NOTE(review): this span was reconstructed from an
   HTML/blame-mangled dump (stray line numbers, table pipes, and git commit
   hashes removed); macro names and parameters were preserved exactly.  */
#define vcaddq_rot270_u16(__a, __b) __arm_vcaddq_rot270_u16(__a, __b)
#define vbicq_u16(__a, __b) __arm_vbicq_u16(__a, __b)
#define vandq_u16(__a, __b) __arm_vandq_u16(__a, __b)
#define vaddvq_p_u16(__a, __p) __arm_vaddvq_p_u16(__a, __p)
#define vaddvaq_u16(__a, __b) __arm_vaddvaq_u16(__a, __b)
#define vaddq_n_u16(__a, __b) __arm_vaddq_n_u16(__a, __b)
#define vabdq_u16(__a, __b) __arm_vabdq_u16(__a, __b)
#define vshlq_r_u16(__a, __b) __arm_vshlq_r_u16(__a, __b)
#define vrshlq_u16(__a, __b) __arm_vrshlq_u16(__a, __b)
#define vrshlq_n_u16(__a, __b) __arm_vrshlq_n_u16(__a, __b)
#define vqshlq_u16(__a, __b) __arm_vqshlq_u16(__a, __b)
#define vqshlq_r_u16(__a, __b) __arm_vqshlq_r_u16(__a, __b)
#define vqrshlq_u16(__a, __b) __arm_vqrshlq_u16(__a, __b)
#define vqrshlq_n_u16(__a, __b) __arm_vqrshlq_n_u16(__a, __b)
#define vminavq_s16(__a, __b) __arm_vminavq_s16(__a, __b)
#define vminaq_s16(__a, __b) __arm_vminaq_s16(__a, __b)
#define vmaxavq_s16(__a, __b) __arm_vmaxavq_s16(__a, __b)
#define vmaxaq_s16(__a, __b) __arm_vmaxaq_s16(__a, __b)
#define vbrsrq_n_u16(__a, __b) __arm_vbrsrq_n_u16(__a, __b)
#define vshlq_n_u16(__a, __imm) __arm_vshlq_n_u16(__a, __imm)
#define vrshrq_n_u16(__a, __imm) __arm_vrshrq_n_u16(__a, __imm)
#define vqshlq_n_u16(__a, __imm) __arm_vqshlq_n_u16(__a, __imm)
#define vcmpneq_n_s16(__a, __b) __arm_vcmpneq_n_s16(__a, __b)
#define vcmpltq_s16(__a, __b) __arm_vcmpltq_s16(__a, __b)
#define vcmpltq_n_s16(__a, __b) __arm_vcmpltq_n_s16(__a, __b)
#define vcmpleq_s16(__a, __b) __arm_vcmpleq_s16(__a, __b)
#define vcmpleq_n_s16(__a, __b) __arm_vcmpleq_n_s16(__a, __b)
#define vcmpgtq_s16(__a, __b) __arm_vcmpgtq_s16(__a, __b)
#define vcmpgtq_n_s16(__a, __b) __arm_vcmpgtq_n_s16(__a, __b)
#define vcmpgeq_s16(__a, __b) __arm_vcmpgeq_s16(__a, __b)
#define vcmpgeq_n_s16(__a, __b) __arm_vcmpgeq_n_s16(__a, __b)
#define vcmpeqq_s16(__a, __b) __arm_vcmpeqq_s16(__a, __b)
#define vcmpeqq_n_s16(__a, __b) __arm_vcmpeqq_n_s16(__a, __b)
#define vqshluq_n_s16(__a, __imm) __arm_vqshluq_n_s16(__a, __imm)
#define vaddvq_p_s16(__a, __p) __arm_vaddvq_p_s16(__a, __p)
#define vsubq_s16(__a, __b) __arm_vsubq_s16(__a, __b)
#define vsubq_n_s16(__a, __b) __arm_vsubq_n_s16(__a, __b)
#define vshlq_r_s16(__a, __b) __arm_vshlq_r_s16(__a, __b)
#define vrshlq_s16(__a, __b) __arm_vrshlq_s16(__a, __b)
#define vrshlq_n_s16(__a, __b) __arm_vrshlq_n_s16(__a, __b)
#define vrmulhq_s16(__a, __b) __arm_vrmulhq_s16(__a, __b)
#define vrhaddq_s16(__a, __b) __arm_vrhaddq_s16(__a, __b)
#define vqsubq_s16(__a, __b) __arm_vqsubq_s16(__a, __b)
#define vqsubq_n_s16(__a, __b) __arm_vqsubq_n_s16(__a, __b)
#define vqshlq_s16(__a, __b) __arm_vqshlq_s16(__a, __b)
#define vqshlq_r_s16(__a, __b) __arm_vqshlq_r_s16(__a, __b)
#define vqrshlq_s16(__a, __b) __arm_vqrshlq_s16(__a, __b)
#define vqrshlq_n_s16(__a, __b) __arm_vqrshlq_n_s16(__a, __b)
#define vqrdmulhq_s16(__a, __b) __arm_vqrdmulhq_s16(__a, __b)
#define vqrdmulhq_n_s16(__a, __b) __arm_vqrdmulhq_n_s16(__a, __b)
#define vqdmulhq_s16(__a, __b) __arm_vqdmulhq_s16(__a, __b)
#define vqdmulhq_n_s16(__a, __b) __arm_vqdmulhq_n_s16(__a, __b)
#define vqaddq_s16(__a, __b) __arm_vqaddq_s16(__a, __b)
#define vqaddq_n_s16(__a, __b) __arm_vqaddq_n_s16(__a, __b)
#define vorrq_s16(__a, __b) __arm_vorrq_s16(__a, __b)
#define vornq_s16(__a, __b) __arm_vornq_s16(__a, __b)
#define vmulq_s16(__a, __b) __arm_vmulq_s16(__a, __b)
#define vmulq_n_s16(__a, __b) __arm_vmulq_n_s16(__a, __b)
#define vmulltq_int_s16(__a, __b) __arm_vmulltq_int_s16(__a, __b)
#define vmullbq_int_s16(__a, __b) __arm_vmullbq_int_s16(__a, __b)
#define vmulhq_s16(__a, __b) __arm_vmulhq_s16(__a, __b)
#define vmlsdavxq_s16(__a, __b) __arm_vmlsdavxq_s16(__a, __b)
#define vmlsdavq_s16(__a, __b) __arm_vmlsdavq_s16(__a, __b)
#define vmladavxq_s16(__a, __b) __arm_vmladavxq_s16(__a, __b)
#define vmladavq_s16(__a, __b) __arm_vmladavq_s16(__a, __b)
#define vminvq_s16(__a, __b) __arm_vminvq_s16(__a, __b)
#define vminq_s16(__a, __b) __arm_vminq_s16(__a, __b)
#define vmaxvq_s16(__a, __b) __arm_vmaxvq_s16(__a, __b)
#define vmaxq_s16(__a, __b) __arm_vmaxq_s16(__a, __b)
#define vhsubq_s16(__a, __b) __arm_vhsubq_s16(__a, __b)
#define vhsubq_n_s16(__a, __b) __arm_vhsubq_n_s16(__a, __b)
#define vhcaddq_rot90_s16(__a, __b) __arm_vhcaddq_rot90_s16(__a, __b)
#define vhcaddq_rot270_s16(__a, __b) __arm_vhcaddq_rot270_s16(__a, __b)
#define vhaddq_s16(__a, __b) __arm_vhaddq_s16(__a, __b)
#define vhaddq_n_s16(__a, __b) __arm_vhaddq_n_s16(__a, __b)
#define veorq_s16(__a, __b) __arm_veorq_s16(__a, __b)
#define vcaddq_rot90_s16(__a, __b) __arm_vcaddq_rot90_s16(__a, __b)
#define vcaddq_rot270_s16(__a, __b) __arm_vcaddq_rot270_s16(__a, __b)
#define vbrsrq_n_s16(__a, __b) __arm_vbrsrq_n_s16(__a, __b)
#define vbicq_s16(__a, __b) __arm_vbicq_s16(__a, __b)
#define vandq_s16(__a, __b) __arm_vandq_s16(__a, __b)
#define vaddvaq_s16(__a, __b) __arm_vaddvaq_s16(__a, __b)
#define vaddq_n_s16(__a, __b) __arm_vaddq_n_s16(__a, __b)
#define vabdq_s16(__a, __b) __arm_vabdq_s16(__a, __b)
#define vshlq_n_s16(__a, __imm) __arm_vshlq_n_s16(__a, __imm)
#define vrshrq_n_s16(__a, __imm) __arm_vrshrq_n_s16(__a, __imm)
#define vqshlq_n_s16(__a, __imm) __arm_vqshlq_n_s16(__a, __imm)
#define vsubq_u32(__a, __b) __arm_vsubq_u32(__a, __b)
#define vsubq_n_u32(__a, __b) __arm_vsubq_n_u32(__a, __b)
#define vrmulhq_u32(__a, __b) __arm_vrmulhq_u32(__a, __b)
#define vrhaddq_u32(__a, __b) __arm_vrhaddq_u32(__a, __b)
#define vqsubq_u32(__a, __b) __arm_vqsubq_u32(__a, __b)
#define vqsubq_n_u32(__a, __b) __arm_vqsubq_n_u32(__a, __b)
#define vqaddq_u32(__a, __b) __arm_vqaddq_u32(__a, __b)
#define vqaddq_n_u32(__a, __b) __arm_vqaddq_n_u32(__a, __b)
#define vorrq_u32(__a, __b) __arm_vorrq_u32(__a, __b)
#define vornq_u32(__a, __b) __arm_vornq_u32(__a, __b)
#define vmulq_u32(__a, __b) __arm_vmulq_u32(__a, __b)
#define vmulq_n_u32(__a, __b) __arm_vmulq_n_u32(__a, __b)
#define vmulltq_int_u32(__a, __b) __arm_vmulltq_int_u32(__a, __b)
#define vmullbq_int_u32(__a, __b) __arm_vmullbq_int_u32(__a, __b)
#define vmulhq_u32(__a, __b) __arm_vmulhq_u32(__a, __b)
#define vmladavq_u32(__a, __b) __arm_vmladavq_u32(__a, __b)
#define vminvq_u32(__a, __b) __arm_vminvq_u32(__a, __b)
#define vminq_u32(__a, __b) __arm_vminq_u32(__a, __b)
#define vmaxvq_u32(__a, __b) __arm_vmaxvq_u32(__a, __b)
#define vmaxq_u32(__a, __b) __arm_vmaxq_u32(__a, __b)
#define vhsubq_u32(__a, __b) __arm_vhsubq_u32(__a, __b)
#define vhsubq_n_u32(__a, __b) __arm_vhsubq_n_u32(__a, __b)
#define vhaddq_u32(__a, __b) __arm_vhaddq_u32(__a, __b)
#define vhaddq_n_u32(__a, __b) __arm_vhaddq_n_u32(__a, __b)
#define veorq_u32(__a, __b) __arm_veorq_u32(__a, __b)
#define vcmpneq_n_u32(__a, __b) __arm_vcmpneq_n_u32(__a, __b)
#define vcmphiq_u32(__a, __b) __arm_vcmphiq_u32(__a, __b)
#define vcmphiq_n_u32(__a, __b) __arm_vcmphiq_n_u32(__a, __b)
#define vcmpeqq_u32(__a, __b) __arm_vcmpeqq_u32(__a, __b)
#define vcmpeqq_n_u32(__a, __b) __arm_vcmpeqq_n_u32(__a, __b)
#define vcmpcsq_u32(__a, __b) __arm_vcmpcsq_u32(__a, __b)
#define vcmpcsq_n_u32(__a, __b) __arm_vcmpcsq_n_u32(__a, __b)
#define vcaddq_rot90_u32(__a, __b) __arm_vcaddq_rot90_u32(__a, __b)
#define vcaddq_rot270_u32(__a, __b) __arm_vcaddq_rot270_u32(__a, __b)
#define vbicq_u32(__a, __b) __arm_vbicq_u32(__a, __b)
#define vandq_u32(__a, __b) __arm_vandq_u32(__a, __b)
#define vaddvq_p_u32(__a, __p) __arm_vaddvq_p_u32(__a, __p)
#define vaddvaq_u32(__a, __b) __arm_vaddvaq_u32(__a, __b)
#define vaddq_n_u32(__a, __b) __arm_vaddq_n_u32(__a, __b)
#define vabdq_u32(__a, __b) __arm_vabdq_u32(__a, __b)
#define vshlq_r_u32(__a, __b) __arm_vshlq_r_u32(__a, __b)
#define vrshlq_u32(__a, __b) __arm_vrshlq_u32(__a, __b)
#define vrshlq_n_u32(__a, __b) __arm_vrshlq_n_u32(__a, __b)
#define vqshlq_u32(__a, __b) __arm_vqshlq_u32(__a, __b)
#define vqshlq_r_u32(__a, __b) __arm_vqshlq_r_u32(__a, __b)
#define vqrshlq_u32(__a, __b) __arm_vqrshlq_u32(__a, __b)
#define vqrshlq_n_u32(__a, __b) __arm_vqrshlq_n_u32(__a, __b)
#define vminavq_s32(__a, __b) __arm_vminavq_s32(__a, __b)
#define vminaq_s32(__a, __b) __arm_vminaq_s32(__a, __b)
#define vmaxavq_s32(__a, __b) __arm_vmaxavq_s32(__a, __b)
#define vmaxaq_s32(__a, __b) __arm_vmaxaq_s32(__a, __b)
#define vbrsrq_n_u32(__a, __b) __arm_vbrsrq_n_u32(__a, __b)
#define vshlq_n_u32(__a, __imm) __arm_vshlq_n_u32(__a, __imm)
#define vrshrq_n_u32(__a, __imm) __arm_vrshrq_n_u32(__a, __imm)
#define vqshlq_n_u32(__a, __imm) __arm_vqshlq_n_u32(__a, __imm)
#define vcmpneq_n_s32(__a, __b) __arm_vcmpneq_n_s32(__a, __b)
#define vcmpltq_s32(__a, __b) __arm_vcmpltq_s32(__a, __b)
#define vcmpltq_n_s32(__a, __b) __arm_vcmpltq_n_s32(__a, __b)
#define vcmpleq_s32(__a, __b) __arm_vcmpleq_s32(__a, __b)
#define vcmpleq_n_s32(__a, __b) __arm_vcmpleq_n_s32(__a, __b)
#define vcmpgtq_s32(__a, __b) __arm_vcmpgtq_s32(__a, __b)
#define vcmpgtq_n_s32(__a, __b) __arm_vcmpgtq_n_s32(__a, __b)
#define vcmpgeq_s32(__a, __b) __arm_vcmpgeq_s32(__a, __b)
#define vcmpgeq_n_s32(__a, __b) __arm_vcmpgeq_n_s32(__a, __b)
#define vcmpeqq_s32(__a, __b) __arm_vcmpeqq_s32(__a, __b)
#define vcmpeqq_n_s32(__a, __b) __arm_vcmpeqq_n_s32(__a, __b)
#define vqshluq_n_s32(__a, __imm) __arm_vqshluq_n_s32(__a, __imm)
#define vaddvq_p_s32(__a, __p) __arm_vaddvq_p_s32(__a, __p)
#define vsubq_s32(__a, __b) __arm_vsubq_s32(__a, __b)
#define vsubq_n_s32(__a, __b) __arm_vsubq_n_s32(__a, __b)
#define vshlq_r_s32(__a, __b) __arm_vshlq_r_s32(__a, __b)
#define vrshlq_s32(__a, __b) __arm_vrshlq_s32(__a, __b)
#define vrshlq_n_s32(__a, __b) __arm_vrshlq_n_s32(__a, __b)
#define vrmulhq_s32(__a, __b) __arm_vrmulhq_s32(__a, __b)
#define vrhaddq_s32(__a, __b) __arm_vrhaddq_s32(__a, __b)
#define vqsubq_s32(__a, __b) __arm_vqsubq_s32(__a, __b)
#define vqsubq_n_s32(__a, __b) __arm_vqsubq_n_s32(__a, __b)
#define vqshlq_s32(__a, __b) __arm_vqshlq_s32(__a, __b)
#define vqshlq_r_s32(__a, __b) __arm_vqshlq_r_s32(__a, __b)
#define vqrshlq_s32(__a, __b) __arm_vqrshlq_s32(__a, __b)
#define vqrshlq_n_s32(__a, __b) __arm_vqrshlq_n_s32(__a, __b)
#define vqrdmulhq_s32(__a, __b) __arm_vqrdmulhq_s32(__a, __b)
#define vqrdmulhq_n_s32(__a, __b) __arm_vqrdmulhq_n_s32(__a, __b)
#define vqdmulhq_s32(__a, __b) __arm_vqdmulhq_s32(__a, __b)
#define vqdmulhq_n_s32(__a, __b) __arm_vqdmulhq_n_s32(__a, __b)
#define vqaddq_s32(__a, __b) __arm_vqaddq_s32(__a, __b)
#define vqaddq_n_s32(__a, __b) __arm_vqaddq_n_s32(__a, __b)
#define vorrq_s32(__a, __b) __arm_vorrq_s32(__a, __b)
#define vornq_s32(__a, __b) __arm_vornq_s32(__a, __b)
#define vmulq_s32(__a, __b) __arm_vmulq_s32(__a, __b)
#define vmulq_n_s32(__a, __b) __arm_vmulq_n_s32(__a, __b)
#define vmulltq_int_s32(__a, __b) __arm_vmulltq_int_s32(__a, __b)
#define vmullbq_int_s32(__a, __b) __arm_vmullbq_int_s32(__a, __b)
#define vmulhq_s32(__a, __b) __arm_vmulhq_s32(__a, __b)
#define vmlsdavxq_s32(__a, __b) __arm_vmlsdavxq_s32(__a, __b)
#define vmlsdavq_s32(__a, __b) __arm_vmlsdavq_s32(__a, __b)
#define vmladavxq_s32(__a, __b) __arm_vmladavxq_s32(__a, __b)
#define vmladavq_s32(__a, __b) __arm_vmladavq_s32(__a, __b)
#define vminvq_s32(__a, __b) __arm_vminvq_s32(__a, __b)
#define vminq_s32(__a, __b) __arm_vminq_s32(__a, __b)
#define vmaxvq_s32(__a, __b) __arm_vmaxvq_s32(__a, __b)
#define vmaxq_s32(__a, __b) __arm_vmaxq_s32(__a, __b)
#define vhsubq_s32(__a, __b) __arm_vhsubq_s32(__a, __b)
#define vhsubq_n_s32(__a, __b) __arm_vhsubq_n_s32(__a, __b)
#define vhcaddq_rot90_s32(__a, __b) __arm_vhcaddq_rot90_s32(__a, __b)
#define vhcaddq_rot270_s32(__a, __b) __arm_vhcaddq_rot270_s32(__a, __b)
#define vhaddq_s32(__a, __b) __arm_vhaddq_s32(__a, __b)
#define vhaddq_n_s32(__a, __b) __arm_vhaddq_n_s32(__a, __b)
#define veorq_s32(__a, __b) __arm_veorq_s32(__a, __b)
#define vcaddq_rot90_s32(__a, __b) __arm_vcaddq_rot90_s32(__a, __b)
#define vcaddq_rot270_s32(__a, __b) __arm_vcaddq_rot270_s32(__a, __b)
#define vbrsrq_n_s32(__a, __b) __arm_vbrsrq_n_s32(__a, __b)
#define vbicq_s32(__a, __b) __arm_vbicq_s32(__a, __b)
#define vandq_s32(__a, __b) __arm_vandq_s32(__a, __b)
#define vaddvaq_s32(__a, __b) __arm_vaddvaq_s32(__a, __b)
#define vaddq_n_s32(__a, __b) __arm_vaddq_n_s32(__a, __b)
#define vabdq_s32(__a, __b) __arm_vabdq_s32(__a, __b)
#define vshlq_n_s32(__a, __imm) __arm_vshlq_n_s32(__a, __imm)
#define vrshrq_n_s32(__a, __imm) __arm_vrshrq_n_s32(__a, __imm)
#define vqshlq_n_s32(__a, __imm) __arm_vqshlq_n_s32(__a, __imm)
/* Narrowing/widening moves, polynomial multiplies, and f16/f32
   floating-point intrinsic aliases.  */
#define vqmovntq_u16(__a, __b) __arm_vqmovntq_u16(__a, __b)
#define vqmovnbq_u16(__a, __b) __arm_vqmovnbq_u16(__a, __b)
#define vmulltq_poly_p8(__a, __b) __arm_vmulltq_poly_p8(__a, __b)
#define vmullbq_poly_p8(__a, __b) __arm_vmullbq_poly_p8(__a, __b)
#define vmovntq_u16(__a, __b) __arm_vmovntq_u16(__a, __b)
#define vmovnbq_u16(__a, __b) __arm_vmovnbq_u16(__a, __b)
#define vmlaldavq_u16(__a, __b) __arm_vmlaldavq_u16(__a, __b)
#define vqmovuntq_s16(__a, __b) __arm_vqmovuntq_s16(__a, __b)
#define vqmovunbq_s16(__a, __b) __arm_vqmovunbq_s16(__a, __b)
#define vshlltq_n_u8(__a, __imm) __arm_vshlltq_n_u8(__a, __imm)
#define vshllbq_n_u8(__a, __imm) __arm_vshllbq_n_u8(__a, __imm)
#define vorrq_n_u16(__a, __imm) __arm_vorrq_n_u16(__a, __imm)
#define vbicq_n_u16(__a, __imm) __arm_vbicq_n_u16(__a, __imm)
#define vcmpneq_n_f16(__a, __b) __arm_vcmpneq_n_f16(__a, __b)
#define vcmpneq_f16(__a, __b) __arm_vcmpneq_f16(__a, __b)
#define vcmpltq_n_f16(__a, __b) __arm_vcmpltq_n_f16(__a, __b)
#define vcmpltq_f16(__a, __b) __arm_vcmpltq_f16(__a, __b)
#define vcmpleq_n_f16(__a, __b) __arm_vcmpleq_n_f16(__a, __b)
#define vcmpleq_f16(__a, __b) __arm_vcmpleq_f16(__a, __b)
#define vcmpgtq_n_f16(__a, __b) __arm_vcmpgtq_n_f16(__a, __b)
#define vcmpgtq_f16(__a, __b) __arm_vcmpgtq_f16(__a, __b)
#define vcmpgeq_n_f16(__a, __b) __arm_vcmpgeq_n_f16(__a, __b)
#define vcmpgeq_f16(__a, __b) __arm_vcmpgeq_f16(__a, __b)
#define vcmpeqq_n_f16(__a, __b) __arm_vcmpeqq_n_f16(__a, __b)
#define vcmpeqq_f16(__a, __b) __arm_vcmpeqq_f16(__a, __b)
#define vsubq_f16(__a, __b) __arm_vsubq_f16(__a, __b)
#define vqmovntq_s16(__a, __b) __arm_vqmovntq_s16(__a, __b)
#define vqmovnbq_s16(__a, __b) __arm_vqmovnbq_s16(__a, __b)
#define vqdmulltq_s16(__a, __b) __arm_vqdmulltq_s16(__a, __b)
#define vqdmulltq_n_s16(__a, __b) __arm_vqdmulltq_n_s16(__a, __b)
#define vqdmullbq_s16(__a, __b) __arm_vqdmullbq_s16(__a, __b)
#define vqdmullbq_n_s16(__a, __b) __arm_vqdmullbq_n_s16(__a, __b)
#define vorrq_f16(__a, __b) __arm_vorrq_f16(__a, __b)
#define vornq_f16(__a, __b) __arm_vornq_f16(__a, __b)
#define vmulq_n_f16(__a, __b) __arm_vmulq_n_f16(__a, __b)
#define vmulq_f16(__a, __b) __arm_vmulq_f16(__a, __b)
#define vmovntq_s16(__a, __b) __arm_vmovntq_s16(__a, __b)
#define vmovnbq_s16(__a, __b) __arm_vmovnbq_s16(__a, __b)
#define vmlsldavxq_s16(__a, __b) __arm_vmlsldavxq_s16(__a, __b)
#define vmlsldavq_s16(__a, __b) __arm_vmlsldavq_s16(__a, __b)
#define vmlaldavxq_s16(__a, __b) __arm_vmlaldavxq_s16(__a, __b)
#define vmlaldavq_s16(__a, __b) __arm_vmlaldavq_s16(__a, __b)
#define vminnmvq_f16(__a, __b) __arm_vminnmvq_f16(__a, __b)
#define vminnmq_f16(__a, __b) __arm_vminnmq_f16(__a, __b)
#define vminnmavq_f16(__a, __b) __arm_vminnmavq_f16(__a, __b)
#define vminnmaq_f16(__a, __b) __arm_vminnmaq_f16(__a, __b)
#define vmaxnmvq_f16(__a, __b) __arm_vmaxnmvq_f16(__a, __b)
#define vmaxnmq_f16(__a, __b) __arm_vmaxnmq_f16(__a, __b)
#define vmaxnmavq_f16(__a, __b) __arm_vmaxnmavq_f16(__a, __b)
#define vmaxnmaq_f16(__a, __b) __arm_vmaxnmaq_f16(__a, __b)
#define veorq_f16(__a, __b) __arm_veorq_f16(__a, __b)
#define vcmulq_rot90_f16(__a, __b) __arm_vcmulq_rot90_f16(__a, __b)
#define vcmulq_rot270_f16(__a, __b) __arm_vcmulq_rot270_f16(__a, __b)
#define vcmulq_rot180_f16(__a, __b) __arm_vcmulq_rot180_f16(__a, __b)
#define vcmulq_f16(__a, __b) __arm_vcmulq_f16(__a, __b)
#define vcaddq_rot90_f16(__a, __b) __arm_vcaddq_rot90_f16(__a, __b)
#define vcaddq_rot270_f16(__a, __b) __arm_vcaddq_rot270_f16(__a, __b)
#define vbicq_f16(__a, __b) __arm_vbicq_f16(__a, __b)
#define vandq_f16(__a, __b) __arm_vandq_f16(__a, __b)
#define vaddq_n_f16(__a, __b) __arm_vaddq_n_f16(__a, __b)
#define vabdq_f16(__a, __b) __arm_vabdq_f16(__a, __b)
#define vshlltq_n_s8(__a, __imm) __arm_vshlltq_n_s8(__a, __imm)
#define vshllbq_n_s8(__a, __imm) __arm_vshllbq_n_s8(__a, __imm)
#define vorrq_n_s16(__a, __imm) __arm_vorrq_n_s16(__a, __imm)
#define vbicq_n_s16(__a, __imm) __arm_vbicq_n_s16(__a, __imm)
#define vqmovntq_u32(__a, __b) __arm_vqmovntq_u32(__a, __b)
#define vqmovnbq_u32(__a, __b) __arm_vqmovnbq_u32(__a, __b)
#define vmulltq_poly_p16(__a, __b) __arm_vmulltq_poly_p16(__a, __b)
#define vmullbq_poly_p16(__a, __b) __arm_vmullbq_poly_p16(__a, __b)
#define vmovntq_u32(__a, __b) __arm_vmovntq_u32(__a, __b)
#define vmovnbq_u32(__a, __b) __arm_vmovnbq_u32(__a, __b)
#define vmlaldavq_u32(__a, __b) __arm_vmlaldavq_u32(__a, __b)
#define vqmovuntq_s32(__a, __b) __arm_vqmovuntq_s32(__a, __b)
#define vqmovunbq_s32(__a, __b) __arm_vqmovunbq_s32(__a, __b)
#define vshlltq_n_u16(__a, __imm) __arm_vshlltq_n_u16(__a, __imm)
#define vshllbq_n_u16(__a, __imm) __arm_vshllbq_n_u16(__a, __imm)
#define vorrq_n_u32(__a, __imm) __arm_vorrq_n_u32(__a, __imm)
#define vbicq_n_u32(__a, __imm) __arm_vbicq_n_u32(__a, __imm)
#define vcmpneq_n_f32(__a, __b) __arm_vcmpneq_n_f32(__a, __b)
#define vcmpneq_f32(__a, __b) __arm_vcmpneq_f32(__a, __b)
#define vcmpltq_n_f32(__a, __b) __arm_vcmpltq_n_f32(__a, __b)
#define vcmpltq_f32(__a, __b) __arm_vcmpltq_f32(__a, __b)
#define vcmpleq_n_f32(__a, __b) __arm_vcmpleq_n_f32(__a, __b)
#define vcmpleq_f32(__a, __b) __arm_vcmpleq_f32(__a, __b)
#define vcmpgtq_n_f32(__a, __b) __arm_vcmpgtq_n_f32(__a, __b)
#define vcmpgtq_f32(__a, __b) __arm_vcmpgtq_f32(__a, __b)
#define vcmpgeq_n_f32(__a, __b) __arm_vcmpgeq_n_f32(__a, __b)
#define vcmpgeq_f32(__a, __b) __arm_vcmpgeq_f32(__a, __b)
#define vcmpeqq_n_f32(__a, __b) __arm_vcmpeqq_n_f32(__a, __b)
#define vcmpeqq_f32(__a, __b) __arm_vcmpeqq_f32(__a, __b)
#define vsubq_f32(__a, __b) __arm_vsubq_f32(__a, __b)
#define vqmovntq_s32(__a, __b) __arm_vqmovntq_s32(__a, __b)
#define vqmovnbq_s32(__a, __b) __arm_vqmovnbq_s32(__a, __b)
#define vqdmulltq_s32(__a, __b) __arm_vqdmulltq_s32(__a, __b)
#define vqdmulltq_n_s32(__a, __b) __arm_vqdmulltq_n_s32(__a, __b)
#define vqdmullbq_s32(__a, __b) __arm_vqdmullbq_s32(__a, __b)
#define vqdmullbq_n_s32(__a, __b) __arm_vqdmullbq_n_s32(__a, __b)
#define vorrq_f32(__a, __b) __arm_vorrq_f32(__a, __b)
#define vornq_f32(__a, __b) __arm_vornq_f32(__a, __b)
#define vmulq_n_f32(__a, __b) __arm_vmulq_n_f32(__a, __b)
#define vmulq_f32(__a, __b) __arm_vmulq_f32(__a, __b)
#define vmovntq_s32(__a, __b) __arm_vmovntq_s32(__a, __b)
#define vmovnbq_s32(__a, __b) __arm_vmovnbq_s32(__a, __b)
#define vmlsldavxq_s32(__a, __b) __arm_vmlsldavxq_s32(__a, __b)
#define vmlsldavq_s32(__a, __b) __arm_vmlsldavq_s32(__a, __b)
#define vmlaldavxq_s32(__a, __b) __arm_vmlaldavxq_s32(__a, __b)
#define vmlaldavq_s32(__a, __b) __arm_vmlaldavq_s32(__a, __b)
#define vminnmvq_f32(__a, __b) __arm_vminnmvq_f32(__a, __b)
#define vminnmq_f32(__a, __b) __arm_vminnmq_f32(__a, __b)
#define vminnmavq_f32(__a, __b) __arm_vminnmavq_f32(__a, __b)
#define vminnmaq_f32(__a, __b) __arm_vminnmaq_f32(__a, __b)
#define vmaxnmvq_f32(__a, __b) __arm_vmaxnmvq_f32(__a, __b)
#define vmaxnmq_f32(__a, __b) __arm_vmaxnmq_f32(__a, __b)
#define vmaxnmavq_f32(__a, __b) __arm_vmaxnmavq_f32(__a, __b)
#define vmaxnmaq_f32(__a, __b) __arm_vmaxnmaq_f32(__a, __b)
#define veorq_f32(__a, __b) __arm_veorq_f32(__a, __b)
#define vcmulq_rot90_f32(__a, __b) __arm_vcmulq_rot90_f32(__a, __b)
#define vcmulq_rot270_f32(__a, __b) __arm_vcmulq_rot270_f32(__a, __b)
#define vcmulq_rot180_f32(__a, __b) __arm_vcmulq_rot180_f32(__a, __b)
#define vcmulq_f32(__a, __b) __arm_vcmulq_f32(__a, __b)
#define vcaddq_rot90_f32(__a, __b) __arm_vcaddq_rot90_f32(__a, __b)
#define vcaddq_rot270_f32(__a, __b) __arm_vcaddq_rot270_f32(__a, __b)
#define vbicq_f32(__a, __b) __arm_vbicq_f32(__a, __b)
#define vandq_f32(__a, __b) __arm_vandq_f32(__a, __b)
#define vaddq_n_f32(__a, __b) __arm_vaddq_n_f32(__a, __b)
#define vabdq_f32(__a, __b) __arm_vabdq_f32(__a, __b)
#define vshlltq_n_s16(__a, __imm) __arm_vshlltq_n_s16(__a, __imm)
#define vshllbq_n_s16(__a, __imm) __arm_vshllbq_n_s16(__a, __imm)
#define vorrq_n_s32(__a, __imm) __arm_vorrq_n_s32(__a, __imm)
#define vbicq_n_s32(__a, __imm) __arm_vbicq_n_s32(__a, __imm)
#define vrmlaldavhq_u32(__a, __b) __arm_vrmlaldavhq_u32(__a, __b)
#define vctp8q_m(__a, __p) __arm_vctp8q_m(__a, __p)
#define vctp64q_m(__a, __p) __arm_vctp64q_m(__a, __p)
#define vctp32q_m(__a, __p) __arm_vctp32q_m(__a, __p)
#define vctp16q_m(__a, __p) __arm_vctp16q_m(__a, __p)
#define vaddlvaq_u32(__a, __b) __arm_vaddlvaq_u32(__a, __b)
#define vrmlsldavhxq_s32(__a, __b) __arm_vrmlsldavhxq_s32(__a, __b)
#define vrmlsldavhq_s32(__a, __b) __arm_vrmlsldavhq_s32(__a, __b)
#define vrmlaldavhxq_s32(__a, __b) __arm_vrmlaldavhxq_s32(__a, __b)
#define vrmlaldavhq_s32(__a, __b) __arm_vrmlaldavhq_s32(__a, __b)
#define vcvttq_f16_f32(__a, __b) __arm_vcvttq_f16_f32(__a, __b)
#define vcvtbq_f16_f32(__a, __b) __arm_vcvtbq_f16_f32(__a, __b)
#define vaddlvaq_s32(__a, __b) __arm_vaddlvaq_s32(__a, __b)
/* Ternary (three-operand) intrinsic aliases: absolute-difference
   accumulate, masked bit-clear, predicated conversions, rounding
   narrowing shifts, and shift-with-carry.  */
#define vabavq_s8(__a, __b, __c) __arm_vabavq_s8(__a, __b, __c)
#define vabavq_s16(__a, __b, __c) __arm_vabavq_s16(__a, __b, __c)
#define vabavq_s32(__a, __b, __c) __arm_vabavq_s32(__a, __b, __c)
#define vbicq_m_n_s16(__a, __imm, __p) __arm_vbicq_m_n_s16(__a, __imm, __p)
#define vbicq_m_n_s32(__a, __imm, __p) __arm_vbicq_m_n_s32(__a, __imm, __p)
#define vbicq_m_n_u16(__a, __imm, __p) __arm_vbicq_m_n_u16(__a, __imm, __p)
#define vbicq_m_n_u32(__a, __imm, __p) __arm_vbicq_m_n_u32(__a, __imm, __p)
#define vcmpeqq_m_f16(__a, __b, __p) __arm_vcmpeqq_m_f16(__a, __b, __p)
#define vcmpeqq_m_f32(__a, __b, __p) __arm_vcmpeqq_m_f32(__a, __b, __p)
#define vcvtaq_m_s16_f16(__inactive, __a, __p) __arm_vcvtaq_m_s16_f16(__inactive, __a, __p)
#define vcvtaq_m_u16_f16(__inactive, __a, __p) __arm_vcvtaq_m_u16_f16(__inactive, __a, __p)
#define vcvtaq_m_s32_f32(__inactive, __a, __p) __arm_vcvtaq_m_s32_f32(__inactive, __a, __p)
#define vcvtaq_m_u32_f32(__inactive, __a, __p) __arm_vcvtaq_m_u32_f32(__inactive, __a, __p)
#define vcvtq_m_f16_s16(__inactive, __a, __p) __arm_vcvtq_m_f16_s16(__inactive, __a, __p)
#define vcvtq_m_f16_u16(__inactive, __a, __p) __arm_vcvtq_m_f16_u16(__inactive, __a, __p)
#define vcvtq_m_f32_s32(__inactive, __a, __p) __arm_vcvtq_m_f32_s32(__inactive, __a, __p)
#define vcvtq_m_f32_u32(__inactive, __a, __p) __arm_vcvtq_m_f32_u32(__inactive, __a, __p)
#define vqrshrnbq_n_s16(__a, __b, __imm) __arm_vqrshrnbq_n_s16(__a, __b, __imm)
#define vqrshrnbq_n_u16(__a, __b, __imm) __arm_vqrshrnbq_n_u16(__a, __b, __imm)
#define vqrshrnbq_n_s32(__a, __b, __imm) __arm_vqrshrnbq_n_s32(__a, __b, __imm)
#define vqrshrnbq_n_u32(__a, __b, __imm) __arm_vqrshrnbq_n_u32(__a, __b, __imm)
#define vqrshrunbq_n_s16(__a, __b, __imm) __arm_vqrshrunbq_n_s16(__a, __b, __imm)
#define vqrshrunbq_n_s32(__a, __b, __imm) __arm_vqrshrunbq_n_s32(__a, __b, __imm)
#define vrmlaldavhaq_s32(__a, __b, __c) __arm_vrmlaldavhaq_s32(__a, __b, __c)
#define vrmlaldavhaq_u32(__a, __b, __c) __arm_vrmlaldavhaq_u32(__a, __b, __c)
#define vshlcq_s8(__a, __b, __imm) __arm_vshlcq_s8(__a, __b, __imm)
#define vshlcq_u8(__a, __b, __imm) __arm_vshlcq_u8(__a, __b, __imm)
#define vshlcq_s16(__a, __b, __imm) __arm_vshlcq_s16(__a, __b, __imm)
#define vshlcq_u16(__a, __b, __imm) __arm_vshlcq_u16(__a, __b, __imm)
#define vshlcq_s32(__a, __b, __imm) __arm_vshlcq_s32(__a, __b, __imm)
#define vshlcq_u32(__a, __b, __imm) __arm_vshlcq_u32(__a, __b, __imm)
#define vabavq_u8(__a, __b, __c) __arm_vabavq_u8(__a, __b, __c)
#define vabavq_u16(__a, __b, __c) __arm_vabavq_u16(__a, __b, __c)
#define vabavq_u32(__a, __b, __c) __arm_vabavq_u32(__a, __b, __c)
/* Predicated (_m/_p) and accumulating u8/s8/u16 intrinsic aliases.  */
#define vpselq_u8(__a, __b, __p) __arm_vpselq_u8(__a, __b, __p)
#define vpselq_s8(__a, __b, __p) __arm_vpselq_s8(__a, __b, __p)
#define vrev64q_m_u8(__inactive, __a, __p) __arm_vrev64q_m_u8(__inactive, __a, __p)
#define vmvnq_m_u8(__inactive, __a, __p) __arm_vmvnq_m_u8(__inactive, __a, __p)
#define vmlasq_n_u8(__a, __b, __c) __arm_vmlasq_n_u8(__a, __b, __c)
#define vmlaq_n_u8(__a, __b, __c) __arm_vmlaq_n_u8(__a, __b, __c)
#define vmladavq_p_u8(__a, __b, __p) __arm_vmladavq_p_u8(__a, __b, __p)
#define vmladavaq_u8(__a, __b, __c) __arm_vmladavaq_u8(__a, __b, __c)
#define vminvq_p_u8(__a, __b, __p) __arm_vminvq_p_u8(__a, __b, __p)
#define vmaxvq_p_u8(__a, __b, __p) __arm_vmaxvq_p_u8(__a, __b, __p)
#define vdupq_m_n_u8(__inactive, __a, __p) __arm_vdupq_m_n_u8(__inactive, __a, __p)
#define vcmpneq_m_u8(__a, __b, __p) __arm_vcmpneq_m_u8(__a, __b, __p)
#define vcmpneq_m_n_u8(__a, __b, __p) __arm_vcmpneq_m_n_u8(__a, __b, __p)
#define vcmphiq_m_u8(__a, __b, __p) __arm_vcmphiq_m_u8(__a, __b, __p)
#define vcmphiq_m_n_u8(__a, __b, __p) __arm_vcmphiq_m_n_u8(__a, __b, __p)
#define vcmpeqq_m_u8(__a, __b, __p) __arm_vcmpeqq_m_u8(__a, __b, __p)
#define vcmpeqq_m_n_u8(__a, __b, __p) __arm_vcmpeqq_m_n_u8(__a, __b, __p)
#define vcmpcsq_m_u8(__a, __b, __p) __arm_vcmpcsq_m_u8(__a, __b, __p)
#define vcmpcsq_m_n_u8(__a, __b, __p) __arm_vcmpcsq_m_n_u8(__a, __b, __p)
#define vclzq_m_u8(__inactive, __a, __p) __arm_vclzq_m_u8(__inactive, __a, __p)
#define vaddvaq_p_u8(__a, __b, __p) __arm_vaddvaq_p_u8(__a, __b, __p)
#define vsriq_n_u8(__a, __b, __imm) __arm_vsriq_n_u8(__a, __b, __imm)
#define vsliq_n_u8(__a, __b, __imm) __arm_vsliq_n_u8(__a, __b, __imm)
#define vshlq_m_r_u8(__a, __b, __p) __arm_vshlq_m_r_u8(__a, __b, __p)
#define vrshlq_m_n_u8(__a, __b, __p) __arm_vrshlq_m_n_u8(__a, __b, __p)
#define vqshlq_m_r_u8(__a, __b, __p) __arm_vqshlq_m_r_u8(__a, __b, __p)
#define vqrshlq_m_n_u8(__a, __b, __p) __arm_vqrshlq_m_n_u8(__a, __b, __p)
#define vminavq_p_s8(__a, __b, __p) __arm_vminavq_p_s8(__a, __b, __p)
#define vminaq_m_s8(__a, __b, __p) __arm_vminaq_m_s8(__a, __b, __p)
#define vmaxavq_p_s8(__a, __b, __p) __arm_vmaxavq_p_s8(__a, __b, __p)
#define vmaxaq_m_s8(__a, __b, __p) __arm_vmaxaq_m_s8(__a, __b, __p)
#define vcmpneq_m_s8(__a, __b, __p) __arm_vcmpneq_m_s8(__a, __b, __p)
#define vcmpneq_m_n_s8(__a, __b, __p) __arm_vcmpneq_m_n_s8(__a, __b, __p)
#define vcmpltq_m_s8(__a, __b, __p) __arm_vcmpltq_m_s8(__a, __b, __p)
#define vcmpltq_m_n_s8(__a, __b, __p) __arm_vcmpltq_m_n_s8(__a, __b, __p)
#define vcmpleq_m_s8(__a, __b, __p) __arm_vcmpleq_m_s8(__a, __b, __p)
#define vcmpleq_m_n_s8(__a, __b, __p) __arm_vcmpleq_m_n_s8(__a, __b, __p)
#define vcmpgtq_m_s8(__a, __b, __p) __arm_vcmpgtq_m_s8(__a, __b, __p)
#define vcmpgtq_m_n_s8(__a, __b, __p) __arm_vcmpgtq_m_n_s8(__a, __b, __p)
#define vcmpgeq_m_s8(__a, __b, __p) __arm_vcmpgeq_m_s8(__a, __b, __p)
#define vcmpgeq_m_n_s8(__a, __b, __p) __arm_vcmpgeq_m_n_s8(__a, __b, __p)
#define vcmpeqq_m_s8(__a, __b, __p) __arm_vcmpeqq_m_s8(__a, __b, __p)
#define vcmpeqq_m_n_s8(__a, __b, __p) __arm_vcmpeqq_m_n_s8(__a, __b, __p)
#define vshlq_m_r_s8(__a, __b, __p) __arm_vshlq_m_r_s8(__a, __b, __p)
#define vrshlq_m_n_s8(__a, __b, __p) __arm_vrshlq_m_n_s8(__a, __b, __p)
#define vrev64q_m_s8(__inactive, __a, __p) __arm_vrev64q_m_s8(__inactive, __a, __p)
#define vqshlq_m_r_s8(__a, __b, __p) __arm_vqshlq_m_r_s8(__a, __b, __p)
#define vqrshlq_m_n_s8(__a, __b, __p) __arm_vqrshlq_m_n_s8(__a, __b, __p)
#define vqnegq_m_s8(__inactive, __a, __p) __arm_vqnegq_m_s8(__inactive, __a, __p)
#define vqabsq_m_s8(__inactive, __a, __p) __arm_vqabsq_m_s8(__inactive, __a, __p)
#define vnegq_m_s8(__inactive, __a, __p) __arm_vnegq_m_s8(__inactive, __a, __p)
#define vmvnq_m_s8(__inactive, __a, __p) __arm_vmvnq_m_s8(__inactive, __a, __p)
#define vmlsdavxq_p_s8(__a, __b, __p) __arm_vmlsdavxq_p_s8(__a, __b, __p)
#define vmlsdavq_p_s8(__a, __b, __p) __arm_vmlsdavq_p_s8(__a, __b, __p)
#define vmladavxq_p_s8(__a, __b, __p) __arm_vmladavxq_p_s8(__a, __b, __p)
#define vmladavq_p_s8(__a, __b, __p) __arm_vmladavq_p_s8(__a, __b, __p)
#define vminvq_p_s8(__a, __b, __p) __arm_vminvq_p_s8(__a, __b, __p)
#define vmaxvq_p_s8(__a, __b, __p) __arm_vmaxvq_p_s8(__a, __b, __p)
#define vdupq_m_n_s8(__inactive, __a, __p) __arm_vdupq_m_n_s8(__inactive, __a, __p)
#define vclzq_m_s8(__inactive, __a, __p) __arm_vclzq_m_s8(__inactive, __a, __p)
#define vclsq_m_s8(__inactive, __a, __p) __arm_vclsq_m_s8(__inactive, __a, __p)
#define vaddvaq_p_s8(__a, __b, __p) __arm_vaddvaq_p_s8(__a, __b, __p)
#define vabsq_m_s8(__inactive, __a, __p) __arm_vabsq_m_s8(__inactive, __a, __p)
#define vqrdmlsdhxq_s8(__inactive, __a, __b) __arm_vqrdmlsdhxq_s8(__inactive, __a, __b)
#define vqrdmlsdhq_s8(__inactive, __a, __b) __arm_vqrdmlsdhq_s8(__inactive, __a, __b)
#define vqrdmlashq_n_s8(__a, __b, __c) __arm_vqrdmlashq_n_s8(__a, __b, __c)
#define vqrdmlahq_n_s8(__a, __b, __c) __arm_vqrdmlahq_n_s8(__a, __b, __c)
#define vqrdmladhxq_s8(__inactive, __a, __b) __arm_vqrdmladhxq_s8(__inactive, __a, __b)
#define vqrdmladhq_s8(__inactive, __a, __b) __arm_vqrdmladhq_s8(__inactive, __a, __b)
#define vqdmlsdhxq_s8(__inactive, __a, __b) __arm_vqdmlsdhxq_s8(__inactive, __a, __b)
#define vqdmlsdhq_s8(__inactive, __a, __b) __arm_vqdmlsdhq_s8(__inactive, __a, __b)
#define vqdmlahq_n_s8(__a, __b, __c) __arm_vqdmlahq_n_s8(__a, __b, __c)
#define vqdmlashq_n_s8(__a, __b, __c) __arm_vqdmlashq_n_s8(__a, __b, __c)
#define vqdmladhxq_s8(__inactive, __a, __b) __arm_vqdmladhxq_s8(__inactive, __a, __b)
#define vqdmladhq_s8(__inactive, __a, __b) __arm_vqdmladhq_s8(__inactive, __a, __b)
#define vmlsdavaxq_s8(__a, __b, __c) __arm_vmlsdavaxq_s8(__a, __b, __c)
#define vmlsdavaq_s8(__a, __b, __c) __arm_vmlsdavaq_s8(__a, __b, __c)
#define vmlasq_n_s8(__a, __b, __c) __arm_vmlasq_n_s8(__a, __b, __c)
#define vmlaq_n_s8(__a, __b, __c) __arm_vmlaq_n_s8(__a, __b, __c)
#define vmladavaxq_s8(__a, __b, __c) __arm_vmladavaxq_s8(__a, __b, __c)
#define vmladavaq_s8(__a, __b, __c) __arm_vmladavaq_s8(__a, __b, __c)
#define vsriq_n_s8(__a, __b, __imm) __arm_vsriq_n_s8(__a, __b, __imm)
#define vsliq_n_s8(__a, __b, __imm) __arm_vsliq_n_s8(__a, __b, __imm)
#define vpselq_u16(__a, __b, __p) __arm_vpselq_u16(__a, __b, __p)
#define vpselq_s16(__a, __b, __p) __arm_vpselq_s16(__a, __b, __p)
#define vrev64q_m_u16(__inactive, __a, __p) __arm_vrev64q_m_u16(__inactive, __a, __p)
#define vmvnq_m_u16(__inactive, __a, __p) __arm_vmvnq_m_u16(__inactive, __a, __p)
#define vmlasq_n_u16(__a, __b, __c) __arm_vmlasq_n_u16(__a, __b, __c)
#define vmlaq_n_u16(__a, __b, __c) __arm_vmlaq_n_u16(__a, __b, __c)
#define vmladavq_p_u16(__a, __b, __p) __arm_vmladavq_p_u16(__a, __b, __p)
#define vmladavaq_u16(__a, __b, __c) __arm_vmladavaq_u16(__a, __b, __c)
#define vminvq_p_u16(__a, __b, __p) __arm_vminvq_p_u16(__a, __b, __p)
#define vmaxvq_p_u16(__a, __b, __p) __arm_vmaxvq_p_u16(__a, __b, __p)
#define vdupq_m_n_u16(__inactive, __a, __p) __arm_vdupq_m_n_u16(__inactive, __a, __p)
#define vcmpneq_m_u16(__a, __b, __p) __arm_vcmpneq_m_u16(__a, __b, __p)
#define vcmpneq_m_n_u16(__a, __b, __p) __arm_vcmpneq_m_n_u16(__a, __b, __p)
#define vcmphiq_m_u16(__a, __b, __p) __arm_vcmphiq_m_u16(__a, __b, __p)
#define vcmphiq_m_n_u16(__a, __b, __p) __arm_vcmphiq_m_n_u16(__a, __b, __p)
1335 | #define vcmpeqq_m_u16(__a, __b, __p) __arm_vcmpeqq_m_u16(__a, __b, __p) | |
1336 | #define vcmpeqq_m_n_u16(__a, __b, __p) __arm_vcmpeqq_m_n_u16(__a, __b, __p) | |
1337 | #define vcmpcsq_m_u16(__a, __b, __p) __arm_vcmpcsq_m_u16(__a, __b, __p) | |
1338 | #define vcmpcsq_m_n_u16(__a, __b, __p) __arm_vcmpcsq_m_n_u16(__a, __b, __p) | |
1339 | #define vclzq_m_u16(__inactive, __a, __p) __arm_vclzq_m_u16(__inactive, __a, __p) | |
1340 | #define vaddvaq_p_u16(__a, __b, __p) __arm_vaddvaq_p_u16(__a, __b, __p) | |
1341 | #define vsriq_n_u16(__a, __b, __imm) __arm_vsriq_n_u16(__a, __b, __imm) | |
1342 | #define vsliq_n_u16(__a, __b, __imm) __arm_vsliq_n_u16(__a, __b, __imm) | |
1343 | #define vshlq_m_r_u16(__a, __b, __p) __arm_vshlq_m_r_u16(__a, __b, __p) | |
1344 | #define vrshlq_m_n_u16(__a, __b, __p) __arm_vrshlq_m_n_u16(__a, __b, __p) | |
1345 | #define vqshlq_m_r_u16(__a, __b, __p) __arm_vqshlq_m_r_u16(__a, __b, __p) | |
1346 | #define vqrshlq_m_n_u16(__a, __b, __p) __arm_vqrshlq_m_n_u16(__a, __b, __p) | |
1347 | #define vminavq_p_s16(__a, __b, __p) __arm_vminavq_p_s16(__a, __b, __p) | |
1348 | #define vminaq_m_s16(__a, __b, __p) __arm_vminaq_m_s16(__a, __b, __p) | |
1349 | #define vmaxavq_p_s16(__a, __b, __p) __arm_vmaxavq_p_s16(__a, __b, __p) | |
1350 | #define vmaxaq_m_s16(__a, __b, __p) __arm_vmaxaq_m_s16(__a, __b, __p) | |
1351 | #define vcmpneq_m_s16(__a, __b, __p) __arm_vcmpneq_m_s16(__a, __b, __p) | |
1352 | #define vcmpneq_m_n_s16(__a, __b, __p) __arm_vcmpneq_m_n_s16(__a, __b, __p) | |
1353 | #define vcmpltq_m_s16(__a, __b, __p) __arm_vcmpltq_m_s16(__a, __b, __p) | |
1354 | #define vcmpltq_m_n_s16(__a, __b, __p) __arm_vcmpltq_m_n_s16(__a, __b, __p) | |
1355 | #define vcmpleq_m_s16(__a, __b, __p) __arm_vcmpleq_m_s16(__a, __b, __p) | |
1356 | #define vcmpleq_m_n_s16(__a, __b, __p) __arm_vcmpleq_m_n_s16(__a, __b, __p) | |
1357 | #define vcmpgtq_m_s16(__a, __b, __p) __arm_vcmpgtq_m_s16(__a, __b, __p) | |
1358 | #define vcmpgtq_m_n_s16(__a, __b, __p) __arm_vcmpgtq_m_n_s16(__a, __b, __p) | |
1359 | #define vcmpgeq_m_s16(__a, __b, __p) __arm_vcmpgeq_m_s16(__a, __b, __p) | |
1360 | #define vcmpgeq_m_n_s16(__a, __b, __p) __arm_vcmpgeq_m_n_s16(__a, __b, __p) | |
1361 | #define vcmpeqq_m_s16(__a, __b, __p) __arm_vcmpeqq_m_s16(__a, __b, __p) | |
1362 | #define vcmpeqq_m_n_s16(__a, __b, __p) __arm_vcmpeqq_m_n_s16(__a, __b, __p) | |
1363 | #define vshlq_m_r_s16(__a, __b, __p) __arm_vshlq_m_r_s16(__a, __b, __p) | |
1364 | #define vrshlq_m_n_s16(__a, __b, __p) __arm_vrshlq_m_n_s16(__a, __b, __p) | |
1365 | #define vrev64q_m_s16(__inactive, __a, __p) __arm_vrev64q_m_s16(__inactive, __a, __p) | |
1366 | #define vqshlq_m_r_s16(__a, __b, __p) __arm_vqshlq_m_r_s16(__a, __b, __p) | |
1367 | #define vqrshlq_m_n_s16(__a, __b, __p) __arm_vqrshlq_m_n_s16(__a, __b, __p) | |
1368 | #define vqnegq_m_s16(__inactive, __a, __p) __arm_vqnegq_m_s16(__inactive, __a, __p) | |
1369 | #define vqabsq_m_s16(__inactive, __a, __p) __arm_vqabsq_m_s16(__inactive, __a, __p) | |
1370 | #define vnegq_m_s16(__inactive, __a, __p) __arm_vnegq_m_s16(__inactive, __a, __p) | |
1371 | #define vmvnq_m_s16(__inactive, __a, __p) __arm_vmvnq_m_s16(__inactive, __a, __p) | |
1372 | #define vmlsdavxq_p_s16(__a, __b, __p) __arm_vmlsdavxq_p_s16(__a, __b, __p) | |
1373 | #define vmlsdavq_p_s16(__a, __b, __p) __arm_vmlsdavq_p_s16(__a, __b, __p) | |
1374 | #define vmladavxq_p_s16(__a, __b, __p) __arm_vmladavxq_p_s16(__a, __b, __p) | |
1375 | #define vmladavq_p_s16(__a, __b, __p) __arm_vmladavq_p_s16(__a, __b, __p) | |
1376 | #define vminvq_p_s16(__a, __b, __p) __arm_vminvq_p_s16(__a, __b, __p) | |
1377 | #define vmaxvq_p_s16(__a, __b, __p) __arm_vmaxvq_p_s16(__a, __b, __p) | |
1378 | #define vdupq_m_n_s16(__inactive, __a, __p) __arm_vdupq_m_n_s16(__inactive, __a, __p) | |
1379 | #define vclzq_m_s16(__inactive, __a, __p) __arm_vclzq_m_s16(__inactive, __a, __p) | |
1380 | #define vclsq_m_s16(__inactive, __a, __p) __arm_vclsq_m_s16(__inactive, __a, __p) | |
1381 | #define vaddvaq_p_s16(__a, __b, __p) __arm_vaddvaq_p_s16(__a, __b, __p) | |
1382 | #define vabsq_m_s16(__inactive, __a, __p) __arm_vabsq_m_s16(__inactive, __a, __p) | |
1383 | #define vqrdmlsdhxq_s16(__inactive, __a, __b) __arm_vqrdmlsdhxq_s16(__inactive, __a, __b) | |
1384 | #define vqrdmlsdhq_s16(__inactive, __a, __b) __arm_vqrdmlsdhq_s16(__inactive, __a, __b) | |
1385 | #define vqrdmlashq_n_s16(__a, __b, __c) __arm_vqrdmlashq_n_s16(__a, __b, __c) | |
1386 | #define vqrdmlahq_n_s16(__a, __b, __c) __arm_vqrdmlahq_n_s16(__a, __b, __c) | |
1387 | #define vqrdmladhxq_s16(__inactive, __a, __b) __arm_vqrdmladhxq_s16(__inactive, __a, __b) | |
1388 | #define vqrdmladhq_s16(__inactive, __a, __b) __arm_vqrdmladhq_s16(__inactive, __a, __b) | |
1389 | #define vqdmlsdhxq_s16(__inactive, __a, __b) __arm_vqdmlsdhxq_s16(__inactive, __a, __b) | |
1390 | #define vqdmlsdhq_s16(__inactive, __a, __b) __arm_vqdmlsdhq_s16(__inactive, __a, __b) | |
afb198ee | 1391 | #define vqdmlashq_n_s16(__a, __b, __c) __arm_vqdmlashq_n_s16(__a, __b, __c) |
8165795c SP |
1392 | #define vqdmlahq_n_s16(__a, __b, __c) __arm_vqdmlahq_n_s16(__a, __b, __c) |
1393 | #define vqdmladhxq_s16(__inactive, __a, __b) __arm_vqdmladhxq_s16(__inactive, __a, __b) | |
1394 | #define vqdmladhq_s16(__inactive, __a, __b) __arm_vqdmladhq_s16(__inactive, __a, __b) | |
1395 | #define vmlsdavaxq_s16(__a, __b, __c) __arm_vmlsdavaxq_s16(__a, __b, __c) | |
1396 | #define vmlsdavaq_s16(__a, __b, __c) __arm_vmlsdavaq_s16(__a, __b, __c) | |
1397 | #define vmlasq_n_s16(__a, __b, __c) __arm_vmlasq_n_s16(__a, __b, __c) | |
1398 | #define vmlaq_n_s16(__a, __b, __c) __arm_vmlaq_n_s16(__a, __b, __c) | |
1399 | #define vmladavaxq_s16(__a, __b, __c) __arm_vmladavaxq_s16(__a, __b, __c) | |
1400 | #define vmladavaq_s16(__a, __b, __c) __arm_vmladavaq_s16(__a, __b, __c) | |
1401 | #define vsriq_n_s16(__a, __b, __imm) __arm_vsriq_n_s16(__a, __b, __imm) | |
1402 | #define vsliq_n_s16(__a, __b, __imm) __arm_vsliq_n_s16(__a, __b, __imm) | |
1403 | #define vpselq_u32(__a, __b, __p) __arm_vpselq_u32(__a, __b, __p) | |
1404 | #define vpselq_s32(__a, __b, __p) __arm_vpselq_s32(__a, __b, __p) | |
1405 | #define vrev64q_m_u32(__inactive, __a, __p) __arm_vrev64q_m_u32(__inactive, __a, __p) | |
8165795c SP |
1406 | #define vmvnq_m_u32(__inactive, __a, __p) __arm_vmvnq_m_u32(__inactive, __a, __p) |
1407 | #define vmlasq_n_u32(__a, __b, __c) __arm_vmlasq_n_u32(__a, __b, __c) | |
1408 | #define vmlaq_n_u32(__a, __b, __c) __arm_vmlaq_n_u32(__a, __b, __c) | |
1409 | #define vmladavq_p_u32(__a, __b, __p) __arm_vmladavq_p_u32(__a, __b, __p) | |
1410 | #define vmladavaq_u32(__a, __b, __c) __arm_vmladavaq_u32(__a, __b, __c) | |
1411 | #define vminvq_p_u32(__a, __b, __p) __arm_vminvq_p_u32(__a, __b, __p) | |
1412 | #define vmaxvq_p_u32(__a, __b, __p) __arm_vmaxvq_p_u32(__a, __b, __p) | |
1413 | #define vdupq_m_n_u32(__inactive, __a, __p) __arm_vdupq_m_n_u32(__inactive, __a, __p) | |
1414 | #define vcmpneq_m_u32(__a, __b, __p) __arm_vcmpneq_m_u32(__a, __b, __p) | |
1415 | #define vcmpneq_m_n_u32(__a, __b, __p) __arm_vcmpneq_m_n_u32(__a, __b, __p) | |
1416 | #define vcmphiq_m_u32(__a, __b, __p) __arm_vcmphiq_m_u32(__a, __b, __p) | |
1417 | #define vcmphiq_m_n_u32(__a, __b, __p) __arm_vcmphiq_m_n_u32(__a, __b, __p) | |
1418 | #define vcmpeqq_m_u32(__a, __b, __p) __arm_vcmpeqq_m_u32(__a, __b, __p) | |
1419 | #define vcmpeqq_m_n_u32(__a, __b, __p) __arm_vcmpeqq_m_n_u32(__a, __b, __p) | |
1420 | #define vcmpcsq_m_u32(__a, __b, __p) __arm_vcmpcsq_m_u32(__a, __b, __p) | |
1421 | #define vcmpcsq_m_n_u32(__a, __b, __p) __arm_vcmpcsq_m_n_u32(__a, __b, __p) | |
1422 | #define vclzq_m_u32(__inactive, __a, __p) __arm_vclzq_m_u32(__inactive, __a, __p) | |
1423 | #define vaddvaq_p_u32(__a, __b, __p) __arm_vaddvaq_p_u32(__a, __b, __p) | |
1424 | #define vsriq_n_u32(__a, __b, __imm) __arm_vsriq_n_u32(__a, __b, __imm) | |
1425 | #define vsliq_n_u32(__a, __b, __imm) __arm_vsliq_n_u32(__a, __b, __imm) | |
1426 | #define vshlq_m_r_u32(__a, __b, __p) __arm_vshlq_m_r_u32(__a, __b, __p) | |
1427 | #define vrshlq_m_n_u32(__a, __b, __p) __arm_vrshlq_m_n_u32(__a, __b, __p) | |
1428 | #define vqshlq_m_r_u32(__a, __b, __p) __arm_vqshlq_m_r_u32(__a, __b, __p) | |
1429 | #define vqrshlq_m_n_u32(__a, __b, __p) __arm_vqrshlq_m_n_u32(__a, __b, __p) | |
1430 | #define vminavq_p_s32(__a, __b, __p) __arm_vminavq_p_s32(__a, __b, __p) | |
1431 | #define vminaq_m_s32(__a, __b, __p) __arm_vminaq_m_s32(__a, __b, __p) | |
1432 | #define vmaxavq_p_s32(__a, __b, __p) __arm_vmaxavq_p_s32(__a, __b, __p) | |
1433 | #define vmaxaq_m_s32(__a, __b, __p) __arm_vmaxaq_m_s32(__a, __b, __p) | |
1434 | #define vcmpneq_m_s32(__a, __b, __p) __arm_vcmpneq_m_s32(__a, __b, __p) | |
1435 | #define vcmpneq_m_n_s32(__a, __b, __p) __arm_vcmpneq_m_n_s32(__a, __b, __p) | |
1436 | #define vcmpltq_m_s32(__a, __b, __p) __arm_vcmpltq_m_s32(__a, __b, __p) | |
1437 | #define vcmpltq_m_n_s32(__a, __b, __p) __arm_vcmpltq_m_n_s32(__a, __b, __p) | |
1438 | #define vcmpleq_m_s32(__a, __b, __p) __arm_vcmpleq_m_s32(__a, __b, __p) | |
1439 | #define vcmpleq_m_n_s32(__a, __b, __p) __arm_vcmpleq_m_n_s32(__a, __b, __p) | |
1440 | #define vcmpgtq_m_s32(__a, __b, __p) __arm_vcmpgtq_m_s32(__a, __b, __p) | |
1441 | #define vcmpgtq_m_n_s32(__a, __b, __p) __arm_vcmpgtq_m_n_s32(__a, __b, __p) | |
1442 | #define vcmpgeq_m_s32(__a, __b, __p) __arm_vcmpgeq_m_s32(__a, __b, __p) | |
1443 | #define vcmpgeq_m_n_s32(__a, __b, __p) __arm_vcmpgeq_m_n_s32(__a, __b, __p) | |
1444 | #define vcmpeqq_m_s32(__a, __b, __p) __arm_vcmpeqq_m_s32(__a, __b, __p) | |
1445 | #define vcmpeqq_m_n_s32(__a, __b, __p) __arm_vcmpeqq_m_n_s32(__a, __b, __p) | |
1446 | #define vshlq_m_r_s32(__a, __b, __p) __arm_vshlq_m_r_s32(__a, __b, __p) | |
1447 | #define vrshlq_m_n_s32(__a, __b, __p) __arm_vrshlq_m_n_s32(__a, __b, __p) | |
1448 | #define vrev64q_m_s32(__inactive, __a, __p) __arm_vrev64q_m_s32(__inactive, __a, __p) | |
1449 | #define vqshlq_m_r_s32(__a, __b, __p) __arm_vqshlq_m_r_s32(__a, __b, __p) | |
1450 | #define vqrshlq_m_n_s32(__a, __b, __p) __arm_vqrshlq_m_n_s32(__a, __b, __p) | |
1451 | #define vqnegq_m_s32(__inactive, __a, __p) __arm_vqnegq_m_s32(__inactive, __a, __p) | |
1452 | #define vqabsq_m_s32(__inactive, __a, __p) __arm_vqabsq_m_s32(__inactive, __a, __p) | |
1453 | #define vnegq_m_s32(__inactive, __a, __p) __arm_vnegq_m_s32(__inactive, __a, __p) | |
1454 | #define vmvnq_m_s32(__inactive, __a, __p) __arm_vmvnq_m_s32(__inactive, __a, __p) | |
1455 | #define vmlsdavxq_p_s32(__a, __b, __p) __arm_vmlsdavxq_p_s32(__a, __b, __p) | |
1456 | #define vmlsdavq_p_s32(__a, __b, __p) __arm_vmlsdavq_p_s32(__a, __b, __p) | |
1457 | #define vmladavxq_p_s32(__a, __b, __p) __arm_vmladavxq_p_s32(__a, __b, __p) | |
1458 | #define vmladavq_p_s32(__a, __b, __p) __arm_vmladavq_p_s32(__a, __b, __p) | |
1459 | #define vminvq_p_s32(__a, __b, __p) __arm_vminvq_p_s32(__a, __b, __p) | |
1460 | #define vmaxvq_p_s32(__a, __b, __p) __arm_vmaxvq_p_s32(__a, __b, __p) | |
1461 | #define vdupq_m_n_s32(__inactive, __a, __p) __arm_vdupq_m_n_s32(__inactive, __a, __p) | |
1462 | #define vclzq_m_s32(__inactive, __a, __p) __arm_vclzq_m_s32(__inactive, __a, __p) | |
1463 | #define vclsq_m_s32(__inactive, __a, __p) __arm_vclsq_m_s32(__inactive, __a, __p) | |
1464 | #define vaddvaq_p_s32(__a, __b, __p) __arm_vaddvaq_p_s32(__a, __b, __p) | |
1465 | #define vabsq_m_s32(__inactive, __a, __p) __arm_vabsq_m_s32(__inactive, __a, __p) | |
1466 | #define vqrdmlsdhxq_s32(__inactive, __a, __b) __arm_vqrdmlsdhxq_s32(__inactive, __a, __b) | |
1467 | #define vqrdmlsdhq_s32(__inactive, __a, __b) __arm_vqrdmlsdhq_s32(__inactive, __a, __b) | |
1468 | #define vqrdmlashq_n_s32(__a, __b, __c) __arm_vqrdmlashq_n_s32(__a, __b, __c) | |
1469 | #define vqrdmlahq_n_s32(__a, __b, __c) __arm_vqrdmlahq_n_s32(__a, __b, __c) | |
1470 | #define vqrdmladhxq_s32(__inactive, __a, __b) __arm_vqrdmladhxq_s32(__inactive, __a, __b) | |
1471 | #define vqrdmladhq_s32(__inactive, __a, __b) __arm_vqrdmladhq_s32(__inactive, __a, __b) | |
1472 | #define vqdmlsdhxq_s32(__inactive, __a, __b) __arm_vqdmlsdhxq_s32(__inactive, __a, __b) | |
1473 | #define vqdmlsdhq_s32(__inactive, __a, __b) __arm_vqdmlsdhq_s32(__inactive, __a, __b) | |
afb198ee | 1474 | #define vqdmlashq_n_s32(__a, __b, __c) __arm_vqdmlashq_n_s32(__a, __b, __c) |
8165795c SP |
1475 | #define vqdmlahq_n_s32(__a, __b, __c) __arm_vqdmlahq_n_s32(__a, __b, __c) |
1476 | #define vqdmladhxq_s32(__inactive, __a, __b) __arm_vqdmladhxq_s32(__inactive, __a, __b) | |
1477 | #define vqdmladhq_s32(__inactive, __a, __b) __arm_vqdmladhq_s32(__inactive, __a, __b) | |
1478 | #define vmlsdavaxq_s32(__a, __b, __c) __arm_vmlsdavaxq_s32(__a, __b, __c) | |
1479 | #define vmlsdavaq_s32(__a, __b, __c) __arm_vmlsdavaq_s32(__a, __b, __c) | |
1480 | #define vmlasq_n_s32(__a, __b, __c) __arm_vmlasq_n_s32(__a, __b, __c) | |
1481 | #define vmlaq_n_s32(__a, __b, __c) __arm_vmlaq_n_s32(__a, __b, __c) | |
1482 | #define vmladavaxq_s32(__a, __b, __c) __arm_vmladavaxq_s32(__a, __b, __c) | |
1483 | #define vmladavaq_s32(__a, __b, __c) __arm_vmladavaq_s32(__a, __b, __c) | |
1484 | #define vsriq_n_s32(__a, __b, __imm) __arm_vsriq_n_s32(__a, __b, __imm) | |
1485 | #define vsliq_n_s32(__a, __b, __imm) __arm_vsliq_n_s32(__a, __b, __imm) | |
1486 | #define vpselq_u64(__a, __b, __p) __arm_vpselq_u64(__a, __b, __p) | |
1487 | #define vpselq_s64(__a, __b, __p) __arm_vpselq_s64(__a, __b, __p) | |
e3678b44 SP |
1488 | #define vrmlaldavhaxq_s32(__a, __b, __c) __arm_vrmlaldavhaxq_s32(__a, __b, __c) |
1489 | #define vrmlsldavhaq_s32(__a, __b, __c) __arm_vrmlsldavhaq_s32(__a, __b, __c) | |
1490 | #define vrmlsldavhaxq_s32(__a, __b, __c) __arm_vrmlsldavhaxq_s32(__a, __b, __c) | |
1491 | #define vaddlvaq_p_s32(__a, __b, __p) __arm_vaddlvaq_p_s32(__a, __b, __p) | |
1492 | #define vcvtbq_m_f16_f32(__a, __b, __p) __arm_vcvtbq_m_f16_f32(__a, __b, __p) | |
1493 | #define vcvtbq_m_f32_f16(__inactive, __a, __p) __arm_vcvtbq_m_f32_f16(__inactive, __a, __p) | |
1494 | #define vcvttq_m_f16_f32(__a, __b, __p) __arm_vcvttq_m_f16_f32(__a, __b, __p) | |
1495 | #define vcvttq_m_f32_f16(__inactive, __a, __p) __arm_vcvttq_m_f32_f16(__inactive, __a, __p) | |
1496 | #define vrev16q_m_s8(__inactive, __a, __p) __arm_vrev16q_m_s8(__inactive, __a, __p) | |
1497 | #define vrev32q_m_f16(__inactive, __a, __p) __arm_vrev32q_m_f16(__inactive, __a, __p) | |
1498 | #define vrmlaldavhq_p_s32(__a, __b, __p) __arm_vrmlaldavhq_p_s32(__a, __b, __p) | |
1499 | #define vrmlaldavhxq_p_s32(__a, __b, __p) __arm_vrmlaldavhxq_p_s32(__a, __b, __p) | |
1500 | #define vrmlsldavhq_p_s32(__a, __b, __p) __arm_vrmlsldavhq_p_s32(__a, __b, __p) | |
1501 | #define vrmlsldavhxq_p_s32(__a, __b, __p) __arm_vrmlsldavhxq_p_s32(__a, __b, __p) | |
1502 | #define vaddlvaq_p_u32(__a, __b, __p) __arm_vaddlvaq_p_u32(__a, __b, __p) | |
1503 | #define vrev16q_m_u8(__inactive, __a, __p) __arm_vrev16q_m_u8(__inactive, __a, __p) | |
1504 | #define vrmlaldavhq_p_u32(__a, __b, __p) __arm_vrmlaldavhq_p_u32(__a, __b, __p) | |
1505 | #define vmvnq_m_n_s16(__inactive, __imm, __p) __arm_vmvnq_m_n_s16(__inactive, __imm, __p) | |
1506 | #define vorrq_m_n_s16(__a, __imm, __p) __arm_vorrq_m_n_s16(__a, __imm, __p) | |
1507 | #define vqrshrntq_n_s16(__a, __b, __imm) __arm_vqrshrntq_n_s16(__a, __b, __imm) | |
1508 | #define vqshrnbq_n_s16(__a, __b, __imm) __arm_vqshrnbq_n_s16(__a, __b, __imm) | |
1509 | #define vqshrntq_n_s16(__a, __b, __imm) __arm_vqshrntq_n_s16(__a, __b, __imm) | |
1510 | #define vrshrnbq_n_s16(__a, __b, __imm) __arm_vrshrnbq_n_s16(__a, __b, __imm) | |
1511 | #define vrshrntq_n_s16(__a, __b, __imm) __arm_vrshrntq_n_s16(__a, __b, __imm) | |
1512 | #define vshrnbq_n_s16(__a, __b, __imm) __arm_vshrnbq_n_s16(__a, __b, __imm) | |
1513 | #define vshrntq_n_s16(__a, __b, __imm) __arm_vshrntq_n_s16(__a, __b, __imm) | |
1514 | #define vcmlaq_f16(__a, __b, __c) __arm_vcmlaq_f16(__a, __b, __c) | |
1515 | #define vcmlaq_rot180_f16(__a, __b, __c) __arm_vcmlaq_rot180_f16(__a, __b, __c) | |
1516 | #define vcmlaq_rot270_f16(__a, __b, __c) __arm_vcmlaq_rot270_f16(__a, __b, __c) | |
1517 | #define vcmlaq_rot90_f16(__a, __b, __c) __arm_vcmlaq_rot90_f16(__a, __b, __c) | |
1518 | #define vfmaq_f16(__a, __b, __c) __arm_vfmaq_f16(__a, __b, __c) | |
1519 | #define vfmaq_n_f16(__a, __b, __c) __arm_vfmaq_n_f16(__a, __b, __c) | |
1520 | #define vfmasq_n_f16(__a, __b, __c) __arm_vfmasq_n_f16(__a, __b, __c) | |
1521 | #define vfmsq_f16(__a, __b, __c) __arm_vfmsq_f16(__a, __b, __c) | |
1522 | #define vmlaldavaq_s16(__a, __b, __c) __arm_vmlaldavaq_s16(__a, __b, __c) | |
1523 | #define vmlaldavaxq_s16(__a, __b, __c) __arm_vmlaldavaxq_s16(__a, __b, __c) | |
1524 | #define vmlsldavaq_s16(__a, __b, __c) __arm_vmlsldavaq_s16(__a, __b, __c) | |
1525 | #define vmlsldavaxq_s16(__a, __b, __c) __arm_vmlsldavaxq_s16(__a, __b, __c) | |
1526 | #define vabsq_m_f16(__inactive, __a, __p) __arm_vabsq_m_f16(__inactive, __a, __p) | |
1527 | #define vcvtmq_m_s16_f16(__inactive, __a, __p) __arm_vcvtmq_m_s16_f16(__inactive, __a, __p) | |
1528 | #define vcvtnq_m_s16_f16(__inactive, __a, __p) __arm_vcvtnq_m_s16_f16(__inactive, __a, __p) | |
1529 | #define vcvtpq_m_s16_f16(__inactive, __a, __p) __arm_vcvtpq_m_s16_f16(__inactive, __a, __p) | |
1530 | #define vcvtq_m_s16_f16(__inactive, __a, __p) __arm_vcvtq_m_s16_f16(__inactive, __a, __p) | |
1531 | #define vdupq_m_n_f16(__inactive, __a, __p) __arm_vdupq_m_n_f16(__inactive, __a, __p) | |
1532 | #define vmaxnmaq_m_f16(__a, __b, __p) __arm_vmaxnmaq_m_f16(__a, __b, __p) | |
1533 | #define vmaxnmavq_p_f16(__a, __b, __p) __arm_vmaxnmavq_p_f16(__a, __b, __p) | |
1534 | #define vmaxnmvq_p_f16(__a, __b, __p) __arm_vmaxnmvq_p_f16(__a, __b, __p) | |
1535 | #define vminnmaq_m_f16(__a, __b, __p) __arm_vminnmaq_m_f16(__a, __b, __p) | |
1536 | #define vminnmavq_p_f16(__a, __b, __p) __arm_vminnmavq_p_f16(__a, __b, __p) | |
1537 | #define vminnmvq_p_f16(__a, __b, __p) __arm_vminnmvq_p_f16(__a, __b, __p) | |
1538 | #define vmlaldavq_p_s16(__a, __b, __p) __arm_vmlaldavq_p_s16(__a, __b, __p) | |
1539 | #define vmlaldavxq_p_s16(__a, __b, __p) __arm_vmlaldavxq_p_s16(__a, __b, __p) | |
1540 | #define vmlsldavq_p_s16(__a, __b, __p) __arm_vmlsldavq_p_s16(__a, __b, __p) | |
1541 | #define vmlsldavxq_p_s16(__a, __b, __p) __arm_vmlsldavxq_p_s16(__a, __b, __p) | |
1542 | #define vmovlbq_m_s8(__inactive, __a, __p) __arm_vmovlbq_m_s8(__inactive, __a, __p) | |
1543 | #define vmovltq_m_s8(__inactive, __a, __p) __arm_vmovltq_m_s8(__inactive, __a, __p) | |
1544 | #define vmovnbq_m_s16(__a, __b, __p) __arm_vmovnbq_m_s16(__a, __b, __p) | |
1545 | #define vmovntq_m_s16(__a, __b, __p) __arm_vmovntq_m_s16(__a, __b, __p) | |
1546 | #define vnegq_m_f16(__inactive, __a, __p) __arm_vnegq_m_f16(__inactive, __a, __p) | |
1547 | #define vpselq_f16(__a, __b, __p) __arm_vpselq_f16(__a, __b, __p) | |
1548 | #define vqmovnbq_m_s16(__a, __b, __p) __arm_vqmovnbq_m_s16(__a, __b, __p) | |
1549 | #define vqmovntq_m_s16(__a, __b, __p) __arm_vqmovntq_m_s16(__a, __b, __p) | |
1550 | #define vrev32q_m_s8(__inactive, __a, __p) __arm_vrev32q_m_s8(__inactive, __a, __p) | |
1551 | #define vrev64q_m_f16(__inactive, __a, __p) __arm_vrev64q_m_f16(__inactive, __a, __p) | |
1552 | #define vrndaq_m_f16(__inactive, __a, __p) __arm_vrndaq_m_f16(__inactive, __a, __p) | |
1553 | #define vrndmq_m_f16(__inactive, __a, __p) __arm_vrndmq_m_f16(__inactive, __a, __p) | |
1554 | #define vrndnq_m_f16(__inactive, __a, __p) __arm_vrndnq_m_f16(__inactive, __a, __p) | |
1555 | #define vrndpq_m_f16(__inactive, __a, __p) __arm_vrndpq_m_f16(__inactive, __a, __p) | |
1556 | #define vrndq_m_f16(__inactive, __a, __p) __arm_vrndq_m_f16(__inactive, __a, __p) | |
1557 | #define vrndxq_m_f16(__inactive, __a, __p) __arm_vrndxq_m_f16(__inactive, __a, __p) | |
1558 | #define vcmpeqq_m_n_f16(__a, __b, __p) __arm_vcmpeqq_m_n_f16(__a, __b, __p) | |
1559 | #define vcmpgeq_m_f16(__a, __b, __p) __arm_vcmpgeq_m_f16(__a, __b, __p) | |
1560 | #define vcmpgeq_m_n_f16(__a, __b, __p) __arm_vcmpgeq_m_n_f16(__a, __b, __p) | |
1561 | #define vcmpgtq_m_f16(__a, __b, __p) __arm_vcmpgtq_m_f16(__a, __b, __p) | |
1562 | #define vcmpgtq_m_n_f16(__a, __b, __p) __arm_vcmpgtq_m_n_f16(__a, __b, __p) | |
1563 | #define vcmpleq_m_f16(__a, __b, __p) __arm_vcmpleq_m_f16(__a, __b, __p) | |
1564 | #define vcmpleq_m_n_f16(__a, __b, __p) __arm_vcmpleq_m_n_f16(__a, __b, __p) | |
1565 | #define vcmpltq_m_f16(__a, __b, __p) __arm_vcmpltq_m_f16(__a, __b, __p) | |
1566 | #define vcmpltq_m_n_f16(__a, __b, __p) __arm_vcmpltq_m_n_f16(__a, __b, __p) | |
1567 | #define vcmpneq_m_f16(__a, __b, __p) __arm_vcmpneq_m_f16(__a, __b, __p) | |
1568 | #define vcmpneq_m_n_f16(__a, __b, __p) __arm_vcmpneq_m_n_f16(__a, __b, __p) | |
1569 | #define vmvnq_m_n_u16(__inactive, __imm, __p) __arm_vmvnq_m_n_u16(__inactive, __imm, __p) | |
1570 | #define vorrq_m_n_u16(__a, __imm, __p) __arm_vorrq_m_n_u16(__a, __imm, __p) | |
1571 | #define vqrshruntq_n_s16(__a, __b, __imm) __arm_vqrshruntq_n_s16(__a, __b, __imm) | |
1572 | #define vqshrunbq_n_s16(__a, __b, __imm) __arm_vqshrunbq_n_s16(__a, __b, __imm) | |
1573 | #define vqshruntq_n_s16(__a, __b, __imm) __arm_vqshruntq_n_s16(__a, __b, __imm) | |
1574 | #define vcvtmq_m_u16_f16(__inactive, __a, __p) __arm_vcvtmq_m_u16_f16(__inactive, __a, __p) | |
1575 | #define vcvtnq_m_u16_f16(__inactive, __a, __p) __arm_vcvtnq_m_u16_f16(__inactive, __a, __p) | |
1576 | #define vcvtpq_m_u16_f16(__inactive, __a, __p) __arm_vcvtpq_m_u16_f16(__inactive, __a, __p) | |
1577 | #define vcvtq_m_u16_f16(__inactive, __a, __p) __arm_vcvtq_m_u16_f16(__inactive, __a, __p) | |
1578 | #define vqmovunbq_m_s16(__a, __b, __p) __arm_vqmovunbq_m_s16(__a, __b, __p) | |
1579 | #define vqmovuntq_m_s16(__a, __b, __p) __arm_vqmovuntq_m_s16(__a, __b, __p) | |
1580 | #define vqrshrntq_n_u16(__a, __b, __imm) __arm_vqrshrntq_n_u16(__a, __b, __imm) | |
1581 | #define vqshrnbq_n_u16(__a, __b, __imm) __arm_vqshrnbq_n_u16(__a, __b, __imm) | |
1582 | #define vqshrntq_n_u16(__a, __b, __imm) __arm_vqshrntq_n_u16(__a, __b, __imm) | |
1583 | #define vrshrnbq_n_u16(__a, __b, __imm) __arm_vrshrnbq_n_u16(__a, __b, __imm) | |
1584 | #define vrshrntq_n_u16(__a, __b, __imm) __arm_vrshrntq_n_u16(__a, __b, __imm) | |
1585 | #define vshrnbq_n_u16(__a, __b, __imm) __arm_vshrnbq_n_u16(__a, __b, __imm) | |
1586 | #define vshrntq_n_u16(__a, __b, __imm) __arm_vshrntq_n_u16(__a, __b, __imm) | |
1587 | #define vmlaldavaq_u16(__a, __b, __c) __arm_vmlaldavaq_u16(__a, __b, __c) | |
1588 | #define vmlaldavq_p_u16(__a, __b, __p) __arm_vmlaldavq_p_u16(__a, __b, __p) | |
1589 | #define vmovlbq_m_u8(__inactive, __a, __p) __arm_vmovlbq_m_u8(__inactive, __a, __p) | |
1590 | #define vmovltq_m_u8(__inactive, __a, __p) __arm_vmovltq_m_u8(__inactive, __a, __p) | |
1591 | #define vmovnbq_m_u16(__a, __b, __p) __arm_vmovnbq_m_u16(__a, __b, __p) | |
1592 | #define vmovntq_m_u16(__a, __b, __p) __arm_vmovntq_m_u16(__a, __b, __p) | |
1593 | #define vqmovnbq_m_u16(__a, __b, __p) __arm_vqmovnbq_m_u16(__a, __b, __p) | |
1594 | #define vqmovntq_m_u16(__a, __b, __p) __arm_vqmovntq_m_u16(__a, __b, __p) | |
1595 | #define vrev32q_m_u8(__inactive, __a, __p) __arm_vrev32q_m_u8(__inactive, __a, __p) | |
1596 | #define vmvnq_m_n_s32(__inactive, __imm, __p) __arm_vmvnq_m_n_s32(__inactive, __imm, __p) | |
1597 | #define vorrq_m_n_s32(__a, __imm, __p) __arm_vorrq_m_n_s32(__a, __imm, __p) | |
1598 | #define vqrshrntq_n_s32(__a, __b, __imm) __arm_vqrshrntq_n_s32(__a, __b, __imm) | |
1599 | #define vqshrnbq_n_s32(__a, __b, __imm) __arm_vqshrnbq_n_s32(__a, __b, __imm) | |
1600 | #define vqshrntq_n_s32(__a, __b, __imm) __arm_vqshrntq_n_s32(__a, __b, __imm) | |
1601 | #define vrshrnbq_n_s32(__a, __b, __imm) __arm_vrshrnbq_n_s32(__a, __b, __imm) | |
1602 | #define vrshrntq_n_s32(__a, __b, __imm) __arm_vrshrntq_n_s32(__a, __b, __imm) | |
1603 | #define vshrnbq_n_s32(__a, __b, __imm) __arm_vshrnbq_n_s32(__a, __b, __imm) | |
1604 | #define vshrntq_n_s32(__a, __b, __imm) __arm_vshrntq_n_s32(__a, __b, __imm) | |
1605 | #define vcmlaq_f32(__a, __b, __c) __arm_vcmlaq_f32(__a, __b, __c) | |
1606 | #define vcmlaq_rot180_f32(__a, __b, __c) __arm_vcmlaq_rot180_f32(__a, __b, __c) | |
1607 | #define vcmlaq_rot270_f32(__a, __b, __c) __arm_vcmlaq_rot270_f32(__a, __b, __c) | |
1608 | #define vcmlaq_rot90_f32(__a, __b, __c) __arm_vcmlaq_rot90_f32(__a, __b, __c) | |
1609 | #define vfmaq_f32(__a, __b, __c) __arm_vfmaq_f32(__a, __b, __c) | |
1610 | #define vfmaq_n_f32(__a, __b, __c) __arm_vfmaq_n_f32(__a, __b, __c) | |
1611 | #define vfmasq_n_f32(__a, __b, __c) __arm_vfmasq_n_f32(__a, __b, __c) | |
1612 | #define vfmsq_f32(__a, __b, __c) __arm_vfmsq_f32(__a, __b, __c) | |
1613 | #define vmlaldavaq_s32(__a, __b, __c) __arm_vmlaldavaq_s32(__a, __b, __c) | |
1614 | #define vmlaldavaxq_s32(__a, __b, __c) __arm_vmlaldavaxq_s32(__a, __b, __c) | |
1615 | #define vmlsldavaq_s32(__a, __b, __c) __arm_vmlsldavaq_s32(__a, __b, __c) | |
1616 | #define vmlsldavaxq_s32(__a, __b, __c) __arm_vmlsldavaxq_s32(__a, __b, __c) | |
1617 | #define vabsq_m_f32(__inactive, __a, __p) __arm_vabsq_m_f32(__inactive, __a, __p) | |
1618 | #define vcvtmq_m_s32_f32(__inactive, __a, __p) __arm_vcvtmq_m_s32_f32(__inactive, __a, __p) | |
1619 | #define vcvtnq_m_s32_f32(__inactive, __a, __p) __arm_vcvtnq_m_s32_f32(__inactive, __a, __p) | |
1620 | #define vcvtpq_m_s32_f32(__inactive, __a, __p) __arm_vcvtpq_m_s32_f32(__inactive, __a, __p) | |
1621 | #define vcvtq_m_s32_f32(__inactive, __a, __p) __arm_vcvtq_m_s32_f32(__inactive, __a, __p) | |
1622 | #define vdupq_m_n_f32(__inactive, __a, __p) __arm_vdupq_m_n_f32(__inactive, __a, __p) | |
1623 | #define vmaxnmaq_m_f32(__a, __b, __p) __arm_vmaxnmaq_m_f32(__a, __b, __p) | |
1624 | #define vmaxnmavq_p_f32(__a, __b, __p) __arm_vmaxnmavq_p_f32(__a, __b, __p) | |
1625 | #define vmaxnmvq_p_f32(__a, __b, __p) __arm_vmaxnmvq_p_f32(__a, __b, __p) | |
1626 | #define vminnmaq_m_f32(__a, __b, __p) __arm_vminnmaq_m_f32(__a, __b, __p) | |
1627 | #define vminnmavq_p_f32(__a, __b, __p) __arm_vminnmavq_p_f32(__a, __b, __p) | |
1628 | #define vminnmvq_p_f32(__a, __b, __p) __arm_vminnmvq_p_f32(__a, __b, __p) | |
1629 | #define vmlaldavq_p_s32(__a, __b, __p) __arm_vmlaldavq_p_s32(__a, __b, __p) | |
1630 | #define vmlaldavxq_p_s32(__a, __b, __p) __arm_vmlaldavxq_p_s32(__a, __b, __p) | |
1631 | #define vmlsldavq_p_s32(__a, __b, __p) __arm_vmlsldavq_p_s32(__a, __b, __p) | |
1632 | #define vmlsldavxq_p_s32(__a, __b, __p) __arm_vmlsldavxq_p_s32(__a, __b, __p) | |
1633 | #define vmovlbq_m_s16(__inactive, __a, __p) __arm_vmovlbq_m_s16(__inactive, __a, __p) | |
1634 | #define vmovltq_m_s16(__inactive, __a, __p) __arm_vmovltq_m_s16(__inactive, __a, __p) | |
1635 | #define vmovnbq_m_s32(__a, __b, __p) __arm_vmovnbq_m_s32(__a, __b, __p) | |
1636 | #define vmovntq_m_s32(__a, __b, __p) __arm_vmovntq_m_s32(__a, __b, __p) | |
1637 | #define vnegq_m_f32(__inactive, __a, __p) __arm_vnegq_m_f32(__inactive, __a, __p) | |
1638 | #define vpselq_f32(__a, __b, __p) __arm_vpselq_f32(__a, __b, __p) | |
1639 | #define vqmovnbq_m_s32(__a, __b, __p) __arm_vqmovnbq_m_s32(__a, __b, __p) | |
1640 | #define vqmovntq_m_s32(__a, __b, __p) __arm_vqmovntq_m_s32(__a, __b, __p) | |
1641 | #define vrev32q_m_s16(__inactive, __a, __p) __arm_vrev32q_m_s16(__inactive, __a, __p) | |
1642 | #define vrev64q_m_f32(__inactive, __a, __p) __arm_vrev64q_m_f32(__inactive, __a, __p) | |
1643 | #define vrndaq_m_f32(__inactive, __a, __p) __arm_vrndaq_m_f32(__inactive, __a, __p) | |
1644 | #define vrndmq_m_f32(__inactive, __a, __p) __arm_vrndmq_m_f32(__inactive, __a, __p) | |
1645 | #define vrndnq_m_f32(__inactive, __a, __p) __arm_vrndnq_m_f32(__inactive, __a, __p) | |
1646 | #define vrndpq_m_f32(__inactive, __a, __p) __arm_vrndpq_m_f32(__inactive, __a, __p) | |
1647 | #define vrndq_m_f32(__inactive, __a, __p) __arm_vrndq_m_f32(__inactive, __a, __p) | |
1648 | #define vrndxq_m_f32(__inactive, __a, __p) __arm_vrndxq_m_f32(__inactive, __a, __p) | |
1649 | #define vcmpeqq_m_n_f32(__a, __b, __p) __arm_vcmpeqq_m_n_f32(__a, __b, __p) | |
/* Predicated f32 compares, saturating/rounding shift-narrows, float->u32
   conversions and widen/narrow moves.  Each user-namespace name simply
   forwards its arguments, unchanged and in order, to the __arm_-prefixed
   implementation (only visible without __ARM_MVE_PRESERVE_USER_NAMESPACE).  */
#define vcmpgeq_m_f32(__a, __b, __p) __arm_vcmpgeq_m_f32(__a, __b, __p)
#define vcmpgeq_m_n_f32(__a, __b, __p) __arm_vcmpgeq_m_n_f32(__a, __b, __p)
#define vcmpgtq_m_f32(__a, __b, __p) __arm_vcmpgtq_m_f32(__a, __b, __p)
#define vcmpgtq_m_n_f32(__a, __b, __p) __arm_vcmpgtq_m_n_f32(__a, __b, __p)
#define vcmpleq_m_f32(__a, __b, __p) __arm_vcmpleq_m_f32(__a, __b, __p)
#define vcmpleq_m_n_f32(__a, __b, __p) __arm_vcmpleq_m_n_f32(__a, __b, __p)
#define vcmpltq_m_f32(__a, __b, __p) __arm_vcmpltq_m_f32(__a, __b, __p)
#define vcmpltq_m_n_f32(__a, __b, __p) __arm_vcmpltq_m_n_f32(__a, __b, __p)
#define vcmpneq_m_f32(__a, __b, __p) __arm_vcmpneq_m_f32(__a, __b, __p)
#define vcmpneq_m_n_f32(__a, __b, __p) __arm_vcmpneq_m_n_f32(__a, __b, __p)
#define vmvnq_m_n_u32(__inactive, __imm, __p) __arm_vmvnq_m_n_u32(__inactive, __imm, __p)
#define vorrq_m_n_u32(__a, __imm, __p) __arm_vorrq_m_n_u32(__a, __imm, __p)
#define vqrshruntq_n_s32(__a, __b, __imm) __arm_vqrshruntq_n_s32(__a, __b, __imm)
#define vqshrunbq_n_s32(__a, __b, __imm) __arm_vqshrunbq_n_s32(__a, __b, __imm)
#define vqshruntq_n_s32(__a, __b, __imm) __arm_vqshruntq_n_s32(__a, __b, __imm)
#define vcvtmq_m_u32_f32(__inactive, __a, __p) __arm_vcvtmq_m_u32_f32(__inactive, __a, __p)
#define vcvtnq_m_u32_f32(__inactive, __a, __p) __arm_vcvtnq_m_u32_f32(__inactive, __a, __p)
#define vcvtpq_m_u32_f32(__inactive, __a, __p) __arm_vcvtpq_m_u32_f32(__inactive, __a, __p)
#define vcvtq_m_u32_f32(__inactive, __a, __p) __arm_vcvtq_m_u32_f32(__inactive, __a, __p)
#define vqmovunbq_m_s32(__a, __b, __p) __arm_vqmovunbq_m_s32(__a, __b, __p)
#define vqmovuntq_m_s32(__a, __b, __p) __arm_vqmovuntq_m_s32(__a, __b, __p)
#define vqrshrntq_n_u32(__a, __b, __imm) __arm_vqrshrntq_n_u32(__a, __b, __imm)
#define vqshrnbq_n_u32(__a, __b, __imm) __arm_vqshrnbq_n_u32(__a, __b, __imm)
#define vqshrntq_n_u32(__a, __b, __imm) __arm_vqshrntq_n_u32(__a, __b, __imm)
#define vrshrnbq_n_u32(__a, __b, __imm) __arm_vrshrnbq_n_u32(__a, __b, __imm)
#define vrshrntq_n_u32(__a, __b, __imm) __arm_vrshrntq_n_u32(__a, __b, __imm)
#define vshrnbq_n_u32(__a, __b, __imm) __arm_vshrnbq_n_u32(__a, __b, __imm)
#define vshrntq_n_u32(__a, __b, __imm) __arm_vshrntq_n_u32(__a, __b, __imm)
#define vmlaldavaq_u32(__a, __b, __c) __arm_vmlaldavaq_u32(__a, __b, __c)
#define vmlaldavq_p_u32(__a, __b, __p) __arm_vmlaldavq_p_u32(__a, __b, __p)
#define vmovlbq_m_u16(__inactive, __a, __p) __arm_vmovlbq_m_u16(__inactive, __a, __p)
#define vmovltq_m_u16(__inactive, __a, __p) __arm_vmovltq_m_u16(__inactive, __a, __p)
#define vmovnbq_m_u32(__a, __b, __p) __arm_vmovnbq_m_u32(__a, __b, __p)
#define vmovntq_m_u32(__a, __b, __p) __arm_vmovntq_m_u32(__a, __b, __p)
#define vqmovnbq_m_u32(__a, __b, __p) __arm_vqmovnbq_m_u32(__a, __b, __p)
#define vqmovntq_m_u32(__a, __b, __p) __arm_vqmovntq_m_u32(__a, __b, __p)
#define vrev32q_m_u16(__inactive, __a, __p) __arm_vrev32q_m_u16(__inactive, __a, __p)
/* Predicated shift-right-insert, subtract, fixed-point converts,
   unsigned saturating left shift, absolute-difference-accumulate and
   vector shifts.  Each alias forwards its arguments verbatim to the
   __arm_-prefixed implementation.  */
#define vsriq_m_n_s8(__a, __b, __imm, __p) __arm_vsriq_m_n_s8(__a, __b, __imm, __p)
#define vsubq_m_s8(__inactive, __a, __b, __p) __arm_vsubq_m_s8(__inactive, __a, __b, __p)
#define vcvtq_m_n_f16_u16(__inactive, __a, __imm6, __p) __arm_vcvtq_m_n_f16_u16(__inactive, __a, __imm6, __p)
#define vqshluq_m_n_s8(__inactive, __a, __imm, __p) __arm_vqshluq_m_n_s8(__inactive, __a, __imm, __p)
#define vabavq_p_s8(__a, __b, __c, __p) __arm_vabavq_p_s8(__a, __b, __c, __p)
#define vsriq_m_n_u8(__a, __b, __imm, __p) __arm_vsriq_m_n_u8(__a, __b, __imm, __p)
#define vshlq_m_u8(__inactive, __a, __b, __p) __arm_vshlq_m_u8(__inactive, __a, __b, __p)
#define vsubq_m_u8(__inactive, __a, __b, __p) __arm_vsubq_m_u8(__inactive, __a, __b, __p)
#define vabavq_p_u8(__a, __b, __c, __p) __arm_vabavq_p_u8(__a, __b, __c, __p)
#define vshlq_m_s8(__inactive, __a, __b, __p) __arm_vshlq_m_s8(__inactive, __a, __b, __p)
#define vcvtq_m_n_f16_s16(__inactive, __a, __imm6, __p) __arm_vcvtq_m_n_f16_s16(__inactive, __a, __imm6, __p)
#define vsriq_m_n_s16(__a, __b, __imm, __p) __arm_vsriq_m_n_s16(__a, __b, __imm, __p)
#define vsubq_m_s16(__inactive, __a, __b, __p) __arm_vsubq_m_s16(__inactive, __a, __b, __p)
#define vcvtq_m_n_f32_u32(__inactive, __a, __imm6, __p) __arm_vcvtq_m_n_f32_u32(__inactive, __a, __imm6, __p)
#define vqshluq_m_n_s16(__inactive, __a, __imm, __p) __arm_vqshluq_m_n_s16(__inactive, __a, __imm, __p)
#define vabavq_p_s16(__a, __b, __c, __p) __arm_vabavq_p_s16(__a, __b, __c, __p)
#define vsriq_m_n_u16(__a, __b, __imm, __p) __arm_vsriq_m_n_u16(__a, __b, __imm, __p)
#define vshlq_m_u16(__inactive, __a, __b, __p) __arm_vshlq_m_u16(__inactive, __a, __b, __p)
#define vsubq_m_u16(__inactive, __a, __b, __p) __arm_vsubq_m_u16(__inactive, __a, __b, __p)
#define vabavq_p_u16(__a, __b, __c, __p) __arm_vabavq_p_u16(__a, __b, __c, __p)
#define vshlq_m_s16(__inactive, __a, __b, __p) __arm_vshlq_m_s16(__inactive, __a, __b, __p)
#define vcvtq_m_n_f32_s32(__inactive, __a, __imm6, __p) __arm_vcvtq_m_n_f32_s32(__inactive, __a, __imm6, __p)
#define vsriq_m_n_s32(__a, __b, __imm, __p) __arm_vsriq_m_n_s32(__a, __b, __imm, __p)
#define vsubq_m_s32(__inactive, __a, __b, __p) __arm_vsubq_m_s32(__inactive, __a, __b, __p)
#define vqshluq_m_n_s32(__inactive, __a, __imm, __p) __arm_vqshluq_m_n_s32(__inactive, __a, __imm, __p)
#define vabavq_p_s32(__a, __b, __c, __p) __arm_vabavq_p_s32(__a, __b, __c, __p)
#define vsriq_m_n_u32(__a, __b, __imm, __p) __arm_vsriq_m_n_u32(__a, __b, __imm, __p)
#define vshlq_m_u32(__inactive, __a, __b, __p) __arm_vshlq_m_u32(__inactive, __a, __b, __p)
#define vsubq_m_u32(__inactive, __a, __b, __p) __arm_vsubq_m_u32(__inactive, __a, __b, __p)
#define vabavq_p_u32(__a, __b, __c, __p) __arm_vabavq_p_u32(__a, __b, __c, __p)
#define vshlq_m_s32(__inactive, __a, __b, __p) __arm_vshlq_m_s32(__inactive, __a, __b, __p)
/* Predicated three-operand (inactive/accumulator + two inputs + predicate)
   integer operations: absolute difference, add, bitwise ops, bit-reverse
   shift, complex add, halving add/sub, min/max, multiply-accumulate
   reductions, multiplies and doubling multiply-accumulate.  Each alias
   forwards its arguments verbatim to the __arm_-prefixed implementation.  */
#define vabdq_m_s8(__inactive, __a, __b, __p) __arm_vabdq_m_s8(__inactive, __a, __b, __p)
#define vabdq_m_s32(__inactive, __a, __b, __p) __arm_vabdq_m_s32(__inactive, __a, __b, __p)
#define vabdq_m_s16(__inactive, __a, __b, __p) __arm_vabdq_m_s16(__inactive, __a, __b, __p)
#define vabdq_m_u8(__inactive, __a, __b, __p) __arm_vabdq_m_u8(__inactive, __a, __b, __p)
#define vabdq_m_u32(__inactive, __a, __b, __p) __arm_vabdq_m_u32(__inactive, __a, __b, __p)
#define vabdq_m_u16(__inactive, __a, __b, __p) __arm_vabdq_m_u16(__inactive, __a, __b, __p)
#define vaddq_m_n_s8(__inactive, __a, __b, __p) __arm_vaddq_m_n_s8(__inactive, __a, __b, __p)
#define vaddq_m_n_s32(__inactive, __a, __b, __p) __arm_vaddq_m_n_s32(__inactive, __a, __b, __p)
#define vaddq_m_n_s16(__inactive, __a, __b, __p) __arm_vaddq_m_n_s16(__inactive, __a, __b, __p)
#define vaddq_m_n_u8(__inactive, __a, __b, __p) __arm_vaddq_m_n_u8(__inactive, __a, __b, __p)
#define vaddq_m_n_u32(__inactive, __a, __b, __p) __arm_vaddq_m_n_u32(__inactive, __a, __b, __p)
#define vaddq_m_n_u16(__inactive, __a, __b, __p) __arm_vaddq_m_n_u16(__inactive, __a, __b, __p)
#define vaddq_m_s8(__inactive, __a, __b, __p) __arm_vaddq_m_s8(__inactive, __a, __b, __p)
#define vaddq_m_s32(__inactive, __a, __b, __p) __arm_vaddq_m_s32(__inactive, __a, __b, __p)
#define vaddq_m_s16(__inactive, __a, __b, __p) __arm_vaddq_m_s16(__inactive, __a, __b, __p)
#define vaddq_m_u8(__inactive, __a, __b, __p) __arm_vaddq_m_u8(__inactive, __a, __b, __p)
#define vaddq_m_u32(__inactive, __a, __b, __p) __arm_vaddq_m_u32(__inactive, __a, __b, __p)
#define vaddq_m_u16(__inactive, __a, __b, __p) __arm_vaddq_m_u16(__inactive, __a, __b, __p)
#define vandq_m_s8(__inactive, __a, __b, __p) __arm_vandq_m_s8(__inactive, __a, __b, __p)
#define vandq_m_s32(__inactive, __a, __b, __p) __arm_vandq_m_s32(__inactive, __a, __b, __p)
#define vandq_m_s16(__inactive, __a, __b, __p) __arm_vandq_m_s16(__inactive, __a, __b, __p)
#define vandq_m_u8(__inactive, __a, __b, __p) __arm_vandq_m_u8(__inactive, __a, __b, __p)
#define vandq_m_u32(__inactive, __a, __b, __p) __arm_vandq_m_u32(__inactive, __a, __b, __p)
#define vandq_m_u16(__inactive, __a, __b, __p) __arm_vandq_m_u16(__inactive, __a, __b, __p)
#define vbicq_m_s8(__inactive, __a, __b, __p) __arm_vbicq_m_s8(__inactive, __a, __b, __p)
#define vbicq_m_s32(__inactive, __a, __b, __p) __arm_vbicq_m_s32(__inactive, __a, __b, __p)
#define vbicq_m_s16(__inactive, __a, __b, __p) __arm_vbicq_m_s16(__inactive, __a, __b, __p)
#define vbicq_m_u8(__inactive, __a, __b, __p) __arm_vbicq_m_u8(__inactive, __a, __b, __p)
#define vbicq_m_u32(__inactive, __a, __b, __p) __arm_vbicq_m_u32(__inactive, __a, __b, __p)
#define vbicq_m_u16(__inactive, __a, __b, __p) __arm_vbicq_m_u16(__inactive, __a, __b, __p)
#define vbrsrq_m_n_s8(__inactive, __a, __b, __p) __arm_vbrsrq_m_n_s8(__inactive, __a, __b, __p)
#define vbrsrq_m_n_s32(__inactive, __a, __b, __p) __arm_vbrsrq_m_n_s32(__inactive, __a, __b, __p)
#define vbrsrq_m_n_s16(__inactive, __a, __b, __p) __arm_vbrsrq_m_n_s16(__inactive, __a, __b, __p)
#define vbrsrq_m_n_u8(__inactive, __a, __b, __p) __arm_vbrsrq_m_n_u8(__inactive, __a, __b, __p)
#define vbrsrq_m_n_u32(__inactive, __a, __b, __p) __arm_vbrsrq_m_n_u32(__inactive, __a, __b, __p)
#define vbrsrq_m_n_u16(__inactive, __a, __b, __p) __arm_vbrsrq_m_n_u16(__inactive, __a, __b, __p)
#define vcaddq_rot270_m_s8(__inactive, __a, __b, __p) __arm_vcaddq_rot270_m_s8(__inactive, __a, __b, __p)
#define vcaddq_rot270_m_s32(__inactive, __a, __b, __p) __arm_vcaddq_rot270_m_s32(__inactive, __a, __b, __p)
#define vcaddq_rot270_m_s16(__inactive, __a, __b, __p) __arm_vcaddq_rot270_m_s16(__inactive, __a, __b, __p)
#define vcaddq_rot270_m_u8(__inactive, __a, __b, __p) __arm_vcaddq_rot270_m_u8(__inactive, __a, __b, __p)
#define vcaddq_rot270_m_u32(__inactive, __a, __b, __p) __arm_vcaddq_rot270_m_u32(__inactive, __a, __b, __p)
#define vcaddq_rot270_m_u16(__inactive, __a, __b, __p) __arm_vcaddq_rot270_m_u16(__inactive, __a, __b, __p)
#define vcaddq_rot90_m_s8(__inactive, __a, __b, __p) __arm_vcaddq_rot90_m_s8(__inactive, __a, __b, __p)
#define vcaddq_rot90_m_s32(__inactive, __a, __b, __p) __arm_vcaddq_rot90_m_s32(__inactive, __a, __b, __p)
#define vcaddq_rot90_m_s16(__inactive, __a, __b, __p) __arm_vcaddq_rot90_m_s16(__inactive, __a, __b, __p)
#define vcaddq_rot90_m_u8(__inactive, __a, __b, __p) __arm_vcaddq_rot90_m_u8(__inactive, __a, __b, __p)
#define vcaddq_rot90_m_u32(__inactive, __a, __b, __p) __arm_vcaddq_rot90_m_u32(__inactive, __a, __b, __p)
#define vcaddq_rot90_m_u16(__inactive, __a, __b, __p) __arm_vcaddq_rot90_m_u16(__inactive, __a, __b, __p)
#define veorq_m_s8(__inactive, __a, __b, __p) __arm_veorq_m_s8(__inactive, __a, __b, __p)
#define veorq_m_s32(__inactive, __a, __b, __p) __arm_veorq_m_s32(__inactive, __a, __b, __p)
#define veorq_m_s16(__inactive, __a, __b, __p) __arm_veorq_m_s16(__inactive, __a, __b, __p)
#define veorq_m_u8(__inactive, __a, __b, __p) __arm_veorq_m_u8(__inactive, __a, __b, __p)
#define veorq_m_u32(__inactive, __a, __b, __p) __arm_veorq_m_u32(__inactive, __a, __b, __p)
#define veorq_m_u16(__inactive, __a, __b, __p) __arm_veorq_m_u16(__inactive, __a, __b, __p)
#define vhaddq_m_n_s8(__inactive, __a, __b, __p) __arm_vhaddq_m_n_s8(__inactive, __a, __b, __p)
#define vhaddq_m_n_s32(__inactive, __a, __b, __p) __arm_vhaddq_m_n_s32(__inactive, __a, __b, __p)
#define vhaddq_m_n_s16(__inactive, __a, __b, __p) __arm_vhaddq_m_n_s16(__inactive, __a, __b, __p)
#define vhaddq_m_n_u8(__inactive, __a, __b, __p) __arm_vhaddq_m_n_u8(__inactive, __a, __b, __p)
#define vhaddq_m_n_u32(__inactive, __a, __b, __p) __arm_vhaddq_m_n_u32(__inactive, __a, __b, __p)
#define vhaddq_m_n_u16(__inactive, __a, __b, __p) __arm_vhaddq_m_n_u16(__inactive, __a, __b, __p)
#define vhaddq_m_s8(__inactive, __a, __b, __p) __arm_vhaddq_m_s8(__inactive, __a, __b, __p)
#define vhaddq_m_s32(__inactive, __a, __b, __p) __arm_vhaddq_m_s32(__inactive, __a, __b, __p)
#define vhaddq_m_s16(__inactive, __a, __b, __p) __arm_vhaddq_m_s16(__inactive, __a, __b, __p)
#define vhaddq_m_u8(__inactive, __a, __b, __p) __arm_vhaddq_m_u8(__inactive, __a, __b, __p)
#define vhaddq_m_u32(__inactive, __a, __b, __p) __arm_vhaddq_m_u32(__inactive, __a, __b, __p)
#define vhaddq_m_u16(__inactive, __a, __b, __p) __arm_vhaddq_m_u16(__inactive, __a, __b, __p)
#define vhcaddq_rot270_m_s8(__inactive, __a, __b, __p) __arm_vhcaddq_rot270_m_s8(__inactive, __a, __b, __p)
#define vhcaddq_rot270_m_s32(__inactive, __a, __b, __p) __arm_vhcaddq_rot270_m_s32(__inactive, __a, __b, __p)
#define vhcaddq_rot270_m_s16(__inactive, __a, __b, __p) __arm_vhcaddq_rot270_m_s16(__inactive, __a, __b, __p)
#define vhcaddq_rot90_m_s8(__inactive, __a, __b, __p) __arm_vhcaddq_rot90_m_s8(__inactive, __a, __b, __p)
#define vhcaddq_rot90_m_s32(__inactive, __a, __b, __p) __arm_vhcaddq_rot90_m_s32(__inactive, __a, __b, __p)
#define vhcaddq_rot90_m_s16(__inactive, __a, __b, __p) __arm_vhcaddq_rot90_m_s16(__inactive, __a, __b, __p)
#define vhsubq_m_n_s8(__inactive, __a, __b, __p) __arm_vhsubq_m_n_s8(__inactive, __a, __b, __p)
#define vhsubq_m_n_s32(__inactive, __a, __b, __p) __arm_vhsubq_m_n_s32(__inactive, __a, __b, __p)
#define vhsubq_m_n_s16(__inactive, __a, __b, __p) __arm_vhsubq_m_n_s16(__inactive, __a, __b, __p)
#define vhsubq_m_n_u8(__inactive, __a, __b, __p) __arm_vhsubq_m_n_u8(__inactive, __a, __b, __p)
#define vhsubq_m_n_u32(__inactive, __a, __b, __p) __arm_vhsubq_m_n_u32(__inactive, __a, __b, __p)
#define vhsubq_m_n_u16(__inactive, __a, __b, __p) __arm_vhsubq_m_n_u16(__inactive, __a, __b, __p)
#define vhsubq_m_s8(__inactive, __a, __b, __p) __arm_vhsubq_m_s8(__inactive, __a, __b, __p)
#define vhsubq_m_s32(__inactive, __a, __b, __p) __arm_vhsubq_m_s32(__inactive, __a, __b, __p)
#define vhsubq_m_s16(__inactive, __a, __b, __p) __arm_vhsubq_m_s16(__inactive, __a, __b, __p)
#define vhsubq_m_u8(__inactive, __a, __b, __p) __arm_vhsubq_m_u8(__inactive, __a, __b, __p)
#define vhsubq_m_u32(__inactive, __a, __b, __p) __arm_vhsubq_m_u32(__inactive, __a, __b, __p)
#define vhsubq_m_u16(__inactive, __a, __b, __p) __arm_vhsubq_m_u16(__inactive, __a, __b, __p)
#define vmaxq_m_s8(__inactive, __a, __b, __p) __arm_vmaxq_m_s8(__inactive, __a, __b, __p)
#define vmaxq_m_s32(__inactive, __a, __b, __p) __arm_vmaxq_m_s32(__inactive, __a, __b, __p)
#define vmaxq_m_s16(__inactive, __a, __b, __p) __arm_vmaxq_m_s16(__inactive, __a, __b, __p)
#define vmaxq_m_u8(__inactive, __a, __b, __p) __arm_vmaxq_m_u8(__inactive, __a, __b, __p)
#define vmaxq_m_u32(__inactive, __a, __b, __p) __arm_vmaxq_m_u32(__inactive, __a, __b, __p)
#define vmaxq_m_u16(__inactive, __a, __b, __p) __arm_vmaxq_m_u16(__inactive, __a, __b, __p)
#define vminq_m_s8(__inactive, __a, __b, __p) __arm_vminq_m_s8(__inactive, __a, __b, __p)
#define vminq_m_s32(__inactive, __a, __b, __p) __arm_vminq_m_s32(__inactive, __a, __b, __p)
#define vminq_m_s16(__inactive, __a, __b, __p) __arm_vminq_m_s16(__inactive, __a, __b, __p)
#define vminq_m_u8(__inactive, __a, __b, __p) __arm_vminq_m_u8(__inactive, __a, __b, __p)
#define vminq_m_u32(__inactive, __a, __b, __p) __arm_vminq_m_u32(__inactive, __a, __b, __p)
#define vminq_m_u16(__inactive, __a, __b, __p) __arm_vminq_m_u16(__inactive, __a, __b, __p)
#define vmladavaq_p_s8(__a, __b, __c, __p) __arm_vmladavaq_p_s8(__a, __b, __c, __p)
#define vmladavaq_p_s32(__a, __b, __c, __p) __arm_vmladavaq_p_s32(__a, __b, __c, __p)
#define vmladavaq_p_s16(__a, __b, __c, __p) __arm_vmladavaq_p_s16(__a, __b, __c, __p)
#define vmladavaq_p_u8(__a, __b, __c, __p) __arm_vmladavaq_p_u8(__a, __b, __c, __p)
#define vmladavaq_p_u32(__a, __b, __c, __p) __arm_vmladavaq_p_u32(__a, __b, __c, __p)
#define vmladavaq_p_u16(__a, __b, __c, __p) __arm_vmladavaq_p_u16(__a, __b, __c, __p)
#define vmladavaxq_p_s8(__a, __b, __c, __p) __arm_vmladavaxq_p_s8(__a, __b, __c, __p)
#define vmladavaxq_p_s32(__a, __b, __c, __p) __arm_vmladavaxq_p_s32(__a, __b, __c, __p)
#define vmladavaxq_p_s16(__a, __b, __c, __p) __arm_vmladavaxq_p_s16(__a, __b, __c, __p)
#define vmlaq_m_n_s8(__a, __b, __c, __p) __arm_vmlaq_m_n_s8(__a, __b, __c, __p)
#define vmlaq_m_n_s32(__a, __b, __c, __p) __arm_vmlaq_m_n_s32(__a, __b, __c, __p)
#define vmlaq_m_n_s16(__a, __b, __c, __p) __arm_vmlaq_m_n_s16(__a, __b, __c, __p)
#define vmlaq_m_n_u8(__a, __b, __c, __p) __arm_vmlaq_m_n_u8(__a, __b, __c, __p)
#define vmlaq_m_n_u32(__a, __b, __c, __p) __arm_vmlaq_m_n_u32(__a, __b, __c, __p)
#define vmlaq_m_n_u16(__a, __b, __c, __p) __arm_vmlaq_m_n_u16(__a, __b, __c, __p)
#define vmlasq_m_n_s8(__a, __b, __c, __p) __arm_vmlasq_m_n_s8(__a, __b, __c, __p)
#define vmlasq_m_n_s32(__a, __b, __c, __p) __arm_vmlasq_m_n_s32(__a, __b, __c, __p)
#define vmlasq_m_n_s16(__a, __b, __c, __p) __arm_vmlasq_m_n_s16(__a, __b, __c, __p)
#define vmlasq_m_n_u8(__a, __b, __c, __p) __arm_vmlasq_m_n_u8(__a, __b, __c, __p)
#define vmlasq_m_n_u32(__a, __b, __c, __p) __arm_vmlasq_m_n_u32(__a, __b, __c, __p)
#define vmlasq_m_n_u16(__a, __b, __c, __p) __arm_vmlasq_m_n_u16(__a, __b, __c, __p)
#define vmlsdavaq_p_s8(__a, __b, __c, __p) __arm_vmlsdavaq_p_s8(__a, __b, __c, __p)
#define vmlsdavaq_p_s32(__a, __b, __c, __p) __arm_vmlsdavaq_p_s32(__a, __b, __c, __p)
#define vmlsdavaq_p_s16(__a, __b, __c, __p) __arm_vmlsdavaq_p_s16(__a, __b, __c, __p)
#define vmlsdavaxq_p_s8(__a, __b, __c, __p) __arm_vmlsdavaxq_p_s8(__a, __b, __c, __p)
#define vmlsdavaxq_p_s32(__a, __b, __c, __p) __arm_vmlsdavaxq_p_s32(__a, __b, __c, __p)
#define vmlsdavaxq_p_s16(__a, __b, __c, __p) __arm_vmlsdavaxq_p_s16(__a, __b, __c, __p)
#define vmulhq_m_s8(__inactive, __a, __b, __p) __arm_vmulhq_m_s8(__inactive, __a, __b, __p)
#define vmulhq_m_s32(__inactive, __a, __b, __p) __arm_vmulhq_m_s32(__inactive, __a, __b, __p)
#define vmulhq_m_s16(__inactive, __a, __b, __p) __arm_vmulhq_m_s16(__inactive, __a, __b, __p)
#define vmulhq_m_u8(__inactive, __a, __b, __p) __arm_vmulhq_m_u8(__inactive, __a, __b, __p)
#define vmulhq_m_u32(__inactive, __a, __b, __p) __arm_vmulhq_m_u32(__inactive, __a, __b, __p)
#define vmulhq_m_u16(__inactive, __a, __b, __p) __arm_vmulhq_m_u16(__inactive, __a, __b, __p)
#define vmullbq_int_m_s8(__inactive, __a, __b, __p) __arm_vmullbq_int_m_s8(__inactive, __a, __b, __p)
#define vmullbq_int_m_s32(__inactive, __a, __b, __p) __arm_vmullbq_int_m_s32(__inactive, __a, __b, __p)
#define vmullbq_int_m_s16(__inactive, __a, __b, __p) __arm_vmullbq_int_m_s16(__inactive, __a, __b, __p)
#define vmullbq_int_m_u8(__inactive, __a, __b, __p) __arm_vmullbq_int_m_u8(__inactive, __a, __b, __p)
#define vmullbq_int_m_u32(__inactive, __a, __b, __p) __arm_vmullbq_int_m_u32(__inactive, __a, __b, __p)
#define vmullbq_int_m_u16(__inactive, __a, __b, __p) __arm_vmullbq_int_m_u16(__inactive, __a, __b, __p)
#define vmulltq_int_m_s8(__inactive, __a, __b, __p) __arm_vmulltq_int_m_s8(__inactive, __a, __b, __p)
#define vmulltq_int_m_s32(__inactive, __a, __b, __p) __arm_vmulltq_int_m_s32(__inactive, __a, __b, __p)
#define vmulltq_int_m_s16(__inactive, __a, __b, __p) __arm_vmulltq_int_m_s16(__inactive, __a, __b, __p)
#define vmulltq_int_m_u8(__inactive, __a, __b, __p) __arm_vmulltq_int_m_u8(__inactive, __a, __b, __p)
#define vmulltq_int_m_u32(__inactive, __a, __b, __p) __arm_vmulltq_int_m_u32(__inactive, __a, __b, __p)
#define vmulltq_int_m_u16(__inactive, __a, __b, __p) __arm_vmulltq_int_m_u16(__inactive, __a, __b, __p)
#define vmulq_m_n_s8(__inactive, __a, __b, __p) __arm_vmulq_m_n_s8(__inactive, __a, __b, __p)
#define vmulq_m_n_s32(__inactive, __a, __b, __p) __arm_vmulq_m_n_s32(__inactive, __a, __b, __p)
#define vmulq_m_n_s16(__inactive, __a, __b, __p) __arm_vmulq_m_n_s16(__inactive, __a, __b, __p)
#define vmulq_m_n_u8(__inactive, __a, __b, __p) __arm_vmulq_m_n_u8(__inactive, __a, __b, __p)
#define vmulq_m_n_u32(__inactive, __a, __b, __p) __arm_vmulq_m_n_u32(__inactive, __a, __b, __p)
#define vmulq_m_n_u16(__inactive, __a, __b, __p) __arm_vmulq_m_n_u16(__inactive, __a, __b, __p)
#define vmulq_m_s8(__inactive, __a, __b, __p) __arm_vmulq_m_s8(__inactive, __a, __b, __p)
#define vmulq_m_s32(__inactive, __a, __b, __p) __arm_vmulq_m_s32(__inactive, __a, __b, __p)
#define vmulq_m_s16(__inactive, __a, __b, __p) __arm_vmulq_m_s16(__inactive, __a, __b, __p)
#define vmulq_m_u8(__inactive, __a, __b, __p) __arm_vmulq_m_u8(__inactive, __a, __b, __p)
#define vmulq_m_u32(__inactive, __a, __b, __p) __arm_vmulq_m_u32(__inactive, __a, __b, __p)
#define vmulq_m_u16(__inactive, __a, __b, __p) __arm_vmulq_m_u16(__inactive, __a, __b, __p)
#define vornq_m_s8(__inactive, __a, __b, __p) __arm_vornq_m_s8(__inactive, __a, __b, __p)
#define vornq_m_s32(__inactive, __a, __b, __p) __arm_vornq_m_s32(__inactive, __a, __b, __p)
#define vornq_m_s16(__inactive, __a, __b, __p) __arm_vornq_m_s16(__inactive, __a, __b, __p)
#define vornq_m_u8(__inactive, __a, __b, __p) __arm_vornq_m_u8(__inactive, __a, __b, __p)
#define vornq_m_u32(__inactive, __a, __b, __p) __arm_vornq_m_u32(__inactive, __a, __b, __p)
#define vornq_m_u16(__inactive, __a, __b, __p) __arm_vornq_m_u16(__inactive, __a, __b, __p)
#define vorrq_m_s8(__inactive, __a, __b, __p) __arm_vorrq_m_s8(__inactive, __a, __b, __p)
#define vorrq_m_s32(__inactive, __a, __b, __p) __arm_vorrq_m_s32(__inactive, __a, __b, __p)
#define vorrq_m_s16(__inactive, __a, __b, __p) __arm_vorrq_m_s16(__inactive, __a, __b, __p)
#define vorrq_m_u8(__inactive, __a, __b, __p) __arm_vorrq_m_u8(__inactive, __a, __b, __p)
#define vorrq_m_u32(__inactive, __a, __b, __p) __arm_vorrq_m_u32(__inactive, __a, __b, __p)
#define vorrq_m_u16(__inactive, __a, __b, __p) __arm_vorrq_m_u16(__inactive, __a, __b, __p)
#define vqaddq_m_n_s8(__inactive, __a, __b, __p) __arm_vqaddq_m_n_s8(__inactive, __a, __b, __p)
#define vqaddq_m_n_s32(__inactive, __a, __b, __p) __arm_vqaddq_m_n_s32(__inactive, __a, __b, __p)
#define vqaddq_m_n_s16(__inactive, __a, __b, __p) __arm_vqaddq_m_n_s16(__inactive, __a, __b, __p)
#define vqaddq_m_n_u8(__inactive, __a, __b, __p) __arm_vqaddq_m_n_u8(__inactive, __a, __b, __p)
#define vqaddq_m_n_u32(__inactive, __a, __b, __p) __arm_vqaddq_m_n_u32(__inactive, __a, __b, __p)
#define vqaddq_m_n_u16(__inactive, __a, __b, __p) __arm_vqaddq_m_n_u16(__inactive, __a, __b, __p)
#define vqaddq_m_s8(__inactive, __a, __b, __p) __arm_vqaddq_m_s8(__inactive, __a, __b, __p)
#define vqaddq_m_s32(__inactive, __a, __b, __p) __arm_vqaddq_m_s32(__inactive, __a, __b, __p)
#define vqaddq_m_s16(__inactive, __a, __b, __p) __arm_vqaddq_m_s16(__inactive, __a, __b, __p)
#define vqaddq_m_u8(__inactive, __a, __b, __p) __arm_vqaddq_m_u8(__inactive, __a, __b, __p)
#define vqaddq_m_u32(__inactive, __a, __b, __p) __arm_vqaddq_m_u32(__inactive, __a, __b, __p)
#define vqaddq_m_u16(__inactive, __a, __b, __p) __arm_vqaddq_m_u16(__inactive, __a, __b, __p)
#define vqdmladhq_m_s8(__inactive, __a, __b, __p) __arm_vqdmladhq_m_s8(__inactive, __a, __b, __p)
#define vqdmladhq_m_s32(__inactive, __a, __b, __p) __arm_vqdmladhq_m_s32(__inactive, __a, __b, __p)
#define vqdmladhq_m_s16(__inactive, __a, __b, __p) __arm_vqdmladhq_m_s16(__inactive, __a, __b, __p)
#define vqdmladhxq_m_s8(__inactive, __a, __b, __p) __arm_vqdmladhxq_m_s8(__inactive, __a, __b, __p)
#define vqdmladhxq_m_s32(__inactive, __a, __b, __p) __arm_vqdmladhxq_m_s32(__inactive, __a, __b, __p)
#define vqdmladhxq_m_s16(__inactive, __a, __b, __p) __arm_vqdmladhxq_m_s16(__inactive, __a, __b, __p)
/* Predicated saturating doubling multiply-accumulate (scalar addend,
   "swapped" form): aliases forwarding verbatim to the __arm_-prefixed
   implementations.  */
#define vqdmlashq_m_n_s8(__a, __b, __c, __p) __arm_vqdmlashq_m_n_s8(__a, __b, __c, __p)
#define vqdmlashq_m_n_s32(__a, __b, __c, __p) __arm_vqdmlashq_m_n_s32(__a, __b, __c, __p)
#define vqdmlashq_m_n_s16(__a, __b, __c, __p) __arm_vqdmlashq_m_n_s16(__a, __b, __c, __p)
/* Predicated saturating doubling/rounding multiplies and
   multiply-accumulates, saturating/rounding shifts, saturating
   subtract, rounding halving add and rounding shift right: aliases
   forwarding verbatim to the __arm_-prefixed implementations.  */
#define vqdmlahq_m_n_s8(__a, __b, __c, __p) __arm_vqdmlahq_m_n_s8(__a, __b, __c, __p)
#define vqdmlahq_m_n_s32(__a, __b, __c, __p) __arm_vqdmlahq_m_n_s32(__a, __b, __c, __p)
#define vqdmlahq_m_n_s16(__a, __b, __c, __p) __arm_vqdmlahq_m_n_s16(__a, __b, __c, __p)
#define vqdmlsdhq_m_s8(__inactive, __a, __b, __p) __arm_vqdmlsdhq_m_s8(__inactive, __a, __b, __p)
#define vqdmlsdhq_m_s32(__inactive, __a, __b, __p) __arm_vqdmlsdhq_m_s32(__inactive, __a, __b, __p)
#define vqdmlsdhq_m_s16(__inactive, __a, __b, __p) __arm_vqdmlsdhq_m_s16(__inactive, __a, __b, __p)
#define vqdmlsdhxq_m_s8(__inactive, __a, __b, __p) __arm_vqdmlsdhxq_m_s8(__inactive, __a, __b, __p)
#define vqdmlsdhxq_m_s32(__inactive, __a, __b, __p) __arm_vqdmlsdhxq_m_s32(__inactive, __a, __b, __p)
#define vqdmlsdhxq_m_s16(__inactive, __a, __b, __p) __arm_vqdmlsdhxq_m_s16(__inactive, __a, __b, __p)
#define vqdmulhq_m_n_s8(__inactive, __a, __b, __p) __arm_vqdmulhq_m_n_s8(__inactive, __a, __b, __p)
#define vqdmulhq_m_n_s32(__inactive, __a, __b, __p) __arm_vqdmulhq_m_n_s32(__inactive, __a, __b, __p)
#define vqdmulhq_m_n_s16(__inactive, __a, __b, __p) __arm_vqdmulhq_m_n_s16(__inactive, __a, __b, __p)
#define vqdmulhq_m_s8(__inactive, __a, __b, __p) __arm_vqdmulhq_m_s8(__inactive, __a, __b, __p)
#define vqdmulhq_m_s32(__inactive, __a, __b, __p) __arm_vqdmulhq_m_s32(__inactive, __a, __b, __p)
#define vqdmulhq_m_s16(__inactive, __a, __b, __p) __arm_vqdmulhq_m_s16(__inactive, __a, __b, __p)
#define vqrdmladhq_m_s8(__inactive, __a, __b, __p) __arm_vqrdmladhq_m_s8(__inactive, __a, __b, __p)
#define vqrdmladhq_m_s32(__inactive, __a, __b, __p) __arm_vqrdmladhq_m_s32(__inactive, __a, __b, __p)
#define vqrdmladhq_m_s16(__inactive, __a, __b, __p) __arm_vqrdmladhq_m_s16(__inactive, __a, __b, __p)
#define vqrdmladhxq_m_s8(__inactive, __a, __b, __p) __arm_vqrdmladhxq_m_s8(__inactive, __a, __b, __p)
#define vqrdmladhxq_m_s32(__inactive, __a, __b, __p) __arm_vqrdmladhxq_m_s32(__inactive, __a, __b, __p)
#define vqrdmladhxq_m_s16(__inactive, __a, __b, __p) __arm_vqrdmladhxq_m_s16(__inactive, __a, __b, __p)
#define vqrdmlahq_m_n_s8(__a, __b, __c, __p) __arm_vqrdmlahq_m_n_s8(__a, __b, __c, __p)
#define vqrdmlahq_m_n_s32(__a, __b, __c, __p) __arm_vqrdmlahq_m_n_s32(__a, __b, __c, __p)
#define vqrdmlahq_m_n_s16(__a, __b, __c, __p) __arm_vqrdmlahq_m_n_s16(__a, __b, __c, __p)
#define vqrdmlashq_m_n_s8(__a, __b, __c, __p) __arm_vqrdmlashq_m_n_s8(__a, __b, __c, __p)
#define vqrdmlashq_m_n_s32(__a, __b, __c, __p) __arm_vqrdmlashq_m_n_s32(__a, __b, __c, __p)
#define vqrdmlashq_m_n_s16(__a, __b, __c, __p) __arm_vqrdmlashq_m_n_s16(__a, __b, __c, __p)
#define vqrdmlsdhq_m_s8(__inactive, __a, __b, __p) __arm_vqrdmlsdhq_m_s8(__inactive, __a, __b, __p)
#define vqrdmlsdhq_m_s32(__inactive, __a, __b, __p) __arm_vqrdmlsdhq_m_s32(__inactive, __a, __b, __p)
#define vqrdmlsdhq_m_s16(__inactive, __a, __b, __p) __arm_vqrdmlsdhq_m_s16(__inactive, __a, __b, __p)
#define vqrdmlsdhxq_m_s8(__inactive, __a, __b, __p) __arm_vqrdmlsdhxq_m_s8(__inactive, __a, __b, __p)
#define vqrdmlsdhxq_m_s32(__inactive, __a, __b, __p) __arm_vqrdmlsdhxq_m_s32(__inactive, __a, __b, __p)
#define vqrdmlsdhxq_m_s16(__inactive, __a, __b, __p) __arm_vqrdmlsdhxq_m_s16(__inactive, __a, __b, __p)
#define vqrdmulhq_m_n_s8(__inactive, __a, __b, __p) __arm_vqrdmulhq_m_n_s8(__inactive, __a, __b, __p)
#define vqrdmulhq_m_n_s32(__inactive, __a, __b, __p) __arm_vqrdmulhq_m_n_s32(__inactive, __a, __b, __p)
#define vqrdmulhq_m_n_s16(__inactive, __a, __b, __p) __arm_vqrdmulhq_m_n_s16(__inactive, __a, __b, __p)
#define vqrdmulhq_m_s8(__inactive, __a, __b, __p) __arm_vqrdmulhq_m_s8(__inactive, __a, __b, __p)
#define vqrdmulhq_m_s32(__inactive, __a, __b, __p) __arm_vqrdmulhq_m_s32(__inactive, __a, __b, __p)
#define vqrdmulhq_m_s16(__inactive, __a, __b, __p) __arm_vqrdmulhq_m_s16(__inactive, __a, __b, __p)
#define vqrshlq_m_s8(__inactive, __a, __b, __p) __arm_vqrshlq_m_s8(__inactive, __a, __b, __p)
#define vqrshlq_m_s32(__inactive, __a, __b, __p) __arm_vqrshlq_m_s32(__inactive, __a, __b, __p)
#define vqrshlq_m_s16(__inactive, __a, __b, __p) __arm_vqrshlq_m_s16(__inactive, __a, __b, __p)
#define vqrshlq_m_u8(__inactive, __a, __b, __p) __arm_vqrshlq_m_u8(__inactive, __a, __b, __p)
#define vqrshlq_m_u32(__inactive, __a, __b, __p) __arm_vqrshlq_m_u32(__inactive, __a, __b, __p)
#define vqrshlq_m_u16(__inactive, __a, __b, __p) __arm_vqrshlq_m_u16(__inactive, __a, __b, __p)
#define vqshlq_m_n_s8(__inactive, __a, __imm, __p) __arm_vqshlq_m_n_s8(__inactive, __a, __imm, __p)
#define vqshlq_m_n_s32(__inactive, __a, __imm, __p) __arm_vqshlq_m_n_s32(__inactive, __a, __imm, __p)
#define vqshlq_m_n_s16(__inactive, __a, __imm, __p) __arm_vqshlq_m_n_s16(__inactive, __a, __imm, __p)
#define vqshlq_m_n_u8(__inactive, __a, __imm, __p) __arm_vqshlq_m_n_u8(__inactive, __a, __imm, __p)
#define vqshlq_m_n_u32(__inactive, __a, __imm, __p) __arm_vqshlq_m_n_u32(__inactive, __a, __imm, __p)
#define vqshlq_m_n_u16(__inactive, __a, __imm, __p) __arm_vqshlq_m_n_u16(__inactive, __a, __imm, __p)
#define vqshlq_m_s8(__inactive, __a, __b, __p) __arm_vqshlq_m_s8(__inactive, __a, __b, __p)
#define vqshlq_m_s32(__inactive, __a, __b, __p) __arm_vqshlq_m_s32(__inactive, __a, __b, __p)
#define vqshlq_m_s16(__inactive, __a, __b, __p) __arm_vqshlq_m_s16(__inactive, __a, __b, __p)
#define vqshlq_m_u8(__inactive, __a, __b, __p) __arm_vqshlq_m_u8(__inactive, __a, __b, __p)
#define vqshlq_m_u32(__inactive, __a, __b, __p) __arm_vqshlq_m_u32(__inactive, __a, __b, __p)
#define vqshlq_m_u16(__inactive, __a, __b, __p) __arm_vqshlq_m_u16(__inactive, __a, __b, __p)
#define vqsubq_m_n_s8(__inactive, __a, __b, __p) __arm_vqsubq_m_n_s8(__inactive, __a, __b, __p)
#define vqsubq_m_n_s32(__inactive, __a, __b, __p) __arm_vqsubq_m_n_s32(__inactive, __a, __b, __p)
#define vqsubq_m_n_s16(__inactive, __a, __b, __p) __arm_vqsubq_m_n_s16(__inactive, __a, __b, __p)
#define vqsubq_m_n_u8(__inactive, __a, __b, __p) __arm_vqsubq_m_n_u8(__inactive, __a, __b, __p)
#define vqsubq_m_n_u32(__inactive, __a, __b, __p) __arm_vqsubq_m_n_u32(__inactive, __a, __b, __p)
#define vqsubq_m_n_u16(__inactive, __a, __b, __p) __arm_vqsubq_m_n_u16(__inactive, __a, __b, __p)
#define vqsubq_m_s8(__inactive, __a, __b, __p) __arm_vqsubq_m_s8(__inactive, __a, __b, __p)
#define vqsubq_m_s32(__inactive, __a, __b, __p) __arm_vqsubq_m_s32(__inactive, __a, __b, __p)
#define vqsubq_m_s16(__inactive, __a, __b, __p) __arm_vqsubq_m_s16(__inactive, __a, __b, __p)
#define vqsubq_m_u8(__inactive, __a, __b, __p) __arm_vqsubq_m_u8(__inactive, __a, __b, __p)
#define vqsubq_m_u32(__inactive, __a, __b, __p) __arm_vqsubq_m_u32(__inactive, __a, __b, __p)
#define vqsubq_m_u16(__inactive, __a, __b, __p) __arm_vqsubq_m_u16(__inactive, __a, __b, __p)
#define vrhaddq_m_s8(__inactive, __a, __b, __p) __arm_vrhaddq_m_s8(__inactive, __a, __b, __p)
#define vrhaddq_m_s32(__inactive, __a, __b, __p) __arm_vrhaddq_m_s32(__inactive, __a, __b, __p)
#define vrhaddq_m_s16(__inactive, __a, __b, __p) __arm_vrhaddq_m_s16(__inactive, __a, __b, __p)
#define vrhaddq_m_u8(__inactive, __a, __b, __p) __arm_vrhaddq_m_u8(__inactive, __a, __b, __p)
#define vrhaddq_m_u32(__inactive, __a, __b, __p) __arm_vrhaddq_m_u32(__inactive, __a, __b, __p)
#define vrhaddq_m_u16(__inactive, __a, __b, __p) __arm_vrhaddq_m_u16(__inactive, __a, __b, __p)
#define vrmulhq_m_s8(__inactive, __a, __b, __p) __arm_vrmulhq_m_s8(__inactive, __a, __b, __p)
#define vrmulhq_m_s32(__inactive, __a, __b, __p) __arm_vrmulhq_m_s32(__inactive, __a, __b, __p)
#define vrmulhq_m_s16(__inactive, __a, __b, __p) __arm_vrmulhq_m_s16(__inactive, __a, __b, __p)
#define vrmulhq_m_u8(__inactive, __a, __b, __p) __arm_vrmulhq_m_u8(__inactive, __a, __b, __p)
#define vrmulhq_m_u32(__inactive, __a, __b, __p) __arm_vrmulhq_m_u32(__inactive, __a, __b, __p)
#define vrmulhq_m_u16(__inactive, __a, __b, __p) __arm_vrmulhq_m_u16(__inactive, __a, __b, __p)
#define vrshlq_m_s8(__inactive, __a, __b, __p) __arm_vrshlq_m_s8(__inactive, __a, __b, __p)
#define vrshlq_m_s32(__inactive, __a, __b, __p) __arm_vrshlq_m_s32(__inactive, __a, __b, __p)
#define vrshlq_m_s16(__inactive, __a, __b, __p) __arm_vrshlq_m_s16(__inactive, __a, __b, __p)
#define vrshlq_m_u8(__inactive, __a, __b, __p) __arm_vrshlq_m_u8(__inactive, __a, __b, __p)
#define vrshlq_m_u32(__inactive, __a, __b, __p) __arm_vrshlq_m_u32(__inactive, __a, __b, __p)
#define vrshlq_m_u16(__inactive, __a, __b, __p) __arm_vrshlq_m_u16(__inactive, __a, __b, __p)
#define vrshrq_m_n_s8(__inactive, __a, __imm, __p) __arm_vrshrq_m_n_s8(__inactive, __a, __imm, __p)
#define vrshrq_m_n_s32(__inactive, __a, __imm, __p) __arm_vrshrq_m_n_s32(__inactive, __a, __imm, __p)
#define vrshrq_m_n_s16(__inactive, __a, __imm, __p) __arm_vrshrq_m_n_s16(__inactive, __a, __imm, __p)
#define vrshrq_m_n_u8(__inactive, __a, __imm, __p) __arm_vrshrq_m_n_u8(__inactive, __a, __imm, __p)
#define vrshrq_m_n_u32(__inactive, __a, __imm, __p) __arm_vrshrq_m_n_u32(__inactive, __a, __imm, __p)
#define vrshrq_m_n_u16(__inactive, __a, __imm, __p) __arm_vrshrq_m_n_u16(__inactive, __a, __imm, __p)
1997 | #define vshlq_m_n_s8(__inactive, __a, __imm, __p) __arm_vshlq_m_n_s8(__inactive, __a, __imm, __p) | |
1998 | #define vshlq_m_n_s32(__inactive, __a, __imm, __p) __arm_vshlq_m_n_s32(__inactive, __a, __imm, __p) | |
1999 | #define vshlq_m_n_s16(__inactive, __a, __imm, __p) __arm_vshlq_m_n_s16(__inactive, __a, __imm, __p) | |
2000 | #define vshlq_m_n_u8(__inactive, __a, __imm, __p) __arm_vshlq_m_n_u8(__inactive, __a, __imm, __p) | |
2001 | #define vshlq_m_n_u32(__inactive, __a, __imm, __p) __arm_vshlq_m_n_u32(__inactive, __a, __imm, __p) | |
2002 | #define vshlq_m_n_u16(__inactive, __a, __imm, __p) __arm_vshlq_m_n_u16(__inactive, __a, __imm, __p) | |
2003 | #define vshrq_m_n_s8(__inactive, __a, __imm, __p) __arm_vshrq_m_n_s8(__inactive, __a, __imm, __p) | |
2004 | #define vshrq_m_n_s32(__inactive, __a, __imm, __p) __arm_vshrq_m_n_s32(__inactive, __a, __imm, __p) | |
2005 | #define vshrq_m_n_s16(__inactive, __a, __imm, __p) __arm_vshrq_m_n_s16(__inactive, __a, __imm, __p) | |
2006 | #define vshrq_m_n_u8(__inactive, __a, __imm, __p) __arm_vshrq_m_n_u8(__inactive, __a, __imm, __p) | |
2007 | #define vshrq_m_n_u32(__inactive, __a, __imm, __p) __arm_vshrq_m_n_u32(__inactive, __a, __imm, __p) | |
2008 | #define vshrq_m_n_u16(__inactive, __a, __imm, __p) __arm_vshrq_m_n_u16(__inactive, __a, __imm, __p) | |
2009 | #define vsliq_m_n_s8(__a, __b, __imm, __p) __arm_vsliq_m_n_s8(__a, __b, __imm, __p) | |
2010 | #define vsliq_m_n_s32(__a, __b, __imm, __p) __arm_vsliq_m_n_s32(__a, __b, __imm, __p) | |
2011 | #define vsliq_m_n_s16(__a, __b, __imm, __p) __arm_vsliq_m_n_s16(__a, __b, __imm, __p) | |
2012 | #define vsliq_m_n_u8(__a, __b, __imm, __p) __arm_vsliq_m_n_u8(__a, __b, __imm, __p) | |
2013 | #define vsliq_m_n_u32(__a, __b, __imm, __p) __arm_vsliq_m_n_u32(__a, __b, __imm, __p) | |
2014 | #define vsliq_m_n_u16(__a, __b, __imm, __p) __arm_vsliq_m_n_u16(__a, __b, __imm, __p) | |
2015 | #define vsubq_m_n_s8(__inactive, __a, __b, __p) __arm_vsubq_m_n_s8(__inactive, __a, __b, __p) | |
2016 | #define vsubq_m_n_s32(__inactive, __a, __b, __p) __arm_vsubq_m_n_s32(__inactive, __a, __b, __p) | |
2017 | #define vsubq_m_n_s16(__inactive, __a, __b, __p) __arm_vsubq_m_n_s16(__inactive, __a, __b, __p) | |
2018 | #define vsubq_m_n_u8(__inactive, __a, __b, __p) __arm_vsubq_m_n_u8(__inactive, __a, __b, __p) | |
2019 | #define vsubq_m_n_u32(__inactive, __a, __b, __p) __arm_vsubq_m_n_u32(__inactive, __a, __b, __p) | |
2020 | #define vsubq_m_n_u16(__inactive, __a, __b, __p) __arm_vsubq_m_n_u16(__inactive, __a, __b, __p) | |
f2170a37 SP |
2021 | #define vmlaldavaq_p_s32(__a, __b, __c, __p) __arm_vmlaldavaq_p_s32(__a, __b, __c, __p) |
2022 | #define vmlaldavaq_p_s16(__a, __b, __c, __p) __arm_vmlaldavaq_p_s16(__a, __b, __c, __p) | |
2023 | #define vmlaldavaq_p_u32(__a, __b, __c, __p) __arm_vmlaldavaq_p_u32(__a, __b, __c, __p) | |
2024 | #define vmlaldavaq_p_u16(__a, __b, __c, __p) __arm_vmlaldavaq_p_u16(__a, __b, __c, __p) | |
2025 | #define vmlaldavaxq_p_s32(__a, __b, __c, __p) __arm_vmlaldavaxq_p_s32(__a, __b, __c, __p) | |
2026 | #define vmlaldavaxq_p_s16(__a, __b, __c, __p) __arm_vmlaldavaxq_p_s16(__a, __b, __c, __p) | |
f2170a37 SP |
2027 | #define vmlsldavaq_p_s32(__a, __b, __c, __p) __arm_vmlsldavaq_p_s32(__a, __b, __c, __p) |
2028 | #define vmlsldavaq_p_s16(__a, __b, __c, __p) __arm_vmlsldavaq_p_s16(__a, __b, __c, __p) | |
2029 | #define vmlsldavaxq_p_s32(__a, __b, __c, __p) __arm_vmlsldavaxq_p_s32(__a, __b, __c, __p) | |
2030 | #define vmlsldavaxq_p_s16(__a, __b, __c, __p) __arm_vmlsldavaxq_p_s16(__a, __b, __c, __p) | |
2031 | #define vmullbq_poly_m_p8(__inactive, __a, __b, __p) __arm_vmullbq_poly_m_p8(__inactive, __a, __b, __p) | |
2032 | #define vmullbq_poly_m_p16(__inactive, __a, __b, __p) __arm_vmullbq_poly_m_p16(__inactive, __a, __b, __p) | |
2033 | #define vmulltq_poly_m_p8(__inactive, __a, __b, __p) __arm_vmulltq_poly_m_p8(__inactive, __a, __b, __p) | |
2034 | #define vmulltq_poly_m_p16(__inactive, __a, __b, __p) __arm_vmulltq_poly_m_p16(__inactive, __a, __b, __p) | |
2035 | #define vqdmullbq_m_n_s32(__inactive, __a, __b, __p) __arm_vqdmullbq_m_n_s32(__inactive, __a, __b, __p) | |
2036 | #define vqdmullbq_m_n_s16(__inactive, __a, __b, __p) __arm_vqdmullbq_m_n_s16(__inactive, __a, __b, __p) | |
2037 | #define vqdmullbq_m_s32(__inactive, __a, __b, __p) __arm_vqdmullbq_m_s32(__inactive, __a, __b, __p) | |
2038 | #define vqdmullbq_m_s16(__inactive, __a, __b, __p) __arm_vqdmullbq_m_s16(__inactive, __a, __b, __p) | |
2039 | #define vqdmulltq_m_n_s32(__inactive, __a, __b, __p) __arm_vqdmulltq_m_n_s32(__inactive, __a, __b, __p) | |
2040 | #define vqdmulltq_m_n_s16(__inactive, __a, __b, __p) __arm_vqdmulltq_m_n_s16(__inactive, __a, __b, __p) | |
2041 | #define vqdmulltq_m_s32(__inactive, __a, __b, __p) __arm_vqdmulltq_m_s32(__inactive, __a, __b, __p) | |
2042 | #define vqdmulltq_m_s16(__inactive, __a, __b, __p) __arm_vqdmulltq_m_s16(__inactive, __a, __b, __p) | |
2043 | #define vqrshrnbq_m_n_s32(__a, __b, __imm, __p) __arm_vqrshrnbq_m_n_s32(__a, __b, __imm, __p) | |
2044 | #define vqrshrnbq_m_n_s16(__a, __b, __imm, __p) __arm_vqrshrnbq_m_n_s16(__a, __b, __imm, __p) | |
2045 | #define vqrshrnbq_m_n_u32(__a, __b, __imm, __p) __arm_vqrshrnbq_m_n_u32(__a, __b, __imm, __p) | |
2046 | #define vqrshrnbq_m_n_u16(__a, __b, __imm, __p) __arm_vqrshrnbq_m_n_u16(__a, __b, __imm, __p) | |
2047 | #define vqrshrntq_m_n_s32(__a, __b, __imm, __p) __arm_vqrshrntq_m_n_s32(__a, __b, __imm, __p) | |
2048 | #define vqrshrntq_m_n_s16(__a, __b, __imm, __p) __arm_vqrshrntq_m_n_s16(__a, __b, __imm, __p) | |
2049 | #define vqrshrntq_m_n_u32(__a, __b, __imm, __p) __arm_vqrshrntq_m_n_u32(__a, __b, __imm, __p) | |
2050 | #define vqrshrntq_m_n_u16(__a, __b, __imm, __p) __arm_vqrshrntq_m_n_u16(__a, __b, __imm, __p) | |
2051 | #define vqrshrunbq_m_n_s32(__a, __b, __imm, __p) __arm_vqrshrunbq_m_n_s32(__a, __b, __imm, __p) | |
2052 | #define vqrshrunbq_m_n_s16(__a, __b, __imm, __p) __arm_vqrshrunbq_m_n_s16(__a, __b, __imm, __p) | |
2053 | #define vqrshruntq_m_n_s32(__a, __b, __imm, __p) __arm_vqrshruntq_m_n_s32(__a, __b, __imm, __p) | |
2054 | #define vqrshruntq_m_n_s16(__a, __b, __imm, __p) __arm_vqrshruntq_m_n_s16(__a, __b, __imm, __p) | |
2055 | #define vqshrnbq_m_n_s32(__a, __b, __imm, __p) __arm_vqshrnbq_m_n_s32(__a, __b, __imm, __p) | |
2056 | #define vqshrnbq_m_n_s16(__a, __b, __imm, __p) __arm_vqshrnbq_m_n_s16(__a, __b, __imm, __p) | |
2057 | #define vqshrnbq_m_n_u32(__a, __b, __imm, __p) __arm_vqshrnbq_m_n_u32(__a, __b, __imm, __p) | |
2058 | #define vqshrnbq_m_n_u16(__a, __b, __imm, __p) __arm_vqshrnbq_m_n_u16(__a, __b, __imm, __p) | |
2059 | #define vqshrntq_m_n_s32(__a, __b, __imm, __p) __arm_vqshrntq_m_n_s32(__a, __b, __imm, __p) | |
2060 | #define vqshrntq_m_n_s16(__a, __b, __imm, __p) __arm_vqshrntq_m_n_s16(__a, __b, __imm, __p) | |
2061 | #define vqshrntq_m_n_u32(__a, __b, __imm, __p) __arm_vqshrntq_m_n_u32(__a, __b, __imm, __p) | |
2062 | #define vqshrntq_m_n_u16(__a, __b, __imm, __p) __arm_vqshrntq_m_n_u16(__a, __b, __imm, __p) | |
2063 | #define vqshrunbq_m_n_s32(__a, __b, __imm, __p) __arm_vqshrunbq_m_n_s32(__a, __b, __imm, __p) | |
2064 | #define vqshrunbq_m_n_s16(__a, __b, __imm, __p) __arm_vqshrunbq_m_n_s16(__a, __b, __imm, __p) | |
2065 | #define vqshruntq_m_n_s32(__a, __b, __imm, __p) __arm_vqshruntq_m_n_s32(__a, __b, __imm, __p) | |
2066 | #define vqshruntq_m_n_s16(__a, __b, __imm, __p) __arm_vqshruntq_m_n_s16(__a, __b, __imm, __p) | |
2067 | #define vrmlaldavhaq_p_s32(__a, __b, __c, __p) __arm_vrmlaldavhaq_p_s32(__a, __b, __c, __p) | |
2068 | #define vrmlaldavhaq_p_u32(__a, __b, __c, __p) __arm_vrmlaldavhaq_p_u32(__a, __b, __c, __p) | |
2069 | #define vrmlaldavhaxq_p_s32(__a, __b, __c, __p) __arm_vrmlaldavhaxq_p_s32(__a, __b, __c, __p) | |
2070 | #define vrmlsldavhaq_p_s32(__a, __b, __c, __p) __arm_vrmlsldavhaq_p_s32(__a, __b, __c, __p) | |
2071 | #define vrmlsldavhaxq_p_s32(__a, __b, __c, __p) __arm_vrmlsldavhaxq_p_s32(__a, __b, __c, __p) | |
2072 | #define vrshrnbq_m_n_s32(__a, __b, __imm, __p) __arm_vrshrnbq_m_n_s32(__a, __b, __imm, __p) | |
2073 | #define vrshrnbq_m_n_s16(__a, __b, __imm, __p) __arm_vrshrnbq_m_n_s16(__a, __b, __imm, __p) | |
2074 | #define vrshrnbq_m_n_u32(__a, __b, __imm, __p) __arm_vrshrnbq_m_n_u32(__a, __b, __imm, __p) | |
2075 | #define vrshrnbq_m_n_u16(__a, __b, __imm, __p) __arm_vrshrnbq_m_n_u16(__a, __b, __imm, __p) | |
2076 | #define vrshrntq_m_n_s32(__a, __b, __imm, __p) __arm_vrshrntq_m_n_s32(__a, __b, __imm, __p) | |
2077 | #define vrshrntq_m_n_s16(__a, __b, __imm, __p) __arm_vrshrntq_m_n_s16(__a, __b, __imm, __p) | |
2078 | #define vrshrntq_m_n_u32(__a, __b, __imm, __p) __arm_vrshrntq_m_n_u32(__a, __b, __imm, __p) | |
2079 | #define vrshrntq_m_n_u16(__a, __b, __imm, __p) __arm_vrshrntq_m_n_u16(__a, __b, __imm, __p) | |
2080 | #define vshllbq_m_n_s8(__inactive, __a, __imm, __p) __arm_vshllbq_m_n_s8(__inactive, __a, __imm, __p) | |
2081 | #define vshllbq_m_n_s16(__inactive, __a, __imm, __p) __arm_vshllbq_m_n_s16(__inactive, __a, __imm, __p) | |
2082 | #define vshllbq_m_n_u8(__inactive, __a, __imm, __p) __arm_vshllbq_m_n_u8(__inactive, __a, __imm, __p) | |
2083 | #define vshllbq_m_n_u16(__inactive, __a, __imm, __p) __arm_vshllbq_m_n_u16(__inactive, __a, __imm, __p) | |
2084 | #define vshlltq_m_n_s8(__inactive, __a, __imm, __p) __arm_vshlltq_m_n_s8(__inactive, __a, __imm, __p) | |
2085 | #define vshlltq_m_n_s16(__inactive, __a, __imm, __p) __arm_vshlltq_m_n_s16(__inactive, __a, __imm, __p) | |
2086 | #define vshlltq_m_n_u8(__inactive, __a, __imm, __p) __arm_vshlltq_m_n_u8(__inactive, __a, __imm, __p) | |
2087 | #define vshlltq_m_n_u16(__inactive, __a, __imm, __p) __arm_vshlltq_m_n_u16(__inactive, __a, __imm, __p) | |
2088 | #define vshrnbq_m_n_s32(__a, __b, __imm, __p) __arm_vshrnbq_m_n_s32(__a, __b, __imm, __p) | |
2089 | #define vshrnbq_m_n_s16(__a, __b, __imm, __p) __arm_vshrnbq_m_n_s16(__a, __b, __imm, __p) | |
2090 | #define vshrnbq_m_n_u32(__a, __b, __imm, __p) __arm_vshrnbq_m_n_u32(__a, __b, __imm, __p) | |
2091 | #define vshrnbq_m_n_u16(__a, __b, __imm, __p) __arm_vshrnbq_m_n_u16(__a, __b, __imm, __p) | |
2092 | #define vshrntq_m_n_s32(__a, __b, __imm, __p) __arm_vshrntq_m_n_s32(__a, __b, __imm, __p) | |
2093 | #define vshrntq_m_n_s16(__a, __b, __imm, __p) __arm_vshrntq_m_n_s16(__a, __b, __imm, __p) | |
2094 | #define vshrntq_m_n_u32(__a, __b, __imm, __p) __arm_vshrntq_m_n_u32(__a, __b, __imm, __p) | |
2095 | #define vshrntq_m_n_u16(__a, __b, __imm, __p) __arm_vshrntq_m_n_u16(__a, __b, __imm, __p) | |
/* User-namespace aliases for predicated (merging, "_m") MVE floating-point
   intrinsics (f16/f32): arithmetic, bitwise, complex add/multiply-accumulate
   rotations, fixed-point conversions and fused multiply-add/subtract.
   Each macro forwards its arguments, unchanged and in order, to the
   __arm_-prefixed implementation declared elsewhere in this header.  */
#define vabdq_m_f32(__inactive, __a, __b, __p) __arm_vabdq_m_f32(__inactive, __a, __b, __p)
#define vabdq_m_f16(__inactive, __a, __b, __p) __arm_vabdq_m_f16(__inactive, __a, __b, __p)
#define vaddq_m_f32(__inactive, __a, __b, __p) __arm_vaddq_m_f32(__inactive, __a, __b, __p)
#define vaddq_m_f16(__inactive, __a, __b, __p) __arm_vaddq_m_f16(__inactive, __a, __b, __p)
#define vaddq_m_n_f32(__inactive, __a, __b, __p) __arm_vaddq_m_n_f32(__inactive, __a, __b, __p)
#define vaddq_m_n_f16(__inactive, __a, __b, __p) __arm_vaddq_m_n_f16(__inactive, __a, __b, __p)
#define vandq_m_f32(__inactive, __a, __b, __p) __arm_vandq_m_f32(__inactive, __a, __b, __p)
#define vandq_m_f16(__inactive, __a, __b, __p) __arm_vandq_m_f16(__inactive, __a, __b, __p)
#define vbicq_m_f32(__inactive, __a, __b, __p) __arm_vbicq_m_f32(__inactive, __a, __b, __p)
#define vbicq_m_f16(__inactive, __a, __b, __p) __arm_vbicq_m_f16(__inactive, __a, __b, __p)
#define vbrsrq_m_n_f32(__inactive, __a, __b, __p) __arm_vbrsrq_m_n_f32(__inactive, __a, __b, __p)
#define vbrsrq_m_n_f16(__inactive, __a, __b, __p) __arm_vbrsrq_m_n_f16(__inactive, __a, __b, __p)
#define vcaddq_rot270_m_f32(__inactive, __a, __b, __p) __arm_vcaddq_rot270_m_f32(__inactive, __a, __b, __p)
#define vcaddq_rot270_m_f16(__inactive, __a, __b, __p) __arm_vcaddq_rot270_m_f16(__inactive, __a, __b, __p)
#define vcaddq_rot90_m_f32(__inactive, __a, __b, __p) __arm_vcaddq_rot90_m_f32(__inactive, __a, __b, __p)
#define vcaddq_rot90_m_f16(__inactive, __a, __b, __p) __arm_vcaddq_rot90_m_f16(__inactive, __a, __b, __p)
#define vcmlaq_m_f32(__a, __b, __c, __p) __arm_vcmlaq_m_f32(__a, __b, __c, __p)
#define vcmlaq_m_f16(__a, __b, __c, __p) __arm_vcmlaq_m_f16(__a, __b, __c, __p)
#define vcmlaq_rot180_m_f32(__a, __b, __c, __p) __arm_vcmlaq_rot180_m_f32(__a, __b, __c, __p)
#define vcmlaq_rot180_m_f16(__a, __b, __c, __p) __arm_vcmlaq_rot180_m_f16(__a, __b, __c, __p)
#define vcmlaq_rot270_m_f32(__a, __b, __c, __p) __arm_vcmlaq_rot270_m_f32(__a, __b, __c, __p)
#define vcmlaq_rot270_m_f16(__a, __b, __c, __p) __arm_vcmlaq_rot270_m_f16(__a, __b, __c, __p)
#define vcmlaq_rot90_m_f32(__a, __b, __c, __p) __arm_vcmlaq_rot90_m_f32(__a, __b, __c, __p)
#define vcmlaq_rot90_m_f16(__a, __b, __c, __p) __arm_vcmlaq_rot90_m_f16(__a, __b, __c, __p)
#define vcmulq_m_f32(__inactive, __a, __b, __p) __arm_vcmulq_m_f32(__inactive, __a, __b, __p)
#define vcmulq_m_f16(__inactive, __a, __b, __p) __arm_vcmulq_m_f16(__inactive, __a, __b, __p)
#define vcmulq_rot180_m_f32(__inactive, __a, __b, __p) __arm_vcmulq_rot180_m_f32(__inactive, __a, __b, __p)
#define vcmulq_rot180_m_f16(__inactive, __a, __b, __p) __arm_vcmulq_rot180_m_f16(__inactive, __a, __b, __p)
#define vcmulq_rot270_m_f32(__inactive, __a, __b, __p) __arm_vcmulq_rot270_m_f32(__inactive, __a, __b, __p)
#define vcmulq_rot270_m_f16(__inactive, __a, __b, __p) __arm_vcmulq_rot270_m_f16(__inactive, __a, __b, __p)
#define vcmulq_rot90_m_f32(__inactive, __a, __b, __p) __arm_vcmulq_rot90_m_f32(__inactive, __a, __b, __p)
#define vcmulq_rot90_m_f16(__inactive, __a, __b, __p) __arm_vcmulq_rot90_m_f16(__inactive, __a, __b, __p)
#define vcvtq_m_n_s32_f32(__inactive, __a, __imm6, __p) __arm_vcvtq_m_n_s32_f32(__inactive, __a, __imm6, __p)
#define vcvtq_m_n_s16_f16(__inactive, __a, __imm6, __p) __arm_vcvtq_m_n_s16_f16(__inactive, __a, __imm6, __p)
#define vcvtq_m_n_u32_f32(__inactive, __a, __imm6, __p) __arm_vcvtq_m_n_u32_f32(__inactive, __a, __imm6, __p)
#define vcvtq_m_n_u16_f16(__inactive, __a, __imm6, __p) __arm_vcvtq_m_n_u16_f16(__inactive, __a, __imm6, __p)
#define veorq_m_f32(__inactive, __a, __b, __p) __arm_veorq_m_f32(__inactive, __a, __b, __p)
#define veorq_m_f16(__inactive, __a, __b, __p) __arm_veorq_m_f16(__inactive, __a, __b, __p)
#define vfmaq_m_f32(__a, __b, __c, __p) __arm_vfmaq_m_f32(__a, __b, __c, __p)
#define vfmaq_m_f16(__a, __b, __c, __p) __arm_vfmaq_m_f16(__a, __b, __c, __p)
#define vfmaq_m_n_f32(__a, __b, __c, __p) __arm_vfmaq_m_n_f32(__a, __b, __c, __p)
#define vfmaq_m_n_f16(__a, __b, __c, __p) __arm_vfmaq_m_n_f16(__a, __b, __c, __p)
#define vfmasq_m_n_f32(__a, __b, __c, __p) __arm_vfmasq_m_n_f32(__a, __b, __c, __p)
#define vfmasq_m_n_f16(__a, __b, __c, __p) __arm_vfmasq_m_n_f16(__a, __b, __c, __p)
#define vfmsq_m_f32(__a, __b, __c, __p) __arm_vfmsq_m_f32(__a, __b, __c, __p)
#define vfmsq_m_f16(__a, __b, __c, __p) __arm_vfmsq_m_f16(__a, __b, __c, __p)
#define vmaxnmq_m_f32(__inactive, __a, __b, __p) __arm_vmaxnmq_m_f32(__inactive, __a, __b, __p)
#define vmaxnmq_m_f16(__inactive, __a, __b, __p) __arm_vmaxnmq_m_f16(__inactive, __a, __b, __p)
#define vminnmq_m_f32(__inactive, __a, __b, __p) __arm_vminnmq_m_f32(__inactive, __a, __b, __p)
#define vminnmq_m_f16(__inactive, __a, __b, __p) __arm_vminnmq_m_f16(__inactive, __a, __b, __p)
#define vmulq_m_f32(__inactive, __a, __b, __p) __arm_vmulq_m_f32(__inactive, __a, __b, __p)
#define vmulq_m_f16(__inactive, __a, __b, __p) __arm_vmulq_m_f16(__inactive, __a, __b, __p)
#define vmulq_m_n_f32(__inactive, __a, __b, __p) __arm_vmulq_m_n_f32(__inactive, __a, __b, __p)
#define vmulq_m_n_f16(__inactive, __a, __b, __p) __arm_vmulq_m_n_f16(__inactive, __a, __b, __p)
#define vornq_m_f32(__inactive, __a, __b, __p) __arm_vornq_m_f32(__inactive, __a, __b, __p)
#define vornq_m_f16(__inactive, __a, __b, __p) __arm_vornq_m_f16(__inactive, __a, __b, __p)
#define vorrq_m_f32(__inactive, __a, __b, __p) __arm_vorrq_m_f32(__inactive, __a, __b, __p)
#define vorrq_m_f16(__inactive, __a, __b, __p) __arm_vorrq_m_f16(__inactive, __a, __b, __p)
#define vsubq_m_f32(__inactive, __a, __b, __p) __arm_vsubq_m_f32(__inactive, __a, __b, __p)
#define vsubq_m_f16(__inactive, __a, __b, __p) __arm_vsubq_m_f16(__inactive, __a, __b, __p)
#define vsubq_m_n_f32(__inactive, __a, __b, __p) __arm_vsubq_m_n_f32(__inactive, __a, __b, __p)
#define vsubq_m_n_f16(__inactive, __a, __b, __p) __arm_vsubq_m_n_f16(__inactive, __a, __b, __p)
/* User-namespace aliases for MVE byte-store (vstrbq), scatter-offset store,
   and word scatter-base store intrinsics.  Each macro forwards its arguments,
   unchanged and in order, to the __arm_-prefixed implementation declared
   elsewhere in this header.  */
#define vstrbq_s8( __addr, __value) __arm_vstrbq_s8( __addr, __value)
#define vstrbq_u8( __addr, __value) __arm_vstrbq_u8( __addr, __value)
#define vstrbq_u16( __addr, __value) __arm_vstrbq_u16( __addr, __value)
#define vstrbq_scatter_offset_s8( __base, __offset, __value) __arm_vstrbq_scatter_offset_s8( __base, __offset, __value)
#define vstrbq_scatter_offset_u8( __base, __offset, __value) __arm_vstrbq_scatter_offset_u8( __base, __offset, __value)
#define vstrbq_scatter_offset_u16( __base, __offset, __value) __arm_vstrbq_scatter_offset_u16( __base, __offset, __value)
#define vstrbq_s16( __addr, __value) __arm_vstrbq_s16( __addr, __value)
#define vstrbq_u32( __addr, __value) __arm_vstrbq_u32( __addr, __value)
#define vstrbq_scatter_offset_s16( __base, __offset, __value) __arm_vstrbq_scatter_offset_s16( __base, __offset, __value)
#define vstrbq_scatter_offset_u32( __base, __offset, __value) __arm_vstrbq_scatter_offset_u32( __base, __offset, __value)
#define vstrbq_s32( __addr, __value) __arm_vstrbq_s32( __addr, __value)
#define vstrbq_scatter_offset_s32( __base, __offset, __value) __arm_vstrbq_scatter_offset_s32( __base, __offset, __value)
#define vstrwq_scatter_base_s32(__addr, __offset, __value) __arm_vstrwq_scatter_base_s32(__addr, __offset, __value)
#define vstrwq_scatter_base_u32(__addr, __offset, __value) __arm_vstrwq_scatter_base_u32(__addr, __offset, __value)
/* User-namespace aliases for MVE byte-load (vldrbq), gather-offset load and
   word gather-base load intrinsics.  Each macro forwards its arguments,
   unchanged and in order, to the __arm_-prefixed implementation declared
   elsewhere in this header.  */
#define vldrbq_gather_offset_u8(__base, __offset) __arm_vldrbq_gather_offset_u8(__base, __offset)
#define vldrbq_gather_offset_s8(__base, __offset) __arm_vldrbq_gather_offset_s8(__base, __offset)
#define vldrbq_s8(__base) __arm_vldrbq_s8(__base)
#define vldrbq_u8(__base) __arm_vldrbq_u8(__base)
#define vldrbq_gather_offset_u16(__base, __offset) __arm_vldrbq_gather_offset_u16(__base, __offset)
#define vldrbq_gather_offset_s16(__base, __offset) __arm_vldrbq_gather_offset_s16(__base, __offset)
#define vldrbq_s16(__base) __arm_vldrbq_s16(__base)
#define vldrbq_u16(__base) __arm_vldrbq_u16(__base)
#define vldrbq_gather_offset_u32(__base, __offset) __arm_vldrbq_gather_offset_u32(__base, __offset)
#define vldrbq_gather_offset_s32(__base, __offset) __arm_vldrbq_gather_offset_s32(__base, __offset)
#define vldrbq_s32(__base) __arm_vldrbq_s32(__base)
#define vldrbq_u32(__base) __arm_vldrbq_u32(__base)
#define vldrwq_gather_base_s32(__addr, __offset) __arm_vldrwq_gather_base_s32(__addr, __offset)
#define vldrwq_gather_base_u32(__addr, __offset) __arm_vldrwq_gather_base_u32(__addr, __offset)
/* User-namespace aliases for predicated ("_p") MVE byte stores,
   scatter-offset stores, and word scatter-base stores.  Each macro forwards
   its arguments, unchanged and in order, to the __arm_-prefixed
   implementation declared elsewhere in this header.  */
#define vstrbq_p_s8( __addr, __value, __p) __arm_vstrbq_p_s8( __addr, __value, __p)
#define vstrbq_p_s32( __addr, __value, __p) __arm_vstrbq_p_s32( __addr, __value, __p)
#define vstrbq_p_s16( __addr, __value, __p) __arm_vstrbq_p_s16( __addr, __value, __p)
#define vstrbq_p_u8( __addr, __value, __p) __arm_vstrbq_p_u8( __addr, __value, __p)
#define vstrbq_p_u32( __addr, __value, __p) __arm_vstrbq_p_u32( __addr, __value, __p)
#define vstrbq_p_u16( __addr, __value, __p) __arm_vstrbq_p_u16( __addr, __value, __p)
#define vstrbq_scatter_offset_p_s8( __base, __offset, __value, __p) __arm_vstrbq_scatter_offset_p_s8( __base, __offset, __value, __p)
#define vstrbq_scatter_offset_p_s32( __base, __offset, __value, __p) __arm_vstrbq_scatter_offset_p_s32( __base, __offset, __value, __p)
#define vstrbq_scatter_offset_p_s16( __base, __offset, __value, __p) __arm_vstrbq_scatter_offset_p_s16( __base, __offset, __value, __p)
#define vstrbq_scatter_offset_p_u8( __base, __offset, __value, __p) __arm_vstrbq_scatter_offset_p_u8( __base, __offset, __value, __p)
#define vstrbq_scatter_offset_p_u32( __base, __offset, __value, __p) __arm_vstrbq_scatter_offset_p_u32( __base, __offset, __value, __p)
#define vstrbq_scatter_offset_p_u16( __base, __offset, __value, __p) __arm_vstrbq_scatter_offset_p_u16( __base, __offset, __value, __p)
#define vstrwq_scatter_base_p_s32(__addr, __offset, __value, __p) __arm_vstrwq_scatter_base_p_s32(__addr, __offset, __value, __p)
#define vstrwq_scatter_base_p_u32(__addr, __offset, __value, __p) __arm_vstrwq_scatter_base_p_u32(__addr, __offset, __value, __p)
/* User-namespace aliases for zero-predicated ("_z") MVE byte loads,
   gather-offset loads, and word gather-base loads.  Each macro forwards its
   arguments, unchanged and in order, to the __arm_-prefixed implementation
   declared elsewhere in this header.  */
#define vldrbq_gather_offset_z_s16(__base, __offset, __p) __arm_vldrbq_gather_offset_z_s16(__base, __offset, __p)
#define vldrbq_gather_offset_z_u8(__base, __offset, __p) __arm_vldrbq_gather_offset_z_u8(__base, __offset, __p)
#define vldrbq_gather_offset_z_s32(__base, __offset, __p) __arm_vldrbq_gather_offset_z_s32(__base, __offset, __p)
#define vldrbq_gather_offset_z_u16(__base, __offset, __p) __arm_vldrbq_gather_offset_z_u16(__base, __offset, __p)
#define vldrbq_gather_offset_z_u32(__base, __offset, __p) __arm_vldrbq_gather_offset_z_u32(__base, __offset, __p)
#define vldrbq_gather_offset_z_s8(__base, __offset, __p) __arm_vldrbq_gather_offset_z_s8(__base, __offset, __p)
#define vldrbq_z_s16(__base, __p) __arm_vldrbq_z_s16(__base, __p)
#define vldrbq_z_u8(__base, __p) __arm_vldrbq_z_u8(__base, __p)
#define vldrbq_z_s8(__base, __p) __arm_vldrbq_z_s8(__base, __p)
#define vldrbq_z_s32(__base, __p) __arm_vldrbq_z_s32(__base, __p)
#define vldrbq_z_u16(__base, __p) __arm_vldrbq_z_u16(__base, __p)
#define vldrbq_z_u32(__base, __p) __arm_vldrbq_z_u32(__base, __p)
#define vldrwq_gather_base_z_u32(__addr, __offset, __p) __arm_vldrwq_gather_base_z_u32(__addr, __offset, __p)
#define vldrwq_gather_base_z_s32(__addr, __offset, __p) __arm_vldrwq_gather_base_z_s32(__addr, __offset, __p)
/* User-namespace aliases for MVE contiguous loads (vld1q), halfword loads
   (vldrhq) with plain/shifted gather offsets and "_z" zero-predicated
   variants, and word loads (vldrwq), including the f16/f32 forms.  Each
   macro forwards its arguments, unchanged and in order, to the
   __arm_-prefixed implementation declared elsewhere in this header.  */
#define vld1q_s8(__base) __arm_vld1q_s8(__base)
#define vld1q_s32(__base) __arm_vld1q_s32(__base)
#define vld1q_s16(__base) __arm_vld1q_s16(__base)
#define vld1q_u8(__base) __arm_vld1q_u8(__base)
#define vld1q_u32(__base) __arm_vld1q_u32(__base)
#define vld1q_u16(__base) __arm_vld1q_u16(__base)
#define vldrhq_gather_offset_s32(__base, __offset) __arm_vldrhq_gather_offset_s32(__base, __offset)
#define vldrhq_gather_offset_s16(__base, __offset) __arm_vldrhq_gather_offset_s16(__base, __offset)
#define vldrhq_gather_offset_u32(__base, __offset) __arm_vldrhq_gather_offset_u32(__base, __offset)
#define vldrhq_gather_offset_u16(__base, __offset) __arm_vldrhq_gather_offset_u16(__base, __offset)
#define vldrhq_gather_offset_z_s32(__base, __offset, __p) __arm_vldrhq_gather_offset_z_s32(__base, __offset, __p)
#define vldrhq_gather_offset_z_s16(__base, __offset, __p) __arm_vldrhq_gather_offset_z_s16(__base, __offset, __p)
#define vldrhq_gather_offset_z_u32(__base, __offset, __p) __arm_vldrhq_gather_offset_z_u32(__base, __offset, __p)
#define vldrhq_gather_offset_z_u16(__base, __offset, __p) __arm_vldrhq_gather_offset_z_u16(__base, __offset, __p)
#define vldrhq_gather_shifted_offset_s32(__base, __offset) __arm_vldrhq_gather_shifted_offset_s32(__base, __offset)
#define vldrhq_gather_shifted_offset_s16(__base, __offset) __arm_vldrhq_gather_shifted_offset_s16(__base, __offset)
#define vldrhq_gather_shifted_offset_u32(__base, __offset) __arm_vldrhq_gather_shifted_offset_u32(__base, __offset)
#define vldrhq_gather_shifted_offset_u16(__base, __offset) __arm_vldrhq_gather_shifted_offset_u16(__base, __offset)
#define vldrhq_gather_shifted_offset_z_s32(__base, __offset, __p) __arm_vldrhq_gather_shifted_offset_z_s32(__base, __offset, __p)
#define vldrhq_gather_shifted_offset_z_s16(__base, __offset, __p) __arm_vldrhq_gather_shifted_offset_z_s16(__base, __offset, __p)
#define vldrhq_gather_shifted_offset_z_u32(__base, __offset, __p) __arm_vldrhq_gather_shifted_offset_z_u32(__base, __offset, __p)
#define vldrhq_gather_shifted_offset_z_u16(__base, __offset, __p) __arm_vldrhq_gather_shifted_offset_z_u16(__base, __offset, __p)
#define vldrhq_s32(__base) __arm_vldrhq_s32(__base)
#define vldrhq_s16(__base) __arm_vldrhq_s16(__base)
#define vldrhq_u32(__base) __arm_vldrhq_u32(__base)
#define vldrhq_u16(__base) __arm_vldrhq_u16(__base)
#define vldrhq_z_s32(__base, __p) __arm_vldrhq_z_s32(__base, __p)
#define vldrhq_z_s16(__base, __p) __arm_vldrhq_z_s16(__base, __p)
#define vldrhq_z_u32(__base, __p) __arm_vldrhq_z_u32(__base, __p)
#define vldrhq_z_u16(__base, __p) __arm_vldrhq_z_u16(__base, __p)
#define vldrwq_s32(__base) __arm_vldrwq_s32(__base)
#define vldrwq_u32(__base) __arm_vldrwq_u32(__base)
#define vldrwq_z_s32(__base, __p) __arm_vldrwq_z_s32(__base, __p)
#define vldrwq_z_u32(__base, __p) __arm_vldrwq_z_u32(__base, __p)
#define vld1q_f32(__base) __arm_vld1q_f32(__base)
#define vld1q_f16(__base) __arm_vld1q_f16(__base)
#define vldrhq_f16(__base) __arm_vldrhq_f16(__base)
#define vldrhq_z_f16(__base, __p) __arm_vldrhq_z_f16(__base, __p)
#define vldrwq_f32(__base) __arm_vldrwq_f32(__base)
#define vldrwq_z_f32(__base, __p) __arm_vldrwq_z_f32(__base, __p)
/* User-namespace aliases for MVE doubleword gather loads (vldrdq) with base,
   offset and shifted-offset addressing plus "_z" zero-predicated variants,
   and the halfword/word floating-point and integer gather loads.  Each macro
   forwards its arguments, unchanged and in order, to the __arm_-prefixed
   implementation declared elsewhere in this header.  */
#define vldrdq_gather_base_s64(__addr, __offset) __arm_vldrdq_gather_base_s64(__addr, __offset)
#define vldrdq_gather_base_u64(__addr, __offset) __arm_vldrdq_gather_base_u64(__addr, __offset)
#define vldrdq_gather_base_z_s64(__addr, __offset, __p) __arm_vldrdq_gather_base_z_s64(__addr, __offset, __p)
#define vldrdq_gather_base_z_u64(__addr, __offset, __p) __arm_vldrdq_gather_base_z_u64(__addr, __offset, __p)
#define vldrdq_gather_offset_s64(__base, __offset) __arm_vldrdq_gather_offset_s64(__base, __offset)
#define vldrdq_gather_offset_u64(__base, __offset) __arm_vldrdq_gather_offset_u64(__base, __offset)
#define vldrdq_gather_offset_z_s64(__base, __offset, __p) __arm_vldrdq_gather_offset_z_s64(__base, __offset, __p)
#define vldrdq_gather_offset_z_u64(__base, __offset, __p) __arm_vldrdq_gather_offset_z_u64(__base, __offset, __p)
#define vldrdq_gather_shifted_offset_s64(__base, __offset) __arm_vldrdq_gather_shifted_offset_s64(__base, __offset)
#define vldrdq_gather_shifted_offset_u64(__base, __offset) __arm_vldrdq_gather_shifted_offset_u64(__base, __offset)
#define vldrdq_gather_shifted_offset_z_s64(__base, __offset, __p) __arm_vldrdq_gather_shifted_offset_z_s64(__base, __offset, __p)
#define vldrdq_gather_shifted_offset_z_u64(__base, __offset, __p) __arm_vldrdq_gather_shifted_offset_z_u64(__base, __offset, __p)
#define vldrhq_gather_offset_f16(__base, __offset) __arm_vldrhq_gather_offset_f16(__base, __offset)
#define vldrhq_gather_offset_z_f16(__base, __offset, __p) __arm_vldrhq_gather_offset_z_f16(__base, __offset, __p)
#define vldrhq_gather_shifted_offset_f16(__base, __offset) __arm_vldrhq_gather_shifted_offset_f16(__base, __offset)
#define vldrhq_gather_shifted_offset_z_f16(__base, __offset, __p) __arm_vldrhq_gather_shifted_offset_z_f16(__base, __offset, __p)
#define vldrwq_gather_base_f32(__addr, __offset) __arm_vldrwq_gather_base_f32(__addr, __offset)
#define vldrwq_gather_base_z_f32(__addr, __offset, __p) __arm_vldrwq_gather_base_z_f32(__addr, __offset, __p)
#define vldrwq_gather_offset_f32(__base, __offset) __arm_vldrwq_gather_offset_f32(__base, __offset)
#define vldrwq_gather_offset_s32(__base, __offset) __arm_vldrwq_gather_offset_s32(__base, __offset)
#define vldrwq_gather_offset_u32(__base, __offset) __arm_vldrwq_gather_offset_u32(__base, __offset)
#define vldrwq_gather_offset_z_f32(__base, __offset, __p) __arm_vldrwq_gather_offset_z_f32(__base, __offset, __p)
#define vldrwq_gather_offset_z_s32(__base, __offset, __p) __arm_vldrwq_gather_offset_z_s32(__base, __offset, __p)
#define vldrwq_gather_offset_z_u32(__base, __offset, __p) __arm_vldrwq_gather_offset_z_u32(__base, __offset, __p)
#define vldrwq_gather_shifted_offset_f32(__base, __offset) __arm_vldrwq_gather_shifted_offset_f32(__base, __offset)
#define vldrwq_gather_shifted_offset_s32(__base, __offset) __arm_vldrwq_gather_shifted_offset_s32(__base, __offset)
#define vldrwq_gather_shifted_offset_u32(__base, __offset) __arm_vldrwq_gather_shifted_offset_u32(__base, __offset)
#define vldrwq_gather_shifted_offset_z_f32(__base, __offset, __p) __arm_vldrwq_gather_shifted_offset_z_f32(__base, __offset, __p)
#define vldrwq_gather_shifted_offset_z_s32(__base, __offset, __p) __arm_vldrwq_gather_shifted_offset_z_s32(__base, __offset, __p)
#define vldrwq_gather_shifted_offset_z_u32(__base, __offset, __p) __arm_vldrwq_gather_shifted_offset_z_u32(__base, __offset, __p)
/* Contiguous store intrinsics (vst1q/vstrhq/vstrwq) and half-word
   scatter-store aliases.  The _p variants take a predicate __p.  */
#define vst1q_f32(__addr, __value) __arm_vst1q_f32(__addr, __value)
#define vst1q_f16(__addr, __value) __arm_vst1q_f16(__addr, __value)
#define vst1q_s8(__addr, __value) __arm_vst1q_s8(__addr, __value)
#define vst1q_s32(__addr, __value) __arm_vst1q_s32(__addr, __value)
#define vst1q_s16(__addr, __value) __arm_vst1q_s16(__addr, __value)
#define vst1q_u8(__addr, __value) __arm_vst1q_u8(__addr, __value)
#define vst1q_u32(__addr, __value) __arm_vst1q_u32(__addr, __value)
#define vst1q_u16(__addr, __value) __arm_vst1q_u16(__addr, __value)
#define vstrhq_f16(__addr, __value) __arm_vstrhq_f16(__addr, __value)
#define vstrhq_scatter_offset_s32( __base, __offset, __value) __arm_vstrhq_scatter_offset_s32( __base, __offset, __value)
#define vstrhq_scatter_offset_s16( __base, __offset, __value) __arm_vstrhq_scatter_offset_s16( __base, __offset, __value)
#define vstrhq_scatter_offset_u32( __base, __offset, __value) __arm_vstrhq_scatter_offset_u32( __base, __offset, __value)
#define vstrhq_scatter_offset_u16( __base, __offset, __value) __arm_vstrhq_scatter_offset_u16( __base, __offset, __value)
#define vstrhq_scatter_offset_p_s32( __base, __offset, __value, __p) __arm_vstrhq_scatter_offset_p_s32( __base, __offset, __value, __p)
#define vstrhq_scatter_offset_p_s16( __base, __offset, __value, __p) __arm_vstrhq_scatter_offset_p_s16( __base, __offset, __value, __p)
#define vstrhq_scatter_offset_p_u32( __base, __offset, __value, __p) __arm_vstrhq_scatter_offset_p_u32( __base, __offset, __value, __p)
#define vstrhq_scatter_offset_p_u16( __base, __offset, __value, __p) __arm_vstrhq_scatter_offset_p_u16( __base, __offset, __value, __p)
#define vstrhq_scatter_shifted_offset_s32( __base, __offset, __value) __arm_vstrhq_scatter_shifted_offset_s32( __base, __offset, __value)
#define vstrhq_scatter_shifted_offset_s16( __base, __offset, __value) __arm_vstrhq_scatter_shifted_offset_s16( __base, __offset, __value)
#define vstrhq_scatter_shifted_offset_u32( __base, __offset, __value) __arm_vstrhq_scatter_shifted_offset_u32( __base, __offset, __value)
#define vstrhq_scatter_shifted_offset_u16( __base, __offset, __value) __arm_vstrhq_scatter_shifted_offset_u16( __base, __offset, __value)
#define vstrhq_scatter_shifted_offset_p_s32( __base, __offset, __value, __p) __arm_vstrhq_scatter_shifted_offset_p_s32( __base, __offset, __value, __p)
#define vstrhq_scatter_shifted_offset_p_s16( __base, __offset, __value, __p) __arm_vstrhq_scatter_shifted_offset_p_s16( __base, __offset, __value, __p)
#define vstrhq_scatter_shifted_offset_p_u32( __base, __offset, __value, __p) __arm_vstrhq_scatter_shifted_offset_p_u32( __base, __offset, __value, __p)
#define vstrhq_scatter_shifted_offset_p_u16( __base, __offset, __value, __p) __arm_vstrhq_scatter_shifted_offset_p_u16( __base, __offset, __value, __p)
#define vstrhq_s32(__addr, __value) __arm_vstrhq_s32(__addr, __value)
#define vstrhq_s16(__addr, __value) __arm_vstrhq_s16(__addr, __value)
#define vstrhq_u32(__addr, __value) __arm_vstrhq_u32(__addr, __value)
#define vstrhq_u16(__addr, __value) __arm_vstrhq_u16(__addr, __value)
#define vstrhq_p_f16(__addr, __value, __p) __arm_vstrhq_p_f16(__addr, __value, __p)
#define vstrhq_p_s32(__addr, __value, __p) __arm_vstrhq_p_s32(__addr, __value, __p)
#define vstrhq_p_s16(__addr, __value, __p) __arm_vstrhq_p_s16(__addr, __value, __p)
#define vstrhq_p_u32(__addr, __value, __p) __arm_vstrhq_p_u32(__addr, __value, __p)
#define vstrhq_p_u16(__addr, __value, __p) __arm_vstrhq_p_u16(__addr, __value, __p)
#define vstrwq_f32(__addr, __value) __arm_vstrwq_f32(__addr, __value)
#define vstrwq_s32(__addr, __value) __arm_vstrwq_s32(__addr, __value)
#define vstrwq_u32(__addr, __value) __arm_vstrwq_u32(__addr, __value)
#define vstrwq_p_f32(__addr, __value, __p) __arm_vstrwq_p_f32(__addr, __value, __p)
#define vstrwq_p_s32(__addr, __value, __p) __arm_vstrwq_p_s32(__addr, __value, __p)
#define vstrwq_p_u32(__addr, __value, __p) __arm_vstrwq_p_u32(__addr, __value, __p)
/* Scatter-store intrinsics (vstrdq/vstrhq/vstrwq scatter forms), including
   the float variants.  _base forms take a vector of addresses; _offset and
   _shifted_offset forms take a base pointer plus an offset vector.  */
#define vstrdq_scatter_base_p_s64(__addr, __offset, __value, __p) __arm_vstrdq_scatter_base_p_s64(__addr, __offset, __value, __p)
#define vstrdq_scatter_base_p_u64(__addr, __offset, __value, __p) __arm_vstrdq_scatter_base_p_u64(__addr, __offset, __value, __p)
#define vstrdq_scatter_base_s64(__addr, __offset, __value) __arm_vstrdq_scatter_base_s64(__addr, __offset, __value)
#define vstrdq_scatter_base_u64(__addr, __offset, __value) __arm_vstrdq_scatter_base_u64(__addr, __offset, __value)
#define vstrdq_scatter_offset_p_s64(__base, __offset, __value, __p) __arm_vstrdq_scatter_offset_p_s64(__base, __offset, __value, __p)
#define vstrdq_scatter_offset_p_u64(__base, __offset, __value, __p) __arm_vstrdq_scatter_offset_p_u64(__base, __offset, __value, __p)
#define vstrdq_scatter_offset_s64(__base, __offset, __value) __arm_vstrdq_scatter_offset_s64(__base, __offset, __value)
#define vstrdq_scatter_offset_u64(__base, __offset, __value) __arm_vstrdq_scatter_offset_u64(__base, __offset, __value)
#define vstrdq_scatter_shifted_offset_p_s64(__base, __offset, __value, __p) __arm_vstrdq_scatter_shifted_offset_p_s64(__base, __offset, __value, __p)
#define vstrdq_scatter_shifted_offset_p_u64(__base, __offset, __value, __p) __arm_vstrdq_scatter_shifted_offset_p_u64(__base, __offset, __value, __p)
#define vstrdq_scatter_shifted_offset_s64(__base, __offset, __value) __arm_vstrdq_scatter_shifted_offset_s64(__base, __offset, __value)
#define vstrdq_scatter_shifted_offset_u64(__base, __offset, __value) __arm_vstrdq_scatter_shifted_offset_u64(__base, __offset, __value)
#define vstrhq_scatter_offset_f16(__base, __offset, __value) __arm_vstrhq_scatter_offset_f16(__base, __offset, __value)
#define vstrhq_scatter_offset_p_f16(__base, __offset, __value, __p) __arm_vstrhq_scatter_offset_p_f16(__base, __offset, __value, __p)
#define vstrhq_scatter_shifted_offset_f16(__base, __offset, __value) __arm_vstrhq_scatter_shifted_offset_f16(__base, __offset, __value)
#define vstrhq_scatter_shifted_offset_p_f16(__base, __offset, __value, __p) __arm_vstrhq_scatter_shifted_offset_p_f16(__base, __offset, __value, __p)
#define vstrwq_scatter_base_f32(__addr, __offset, __value) __arm_vstrwq_scatter_base_f32(__addr, __offset, __value)
#define vstrwq_scatter_base_p_f32(__addr, __offset, __value, __p) __arm_vstrwq_scatter_base_p_f32(__addr, __offset, __value, __p)
#define vstrwq_scatter_offset_f32(__base, __offset, __value) __arm_vstrwq_scatter_offset_f32(__base, __offset, __value)
#define vstrwq_scatter_offset_p_f32(__base, __offset, __value, __p) __arm_vstrwq_scatter_offset_p_f32(__base, __offset, __value, __p)
#define vstrwq_scatter_offset_p_s32(__base, __offset, __value, __p) __arm_vstrwq_scatter_offset_p_s32(__base, __offset, __value, __p)
#define vstrwq_scatter_offset_p_u32(__base, __offset, __value, __p) __arm_vstrwq_scatter_offset_p_u32(__base, __offset, __value, __p)
#define vstrwq_scatter_offset_s32(__base, __offset, __value) __arm_vstrwq_scatter_offset_s32(__base, __offset, __value)
#define vstrwq_scatter_offset_u32(__base, __offset, __value) __arm_vstrwq_scatter_offset_u32(__base, __offset, __value)
#define vstrwq_scatter_shifted_offset_f32(__base, __offset, __value) __arm_vstrwq_scatter_shifted_offset_f32(__base, __offset, __value)
#define vstrwq_scatter_shifted_offset_p_f32(__base, __offset, __value, __p) __arm_vstrwq_scatter_shifted_offset_p_f32(__base, __offset, __value, __p)
#define vstrwq_scatter_shifted_offset_p_s32(__base, __offset, __value, __p) __arm_vstrwq_scatter_shifted_offset_p_s32(__base, __offset, __value, __p)
#define vstrwq_scatter_shifted_offset_p_u32(__base, __offset, __value, __p) __arm_vstrwq_scatter_shifted_offset_p_u32(__base, __offset, __value, __p)
#define vstrwq_scatter_shifted_offset_s32(__base, __offset, __value) __arm_vstrwq_scatter_shifted_offset_s32(__base, __offset, __value)
#define vstrwq_scatter_shifted_offset_u32(__base, __offset, __value) __arm_vstrwq_scatter_shifted_offset_u32(__base, __offset, __value)
/* Type-suffixed vector addition aliases.  */
#define vaddq_s8(__a, __b) __arm_vaddq_s8(__a, __b)
#define vaddq_s16(__a, __b) __arm_vaddq_s16(__a, __b)
#define vaddq_s32(__a, __b) __arm_vaddq_s32(__a, __b)
#define vaddq_u8(__a, __b) __arm_vaddq_u8(__a, __b)
#define vaddq_u16(__a, __b) __arm_vaddq_u16(__a, __b)
#define vaddq_u32(__a, __b) __arm_vaddq_u32(__a, __b)
#define vaddq_f16(__a, __b) __arm_vaddq_f16(__a, __b)
#define vaddq_f32(__a, __b) __arm_vaddq_f32(__a, __b)
/* Bit-cast (vreinterpretq_<dst>_<src>) and uninitialized-vector
   (vuninitializedq_<type>) aliases.  Note: upstream deliberately spells the
   vuninitializedq parameter list as "(void)"; keep it byte-identical.  */
#define vreinterpretq_s16_s32(__a) __arm_vreinterpretq_s16_s32(__a)
#define vreinterpretq_s16_s64(__a) __arm_vreinterpretq_s16_s64(__a)
#define vreinterpretq_s16_s8(__a) __arm_vreinterpretq_s16_s8(__a)
#define vreinterpretq_s16_u16(__a) __arm_vreinterpretq_s16_u16(__a)
#define vreinterpretq_s16_u32(__a) __arm_vreinterpretq_s16_u32(__a)
#define vreinterpretq_s16_u64(__a) __arm_vreinterpretq_s16_u64(__a)
#define vreinterpretq_s16_u8(__a) __arm_vreinterpretq_s16_u8(__a)
#define vreinterpretq_s32_s16(__a) __arm_vreinterpretq_s32_s16(__a)
#define vreinterpretq_s32_s64(__a) __arm_vreinterpretq_s32_s64(__a)
#define vreinterpretq_s32_s8(__a) __arm_vreinterpretq_s32_s8(__a)
#define vreinterpretq_s32_u16(__a) __arm_vreinterpretq_s32_u16(__a)
#define vreinterpretq_s32_u32(__a) __arm_vreinterpretq_s32_u32(__a)
#define vreinterpretq_s32_u64(__a) __arm_vreinterpretq_s32_u64(__a)
#define vreinterpretq_s32_u8(__a) __arm_vreinterpretq_s32_u8(__a)
#define vreinterpretq_s64_s16(__a) __arm_vreinterpretq_s64_s16(__a)
#define vreinterpretq_s64_s32(__a) __arm_vreinterpretq_s64_s32(__a)
#define vreinterpretq_s64_s8(__a) __arm_vreinterpretq_s64_s8(__a)
#define vreinterpretq_s64_u16(__a) __arm_vreinterpretq_s64_u16(__a)
#define vreinterpretq_s64_u32(__a) __arm_vreinterpretq_s64_u32(__a)
#define vreinterpretq_s64_u64(__a) __arm_vreinterpretq_s64_u64(__a)
#define vreinterpretq_s64_u8(__a) __arm_vreinterpretq_s64_u8(__a)
#define vreinterpretq_s8_s16(__a) __arm_vreinterpretq_s8_s16(__a)
#define vreinterpretq_s8_s32(__a) __arm_vreinterpretq_s8_s32(__a)
#define vreinterpretq_s8_s64(__a) __arm_vreinterpretq_s8_s64(__a)
#define vreinterpretq_s8_u16(__a) __arm_vreinterpretq_s8_u16(__a)
#define vreinterpretq_s8_u32(__a) __arm_vreinterpretq_s8_u32(__a)
#define vreinterpretq_s8_u64(__a) __arm_vreinterpretq_s8_u64(__a)
#define vreinterpretq_s8_u8(__a) __arm_vreinterpretq_s8_u8(__a)
#define vreinterpretq_u16_s16(__a) __arm_vreinterpretq_u16_s16(__a)
#define vreinterpretq_u16_s32(__a) __arm_vreinterpretq_u16_s32(__a)
#define vreinterpretq_u16_s64(__a) __arm_vreinterpretq_u16_s64(__a)
#define vreinterpretq_u16_s8(__a) __arm_vreinterpretq_u16_s8(__a)
#define vreinterpretq_u16_u32(__a) __arm_vreinterpretq_u16_u32(__a)
#define vreinterpretq_u16_u64(__a) __arm_vreinterpretq_u16_u64(__a)
#define vreinterpretq_u16_u8(__a) __arm_vreinterpretq_u16_u8(__a)
#define vreinterpretq_u32_s16(__a) __arm_vreinterpretq_u32_s16(__a)
#define vreinterpretq_u32_s32(__a) __arm_vreinterpretq_u32_s32(__a)
#define vreinterpretq_u32_s64(__a) __arm_vreinterpretq_u32_s64(__a)
#define vreinterpretq_u32_s8(__a) __arm_vreinterpretq_u32_s8(__a)
#define vreinterpretq_u32_u16(__a) __arm_vreinterpretq_u32_u16(__a)
#define vreinterpretq_u32_u64(__a) __arm_vreinterpretq_u32_u64(__a)
#define vreinterpretq_u32_u8(__a) __arm_vreinterpretq_u32_u8(__a)
#define vreinterpretq_u64_s16(__a) __arm_vreinterpretq_u64_s16(__a)
#define vreinterpretq_u64_s32(__a) __arm_vreinterpretq_u64_s32(__a)
#define vreinterpretq_u64_s64(__a) __arm_vreinterpretq_u64_s64(__a)
#define vreinterpretq_u64_s8(__a) __arm_vreinterpretq_u64_s8(__a)
#define vreinterpretq_u64_u16(__a) __arm_vreinterpretq_u64_u16(__a)
#define vreinterpretq_u64_u32(__a) __arm_vreinterpretq_u64_u32(__a)
#define vreinterpretq_u64_u8(__a) __arm_vreinterpretq_u64_u8(__a)
#define vreinterpretq_u8_s16(__a) __arm_vreinterpretq_u8_s16(__a)
#define vreinterpretq_u8_s32(__a) __arm_vreinterpretq_u8_s32(__a)
#define vreinterpretq_u8_s64(__a) __arm_vreinterpretq_u8_s64(__a)
#define vreinterpretq_u8_s8(__a) __arm_vreinterpretq_u8_s8(__a)
#define vreinterpretq_u8_u16(__a) __arm_vreinterpretq_u8_u16(__a)
#define vreinterpretq_u8_u32(__a) __arm_vreinterpretq_u8_u32(__a)
#define vreinterpretq_u8_u64(__a) __arm_vreinterpretq_u8_u64(__a)
#define vreinterpretq_s32_f16(__a) __arm_vreinterpretq_s32_f16(__a)
#define vreinterpretq_s32_f32(__a) __arm_vreinterpretq_s32_f32(__a)
#define vreinterpretq_u16_f16(__a) __arm_vreinterpretq_u16_f16(__a)
#define vreinterpretq_u16_f32(__a) __arm_vreinterpretq_u16_f32(__a)
#define vreinterpretq_u32_f16(__a) __arm_vreinterpretq_u32_f16(__a)
#define vreinterpretq_u32_f32(__a) __arm_vreinterpretq_u32_f32(__a)
#define vreinterpretq_u64_f16(__a) __arm_vreinterpretq_u64_f16(__a)
#define vreinterpretq_u64_f32(__a) __arm_vreinterpretq_u64_f32(__a)
#define vreinterpretq_u8_f16(__a) __arm_vreinterpretq_u8_f16(__a)
#define vreinterpretq_u8_f32(__a) __arm_vreinterpretq_u8_f32(__a)
#define vreinterpretq_f16_f32(__a) __arm_vreinterpretq_f16_f32(__a)
#define vreinterpretq_f16_s16(__a) __arm_vreinterpretq_f16_s16(__a)
#define vreinterpretq_f16_s32(__a) __arm_vreinterpretq_f16_s32(__a)
#define vreinterpretq_f16_s64(__a) __arm_vreinterpretq_f16_s64(__a)
#define vreinterpretq_f16_s8(__a) __arm_vreinterpretq_f16_s8(__a)
#define vreinterpretq_f16_u16(__a) __arm_vreinterpretq_f16_u16(__a)
#define vreinterpretq_f16_u32(__a) __arm_vreinterpretq_f16_u32(__a)
#define vreinterpretq_f16_u64(__a) __arm_vreinterpretq_f16_u64(__a)
#define vreinterpretq_f16_u8(__a) __arm_vreinterpretq_f16_u8(__a)
#define vreinterpretq_f32_f16(__a) __arm_vreinterpretq_f32_f16(__a)
#define vreinterpretq_f32_s16(__a) __arm_vreinterpretq_f32_s16(__a)
#define vreinterpretq_f32_s32(__a) __arm_vreinterpretq_f32_s32(__a)
#define vreinterpretq_f32_s64(__a) __arm_vreinterpretq_f32_s64(__a)
#define vreinterpretq_f32_s8(__a) __arm_vreinterpretq_f32_s8(__a)
#define vreinterpretq_f32_u16(__a) __arm_vreinterpretq_f32_u16(__a)
#define vreinterpretq_f32_u32(__a) __arm_vreinterpretq_f32_u32(__a)
#define vreinterpretq_f32_u64(__a) __arm_vreinterpretq_f32_u64(__a)
#define vreinterpretq_f32_u8(__a) __arm_vreinterpretq_f32_u8(__a)
#define vreinterpretq_s16_f16(__a) __arm_vreinterpretq_s16_f16(__a)
#define vreinterpretq_s16_f32(__a) __arm_vreinterpretq_s16_f32(__a)
#define vreinterpretq_s64_f16(__a) __arm_vreinterpretq_s64_f16(__a)
#define vreinterpretq_s64_f32(__a) __arm_vreinterpretq_s64_f32(__a)
#define vreinterpretq_s8_f16(__a) __arm_vreinterpretq_s8_f16(__a)
#define vreinterpretq_s8_f32(__a) __arm_vreinterpretq_s8_f32(__a)
#define vuninitializedq_u8(void) __arm_vuninitializedq_u8(void)
#define vuninitializedq_u16(void) __arm_vuninitializedq_u16(void)
#define vuninitializedq_u32(void) __arm_vuninitializedq_u32(void)
#define vuninitializedq_u64(void) __arm_vuninitializedq_u64(void)
#define vuninitializedq_s8(void) __arm_vuninitializedq_s8(void)
#define vuninitializedq_s16(void) __arm_vuninitializedq_s16(void)
#define vuninitializedq_s32(void) __arm_vuninitializedq_s32(void)
#define vuninitializedq_s64(void) __arm_vuninitializedq_s64(void)
#define vuninitializedq_f16(void) __arm_vuninitializedq_f16(void)
#define vuninitializedq_f32(void) __arm_vuninitializedq_f32(void)
/* Incrementing/decrementing (wrapping) duplicate intrinsics:
   vddupq/vdwdupq/vidupq/viwdupq.  _n forms take a scalar start value, _wb
   forms take a pointer and write the updated start value back; _m forms are
   merging-predicated.  */
#define vddupq_m_n_u8(__inactive, __a, __imm, __p) __arm_vddupq_m_n_u8(__inactive, __a, __imm, __p)
#define vddupq_m_n_u32(__inactive, __a, __imm, __p) __arm_vddupq_m_n_u32(__inactive, __a, __imm, __p)
#define vddupq_m_n_u16(__inactive, __a, __imm, __p) __arm_vddupq_m_n_u16(__inactive, __a, __imm, __p)
#define vddupq_m_wb_u8(__inactive, __a, __imm, __p) __arm_vddupq_m_wb_u8(__inactive, __a, __imm, __p)
#define vddupq_m_wb_u16(__inactive, __a, __imm, __p) __arm_vddupq_m_wb_u16(__inactive, __a, __imm, __p)
#define vddupq_m_wb_u32(__inactive, __a, __imm, __p) __arm_vddupq_m_wb_u32(__inactive, __a, __imm, __p)
#define vddupq_n_u8(__a, __imm) __arm_vddupq_n_u8(__a, __imm)
#define vddupq_n_u32(__a, __imm) __arm_vddupq_n_u32(__a, __imm)
#define vddupq_n_u16(__a, __imm) __arm_vddupq_n_u16(__a, __imm)
#define vddupq_wb_u8( __a, __imm) __arm_vddupq_wb_u8( __a, __imm)
#define vddupq_wb_u16( __a, __imm) __arm_vddupq_wb_u16( __a, __imm)
#define vddupq_wb_u32( __a, __imm) __arm_vddupq_wb_u32( __a, __imm)
#define vdwdupq_m_n_u8(__inactive, __a, __b, __imm, __p) __arm_vdwdupq_m_n_u8(__inactive, __a, __b, __imm, __p)
#define vdwdupq_m_n_u32(__inactive, __a, __b, __imm, __p) __arm_vdwdupq_m_n_u32(__inactive, __a, __b, __imm, __p)
#define vdwdupq_m_n_u16(__inactive, __a, __b, __imm, __p) __arm_vdwdupq_m_n_u16(__inactive, __a, __b, __imm, __p)
#define vdwdupq_m_wb_u8(__inactive, __a, __b, __imm, __p) __arm_vdwdupq_m_wb_u8(__inactive, __a, __b, __imm, __p)
#define vdwdupq_m_wb_u32(__inactive, __a, __b, __imm, __p) __arm_vdwdupq_m_wb_u32(__inactive, __a, __b, __imm, __p)
#define vdwdupq_m_wb_u16(__inactive, __a, __b, __imm, __p) __arm_vdwdupq_m_wb_u16(__inactive, __a, __b, __imm, __p)
#define vdwdupq_n_u8(__a, __b, __imm) __arm_vdwdupq_n_u8(__a, __b, __imm)
#define vdwdupq_n_u32(__a, __b, __imm) __arm_vdwdupq_n_u32(__a, __b, __imm)
#define vdwdupq_n_u16(__a, __b, __imm) __arm_vdwdupq_n_u16(__a, __b, __imm)
#define vdwdupq_wb_u8( __a, __b, __imm) __arm_vdwdupq_wb_u8( __a, __b, __imm)
#define vdwdupq_wb_u32( __a, __b, __imm) __arm_vdwdupq_wb_u32( __a, __b, __imm)
#define vdwdupq_wb_u16( __a, __b, __imm) __arm_vdwdupq_wb_u16( __a, __b, __imm)
#define vidupq_m_n_u8(__inactive, __a, __imm, __p) __arm_vidupq_m_n_u8(__inactive, __a, __imm, __p)
#define vidupq_m_n_u32(__inactive, __a, __imm, __p) __arm_vidupq_m_n_u32(__inactive, __a, __imm, __p)
#define vidupq_m_n_u16(__inactive, __a, __imm, __p) __arm_vidupq_m_n_u16(__inactive, __a, __imm, __p)
#define vidupq_m_wb_u8(__inactive, __a, __imm, __p) __arm_vidupq_m_wb_u8(__inactive, __a, __imm, __p)
#define vidupq_m_wb_u16(__inactive, __a, __imm, __p) __arm_vidupq_m_wb_u16(__inactive, __a, __imm, __p)
#define vidupq_m_wb_u32(__inactive, __a, __imm, __p) __arm_vidupq_m_wb_u32(__inactive, __a, __imm, __p)
#define vidupq_n_u8(__a, __imm) __arm_vidupq_n_u8(__a, __imm)
#define vidupq_n_u32(__a, __imm) __arm_vidupq_n_u32(__a, __imm)
#define vidupq_n_u16(__a, __imm) __arm_vidupq_n_u16(__a, __imm)
#define vidupq_wb_u8( __a, __imm) __arm_vidupq_wb_u8( __a, __imm)
#define vidupq_wb_u16( __a, __imm) __arm_vidupq_wb_u16( __a, __imm)
#define vidupq_wb_u32( __a, __imm) __arm_vidupq_wb_u32( __a, __imm)
#define viwdupq_m_n_u8(__inactive, __a, __b, __imm, __p) __arm_viwdupq_m_n_u8(__inactive, __a, __b, __imm, __p)
#define viwdupq_m_n_u32(__inactive, __a, __b, __imm, __p) __arm_viwdupq_m_n_u32(__inactive, __a, __b, __imm, __p)
#define viwdupq_m_n_u16(__inactive, __a, __b, __imm, __p) __arm_viwdupq_m_n_u16(__inactive, __a, __b, __imm, __p)
#define viwdupq_m_wb_u8(__inactive, __a, __b, __imm, __p) __arm_viwdupq_m_wb_u8(__inactive, __a, __b, __imm, __p)
#define viwdupq_m_wb_u32(__inactive, __a, __b, __imm, __p) __arm_viwdupq_m_wb_u32(__inactive, __a, __b, __imm, __p)
#define viwdupq_m_wb_u16(__inactive, __a, __b, __imm, __p) __arm_viwdupq_m_wb_u16(__inactive, __a, __b, __imm, __p)
#define viwdupq_n_u8(__a, __b, __imm) __arm_viwdupq_n_u8(__a, __b, __imm)
#define viwdupq_n_u32(__a, __b, __imm) __arm_viwdupq_n_u32(__a, __b, __imm)
#define viwdupq_n_u16(__a, __b, __imm) __arm_viwdupq_n_u16(__a, __b, __imm)
#define viwdupq_wb_u8( __a, __b, __imm) __arm_viwdupq_wb_u8( __a, __b, __imm)
#define viwdupq_wb_u32( __a, __b, __imm) __arm_viwdupq_wb_u32( __a, __b, __imm)
#define viwdupq_wb_u16( __a, __b, __imm) __arm_viwdupq_wb_u16( __a, __b, __imm)
/* Write-back (_wb) base-addressed gather-load and scatter-store aliases:
   the address vector is updated by __offset after the access.  */
#define vldrdq_gather_base_wb_s64(__addr, __offset) __arm_vldrdq_gather_base_wb_s64(__addr, __offset)
#define vldrdq_gather_base_wb_u64(__addr, __offset) __arm_vldrdq_gather_base_wb_u64(__addr, __offset)
#define vldrdq_gather_base_wb_z_s64(__addr, __offset, __p) __arm_vldrdq_gather_base_wb_z_s64(__addr, __offset, __p)
#define vldrdq_gather_base_wb_z_u64(__addr, __offset, __p) __arm_vldrdq_gather_base_wb_z_u64(__addr, __offset, __p)
#define vldrwq_gather_base_wb_f32(__addr, __offset) __arm_vldrwq_gather_base_wb_f32(__addr, __offset)
#define vldrwq_gather_base_wb_s32(__addr, __offset) __arm_vldrwq_gather_base_wb_s32(__addr, __offset)
#define vldrwq_gather_base_wb_u32(__addr, __offset) __arm_vldrwq_gather_base_wb_u32(__addr, __offset)
#define vldrwq_gather_base_wb_z_f32(__addr, __offset, __p) __arm_vldrwq_gather_base_wb_z_f32(__addr, __offset, __p)
#define vldrwq_gather_base_wb_z_s32(__addr, __offset, __p) __arm_vldrwq_gather_base_wb_z_s32(__addr, __offset, __p)
#define vldrwq_gather_base_wb_z_u32(__addr, __offset, __p) __arm_vldrwq_gather_base_wb_z_u32(__addr, __offset, __p)
#define vstrdq_scatter_base_wb_p_s64(__addr, __offset, __value, __p) __arm_vstrdq_scatter_base_wb_p_s64(__addr, __offset, __value, __p)
#define vstrdq_scatter_base_wb_p_u64(__addr, __offset, __value, __p) __arm_vstrdq_scatter_base_wb_p_u64(__addr, __offset, __value, __p)
#define vstrdq_scatter_base_wb_s64(__addr, __offset, __value) __arm_vstrdq_scatter_base_wb_s64(__addr, __offset, __value)
#define vstrdq_scatter_base_wb_u64(__addr, __offset, __value) __arm_vstrdq_scatter_base_wb_u64(__addr, __offset, __value)
#define vstrwq_scatter_base_wb_p_s32(__addr, __offset, __value, __p) __arm_vstrwq_scatter_base_wb_p_s32(__addr, __offset, __value, __p)
#define vstrwq_scatter_base_wb_p_f32(__addr, __offset, __value, __p) __arm_vstrwq_scatter_base_wb_p_f32(__addr, __offset, __value, __p)
#define vstrwq_scatter_base_wb_p_u32(__addr, __offset, __value, __p) __arm_vstrwq_scatter_base_wb_p_u32(__addr, __offset, __value, __p)
#define vstrwq_scatter_base_wb_s32(__addr, __offset, __value) __arm_vstrwq_scatter_base_wb_s32(__addr, __offset, __value)
#define vstrwq_scatter_base_wb_u32(__addr, __offset, __value) __arm_vstrwq_scatter_base_wb_u32(__addr, __offset, __value)
#define vstrwq_scatter_base_wb_f32(__addr, __offset, __value) __arm_vstrwq_scatter_base_wb_f32(__addr, __offset, __value)
/* Zeroing-predicated (_x) intrinsic aliases: each takes a trailing
   predicate __p and forwards to the __arm_-prefixed implementation.  */
#define vddupq_x_n_u8(__a, __imm, __p) __arm_vddupq_x_n_u8(__a, __imm, __p)
#define vddupq_x_n_u16(__a, __imm, __p) __arm_vddupq_x_n_u16(__a, __imm, __p)
#define vddupq_x_n_u32(__a, __imm, __p) __arm_vddupq_x_n_u32(__a, __imm, __p)
#define vddupq_x_wb_u8(__a, __imm, __p) __arm_vddupq_x_wb_u8(__a, __imm, __p)
#define vddupq_x_wb_u16(__a, __imm, __p) __arm_vddupq_x_wb_u16(__a, __imm, __p)
#define vddupq_x_wb_u32(__a, __imm, __p) __arm_vddupq_x_wb_u32(__a, __imm, __p)
#define vdwdupq_x_n_u8(__a, __b, __imm, __p) __arm_vdwdupq_x_n_u8(__a, __b, __imm, __p)
#define vdwdupq_x_n_u16(__a, __b, __imm, __p) __arm_vdwdupq_x_n_u16(__a, __b, __imm, __p)
#define vdwdupq_x_n_u32(__a, __b, __imm, __p) __arm_vdwdupq_x_n_u32(__a, __b, __imm, __p)
#define vdwdupq_x_wb_u8(__a, __b, __imm, __p) __arm_vdwdupq_x_wb_u8(__a, __b, __imm, __p)
#define vdwdupq_x_wb_u16(__a, __b, __imm, __p) __arm_vdwdupq_x_wb_u16(__a, __b, __imm, __p)
#define vdwdupq_x_wb_u32(__a, __b, __imm, __p) __arm_vdwdupq_x_wb_u32(__a, __b, __imm, __p)
#define vidupq_x_n_u8(__a, __imm, __p) __arm_vidupq_x_n_u8(__a, __imm, __p)
#define vidupq_x_n_u16(__a, __imm, __p) __arm_vidupq_x_n_u16(__a, __imm, __p)
#define vidupq_x_n_u32(__a, __imm, __p) __arm_vidupq_x_n_u32(__a, __imm, __p)
#define vidupq_x_wb_u8(__a, __imm, __p) __arm_vidupq_x_wb_u8(__a, __imm, __p)
#define vidupq_x_wb_u16(__a, __imm, __p) __arm_vidupq_x_wb_u16(__a, __imm, __p)
#define vidupq_x_wb_u32(__a, __imm, __p) __arm_vidupq_x_wb_u32(__a, __imm, __p)
#define viwdupq_x_n_u8(__a, __b, __imm, __p) __arm_viwdupq_x_n_u8(__a, __b, __imm, __p)
#define viwdupq_x_n_u16(__a, __b, __imm, __p) __arm_viwdupq_x_n_u16(__a, __b, __imm, __p)
#define viwdupq_x_n_u32(__a, __b, __imm, __p) __arm_viwdupq_x_n_u32(__a, __b, __imm, __p)
#define viwdupq_x_wb_u8(__a, __b, __imm, __p) __arm_viwdupq_x_wb_u8(__a, __b, __imm, __p)
#define viwdupq_x_wb_u16(__a, __b, __imm, __p) __arm_viwdupq_x_wb_u16(__a, __b, __imm, __p)
#define viwdupq_x_wb_u32(__a, __b, __imm, __p) __arm_viwdupq_x_wb_u32(__a, __b, __imm, __p)
#define vdupq_x_n_s8(__a, __p) __arm_vdupq_x_n_s8(__a, __p)
#define vdupq_x_n_s16(__a, __p) __arm_vdupq_x_n_s16(__a, __p)
#define vdupq_x_n_s32(__a, __p) __arm_vdupq_x_n_s32(__a, __p)
#define vdupq_x_n_u8(__a, __p) __arm_vdupq_x_n_u8(__a, __p)
#define vdupq_x_n_u16(__a, __p) __arm_vdupq_x_n_u16(__a, __p)
#define vdupq_x_n_u32(__a, __p) __arm_vdupq_x_n_u32(__a, __p)
#define vminq_x_s8(__a, __b, __p) __arm_vminq_x_s8(__a, __b, __p)
#define vminq_x_s16(__a, __b, __p) __arm_vminq_x_s16(__a, __b, __p)
#define vminq_x_s32(__a, __b, __p) __arm_vminq_x_s32(__a, __b, __p)
#define vminq_x_u8(__a, __b, __p) __arm_vminq_x_u8(__a, __b, __p)
#define vminq_x_u16(__a, __b, __p) __arm_vminq_x_u16(__a, __b, __p)
#define vminq_x_u32(__a, __b, __p) __arm_vminq_x_u32(__a, __b, __p)
#define vmaxq_x_s8(__a, __b, __p) __arm_vmaxq_x_s8(__a, __b, __p)
#define vmaxq_x_s16(__a, __b, __p) __arm_vmaxq_x_s16(__a, __b, __p)
#define vmaxq_x_s32(__a, __b, __p) __arm_vmaxq_x_s32(__a, __b, __p)
#define vmaxq_x_u8(__a, __b, __p) __arm_vmaxq_x_u8(__a, __b, __p)
#define vmaxq_x_u16(__a, __b, __p) __arm_vmaxq_x_u16(__a, __b, __p)
#define vmaxq_x_u32(__a, __b, __p) __arm_vmaxq_x_u32(__a, __b, __p)
#define vabdq_x_s8(__a, __b, __p) __arm_vabdq_x_s8(__a, __b, __p)
#define vabdq_x_s16(__a, __b, __p) __arm_vabdq_x_s16(__a, __b, __p)
#define vabdq_x_s32(__a, __b, __p) __arm_vabdq_x_s32(__a, __b, __p)
#define vabdq_x_u8(__a, __b, __p) __arm_vabdq_x_u8(__a, __b, __p)
#define vabdq_x_u16(__a, __b, __p) __arm_vabdq_x_u16(__a, __b, __p)
#define vabdq_x_u32(__a, __b, __p) __arm_vabdq_x_u32(__a, __b, __p)
#define vabsq_x_s8(__a, __p) __arm_vabsq_x_s8(__a, __p)
#define vabsq_x_s16(__a, __p) __arm_vabsq_x_s16(__a, __p)
#define vabsq_x_s32(__a, __p) __arm_vabsq_x_s32(__a, __p)
#define vaddq_x_s8(__a, __b, __p) __arm_vaddq_x_s8(__a, __b, __p)
#define vaddq_x_s16(__a, __b, __p) __arm_vaddq_x_s16(__a, __b, __p)
#define vaddq_x_s32(__a, __b, __p) __arm_vaddq_x_s32(__a, __b, __p)
#define vaddq_x_n_s8(__a, __b, __p) __arm_vaddq_x_n_s8(__a, __b, __p)
#define vaddq_x_n_s16(__a, __b, __p) __arm_vaddq_x_n_s16(__a, __b, __p)
#define vaddq_x_n_s32(__a, __b, __p) __arm_vaddq_x_n_s32(__a, __b, __p)
#define vaddq_x_u8(__a, __b, __p) __arm_vaddq_x_u8(__a, __b, __p)
#define vaddq_x_u16(__a, __b, __p) __arm_vaddq_x_u16(__a, __b, __p)
#define vaddq_x_u32(__a, __b, __p) __arm_vaddq_x_u32(__a, __b, __p)
#define vaddq_x_n_u8(__a, __b, __p) __arm_vaddq_x_n_u8(__a, __b, __p)
#define vaddq_x_n_u16(__a, __b, __p) __arm_vaddq_x_n_u16(__a, __b, __p)
#define vaddq_x_n_u32(__a, __b, __p) __arm_vaddq_x_n_u32(__a, __b, __p)
#define vclsq_x_s8(__a, __p) __arm_vclsq_x_s8(__a, __p)
#define vclsq_x_s16(__a, __p) __arm_vclsq_x_s16(__a, __p)
#define vclsq_x_s32(__a, __p) __arm_vclsq_x_s32(__a, __p)
#define vclzq_x_s8(__a, __p) __arm_vclzq_x_s8(__a, __p)
#define vclzq_x_s16(__a, __p) __arm_vclzq_x_s16(__a, __p)
#define vclzq_x_s32(__a, __p) __arm_vclzq_x_s32(__a, __p)
#define vclzq_x_u8(__a, __p) __arm_vclzq_x_u8(__a, __p)
#define vclzq_x_u16(__a, __p) __arm_vclzq_x_u16(__a, __p)
#define vclzq_x_u32(__a, __p) __arm_vclzq_x_u32(__a, __p)
#define vnegq_x_s8(__a, __p) __arm_vnegq_x_s8(__a, __p)
#define vnegq_x_s16(__a, __p) __arm_vnegq_x_s16(__a, __p)
#define vnegq_x_s32(__a, __p) __arm_vnegq_x_s32(__a, __p)
#define vmulhq_x_s8(__a, __b, __p) __arm_vmulhq_x_s8(__a, __b, __p)
#define vmulhq_x_s16(__a, __b, __p) __arm_vmulhq_x_s16(__a, __b, __p)
#define vmulhq_x_s32(__a, __b, __p) __arm_vmulhq_x_s32(__a, __b, __p)
#define vmulhq_x_u8(__a, __b, __p) __arm_vmulhq_x_u8(__a, __b, __p)
#define vmulhq_x_u16(__a, __b, __p) __arm_vmulhq_x_u16(__a, __b, __p)
#define vmulhq_x_u32(__a, __b, __p) __arm_vmulhq_x_u32(__a, __b, __p)
#define vmullbq_poly_x_p8(__a, __b, __p) __arm_vmullbq_poly_x_p8(__a, __b, __p)
#define vmullbq_poly_x_p16(__a, __b, __p) __arm_vmullbq_poly_x_p16(__a, __b, __p)
#define vmullbq_int_x_s8(__a, __b, __p) __arm_vmullbq_int_x_s8(__a, __b, __p)
#define vmullbq_int_x_s16(__a, __b, __p) __arm_vmullbq_int_x_s16(__a, __b, __p)
#define vmullbq_int_x_s32(__a, __b, __p) __arm_vmullbq_int_x_s32(__a, __b, __p)
#define vmullbq_int_x_u8(__a, __b, __p) __arm_vmullbq_int_x_u8(__a, __b, __p)
#define vmullbq_int_x_u16(__a, __b, __p) __arm_vmullbq_int_x_u16(__a, __b, __p)
#define vmullbq_int_x_u32(__a, __b, __p) __arm_vmullbq_int_x_u32(__a, __b, __p)
#define vmulltq_poly_x_p8(__a, __b, __p) __arm_vmulltq_poly_x_p8(__a, __b, __p)
#define vmulltq_poly_x_p16(__a, __b, __p) __arm_vmulltq_poly_x_p16(__a, __b, __p)
#define vmulltq_int_x_s8(__a, __b, __p) __arm_vmulltq_int_x_s8(__a, __b, __p)
#define vmulltq_int_x_s16(__a, __b, __p) __arm_vmulltq_int_x_s16(__a, __b, __p)
#define vmulltq_int_x_s32(__a, __b, __p) __arm_vmulltq_int_x_s32(__a, __b, __p)
#define vmulltq_int_x_u8(__a, __b, __p) __arm_vmulltq_int_x_u8(__a, __b, __p)
#define vmulltq_int_x_u16(__a, __b, __p) __arm_vmulltq_int_x_u16(__a, __b, __p)
#define vmulltq_int_x_u32(__a, __b, __p) __arm_vmulltq_int_x_u32(__a, __b, __p)
#define vmulq_x_s8(__a, __b, __p) __arm_vmulq_x_s8(__a, __b, __p)
#define vmulq_x_s16(__a, __b, __p) __arm_vmulq_x_s16(__a, __b, __p)
#define vmulq_x_s32(__a, __b, __p) __arm_vmulq_x_s32(__a, __b, __p)
#define vmulq_x_n_s8(__a, __b, __p) __arm_vmulq_x_n_s8(__a, __b, __p)
#define vmulq_x_n_s16(__a, __b, __p) __arm_vmulq_x_n_s16(__a, __b, __p)
#define vmulq_x_n_s32(__a, __b, __p) __arm_vmulq_x_n_s32(__a, __b, __p)
#define vmulq_x_u8(__a, __b, __p) __arm_vmulq_x_u8(__a, __b, __p)
#define vmulq_x_u16(__a, __b, __p) __arm_vmulq_x_u16(__a, __b, __p)
#define vmulq_x_u32(__a, __b, __p) __arm_vmulq_x_u32(__a, __b, __p)
#define vmulq_x_n_u8(__a, __b, __p) __arm_vmulq_x_n_u8(__a, __b, __p)
#define vmulq_x_n_u16(__a, __b, __p) __arm_vmulq_x_n_u16(__a, __b, __p)
#define vmulq_x_n_u32(__a, __b, __p) __arm_vmulq_x_n_u32(__a, __b, __p)
#define vsubq_x_s8(__a, __b, __p) __arm_vsubq_x_s8(__a, __b, __p)
2640 | #define vsubq_x_s16(__a, __b, __p) __arm_vsubq_x_s16(__a, __b, __p) | |
2641 | #define vsubq_x_s32(__a, __b, __p) __arm_vsubq_x_s32(__a, __b, __p) | |
2642 | #define vsubq_x_n_s8(__a, __b, __p) __arm_vsubq_x_n_s8(__a, __b, __p) | |
2643 | #define vsubq_x_n_s16(__a, __b, __p) __arm_vsubq_x_n_s16(__a, __b, __p) | |
2644 | #define vsubq_x_n_s32(__a, __b, __p) __arm_vsubq_x_n_s32(__a, __b, __p) | |
2645 | #define vsubq_x_u8(__a, __b, __p) __arm_vsubq_x_u8(__a, __b, __p) | |
2646 | #define vsubq_x_u16(__a, __b, __p) __arm_vsubq_x_u16(__a, __b, __p) | |
2647 | #define vsubq_x_u32(__a, __b, __p) __arm_vsubq_x_u32(__a, __b, __p) | |
2648 | #define vsubq_x_n_u8(__a, __b, __p) __arm_vsubq_x_n_u8(__a, __b, __p) | |
2649 | #define vsubq_x_n_u16(__a, __b, __p) __arm_vsubq_x_n_u16(__a, __b, __p) | |
2650 | #define vsubq_x_n_u32(__a, __b, __p) __arm_vsubq_x_n_u32(__a, __b, __p) | |
2651 | #define vcaddq_rot90_x_s8(__a, __b, __p) __arm_vcaddq_rot90_x_s8(__a, __b, __p) | |
2652 | #define vcaddq_rot90_x_s16(__a, __b, __p) __arm_vcaddq_rot90_x_s16(__a, __b, __p) | |
2653 | #define vcaddq_rot90_x_s32(__a, __b, __p) __arm_vcaddq_rot90_x_s32(__a, __b, __p) | |
2654 | #define vcaddq_rot90_x_u8(__a, __b, __p) __arm_vcaddq_rot90_x_u8(__a, __b, __p) | |
2655 | #define vcaddq_rot90_x_u16(__a, __b, __p) __arm_vcaddq_rot90_x_u16(__a, __b, __p) | |
2656 | #define vcaddq_rot90_x_u32(__a, __b, __p) __arm_vcaddq_rot90_x_u32(__a, __b, __p) | |
2657 | #define vcaddq_rot270_x_s8(__a, __b, __p) __arm_vcaddq_rot270_x_s8(__a, __b, __p) | |
2658 | #define vcaddq_rot270_x_s16(__a, __b, __p) __arm_vcaddq_rot270_x_s16(__a, __b, __p) | |
2659 | #define vcaddq_rot270_x_s32(__a, __b, __p) __arm_vcaddq_rot270_x_s32(__a, __b, __p) | |
2660 | #define vcaddq_rot270_x_u8(__a, __b, __p) __arm_vcaddq_rot270_x_u8(__a, __b, __p) | |
2661 | #define vcaddq_rot270_x_u16(__a, __b, __p) __arm_vcaddq_rot270_x_u16(__a, __b, __p) | |
2662 | #define vcaddq_rot270_x_u32(__a, __b, __p) __arm_vcaddq_rot270_x_u32(__a, __b, __p) | |
2663 | #define vhaddq_x_n_s8(__a, __b, __p) __arm_vhaddq_x_n_s8(__a, __b, __p) | |
2664 | #define vhaddq_x_n_s16(__a, __b, __p) __arm_vhaddq_x_n_s16(__a, __b, __p) | |
2665 | #define vhaddq_x_n_s32(__a, __b, __p) __arm_vhaddq_x_n_s32(__a, __b, __p) | |
2666 | #define vhaddq_x_n_u8(__a, __b, __p) __arm_vhaddq_x_n_u8(__a, __b, __p) | |
2667 | #define vhaddq_x_n_u16(__a, __b, __p) __arm_vhaddq_x_n_u16(__a, __b, __p) | |
2668 | #define vhaddq_x_n_u32(__a, __b, __p) __arm_vhaddq_x_n_u32(__a, __b, __p) | |
2669 | #define vhaddq_x_s8(__a, __b, __p) __arm_vhaddq_x_s8(__a, __b, __p) | |
2670 | #define vhaddq_x_s16(__a, __b, __p) __arm_vhaddq_x_s16(__a, __b, __p) | |
2671 | #define vhaddq_x_s32(__a, __b, __p) __arm_vhaddq_x_s32(__a, __b, __p) | |
2672 | #define vhaddq_x_u8(__a, __b, __p) __arm_vhaddq_x_u8(__a, __b, __p) | |
2673 | #define vhaddq_x_u16(__a, __b, __p) __arm_vhaddq_x_u16(__a, __b, __p) | |
2674 | #define vhaddq_x_u32(__a, __b, __p) __arm_vhaddq_x_u32(__a, __b, __p) | |
2675 | #define vhcaddq_rot90_x_s8(__a, __b, __p) __arm_vhcaddq_rot90_x_s8(__a, __b, __p) | |
2676 | #define vhcaddq_rot90_x_s16(__a, __b, __p) __arm_vhcaddq_rot90_x_s16(__a, __b, __p) | |
2677 | #define vhcaddq_rot90_x_s32(__a, __b, __p) __arm_vhcaddq_rot90_x_s32(__a, __b, __p) | |
2678 | #define vhcaddq_rot270_x_s8(__a, __b, __p) __arm_vhcaddq_rot270_x_s8(__a, __b, __p) | |
2679 | #define vhcaddq_rot270_x_s16(__a, __b, __p) __arm_vhcaddq_rot270_x_s16(__a, __b, __p) | |
2680 | #define vhcaddq_rot270_x_s32(__a, __b, __p) __arm_vhcaddq_rot270_x_s32(__a, __b, __p) | |
2681 | #define vhsubq_x_n_s8(__a, __b, __p) __arm_vhsubq_x_n_s8(__a, __b, __p) | |
2682 | #define vhsubq_x_n_s16(__a, __b, __p) __arm_vhsubq_x_n_s16(__a, __b, __p) | |
2683 | #define vhsubq_x_n_s32(__a, __b, __p) __arm_vhsubq_x_n_s32(__a, __b, __p) | |
2684 | #define vhsubq_x_n_u8(__a, __b, __p) __arm_vhsubq_x_n_u8(__a, __b, __p) | |
2685 | #define vhsubq_x_n_u16(__a, __b, __p) __arm_vhsubq_x_n_u16(__a, __b, __p) | |
2686 | #define vhsubq_x_n_u32(__a, __b, __p) __arm_vhsubq_x_n_u32(__a, __b, __p) | |
2687 | #define vhsubq_x_s8(__a, __b, __p) __arm_vhsubq_x_s8(__a, __b, __p) | |
2688 | #define vhsubq_x_s16(__a, __b, __p) __arm_vhsubq_x_s16(__a, __b, __p) | |
2689 | #define vhsubq_x_s32(__a, __b, __p) __arm_vhsubq_x_s32(__a, __b, __p) | |
2690 | #define vhsubq_x_u8(__a, __b, __p) __arm_vhsubq_x_u8(__a, __b, __p) | |
2691 | #define vhsubq_x_u16(__a, __b, __p) __arm_vhsubq_x_u16(__a, __b, __p) | |
2692 | #define vhsubq_x_u32(__a, __b, __p) __arm_vhsubq_x_u32(__a, __b, __p) | |
2693 | #define vrhaddq_x_s8(__a, __b, __p) __arm_vrhaddq_x_s8(__a, __b, __p) | |
2694 | #define vrhaddq_x_s16(__a, __b, __p) __arm_vrhaddq_x_s16(__a, __b, __p) | |
2695 | #define vrhaddq_x_s32(__a, __b, __p) __arm_vrhaddq_x_s32(__a, __b, __p) | |
2696 | #define vrhaddq_x_u8(__a, __b, __p) __arm_vrhaddq_x_u8(__a, __b, __p) | |
2697 | #define vrhaddq_x_u16(__a, __b, __p) __arm_vrhaddq_x_u16(__a, __b, __p) | |
2698 | #define vrhaddq_x_u32(__a, __b, __p) __arm_vrhaddq_x_u32(__a, __b, __p) | |
2699 | #define vrmulhq_x_s8(__a, __b, __p) __arm_vrmulhq_x_s8(__a, __b, __p) | |
2700 | #define vrmulhq_x_s16(__a, __b, __p) __arm_vrmulhq_x_s16(__a, __b, __p) | |
2701 | #define vrmulhq_x_s32(__a, __b, __p) __arm_vrmulhq_x_s32(__a, __b, __p) | |
2702 | #define vrmulhq_x_u8(__a, __b, __p) __arm_vrmulhq_x_u8(__a, __b, __p) | |
2703 | #define vrmulhq_x_u16(__a, __b, __p) __arm_vrmulhq_x_u16(__a, __b, __p) | |
2704 | #define vrmulhq_x_u32(__a, __b, __p) __arm_vrmulhq_x_u32(__a, __b, __p) | |
2705 | #define vandq_x_s8(__a, __b, __p) __arm_vandq_x_s8(__a, __b, __p) | |
2706 | #define vandq_x_s16(__a, __b, __p) __arm_vandq_x_s16(__a, __b, __p) | |
2707 | #define vandq_x_s32(__a, __b, __p) __arm_vandq_x_s32(__a, __b, __p) | |
2708 | #define vandq_x_u8(__a, __b, __p) __arm_vandq_x_u8(__a, __b, __p) | |
2709 | #define vandq_x_u16(__a, __b, __p) __arm_vandq_x_u16(__a, __b, __p) | |
2710 | #define vandq_x_u32(__a, __b, __p) __arm_vandq_x_u32(__a, __b, __p) | |
2711 | #define vbicq_x_s8(__a, __b, __p) __arm_vbicq_x_s8(__a, __b, __p) | |
2712 | #define vbicq_x_s16(__a, __b, __p) __arm_vbicq_x_s16(__a, __b, __p) | |
2713 | #define vbicq_x_s32(__a, __b, __p) __arm_vbicq_x_s32(__a, __b, __p) | |
2714 | #define vbicq_x_u8(__a, __b, __p) __arm_vbicq_x_u8(__a, __b, __p) | |
2715 | #define vbicq_x_u16(__a, __b, __p) __arm_vbicq_x_u16(__a, __b, __p) | |
2716 | #define vbicq_x_u32(__a, __b, __p) __arm_vbicq_x_u32(__a, __b, __p) | |
2717 | #define vbrsrq_x_n_s8(__a, __b, __p) __arm_vbrsrq_x_n_s8(__a, __b, __p) | |
2718 | #define vbrsrq_x_n_s16(__a, __b, __p) __arm_vbrsrq_x_n_s16(__a, __b, __p) | |
2719 | #define vbrsrq_x_n_s32(__a, __b, __p) __arm_vbrsrq_x_n_s32(__a, __b, __p) | |
2720 | #define vbrsrq_x_n_u8(__a, __b, __p) __arm_vbrsrq_x_n_u8(__a, __b, __p) | |
2721 | #define vbrsrq_x_n_u16(__a, __b, __p) __arm_vbrsrq_x_n_u16(__a, __b, __p) | |
2722 | #define vbrsrq_x_n_u32(__a, __b, __p) __arm_vbrsrq_x_n_u32(__a, __b, __p) | |
2723 | #define veorq_x_s8(__a, __b, __p) __arm_veorq_x_s8(__a, __b, __p) | |
2724 | #define veorq_x_s16(__a, __b, __p) __arm_veorq_x_s16(__a, __b, __p) | |
2725 | #define veorq_x_s32(__a, __b, __p) __arm_veorq_x_s32(__a, __b, __p) | |
2726 | #define veorq_x_u8(__a, __b, __p) __arm_veorq_x_u8(__a, __b, __p) | |
2727 | #define veorq_x_u16(__a, __b, __p) __arm_veorq_x_u16(__a, __b, __p) | |
2728 | #define veorq_x_u32(__a, __b, __p) __arm_veorq_x_u32(__a, __b, __p) | |
2729 | #define vmovlbq_x_s8(__a, __p) __arm_vmovlbq_x_s8(__a, __p) | |
2730 | #define vmovlbq_x_s16(__a, __p) __arm_vmovlbq_x_s16(__a, __p) | |
2731 | #define vmovlbq_x_u8(__a, __p) __arm_vmovlbq_x_u8(__a, __p) | |
2732 | #define vmovlbq_x_u16(__a, __p) __arm_vmovlbq_x_u16(__a, __p) | |
2733 | #define vmovltq_x_s8(__a, __p) __arm_vmovltq_x_s8(__a, __p) | |
2734 | #define vmovltq_x_s16(__a, __p) __arm_vmovltq_x_s16(__a, __p) | |
2735 | #define vmovltq_x_u8(__a, __p) __arm_vmovltq_x_u8(__a, __p) | |
2736 | #define vmovltq_x_u16(__a, __p) __arm_vmovltq_x_u16(__a, __p) | |
2737 | #define vmvnq_x_s8(__a, __p) __arm_vmvnq_x_s8(__a, __p) | |
2738 | #define vmvnq_x_s16(__a, __p) __arm_vmvnq_x_s16(__a, __p) | |
2739 | #define vmvnq_x_s32(__a, __p) __arm_vmvnq_x_s32(__a, __p) | |
2740 | #define vmvnq_x_u8(__a, __p) __arm_vmvnq_x_u8(__a, __p) | |
2741 | #define vmvnq_x_u16(__a, __p) __arm_vmvnq_x_u16(__a, __p) | |
2742 | #define vmvnq_x_u32(__a, __p) __arm_vmvnq_x_u32(__a, __p) | |
2743 | #define vmvnq_x_n_s16( __imm, __p) __arm_vmvnq_x_n_s16( __imm, __p) | |
2744 | #define vmvnq_x_n_s32( __imm, __p) __arm_vmvnq_x_n_s32( __imm, __p) | |
2745 | #define vmvnq_x_n_u16( __imm, __p) __arm_vmvnq_x_n_u16( __imm, __p) | |
2746 | #define vmvnq_x_n_u32( __imm, __p) __arm_vmvnq_x_n_u32( __imm, __p) | |
2747 | #define vornq_x_s8(__a, __b, __p) __arm_vornq_x_s8(__a, __b, __p) | |
2748 | #define vornq_x_s16(__a, __b, __p) __arm_vornq_x_s16(__a, __b, __p) | |
2749 | #define vornq_x_s32(__a, __b, __p) __arm_vornq_x_s32(__a, __b, __p) | |
2750 | #define vornq_x_u8(__a, __b, __p) __arm_vornq_x_u8(__a, __b, __p) | |
2751 | #define vornq_x_u16(__a, __b, __p) __arm_vornq_x_u16(__a, __b, __p) | |
2752 | #define vornq_x_u32(__a, __b, __p) __arm_vornq_x_u32(__a, __b, __p) | |
2753 | #define vorrq_x_s8(__a, __b, __p) __arm_vorrq_x_s8(__a, __b, __p) | |
2754 | #define vorrq_x_s16(__a, __b, __p) __arm_vorrq_x_s16(__a, __b, __p) | |
2755 | #define vorrq_x_s32(__a, __b, __p) __arm_vorrq_x_s32(__a, __b, __p) | |
2756 | #define vorrq_x_u8(__a, __b, __p) __arm_vorrq_x_u8(__a, __b, __p) | |
2757 | #define vorrq_x_u16(__a, __b, __p) __arm_vorrq_x_u16(__a, __b, __p) | |
2758 | #define vorrq_x_u32(__a, __b, __p) __arm_vorrq_x_u32(__a, __b, __p) | |
2759 | #define vrev16q_x_s8(__a, __p) __arm_vrev16q_x_s8(__a, __p) | |
2760 | #define vrev16q_x_u8(__a, __p) __arm_vrev16q_x_u8(__a, __p) | |
2761 | #define vrev32q_x_s8(__a, __p) __arm_vrev32q_x_s8(__a, __p) | |
2762 | #define vrev32q_x_s16(__a, __p) __arm_vrev32q_x_s16(__a, __p) | |
2763 | #define vrev32q_x_u8(__a, __p) __arm_vrev32q_x_u8(__a, __p) | |
2764 | #define vrev32q_x_u16(__a, __p) __arm_vrev32q_x_u16(__a, __p) | |
2765 | #define vrev64q_x_s8(__a, __p) __arm_vrev64q_x_s8(__a, __p) | |
2766 | #define vrev64q_x_s16(__a, __p) __arm_vrev64q_x_s16(__a, __p) | |
2767 | #define vrev64q_x_s32(__a, __p) __arm_vrev64q_x_s32(__a, __p) | |
2768 | #define vrev64q_x_u8(__a, __p) __arm_vrev64q_x_u8(__a, __p) | |
2769 | #define vrev64q_x_u16(__a, __p) __arm_vrev64q_x_u16(__a, __p) | |
2770 | #define vrev64q_x_u32(__a, __p) __arm_vrev64q_x_u32(__a, __p) | |
2771 | #define vrshlq_x_s8(__a, __b, __p) __arm_vrshlq_x_s8(__a, __b, __p) | |
2772 | #define vrshlq_x_s16(__a, __b, __p) __arm_vrshlq_x_s16(__a, __b, __p) | |
2773 | #define vrshlq_x_s32(__a, __b, __p) __arm_vrshlq_x_s32(__a, __b, __p) | |
2774 | #define vrshlq_x_u8(__a, __b, __p) __arm_vrshlq_x_u8(__a, __b, __p) | |
2775 | #define vrshlq_x_u16(__a, __b, __p) __arm_vrshlq_x_u16(__a, __b, __p) | |
2776 | #define vrshlq_x_u32(__a, __b, __p) __arm_vrshlq_x_u32(__a, __b, __p) | |
2777 | #define vshllbq_x_n_s8(__a, __imm, __p) __arm_vshllbq_x_n_s8(__a, __imm, __p) | |
2778 | #define vshllbq_x_n_s16(__a, __imm, __p) __arm_vshllbq_x_n_s16(__a, __imm, __p) | |
2779 | #define vshllbq_x_n_u8(__a, __imm, __p) __arm_vshllbq_x_n_u8(__a, __imm, __p) | |
2780 | #define vshllbq_x_n_u16(__a, __imm, __p) __arm_vshllbq_x_n_u16(__a, __imm, __p) | |
2781 | #define vshlltq_x_n_s8(__a, __imm, __p) __arm_vshlltq_x_n_s8(__a, __imm, __p) | |
2782 | #define vshlltq_x_n_s16(__a, __imm, __p) __arm_vshlltq_x_n_s16(__a, __imm, __p) | |
2783 | #define vshlltq_x_n_u8(__a, __imm, __p) __arm_vshlltq_x_n_u8(__a, __imm, __p) | |
2784 | #define vshlltq_x_n_u16(__a, __imm, __p) __arm_vshlltq_x_n_u16(__a, __imm, __p) | |
2785 | #define vshlq_x_s8(__a, __b, __p) __arm_vshlq_x_s8(__a, __b, __p) | |
2786 | #define vshlq_x_s16(__a, __b, __p) __arm_vshlq_x_s16(__a, __b, __p) | |
2787 | #define vshlq_x_s32(__a, __b, __p) __arm_vshlq_x_s32(__a, __b, __p) | |
2788 | #define vshlq_x_u8(__a, __b, __p) __arm_vshlq_x_u8(__a, __b, __p) | |
2789 | #define vshlq_x_u16(__a, __b, __p) __arm_vshlq_x_u16(__a, __b, __p) | |
2790 | #define vshlq_x_u32(__a, __b, __p) __arm_vshlq_x_u32(__a, __b, __p) | |
2791 | #define vshlq_x_n_s8(__a, __imm, __p) __arm_vshlq_x_n_s8(__a, __imm, __p) | |
2792 | #define vshlq_x_n_s16(__a, __imm, __p) __arm_vshlq_x_n_s16(__a, __imm, __p) | |
2793 | #define vshlq_x_n_s32(__a, __imm, __p) __arm_vshlq_x_n_s32(__a, __imm, __p) | |
2794 | #define vshlq_x_n_u8(__a, __imm, __p) __arm_vshlq_x_n_u8(__a, __imm, __p) | |
2795 | #define vshlq_x_n_u16(__a, __imm, __p) __arm_vshlq_x_n_u16(__a, __imm, __p) | |
2796 | #define vshlq_x_n_u32(__a, __imm, __p) __arm_vshlq_x_n_u32(__a, __imm, __p) | |
2797 | #define vrshrq_x_n_s8(__a, __imm, __p) __arm_vrshrq_x_n_s8(__a, __imm, __p) | |
2798 | #define vrshrq_x_n_s16(__a, __imm, __p) __arm_vrshrq_x_n_s16(__a, __imm, __p) | |
2799 | #define vrshrq_x_n_s32(__a, __imm, __p) __arm_vrshrq_x_n_s32(__a, __imm, __p) | |
2800 | #define vrshrq_x_n_u8(__a, __imm, __p) __arm_vrshrq_x_n_u8(__a, __imm, __p) | |
2801 | #define vrshrq_x_n_u16(__a, __imm, __p) __arm_vrshrq_x_n_u16(__a, __imm, __p) | |
2802 | #define vrshrq_x_n_u32(__a, __imm, __p) __arm_vrshrq_x_n_u32(__a, __imm, __p) | |
2803 | #define vshrq_x_n_s8(__a, __imm, __p) __arm_vshrq_x_n_s8(__a, __imm, __p) | |
2804 | #define vshrq_x_n_s16(__a, __imm, __p) __arm_vshrq_x_n_s16(__a, __imm, __p) | |
2805 | #define vshrq_x_n_s32(__a, __imm, __p) __arm_vshrq_x_n_s32(__a, __imm, __p) | |
2806 | #define vshrq_x_n_u8(__a, __imm, __p) __arm_vshrq_x_n_u8(__a, __imm, __p) | |
2807 | #define vshrq_x_n_u16(__a, __imm, __p) __arm_vshrq_x_n_u16(__a, __imm, __p) | |
2808 | #define vshrq_x_n_u32(__a, __imm, __p) __arm_vshrq_x_n_u32(__a, __imm, __p) | |
2809 | #define vdupq_x_n_f16(__a, __p) __arm_vdupq_x_n_f16(__a, __p) | |
2810 | #define vdupq_x_n_f32(__a, __p) __arm_vdupq_x_n_f32(__a, __p) | |
2811 | #define vminnmq_x_f16(__a, __b, __p) __arm_vminnmq_x_f16(__a, __b, __p) | |
2812 | #define vminnmq_x_f32(__a, __b, __p) __arm_vminnmq_x_f32(__a, __b, __p) | |
2813 | #define vmaxnmq_x_f16(__a, __b, __p) __arm_vmaxnmq_x_f16(__a, __b, __p) | |
2814 | #define vmaxnmq_x_f32(__a, __b, __p) __arm_vmaxnmq_x_f32(__a, __b, __p) | |
2815 | #define vabdq_x_f16(__a, __b, __p) __arm_vabdq_x_f16(__a, __b, __p) | |
2816 | #define vabdq_x_f32(__a, __b, __p) __arm_vabdq_x_f32(__a, __b, __p) | |
2817 | #define vabsq_x_f16(__a, __p) __arm_vabsq_x_f16(__a, __p) | |
2818 | #define vabsq_x_f32(__a, __p) __arm_vabsq_x_f32(__a, __p) | |
2819 | #define vaddq_x_f16(__a, __b, __p) __arm_vaddq_x_f16(__a, __b, __p) | |
2820 | #define vaddq_x_f32(__a, __b, __p) __arm_vaddq_x_f32(__a, __b, __p) | |
2821 | #define vaddq_x_n_f16(__a, __b, __p) __arm_vaddq_x_n_f16(__a, __b, __p) | |
2822 | #define vaddq_x_n_f32(__a, __b, __p) __arm_vaddq_x_n_f32(__a, __b, __p) | |
2823 | #define vnegq_x_f16(__a, __p) __arm_vnegq_x_f16(__a, __p) | |
2824 | #define vnegq_x_f32(__a, __p) __arm_vnegq_x_f32(__a, __p) | |
2825 | #define vmulq_x_f16(__a, __b, __p) __arm_vmulq_x_f16(__a, __b, __p) | |
2826 | #define vmulq_x_f32(__a, __b, __p) __arm_vmulq_x_f32(__a, __b, __p) | |
2827 | #define vmulq_x_n_f16(__a, __b, __p) __arm_vmulq_x_n_f16(__a, __b, __p) | |
2828 | #define vmulq_x_n_f32(__a, __b, __p) __arm_vmulq_x_n_f32(__a, __b, __p) | |
2829 | #define vsubq_x_f16(__a, __b, __p) __arm_vsubq_x_f16(__a, __b, __p) | |
2830 | #define vsubq_x_f32(__a, __b, __p) __arm_vsubq_x_f32(__a, __b, __p) | |
2831 | #define vsubq_x_n_f16(__a, __b, __p) __arm_vsubq_x_n_f16(__a, __b, __p) | |
2832 | #define vsubq_x_n_f32(__a, __b, __p) __arm_vsubq_x_n_f32(__a, __b, __p) | |
2833 | #define vcaddq_rot90_x_f16(__a, __b, __p) __arm_vcaddq_rot90_x_f16(__a, __b, __p) | |
2834 | #define vcaddq_rot90_x_f32(__a, __b, __p) __arm_vcaddq_rot90_x_f32(__a, __b, __p) | |
2835 | #define vcaddq_rot270_x_f16(__a, __b, __p) __arm_vcaddq_rot270_x_f16(__a, __b, __p) | |
2836 | #define vcaddq_rot270_x_f32(__a, __b, __p) __arm_vcaddq_rot270_x_f32(__a, __b, __p) | |
2837 | #define vcmulq_x_f16(__a, __b, __p) __arm_vcmulq_x_f16(__a, __b, __p) | |
2838 | #define vcmulq_x_f32(__a, __b, __p) __arm_vcmulq_x_f32(__a, __b, __p) | |
2839 | #define vcmulq_rot90_x_f16(__a, __b, __p) __arm_vcmulq_rot90_x_f16(__a, __b, __p) | |
2840 | #define vcmulq_rot90_x_f32(__a, __b, __p) __arm_vcmulq_rot90_x_f32(__a, __b, __p) | |
2841 | #define vcmulq_rot180_x_f16(__a, __b, __p) __arm_vcmulq_rot180_x_f16(__a, __b, __p) | |
2842 | #define vcmulq_rot180_x_f32(__a, __b, __p) __arm_vcmulq_rot180_x_f32(__a, __b, __p) | |
2843 | #define vcmulq_rot270_x_f16(__a, __b, __p) __arm_vcmulq_rot270_x_f16(__a, __b, __p) | |
2844 | #define vcmulq_rot270_x_f32(__a, __b, __p) __arm_vcmulq_rot270_x_f32(__a, __b, __p) | |
2845 | #define vcvtaq_x_s16_f16(__a, __p) __arm_vcvtaq_x_s16_f16(__a, __p) | |
2846 | #define vcvtaq_x_s32_f32(__a, __p) __arm_vcvtaq_x_s32_f32(__a, __p) | |
2847 | #define vcvtaq_x_u16_f16(__a, __p) __arm_vcvtaq_x_u16_f16(__a, __p) | |
2848 | #define vcvtaq_x_u32_f32(__a, __p) __arm_vcvtaq_x_u32_f32(__a, __p) | |
2849 | #define vcvtnq_x_s16_f16(__a, __p) __arm_vcvtnq_x_s16_f16(__a, __p) | |
2850 | #define vcvtnq_x_s32_f32(__a, __p) __arm_vcvtnq_x_s32_f32(__a, __p) | |
2851 | #define vcvtnq_x_u16_f16(__a, __p) __arm_vcvtnq_x_u16_f16(__a, __p) | |
2852 | #define vcvtnq_x_u32_f32(__a, __p) __arm_vcvtnq_x_u32_f32(__a, __p) | |
2853 | #define vcvtpq_x_s16_f16(__a, __p) __arm_vcvtpq_x_s16_f16(__a, __p) | |
2854 | #define vcvtpq_x_s32_f32(__a, __p) __arm_vcvtpq_x_s32_f32(__a, __p) | |
2855 | #define vcvtpq_x_u16_f16(__a, __p) __arm_vcvtpq_x_u16_f16(__a, __p) | |
2856 | #define vcvtpq_x_u32_f32(__a, __p) __arm_vcvtpq_x_u32_f32(__a, __p) | |
2857 | #define vcvtmq_x_s16_f16(__a, __p) __arm_vcvtmq_x_s16_f16(__a, __p) | |
2858 | #define vcvtmq_x_s32_f32(__a, __p) __arm_vcvtmq_x_s32_f32(__a, __p) | |
2859 | #define vcvtmq_x_u16_f16(__a, __p) __arm_vcvtmq_x_u16_f16(__a, __p) | |
2860 | #define vcvtmq_x_u32_f32(__a, __p) __arm_vcvtmq_x_u32_f32(__a, __p) | |
2861 | #define vcvtbq_x_f32_f16(__a, __p) __arm_vcvtbq_x_f32_f16(__a, __p) | |
2862 | #define vcvttq_x_f32_f16(__a, __p) __arm_vcvttq_x_f32_f16(__a, __p) | |
2863 | #define vcvtq_x_f16_u16(__a, __p) __arm_vcvtq_x_f16_u16(__a, __p) | |
2864 | #define vcvtq_x_f16_s16(__a, __p) __arm_vcvtq_x_f16_s16(__a, __p) | |
2865 | #define vcvtq_x_f32_s32(__a, __p) __arm_vcvtq_x_f32_s32(__a, __p) | |
2866 | #define vcvtq_x_f32_u32(__a, __p) __arm_vcvtq_x_f32_u32(__a, __p) | |
2867 | #define vcvtq_x_n_f16_s16(__a, __imm6, __p) __arm_vcvtq_x_n_f16_s16(__a, __imm6, __p) | |
2868 | #define vcvtq_x_n_f16_u16(__a, __imm6, __p) __arm_vcvtq_x_n_f16_u16(__a, __imm6, __p) | |
2869 | #define vcvtq_x_n_f32_s32(__a, __imm6, __p) __arm_vcvtq_x_n_f32_s32(__a, __imm6, __p) | |
2870 | #define vcvtq_x_n_f32_u32(__a, __imm6, __p) __arm_vcvtq_x_n_f32_u32(__a, __imm6, __p) | |
2871 | #define vcvtq_x_s16_f16(__a, __p) __arm_vcvtq_x_s16_f16(__a, __p) | |
2872 | #define vcvtq_x_s32_f32(__a, __p) __arm_vcvtq_x_s32_f32(__a, __p) | |
2873 | #define vcvtq_x_u16_f16(__a, __p) __arm_vcvtq_x_u16_f16(__a, __p) | |
2874 | #define vcvtq_x_u32_f32(__a, __p) __arm_vcvtq_x_u32_f32(__a, __p) | |
2875 | #define vcvtq_x_n_s16_f16(__a, __imm6, __p) __arm_vcvtq_x_n_s16_f16(__a, __imm6, __p) | |
2876 | #define vcvtq_x_n_s32_f32(__a, __imm6, __p) __arm_vcvtq_x_n_s32_f32(__a, __imm6, __p) | |
2877 | #define vcvtq_x_n_u16_f16(__a, __imm6, __p) __arm_vcvtq_x_n_u16_f16(__a, __imm6, __p) | |
2878 | #define vcvtq_x_n_u32_f32(__a, __imm6, __p) __arm_vcvtq_x_n_u32_f32(__a, __imm6, __p) | |
2879 | #define vrndq_x_f16(__a, __p) __arm_vrndq_x_f16(__a, __p) | |
2880 | #define vrndq_x_f32(__a, __p) __arm_vrndq_x_f32(__a, __p) | |
2881 | #define vrndnq_x_f16(__a, __p) __arm_vrndnq_x_f16(__a, __p) | |
2882 | #define vrndnq_x_f32(__a, __p) __arm_vrndnq_x_f32(__a, __p) | |
2883 | #define vrndmq_x_f16(__a, __p) __arm_vrndmq_x_f16(__a, __p) | |
2884 | #define vrndmq_x_f32(__a, __p) __arm_vrndmq_x_f32(__a, __p) | |
2885 | #define vrndpq_x_f16(__a, __p) __arm_vrndpq_x_f16(__a, __p) | |
2886 | #define vrndpq_x_f32(__a, __p) __arm_vrndpq_x_f32(__a, __p) | |
2887 | #define vrndaq_x_f16(__a, __p) __arm_vrndaq_x_f16(__a, __p) | |
2888 | #define vrndaq_x_f32(__a, __p) __arm_vrndaq_x_f32(__a, __p) | |
2889 | #define vrndxq_x_f16(__a, __p) __arm_vrndxq_x_f16(__a, __p) | |
2890 | #define vrndxq_x_f32(__a, __p) __arm_vrndxq_x_f32(__a, __p) | |
2891 | #define vandq_x_f16(__a, __b, __p) __arm_vandq_x_f16(__a, __b, __p) | |
2892 | #define vandq_x_f32(__a, __b, __p) __arm_vandq_x_f32(__a, __b, __p) | |
2893 | #define vbicq_x_f16(__a, __b, __p) __arm_vbicq_x_f16(__a, __b, __p) | |
2894 | #define vbicq_x_f32(__a, __b, __p) __arm_vbicq_x_f32(__a, __b, __p) | |
2895 | #define vbrsrq_x_n_f16(__a, __b, __p) __arm_vbrsrq_x_n_f16(__a, __b, __p) | |
2896 | #define vbrsrq_x_n_f32(__a, __b, __p) __arm_vbrsrq_x_n_f32(__a, __b, __p) | |
2897 | #define veorq_x_f16(__a, __b, __p) __arm_veorq_x_f16(__a, __b, __p) | |
2898 | #define veorq_x_f32(__a, __b, __p) __arm_veorq_x_f32(__a, __b, __p) | |
2899 | #define vornq_x_f16(__a, __b, __p) __arm_vornq_x_f16(__a, __b, __p) | |
2900 | #define vornq_x_f32(__a, __b, __p) __arm_vornq_x_f32(__a, __b, __p) | |
2901 | #define vorrq_x_f16(__a, __b, __p) __arm_vorrq_x_f16(__a, __b, __p) | |
2902 | #define vorrq_x_f32(__a, __b, __p) __arm_vorrq_x_f32(__a, __b, __p) | |
2903 | #define vrev32q_x_f16(__a, __p) __arm_vrev32q_x_f16(__a, __p) | |
2904 | #define vrev64q_x_f16(__a, __p) __arm_vrev64q_x_f16(__a, __p) | |
2905 | #define vrev64q_x_f32(__a, __p) __arm_vrev64q_x_f32(__a, __p) | |
c3562f81 SP |
2906 | #define vadciq_s32(__a, __b, __carry_out) __arm_vadciq_s32(__a, __b, __carry_out) |
2907 | #define vadciq_u32(__a, __b, __carry_out) __arm_vadciq_u32(__a, __b, __carry_out) | |
2908 | #define vadciq_m_s32(__inactive, __a, __b, __carry_out, __p) __arm_vadciq_m_s32(__inactive, __a, __b, __carry_out, __p) | |
2909 | #define vadciq_m_u32(__inactive, __a, __b, __carry_out, __p) __arm_vadciq_m_u32(__inactive, __a, __b, __carry_out, __p) | |
2910 | #define vadcq_s32(__a, __b, __carry) __arm_vadcq_s32(__a, __b, __carry) | |
2911 | #define vadcq_u32(__a, __b, __carry) __arm_vadcq_u32(__a, __b, __carry) | |
2912 | #define vadcq_m_s32(__inactive, __a, __b, __carry, __p) __arm_vadcq_m_s32(__inactive, __a, __b, __carry, __p) | |
2913 | #define vadcq_m_u32(__inactive, __a, __b, __carry, __p) __arm_vadcq_m_u32(__inactive, __a, __b, __carry, __p) | |
2914 | #define vsbciq_s32(__a, __b, __carry_out) __arm_vsbciq_s32(__a, __b, __carry_out) | |
2915 | #define vsbciq_u32(__a, __b, __carry_out) __arm_vsbciq_u32(__a, __b, __carry_out) | |
2916 | #define vsbciq_m_s32(__inactive, __a, __b, __carry_out, __p) __arm_vsbciq_m_s32(__inactive, __a, __b, __carry_out, __p) | |
2917 | #define vsbciq_m_u32(__inactive, __a, __b, __carry_out, __p) __arm_vsbciq_m_u32(__inactive, __a, __b, __carry_out, __p) | |
2918 | #define vsbcq_s32(__a, __b, __carry) __arm_vsbcq_s32(__a, __b, __carry) | |
2919 | #define vsbcq_u32(__a, __b, __carry) __arm_vsbcq_u32(__a, __b, __carry) | |
2920 | #define vsbcq_m_s32(__inactive, __a, __b, __carry, __p) __arm_vsbcq_m_s32(__inactive, __a, __b, __carry, __p) | |
2921 | #define vsbcq_m_u32(__inactive, __a, __b, __carry, __p) __arm_vsbcq_m_u32(__inactive, __a, __b, __carry, __p) | |
1dfcc3b5 SP |
2922 | #define vst1q_p_u8(__addr, __value, __p) __arm_vst1q_p_u8(__addr, __value, __p) |
2923 | #define vst1q_p_s8(__addr, __value, __p) __arm_vst1q_p_s8(__addr, __value, __p) | |
2924 | #define vst2q_s8(__addr, __value) __arm_vst2q_s8(__addr, __value) | |
2925 | #define vst2q_u8(__addr, __value) __arm_vst2q_u8(__addr, __value) | |
2926 | #define vld1q_z_u8(__base, __p) __arm_vld1q_z_u8(__base, __p) | |
2927 | #define vld1q_z_s8(__base, __p) __arm_vld1q_z_s8(__base, __p) | |
2928 | #define vld2q_s8(__addr) __arm_vld2q_s8(__addr) | |
2929 | #define vld2q_u8(__addr) __arm_vld2q_u8(__addr) | |
2930 | #define vld4q_s8(__addr) __arm_vld4q_s8(__addr) | |
2931 | #define vld4q_u8(__addr) __arm_vld4q_u8(__addr) | |
2932 | #define vst1q_p_u16(__addr, __value, __p) __arm_vst1q_p_u16(__addr, __value, __p) | |
2933 | #define vst1q_p_s16(__addr, __value, __p) __arm_vst1q_p_s16(__addr, __value, __p) | |
2934 | #define vst2q_s16(__addr, __value) __arm_vst2q_s16(__addr, __value) | |
2935 | #define vst2q_u16(__addr, __value) __arm_vst2q_u16(__addr, __value) | |
2936 | #define vld1q_z_u16(__base, __p) __arm_vld1q_z_u16(__base, __p) | |
2937 | #define vld1q_z_s16(__base, __p) __arm_vld1q_z_s16(__base, __p) | |
2938 | #define vld2q_s16(__addr) __arm_vld2q_s16(__addr) | |
2939 | #define vld2q_u16(__addr) __arm_vld2q_u16(__addr) | |
2940 | #define vld4q_s16(__addr) __arm_vld4q_s16(__addr) | |
2941 | #define vld4q_u16(__addr) __arm_vld4q_u16(__addr) | |
2942 | #define vst1q_p_u32(__addr, __value, __p) __arm_vst1q_p_u32(__addr, __value, __p) | |
2943 | #define vst1q_p_s32(__addr, __value, __p) __arm_vst1q_p_s32(__addr, __value, __p) | |
2944 | #define vst2q_s32(__addr, __value) __arm_vst2q_s32(__addr, __value) | |
2945 | #define vst2q_u32(__addr, __value) __arm_vst2q_u32(__addr, __value) | |
2946 | #define vld1q_z_u32(__base, __p) __arm_vld1q_z_u32(__base, __p) | |
2947 | #define vld1q_z_s32(__base, __p) __arm_vld1q_z_s32(__base, __p) | |
2948 | #define vld2q_s32(__addr) __arm_vld2q_s32(__addr) | |
2949 | #define vld2q_u32(__addr) __arm_vld2q_u32(__addr) | |
2950 | #define vld4q_s32(__addr) __arm_vld4q_s32(__addr) | |
2951 | #define vld4q_u32(__addr) __arm_vld4q_u32(__addr) | |
2952 | #define vld4q_f16(__addr) __arm_vld4q_f16(__addr) | |
2953 | #define vld2q_f16(__addr) __arm_vld2q_f16(__addr) | |
2954 | #define vld1q_z_f16(__base, __p) __arm_vld1q_z_f16(__base, __p) | |
2955 | #define vst2q_f16(__addr, __value) __arm_vst2q_f16(__addr, __value) | |
2956 | #define vst1q_p_f16(__addr, __value, __p) __arm_vst1q_p_f16(__addr, __value, __p) | |
2957 | #define vld4q_f32(__addr) __arm_vld4q_f32(__addr) | |
2958 | #define vld2q_f32(__addr) __arm_vld2q_f32(__addr) | |
2959 | #define vld1q_z_f32(__base, __p) __arm_vld1q_z_f32(__base, __p) | |
2960 | #define vst2q_f32(__addr, __value) __arm_vst2q_f32(__addr, __value) | |
2961 | #define vst1q_p_f32(__addr, __value, __p) __arm_vst1q_p_f32(__addr, __value, __p) | |
1a5c27b1 SP |
2962 | #define vsetq_lane_f16(__a, __b, __idx) __arm_vsetq_lane_f16(__a, __b, __idx) |
2963 | #define vsetq_lane_f32(__a, __b, __idx) __arm_vsetq_lane_f32(__a, __b, __idx) | |
2964 | #define vsetq_lane_s16(__a, __b, __idx) __arm_vsetq_lane_s16(__a, __b, __idx) | |
2965 | #define vsetq_lane_s32(__a, __b, __idx) __arm_vsetq_lane_s32(__a, __b, __idx) | |
2966 | #define vsetq_lane_s8(__a, __b, __idx) __arm_vsetq_lane_s8(__a, __b, __idx) | |
2967 | #define vsetq_lane_s64(__a, __b, __idx) __arm_vsetq_lane_s64(__a, __b, __idx) | |
2968 | #define vsetq_lane_u8(__a, __b, __idx) __arm_vsetq_lane_u8(__a, __b, __idx) | |
2969 | #define vsetq_lane_u16(__a, __b, __idx) __arm_vsetq_lane_u16(__a, __b, __idx) | |
2970 | #define vsetq_lane_u32(__a, __b, __idx) __arm_vsetq_lane_u32(__a, __b, __idx) | |
2971 | #define vsetq_lane_u64(__a, __b, __idx) __arm_vsetq_lane_u64(__a, __b, __idx) | |
2972 | #define vgetq_lane_f16(__a, __idx) __arm_vgetq_lane_f16(__a, __idx) | |
2973 | #define vgetq_lane_f32(__a, __idx) __arm_vgetq_lane_f32(__a, __idx) | |
2974 | #define vgetq_lane_s16(__a, __idx) __arm_vgetq_lane_s16(__a, __idx) | |
2975 | #define vgetq_lane_s32(__a, __idx) __arm_vgetq_lane_s32(__a, __idx) | |
2976 | #define vgetq_lane_s8(__a, __idx) __arm_vgetq_lane_s8(__a, __idx) | |
2977 | #define vgetq_lane_s64(__a, __idx) __arm_vgetq_lane_s64(__a, __idx) | |
2978 | #define vgetq_lane_u8(__a, __idx) __arm_vgetq_lane_u8(__a, __idx) | |
2979 | #define vgetq_lane_u16(__a, __idx) __arm_vgetq_lane_u16(__a, __idx) | |
2980 | #define vgetq_lane_u32(__a, __idx) __arm_vgetq_lane_u32(__a, __idx) | |
2981 | #define vgetq_lane_u64(__a, __idx) __arm_vgetq_lane_u64(__a, __idx) | |
85244449 SP |
2982 | #define sqrshr(__p0, __p1) __arm_sqrshr(__p0, __p1) |
2983 | #define sqrshrl(__p0, __p1) __arm_sqrshrl(__p0, __p1) | |
2984 | #define sqrshrl_sat48(__p0, __p1) __arm_sqrshrl_sat48(__p0, __p1) | |
2985 | #define sqshl(__p0, __p1) __arm_sqshl(__p0, __p1) | |
2986 | #define sqshll(__p0, __p1) __arm_sqshll(__p0, __p1) | |
2987 | #define srshr(__p0, __p1) __arm_srshr(__p0, __p1) | |
2988 | #define srshrl(__p0, __p1) __arm_srshrl(__p0, __p1) | |
2989 | #define uqrshl(__p0, __p1) __arm_uqrshl(__p0, __p1) | |
2990 | #define uqrshll(__p0, __p1) __arm_uqrshll(__p0, __p1) | |
2991 | #define uqrshll_sat48(__p0, __p1) __arm_uqrshll_sat48(__p0, __p1) | |
2992 | #define uqshl(__p0, __p1) __arm_uqshl(__p0, __p1) | |
2993 | #define uqshll(__p0, __p1) __arm_uqshll(__p0, __p1) | |
2994 | #define urshr(__p0, __p1) __arm_urshr(__p0, __p1) | |
2995 | #define urshrl(__p0, __p1) __arm_urshrl(__p0, __p1) | |
2996 | #define lsll(__p0, __p1) __arm_lsll(__p0, __p1) | |
2997 | #define asrl(__p0, __p1) __arm_asrl(__p0, __p1) | |
88c9a831 SP |
2998 | #define vshlcq_m_s8(__a, __b, __imm, __p) __arm_vshlcq_m_s8(__a, __b, __imm, __p) |
2999 | #define vshlcq_m_u8(__a, __b, __imm, __p) __arm_vshlcq_m_u8(__a, __b, __imm, __p) | |
3000 | #define vshlcq_m_s16(__a, __b, __imm, __p) __arm_vshlcq_m_s16(__a, __b, __imm, __p) | |
3001 | #define vshlcq_m_u16(__a, __b, __imm, __p) __arm_vshlcq_m_u16(__a, __b, __imm, __p) | |
3002 | #define vshlcq_m_s32(__a, __b, __imm, __p) __arm_vshlcq_m_s32(__a, __b, __imm, __p) | |
3003 | #define vshlcq_m_u32(__a, __b, __imm, __p) __arm_vshlcq_m_u32(__a, __b, __imm, __p) | |
14782c81 SP |
3004 | #endif |
3005 | ||
1a5c27b1 SP |
/* For big-endian, GCC's vector indices are reversed within each 64 bits
   compared to the architectural lane indices used by MVE intrinsics.
   NOTE(review): this header #errors out under __ARM_BIG_ENDIAN near the top,
   so the big-endian branch below is effectively dead here; it mirrors the
   equivalent Neon helpers.  */
/* Number of lanes in vector __v (array-style element count).  */
#define __ARM_NUM_LANES(__v) (sizeof (__v) / sizeof (__v[0]))
#ifdef __ARM_BIG_ENDIAN
/* Map an architectural lane index to GCC's lane numbering by flipping the
   index within each 64-bit half of the vector.  */
#define __ARM_LANEQ(__vec, __idx) (__idx ^ (__ARM_NUM_LANES(__vec)/2 - 1))
#else
#define __ARM_LANEQ(__vec, __idx) __idx
#endif
/* Compile-time check that __idx is a valid lane for __vec; expands to a call
   to the target-specific lane-check builtin.  */
#define __ARM_CHECK_LANEQ(__vec, __idx)		 \
  __builtin_arm_lane_check (__ARM_NUM_LANES(__vec), \
			    __ARM_LANEQ(__vec, __idx))
14782c81 SP |
/* vst4q family: store four vectors of 4-element structures to __addr
   (MVE VST4 de-interleaving stores).  Each wrapper reinterprets the x4
   tuple type as the builtin's opaque XI-mode operand via a union, then
   forwards to the corresponding __builtin_mve_vst4q* builtin.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst4q_s8 (int8_t * __addr, int8x16x4_t __value)
{
  /* Union-based type pun: view the int8x16x4_t tuple as the opaque
     __builtin_neon_xi mode the builtin expects.  */
  union { int8x16x4_t __i; __builtin_neon_xi __o; } __rv;
  __rv.__i = __value;
  __builtin_mve_vst4qv16qi ((__builtin_neon_qi *) __addr, __rv.__o);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst4q_s16 (int16_t * __addr, int16x8x4_t __value)
{
  union { int16x8x4_t __i; __builtin_neon_xi __o; } __rv;
  __rv.__i = __value;
  __builtin_mve_vst4qv8hi ((__builtin_neon_hi *) __addr, __rv.__o);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst4q_s32 (int32_t * __addr, int32x4x4_t __value)
{
  union { int32x4x4_t __i; __builtin_neon_xi __o; } __rv;
  __rv.__i = __value;
  __builtin_mve_vst4qv4si ((__builtin_neon_si *) __addr, __rv.__o);
}

/* Unsigned variants reuse the signed-mode builtins; only the pointer and
   tuple types differ.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst4q_u8 (uint8_t * __addr, uint8x16x4_t __value)
{
  union { uint8x16x4_t __i; __builtin_neon_xi __o; } __rv;
  __rv.__i = __value;
  __builtin_mve_vst4qv16qi ((__builtin_neon_qi *) __addr, __rv.__o);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst4q_u16 (uint16_t * __addr, uint16x8x4_t __value)
{
  union { uint16x8x4_t __i; __builtin_neon_xi __o; } __rv;
  __rv.__i = __value;
  __builtin_mve_vst4qv8hi ((__builtin_neon_hi *) __addr, __rv.__o);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst4q_u32 (uint32_t * __addr, uint32x4x4_t __value)
{
  union { uint32x4x4_t __i; __builtin_neon_xi __o; } __rv;
  __rv.__i = __value;
  __builtin_mve_vst4qv4si ((__builtin_neon_si *) __addr, __rv.__o);
}
6df4618c SP |
/* vdupq_n (signed): broadcast the scalar __a into every lane of the
   result vector (VDUP).  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdupq_n_s8 (int8_t __a)
{
  return __builtin_mve_vdupq_n_sv16qi (__a);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdupq_n_s16 (int16_t __a)
{
  return __builtin_mve_vdupq_n_sv8hi (__a);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdupq_n_s32 (int32_t __a)
{
  return __builtin_mve_vdupq_n_sv4si (__a);
}
3092 | ||
/* vabsq: lane-wise absolute value of a signed vector (VABS).  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabsq_s8 (int8x16_t __a)
{
  return __builtin_mve_vabsq_sv16qi (__a);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabsq_s16 (int16x8_t __a)
{
  return __builtin_mve_vabsq_sv8hi (__a);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabsq_s32 (int32x4_t __a)
{
  return __builtin_mve_vabsq_sv4si (__a);
}
3113 | ||
/* vclsq: lane-wise count of leading sign bits (VCLS); signed types only.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclsq_s8 (int8x16_t __a)
{
  return __builtin_mve_vclsq_sv16qi (__a);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclsq_s16 (int16x8_t __a)
{
  return __builtin_mve_vclsq_sv8hi (__a);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclsq_s32 (int32x4_t __a)
{
  return __builtin_mve_vclsq_sv4si (__a);
}
3134 | ||
/* vclzq (signed): lane-wise count of leading zero bits (VCLZ).  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclzq_s8 (int8x16_t __a)
{
  return __builtin_mve_vclzq_sv16qi (__a);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclzq_s16 (int16x8_t __a)
{
  return __builtin_mve_vclzq_sv8hi (__a);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclzq_s32 (int32x4_t __a)
{
  return __builtin_mve_vclzq_sv4si (__a);
}
3155 | ||
/* vnegq: lane-wise negation of a signed vector (VNEG).  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vnegq_s8 (int8x16_t __a)
{
  return __builtin_mve_vnegq_sv16qi (__a);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vnegq_s16 (int16x8_t __a)
{
  return __builtin_mve_vnegq_sv8hi (__a);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vnegq_s32 (int32x4_t __a)
{
  return __builtin_mve_vnegq_sv4si (__a);
}
3176 | ||
/* vaddlvq_s32: add all four 32-bit lanes into a 64-bit ("long") scalar
   sum (VADDLV).  */
__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddlvq_s32 (int32x4_t __a)
{
  return __builtin_mve_vaddlvq_sv4si (__a);
}
3183 | ||
/* vaddvq (signed): add all lanes into a 32-bit scalar sum (VADDV).  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvq_s8 (int8x16_t __a)
{
  return __builtin_mve_vaddvq_sv16qi (__a);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvq_s16 (int16x8_t __a)
{
  return __builtin_mve_vaddvq_sv8hi (__a);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvq_s32 (int32x4_t __a)
{
  return __builtin_mve_vaddvq_sv4si (__a);
}
3204 | ||
/* vmovlbq / vmovltq (signed): widen the bottom (even-numbered) or top
   (odd-numbered) lanes to twice the element width (VMOVLB / VMOVLT).  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovlbq_s8 (int8x16_t __a)
{
  return __builtin_mve_vmovlbq_sv16qi (__a);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovlbq_s16 (int16x8_t __a)
{
  return __builtin_mve_vmovlbq_sv8hi (__a);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovltq_s8 (int8x16_t __a)
{
  return __builtin_mve_vmovltq_sv16qi (__a);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovltq_s16 (int16x8_t __a)
{
  return __builtin_mve_vmovltq_sv8hi (__a);
}
3232 | ||
/* vmvnq (signed): lane-wise bitwise NOT (VMVN).  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_s8 (int8x16_t __a)
{
  return __builtin_mve_vmvnq_sv16qi (__a);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_s16 (int16x8_t __a)
{
  return __builtin_mve_vmvnq_sv8hi (__a);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_s32 (int32x4_t __a)
{
  return __builtin_mve_vmvnq_sv4si (__a);
}
3253 | ||
5db0eb95 SP |
/* vmvnq_n (signed): broadcast the bitwise complement of an immediate into
   every lane (VMVN with immediate encoding).
   NOTE(review): these take const int16_t/int32_t while the unsigned
   variants later in this file take const int — confirm this asymmetry
   against the ACLE signatures before relying on implicit conversions.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_n_s16 (const int16_t __imm)
{
  return __builtin_mve_vmvnq_n_sv8hi (__imm);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_n_s32 (const int32_t __imm)
{
  return __builtin_mve_vmvnq_n_sv4si (__imm);
}
3267 | ||
6df4618c SP |
/* vrev16q / vrev32q (signed): reverse the order of elements within each
   16-bit (VREV16) or 32-bit (VREV32) container.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev16q_s8 (int8x16_t __a)
{
  return __builtin_mve_vrev16q_sv16qi (__a);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev32q_s8 (int8x16_t __a)
{
  return __builtin_mve_vrev32q_sv16qi (__a);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev32q_s16 (int16x8_t __a)
{
  return __builtin_mve_vrev32q_sv8hi (__a);
}
3288 | ||
5db0eb95 SP |
/* vrev64q (signed): reverse the order of elements within each 64-bit
   container (VREV64).  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_s8 (int8x16_t __a)
{
  return __builtin_mve_vrev64q_sv16qi (__a);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_s16 (int16x8_t __a)
{
  return __builtin_mve_vrev64q_sv8hi (__a);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_s32 (int32x4_t __a)
{
  return __builtin_mve_vrev64q_sv4si (__a);
}
3309 | ||
6df4618c SP |
/* vqabsq / vqnegq: saturating lane-wise absolute value (VQABS) and
   negation (VQNEG); saturating so INT_MIN maps to INT_MAX instead of
   wrapping.  Signed types only.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqabsq_s8 (int8x16_t __a)
{
  return __builtin_mve_vqabsq_sv16qi (__a);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqabsq_s16 (int16x8_t __a)
{
  return __builtin_mve_vqabsq_sv8hi (__a);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqabsq_s32 (int32x4_t __a)
{
  return __builtin_mve_vqabsq_sv4si (__a);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqnegq_s8 (int8x16_t __a)
{
  return __builtin_mve_vqnegq_sv16qi (__a);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqnegq_s16 (int16x8_t __a)
{
  return __builtin_mve_vqnegq_sv8hi (__a);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqnegq_s32 (int32x4_t __a)
{
  return __builtin_mve_vqnegq_sv4si (__a);
}
3351 | ||
5db0eb95 SP |
/* vrev64q (unsigned): reverse elements within each 64-bit container
   (VREV64); unsigned-mode builtins.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_u8 (uint8x16_t __a)
{
  return __builtin_mve_vrev64q_uv16qi (__a);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_u16 (uint16x8_t __a)
{
  return __builtin_mve_vrev64q_uv8hi (__a);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_u32 (uint32x4_t __a)
{
  return __builtin_mve_vrev64q_uv4si (__a);
}
3372 | ||
6df4618c SP |
/* vmvnq (unsigned): lane-wise bitwise NOT (VMVN).  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_u8 (uint8x16_t __a)
{
  return __builtin_mve_vmvnq_uv16qi (__a);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_u16 (uint16x8_t __a)
{
  return __builtin_mve_vmvnq_uv8hi (__a);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_u32 (uint32x4_t __a)
{
  return __builtin_mve_vmvnq_uv4si (__a);
}
3393 | ||
/* vdupq_n (unsigned): broadcast the scalar __a into every lane (VDUP).  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdupq_n_u8 (uint8_t __a)
{
  return __builtin_mve_vdupq_n_uv16qi (__a);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdupq_n_u16 (uint16_t __a)
{
  return __builtin_mve_vdupq_n_uv8hi (__a);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdupq_n_u32 (uint32_t __a)
{
  return __builtin_mve_vdupq_n_uv4si (__a);
}
3414 | ||
/* vclzq (unsigned): lane-wise count of leading zero bits (VCLZ).  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclzq_u8 (uint8x16_t __a)
{
  return __builtin_mve_vclzq_uv16qi (__a);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclzq_u16 (uint16x8_t __a)
{
  return __builtin_mve_vclzq_uv8hi (__a);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclzq_u32 (uint32x4_t __a)
{
  return __builtin_mve_vclzq_uv4si (__a);
}
3435 | ||
/* vaddvq (unsigned): add all lanes into a 32-bit scalar sum (VADDV).  */
__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvq_u8 (uint8x16_t __a)
{
  return __builtin_mve_vaddvq_uv16qi (__a);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvq_u16 (uint16x8_t __a)
{
  return __builtin_mve_vaddvq_uv8hi (__a);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvq_u32 (uint32x4_t __a)
{
  return __builtin_mve_vaddvq_uv4si (__a);
}
3456 | ||
/* vrev32q (unsigned): reverse elements within each 32-bit container
   (VREV32).  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev32q_u8 (uint8x16_t __a)
{
  return __builtin_mve_vrev32q_uv16qi (__a);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev32q_u16 (uint16x8_t __a)
{
  return __builtin_mve_vrev32q_uv8hi (__a);
}
3470 | ||
/* vmovltq / vmovlbq (unsigned): zero-extend the top (odd) or bottom (even)
   lanes to twice the element width (VMOVLT / VMOVLB).  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovltq_u8 (uint8x16_t __a)
{
  return __builtin_mve_vmovltq_uv16qi (__a);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovltq_u16 (uint16x8_t __a)
{
  return __builtin_mve_vmovltq_uv8hi (__a);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovlbq_u8 (uint8x16_t __a)
{
  return __builtin_mve_vmovlbq_uv16qi (__a);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovlbq_u16 (uint16x8_t __a)
{
  return __builtin_mve_vmovlbq_uv8hi (__a);
}
3498 | ||
5db0eb95 SP |
/* vmvnq_n (unsigned): broadcast the bitwise complement of an immediate
   into every lane (VMVN with immediate).  Note the parameter here is
   const int, unlike the signed variants' const int16_t/int32_t.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_n_u16 (const int __imm)
{
  return __builtin_mve_vmvnq_n_uv8hi (__imm);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_n_u32 (const int __imm)
{
  return __builtin_mve_vmvnq_n_uv4si (__imm);
}
3512 | ||
6df4618c SP |
/* vrev16q_u8: reverse byte pairs within each 16-bit container (VREV16).  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev16q_u8 (uint8x16_t __a)
{
  return __builtin_mve_vrev16q_uv16qi (__a);
}

/* vaddlvq_u32: add all four 32-bit lanes into a 64-bit scalar sum
   (VADDLV).  */
__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddlvq_u32 (uint32x4_t __a)
{
  return __builtin_mve_vaddlvq_uv4si (__a);
}
3526 | ||
/* vctpNq: create a tail predicate (VCTP) whose first __a lanes are active,
   for N-bit element widths; result is the 16-bit MVE predicate type.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vctp16q (uint32_t __a)
{
  return __builtin_mve_vctp16qhi (__a);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vctp32q (uint32_t __a)
{
  return __builtin_mve_vctp32qhi (__a);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vctp64q (uint32_t __a)
{
  return __builtin_mve_vctp64qhi (__a);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vctp8q (uint32_t __a)
{
  return __builtin_mve_vctp8qhi (__a);
}

/* vpnot: bitwise invert a predicate (VPNOT).  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vpnot (mve_pred16_t __a)
{
  return __builtin_mve_vpnothi (__a);
}
3561 | ||
f166a8cd SP |
/* vcreateq: build a 128-bit vector from two 64-bit scalars __a and __b
   (presumably __a fills the low half and __b the high half, per ACLE —
   the ordering is fixed inside the builtin, not visible here).  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcreateq_u8 (uint64_t __a, uint64_t __b)
{
  return __builtin_mve_vcreateq_uv16qi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcreateq_u16 (uint64_t __a, uint64_t __b)
{
  return __builtin_mve_vcreateq_uv8hi (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcreateq_u32 (uint64_t __a, uint64_t __b)
{
  return __builtin_mve_vcreateq_uv4si (__a, __b);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcreateq_u64 (uint64_t __a, uint64_t __b)
{
  return __builtin_mve_vcreateq_uv2di (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcreateq_s8 (uint64_t __a, uint64_t __b)
{
  return __builtin_mve_vcreateq_sv16qi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcreateq_s16 (uint64_t __a, uint64_t __b)
{
  return __builtin_mve_vcreateq_sv8hi (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcreateq_s32 (uint64_t __a, uint64_t __b)
{
  return __builtin_mve_vcreateq_sv4si (__a, __b);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcreateq_s64 (uint64_t __a, uint64_t __b)
{
  return __builtin_mve_vcreateq_sv2di (__a, __b);
}
3617 | ||
/* vshrq_n: lane-wise right shift by the immediate __imm (VSHR);
   arithmetic shift for signed builtins, logical for unsigned ones.
   __imm must be a compile-time constant in the range the builtin
   accepts (enforced by the builtin expander).  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrq_n_s8 (int8x16_t __a, const int __imm)
{
  return __builtin_mve_vshrq_n_sv16qi (__a, __imm);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrq_n_s16 (int16x8_t __a, const int __imm)
{
  return __builtin_mve_vshrq_n_sv8hi (__a, __imm);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrq_n_s32 (int32x4_t __a, const int __imm)
{
  return __builtin_mve_vshrq_n_sv4si (__a, __imm);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrq_n_u8 (uint8x16_t __a, const int __imm)
{
  return __builtin_mve_vshrq_n_uv16qi (__a, __imm);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrq_n_u16 (uint16x8_t __a, const int __imm)
{
  return __builtin_mve_vshrq_n_uv8hi (__a, __imm);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrq_n_u32 (uint32x4_t __a, const int __imm)
{
  return __builtin_mve_vshrq_n_uv4si (__a, __imm);
}
d71dba7b SP |
/* vaddlvq_p: predicated add-across-vector into a 64-bit scalar; only
   lanes enabled in predicate __p contribute (VADDLVT/VADDLVE form).  */
__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddlvq_p_s32 (int32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vaddlvq_p_sv4si (__a, __p);
}

__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddlvq_p_u32 (uint32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vaddlvq_p_uv4si (__a, __p);
}
3672 | ||
e154009f | 3673 | __extension__ extern __inline mve_pred16_t |
d71dba7b SP |
3674 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
3675 | __arm_vcmpneq_s8 (int8x16_t __a, int8x16_t __b) | |
3676 | { | |
929056a7 | 3677 | return __builtin_mve_vcmpneq_v16qi (__a, __b); |
d71dba7b SP |
3678 | } |
3679 | ||
3680 | __extension__ extern __inline mve_pred16_t | |
3681 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3682 | __arm_vcmpneq_s16 (int16x8_t __a, int16x8_t __b) | |
3683 | { | |
929056a7 | 3684 | return __builtin_mve_vcmpneq_v8hi (__a, __b); |
d71dba7b SP |
3685 | } |
3686 | ||
3687 | __extension__ extern __inline mve_pred16_t | |
3688 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3689 | __arm_vcmpneq_s32 (int32x4_t __a, int32x4_t __b) | |
3690 | { | |
929056a7 | 3691 | return __builtin_mve_vcmpneq_v4si (__a, __b); |
d71dba7b SP |
3692 | } |
3693 | ||
3694 | __extension__ extern __inline mve_pred16_t | |
3695 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3696 | __arm_vcmpneq_u8 (uint8x16_t __a, uint8x16_t __b) | |
3697 | { | |
929056a7 | 3698 | return __builtin_mve_vcmpneq_v16qi ((int8x16_t)__a, (int8x16_t)__b); |
d71dba7b SP |
3699 | } |
3700 | ||
3701 | __extension__ extern __inline mve_pred16_t | |
3702 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3703 | __arm_vcmpneq_u16 (uint16x8_t __a, uint16x8_t __b) | |
3704 | { | |
929056a7 | 3705 | return __builtin_mve_vcmpneq_v8hi ((int16x8_t)__a, (int16x8_t)__b); |
d71dba7b SP |
3706 | } |
3707 | ||
3708 | __extension__ extern __inline mve_pred16_t | |
3709 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3710 | __arm_vcmpneq_u32 (uint32x4_t __a, uint32x4_t __b) | |
3711 | { | |
929056a7 | 3712 | return __builtin_mve_vcmpneq_v4si ((int32x4_t)__a, (int32x4_t)__b); |
d71dba7b SP |
3713 | } |
3714 | ||
/* vshlq: lane-wise shift of __a by the per-lane signed shift counts
   in __b (VSHL).  Note the shift-count vector __b is signed even for
   the unsigned data variants.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vshlq_sv16qi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vshlq_sv8hi (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vshlq_sv4si (__a, __b);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_u8 (uint8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vshlq_uv16qi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_u16 (uint16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vshlq_uv8hi (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_u32 (uint32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vshlq_uv4si (__a, __b);
}
33203b4c SP |
/* uint8x16_t binary arithmetic family.  The _n variants take a scalar
   second operand that is broadcast across lanes.  */

/* vsubq: lane-wise subtraction (VSUB).  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_u8 (uint8x16_t __a, uint8x16_t __b)
{
  return __builtin_mve_vsubq_uv16qi (__a, __b);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_n_u8 (uint8x16_t __a, uint8_t __b)
{
  return __builtin_mve_vsubq_n_uv16qi (__a, __b);
}

/* vrmulhq: rounding multiply returning the high half of each product
   (VRMULH).  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq_u8 (uint8x16_t __a, uint8x16_t __b)
{
  return __builtin_mve_vrmulhq_uv16qi (__a, __b);
}

/* vrhaddq: rounding halving addition (VRHADD).  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq_u8 (uint8x16_t __a, uint8x16_t __b)
{
  return __builtin_mve_vrhaddq_uv16qi (__a, __b);
}

/* vqsubq / vqaddq: saturating subtraction and addition (VQSUB/VQADD).  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_u8 (uint8x16_t __a, uint8x16_t __b)
{
  return __builtin_mve_vqsubq_uv16qi (__a, __b);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_n_u8 (uint8x16_t __a, uint8_t __b)
{
  return __builtin_mve_vqsubq_n_uv16qi (__a, __b);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_u8 (uint8x16_t __a, uint8x16_t __b)
{
  return __builtin_mve_vqaddq_uv16qi (__a, __b);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_n_u8 (uint8x16_t __a, uint8_t __b)
{
  return __builtin_mve_vqaddq_n_uv16qi (__a, __b);
}

/* vorrq / vornq: bitwise OR, and OR with complemented __b (VORR/VORN).  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_u8 (uint8x16_t __a, uint8x16_t __b)
{
  return __builtin_mve_vorrq_uv16qi (__a, __b);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_u8 (uint8x16_t __a, uint8x16_t __b)
{
  return __builtin_mve_vornq_uv16qi (__a, __b);
}

/* vmulq: lane-wise multiplication keeping the low half (VMUL).  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_u8 (uint8x16_t __a, uint8x16_t __b)
{
  return __builtin_mve_vmulq_uv16qi (__a, __b);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_n_u8 (uint8x16_t __a, uint8_t __b)
{
  return __builtin_mve_vmulq_n_uv16qi (__a, __b);
}

/* vmulltq_int / vmullbq_int: widening multiply of the top (odd) or bottom
   (even) lanes, producing double-width results (VMULLT/VMULLB).  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int_u8 (uint8x16_t __a, uint8x16_t __b)
{
  return __builtin_mve_vmulltq_int_uv16qi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_int_u8 (uint8x16_t __a, uint8x16_t __b)
{
  return __builtin_mve_vmullbq_int_uv16qi (__a, __b);
}

/* vmulhq: multiply returning the high half of each product (VMULH).  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq_u8 (uint8x16_t __a, uint8x16_t __b)
{
  return __builtin_mve_vmulhq_uv16qi (__a, __b);
}

/* vmladavq: multiply corresponding lanes and accumulate all products into
   a scalar (VMLADAV).  */
__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavq_u8 (uint8x16_t __a, uint8x16_t __b)
{
  return __builtin_mve_vmladavq_uv16qi (__a, __b);
}
3867 | ||
3868 | __extension__ extern __inline uint8_t | |
3869 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3870 | __arm_vminvq_u8 (uint8_t __a, uint8x16_t __b) | |
3871 | { | |
3872 | return __builtin_mve_vminvq_uv16qi (__a, __b); | |
3873 | } | |
3874 | ||
3875 | __extension__ extern __inline uint8x16_t | |
3876 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3877 | __arm_vminq_u8 (uint8x16_t __a, uint8x16_t __b) | |
3878 | { | |
3879 | return __builtin_mve_vminq_uv16qi (__a, __b); | |
3880 | } | |
3881 | ||
3882 | __extension__ extern __inline uint8_t | |
3883 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3884 | __arm_vmaxvq_u8 (uint8_t __a, uint8x16_t __b) | |
3885 | { | |
3886 | return __builtin_mve_vmaxvq_uv16qi (__a, __b); | |
3887 | } | |
3888 | ||
3889 | __extension__ extern __inline uint8x16_t | |
3890 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3891 | __arm_vmaxq_u8 (uint8x16_t __a, uint8x16_t __b) | |
3892 | { | |
3893 | return __builtin_mve_vmaxq_uv16qi (__a, __b); | |
3894 | } | |
3895 | ||
3896 | __extension__ extern __inline uint8x16_t | |
3897 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3898 | __arm_vhsubq_u8 (uint8x16_t __a, uint8x16_t __b) | |
3899 | { | |
3900 | return __builtin_mve_vhsubq_uv16qi (__a, __b); | |
3901 | } | |
3902 | ||
3903 | __extension__ extern __inline uint8x16_t | |
3904 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3905 | __arm_vhsubq_n_u8 (uint8x16_t __a, uint8_t __b) | |
3906 | { | |
3907 | return __builtin_mve_vhsubq_n_uv16qi (__a, __b); | |
3908 | } | |
3909 | ||
3910 | __extension__ extern __inline uint8x16_t | |
3911 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3912 | __arm_vhaddq_u8 (uint8x16_t __a, uint8x16_t __b) | |
3913 | { | |
3914 | return __builtin_mve_vhaddq_uv16qi (__a, __b); | |
3915 | } | |
3916 | ||
3917 | __extension__ extern __inline uint8x16_t | |
3918 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3919 | __arm_vhaddq_n_u8 (uint8x16_t __a, uint8_t __b) | |
3920 | { | |
3921 | return __builtin_mve_vhaddq_n_uv16qi (__a, __b); | |
3922 | } | |
3923 | ||
3924 | __extension__ extern __inline uint8x16_t | |
3925 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3926 | __arm_veorq_u8 (uint8x16_t __a, uint8x16_t __b) | |
3927 | { | |
3928 | return __builtin_mve_veorq_uv16qi (__a, __b); | |
3929 | } | |
3930 | ||
3931 | __extension__ extern __inline mve_pred16_t | |
3932 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3933 | __arm_vcmpneq_n_u8 (uint8x16_t __a, uint8_t __b) | |
3934 | { | |
929056a7 | 3935 | return __builtin_mve_vcmpneq_n_v16qi ((int8x16_t)__a, (int8_t)__b); |
33203b4c SP |
3936 | } |
3937 | ||
3938 | __extension__ extern __inline mve_pred16_t | |
3939 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3940 | __arm_vcmphiq_u8 (uint8x16_t __a, uint8x16_t __b) | |
3941 | { | |
929056a7 | 3942 | return __builtin_mve_vcmphiq_v16qi (__a, __b); |
33203b4c SP |
3943 | } |
3944 | ||
3945 | __extension__ extern __inline mve_pred16_t | |
3946 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3947 | __arm_vcmphiq_n_u8 (uint8x16_t __a, uint8_t __b) | |
3948 | { | |
929056a7 | 3949 | return __builtin_mve_vcmphiq_n_v16qi (__a, __b); |
33203b4c SP |
3950 | } |
3951 | ||
3952 | __extension__ extern __inline mve_pred16_t | |
3953 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3954 | __arm_vcmpeqq_u8 (uint8x16_t __a, uint8x16_t __b) | |
3955 | { | |
929056a7 | 3956 | return __builtin_mve_vcmpeqq_v16qi ((int8x16_t)__a, (int8x16_t)__b); |
33203b4c SP |
3957 | } |
3958 | ||
3959 | __extension__ extern __inline mve_pred16_t | |
3960 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3961 | __arm_vcmpeqq_n_u8 (uint8x16_t __a, uint8_t __b) | |
3962 | { | |
929056a7 | 3963 | return __builtin_mve_vcmpeqq_n_v16qi ((int8x16_t)__a, (int8_t)__b); |
33203b4c SP |
3964 | } |
3965 | ||
3966 | __extension__ extern __inline mve_pred16_t | |
3967 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3968 | __arm_vcmpcsq_u8 (uint8x16_t __a, uint8x16_t __b) | |
3969 | { | |
929056a7 | 3970 | return __builtin_mve_vcmpcsq_v16qi (__a, __b); |
33203b4c SP |
3971 | } |
3972 | ||
3973 | __extension__ extern __inline mve_pred16_t | |
3974 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3975 | __arm_vcmpcsq_n_u8 (uint8x16_t __a, uint8_t __b) | |
3976 | { | |
929056a7 | 3977 | return __builtin_mve_vcmpcsq_n_v16qi (__a, __b); |
33203b4c SP |
3978 | } |
3979 | ||
3980 | __extension__ extern __inline uint8x16_t | |
3981 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3982 | __arm_vcaddq_rot90_u8 (uint8x16_t __a, uint8x16_t __b) | |
3983 | { | |
9732dc85 TC |
3984 | return (uint8x16_t) |
3985 | __builtin_mve_vcaddq_rot90v16qi ((int8x16_t)__a, (int8x16_t)__b); | |
33203b4c SP |
3986 | } |
3987 | ||
3988 | __extension__ extern __inline uint8x16_t | |
3989 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3990 | __arm_vcaddq_rot270_u8 (uint8x16_t __a, uint8x16_t __b) | |
3991 | { | |
9732dc85 TC |
3992 | return (uint8x16_t) |
3993 | __builtin_mve_vcaddq_rot270v16qi ((int8x16_t)__a, (int8x16_t)__b); | |
33203b4c SP |
3994 | } |
3995 | ||
3996 | __extension__ extern __inline uint8x16_t | |
3997 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
3998 | __arm_vbicq_u8 (uint8x16_t __a, uint8x16_t __b) | |
3999 | { | |
4000 | return __builtin_mve_vbicq_uv16qi (__a, __b); | |
4001 | } | |
4002 | ||
4003 | __extension__ extern __inline uint8x16_t | |
4004 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4005 | __arm_vandq_u8 (uint8x16_t __a, uint8x16_t __b) | |
4006 | { | |
4007 | return __builtin_mve_vandq_uv16qi (__a, __b); | |
4008 | } | |
4009 | ||
4010 | __extension__ extern __inline uint32_t | |
4011 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4012 | __arm_vaddvq_p_u8 (uint8x16_t __a, mve_pred16_t __p) | |
4013 | { | |
4014 | return __builtin_mve_vaddvq_p_uv16qi (__a, __p); | |
4015 | } | |
4016 | ||
4017 | __extension__ extern __inline uint32_t | |
4018 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4019 | __arm_vaddvaq_u8 (uint32_t __a, uint8x16_t __b) | |
4020 | { | |
4021 | return __builtin_mve_vaddvaq_uv16qi (__a, __b); | |
4022 | } | |
4023 | ||
4024 | __extension__ extern __inline uint8x16_t | |
4025 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4026 | __arm_vaddq_n_u8 (uint8x16_t __a, uint8_t __b) | |
4027 | { | |
4028 | return __builtin_mve_vaddq_n_uv16qi (__a, __b); | |
4029 | } | |
4030 | ||
4031 | __extension__ extern __inline uint8x16_t | |
4032 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4033 | __arm_vabdq_u8 (uint8x16_t __a, uint8x16_t __b) | |
4034 | { | |
4035 | return __builtin_mve_vabdq_uv16qi (__a, __b); | |
4036 | } | |
4037 | ||
4038 | __extension__ extern __inline uint8x16_t | |
4039 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4040 | __arm_vshlq_r_u8 (uint8x16_t __a, int32_t __b) | |
4041 | { | |
4042 | return __builtin_mve_vshlq_r_uv16qi (__a, __b); | |
4043 | } | |
4044 | ||
4045 | __extension__ extern __inline uint8x16_t | |
4046 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4047 | __arm_vrshlq_u8 (uint8x16_t __a, int8x16_t __b) | |
4048 | { | |
4049 | return __builtin_mve_vrshlq_uv16qi (__a, __b); | |
4050 | } | |
4051 | ||
4052 | __extension__ extern __inline uint8x16_t | |
4053 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4054 | __arm_vrshlq_n_u8 (uint8x16_t __a, int32_t __b) | |
4055 | { | |
4056 | return __builtin_mve_vrshlq_n_uv16qi (__a, __b); | |
4057 | } | |
4058 | ||
4059 | __extension__ extern __inline uint8x16_t | |
4060 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4061 | __arm_vqshlq_u8 (uint8x16_t __a, int8x16_t __b) | |
4062 | { | |
4063 | return __builtin_mve_vqshlq_uv16qi (__a, __b); | |
4064 | } | |
4065 | ||
4066 | __extension__ extern __inline uint8x16_t | |
4067 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4068 | __arm_vqshlq_r_u8 (uint8x16_t __a, int32_t __b) | |
4069 | { | |
4070 | return __builtin_mve_vqshlq_r_uv16qi (__a, __b); | |
4071 | } | |
4072 | ||
4073 | __extension__ extern __inline uint8x16_t | |
4074 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4075 | __arm_vqrshlq_u8 (uint8x16_t __a, int8x16_t __b) | |
4076 | { | |
4077 | return __builtin_mve_vqrshlq_uv16qi (__a, __b); | |
4078 | } | |
4079 | ||
4080 | __extension__ extern __inline uint8x16_t | |
4081 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4082 | __arm_vqrshlq_n_u8 (uint8x16_t __a, int32_t __b) | |
4083 | { | |
4084 | return __builtin_mve_vqrshlq_n_uv16qi (__a, __b); | |
4085 | } | |
4086 | ||
4087 | __extension__ extern __inline uint8_t | |
4088 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4089 | __arm_vminavq_s8 (uint8_t __a, int8x16_t __b) | |
4090 | { | |
4091 | return __builtin_mve_vminavq_sv16qi (__a, __b); | |
4092 | } | |
4093 | ||
4094 | __extension__ extern __inline uint8x16_t | |
4095 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4096 | __arm_vminaq_s8 (uint8x16_t __a, int8x16_t __b) | |
4097 | { | |
4098 | return __builtin_mve_vminaq_sv16qi (__a, __b); | |
4099 | } | |
4100 | ||
4101 | __extension__ extern __inline uint8_t | |
4102 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4103 | __arm_vmaxavq_s8 (uint8_t __a, int8x16_t __b) | |
4104 | { | |
4105 | return __builtin_mve_vmaxavq_sv16qi (__a, __b); | |
4106 | } | |
4107 | ||
4108 | __extension__ extern __inline uint8x16_t | |
4109 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4110 | __arm_vmaxaq_s8 (uint8x16_t __a, int8x16_t __b) | |
4111 | { | |
4112 | return __builtin_mve_vmaxaq_sv16qi (__a, __b); | |
4113 | } | |
4114 | ||
4115 | __extension__ extern __inline uint8x16_t | |
4116 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4117 | __arm_vbrsrq_n_u8 (uint8x16_t __a, int32_t __b) | |
4118 | { | |
4119 | return __builtin_mve_vbrsrq_n_uv16qi (__a, __b); | |
4120 | } | |
4121 | ||
4122 | __extension__ extern __inline uint8x16_t | |
4123 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4124 | __arm_vshlq_n_u8 (uint8x16_t __a, const int __imm) | |
4125 | { | |
4126 | return __builtin_mve_vshlq_n_uv16qi (__a, __imm); | |
4127 | } | |
4128 | ||
4129 | __extension__ extern __inline uint8x16_t | |
4130 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4131 | __arm_vrshrq_n_u8 (uint8x16_t __a, const int __imm) | |
4132 | { | |
4133 | return __builtin_mve_vrshrq_n_uv16qi (__a, __imm); | |
4134 | } | |
4135 | ||
4136 | __extension__ extern __inline uint8x16_t | |
4137 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4138 | __arm_vqshlq_n_u8 (uint8x16_t __a, const int __imm) | |
4139 | { | |
4140 | return __builtin_mve_vqshlq_n_uv16qi (__a, __imm); | |
4141 | } | |
4142 | ||
4143 | __extension__ extern __inline mve_pred16_t | |
4144 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4145 | __arm_vcmpneq_n_s8 (int8x16_t __a, int8_t __b) | |
4146 | { | |
929056a7 | 4147 | return __builtin_mve_vcmpneq_n_v16qi (__a, __b); |
33203b4c SP |
4148 | } |
4149 | ||
4150 | __extension__ extern __inline mve_pred16_t | |
4151 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4152 | __arm_vcmpltq_s8 (int8x16_t __a, int8x16_t __b) | |
4153 | { | |
929056a7 | 4154 | return __builtin_mve_vcmpltq_v16qi (__a, __b); |
33203b4c SP |
4155 | } |
4156 | ||
4157 | __extension__ extern __inline mve_pred16_t | |
4158 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4159 | __arm_vcmpltq_n_s8 (int8x16_t __a, int8_t __b) | |
4160 | { | |
929056a7 | 4161 | return __builtin_mve_vcmpltq_n_v16qi (__a, __b); |
33203b4c SP |
4162 | } |
4163 | ||
4164 | __extension__ extern __inline mve_pred16_t | |
4165 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4166 | __arm_vcmpleq_s8 (int8x16_t __a, int8x16_t __b) | |
4167 | { | |
929056a7 | 4168 | return __builtin_mve_vcmpleq_v16qi (__a, __b); |
33203b4c SP |
4169 | } |
4170 | ||
4171 | __extension__ extern __inline mve_pred16_t | |
4172 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4173 | __arm_vcmpleq_n_s8 (int8x16_t __a, int8_t __b) | |
4174 | { | |
929056a7 | 4175 | return __builtin_mve_vcmpleq_n_v16qi (__a, __b); |
33203b4c SP |
4176 | } |
4177 | ||
4178 | __extension__ extern __inline mve_pred16_t | |
4179 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4180 | __arm_vcmpgtq_s8 (int8x16_t __a, int8x16_t __b) | |
4181 | { | |
929056a7 | 4182 | return __builtin_mve_vcmpgtq_v16qi (__a, __b); |
33203b4c SP |
4183 | } |
4184 | ||
4185 | __extension__ extern __inline mve_pred16_t | |
4186 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4187 | __arm_vcmpgtq_n_s8 (int8x16_t __a, int8_t __b) | |
4188 | { | |
929056a7 | 4189 | return __builtin_mve_vcmpgtq_n_v16qi (__a, __b); |
33203b4c SP |
4190 | } |
4191 | ||
4192 | __extension__ extern __inline mve_pred16_t | |
4193 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4194 | __arm_vcmpgeq_s8 (int8x16_t __a, int8x16_t __b) | |
4195 | { | |
929056a7 | 4196 | return __builtin_mve_vcmpgeq_v16qi (__a, __b); |
33203b4c SP |
4197 | } |
4198 | ||
4199 | __extension__ extern __inline mve_pred16_t | |
4200 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4201 | __arm_vcmpgeq_n_s8 (int8x16_t __a, int8_t __b) | |
4202 | { | |
929056a7 | 4203 | return __builtin_mve_vcmpgeq_n_v16qi (__a, __b); |
33203b4c SP |
4204 | } |
4205 | ||
4206 | __extension__ extern __inline mve_pred16_t | |
4207 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4208 | __arm_vcmpeqq_s8 (int8x16_t __a, int8x16_t __b) | |
4209 | { | |
929056a7 | 4210 | return __builtin_mve_vcmpeqq_v16qi (__a, __b); |
33203b4c SP |
4211 | } |
4212 | ||
4213 | __extension__ extern __inline mve_pred16_t | |
4214 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4215 | __arm_vcmpeqq_n_s8 (int8x16_t __a, int8_t __b) | |
4216 | { | |
929056a7 | 4217 | return __builtin_mve_vcmpeqq_n_v16qi (__a, __b); |
33203b4c SP |
4218 | } |
4219 | ||
4220 | __extension__ extern __inline uint8x16_t | |
4221 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4222 | __arm_vqshluq_n_s8 (int8x16_t __a, const int __imm) | |
4223 | { | |
4224 | return __builtin_mve_vqshluq_n_sv16qi (__a, __imm); | |
4225 | } | |
4226 | ||
4227 | __extension__ extern __inline int32_t | |
4228 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4229 | __arm_vaddvq_p_s8 (int8x16_t __a, mve_pred16_t __p) | |
4230 | { | |
4231 | return __builtin_mve_vaddvq_p_sv16qi (__a, __p); | |
4232 | } | |
4233 | ||
4234 | __extension__ extern __inline int8x16_t | |
4235 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4236 | __arm_vsubq_s8 (int8x16_t __a, int8x16_t __b) | |
4237 | { | |
4238 | return __builtin_mve_vsubq_sv16qi (__a, __b); | |
4239 | } | |
4240 | ||
4241 | __extension__ extern __inline int8x16_t | |
4242 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4243 | __arm_vsubq_n_s8 (int8x16_t __a, int8_t __b) | |
4244 | { | |
4245 | return __builtin_mve_vsubq_n_sv16qi (__a, __b); | |
4246 | } | |
4247 | ||
4248 | __extension__ extern __inline int8x16_t | |
4249 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4250 | __arm_vshlq_r_s8 (int8x16_t __a, int32_t __b) | |
4251 | { | |
4252 | return __builtin_mve_vshlq_r_sv16qi (__a, __b); | |
4253 | } | |
4254 | ||
4255 | __extension__ extern __inline int8x16_t | |
4256 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4257 | __arm_vrshlq_s8 (int8x16_t __a, int8x16_t __b) | |
4258 | { | |
4259 | return __builtin_mve_vrshlq_sv16qi (__a, __b); | |
4260 | } | |
4261 | ||
4262 | __extension__ extern __inline int8x16_t | |
4263 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4264 | __arm_vrshlq_n_s8 (int8x16_t __a, int32_t __b) | |
4265 | { | |
4266 | return __builtin_mve_vrshlq_n_sv16qi (__a, __b); | |
4267 | } | |
4268 | ||
4269 | __extension__ extern __inline int8x16_t | |
4270 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4271 | __arm_vrmulhq_s8 (int8x16_t __a, int8x16_t __b) | |
4272 | { | |
4273 | return __builtin_mve_vrmulhq_sv16qi (__a, __b); | |
4274 | } | |
4275 | ||
4276 | __extension__ extern __inline int8x16_t | |
4277 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4278 | __arm_vrhaddq_s8 (int8x16_t __a, int8x16_t __b) | |
4279 | { | |
4280 | return __builtin_mve_vrhaddq_sv16qi (__a, __b); | |
4281 | } | |
4282 | ||
4283 | __extension__ extern __inline int8x16_t | |
4284 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4285 | __arm_vqsubq_s8 (int8x16_t __a, int8x16_t __b) | |
4286 | { | |
4287 | return __builtin_mve_vqsubq_sv16qi (__a, __b); | |
4288 | } | |
4289 | ||
4290 | __extension__ extern __inline int8x16_t | |
4291 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4292 | __arm_vqsubq_n_s8 (int8x16_t __a, int8_t __b) | |
4293 | { | |
4294 | return __builtin_mve_vqsubq_n_sv16qi (__a, __b); | |
4295 | } | |
4296 | ||
4297 | __extension__ extern __inline int8x16_t | |
4298 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4299 | __arm_vqshlq_s8 (int8x16_t __a, int8x16_t __b) | |
4300 | { | |
4301 | return __builtin_mve_vqshlq_sv16qi (__a, __b); | |
4302 | } | |
4303 | ||
4304 | __extension__ extern __inline int8x16_t | |
4305 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4306 | __arm_vqshlq_r_s8 (int8x16_t __a, int32_t __b) | |
4307 | { | |
4308 | return __builtin_mve_vqshlq_r_sv16qi (__a, __b); | |
4309 | } | |
4310 | ||
4311 | __extension__ extern __inline int8x16_t | |
4312 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4313 | __arm_vqrshlq_s8 (int8x16_t __a, int8x16_t __b) | |
4314 | { | |
4315 | return __builtin_mve_vqrshlq_sv16qi (__a, __b); | |
4316 | } | |
4317 | ||
4318 | __extension__ extern __inline int8x16_t | |
4319 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4320 | __arm_vqrshlq_n_s8 (int8x16_t __a, int32_t __b) | |
4321 | { | |
4322 | return __builtin_mve_vqrshlq_n_sv16qi (__a, __b); | |
4323 | } | |
4324 | ||
4325 | __extension__ extern __inline int8x16_t | |
4326 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4327 | __arm_vqrdmulhq_s8 (int8x16_t __a, int8x16_t __b) | |
4328 | { | |
4329 | return __builtin_mve_vqrdmulhq_sv16qi (__a, __b); | |
4330 | } | |
4331 | ||
4332 | __extension__ extern __inline int8x16_t | |
4333 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4334 | __arm_vqrdmulhq_n_s8 (int8x16_t __a, int8_t __b) | |
4335 | { | |
4336 | return __builtin_mve_vqrdmulhq_n_sv16qi (__a, __b); | |
4337 | } | |
4338 | ||
4339 | __extension__ extern __inline int8x16_t | |
4340 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4341 | __arm_vqdmulhq_s8 (int8x16_t __a, int8x16_t __b) | |
4342 | { | |
4343 | return __builtin_mve_vqdmulhq_sv16qi (__a, __b); | |
4344 | } | |
4345 | ||
4346 | __extension__ extern __inline int8x16_t | |
4347 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4348 | __arm_vqdmulhq_n_s8 (int8x16_t __a, int8_t __b) | |
4349 | { | |
4350 | return __builtin_mve_vqdmulhq_n_sv16qi (__a, __b); | |
4351 | } | |
4352 | ||
4353 | __extension__ extern __inline int8x16_t | |
4354 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4355 | __arm_vqaddq_s8 (int8x16_t __a, int8x16_t __b) | |
4356 | { | |
4357 | return __builtin_mve_vqaddq_sv16qi (__a, __b); | |
4358 | } | |
4359 | ||
4360 | __extension__ extern __inline int8x16_t | |
4361 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4362 | __arm_vqaddq_n_s8 (int8x16_t __a, int8_t __b) | |
4363 | { | |
4364 | return __builtin_mve_vqaddq_n_sv16qi (__a, __b); | |
4365 | } | |
4366 | ||
4367 | __extension__ extern __inline int8x16_t | |
4368 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4369 | __arm_vorrq_s8 (int8x16_t __a, int8x16_t __b) | |
4370 | { | |
4371 | return __builtin_mve_vorrq_sv16qi (__a, __b); | |
4372 | } | |
4373 | ||
4374 | __extension__ extern __inline int8x16_t | |
4375 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4376 | __arm_vornq_s8 (int8x16_t __a, int8x16_t __b) | |
4377 | { | |
4378 | return __builtin_mve_vornq_sv16qi (__a, __b); | |
4379 | } | |
4380 | ||
4381 | __extension__ extern __inline int8x16_t | |
4382 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4383 | __arm_vmulq_s8 (int8x16_t __a, int8x16_t __b) | |
4384 | { | |
4385 | return __builtin_mve_vmulq_sv16qi (__a, __b); | |
4386 | } | |
4387 | ||
4388 | __extension__ extern __inline int8x16_t | |
4389 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4390 | __arm_vmulq_n_s8 (int8x16_t __a, int8_t __b) | |
4391 | { | |
4392 | return __builtin_mve_vmulq_n_sv16qi (__a, __b); | |
4393 | } | |
4394 | ||
4395 | __extension__ extern __inline int16x8_t | |
4396 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4397 | __arm_vmulltq_int_s8 (int8x16_t __a, int8x16_t __b) | |
4398 | { | |
4399 | return __builtin_mve_vmulltq_int_sv16qi (__a, __b); | |
4400 | } | |
4401 | ||
4402 | __extension__ extern __inline int16x8_t | |
4403 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4404 | __arm_vmullbq_int_s8 (int8x16_t __a, int8x16_t __b) | |
4405 | { | |
4406 | return __builtin_mve_vmullbq_int_sv16qi (__a, __b); | |
4407 | } | |
4408 | ||
4409 | __extension__ extern __inline int8x16_t | |
4410 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4411 | __arm_vmulhq_s8 (int8x16_t __a, int8x16_t __b) | |
4412 | { | |
4413 | return __builtin_mve_vmulhq_sv16qi (__a, __b); | |
4414 | } | |
4415 | ||
4416 | __extension__ extern __inline int32_t | |
4417 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4418 | __arm_vmlsdavxq_s8 (int8x16_t __a, int8x16_t __b) | |
4419 | { | |
4420 | return __builtin_mve_vmlsdavxq_sv16qi (__a, __b); | |
4421 | } | |
4422 | ||
4423 | __extension__ extern __inline int32_t | |
4424 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4425 | __arm_vmlsdavq_s8 (int8x16_t __a, int8x16_t __b) | |
4426 | { | |
4427 | return __builtin_mve_vmlsdavq_sv16qi (__a, __b); | |
4428 | } | |
4429 | ||
4430 | __extension__ extern __inline int32_t | |
4431 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4432 | __arm_vmladavxq_s8 (int8x16_t __a, int8x16_t __b) | |
4433 | { | |
4434 | return __builtin_mve_vmladavxq_sv16qi (__a, __b); | |
4435 | } | |
4436 | ||
4437 | __extension__ extern __inline int32_t | |
4438 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4439 | __arm_vmladavq_s8 (int8x16_t __a, int8x16_t __b) | |
4440 | { | |
4441 | return __builtin_mve_vmladavq_sv16qi (__a, __b); | |
4442 | } | |
4443 | ||
4444 | __extension__ extern __inline int8_t | |
4445 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4446 | __arm_vminvq_s8 (int8_t __a, int8x16_t __b) | |
4447 | { | |
4448 | return __builtin_mve_vminvq_sv16qi (__a, __b); | |
4449 | } | |
4450 | ||
4451 | __extension__ extern __inline int8x16_t | |
4452 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4453 | __arm_vminq_s8 (int8x16_t __a, int8x16_t __b) | |
4454 | { | |
4455 | return __builtin_mve_vminq_sv16qi (__a, __b); | |
4456 | } | |
4457 | ||
4458 | __extension__ extern __inline int8_t | |
4459 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4460 | __arm_vmaxvq_s8 (int8_t __a, int8x16_t __b) | |
4461 | { | |
4462 | return __builtin_mve_vmaxvq_sv16qi (__a, __b); | |
4463 | } | |
4464 | ||
4465 | __extension__ extern __inline int8x16_t | |
4466 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4467 | __arm_vmaxq_s8 (int8x16_t __a, int8x16_t __b) | |
4468 | { | |
4469 | return __builtin_mve_vmaxq_sv16qi (__a, __b); | |
4470 | } | |
4471 | ||
4472 | __extension__ extern __inline int8x16_t | |
4473 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4474 | __arm_vhsubq_s8 (int8x16_t __a, int8x16_t __b) | |
4475 | { | |
4476 | return __builtin_mve_vhsubq_sv16qi (__a, __b); | |
4477 | } | |
4478 | ||
4479 | __extension__ extern __inline int8x16_t | |
4480 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4481 | __arm_vhsubq_n_s8 (int8x16_t __a, int8_t __b) | |
4482 | { | |
4483 | return __builtin_mve_vhsubq_n_sv16qi (__a, __b); | |
4484 | } | |
4485 | ||
4486 | __extension__ extern __inline int8x16_t | |
4487 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4488 | __arm_vhcaddq_rot90_s8 (int8x16_t __a, int8x16_t __b) | |
4489 | { | |
4490 | return __builtin_mve_vhcaddq_rot90_sv16qi (__a, __b); | |
4491 | } | |
4492 | ||
4493 | __extension__ extern __inline int8x16_t | |
4494 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4495 | __arm_vhcaddq_rot270_s8 (int8x16_t __a, int8x16_t __b) | |
4496 | { | |
4497 | return __builtin_mve_vhcaddq_rot270_sv16qi (__a, __b); | |
4498 | } | |
4499 | ||
4500 | __extension__ extern __inline int8x16_t | |
4501 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4502 | __arm_vhaddq_s8 (int8x16_t __a, int8x16_t __b) | |
4503 | { | |
4504 | return __builtin_mve_vhaddq_sv16qi (__a, __b); | |
4505 | } | |
4506 | ||
4507 | __extension__ extern __inline int8x16_t | |
4508 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4509 | __arm_vhaddq_n_s8 (int8x16_t __a, int8_t __b) | |
4510 | { | |
4511 | return __builtin_mve_vhaddq_n_sv16qi (__a, __b); | |
4512 | } | |
4513 | ||
4514 | __extension__ extern __inline int8x16_t | |
4515 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4516 | __arm_veorq_s8 (int8x16_t __a, int8x16_t __b) | |
4517 | { | |
4518 | return __builtin_mve_veorq_sv16qi (__a, __b); | |
4519 | } | |
4520 | ||
4521 | __extension__ extern __inline int8x16_t | |
4522 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4523 | __arm_vcaddq_rot90_s8 (int8x16_t __a, int8x16_t __b) | |
4524 | { | |
9732dc85 | 4525 | return __builtin_mve_vcaddq_rot90v16qi (__a, __b); |
33203b4c SP |
4526 | } |
4527 | ||
4528 | __extension__ extern __inline int8x16_t | |
4529 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4530 | __arm_vcaddq_rot270_s8 (int8x16_t __a, int8x16_t __b) | |
4531 | { | |
9732dc85 | 4532 | return __builtin_mve_vcaddq_rot270v16qi (__a, __b); |
33203b4c SP |
4533 | } |
4534 | ||
4535 | __extension__ extern __inline int8x16_t | |
4536 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
4537 | __arm_vbrsrq_n_s8 (int8x16_t __a, int32_t __b) | |
4538 | { | |
4539 | return __builtin_mve_vbrsrq_n_sv16qi (__a, __b); | |
4540 | } | |
4541 | ||
/* Non-overloaded MVE intrinsics for signed 8-bit vectors (int8x16_t).
   Each function is a trivial always-inline, gnu_inline, artificial wrapper
   that forwards its arguments unchanged to the type-specific GCC builtin
   (the "_sv16qi" suffix selects the signed V16QI vector mode).  The "_n"
   variants take a scalar second operand; the shift variants take a
   compile-time immediate ("const int __imm").  */

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vbicq_sv16qi (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vandq_sv16qi (__a, __b);
}

/* Accumulating reduction: scalar accumulator first, vector second.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvaq_s8 (int32_t __a, int8x16_t __b)
{
  return __builtin_mve_vaddvaq_sv16qi (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_n_s8 (int8x16_t __a, int8_t __b)
{
  return __builtin_mve_vaddq_n_sv16qi (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vabdq_sv16qi (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_n_s8 (int8x16_t __a, const int __imm)
{
  return __builtin_mve_vshlq_n_sv16qi (__a, __imm);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrq_n_s8 (int8x16_t __a, const int __imm)
{
  return __builtin_mve_vrshrq_n_sv16qi (__a, __imm);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_n_s8 (int8x16_t __a, const int __imm)
{
  return __builtin_mve_vqshlq_n_sv16qi (__a, __imm);
}

/* Non-overloaded MVE intrinsics for unsigned 16-bit vectors (uint16x8_t):
   arithmetic and logical operations.  Each wrapper forwards to the
   type-specific builtin (the "_uv8hi" suffix selects the unsigned V8HI
   vector mode).  "_n" variants take a scalar second operand; the widening
   multiplies (vmulltq_int/vmullbq_int) return uint32x4_t, and the
   reductions (vmladavq/vminvq/vmaxvq) return or take scalars.  */

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_mve_vsubq_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_n_u16 (uint16x8_t __a, uint16_t __b)
{
  return __builtin_mve_vsubq_n_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_mve_vrmulhq_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_mve_vrhaddq_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_mve_vqsubq_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_n_u16 (uint16x8_t __a, uint16_t __b)
{
  return __builtin_mve_vqsubq_n_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_mve_vqaddq_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_n_u16 (uint16x8_t __a, uint16_t __b)
{
  return __builtin_mve_vqaddq_n_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_mve_vorrq_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_mve_vornq_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_mve_vmulq_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_n_u16 (uint16x8_t __a, uint16_t __b)
{
  return __builtin_mve_vmulq_n_uv8hi (__a, __b);
}

/* Widening multiplies: 16-bit lanes produce a 32-bit result vector.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_mve_vmulltq_int_uv8hi (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_int_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_mve_vmullbq_int_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_mve_vmulhq_uv8hi (__a, __b);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_mve_vmladavq_uv8hi (__a, __b);
}

/* Across-vector reductions: scalar running value first, vector second.  */
__extension__ extern __inline uint16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminvq_u16 (uint16_t __a, uint16x8_t __b)
{
  return __builtin_mve_vminvq_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_mve_vminq_uv8hi (__a, __b);
}

__extension__ extern __inline uint16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxvq_u16 (uint16_t __a, uint16x8_t __b)
{
  return __builtin_mve_vmaxvq_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_mve_vmaxq_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_mve_vhsubq_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_n_u16 (uint16x8_t __a, uint16_t __b)
{
  return __builtin_mve_vhsubq_n_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_mve_vhaddq_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_n_u16 (uint16x8_t __a, uint16_t __b)
{
  return __builtin_mve_vhaddq_n_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_mve_veorq_uv8hi (__a, __b);
}

/* Unsigned 16-bit vector comparisons (result is an mve_pred16_t predicate
   mask) and complex rotate-adds.  Several unsigned variants reuse a builtin
   declared with signed operand types: the (int16x8_t)/(int16_t) casts only
   change the static type, never the bit pattern.  NOTE(review): this sharing
   presumably exists because equality/inequality and the rotate-add are
   sign-agnostic — confirm against the builtin declarations in
   arm_mve_builtins.def.  vcmphiq/vcmpcsq have their own unsigned-operand
   builtins and need no casts.  */

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_n_u16 (uint16x8_t __a, uint16_t __b)
{
  return __builtin_mve_vcmpneq_n_v8hi ((int16x8_t)__a, (int16_t)__b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmphiq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_mve_vcmphiq_v8hi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmphiq_n_u16 (uint16x8_t __a, uint16_t __b)
{
  return __builtin_mve_vcmphiq_n_v8hi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_mve_vcmpeqq_v8hi ((int16x8_t)__a, (int16x8_t)__b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_n_u16 (uint16x8_t __a, uint16_t __b)
{
  return __builtin_mve_vcmpeqq_n_v8hi ((int16x8_t)__a, (int16_t)__b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpcsq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_mve_vcmpcsq_v8hi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpcsq_n_u16 (uint16x8_t __a, uint16_t __b)
{
  return __builtin_mve_vcmpcsq_n_v8hi (__a, __b);
}

/* The rotate-add builtins are signed-typed; compute through the signed
   builtin and cast the result back to the unsigned vector type.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return (uint16x8_t)
    __builtin_mve_vcaddq_rot90v8hi ((int16x8_t)__a, (int16x8_t)__b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return (uint16x8_t)
    __builtin_mve_vcaddq_rot270v8hi ((int16x8_t)__a, (int16x8_t)__b);
}

/* Unsigned 16-bit bitwise/add/shift wrappers, plus the signed-input
   min/max-absolute reductions (vminavq/vminaq/vmaxavq/vmaxaq take a signed
   vector and produce an unsigned result, so they live with the u16 group).
   Shift-by-vector variants take a signed shift vector (int16x8_t); the "_r"
   and "_n"-by-register variants take a scalar int32_t shift; the "_n"
   immediate variants take a compile-time "const int __imm".  */

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_mve_vbicq_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_mve_vandq_uv8hi (__a, __b);
}

/* Predicated across-vector add: __p masks which lanes participate.  */
__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvq_p_u16 (uint16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vaddvq_p_uv8hi (__a, __p);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvaq_u16 (uint32_t __a, uint16x8_t __b)
{
  return __builtin_mve_vaddvaq_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_n_u16 (uint16x8_t __a, uint16_t __b)
{
  return __builtin_mve_vaddq_n_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_mve_vabdq_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_r_u16 (uint16x8_t __a, int32_t __b)
{
  return __builtin_mve_vshlq_r_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_u16 (uint16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vrshlq_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_n_u16 (uint16x8_t __a, int32_t __b)
{
  return __builtin_mve_vrshlq_n_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_u16 (uint16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vqshlq_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_r_u16 (uint16x8_t __a, int32_t __b)
{
  return __builtin_mve_vqshlq_r_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_u16 (uint16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vqrshlq_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_n_u16 (uint16x8_t __a, int32_t __b)
{
  return __builtin_mve_vqrshlq_n_uv8hi (__a, __b);
}

/* Signed-vector operand, unsigned result/accumulator ("a" = absolute).  */
__extension__ extern __inline uint16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminavq_s16 (uint16_t __a, int16x8_t __b)
{
  return __builtin_mve_vminavq_sv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminaq_s16 (uint16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vminaq_sv8hi (__a, __b);
}

__extension__ extern __inline uint16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxavq_s16 (uint16_t __a, int16x8_t __b)
{
  return __builtin_mve_vmaxavq_sv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxaq_s16 (uint16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vmaxaq_sv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq_n_u16 (uint16x8_t __a, int32_t __b)
{
  return __builtin_mve_vbrsrq_n_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_n_u16 (uint16x8_t __a, const int __imm)
{
  return __builtin_mve_vshlq_n_uv8hi (__a, __imm);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrq_n_u16 (uint16x8_t __a, const int __imm)
{
  return __builtin_mve_vrshrq_n_uv8hi (__a, __imm);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_n_u16 (uint16x8_t __a, const int __imm)
{
  return __builtin_mve_vqshlq_n_uv8hi (__a, __imm);
}

/* Signed 16-bit vector comparisons (result is an mve_pred16_t predicate
   mask) and the saturating shift-left-unsigned.  The signed compare
   builtins use the plain "v8hi" mode suffix (no s/u marker), so no casts
   are needed here.  "_n" variants compare against a scalar.  */

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_n_s16 (int16x8_t __a, int16_t __b)
{
  return __builtin_mve_vcmpneq_n_v8hi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vcmpltq_v8hi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq_n_s16 (int16x8_t __a, int16_t __b)
{
  return __builtin_mve_vcmpltq_n_v8hi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vcmpleq_v8hi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq_n_s16 (int16x8_t __a, int16_t __b)
{
  return __builtin_mve_vcmpleq_n_v8hi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vcmpgtq_v8hi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq_n_s16 (int16x8_t __a, int16_t __b)
{
  return __builtin_mve_vcmpgtq_n_v8hi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vcmpgeq_v8hi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq_n_s16 (int16x8_t __a, int16_t __b)
{
  return __builtin_mve_vcmpgeq_n_v8hi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vcmpeqq_v8hi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_n_s16 (int16x8_t __a, int16_t __b)
{
  return __builtin_mve_vcmpeqq_n_v8hi (__a, __b);
}

/* Signed input, unsigned result ("u" in the intrinsic name); immediate
   shift amount.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshluq_n_s16 (int16x8_t __a, const int __imm)
{
  return __builtin_mve_vqshluq_n_sv8hi (__a, __imm);
}

/* Non-overloaded MVE intrinsics for signed 16-bit vectors (int16x8_t):
   arithmetic, logical, shift, reduction, and complex-rotate operations.
   Each wrapper forwards to the "_sv8hi" (signed V8HI) builtin, except the
   vcaddq rotate-adds, which use the unsuffixed "v8hi" builtins directly
   (the same ones the u16 variants reach through casts).  "_n" variants
   take a scalar second operand; "_p" variants take a predicate mask;
   reductions return scalars.  */

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvq_p_s16 (int16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vaddvq_p_sv8hi (__a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vsubq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_n_s16 (int16x8_t __a, int16_t __b)
{
  return __builtin_mve_vsubq_n_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_r_s16 (int16x8_t __a, int32_t __b)
{
  return __builtin_mve_vshlq_r_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vrshlq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_n_s16 (int16x8_t __a, int32_t __b)
{
  return __builtin_mve_vrshlq_n_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vrmulhq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vrhaddq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vqsubq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_n_s16 (int16x8_t __a, int16_t __b)
{
  return __builtin_mve_vqsubq_n_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vqshlq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_r_s16 (int16x8_t __a, int32_t __b)
{
  return __builtin_mve_vqshlq_r_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vqrshlq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_n_s16 (int16x8_t __a, int32_t __b)
{
  return __builtin_mve_vqrshlq_n_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmulhq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vqrdmulhq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmulhq_n_s16 (int16x8_t __a, int16_t __b)
{
  return __builtin_mve_vqrdmulhq_n_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulhq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vqdmulhq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulhq_n_s16 (int16x8_t __a, int16_t __b)
{
  return __builtin_mve_vqdmulhq_n_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vqaddq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_n_s16 (int16x8_t __a, int16_t __b)
{
  return __builtin_mve_vqaddq_n_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vorrq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vornq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vmulq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_n_s16 (int16x8_t __a, int16_t __b)
{
  return __builtin_mve_vmulq_n_sv8hi (__a, __b);
}

/* Widening multiplies: 16-bit lanes produce a 32-bit result vector.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vmulltq_int_sv8hi (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_int_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vmullbq_int_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vmulhq_sv8hi (__a, __b);
}

/* Multiply/accumulate-style across-vector reductions returning int32_t.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavxq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vmlsdavxq_sv8hi (__a, __b);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vmlsdavq_sv8hi (__a, __b);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavxq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vmladavxq_sv8hi (__a, __b);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vmladavq_sv8hi (__a, __b);
}

/* Across-vector reductions: scalar running value first, vector second.  */
__extension__ extern __inline int16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminvq_s16 (int16_t __a, int16x8_t __b)
{
  return __builtin_mve_vminvq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vminq_sv8hi (__a, __b);
}

__extension__ extern __inline int16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxvq_s16 (int16_t __a, int16x8_t __b)
{
  return __builtin_mve_vmaxvq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vmaxq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vhsubq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_n_s16 (int16x8_t __a, int16_t __b)
{
  return __builtin_mve_vhsubq_n_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhcaddq_rot90_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vhcaddq_rot90_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhcaddq_rot270_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vhcaddq_rot270_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vhaddq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_n_s16 (int16x8_t __a, int16_t __b)
{
  return __builtin_mve_vhaddq_n_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_veorq_sv8hi (__a, __b);
}

/* Rotate-add builtins use the unsuffixed "v8hi" naming (no "_s").  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vcaddq_rot90v8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vcaddq_rot270v8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq_n_s16 (int16x8_t __a, int32_t __b)
{
  return __builtin_mve_vbrsrq_n_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vbicq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vandq_sv8hi (__a, __b);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvaq_s16 (int32_t __a, int16x8_t __b)
{
  return __builtin_mve_vaddvaq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_n_s16 (int16x8_t __a, int16_t __b)
{
  return __builtin_mve_vaddq_n_sv8hi (__a, __b);
}

5412 | __extension__ extern __inline int16x8_t | |
5413 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5414 | __arm_vabdq_s16 (int16x8_t __a, int16x8_t __b) | |
5415 | { | |
5416 | return __builtin_mve_vabdq_sv8hi (__a, __b); | |
5417 | } | |
5418 | ||
5419 | __extension__ extern __inline int16x8_t | |
5420 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5421 | __arm_vshlq_n_s16 (int16x8_t __a, const int __imm) | |
5422 | { | |
5423 | return __builtin_mve_vshlq_n_sv8hi (__a, __imm); | |
5424 | } | |
5425 | ||
5426 | __extension__ extern __inline int16x8_t | |
5427 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5428 | __arm_vrshrq_n_s16 (int16x8_t __a, const int __imm) | |
5429 | { | |
5430 | return __builtin_mve_vrshrq_n_sv8hi (__a, __imm); | |
5431 | } | |
5432 | ||
5433 | __extension__ extern __inline int16x8_t | |
5434 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
5435 | __arm_vqshlq_n_s16 (int16x8_t __a, const int __imm) | |
5436 | { | |
5437 | return __builtin_mve_vqshlq_n_sv8hi (__a, __imm); | |
5438 | } | |
5439 | ||
/* Explicit MVE intrinsics for uint32x4_t (V4SI) operands: arithmetic,
   saturating, halving, multiply and bitwise forms.  Each forwards to
   the matching _uv4si builtin; _n variants splat the scalar __b.  */

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vsubq_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_n_u32 (uint32x4_t __a, uint32_t __b)
{
  return __builtin_mve_vsubq_n_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vrmulhq_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vrhaddq_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vqsubq_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_n_u32 (uint32x4_t __a, uint32_t __b)
{
  return __builtin_mve_vqsubq_n_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vqaddq_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_n_u32 (uint32x4_t __a, uint32_t __b)
{
  return __builtin_mve_vqaddq_n_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vorrq_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vornq_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vmulq_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_n_u32 (uint32x4_t __a, uint32_t __b)
{
  return __builtin_mve_vmulq_n_uv4si (__a, __b);
}

/* Widening multiplies: 32x32 -> 64-bit lanes, hence the uint64x2_t
   return type.  */
__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vmulltq_int_uv4si (__a, __b);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_int_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vmullbq_int_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vmulhq_uv4si (__a, __b);
}

/* Across-vector reductions below return a scalar.  */
__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vmladavq_uv4si (__a, __b);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminvq_u32 (uint32_t __a, uint32x4_t __b)
{
  return __builtin_mve_vminvq_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vminq_uv4si (__a, __b);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxvq_u32 (uint32_t __a, uint32x4_t __b)
{
  return __builtin_mve_vmaxvq_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vmaxq_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vhsubq_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_n_u32 (uint32x4_t __a, uint32_t __b)
{
  return __builtin_mve_vhsubq_n_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vhaddq_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_n_u32 (uint32x4_t __a, uint32_t __b)
{
  return __builtin_mve_vhaddq_n_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_veorq_uv4si (__a, __b);
}
5614 | ||
/* Unsigned 32-bit vector compares (returning an mve_pred16_t predicate
   mask) and rotated complex adds.  The eq/ne and vcaddq_rot* builtins
   are declared on signed vector types only, so the unsigned operands
   are cast to int32x4_t; for eq/ne this is a pure bit-pattern
   comparison, so signedness does not change the result.  The
   unsigned-only orderings (HI = unsigned >, CS = unsigned >=) have
   dedicated builtins and take the unsigned arguments directly.  */

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_n_u32 (uint32x4_t __a, uint32_t __b)
{
  return __builtin_mve_vcmpneq_n_v4si ((int32x4_t)__a, (int32_t)__b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmphiq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vcmphiq_v4si (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmphiq_n_u32 (uint32x4_t __a, uint32_t __b)
{
  return __builtin_mve_vcmphiq_n_v4si (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vcmpeqq_v4si ((int32x4_t)__a, (int32x4_t)__b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_n_u32 (uint32x4_t __a, uint32_t __b)
{
  return __builtin_mve_vcmpeqq_n_v4si ((int32x4_t)__a, (int32_t)__b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpcsq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vcmpcsq_v4si (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpcsq_n_u32 (uint32x4_t __a, uint32_t __b)
{
  return __builtin_mve_vcmpcsq_n_v4si (__a, __b);
}

/* Rotated adds round-trip through the signed builtin and cast the
   result back to the unsigned vector type.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return (uint32x4_t)
    __builtin_mve_vcaddq_rot90v4si ((int32x4_t)__a, (int32x4_t)__b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return (uint32x4_t)
    __builtin_mve_vcaddq_rot270v4si ((int32x4_t)__a, (int32x4_t)__b);
}
5679 | ||
/* uint32x4_t bitwise, reduction and shift intrinsics, followed by the
   s32 min/max-with-absolute-value forms (which mix a signed input
   vector with an unsigned accumulator/result).  Note that all vector
   and register shift counts are SIGNED (int32x4_t / int32_t) even for
   unsigned data: a negative count is how MVE encodes a right shift.  */

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vbicq_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vandq_uv4si (__a, __b);
}

/* _p variant: predicated reduction, masked by __p.  */
__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvq_p_u32 (uint32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vaddvq_p_uv4si (__a, __p);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvaq_u32 (uint32_t __a, uint32x4_t __b)
{
  return __builtin_mve_vaddvaq_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_n_u32 (uint32x4_t __a, uint32_t __b)
{
  return __builtin_mve_vaddq_n_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vabdq_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_r_u32 (uint32x4_t __a, int32_t __b)
{
  return __builtin_mve_vshlq_r_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_u32 (uint32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vrshlq_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_n_u32 (uint32x4_t __a, int32_t __b)
{
  return __builtin_mve_vrshlq_n_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_u32 (uint32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vqshlq_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_r_u32 (uint32x4_t __a, int32_t __b)
{
  return __builtin_mve_vqshlq_r_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_u32 (uint32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vqrshlq_uv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_n_u32 (uint32x4_t __a, int32_t __b)
{
  return __builtin_mve_vqrshlq_n_uv4si (__a, __b);
}

/* s32 absolute-value min/max: signed vector input, unsigned scalar or
   vector accumulator/result.  */
__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminavq_s32 (uint32_t __a, int32x4_t __b)
{
  return __builtin_mve_vminavq_sv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminaq_s32 (uint32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vminaq_sv4si (__a, __b);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxavq_s32 (uint32_t __a, int32x4_t __b)
{
  return __builtin_mve_vmaxavq_sv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxaq_s32 (uint32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vmaxaq_sv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq_n_u32 (uint32x4_t __a, int32_t __b)
{
  return __builtin_mve_vbrsrq_n_uv4si (__a, __b);
}

/* Immediate-count shifts; __imm must be a compile-time constant.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_n_u32 (uint32x4_t __a, const int __imm)
{
  return __builtin_mve_vshlq_n_uv4si (__a, __imm);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrq_n_u32 (uint32x4_t __a, const int __imm)
{
  return __builtin_mve_vrshrq_n_uv4si (__a, __imm);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_n_u32 (uint32x4_t __a, const int __imm)
{
  return __builtin_mve_vqshlq_n_uv4si (__a, __imm);
}
5826 | ||
/* Signed 32-bit vector compares.  Each returns an mve_pred16_t
   predicate mask; the compare builtins carry no _s/_u suffix and are
   declared on the signed types, so the arguments pass straight
   through.  _n variants compare every lane against the scalar __b.  */

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_n_s32 (int32x4_t __a, int32_t __b)
{
  return __builtin_mve_vcmpneq_n_v4si (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vcmpltq_v4si (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq_n_s32 (int32x4_t __a, int32_t __b)
{
  return __builtin_mve_vcmpltq_n_v4si (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vcmpleq_v4si (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq_n_s32 (int32x4_t __a, int32_t __b)
{
  return __builtin_mve_vcmpleq_n_v4si (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vcmpgtq_v4si (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq_n_s32 (int32x4_t __a, int32_t __b)
{
  return __builtin_mve_vcmpgtq_n_v4si (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vcmpgeq_v4si (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq_n_s32 (int32x4_t __a, int32_t __b)
{
  return __builtin_mve_vcmpgeq_n_v4si (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vcmpeqq_v4si (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_n_s32 (int32x4_t __a, int32_t __b)
{
  return __builtin_mve_vcmpeqq_n_v4si (__a, __b);
}
5903 | ||
/* Signed 32-bit (V4SI) arithmetic, saturating, shift, multiply and
   reduction intrinsics.  Same pattern as above: each is an
   always-inline forwarder to its _sv4si builtin.  */

/* Saturating shift-left-unsigned: signed input, unsigned result.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshluq_n_s32 (int32x4_t __a, const int __imm)
{
  return __builtin_mve_vqshluq_n_sv4si (__a, __imm);
}

/* Predicated across-vector add, masked by __p.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvq_p_s32 (int32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vaddvq_p_sv4si (__a, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vsubq_sv4si (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_n_s32 (int32x4_t __a, int32_t __b)
{
  return __builtin_mve_vsubq_n_sv4si (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_r_s32 (int32x4_t __a, int32_t __b)
{
  return __builtin_mve_vshlq_r_sv4si (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vrshlq_sv4si (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_n_s32 (int32x4_t __a, int32_t __b)
{
  return __builtin_mve_vrshlq_n_sv4si (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vrmulhq_sv4si (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vrhaddq_sv4si (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vqsubq_sv4si (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_n_s32 (int32x4_t __a, int32_t __b)
{
  return __builtin_mve_vqsubq_n_sv4si (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vqshlq_sv4si (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_r_s32 (int32x4_t __a, int32_t __b)
{
  return __builtin_mve_vqshlq_r_sv4si (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vqrshlq_sv4si (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_n_s32 (int32x4_t __a, int32_t __b)
{
  return __builtin_mve_vqrshlq_n_sv4si (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmulhq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vqrdmulhq_sv4si (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmulhq_n_s32 (int32x4_t __a, int32_t __b)
{
  return __builtin_mve_vqrdmulhq_n_sv4si (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulhq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vqdmulhq_sv4si (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulhq_n_s32 (int32x4_t __a, int32_t __b)
{
  return __builtin_mve_vqdmulhq_n_sv4si (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vqaddq_sv4si (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_n_s32 (int32x4_t __a, int32_t __b)
{
  return __builtin_mve_vqaddq_n_sv4si (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vorrq_sv4si (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vornq_sv4si (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vmulq_sv4si (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_n_s32 (int32x4_t __a, int32_t __b)
{
  return __builtin_mve_vmulq_n_sv4si (__a, __b);
}

/* Widening multiplies: 32x32 -> 64-bit lanes.  */
__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vmulltq_int_sv4si (__a, __b);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_int_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vmullbq_int_sv4si (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vmulhq_sv4si (__a, __b);
}

/* Multiply-accumulate/subtract across-vector reductions; scalar
   results.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavxq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vmlsdavxq_sv4si (__a, __b);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vmlsdavq_sv4si (__a, __b);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavxq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vmladavxq_sv4si (__a, __b);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vmladavq_sv4si (__a, __b);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminvq_s32 (int32_t __a, int32x4_t __b)
{
  return __builtin_mve_vminvq_sv4si (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vminq_sv4si (__a, __b);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxvq_s32 (int32_t __a, int32x4_t __b)
{
  return __builtin_mve_vmaxvq_sv4si (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vmaxq_sv4si (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vhsubq_sv4si (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_n_s32 (int32x4_t __a, int32_t __b)
{
  return __builtin_mve_vhsubq_n_sv4si (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhcaddq_rot90_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vhcaddq_rot90_sv4si (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhcaddq_rot270_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vhcaddq_rot270_sv4si (__a, __b);
}
6183 | ||
6184 | __extension__ extern __inline int32x4_t | |
6185 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6186 | __arm_vhaddq_s32 (int32x4_t __a, int32x4_t __b) | |
6187 | { | |
6188 | return __builtin_mve_vhaddq_sv4si (__a, __b); | |
6189 | } | |
6190 | ||
6191 | __extension__ extern __inline int32x4_t | |
6192 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6193 | __arm_vhaddq_n_s32 (int32x4_t __a, int32_t __b) | |
6194 | { | |
6195 | return __builtin_mve_vhaddq_n_sv4si (__a, __b); | |
6196 | } | |
6197 | ||
6198 | __extension__ extern __inline int32x4_t | |
6199 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6200 | __arm_veorq_s32 (int32x4_t __a, int32x4_t __b) | |
6201 | { | |
6202 | return __builtin_mve_veorq_sv4si (__a, __b); | |
6203 | } | |
6204 | ||
6205 | __extension__ extern __inline int32x4_t | |
6206 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6207 | __arm_vcaddq_rot90_s32 (int32x4_t __a, int32x4_t __b) | |
6208 | { | |
9732dc85 | 6209 | return __builtin_mve_vcaddq_rot90v4si (__a, __b); |
33203b4c SP |
6210 | } |
6211 | ||
6212 | __extension__ extern __inline int32x4_t | |
6213 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6214 | __arm_vcaddq_rot270_s32 (int32x4_t __a, int32x4_t __b) | |
6215 | { | |
9732dc85 | 6216 | return __builtin_mve_vcaddq_rot270v4si (__a, __b); |
33203b4c SP |
6217 | } |
6218 | ||
6219 | __extension__ extern __inline int32x4_t | |
6220 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6221 | __arm_vbrsrq_n_s32 (int32x4_t __a, int32_t __b) | |
6222 | { | |
6223 | return __builtin_mve_vbrsrq_n_sv4si (__a, __b); | |
6224 | } | |
6225 | ||
6226 | __extension__ extern __inline int32x4_t | |
6227 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6228 | __arm_vbicq_s32 (int32x4_t __a, int32x4_t __b) | |
6229 | { | |
6230 | return __builtin_mve_vbicq_sv4si (__a, __b); | |
6231 | } | |
6232 | ||
6233 | __extension__ extern __inline int32x4_t | |
6234 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6235 | __arm_vandq_s32 (int32x4_t __a, int32x4_t __b) | |
6236 | { | |
6237 | return __builtin_mve_vandq_sv4si (__a, __b); | |
6238 | } | |
6239 | ||
6240 | __extension__ extern __inline int32_t | |
6241 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6242 | __arm_vaddvaq_s32 (int32_t __a, int32x4_t __b) | |
6243 | { | |
6244 | return __builtin_mve_vaddvaq_sv4si (__a, __b); | |
6245 | } | |
6246 | ||
6247 | __extension__ extern __inline int32x4_t | |
6248 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6249 | __arm_vaddq_n_s32 (int32x4_t __a, int32_t __b) | |
6250 | { | |
6251 | return __builtin_mve_vaddq_n_sv4si (__a, __b); | |
6252 | } | |
6253 | ||
6254 | __extension__ extern __inline int32x4_t | |
6255 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6256 | __arm_vabdq_s32 (int32x4_t __a, int32x4_t __b) | |
6257 | { | |
6258 | return __builtin_mve_vabdq_sv4si (__a, __b); | |
6259 | } | |
6260 | ||
6261 | __extension__ extern __inline int32x4_t | |
6262 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6263 | __arm_vshlq_n_s32 (int32x4_t __a, const int __imm) | |
6264 | { | |
6265 | return __builtin_mve_vshlq_n_sv4si (__a, __imm); | |
6266 | } | |
6267 | ||
6268 | __extension__ extern __inline int32x4_t | |
6269 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6270 | __arm_vrshrq_n_s32 (int32x4_t __a, const int __imm) | |
6271 | { | |
6272 | return __builtin_mve_vrshrq_n_sv4si (__a, __imm); | |
6273 | } | |
6274 | ||
6275 | __extension__ extern __inline int32x4_t | |
6276 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6277 | __arm_vqshlq_n_s32 (int32x4_t __a, const int __imm) | |
6278 | { | |
6279 | return __builtin_mve_vqshlq_n_sv4si (__a, __imm); | |
6280 | } | |
f166a8cd | 6281 | |
f9355dee | 6282 | __extension__ extern __inline uint8x16_t |
14782c81 | 6283 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6284 | __arm_vqmovntq_u16 (uint8x16_t __a, uint16x8_t __b) |
14782c81 | 6285 | { |
f9355dee | 6286 | return __builtin_mve_vqmovntq_uv8hi (__a, __b); |
14782c81 SP |
6287 | } |
6288 | ||
f9355dee | 6289 | __extension__ extern __inline uint8x16_t |
14782c81 | 6290 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6291 | __arm_vqmovnbq_u16 (uint8x16_t __a, uint16x8_t __b) |
14782c81 | 6292 | { |
f9355dee | 6293 | return __builtin_mve_vqmovnbq_uv8hi (__a, __b); |
14782c81 SP |
6294 | } |
6295 | ||
f9355dee | 6296 | __extension__ extern __inline uint16x8_t |
a50f6abf | 6297 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6298 | __arm_vmulltq_poly_p8 (uint8x16_t __a, uint8x16_t __b) |
a50f6abf | 6299 | { |
f9355dee | 6300 | return __builtin_mve_vmulltq_poly_pv16qi (__a, __b); |
a50f6abf SP |
6301 | } |
6302 | ||
f9355dee | 6303 | __extension__ extern __inline uint16x8_t |
a50f6abf | 6304 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6305 | __arm_vmullbq_poly_p8 (uint8x16_t __a, uint8x16_t __b) |
a50f6abf | 6306 | { |
f9355dee | 6307 | return __builtin_mve_vmullbq_poly_pv16qi (__a, __b); |
a50f6abf SP |
6308 | } |
6309 | ||
f9355dee | 6310 | __extension__ extern __inline uint8x16_t |
a50f6abf | 6311 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6312 | __arm_vmovntq_u16 (uint8x16_t __a, uint16x8_t __b) |
a50f6abf | 6313 | { |
f9355dee | 6314 | return __builtin_mve_vmovntq_uv8hi (__a, __b); |
a50f6abf SP |
6315 | } |
6316 | ||
f9355dee | 6317 | __extension__ extern __inline uint8x16_t |
a50f6abf | 6318 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6319 | __arm_vmovnbq_u16 (uint8x16_t __a, uint16x8_t __b) |
a50f6abf | 6320 | { |
f9355dee | 6321 | return __builtin_mve_vmovnbq_uv8hi (__a, __b); |
a50f6abf SP |
6322 | } |
6323 | ||
f9355dee | 6324 | __extension__ extern __inline uint64_t |
a50f6abf | 6325 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6326 | __arm_vmlaldavq_u16 (uint16x8_t __a, uint16x8_t __b) |
a50f6abf | 6327 | { |
f9355dee | 6328 | return __builtin_mve_vmlaldavq_uv8hi (__a, __b); |
a50f6abf SP |
6329 | } |
6330 | ||
f9355dee | 6331 | __extension__ extern __inline uint8x16_t |
a50f6abf | 6332 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6333 | __arm_vqmovuntq_s16 (uint8x16_t __a, int16x8_t __b) |
a50f6abf | 6334 | { |
f9355dee | 6335 | return __builtin_mve_vqmovuntq_sv8hi (__a, __b); |
a50f6abf SP |
6336 | } |
6337 | ||
f9355dee | 6338 | __extension__ extern __inline uint8x16_t |
a50f6abf | 6339 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6340 | __arm_vqmovunbq_s16 (uint8x16_t __a, int16x8_t __b) |
a50f6abf | 6341 | { |
f9355dee | 6342 | return __builtin_mve_vqmovunbq_sv8hi (__a, __b); |
a50f6abf SP |
6343 | } |
6344 | ||
f9355dee | 6345 | __extension__ extern __inline uint16x8_t |
a50f6abf | 6346 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6347 | __arm_vshlltq_n_u8 (uint8x16_t __a, const int __imm) |
a50f6abf | 6348 | { |
f9355dee | 6349 | return __builtin_mve_vshlltq_n_uv16qi (__a, __imm); |
a50f6abf SP |
6350 | } |
6351 | ||
f9355dee | 6352 | __extension__ extern __inline uint16x8_t |
a50f6abf | 6353 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6354 | __arm_vshllbq_n_u8 (uint8x16_t __a, const int __imm) |
a50f6abf | 6355 | { |
f9355dee | 6356 | return __builtin_mve_vshllbq_n_uv16qi (__a, __imm); |
a50f6abf SP |
6357 | } |
6358 | ||
f9355dee | 6359 | __extension__ extern __inline uint16x8_t |
a50f6abf | 6360 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6361 | __arm_vorrq_n_u16 (uint16x8_t __a, const int __imm) |
a50f6abf | 6362 | { |
f9355dee | 6363 | return __builtin_mve_vorrq_n_uv8hi (__a, __imm); |
a50f6abf SP |
6364 | } |
6365 | ||
f9355dee | 6366 | __extension__ extern __inline uint16x8_t |
a50f6abf | 6367 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
d34f510e | 6368 | __arm_vbicq_n_u16 (uint16x8_t __a, const int __imm) |
a50f6abf | 6369 | { |
f9355dee | 6370 | return __builtin_mve_vbicq_n_uv8hi (__a, __imm); |
a50f6abf SP |
6371 | } |
6372 | ||
f9355dee | 6373 | __extension__ extern __inline int8x16_t |
a50f6abf | 6374 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6375 | __arm_vqmovntq_s16 (int8x16_t __a, int16x8_t __b) |
a50f6abf | 6376 | { |
f9355dee | 6377 | return __builtin_mve_vqmovntq_sv8hi (__a, __b); |
a50f6abf SP |
6378 | } |
6379 | ||
f9355dee | 6380 | __extension__ extern __inline int8x16_t |
a50f6abf | 6381 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6382 | __arm_vqmovnbq_s16 (int8x16_t __a, int16x8_t __b) |
a50f6abf | 6383 | { |
f9355dee | 6384 | return __builtin_mve_vqmovnbq_sv8hi (__a, __b); |
a50f6abf SP |
6385 | } |
6386 | ||
f9355dee | 6387 | __extension__ extern __inline int32x4_t |
a50f6abf | 6388 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6389 | __arm_vqdmulltq_s16 (int16x8_t __a, int16x8_t __b) |
a50f6abf | 6390 | { |
f9355dee | 6391 | return __builtin_mve_vqdmulltq_sv8hi (__a, __b); |
a50f6abf SP |
6392 | } |
6393 | ||
f9355dee | 6394 | __extension__ extern __inline int32x4_t |
a50f6abf | 6395 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6396 | __arm_vqdmulltq_n_s16 (int16x8_t __a, int16_t __b) |
a50f6abf | 6397 | { |
f9355dee | 6398 | return __builtin_mve_vqdmulltq_n_sv8hi (__a, __b); |
a50f6abf SP |
6399 | } |
6400 | ||
f9355dee | 6401 | __extension__ extern __inline int32x4_t |
a50f6abf | 6402 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6403 | __arm_vqdmullbq_s16 (int16x8_t __a, int16x8_t __b) |
a50f6abf | 6404 | { |
f9355dee | 6405 | return __builtin_mve_vqdmullbq_sv8hi (__a, __b); |
a50f6abf SP |
6406 | } |
6407 | ||
f9355dee | 6408 | __extension__ extern __inline int32x4_t |
a50f6abf | 6409 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6410 | __arm_vqdmullbq_n_s16 (int16x8_t __a, int16_t __b) |
a50f6abf | 6411 | { |
f9355dee | 6412 | return __builtin_mve_vqdmullbq_n_sv8hi (__a, __b); |
a50f6abf SP |
6413 | } |
6414 | ||
f9355dee | 6415 | __extension__ extern __inline int8x16_t |
a50f6abf | 6416 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6417 | __arm_vmovntq_s16 (int8x16_t __a, int16x8_t __b) |
a50f6abf | 6418 | { |
f9355dee | 6419 | return __builtin_mve_vmovntq_sv8hi (__a, __b); |
a50f6abf SP |
6420 | } |
6421 | ||
f9355dee | 6422 | __extension__ extern __inline int8x16_t |
a50f6abf | 6423 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6424 | __arm_vmovnbq_s16 (int8x16_t __a, int16x8_t __b) |
a50f6abf | 6425 | { |
f9355dee | 6426 | return __builtin_mve_vmovnbq_sv8hi (__a, __b); |
a50f6abf SP |
6427 | } |
6428 | ||
f9355dee | 6429 | __extension__ extern __inline int64_t |
a50f6abf | 6430 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6431 | __arm_vmlsldavxq_s16 (int16x8_t __a, int16x8_t __b) |
a50f6abf | 6432 | { |
f9355dee | 6433 | return __builtin_mve_vmlsldavxq_sv8hi (__a, __b); |
a50f6abf SP |
6434 | } |
6435 | ||
f9355dee | 6436 | __extension__ extern __inline int64_t |
a50f6abf | 6437 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6438 | __arm_vmlsldavq_s16 (int16x8_t __a, int16x8_t __b) |
a50f6abf | 6439 | { |
f9355dee | 6440 | return __builtin_mve_vmlsldavq_sv8hi (__a, __b); |
a50f6abf SP |
6441 | } |
6442 | ||
f9355dee | 6443 | __extension__ extern __inline int64_t |
a50f6abf | 6444 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6445 | __arm_vmlaldavxq_s16 (int16x8_t __a, int16x8_t __b) |
a50f6abf | 6446 | { |
f9355dee | 6447 | return __builtin_mve_vmlaldavxq_sv8hi (__a, __b); |
a50f6abf SP |
6448 | } |
6449 | ||
f9355dee | 6450 | __extension__ extern __inline int64_t |
a50f6abf | 6451 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6452 | __arm_vmlaldavq_s16 (int16x8_t __a, int16x8_t __b) |
a50f6abf | 6453 | { |
f9355dee | 6454 | return __builtin_mve_vmlaldavq_sv8hi (__a, __b); |
a50f6abf SP |
6455 | } |
6456 | ||
f9355dee | 6457 | __extension__ extern __inline int16x8_t |
a50f6abf | 6458 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6459 | __arm_vshlltq_n_s8 (int8x16_t __a, const int __imm) |
a50f6abf | 6460 | { |
f9355dee | 6461 | return __builtin_mve_vshlltq_n_sv16qi (__a, __imm); |
a50f6abf SP |
6462 | } |
6463 | ||
f9355dee | 6464 | __extension__ extern __inline int16x8_t |
a50f6abf | 6465 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6466 | __arm_vshllbq_n_s8 (int8x16_t __a, const int __imm) |
a50f6abf | 6467 | { |
f9355dee | 6468 | return __builtin_mve_vshllbq_n_sv16qi (__a, __imm); |
a50f6abf SP |
6469 | } |
6470 | ||
f9355dee | 6471 | __extension__ extern __inline int16x8_t |
a50f6abf | 6472 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6473 | __arm_vorrq_n_s16 (int16x8_t __a, const int __imm) |
a50f6abf | 6474 | { |
f9355dee | 6475 | return __builtin_mve_vorrq_n_sv8hi (__a, __imm); |
a50f6abf SP |
6476 | } |
6477 | ||
f9355dee | 6478 | __extension__ extern __inline int16x8_t |
a50f6abf | 6479 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
d34f510e | 6480 | __arm_vbicq_n_s16 (int16x8_t __a, const int __imm) |
a50f6abf | 6481 | { |
f9355dee | 6482 | return __builtin_mve_vbicq_n_sv8hi (__a, __imm); |
a50f6abf SP |
6483 | } |
6484 | ||
f9355dee | 6485 | __extension__ extern __inline uint16x8_t |
5db0eb95 | 6486 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6487 | __arm_vqmovntq_u32 (uint16x8_t __a, uint32x4_t __b) |
5db0eb95 | 6488 | { |
f9355dee | 6489 | return __builtin_mve_vqmovntq_uv4si (__a, __b); |
5db0eb95 SP |
6490 | } |
6491 | ||
f9355dee | 6492 | __extension__ extern __inline uint16x8_t |
5db0eb95 | 6493 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6494 | __arm_vqmovnbq_u32 (uint16x8_t __a, uint32x4_t __b) |
5db0eb95 | 6495 | { |
f9355dee | 6496 | return __builtin_mve_vqmovnbq_uv4si (__a, __b); |
5db0eb95 SP |
6497 | } |
6498 | ||
f9355dee | 6499 | __extension__ extern __inline uint32x4_t |
5db0eb95 | 6500 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6501 | __arm_vmulltq_poly_p16 (uint16x8_t __a, uint16x8_t __b) |
5db0eb95 | 6502 | { |
f9355dee | 6503 | return __builtin_mve_vmulltq_poly_pv8hi (__a, __b); |
5db0eb95 SP |
6504 | } |
6505 | ||
6506 | __extension__ extern __inline uint32x4_t | |
6507 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
f9355dee | 6508 | __arm_vmullbq_poly_p16 (uint16x8_t __a, uint16x8_t __b) |
5db0eb95 | 6509 | { |
f9355dee | 6510 | return __builtin_mve_vmullbq_poly_pv8hi (__a, __b); |
5db0eb95 SP |
6511 | } |
6512 | ||
6df4618c SP |
6513 | __extension__ extern __inline uint16x8_t |
6514 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
f9355dee | 6515 | __arm_vmovntq_u32 (uint16x8_t __a, uint32x4_t __b) |
6df4618c | 6516 | { |
f9355dee | 6517 | return __builtin_mve_vmovntq_uv4si (__a, __b); |
6df4618c SP |
6518 | } |
6519 | ||
f9355dee | 6520 | __extension__ extern __inline uint16x8_t |
6df4618c | 6521 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6522 | __arm_vmovnbq_u32 (uint16x8_t __a, uint32x4_t __b) |
6df4618c | 6523 | { |
f9355dee SP |
6524 | return __builtin_mve_vmovnbq_uv4si (__a, __b); |
6525 | } | |
6526 | ||
6527 | __extension__ extern __inline uint64_t | |
6528 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6529 | __arm_vmlaldavq_u32 (uint32x4_t __a, uint32x4_t __b) | |
6530 | { | |
6531 | return __builtin_mve_vmlaldavq_uv4si (__a, __b); | |
6df4618c SP |
6532 | } |
6533 | ||
6534 | __extension__ extern __inline uint16x8_t | |
6535 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
f9355dee | 6536 | __arm_vqmovuntq_s32 (uint16x8_t __a, int32x4_t __b) |
6df4618c | 6537 | { |
f9355dee | 6538 | return __builtin_mve_vqmovuntq_sv4si (__a, __b); |
6df4618c SP |
6539 | } |
6540 | ||
6541 | __extension__ extern __inline uint16x8_t | |
6542 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
f9355dee | 6543 | __arm_vqmovunbq_s32 (uint16x8_t __a, int32x4_t __b) |
6df4618c | 6544 | { |
f9355dee | 6545 | return __builtin_mve_vqmovunbq_sv4si (__a, __b); |
6df4618c SP |
6546 | } |
6547 | ||
6548 | __extension__ extern __inline uint32x4_t | |
6549 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
f9355dee | 6550 | __arm_vshlltq_n_u16 (uint16x8_t __a, const int __imm) |
6df4618c | 6551 | { |
f9355dee | 6552 | return __builtin_mve_vshlltq_n_uv8hi (__a, __imm); |
6df4618c SP |
6553 | } |
6554 | ||
f9355dee | 6555 | __extension__ extern __inline uint32x4_t |
6df4618c | 6556 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6557 | __arm_vshllbq_n_u16 (uint16x8_t __a, const int __imm) |
6df4618c | 6558 | { |
f9355dee | 6559 | return __builtin_mve_vshllbq_n_uv8hi (__a, __imm); |
6df4618c SP |
6560 | } |
6561 | ||
6562 | __extension__ extern __inline uint32x4_t | |
6563 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
f9355dee | 6564 | __arm_vorrq_n_u32 (uint32x4_t __a, const int __imm) |
6df4618c | 6565 | { |
f9355dee | 6566 | return __builtin_mve_vorrq_n_uv4si (__a, __imm); |
6df4618c SP |
6567 | } |
6568 | ||
f9355dee SP |
6569 | __extension__ extern __inline uint32x4_t |
6570 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
d34f510e | 6571 | __arm_vbicq_n_u32 (uint32x4_t __a, const int __imm) |
6df4618c | 6572 | { |
f9355dee | 6573 | return __builtin_mve_vbicq_n_uv4si (__a, __imm); |
6df4618c SP |
6574 | } |
6575 | ||
f9355dee | 6576 | __extension__ extern __inline int16x8_t |
6df4618c | 6577 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6578 | __arm_vqmovntq_s32 (int16x8_t __a, int32x4_t __b) |
6df4618c | 6579 | { |
f9355dee | 6580 | return __builtin_mve_vqmovntq_sv4si (__a, __b); |
6df4618c SP |
6581 | } |
6582 | ||
6583 | __extension__ extern __inline int16x8_t | |
6584 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
f9355dee | 6585 | __arm_vqmovnbq_s32 (int16x8_t __a, int32x4_t __b) |
6df4618c | 6586 | { |
f9355dee | 6587 | return __builtin_mve_vqmovnbq_sv4si (__a, __b); |
6df4618c SP |
6588 | } |
6589 | ||
f9355dee | 6590 | __extension__ extern __inline int64x2_t |
6df4618c | 6591 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6592 | __arm_vqdmulltq_s32 (int32x4_t __a, int32x4_t __b) |
6df4618c | 6593 | { |
f9355dee | 6594 | return __builtin_mve_vqdmulltq_sv4si (__a, __b); |
6df4618c SP |
6595 | } |
6596 | ||
f9355dee | 6597 | __extension__ extern __inline int64x2_t |
6df4618c | 6598 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6599 | __arm_vqdmulltq_n_s32 (int32x4_t __a, int32_t __b) |
6df4618c | 6600 | { |
f9355dee | 6601 | return __builtin_mve_vqdmulltq_n_sv4si (__a, __b); |
6df4618c SP |
6602 | } |
6603 | ||
f9355dee | 6604 | __extension__ extern __inline int64x2_t |
6df4618c | 6605 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6606 | __arm_vqdmullbq_s32 (int32x4_t __a, int32x4_t __b) |
6df4618c | 6607 | { |
f9355dee SP |
6608 | return __builtin_mve_vqdmullbq_sv4si (__a, __b); |
6609 | } | |
6610 | ||
6611 | __extension__ extern __inline int64x2_t | |
6612 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6613 | __arm_vqdmullbq_n_s32 (int32x4_t __a, int32_t __b) | |
6614 | { | |
6615 | return __builtin_mve_vqdmullbq_n_sv4si (__a, __b); | |
6df4618c SP |
6616 | } |
6617 | ||
6618 | __extension__ extern __inline int16x8_t | |
6619 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
f9355dee | 6620 | __arm_vmovntq_s32 (int16x8_t __a, int32x4_t __b) |
6df4618c | 6621 | { |
f9355dee | 6622 | return __builtin_mve_vmovntq_sv4si (__a, __b); |
6df4618c SP |
6623 | } |
6624 | ||
f9355dee | 6625 | __extension__ extern __inline int16x8_t |
6df4618c | 6626 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6627 | __arm_vmovnbq_s32 (int16x8_t __a, int32x4_t __b) |
6df4618c | 6628 | { |
f9355dee | 6629 | return __builtin_mve_vmovnbq_sv4si (__a, __b); |
6df4618c SP |
6630 | } |
6631 | ||
f9355dee | 6632 | __extension__ extern __inline int64_t |
4be8cf77 | 6633 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6634 | __arm_vmlsldavxq_s32 (int32x4_t __a, int32x4_t __b) |
4be8cf77 | 6635 | { |
f9355dee | 6636 | return __builtin_mve_vmlsldavxq_sv4si (__a, __b); |
4be8cf77 SP |
6637 | } |
6638 | ||
f9355dee | 6639 | __extension__ extern __inline int64_t |
4be8cf77 | 6640 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6641 | __arm_vmlsldavq_s32 (int32x4_t __a, int32x4_t __b) |
4be8cf77 | 6642 | { |
f9355dee | 6643 | return __builtin_mve_vmlsldavq_sv4si (__a, __b); |
4be8cf77 SP |
6644 | } |
6645 | ||
f9355dee | 6646 | __extension__ extern __inline int64_t |
4be8cf77 | 6647 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6648 | __arm_vmlaldavxq_s32 (int32x4_t __a, int32x4_t __b) |
4be8cf77 | 6649 | { |
f9355dee | 6650 | return __builtin_mve_vmlaldavxq_sv4si (__a, __b); |
4be8cf77 SP |
6651 | } |
6652 | ||
f9355dee | 6653 | __extension__ extern __inline int64_t |
4be8cf77 | 6654 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6655 | __arm_vmlaldavq_s32 (int32x4_t __a, int32x4_t __b) |
4be8cf77 | 6656 | { |
f9355dee | 6657 | return __builtin_mve_vmlaldavq_sv4si (__a, __b); |
4be8cf77 SP |
6658 | } |
6659 | ||
f9355dee | 6660 | __extension__ extern __inline int32x4_t |
4be8cf77 | 6661 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6662 | __arm_vshlltq_n_s16 (int16x8_t __a, const int __imm) |
4be8cf77 | 6663 | { |
f9355dee | 6664 | return __builtin_mve_vshlltq_n_sv8hi (__a, __imm); |
4be8cf77 SP |
6665 | } |
6666 | ||
f9355dee | 6667 | __extension__ extern __inline int32x4_t |
4be8cf77 | 6668 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6669 | __arm_vshllbq_n_s16 (int16x8_t __a, const int __imm) |
4be8cf77 | 6670 | { |
f9355dee | 6671 | return __builtin_mve_vshllbq_n_sv8hi (__a, __imm); |
4be8cf77 SP |
6672 | } |
6673 | ||
f9355dee | 6674 | __extension__ extern __inline int32x4_t |
4be8cf77 | 6675 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6676 | __arm_vorrq_n_s32 (int32x4_t __a, const int __imm) |
4be8cf77 | 6677 | { |
f9355dee | 6678 | return __builtin_mve_vorrq_n_sv4si (__a, __imm); |
4be8cf77 SP |
6679 | } |
6680 | ||
f9355dee | 6681 | __extension__ extern __inline int32x4_t |
4be8cf77 | 6682 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
d34f510e | 6683 | __arm_vbicq_n_s32 (int32x4_t __a, const int __imm) |
4be8cf77 | 6684 | { |
f9355dee | 6685 | return __builtin_mve_vbicq_n_sv4si (__a, __imm); |
4be8cf77 SP |
6686 | } |
6687 | ||
f9355dee | 6688 | __extension__ extern __inline uint64_t |
4be8cf77 | 6689 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6690 | __arm_vrmlaldavhq_u32 (uint32x4_t __a, uint32x4_t __b) |
4be8cf77 | 6691 | { |
f9355dee | 6692 | return __builtin_mve_vrmlaldavhq_uv4si (__a, __b); |
4be8cf77 SP |
6693 | } |
6694 | ||
f9355dee | 6695 | __extension__ extern __inline mve_pred16_t |
4be8cf77 | 6696 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6697 | __arm_vctp8q_m (uint32_t __a, mve_pred16_t __p) |
4be8cf77 | 6698 | { |
f9355dee | 6699 | return __builtin_mve_vctp8q_mhi (__a, __p); |
4be8cf77 SP |
6700 | } |
6701 | ||
f9355dee | 6702 | __extension__ extern __inline mve_pred16_t |
f166a8cd | 6703 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6704 | __arm_vctp64q_m (uint32_t __a, mve_pred16_t __p) |
f166a8cd | 6705 | { |
f9355dee | 6706 | return __builtin_mve_vctp64q_mhi (__a, __p); |
f166a8cd SP |
6707 | } |
6708 | ||
f9355dee | 6709 | __extension__ extern __inline mve_pred16_t |
f166a8cd | 6710 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6711 | __arm_vctp32q_m (uint32_t __a, mve_pred16_t __p) |
f166a8cd | 6712 | { |
f9355dee | 6713 | return __builtin_mve_vctp32q_mhi (__a, __p); |
f166a8cd SP |
6714 | } |
6715 | ||
f9355dee | 6716 | __extension__ extern __inline mve_pred16_t |
f166a8cd | 6717 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6718 | __arm_vctp16q_m (uint32_t __a, mve_pred16_t __p) |
f166a8cd | 6719 | { |
f9355dee | 6720 | return __builtin_mve_vctp16q_mhi (__a, __p); |
f166a8cd SP |
6721 | } |
6722 | ||
f9355dee | 6723 | __extension__ extern __inline uint64_t |
f166a8cd | 6724 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
f9355dee | 6725 | __arm_vaddlvaq_u32 (uint64_t __a, uint32x4_t __b) |
f166a8cd | 6726 | { |
f9355dee | 6727 | return __builtin_mve_vaddlvaq_uv4si (__a, __b); |
f166a8cd SP |
6728 | } |
6729 | ||
f9355dee SP |
6730 | __extension__ extern __inline int64_t |
6731 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6732 | __arm_vrmlsldavhxq_s32 (int32x4_t __a, int32x4_t __b) | |
6733 | { | |
6734 | return __builtin_mve_vrmlsldavhxq_sv4si (__a, __b); | |
6735 | } | |
14782c81 | 6736 | |
f9355dee SP |
6737 | __extension__ extern __inline int64_t |
6738 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6739 | __arm_vrmlsldavhq_s32 (int32x4_t __a, int32x4_t __b) | |
6740 | { | |
6741 | return __builtin_mve_vrmlsldavhq_sv4si (__a, __b); | |
6742 | } | |
6743 | ||
6744 | __extension__ extern __inline int64_t | |
6745 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6746 | __arm_vrmlaldavhxq_s32 (int32x4_t __a, int32x4_t __b) | |
6747 | { | |
6748 | return __builtin_mve_vrmlaldavhxq_sv4si (__a, __b); | |
6749 | } | |
6750 | ||
6751 | __extension__ extern __inline int64_t | |
6752 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6753 | __arm_vrmlaldavhq_s32 (int32x4_t __a, int32x4_t __b) | |
6754 | { | |
6755 | return __builtin_mve_vrmlaldavhq_sv4si (__a, __b); | |
6756 | } | |
6757 | ||
6758 | __extension__ extern __inline int64_t | |
6759 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6760 | __arm_vaddlvaq_s32 (int64_t __a, int32x4_t __b) | |
6761 | { | |
6762 | return __builtin_mve_vaddlvaq_sv4si (__a, __b); | |
6763 | } | |
6764 | ||
0dad5b33 SP |
6765 | __extension__ extern __inline uint32_t |
6766 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6767 | __arm_vabavq_s8 (uint32_t __a, int8x16_t __b, int8x16_t __c) | |
6768 | { | |
6769 | return __builtin_mve_vabavq_sv16qi (__a, __b, __c); | |
6770 | } | |
6771 | ||
6772 | __extension__ extern __inline uint32_t | |
6773 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6774 | __arm_vabavq_s16 (uint32_t __a, int16x8_t __b, int16x8_t __c) | |
6775 | { | |
6776 | return __builtin_mve_vabavq_sv8hi (__a, __b, __c); | |
6777 | } | |
6778 | ||
6779 | __extension__ extern __inline uint32_t | |
6780 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6781 | __arm_vabavq_s32 (uint32_t __a, int32x4_t __b, int32x4_t __c) | |
6782 | { | |
6783 | return __builtin_mve_vabavq_sv4si (__a, __b, __c); | |
6784 | } | |
6785 | ||
6786 | __extension__ extern __inline uint32_t | |
6787 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6788 | __arm_vabavq_u8 (uint32_t __a, uint8x16_t __b, uint8x16_t __c) | |
6789 | { | |
6790 | return __builtin_mve_vabavq_uv16qi(__a, __b, __c); | |
6791 | } | |
6792 | ||
6793 | __extension__ extern __inline uint32_t | |
6794 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6795 | __arm_vabavq_u16 (uint32_t __a, uint16x8_t __b, uint16x8_t __c) | |
6796 | { | |
6797 | return __builtin_mve_vabavq_uv8hi(__a, __b, __c); | |
6798 | } | |
6799 | ||
6800 | __extension__ extern __inline uint32_t | |
6801 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6802 | __arm_vabavq_u32 (uint32_t __a, uint32x4_t __b, uint32x4_t __c) | |
6803 | { | |
6804 | return __builtin_mve_vabavq_uv4si(__a, __b, __c); | |
6805 | } | |
6806 | ||
6807 | __extension__ extern __inline int16x8_t | |
6808 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6809 | __arm_vbicq_m_n_s16 (int16x8_t __a, const int __imm, mve_pred16_t __p) | |
6810 | { | |
6811 | return __builtin_mve_vbicq_m_n_sv8hi (__a, __imm, __p); | |
6812 | } | |
6813 | ||
6814 | __extension__ extern __inline int32x4_t | |
6815 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6816 | __arm_vbicq_m_n_s32 (int32x4_t __a, const int __imm, mve_pred16_t __p) | |
6817 | { | |
6818 | return __builtin_mve_vbicq_m_n_sv4si (__a, __imm, __p); | |
6819 | } | |
6820 | ||
6821 | __extension__ extern __inline uint16x8_t | |
6822 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6823 | __arm_vbicq_m_n_u16 (uint16x8_t __a, const int __imm, mve_pred16_t __p) | |
6824 | { | |
6825 | return __builtin_mve_vbicq_m_n_uv8hi (__a, __imm, __p); | |
6826 | } | |
6827 | ||
6828 | __extension__ extern __inline uint32x4_t | |
6829 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6830 | __arm_vbicq_m_n_u32 (uint32x4_t __a, const int __imm, mve_pred16_t __p) | |
6831 | { | |
6832 | return __builtin_mve_vbicq_m_n_uv4si (__a, __imm, __p); | |
6833 | } | |
6834 | ||
6835 | __extension__ extern __inline int8x16_t | |
6836 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6837 | __arm_vqrshrnbq_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm) | |
6838 | { | |
6839 | return __builtin_mve_vqrshrnbq_n_sv8hi (__a, __b, __imm); | |
6840 | } | |
6841 | ||
6842 | __extension__ extern __inline uint8x16_t | |
6843 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6844 | __arm_vqrshrnbq_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm) | |
6845 | { | |
6846 | return __builtin_mve_vqrshrnbq_n_uv8hi (__a, __b, __imm); | |
6847 | } | |
6848 | ||
6849 | __extension__ extern __inline int16x8_t | |
6850 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6851 | __arm_vqrshrnbq_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm) | |
6852 | { | |
6853 | return __builtin_mve_vqrshrnbq_n_sv4si (__a, __b, __imm); | |
6854 | } | |
6855 | ||
6856 | __extension__ extern __inline uint16x8_t | |
6857 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6858 | __arm_vqrshrnbq_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm) | |
6859 | { | |
6860 | return __builtin_mve_vqrshrnbq_n_uv4si (__a, __b, __imm); | |
6861 | } | |
6862 | ||
6863 | __extension__ extern __inline uint8x16_t | |
6864 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6865 | __arm_vqrshrunbq_n_s16 (uint8x16_t __a, int16x8_t __b, const int __imm) | |
6866 | { | |
6867 | return __builtin_mve_vqrshrunbq_n_sv8hi (__a, __b, __imm); | |
6868 | } | |
6869 | ||
6870 | __extension__ extern __inline uint16x8_t | |
6871 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6872 | __arm_vqrshrunbq_n_s32 (uint16x8_t __a, int32x4_t __b, const int __imm) | |
6873 | { | |
6874 | return __builtin_mve_vqrshrunbq_n_sv4si (__a, __b, __imm); | |
6875 | } | |
6876 | ||
6877 | __extension__ extern __inline int64_t | |
6878 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6879 | __arm_vrmlaldavhaq_s32 (int64_t __a, int32x4_t __b, int32x4_t __c) | |
6880 | { | |
6881 | return __builtin_mve_vrmlaldavhaq_sv4si (__a, __b, __c); | |
6882 | } | |
6883 | ||
6884 | __extension__ extern __inline uint64_t | |
6885 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6886 | __arm_vrmlaldavhaq_u32 (uint64_t __a, uint32x4_t __b, uint32x4_t __c) | |
6887 | { | |
6888 | return __builtin_mve_vrmlaldavhaq_uv4si (__a, __b, __c); | |
6889 | } | |
6890 | ||
6891 | __extension__ extern __inline int8x16_t | |
6892 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6893 | __arm_vshlcq_s8 (int8x16_t __a, uint32_t * __b, const int __imm) | |
6894 | { | |
6895 | int8x16_t __res = __builtin_mve_vshlcq_vec_sv16qi (__a, *__b, __imm); | |
6896 | *__b = __builtin_mve_vshlcq_carry_sv16qi (__a, *__b, __imm); | |
6897 | return __res; | |
6898 | } | |
6899 | ||
6900 | __extension__ extern __inline uint8x16_t | |
6901 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6902 | __arm_vshlcq_u8 (uint8x16_t __a, uint32_t * __b, const int __imm) | |
6903 | { | |
6904 | uint8x16_t __res = __builtin_mve_vshlcq_vec_uv16qi (__a, *__b, __imm); | |
6905 | *__b = __builtin_mve_vshlcq_carry_uv16qi (__a, *__b, __imm); | |
6906 | return __res; | |
6907 | } | |
6908 | ||
6909 | __extension__ extern __inline int16x8_t | |
6910 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6911 | __arm_vshlcq_s16 (int16x8_t __a, uint32_t * __b, const int __imm) | |
6912 | { | |
6913 | int16x8_t __res = __builtin_mve_vshlcq_vec_sv8hi (__a, *__b, __imm); | |
6914 | *__b = __builtin_mve_vshlcq_carry_sv8hi (__a, *__b, __imm); | |
6915 | return __res; | |
6916 | } | |
6917 | ||
6918 | __extension__ extern __inline uint16x8_t | |
6919 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6920 | __arm_vshlcq_u16 (uint16x8_t __a, uint32_t * __b, const int __imm) | |
6921 | { | |
6922 | uint16x8_t __res = __builtin_mve_vshlcq_vec_uv8hi (__a, *__b, __imm); | |
6923 | *__b = __builtin_mve_vshlcq_carry_uv8hi (__a, *__b, __imm); | |
6924 | return __res; | |
6925 | } | |
6926 | ||
6927 | __extension__ extern __inline int32x4_t | |
6928 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6929 | __arm_vshlcq_s32 (int32x4_t __a, uint32_t * __b, const int __imm) | |
6930 | { | |
6931 | int32x4_t __res = __builtin_mve_vshlcq_vec_sv4si (__a, *__b, __imm); | |
6932 | *__b = __builtin_mve_vshlcq_carry_sv4si (__a, *__b, __imm); | |
6933 | return __res; | |
6934 | } | |
6935 | ||
6936 | __extension__ extern __inline uint32x4_t | |
6937 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6938 | __arm_vshlcq_u32 (uint32x4_t __a, uint32_t * __b, const int __imm) | |
6939 | { | |
6940 | uint32x4_t __res = __builtin_mve_vshlcq_vec_uv4si (__a, *__b, __imm); | |
6941 | *__b = __builtin_mve_vshlcq_carry_uv4si (__a, *__b, __imm); | |
6942 | return __res; | |
6943 | } | |
6944 | ||
8165795c SP |
6945 | __extension__ extern __inline uint8x16_t |
6946 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6947 | __arm_vpselq_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
6948 | { | |
6949 | return __builtin_mve_vpselq_uv16qi (__a, __b, __p); | |
6950 | } | |
6951 | ||
6952 | __extension__ extern __inline int8x16_t | |
6953 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6954 | __arm_vpselq_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
6955 | { | |
6956 | return __builtin_mve_vpselq_sv16qi (__a, __b, __p); | |
6957 | } | |
6958 | ||
6959 | __extension__ extern __inline uint8x16_t | |
6960 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6961 | __arm_vrev64q_m_u8 (uint8x16_t __inactive, uint8x16_t __a, mve_pred16_t __p) | |
6962 | { | |
6963 | return __builtin_mve_vrev64q_m_uv16qi (__inactive, __a, __p); | |
6964 | } | |
6965 | ||
8165795c SP |
6966 | __extension__ extern __inline uint8x16_t |
6967 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6968 | __arm_vmvnq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, mve_pred16_t __p) | |
6969 | { | |
6970 | return __builtin_mve_vmvnq_m_uv16qi (__inactive, __a, __p); | |
6971 | } | |
6972 | ||
6973 | __extension__ extern __inline uint8x16_t | |
6974 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6975 | __arm_vmlasq_n_u8 (uint8x16_t __a, uint8x16_t __b, uint8_t __c) | |
6976 | { | |
6977 | return __builtin_mve_vmlasq_n_uv16qi (__a, __b, __c); | |
6978 | } | |
6979 | ||
6980 | __extension__ extern __inline uint8x16_t | |
6981 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6982 | __arm_vmlaq_n_u8 (uint8x16_t __a, uint8x16_t __b, uint8_t __c) | |
6983 | { | |
6984 | return __builtin_mve_vmlaq_n_uv16qi (__a, __b, __c); | |
6985 | } | |
6986 | ||
6987 | __extension__ extern __inline uint32_t | |
6988 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6989 | __arm_vmladavq_p_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
6990 | { | |
6991 | return __builtin_mve_vmladavq_p_uv16qi (__a, __b, __p); | |
6992 | } | |
6993 | ||
6994 | __extension__ extern __inline uint32_t | |
6995 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
6996 | __arm_vmladavaq_u8 (uint32_t __a, uint8x16_t __b, uint8x16_t __c) | |
6997 | { | |
6998 | return __builtin_mve_vmladavaq_uv16qi (__a, __b, __c); | |
6999 | } | |
7000 | ||
7001 | __extension__ extern __inline uint8_t | |
7002 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7003 | __arm_vminvq_p_u8 (uint8_t __a, uint8x16_t __b, mve_pred16_t __p) | |
7004 | { | |
7005 | return __builtin_mve_vminvq_p_uv16qi (__a, __b, __p); | |
7006 | } | |
7007 | ||
7008 | __extension__ extern __inline uint8_t | |
7009 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7010 | __arm_vmaxvq_p_u8 (uint8_t __a, uint8x16_t __b, mve_pred16_t __p) | |
7011 | { | |
7012 | return __builtin_mve_vmaxvq_p_uv16qi (__a, __b, __p); | |
7013 | } | |
7014 | ||
7015 | __extension__ extern __inline uint8x16_t | |
7016 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7017 | __arm_vdupq_m_n_u8 (uint8x16_t __inactive, uint8_t __a, mve_pred16_t __p) | |
7018 | { | |
7019 | return __builtin_mve_vdupq_m_n_uv16qi (__inactive, __a, __p); | |
7020 | } | |
7021 | ||
7022 | __extension__ extern __inline mve_pred16_t | |
7023 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7024 | __arm_vcmpneq_m_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
7025 | { | |
7026 | return __builtin_mve_vcmpneq_m_uv16qi (__a, __b, __p); | |
7027 | } | |
7028 | ||
7029 | __extension__ extern __inline mve_pred16_t | |
7030 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7031 | __arm_vcmpneq_m_n_u8 (uint8x16_t __a, uint8_t __b, mve_pred16_t __p) | |
7032 | { | |
7033 | return __builtin_mve_vcmpneq_m_n_uv16qi (__a, __b, __p); | |
7034 | } | |
7035 | ||
7036 | __extension__ extern __inline mve_pred16_t | |
7037 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7038 | __arm_vcmphiq_m_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
7039 | { | |
7040 | return __builtin_mve_vcmphiq_m_uv16qi (__a, __b, __p); | |
7041 | } | |
7042 | ||
7043 | __extension__ extern __inline mve_pred16_t | |
7044 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7045 | __arm_vcmphiq_m_n_u8 (uint8x16_t __a, uint8_t __b, mve_pred16_t __p) | |
7046 | { | |
7047 | return __builtin_mve_vcmphiq_m_n_uv16qi (__a, __b, __p); | |
7048 | } | |
7049 | ||
7050 | __extension__ extern __inline mve_pred16_t | |
7051 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7052 | __arm_vcmpeqq_m_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
7053 | { | |
7054 | return __builtin_mve_vcmpeqq_m_uv16qi (__a, __b, __p); | |
7055 | } | |
7056 | ||
7057 | __extension__ extern __inline mve_pred16_t | |
7058 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7059 | __arm_vcmpeqq_m_n_u8 (uint8x16_t __a, uint8_t __b, mve_pred16_t __p) | |
7060 | { | |
7061 | return __builtin_mve_vcmpeqq_m_n_uv16qi (__a, __b, __p); | |
7062 | } | |
7063 | ||
7064 | __extension__ extern __inline mve_pred16_t | |
7065 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7066 | __arm_vcmpcsq_m_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
7067 | { | |
7068 | return __builtin_mve_vcmpcsq_m_uv16qi (__a, __b, __p); | |
7069 | } | |
7070 | ||
7071 | __extension__ extern __inline mve_pred16_t | |
7072 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7073 | __arm_vcmpcsq_m_n_u8 (uint8x16_t __a, uint8_t __b, mve_pred16_t __p) | |
7074 | { | |
7075 | return __builtin_mve_vcmpcsq_m_n_uv16qi (__a, __b, __p); | |
7076 | } | |
7077 | ||
7078 | __extension__ extern __inline uint8x16_t | |
7079 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7080 | __arm_vclzq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, mve_pred16_t __p) | |
7081 | { | |
7082 | return __builtin_mve_vclzq_m_uv16qi (__inactive, __a, __p); | |
7083 | } | |
7084 | ||
7085 | __extension__ extern __inline uint32_t | |
7086 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7087 | __arm_vaddvaq_p_u8 (uint32_t __a, uint8x16_t __b, mve_pred16_t __p) | |
7088 | { | |
7089 | return __builtin_mve_vaddvaq_p_uv16qi (__a, __b, __p); | |
7090 | } | |
7091 | ||
7092 | __extension__ extern __inline uint8x16_t | |
7093 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7094 | __arm_vsriq_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __imm) | |
7095 | { | |
7096 | return __builtin_mve_vsriq_n_uv16qi (__a, __b, __imm); | |
7097 | } | |
7098 | ||
7099 | __extension__ extern __inline uint8x16_t | |
7100 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7101 | __arm_vsliq_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __imm) | |
7102 | { | |
7103 | return __builtin_mve_vsliq_n_uv16qi (__a, __b, __imm); | |
7104 | } | |
7105 | ||
7106 | __extension__ extern __inline uint8x16_t | |
7107 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7108 | __arm_vshlq_m_r_u8 (uint8x16_t __a, int32_t __b, mve_pred16_t __p) | |
7109 | { | |
7110 | return __builtin_mve_vshlq_m_r_uv16qi (__a, __b, __p); | |
7111 | } | |
7112 | ||
7113 | __extension__ extern __inline uint8x16_t | |
7114 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7115 | __arm_vrshlq_m_n_u8 (uint8x16_t __a, int32_t __b, mve_pred16_t __p) | |
7116 | { | |
7117 | return __builtin_mve_vrshlq_m_n_uv16qi (__a, __b, __p); | |
7118 | } | |
7119 | ||
7120 | __extension__ extern __inline uint8x16_t | |
7121 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7122 | __arm_vqshlq_m_r_u8 (uint8x16_t __a, int32_t __b, mve_pred16_t __p) | |
7123 | { | |
7124 | return __builtin_mve_vqshlq_m_r_uv16qi (__a, __b, __p); | |
7125 | } | |
7126 | ||
7127 | __extension__ extern __inline uint8x16_t | |
7128 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7129 | __arm_vqrshlq_m_n_u8 (uint8x16_t __a, int32_t __b, mve_pred16_t __p) | |
7130 | { | |
7131 | return __builtin_mve_vqrshlq_m_n_uv16qi (__a, __b, __p); | |
7132 | } | |
7133 | ||
7134 | __extension__ extern __inline uint8_t | |
7135 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7136 | __arm_vminavq_p_s8 (uint8_t __a, int8x16_t __b, mve_pred16_t __p) | |
7137 | { | |
7138 | return __builtin_mve_vminavq_p_sv16qi (__a, __b, __p); | |
7139 | } | |
7140 | ||
7141 | __extension__ extern __inline uint8x16_t | |
7142 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7143 | __arm_vminaq_m_s8 (uint8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
7144 | { | |
7145 | return __builtin_mve_vminaq_m_sv16qi (__a, __b, __p); | |
7146 | } | |
7147 | ||
7148 | __extension__ extern __inline uint8_t | |
7149 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7150 | __arm_vmaxavq_p_s8 (uint8_t __a, int8x16_t __b, mve_pred16_t __p) | |
7151 | { | |
7152 | return __builtin_mve_vmaxavq_p_sv16qi (__a, __b, __p); | |
7153 | } | |
7154 | ||
7155 | __extension__ extern __inline uint8x16_t | |
7156 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7157 | __arm_vmaxaq_m_s8 (uint8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
7158 | { | |
7159 | return __builtin_mve_vmaxaq_m_sv16qi (__a, __b, __p); | |
7160 | } | |
7161 | ||
7162 | __extension__ extern __inline mve_pred16_t | |
7163 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7164 | __arm_vcmpneq_m_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
7165 | { | |
7166 | return __builtin_mve_vcmpneq_m_sv16qi (__a, __b, __p); | |
7167 | } | |
7168 | ||
7169 | __extension__ extern __inline mve_pred16_t | |
7170 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7171 | __arm_vcmpneq_m_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p) | |
7172 | { | |
7173 | return __builtin_mve_vcmpneq_m_n_sv16qi (__a, __b, __p); | |
7174 | } | |
7175 | ||
7176 | __extension__ extern __inline mve_pred16_t | |
7177 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7178 | __arm_vcmpltq_m_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
7179 | { | |
7180 | return __builtin_mve_vcmpltq_m_sv16qi (__a, __b, __p); | |
7181 | } | |
7182 | ||
7183 | __extension__ extern __inline mve_pred16_t | |
7184 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7185 | __arm_vcmpltq_m_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p) | |
7186 | { | |
7187 | return __builtin_mve_vcmpltq_m_n_sv16qi (__a, __b, __p); | |
7188 | } | |
7189 | ||
7190 | __extension__ extern __inline mve_pred16_t | |
7191 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7192 | __arm_vcmpleq_m_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
7193 | { | |
7194 | return __builtin_mve_vcmpleq_m_sv16qi (__a, __b, __p); | |
7195 | } | |
7196 | ||
7197 | __extension__ extern __inline mve_pred16_t | |
7198 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7199 | __arm_vcmpleq_m_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p) | |
7200 | { | |
7201 | return __builtin_mve_vcmpleq_m_n_sv16qi (__a, __b, __p); | |
7202 | } | |
7203 | ||
7204 | __extension__ extern __inline mve_pred16_t | |
7205 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7206 | __arm_vcmpgtq_m_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
7207 | { | |
7208 | return __builtin_mve_vcmpgtq_m_sv16qi (__a, __b, __p); | |
7209 | } | |
7210 | ||
7211 | __extension__ extern __inline mve_pred16_t | |
7212 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7213 | __arm_vcmpgtq_m_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p) | |
7214 | { | |
7215 | return __builtin_mve_vcmpgtq_m_n_sv16qi (__a, __b, __p); | |
7216 | } | |
7217 | ||
7218 | __extension__ extern __inline mve_pred16_t | |
7219 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7220 | __arm_vcmpgeq_m_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
7221 | { | |
7222 | return __builtin_mve_vcmpgeq_m_sv16qi (__a, __b, __p); | |
7223 | } | |
7224 | ||
7225 | __extension__ extern __inline mve_pred16_t | |
7226 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7227 | __arm_vcmpgeq_m_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p) | |
7228 | { | |
7229 | return __builtin_mve_vcmpgeq_m_n_sv16qi (__a, __b, __p); | |
7230 | } | |
7231 | ||
7232 | __extension__ extern __inline mve_pred16_t | |
7233 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7234 | __arm_vcmpeqq_m_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
7235 | { | |
7236 | return __builtin_mve_vcmpeqq_m_sv16qi (__a, __b, __p); | |
7237 | } | |
7238 | ||
7239 | __extension__ extern __inline mve_pred16_t | |
7240 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7241 | __arm_vcmpeqq_m_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p) | |
7242 | { | |
7243 | return __builtin_mve_vcmpeqq_m_n_sv16qi (__a, __b, __p); | |
7244 | } | |
7245 | ||
7246 | __extension__ extern __inline int8x16_t | |
7247 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7248 | __arm_vshlq_m_r_s8 (int8x16_t __a, int32_t __b, mve_pred16_t __p) | |
7249 | { | |
7250 | return __builtin_mve_vshlq_m_r_sv16qi (__a, __b, __p); | |
7251 | } | |
7252 | ||
7253 | __extension__ extern __inline int8x16_t | |
7254 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7255 | __arm_vrshlq_m_n_s8 (int8x16_t __a, int32_t __b, mve_pred16_t __p) | |
7256 | { | |
7257 | return __builtin_mve_vrshlq_m_n_sv16qi (__a, __b, __p); | |
7258 | } | |
7259 | ||
7260 | __extension__ extern __inline int8x16_t | |
7261 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7262 | __arm_vrev64q_m_s8 (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p) | |
7263 | { | |
7264 | return __builtin_mve_vrev64q_m_sv16qi (__inactive, __a, __p); | |
7265 | } | |
7266 | ||
7267 | __extension__ extern __inline int8x16_t | |
7268 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7269 | __arm_vqshlq_m_r_s8 (int8x16_t __a, int32_t __b, mve_pred16_t __p) | |
7270 | { | |
7271 | return __builtin_mve_vqshlq_m_r_sv16qi (__a, __b, __p); | |
7272 | } | |
7273 | ||
7274 | __extension__ extern __inline int8x16_t | |
7275 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7276 | __arm_vqrshlq_m_n_s8 (int8x16_t __a, int32_t __b, mve_pred16_t __p) | |
7277 | { | |
7278 | return __builtin_mve_vqrshlq_m_n_sv16qi (__a, __b, __p); | |
7279 | } | |
7280 | ||
7281 | __extension__ extern __inline int8x16_t | |
7282 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7283 | __arm_vqnegq_m_s8 (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p) | |
7284 | { | |
7285 | return __builtin_mve_vqnegq_m_sv16qi (__inactive, __a, __p); | |
7286 | } | |
7287 | ||
7288 | __extension__ extern __inline int8x16_t | |
7289 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7290 | __arm_vqabsq_m_s8 (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p) | |
7291 | { | |
7292 | return __builtin_mve_vqabsq_m_sv16qi (__inactive, __a, __p); | |
7293 | } | |
7294 | ||
7295 | __extension__ extern __inline int8x16_t | |
7296 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7297 | __arm_vnegq_m_s8 (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p) | |
7298 | { | |
7299 | return __builtin_mve_vnegq_m_sv16qi (__inactive, __a, __p); | |
7300 | } | |
7301 | ||
7302 | ||
7303 | __extension__ extern __inline int8x16_t | |
7304 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7305 | __arm_vmvnq_m_s8 (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p) | |
7306 | { | |
7307 | return __builtin_mve_vmvnq_m_sv16qi (__inactive, __a, __p); | |
7308 | } | |
7309 | ||
7310 | __extension__ extern __inline int32_t | |
7311 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7312 | __arm_vmlsdavxq_p_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
7313 | { | |
7314 | return __builtin_mve_vmlsdavxq_p_sv16qi (__a, __b, __p); | |
7315 | } | |
7316 | ||
7317 | __extension__ extern __inline int32_t | |
7318 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7319 | __arm_vmlsdavq_p_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
7320 | { | |
7321 | return __builtin_mve_vmlsdavq_p_sv16qi (__a, __b, __p); | |
7322 | } | |
7323 | ||
7324 | __extension__ extern __inline int32_t | |
7325 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7326 | __arm_vmladavxq_p_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
7327 | { | |
7328 | return __builtin_mve_vmladavxq_p_sv16qi (__a, __b, __p); | |
7329 | } | |
7330 | ||
7331 | __extension__ extern __inline int32_t | |
7332 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7333 | __arm_vmladavq_p_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
7334 | { | |
7335 | return __builtin_mve_vmladavq_p_sv16qi (__a, __b, __p); | |
7336 | } | |
7337 | ||
7338 | __extension__ extern __inline int8_t | |
7339 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7340 | __arm_vminvq_p_s8 (int8_t __a, int8x16_t __b, mve_pred16_t __p) | |
7341 | { | |
7342 | return __builtin_mve_vminvq_p_sv16qi (__a, __b, __p); | |
7343 | } | |
7344 | ||
7345 | __extension__ extern __inline int8_t | |
7346 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7347 | __arm_vmaxvq_p_s8 (int8_t __a, int8x16_t __b, mve_pred16_t __p) | |
7348 | { | |
7349 | return __builtin_mve_vmaxvq_p_sv16qi (__a, __b, __p); | |
7350 | } | |
7351 | ||
7352 | __extension__ extern __inline int8x16_t | |
7353 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7354 | __arm_vdupq_m_n_s8 (int8x16_t __inactive, int8_t __a, mve_pred16_t __p) | |
7355 | { | |
7356 | return __builtin_mve_vdupq_m_n_sv16qi (__inactive, __a, __p); | |
7357 | } | |
7358 | ||
7359 | __extension__ extern __inline int8x16_t | |
7360 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7361 | __arm_vclzq_m_s8 (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p) | |
7362 | { | |
7363 | return __builtin_mve_vclzq_m_sv16qi (__inactive, __a, __p); | |
7364 | } | |
7365 | ||
7366 | __extension__ extern __inline int8x16_t | |
7367 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7368 | __arm_vclsq_m_s8 (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p) | |
7369 | { | |
7370 | return __builtin_mve_vclsq_m_sv16qi (__inactive, __a, __p); | |
7371 | } | |
7372 | ||
7373 | __extension__ extern __inline int32_t | |
7374 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7375 | __arm_vaddvaq_p_s8 (int32_t __a, int8x16_t __b, mve_pred16_t __p) | |
7376 | { | |
7377 | return __builtin_mve_vaddvaq_p_sv16qi (__a, __b, __p); | |
7378 | } | |
7379 | ||
7380 | __extension__ extern __inline int8x16_t | |
7381 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7382 | __arm_vabsq_m_s8 (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p) | |
7383 | { | |
7384 | return __builtin_mve_vabsq_m_sv16qi (__inactive, __a, __p); | |
7385 | } | |
7386 | ||
7387 | __extension__ extern __inline int8x16_t | |
7388 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7389 | __arm_vqrdmlsdhxq_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b) | |
7390 | { | |
7391 | return __builtin_mve_vqrdmlsdhxq_sv16qi (__inactive, __a, __b); | |
7392 | } | |
7393 | ||
7394 | __extension__ extern __inline int8x16_t | |
7395 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7396 | __arm_vqrdmlsdhq_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b) | |
7397 | { | |
7398 | return __builtin_mve_vqrdmlsdhq_sv16qi (__inactive, __a, __b); | |
7399 | } | |
7400 | ||
7401 | __extension__ extern __inline int8x16_t | |
7402 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7403 | __arm_vqrdmlashq_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c) | |
7404 | { | |
7405 | return __builtin_mve_vqrdmlashq_n_sv16qi (__a, __b, __c); | |
7406 | } | |
7407 | ||
afb198ee CL |
7408 | __extension__ extern __inline int8x16_t |
7409 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7410 | __arm_vqdmlashq_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c) | |
7411 | { | |
7412 | return __builtin_mve_vqdmlashq_n_sv16qi (__a, __b, __c); | |
7413 | } | |
7414 | ||
8165795c SP |
7415 | __extension__ extern __inline int8x16_t |
7416 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7417 | __arm_vqrdmlahq_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c) | |
7418 | { | |
7419 | return __builtin_mve_vqrdmlahq_n_sv16qi (__a, __b, __c); | |
7420 | } | |
7421 | ||
7422 | __extension__ extern __inline int8x16_t | |
7423 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7424 | __arm_vqrdmladhxq_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b) | |
7425 | { | |
7426 | return __builtin_mve_vqrdmladhxq_sv16qi (__inactive, __a, __b); | |
7427 | } | |
7428 | ||
7429 | __extension__ extern __inline int8x16_t | |
7430 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7431 | __arm_vqrdmladhq_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b) | |
7432 | { | |
7433 | return __builtin_mve_vqrdmladhq_sv16qi (__inactive, __a, __b); | |
7434 | } | |
7435 | ||
7436 | __extension__ extern __inline int8x16_t | |
7437 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7438 | __arm_vqdmlsdhxq_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b) | |
7439 | { | |
7440 | return __builtin_mve_vqdmlsdhxq_sv16qi (__inactive, __a, __b); | |
7441 | } | |
7442 | ||
7443 | __extension__ extern __inline int8x16_t | |
7444 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7445 | __arm_vqdmlsdhq_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b) | |
7446 | { | |
7447 | return __builtin_mve_vqdmlsdhq_sv16qi (__inactive, __a, __b); | |
7448 | } | |
7449 | ||
7450 | __extension__ extern __inline int8x16_t | |
7451 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7452 | __arm_vqdmlahq_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c) | |
7453 | { | |
7454 | return __builtin_mve_vqdmlahq_n_sv16qi (__a, __b, __c); | |
7455 | } | |
7456 | ||
7457 | __extension__ extern __inline int8x16_t | |
7458 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7459 | __arm_vqdmladhxq_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b) | |
7460 | { | |
7461 | return __builtin_mve_vqdmladhxq_sv16qi (__inactive, __a, __b); | |
7462 | } | |
7463 | ||
7464 | __extension__ extern __inline int8x16_t | |
7465 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7466 | __arm_vqdmladhq_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b) | |
7467 | { | |
7468 | return __builtin_mve_vqdmladhq_sv16qi (__inactive, __a, __b); | |
7469 | } | |
7470 | ||
7471 | __extension__ extern __inline int32_t | |
7472 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7473 | __arm_vmlsdavaxq_s8 (int32_t __a, int8x16_t __b, int8x16_t __c) | |
7474 | { | |
7475 | return __builtin_mve_vmlsdavaxq_sv16qi (__a, __b, __c); | |
7476 | } | |
7477 | ||
7478 | __extension__ extern __inline int32_t | |
7479 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7480 | __arm_vmlsdavaq_s8 (int32_t __a, int8x16_t __b, int8x16_t __c) | |
7481 | { | |
7482 | return __builtin_mve_vmlsdavaq_sv16qi (__a, __b, __c); | |
7483 | } | |
7484 | ||
7485 | __extension__ extern __inline int8x16_t | |
7486 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7487 | __arm_vmlasq_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c) | |
7488 | { | |
7489 | return __builtin_mve_vmlasq_n_sv16qi (__a, __b, __c); | |
7490 | } | |
7491 | ||
7492 | __extension__ extern __inline int8x16_t | |
7493 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7494 | __arm_vmlaq_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c) | |
7495 | { | |
7496 | return __builtin_mve_vmlaq_n_sv16qi (__a, __b, __c); | |
7497 | } | |
7498 | ||
7499 | __extension__ extern __inline int32_t | |
7500 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7501 | __arm_vmladavaxq_s8 (int32_t __a, int8x16_t __b, int8x16_t __c) | |
7502 | { | |
7503 | return __builtin_mve_vmladavaxq_sv16qi (__a, __b, __c); | |
7504 | } | |
7505 | ||
7506 | __extension__ extern __inline int32_t | |
7507 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7508 | __arm_vmladavaq_s8 (int32_t __a, int8x16_t __b, int8x16_t __c) | |
7509 | { | |
7510 | return __builtin_mve_vmladavaq_sv16qi (__a, __b, __c); | |
7511 | } | |
7512 | ||
7513 | __extension__ extern __inline int8x16_t | |
7514 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7515 | __arm_vsriq_n_s8 (int8x16_t __a, int8x16_t __b, const int __imm) | |
7516 | { | |
7517 | return __builtin_mve_vsriq_n_sv16qi (__a, __b, __imm); | |
7518 | } | |
7519 | ||
7520 | __extension__ extern __inline int8x16_t | |
7521 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7522 | __arm_vsliq_n_s8 (int8x16_t __a, int8x16_t __b, const int __imm) | |
7523 | { | |
7524 | return __builtin_mve_vsliq_n_sv16qi (__a, __b, __imm); | |
7525 | } | |
7526 | ||
7527 | __extension__ extern __inline uint16x8_t | |
7528 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7529 | __arm_vpselq_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
7530 | { | |
7531 | return __builtin_mve_vpselq_uv8hi (__a, __b, __p); | |
7532 | } | |
7533 | ||
7534 | __extension__ extern __inline int16x8_t | |
7535 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7536 | __arm_vpselq_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
7537 | { | |
7538 | return __builtin_mve_vpselq_sv8hi (__a, __b, __p); | |
7539 | } | |
7540 | ||
7541 | __extension__ extern __inline uint16x8_t | |
7542 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7543 | __arm_vrev64q_m_u16 (uint16x8_t __inactive, uint16x8_t __a, mve_pred16_t __p) | |
7544 | { | |
7545 | return __builtin_mve_vrev64q_m_uv8hi (__inactive, __a, __p); | |
7546 | } | |
7547 | ||
8165795c SP |
7548 | __extension__ extern __inline uint16x8_t |
7549 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7550 | __arm_vmvnq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, mve_pred16_t __p) | |
7551 | { | |
7552 | return __builtin_mve_vmvnq_m_uv8hi (__inactive, __a, __p); | |
7553 | } | |
7554 | ||
7555 | __extension__ extern __inline uint16x8_t | |
7556 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7557 | __arm_vmlasq_n_u16 (uint16x8_t __a, uint16x8_t __b, uint16_t __c) | |
7558 | { | |
7559 | return __builtin_mve_vmlasq_n_uv8hi (__a, __b, __c); | |
7560 | } | |
7561 | ||
7562 | __extension__ extern __inline uint16x8_t | |
7563 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7564 | __arm_vmlaq_n_u16 (uint16x8_t __a, uint16x8_t __b, uint16_t __c) | |
7565 | { | |
7566 | return __builtin_mve_vmlaq_n_uv8hi (__a, __b, __c); | |
7567 | } | |
7568 | ||
7569 | __extension__ extern __inline uint32_t | |
7570 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7571 | __arm_vmladavq_p_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
7572 | { | |
7573 | return __builtin_mve_vmladavq_p_uv8hi (__a, __b, __p); | |
7574 | } | |
7575 | ||
7576 | __extension__ extern __inline uint32_t | |
7577 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7578 | __arm_vmladavaq_u16 (uint32_t __a, uint16x8_t __b, uint16x8_t __c) | |
7579 | { | |
7580 | return __builtin_mve_vmladavaq_uv8hi (__a, __b, __c); | |
7581 | } | |
7582 | ||
7583 | __extension__ extern __inline uint16_t | |
7584 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7585 | __arm_vminvq_p_u16 (uint16_t __a, uint16x8_t __b, mve_pred16_t __p) | |
7586 | { | |
7587 | return __builtin_mve_vminvq_p_uv8hi (__a, __b, __p); | |
7588 | } | |
7589 | ||
7590 | __extension__ extern __inline uint16_t | |
7591 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7592 | __arm_vmaxvq_p_u16 (uint16_t __a, uint16x8_t __b, mve_pred16_t __p) | |
7593 | { | |
7594 | return __builtin_mve_vmaxvq_p_uv8hi (__a, __b, __p); | |
7595 | } | |
7596 | ||
7597 | __extension__ extern __inline uint16x8_t | |
7598 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7599 | __arm_vdupq_m_n_u16 (uint16x8_t __inactive, uint16_t __a, mve_pred16_t __p) | |
7600 | { | |
7601 | return __builtin_mve_vdupq_m_n_uv8hi (__inactive, __a, __p); | |
7602 | } | |
7603 | ||
7604 | __extension__ extern __inline mve_pred16_t | |
7605 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7606 | __arm_vcmpneq_m_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
7607 | { | |
7608 | return __builtin_mve_vcmpneq_m_uv8hi (__a, __b, __p); | |
7609 | } | |
7610 | ||
7611 | __extension__ extern __inline mve_pred16_t | |
7612 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7613 | __arm_vcmpneq_m_n_u16 (uint16x8_t __a, uint16_t __b, mve_pred16_t __p) | |
7614 | { | |
7615 | return __builtin_mve_vcmpneq_m_n_uv8hi (__a, __b, __p); | |
7616 | } | |
7617 | ||
7618 | __extension__ extern __inline mve_pred16_t | |
7619 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7620 | __arm_vcmphiq_m_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
7621 | { | |
7622 | return __builtin_mve_vcmphiq_m_uv8hi (__a, __b, __p); | |
7623 | } | |
7624 | ||
7625 | __extension__ extern __inline mve_pred16_t | |
7626 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7627 | __arm_vcmphiq_m_n_u16 (uint16x8_t __a, uint16_t __b, mve_pred16_t __p) | |
7628 | { | |
7629 | return __builtin_mve_vcmphiq_m_n_uv8hi (__a, __b, __p); | |
7630 | } | |
7631 | ||
7632 | __extension__ extern __inline mve_pred16_t | |
7633 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7634 | __arm_vcmpeqq_m_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
7635 | { | |
7636 | return __builtin_mve_vcmpeqq_m_uv8hi (__a, __b, __p); | |
7637 | } | |
7638 | ||
7639 | __extension__ extern __inline mve_pred16_t | |
7640 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7641 | __arm_vcmpeqq_m_n_u16 (uint16x8_t __a, uint16_t __b, mve_pred16_t __p) | |
7642 | { | |
7643 | return __builtin_mve_vcmpeqq_m_n_uv8hi (__a, __b, __p); | |
7644 | } | |
7645 | ||
7646 | __extension__ extern __inline mve_pred16_t | |
7647 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7648 | __arm_vcmpcsq_m_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
7649 | { | |
7650 | return __builtin_mve_vcmpcsq_m_uv8hi (__a, __b, __p); | |
7651 | } | |
7652 | ||
7653 | __extension__ extern __inline mve_pred16_t | |
7654 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7655 | __arm_vcmpcsq_m_n_u16 (uint16x8_t __a, uint16_t __b, mve_pred16_t __p) | |
7656 | { | |
7657 | return __builtin_mve_vcmpcsq_m_n_uv8hi (__a, __b, __p); | |
7658 | } | |
7659 | ||
7660 | __extension__ extern __inline uint16x8_t | |
7661 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7662 | __arm_vclzq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, mve_pred16_t __p) | |
7663 | { | |
7664 | return __builtin_mve_vclzq_m_uv8hi (__inactive, __a, __p); | |
7665 | } | |
7666 | ||
7667 | __extension__ extern __inline uint32_t | |
7668 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7669 | __arm_vaddvaq_p_u16 (uint32_t __a, uint16x8_t __b, mve_pred16_t __p) | |
7670 | { | |
7671 | return __builtin_mve_vaddvaq_p_uv8hi (__a, __b, __p); | |
7672 | } | |
7673 | ||
7674 | __extension__ extern __inline uint16x8_t | |
7675 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7676 | __arm_vsriq_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __imm) | |
7677 | { | |
7678 | return __builtin_mve_vsriq_n_uv8hi (__a, __b, __imm); | |
7679 | } | |
7680 | ||
7681 | __extension__ extern __inline uint16x8_t | |
7682 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7683 | __arm_vsliq_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __imm) | |
7684 | { | |
7685 | return __builtin_mve_vsliq_n_uv8hi (__a, __b, __imm); | |
7686 | } | |
7687 | ||
7688 | __extension__ extern __inline uint16x8_t | |
7689 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7690 | __arm_vshlq_m_r_u16 (uint16x8_t __a, int32_t __b, mve_pred16_t __p) | |
7691 | { | |
7692 | return __builtin_mve_vshlq_m_r_uv8hi (__a, __b, __p); | |
7693 | } | |
7694 | ||
7695 | __extension__ extern __inline uint16x8_t | |
7696 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7697 | __arm_vrshlq_m_n_u16 (uint16x8_t __a, int32_t __b, mve_pred16_t __p) | |
7698 | { | |
7699 | return __builtin_mve_vrshlq_m_n_uv8hi (__a, __b, __p); | |
7700 | } | |
7701 | ||
7702 | __extension__ extern __inline uint16x8_t | |
7703 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7704 | __arm_vqshlq_m_r_u16 (uint16x8_t __a, int32_t __b, mve_pred16_t __p) | |
7705 | { | |
7706 | return __builtin_mve_vqshlq_m_r_uv8hi (__a, __b, __p); | |
7707 | } | |
7708 | ||
7709 | __extension__ extern __inline uint16x8_t | |
7710 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7711 | __arm_vqrshlq_m_n_u16 (uint16x8_t __a, int32_t __b, mve_pred16_t __p) | |
7712 | { | |
7713 | return __builtin_mve_vqrshlq_m_n_uv8hi (__a, __b, __p); | |
7714 | } | |
7715 | ||
7716 | __extension__ extern __inline uint16_t | |
7717 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7718 | __arm_vminavq_p_s16 (uint16_t __a, int16x8_t __b, mve_pred16_t __p) | |
7719 | { | |
7720 | return __builtin_mve_vminavq_p_sv8hi (__a, __b, __p); | |
7721 | } | |
7722 | ||
7723 | __extension__ extern __inline uint16x8_t | |
7724 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7725 | __arm_vminaq_m_s16 (uint16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
7726 | { | |
7727 | return __builtin_mve_vminaq_m_sv8hi (__a, __b, __p); | |
7728 | } | |
7729 | ||
7730 | __extension__ extern __inline uint16_t | |
7731 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7732 | __arm_vmaxavq_p_s16 (uint16_t __a, int16x8_t __b, mve_pred16_t __p) | |
7733 | { | |
7734 | return __builtin_mve_vmaxavq_p_sv8hi (__a, __b, __p); | |
7735 | } | |
7736 | ||
7737 | __extension__ extern __inline uint16x8_t | |
7738 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7739 | __arm_vmaxaq_m_s16 (uint16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
7740 | { | |
7741 | return __builtin_mve_vmaxaq_m_sv8hi (__a, __b, __p); | |
7742 | } | |
7743 | ||
7744 | __extension__ extern __inline mve_pred16_t | |
7745 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7746 | __arm_vcmpneq_m_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
7747 | { | |
7748 | return __builtin_mve_vcmpneq_m_sv8hi (__a, __b, __p); | |
7749 | } | |
7750 | ||
7751 | __extension__ extern __inline mve_pred16_t | |
7752 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7753 | __arm_vcmpneq_m_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p) | |
7754 | { | |
7755 | return __builtin_mve_vcmpneq_m_n_sv8hi (__a, __b, __p); | |
7756 | } | |
7757 | ||
7758 | __extension__ extern __inline mve_pred16_t | |
7759 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7760 | __arm_vcmpltq_m_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
7761 | { | |
7762 | return __builtin_mve_vcmpltq_m_sv8hi (__a, __b, __p); | |
7763 | } | |
7764 | ||
7765 | __extension__ extern __inline mve_pred16_t | |
7766 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7767 | __arm_vcmpltq_m_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p) | |
7768 | { | |
7769 | return __builtin_mve_vcmpltq_m_n_sv8hi (__a, __b, __p); | |
7770 | } | |
7771 | ||
7772 | __extension__ extern __inline mve_pred16_t | |
7773 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7774 | __arm_vcmpleq_m_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
7775 | { | |
7776 | return __builtin_mve_vcmpleq_m_sv8hi (__a, __b, __p); | |
7777 | } | |
7778 | ||
7779 | __extension__ extern __inline mve_pred16_t | |
7780 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7781 | __arm_vcmpleq_m_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p) | |
7782 | { | |
7783 | return __builtin_mve_vcmpleq_m_n_sv8hi (__a, __b, __p); | |
7784 | } | |
7785 | ||
7786 | __extension__ extern __inline mve_pred16_t | |
7787 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7788 | __arm_vcmpgtq_m_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
7789 | { | |
7790 | return __builtin_mve_vcmpgtq_m_sv8hi (__a, __b, __p); | |
7791 | } | |
7792 | ||
7793 | __extension__ extern __inline mve_pred16_t | |
7794 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7795 | __arm_vcmpgtq_m_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p) | |
7796 | { | |
7797 | return __builtin_mve_vcmpgtq_m_n_sv8hi (__a, __b, __p); | |
7798 | } | |
7799 | ||
7800 | __extension__ extern __inline mve_pred16_t | |
7801 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7802 | __arm_vcmpgeq_m_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
7803 | { | |
7804 | return __builtin_mve_vcmpgeq_m_sv8hi (__a, __b, __p); | |
7805 | } | |
7806 | ||
7807 | __extension__ extern __inline mve_pred16_t | |
7808 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7809 | __arm_vcmpgeq_m_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p) | |
7810 | { | |
7811 | return __builtin_mve_vcmpgeq_m_n_sv8hi (__a, __b, __p); | |
7812 | } | |
7813 | ||
7814 | __extension__ extern __inline mve_pred16_t | |
7815 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7816 | __arm_vcmpeqq_m_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
7817 | { | |
7818 | return __builtin_mve_vcmpeqq_m_sv8hi (__a, __b, __p); | |
7819 | } | |
7820 | ||
7821 | __extension__ extern __inline mve_pred16_t | |
7822 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7823 | __arm_vcmpeqq_m_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p) | |
7824 | { | |
7825 | return __builtin_mve_vcmpeqq_m_n_sv8hi (__a, __b, __p); | |
7826 | } | |
7827 | ||
7828 | __extension__ extern __inline int16x8_t | |
7829 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7830 | __arm_vshlq_m_r_s16 (int16x8_t __a, int32_t __b, mve_pred16_t __p) | |
7831 | { | |
7832 | return __builtin_mve_vshlq_m_r_sv8hi (__a, __b, __p); | |
7833 | } | |
7834 | ||
7835 | __extension__ extern __inline int16x8_t | |
7836 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7837 | __arm_vrshlq_m_n_s16 (int16x8_t __a, int32_t __b, mve_pred16_t __p) | |
7838 | { | |
7839 | return __builtin_mve_vrshlq_m_n_sv8hi (__a, __b, __p); | |
7840 | } | |
7841 | ||
7842 | __extension__ extern __inline int16x8_t | |
7843 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7844 | __arm_vrev64q_m_s16 (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p) | |
7845 | { | |
7846 | return __builtin_mve_vrev64q_m_sv8hi (__inactive, __a, __p); | |
7847 | } | |
7848 | ||
7849 | __extension__ extern __inline int16x8_t | |
7850 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7851 | __arm_vqshlq_m_r_s16 (int16x8_t __a, int32_t __b, mve_pred16_t __p) | |
7852 | { | |
7853 | return __builtin_mve_vqshlq_m_r_sv8hi (__a, __b, __p); | |
7854 | } | |
7855 | ||
7856 | __extension__ extern __inline int16x8_t | |
7857 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7858 | __arm_vqrshlq_m_n_s16 (int16x8_t __a, int32_t __b, mve_pred16_t __p) | |
7859 | { | |
7860 | return __builtin_mve_vqrshlq_m_n_sv8hi (__a, __b, __p); | |
7861 | } | |
7862 | ||
7863 | __extension__ extern __inline int16x8_t | |
7864 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7865 | __arm_vqnegq_m_s16 (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p) | |
7866 | { | |
7867 | return __builtin_mve_vqnegq_m_sv8hi (__inactive, __a, __p); | |
7868 | } | |
7869 | ||
7870 | __extension__ extern __inline int16x8_t | |
7871 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7872 | __arm_vqabsq_m_s16 (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p) | |
7873 | { | |
7874 | return __builtin_mve_vqabsq_m_sv8hi (__inactive, __a, __p); | |
7875 | } | |
7876 | ||
7877 | __extension__ extern __inline int16x8_t | |
7878 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7879 | __arm_vnegq_m_s16 (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p) | |
7880 | { | |
7881 | return __builtin_mve_vnegq_m_sv8hi (__inactive, __a, __p); | |
7882 | } | |
7883 | ||
7884 | __extension__ extern __inline int16x8_t | |
7885 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7886 | __arm_vmvnq_m_s16 (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p) | |
7887 | { | |
7888 | return __builtin_mve_vmvnq_m_sv8hi (__inactive, __a, __p); | |
7889 | } | |
7890 | ||
7891 | __extension__ extern __inline int32_t | |
7892 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7893 | __arm_vmlsdavxq_p_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
7894 | { | |
7895 | return __builtin_mve_vmlsdavxq_p_sv8hi (__a, __b, __p); | |
7896 | } | |
7897 | ||
7898 | __extension__ extern __inline int32_t | |
7899 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7900 | __arm_vmlsdavq_p_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
7901 | { | |
7902 | return __builtin_mve_vmlsdavq_p_sv8hi (__a, __b, __p); | |
7903 | } | |
7904 | ||
7905 | __extension__ extern __inline int32_t | |
7906 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7907 | __arm_vmladavxq_p_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
7908 | { | |
7909 | return __builtin_mve_vmladavxq_p_sv8hi (__a, __b, __p); | |
7910 | } | |
7911 | ||
7912 | __extension__ extern __inline int32_t | |
7913 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7914 | __arm_vmladavq_p_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
7915 | { | |
7916 | return __builtin_mve_vmladavq_p_sv8hi (__a, __b, __p); | |
7917 | } | |
7918 | ||
7919 | __extension__ extern __inline int16_t | |
7920 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7921 | __arm_vminvq_p_s16 (int16_t __a, int16x8_t __b, mve_pred16_t __p) | |
7922 | { | |
7923 | return __builtin_mve_vminvq_p_sv8hi (__a, __b, __p); | |
7924 | } | |
7925 | ||
7926 | __extension__ extern __inline int16_t | |
7927 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7928 | __arm_vmaxvq_p_s16 (int16_t __a, int16x8_t __b, mve_pred16_t __p) | |
7929 | { | |
7930 | return __builtin_mve_vmaxvq_p_sv8hi (__a, __b, __p); | |
7931 | } | |
7932 | ||
7933 | __extension__ extern __inline int16x8_t | |
7934 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7935 | __arm_vdupq_m_n_s16 (int16x8_t __inactive, int16_t __a, mve_pred16_t __p) | |
7936 | { | |
7937 | return __builtin_mve_vdupq_m_n_sv8hi (__inactive, __a, __p); | |
7938 | } | |
7939 | ||
7940 | __extension__ extern __inline int16x8_t | |
7941 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7942 | __arm_vclzq_m_s16 (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p) | |
7943 | { | |
7944 | return __builtin_mve_vclzq_m_sv8hi (__inactive, __a, __p); | |
7945 | } | |
7946 | ||
7947 | __extension__ extern __inline int16x8_t | |
7948 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7949 | __arm_vclsq_m_s16 (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p) | |
7950 | { | |
7951 | return __builtin_mve_vclsq_m_sv8hi (__inactive, __a, __p); | |
7952 | } | |
7953 | ||
7954 | __extension__ extern __inline int32_t | |
7955 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7956 | __arm_vaddvaq_p_s16 (int32_t __a, int16x8_t __b, mve_pred16_t __p) | |
7957 | { | |
7958 | return __builtin_mve_vaddvaq_p_sv8hi (__a, __b, __p); | |
7959 | } | |
7960 | ||
7961 | __extension__ extern __inline int16x8_t | |
7962 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7963 | __arm_vabsq_m_s16 (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p) | |
7964 | { | |
7965 | return __builtin_mve_vabsq_m_sv8hi (__inactive, __a, __p); | |
7966 | } | |
7967 | ||
7968 | __extension__ extern __inline int16x8_t | |
7969 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7970 | __arm_vqrdmlsdhxq_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b) | |
7971 | { | |
7972 | return __builtin_mve_vqrdmlsdhxq_sv8hi (__inactive, __a, __b); | |
7973 | } | |
7974 | ||
7975 | __extension__ extern __inline int16x8_t | |
7976 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7977 | __arm_vqrdmlsdhq_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b) | |
7978 | { | |
7979 | return __builtin_mve_vqrdmlsdhq_sv8hi (__inactive, __a, __b); | |
7980 | } | |
7981 | ||
7982 | __extension__ extern __inline int16x8_t | |
7983 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7984 | __arm_vqrdmlashq_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c) | |
7985 | { | |
7986 | return __builtin_mve_vqrdmlashq_n_sv8hi (__a, __b, __c); | |
7987 | } | |
7988 | ||
afb198ee CL |
7989 | __extension__ extern __inline int16x8_t |
7990 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7991 | __arm_vqdmlashq_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c) | |
7992 | { | |
7993 | return __builtin_mve_vqdmlashq_n_sv8hi (__a, __b, __c); | |
7994 | } | |
7995 | ||
8165795c SP |
7996 | __extension__ extern __inline int16x8_t |
7997 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
7998 | __arm_vqrdmlahq_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c) | |
7999 | { | |
8000 | return __builtin_mve_vqrdmlahq_n_sv8hi (__a, __b, __c); | |
8001 | } | |
8002 | ||
8003 | __extension__ extern __inline int16x8_t | |
8004 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8005 | __arm_vqrdmladhxq_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b) | |
8006 | { | |
8007 | return __builtin_mve_vqrdmladhxq_sv8hi (__inactive, __a, __b); | |
8008 | } | |
8009 | ||
8010 | __extension__ extern __inline int16x8_t | |
8011 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8012 | __arm_vqrdmladhq_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b) | |
8013 | { | |
8014 | return __builtin_mve_vqrdmladhq_sv8hi (__inactive, __a, __b); | |
8015 | } | |
8016 | ||
8017 | __extension__ extern __inline int16x8_t | |
8018 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8019 | __arm_vqdmlsdhxq_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b) | |
8020 | { | |
8021 | return __builtin_mve_vqdmlsdhxq_sv8hi (__inactive, __a, __b); | |
8022 | } | |
8023 | ||
8024 | __extension__ extern __inline int16x8_t | |
8025 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8026 | __arm_vqdmlsdhq_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b) | |
8027 | { | |
8028 | return __builtin_mve_vqdmlsdhq_sv8hi (__inactive, __a, __b); | |
8029 | } | |
8030 | ||
8031 | __extension__ extern __inline int16x8_t | |
8032 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8033 | __arm_vqdmlahq_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c) | |
8034 | { | |
8035 | return __builtin_mve_vqdmlahq_n_sv8hi (__a, __b, __c); | |
8036 | } | |
8037 | ||
8038 | __extension__ extern __inline int16x8_t | |
8039 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8040 | __arm_vqdmladhxq_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b) | |
8041 | { | |
8042 | return __builtin_mve_vqdmladhxq_sv8hi (__inactive, __a, __b); | |
8043 | } | |
8044 | ||
8045 | __extension__ extern __inline int16x8_t | |
8046 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8047 | __arm_vqdmladhq_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b) | |
8048 | { | |
8049 | return __builtin_mve_vqdmladhq_sv8hi (__inactive, __a, __b); | |
8050 | } | |
8051 | ||
8052 | __extension__ extern __inline int32_t | |
8053 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8054 | __arm_vmlsdavaxq_s16 (int32_t __a, int16x8_t __b, int16x8_t __c) | |
8055 | { | |
8056 | return __builtin_mve_vmlsdavaxq_sv8hi (__a, __b, __c); | |
8057 | } | |
8058 | ||
8059 | __extension__ extern __inline int32_t | |
8060 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8061 | __arm_vmlsdavaq_s16 (int32_t __a, int16x8_t __b, int16x8_t __c) | |
8062 | { | |
8063 | return __builtin_mve_vmlsdavaq_sv8hi (__a, __b, __c); | |
8064 | } | |
8065 | ||
8066 | __extension__ extern __inline int16x8_t | |
8067 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8068 | __arm_vmlasq_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c) | |
8069 | { | |
8070 | return __builtin_mve_vmlasq_n_sv8hi (__a, __b, __c); | |
8071 | } | |
8072 | ||
8073 | __extension__ extern __inline int16x8_t | |
8074 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8075 | __arm_vmlaq_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c) | |
8076 | { | |
8077 | return __builtin_mve_vmlaq_n_sv8hi (__a, __b, __c); | |
8078 | } | |
8079 | ||
8080 | __extension__ extern __inline int32_t | |
8081 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8082 | __arm_vmladavaxq_s16 (int32_t __a, int16x8_t __b, int16x8_t __c) | |
8083 | { | |
8084 | return __builtin_mve_vmladavaxq_sv8hi (__a, __b, __c); | |
8085 | } | |
8086 | ||
8087 | __extension__ extern __inline int32_t | |
8088 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8089 | __arm_vmladavaq_s16 (int32_t __a, int16x8_t __b, int16x8_t __c) | |
8090 | { | |
8091 | return __builtin_mve_vmladavaq_sv8hi (__a, __b, __c); | |
8092 | } | |
8093 | ||
8094 | __extension__ extern __inline int16x8_t | |
8095 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8096 | __arm_vsriq_n_s16 (int16x8_t __a, int16x8_t __b, const int __imm) | |
8097 | { | |
8098 | return __builtin_mve_vsriq_n_sv8hi (__a, __b, __imm); | |
8099 | } | |
8100 | ||
8101 | __extension__ extern __inline int16x8_t | |
8102 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8103 | __arm_vsliq_n_s16 (int16x8_t __a, int16x8_t __b, const int __imm) | |
8104 | { | |
8105 | return __builtin_mve_vsliq_n_sv8hi (__a, __b, __imm); | |
8106 | } | |
8107 | ||
8108 | __extension__ extern __inline uint32x4_t | |
8109 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8110 | __arm_vpselq_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
8111 | { | |
8112 | return __builtin_mve_vpselq_uv4si (__a, __b, __p); | |
8113 | } | |
8114 | ||
8115 | __extension__ extern __inline int32x4_t | |
8116 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8117 | __arm_vpselq_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
8118 | { | |
8119 | return __builtin_mve_vpselq_sv4si (__a, __b, __p); | |
8120 | } | |
8121 | ||
8122 | __extension__ extern __inline uint32x4_t | |
8123 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8124 | __arm_vrev64q_m_u32 (uint32x4_t __inactive, uint32x4_t __a, mve_pred16_t __p) | |
8125 | { | |
8126 | return __builtin_mve_vrev64q_m_uv4si (__inactive, __a, __p); | |
8127 | } | |
8128 | ||
8165795c SP |
8129 | __extension__ extern __inline uint32x4_t |
8130 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8131 | __arm_vmvnq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, mve_pred16_t __p) | |
8132 | { | |
8133 | return __builtin_mve_vmvnq_m_uv4si (__inactive, __a, __p); | |
8134 | } | |
8135 | ||
8136 | __extension__ extern __inline uint32x4_t | |
8137 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8138 | __arm_vmlasq_n_u32 (uint32x4_t __a, uint32x4_t __b, uint32_t __c) | |
8139 | { | |
8140 | return __builtin_mve_vmlasq_n_uv4si (__a, __b, __c); | |
8141 | } | |
8142 | ||
8143 | __extension__ extern __inline uint32x4_t | |
8144 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8145 | __arm_vmlaq_n_u32 (uint32x4_t __a, uint32x4_t __b, uint32_t __c) | |
8146 | { | |
8147 | return __builtin_mve_vmlaq_n_uv4si (__a, __b, __c); | |
8148 | } | |
8149 | ||
8150 | __extension__ extern __inline uint32_t | |
8151 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8152 | __arm_vmladavq_p_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
8153 | { | |
8154 | return __builtin_mve_vmladavq_p_uv4si (__a, __b, __p); | |
8155 | } | |
8156 | ||
8157 | __extension__ extern __inline uint32_t | |
8158 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8159 | __arm_vmladavaq_u32 (uint32_t __a, uint32x4_t __b, uint32x4_t __c) | |
8160 | { | |
8161 | return __builtin_mve_vmladavaq_uv4si (__a, __b, __c); | |
8162 | } | |
8163 | ||
8164 | __extension__ extern __inline uint32_t | |
8165 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8166 | __arm_vminvq_p_u32 (uint32_t __a, uint32x4_t __b, mve_pred16_t __p) | |
8167 | { | |
8168 | return __builtin_mve_vminvq_p_uv4si (__a, __b, __p); | |
8169 | } | |
8170 | ||
8171 | __extension__ extern __inline uint32_t | |
8172 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8173 | __arm_vmaxvq_p_u32 (uint32_t __a, uint32x4_t __b, mve_pred16_t __p) | |
8174 | { | |
8175 | return __builtin_mve_vmaxvq_p_uv4si (__a, __b, __p); | |
8176 | } | |
8177 | ||
8178 | __extension__ extern __inline uint32x4_t | |
8179 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8180 | __arm_vdupq_m_n_u32 (uint32x4_t __inactive, uint32_t __a, mve_pred16_t __p) | |
8181 | { | |
8182 | return __builtin_mve_vdupq_m_n_uv4si (__inactive, __a, __p); | |
8183 | } | |
8184 | ||
8185 | __extension__ extern __inline mve_pred16_t | |
8186 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8187 | __arm_vcmpneq_m_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
8188 | { | |
8189 | return __builtin_mve_vcmpneq_m_uv4si (__a, __b, __p); | |
8190 | } | |
8191 | ||
8192 | __extension__ extern __inline mve_pred16_t | |
8193 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8194 | __arm_vcmpneq_m_n_u32 (uint32x4_t __a, uint32_t __b, mve_pred16_t __p) | |
8195 | { | |
8196 | return __builtin_mve_vcmpneq_m_n_uv4si (__a, __b, __p); | |
8197 | } | |
8198 | ||
8199 | __extension__ extern __inline mve_pred16_t | |
8200 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8201 | __arm_vcmphiq_m_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
8202 | { | |
8203 | return __builtin_mve_vcmphiq_m_uv4si (__a, __b, __p); | |
8204 | } | |
8205 | ||
8206 | __extension__ extern __inline mve_pred16_t | |
8207 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8208 | __arm_vcmphiq_m_n_u32 (uint32x4_t __a, uint32_t __b, mve_pred16_t __p) | |
8209 | { | |
8210 | return __builtin_mve_vcmphiq_m_n_uv4si (__a, __b, __p); | |
8211 | } | |
8212 | ||
8213 | __extension__ extern __inline mve_pred16_t | |
8214 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8215 | __arm_vcmpeqq_m_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
8216 | { | |
8217 | return __builtin_mve_vcmpeqq_m_uv4si (__a, __b, __p); | |
8218 | } | |
8219 | ||
8220 | __extension__ extern __inline mve_pred16_t | |
8221 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8222 | __arm_vcmpeqq_m_n_u32 (uint32x4_t __a, uint32_t __b, mve_pred16_t __p) | |
8223 | { | |
8224 | return __builtin_mve_vcmpeqq_m_n_uv4si (__a, __b, __p); | |
8225 | } | |
8226 | ||
8227 | __extension__ extern __inline mve_pred16_t | |
8228 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8229 | __arm_vcmpcsq_m_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
8230 | { | |
8231 | return __builtin_mve_vcmpcsq_m_uv4si (__a, __b, __p); | |
8232 | } | |
8233 | ||
8234 | __extension__ extern __inline mve_pred16_t | |
8235 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8236 | __arm_vcmpcsq_m_n_u32 (uint32x4_t __a, uint32_t __b, mve_pred16_t __p) | |
8237 | { | |
8238 | return __builtin_mve_vcmpcsq_m_n_uv4si (__a, __b, __p); | |
8239 | } | |
8240 | ||
8241 | __extension__ extern __inline uint32x4_t | |
8242 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8243 | __arm_vclzq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, mve_pred16_t __p) | |
8244 | { | |
8245 | return __builtin_mve_vclzq_m_uv4si (__inactive, __a, __p); | |
8246 | } | |
8247 | ||
8248 | __extension__ extern __inline uint32_t | |
8249 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8250 | __arm_vaddvaq_p_u32 (uint32_t __a, uint32x4_t __b, mve_pred16_t __p) | |
8251 | { | |
8252 | return __builtin_mve_vaddvaq_p_uv4si (__a, __b, __p); | |
8253 | } | |
8254 | ||
8255 | __extension__ extern __inline uint32x4_t | |
8256 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8257 | __arm_vsriq_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __imm) | |
8258 | { | |
8259 | return __builtin_mve_vsriq_n_uv4si (__a, __b, __imm); | |
8260 | } | |
8261 | ||
8262 | __extension__ extern __inline uint32x4_t | |
8263 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8264 | __arm_vsliq_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __imm) | |
8265 | { | |
8266 | return __builtin_mve_vsliq_n_uv4si (__a, __b, __imm); | |
8267 | } | |
8268 | ||
8269 | __extension__ extern __inline uint32x4_t | |
8270 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8271 | __arm_vshlq_m_r_u32 (uint32x4_t __a, int32_t __b, mve_pred16_t __p) | |
8272 | { | |
8273 | return __builtin_mve_vshlq_m_r_uv4si (__a, __b, __p); | |
8274 | } | |
8275 | ||
8276 | __extension__ extern __inline uint32x4_t | |
8277 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8278 | __arm_vrshlq_m_n_u32 (uint32x4_t __a, int32_t __b, mve_pred16_t __p) | |
8279 | { | |
8280 | return __builtin_mve_vrshlq_m_n_uv4si (__a, __b, __p); | |
8281 | } | |
8282 | ||
8283 | __extension__ extern __inline uint32x4_t | |
8284 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8285 | __arm_vqshlq_m_r_u32 (uint32x4_t __a, int32_t __b, mve_pred16_t __p) | |
8286 | { | |
8287 | return __builtin_mve_vqshlq_m_r_uv4si (__a, __b, __p); | |
8288 | } | |
8289 | ||
8290 | __extension__ extern __inline uint32x4_t | |
8291 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8292 | __arm_vqrshlq_m_n_u32 (uint32x4_t __a, int32_t __b, mve_pred16_t __p) | |
8293 | { | |
8294 | return __builtin_mve_vqrshlq_m_n_uv4si (__a, __b, __p); | |
8295 | } | |
8296 | ||
8297 | __extension__ extern __inline uint32_t | |
8298 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8299 | __arm_vminavq_p_s32 (uint32_t __a, int32x4_t __b, mve_pred16_t __p) | |
8300 | { | |
8301 | return __builtin_mve_vminavq_p_sv4si (__a, __b, __p); | |
8302 | } | |
8303 | ||
8304 | __extension__ extern __inline uint32x4_t | |
8305 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8306 | __arm_vminaq_m_s32 (uint32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
8307 | { | |
8308 | return __builtin_mve_vminaq_m_sv4si (__a, __b, __p); | |
8309 | } | |
8310 | ||
8311 | __extension__ extern __inline uint32_t | |
8312 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8313 | __arm_vmaxavq_p_s32 (uint32_t __a, int32x4_t __b, mve_pred16_t __p) | |
8314 | { | |
8315 | return __builtin_mve_vmaxavq_p_sv4si (__a, __b, __p); | |
8316 | } | |
8317 | ||
8318 | __extension__ extern __inline uint32x4_t | |
8319 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8320 | __arm_vmaxaq_m_s32 (uint32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
8321 | { | |
8322 | return __builtin_mve_vmaxaq_m_sv4si (__a, __b, __p); | |
8323 | } | |
8324 | ||
8325 | __extension__ extern __inline mve_pred16_t | |
8326 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8327 | __arm_vcmpneq_m_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
8328 | { | |
8329 | return __builtin_mve_vcmpneq_m_sv4si (__a, __b, __p); | |
8330 | } | |
8331 | ||
8332 | __extension__ extern __inline mve_pred16_t | |
8333 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8334 | __arm_vcmpneq_m_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p) | |
8335 | { | |
8336 | return __builtin_mve_vcmpneq_m_n_sv4si (__a, __b, __p); | |
8337 | } | |
8338 | ||
8339 | __extension__ extern __inline mve_pred16_t | |
8340 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8341 | __arm_vcmpltq_m_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
8342 | { | |
8343 | return __builtin_mve_vcmpltq_m_sv4si (__a, __b, __p); | |
8344 | } | |
8345 | ||
8346 | __extension__ extern __inline mve_pred16_t | |
8347 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8348 | __arm_vcmpltq_m_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p) | |
8349 | { | |
8350 | return __builtin_mve_vcmpltq_m_n_sv4si (__a, __b, __p); | |
8351 | } | |
8352 | ||
8353 | __extension__ extern __inline mve_pred16_t | |
8354 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8355 | __arm_vcmpleq_m_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
8356 | { | |
8357 | return __builtin_mve_vcmpleq_m_sv4si (__a, __b, __p); | |
8358 | } | |
8359 | ||
8360 | __extension__ extern __inline mve_pred16_t | |
8361 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8362 | __arm_vcmpleq_m_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p) | |
8363 | { | |
8364 | return __builtin_mve_vcmpleq_m_n_sv4si (__a, __b, __p); | |
8365 | } | |
8366 | ||
8367 | __extension__ extern __inline mve_pred16_t | |
8368 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8369 | __arm_vcmpgtq_m_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
8370 | { | |
8371 | return __builtin_mve_vcmpgtq_m_sv4si (__a, __b, __p); | |
8372 | } | |
8373 | ||
8374 | __extension__ extern __inline mve_pred16_t | |
8375 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8376 | __arm_vcmpgtq_m_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p) | |
8377 | { | |
8378 | return __builtin_mve_vcmpgtq_m_n_sv4si (__a, __b, __p); | |
8379 | } | |
8380 | ||
8381 | __extension__ extern __inline mve_pred16_t | |
8382 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8383 | __arm_vcmpgeq_m_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
8384 | { | |
8385 | return __builtin_mve_vcmpgeq_m_sv4si (__a, __b, __p); | |
8386 | } | |
8387 | ||
8388 | __extension__ extern __inline mve_pred16_t | |
8389 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8390 | __arm_vcmpgeq_m_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p) | |
8391 | { | |
8392 | return __builtin_mve_vcmpgeq_m_n_sv4si (__a, __b, __p); | |
8393 | } | |
8394 | ||
8395 | __extension__ extern __inline mve_pred16_t | |
8396 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8397 | __arm_vcmpeqq_m_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
8398 | { | |
8399 | return __builtin_mve_vcmpeqq_m_sv4si (__a, __b, __p); | |
8400 | } | |
8401 | ||
8402 | __extension__ extern __inline mve_pred16_t | |
8403 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8404 | __arm_vcmpeqq_m_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p) | |
8405 | { | |
8406 | return __builtin_mve_vcmpeqq_m_n_sv4si (__a, __b, __p); | |
8407 | } | |
8408 | ||
8409 | __extension__ extern __inline int32x4_t | |
8410 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8411 | __arm_vshlq_m_r_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p) | |
8412 | { | |
8413 | return __builtin_mve_vshlq_m_r_sv4si (__a, __b, __p); | |
8414 | } | |
8415 | ||
8416 | __extension__ extern __inline int32x4_t | |
8417 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8418 | __arm_vrshlq_m_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p) | |
8419 | { | |
8420 | return __builtin_mve_vrshlq_m_n_sv4si (__a, __b, __p); | |
8421 | } | |
8422 | ||
8423 | __extension__ extern __inline int32x4_t | |
8424 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8425 | __arm_vrev64q_m_s32 (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p) | |
8426 | { | |
8427 | return __builtin_mve_vrev64q_m_sv4si (__inactive, __a, __p); | |
8428 | } | |
8429 | ||
8430 | __extension__ extern __inline int32x4_t | |
8431 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8432 | __arm_vqshlq_m_r_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p) | |
8433 | { | |
8434 | return __builtin_mve_vqshlq_m_r_sv4si (__a, __b, __p); | |
8435 | } | |
8436 | ||
8437 | __extension__ extern __inline int32x4_t | |
8438 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8439 | __arm_vqrshlq_m_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p) | |
8440 | { | |
8441 | return __builtin_mve_vqrshlq_m_n_sv4si (__a, __b, __p); | |
8442 | } | |
8443 | ||
8444 | __extension__ extern __inline int32x4_t | |
8445 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8446 | __arm_vqnegq_m_s32 (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p) | |
8447 | { | |
8448 | return __builtin_mve_vqnegq_m_sv4si (__inactive, __a, __p); | |
8449 | } | |
8450 | ||
8451 | __extension__ extern __inline int32x4_t | |
8452 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8453 | __arm_vqabsq_m_s32 (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p) | |
8454 | { | |
8455 | return __builtin_mve_vqabsq_m_sv4si (__inactive, __a, __p); | |
8456 | } | |
8457 | ||
8458 | __extension__ extern __inline int32x4_t | |
8459 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8460 | __arm_vnegq_m_s32 (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p) | |
8461 | { | |
8462 | return __builtin_mve_vnegq_m_sv4si (__inactive, __a, __p); | |
8463 | } | |
8464 | ||
8465 | __extension__ extern __inline int32x4_t | |
8466 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8467 | __arm_vmvnq_m_s32 (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p) | |
8468 | { | |
8469 | return __builtin_mve_vmvnq_m_sv4si (__inactive, __a, __p); | |
8470 | } | |
8471 | ||
8472 | __extension__ extern __inline int32_t | |
8473 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8474 | __arm_vmlsdavxq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
8475 | { | |
8476 | return __builtin_mve_vmlsdavxq_p_sv4si (__a, __b, __p); | |
8477 | } | |
8478 | ||
8479 | __extension__ extern __inline int32_t | |
8480 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8481 | __arm_vmlsdavq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
8482 | { | |
8483 | return __builtin_mve_vmlsdavq_p_sv4si (__a, __b, __p); | |
8484 | } | |
8485 | ||
8486 | __extension__ extern __inline int32_t | |
8487 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8488 | __arm_vmladavxq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
8489 | { | |
8490 | return __builtin_mve_vmladavxq_p_sv4si (__a, __b, __p); | |
8491 | } | |
8492 | ||
8493 | __extension__ extern __inline int32_t | |
8494 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8495 | __arm_vmladavq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
8496 | { | |
8497 | return __builtin_mve_vmladavq_p_sv4si (__a, __b, __p); | |
8498 | } | |
8499 | ||
8500 | __extension__ extern __inline int32_t | |
8501 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8502 | __arm_vminvq_p_s32 (int32_t __a, int32x4_t __b, mve_pred16_t __p) | |
8503 | { | |
8504 | return __builtin_mve_vminvq_p_sv4si (__a, __b, __p); | |
8505 | } | |
8506 | ||
8507 | __extension__ extern __inline int32_t | |
8508 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8509 | __arm_vmaxvq_p_s32 (int32_t __a, int32x4_t __b, mve_pred16_t __p) | |
8510 | { | |
8511 | return __builtin_mve_vmaxvq_p_sv4si (__a, __b, __p); | |
8512 | } | |
8513 | ||
8514 | __extension__ extern __inline int32x4_t | |
8515 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8516 | __arm_vdupq_m_n_s32 (int32x4_t __inactive, int32_t __a, mve_pred16_t __p) | |
8517 | { | |
8518 | return __builtin_mve_vdupq_m_n_sv4si (__inactive, __a, __p); | |
8519 | } | |
8520 | ||
8521 | __extension__ extern __inline int32x4_t | |
8522 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8523 | __arm_vclzq_m_s32 (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p) | |
8524 | { | |
8525 | return __builtin_mve_vclzq_m_sv4si (__inactive, __a, __p); | |
8526 | } | |
8527 | ||
8528 | __extension__ extern __inline int32x4_t | |
8529 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8530 | __arm_vclsq_m_s32 (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p) | |
8531 | { | |
8532 | return __builtin_mve_vclsq_m_sv4si (__inactive, __a, __p); | |
8533 | } | |
8534 | ||
8535 | __extension__ extern __inline int32_t | |
8536 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8537 | __arm_vaddvaq_p_s32 (int32_t __a, int32x4_t __b, mve_pred16_t __p) | |
8538 | { | |
8539 | return __builtin_mve_vaddvaq_p_sv4si (__a, __b, __p); | |
8540 | } | |
8541 | ||
8542 | __extension__ extern __inline int32x4_t | |
8543 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8544 | __arm_vabsq_m_s32 (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p) | |
8545 | { | |
8546 | return __builtin_mve_vabsq_m_sv4si (__inactive, __a, __p); | |
8547 | } | |
8548 | ||
8549 | __extension__ extern __inline int32x4_t | |
8550 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8551 | __arm_vqrdmlsdhxq_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b) | |
8552 | { | |
8553 | return __builtin_mve_vqrdmlsdhxq_sv4si (__inactive, __a, __b); | |
8554 | } | |
8555 | ||
8556 | __extension__ extern __inline int32x4_t | |
8557 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8558 | __arm_vqrdmlsdhq_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b) | |
8559 | { | |
8560 | return __builtin_mve_vqrdmlsdhq_sv4si (__inactive, __a, __b); | |
8561 | } | |
8562 | ||
8563 | __extension__ extern __inline int32x4_t | |
8564 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8565 | __arm_vqrdmlashq_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c) | |
8566 | { | |
8567 | return __builtin_mve_vqrdmlashq_n_sv4si (__a, __b, __c); | |
8568 | } | |
8569 | ||
afb198ee CL |
8570 | __extension__ extern __inline int32x4_t |
8571 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8572 | __arm_vqdmlashq_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c) | |
8573 | { | |
8574 | return __builtin_mve_vqdmlashq_n_sv4si (__a, __b, __c); | |
8575 | } | |
8576 | ||
8165795c SP |
8577 | __extension__ extern __inline int32x4_t |
8578 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8579 | __arm_vqrdmlahq_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c) | |
8580 | { | |
8581 | return __builtin_mve_vqrdmlahq_n_sv4si (__a, __b, __c); | |
8582 | } | |
8583 | ||
8584 | __extension__ extern __inline int32x4_t | |
8585 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8586 | __arm_vqrdmladhxq_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b) | |
8587 | { | |
8588 | return __builtin_mve_vqrdmladhxq_sv4si (__inactive, __a, __b); | |
8589 | } | |
8590 | ||
8591 | __extension__ extern __inline int32x4_t | |
8592 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8593 | __arm_vqrdmladhq_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b) | |
8594 | { | |
8595 | return __builtin_mve_vqrdmladhq_sv4si (__inactive, __a, __b); | |
8596 | } | |
8597 | ||
8598 | __extension__ extern __inline int32x4_t | |
8599 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8600 | __arm_vqdmlsdhxq_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b) | |
8601 | { | |
8602 | return __builtin_mve_vqdmlsdhxq_sv4si (__inactive, __a, __b); | |
8603 | } | |
8604 | ||
8605 | __extension__ extern __inline int32x4_t | |
8606 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8607 | __arm_vqdmlsdhq_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b) | |
8608 | { | |
8609 | return __builtin_mve_vqdmlsdhq_sv4si (__inactive, __a, __b); | |
8610 | } | |
8611 | ||
8612 | __extension__ extern __inline int32x4_t | |
8613 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8614 | __arm_vqdmlahq_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c) | |
8615 | { | |
8616 | return __builtin_mve_vqdmlahq_n_sv4si (__a, __b, __c); | |
8617 | } | |
8618 | ||
8619 | __extension__ extern __inline int32x4_t | |
8620 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8621 | __arm_vqdmladhxq_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b) | |
8622 | { | |
8623 | return __builtin_mve_vqdmladhxq_sv4si (__inactive, __a, __b); | |
8624 | } | |
8625 | ||
8626 | __extension__ extern __inline int32x4_t | |
8627 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8628 | __arm_vqdmladhq_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b) | |
8629 | { | |
8630 | return __builtin_mve_vqdmladhq_sv4si (__inactive, __a, __b); | |
8631 | } | |
8632 | ||
8633 | __extension__ extern __inline int32_t | |
8634 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8635 | __arm_vmlsdavaxq_s32 (int32_t __a, int32x4_t __b, int32x4_t __c) | |
8636 | { | |
8637 | return __builtin_mve_vmlsdavaxq_sv4si (__a, __b, __c); | |
8638 | } | |
8639 | ||
8640 | __extension__ extern __inline int32_t | |
8641 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8642 | __arm_vmlsdavaq_s32 (int32_t __a, int32x4_t __b, int32x4_t __c) | |
8643 | { | |
8644 | return __builtin_mve_vmlsdavaq_sv4si (__a, __b, __c); | |
8645 | } | |
8646 | ||
8647 | __extension__ extern __inline int32x4_t | |
8648 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8649 | __arm_vmlasq_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c) | |
8650 | { | |
8651 | return __builtin_mve_vmlasq_n_sv4si (__a, __b, __c); | |
8652 | } | |
8653 | ||
8654 | __extension__ extern __inline int32x4_t | |
8655 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8656 | __arm_vmlaq_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c) | |
8657 | { | |
8658 | return __builtin_mve_vmlaq_n_sv4si (__a, __b, __c); | |
8659 | } | |
8660 | ||
8661 | __extension__ extern __inline int32_t | |
8662 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8663 | __arm_vmladavaxq_s32 (int32_t __a, int32x4_t __b, int32x4_t __c) | |
8664 | { | |
8665 | return __builtin_mve_vmladavaxq_sv4si (__a, __b, __c); | |
8666 | } | |
8667 | ||
8668 | __extension__ extern __inline int32_t | |
8669 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8670 | __arm_vmladavaq_s32 (int32_t __a, int32x4_t __b, int32x4_t __c) | |
8671 | { | |
8672 | return __builtin_mve_vmladavaq_sv4si (__a, __b, __c); | |
8673 | } | |
8674 | ||
8675 | __extension__ extern __inline int32x4_t | |
8676 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8677 | __arm_vsriq_n_s32 (int32x4_t __a, int32x4_t __b, const int __imm) | |
8678 | { | |
8679 | return __builtin_mve_vsriq_n_sv4si (__a, __b, __imm); | |
8680 | } | |
8681 | ||
8682 | __extension__ extern __inline int32x4_t | |
8683 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8684 | __arm_vsliq_n_s32 (int32x4_t __a, int32x4_t __b, const int __imm) | |
8685 | { | |
8686 | return __builtin_mve_vsliq_n_sv4si (__a, __b, __imm); | |
8687 | } | |
8688 | ||
8689 | __extension__ extern __inline uint64x2_t | |
8690 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8691 | __arm_vpselq_u64 (uint64x2_t __a, uint64x2_t __b, mve_pred16_t __p) | |
8692 | { | |
8693 | return __builtin_mve_vpselq_uv2di (__a, __b, __p); | |
8694 | } | |
8695 | ||
8696 | __extension__ extern __inline int64x2_t | |
8697 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
8698 | __arm_vpselq_s64 (int64x2_t __a, int64x2_t __b, mve_pred16_t __p) | |
8699 | { | |
8700 | return __builtin_mve_vpselq_sv2di (__a, __b, __p); | |
8701 | } | |
f9355dee | 8702 | |
e3678b44 | 8703 | __extension__ extern __inline int64_t |
f9355dee | 8704 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8705 | __arm_vrmlaldavhaxq_s32 (int64_t __a, int32x4_t __b, int32x4_t __c) |
f9355dee | 8706 | { |
e3678b44 | 8707 | return __builtin_mve_vrmlaldavhaxq_sv4si (__a, __b, __c); |
f9355dee SP |
8708 | } |
8709 | ||
e3678b44 | 8710 | __extension__ extern __inline int64_t |
f9355dee | 8711 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8712 | __arm_vrmlsldavhaq_s32 (int64_t __a, int32x4_t __b, int32x4_t __c) |
f9355dee | 8713 | { |
e3678b44 | 8714 | return __builtin_mve_vrmlsldavhaq_sv4si (__a, __b, __c); |
f9355dee SP |
8715 | } |
8716 | ||
e3678b44 | 8717 | __extension__ extern __inline int64_t |
f9355dee | 8718 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8719 | __arm_vrmlsldavhaxq_s32 (int64_t __a, int32x4_t __b, int32x4_t __c) |
f9355dee | 8720 | { |
e3678b44 | 8721 | return __builtin_mve_vrmlsldavhaxq_sv4si (__a, __b, __c); |
f9355dee SP |
8722 | } |
8723 | ||
e3678b44 | 8724 | __extension__ extern __inline int64_t |
f9355dee | 8725 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8726 | __arm_vaddlvaq_p_s32 (int64_t __a, int32x4_t __b, mve_pred16_t __p) |
f9355dee | 8727 | { |
e3678b44 | 8728 | return __builtin_mve_vaddlvaq_p_sv4si (__a, __b, __p); |
f9355dee SP |
8729 | } |
8730 | ||
e3678b44 | 8731 | __extension__ extern __inline int8x16_t |
f9355dee | 8732 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8733 | __arm_vrev16q_m_s8 (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p) |
f9355dee | 8734 | { |
e3678b44 | 8735 | return __builtin_mve_vrev16q_m_sv16qi (__inactive, __a, __p); |
f9355dee SP |
8736 | } |
8737 | ||
e3678b44 | 8738 | __extension__ extern __inline int64_t |
f9355dee | 8739 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8740 | __arm_vrmlaldavhq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) |
f9355dee | 8741 | { |
e3678b44 | 8742 | return __builtin_mve_vrmlaldavhq_p_sv4si (__a, __b, __p); |
f9355dee SP |
8743 | } |
8744 | ||
e3678b44 | 8745 | __extension__ extern __inline int64_t |
f9355dee | 8746 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8747 | __arm_vrmlaldavhxq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) |
f9355dee | 8748 | { |
e3678b44 | 8749 | return __builtin_mve_vrmlaldavhxq_p_sv4si (__a, __b, __p); |
f9355dee SP |
8750 | } |
8751 | ||
e3678b44 | 8752 | __extension__ extern __inline int64_t |
f9355dee | 8753 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8754 | __arm_vrmlsldavhq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) |
f9355dee | 8755 | { |
e3678b44 | 8756 | return __builtin_mve_vrmlsldavhq_p_sv4si (__a, __b, __p); |
f9355dee SP |
8757 | } |
8758 | ||
e3678b44 | 8759 | __extension__ extern __inline int64_t |
f9355dee | 8760 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8761 | __arm_vrmlsldavhxq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) |
f9355dee | 8762 | { |
e3678b44 | 8763 | return __builtin_mve_vrmlsldavhxq_p_sv4si (__a, __b, __p); |
f9355dee SP |
8764 | } |
8765 | ||
e3678b44 | 8766 | __extension__ extern __inline uint64_t |
f9355dee | 8767 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8768 | __arm_vaddlvaq_p_u32 (uint64_t __a, uint32x4_t __b, mve_pred16_t __p) |
f9355dee | 8769 | { |
e3678b44 | 8770 | return __builtin_mve_vaddlvaq_p_uv4si (__a, __b, __p); |
f9355dee SP |
8771 | } |
8772 | ||
e3678b44 | 8773 | __extension__ extern __inline uint8x16_t |
f9355dee | 8774 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8775 | __arm_vrev16q_m_u8 (uint8x16_t __inactive, uint8x16_t __a, mve_pred16_t __p) |
f9355dee | 8776 | { |
e3678b44 | 8777 | return __builtin_mve_vrev16q_m_uv16qi (__inactive, __a, __p); |
f9355dee SP |
8778 | } |
8779 | ||
e3678b44 | 8780 | __extension__ extern __inline uint64_t |
f9355dee | 8781 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8782 | __arm_vrmlaldavhq_p_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) |
f9355dee | 8783 | { |
e3678b44 | 8784 | return __builtin_mve_vrmlaldavhq_p_uv4si (__a, __b, __p); |
f9355dee SP |
8785 | } |
8786 | ||
e3678b44 | 8787 | __extension__ extern __inline int16x8_t |
f9355dee | 8788 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8789 | __arm_vmvnq_m_n_s16 (int16x8_t __inactive, const int __imm, mve_pred16_t __p) |
f9355dee | 8790 | { |
e3678b44 | 8791 | return __builtin_mve_vmvnq_m_n_sv8hi (__inactive, __imm, __p); |
f9355dee SP |
8792 | } |
8793 | ||
e3678b44 | 8794 | __extension__ extern __inline int16x8_t |
f9355dee | 8795 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8796 | __arm_vorrq_m_n_s16 (int16x8_t __a, const int __imm, mve_pred16_t __p) |
f9355dee | 8797 | { |
e3678b44 | 8798 | return __builtin_mve_vorrq_m_n_sv8hi (__a, __imm, __p); |
f9355dee SP |
8799 | } |
8800 | ||
e3678b44 | 8801 | __extension__ extern __inline int8x16_t |
f9355dee | 8802 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8803 | __arm_vqrshrntq_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm) |
f9355dee | 8804 | { |
e3678b44 | 8805 | return __builtin_mve_vqrshrntq_n_sv8hi (__a, __b, __imm); |
f9355dee SP |
8806 | } |
8807 | ||
e3678b44 | 8808 | __extension__ extern __inline int8x16_t |
f9355dee | 8809 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8810 | __arm_vqshrnbq_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm) |
f9355dee | 8811 | { |
e3678b44 | 8812 | return __builtin_mve_vqshrnbq_n_sv8hi (__a, __b, __imm); |
f9355dee SP |
8813 | } |
8814 | ||
e3678b44 | 8815 | __extension__ extern __inline int8x16_t |
f9355dee | 8816 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8817 | __arm_vqshrntq_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm) |
f9355dee | 8818 | { |
e3678b44 | 8819 | return __builtin_mve_vqshrntq_n_sv8hi (__a, __b, __imm); |
f9355dee SP |
8820 | } |
8821 | ||
e3678b44 | 8822 | __extension__ extern __inline int8x16_t |
f9355dee | 8823 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8824 | __arm_vrshrnbq_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm) |
f9355dee | 8825 | { |
e3678b44 | 8826 | return __builtin_mve_vrshrnbq_n_sv8hi (__a, __b, __imm); |
f9355dee SP |
8827 | } |
8828 | ||
e3678b44 | 8829 | __extension__ extern __inline int8x16_t |
f9355dee | 8830 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8831 | __arm_vrshrntq_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm) |
f9355dee | 8832 | { |
e3678b44 | 8833 | return __builtin_mve_vrshrntq_n_sv8hi (__a, __b, __imm); |
f9355dee SP |
8834 | } |
8835 | ||
e3678b44 | 8836 | __extension__ extern __inline int8x16_t |
f9355dee | 8837 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8838 | __arm_vshrnbq_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm) |
f9355dee | 8839 | { |
e3678b44 | 8840 | return __builtin_mve_vshrnbq_n_sv8hi (__a, __b, __imm); |
f9355dee SP |
8841 | } |
8842 | ||
e3678b44 | 8843 | __extension__ extern __inline int8x16_t |
f9355dee | 8844 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8845 | __arm_vshrntq_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm) |
f9355dee | 8846 | { |
e3678b44 | 8847 | return __builtin_mve_vshrntq_n_sv8hi (__a, __b, __imm); |
f9355dee SP |
8848 | } |
8849 | ||
e3678b44 | 8850 | __extension__ extern __inline int64_t |
f9355dee | 8851 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8852 | __arm_vmlaldavaq_s16 (int64_t __a, int16x8_t __b, int16x8_t __c) |
f9355dee | 8853 | { |
e3678b44 | 8854 | return __builtin_mve_vmlaldavaq_sv8hi (__a, __b, __c); |
f9355dee SP |
8855 | } |
8856 | ||
e3678b44 | 8857 | __extension__ extern __inline int64_t |
f9355dee | 8858 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8859 | __arm_vmlaldavaxq_s16 (int64_t __a, int16x8_t __b, int16x8_t __c) |
f9355dee | 8860 | { |
e3678b44 | 8861 | return __builtin_mve_vmlaldavaxq_sv8hi (__a, __b, __c); |
f9355dee SP |
8862 | } |
8863 | ||
e3678b44 | 8864 | __extension__ extern __inline int64_t |
f9355dee | 8865 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8866 | __arm_vmlsldavaq_s16 (int64_t __a, int16x8_t __b, int16x8_t __c) |
f9355dee | 8867 | { |
e3678b44 | 8868 | return __builtin_mve_vmlsldavaq_sv8hi (__a, __b, __c); |
f9355dee SP |
8869 | } |
8870 | ||
e3678b44 | 8871 | __extension__ extern __inline int64_t |
f9355dee | 8872 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8873 | __arm_vmlsldavaxq_s16 (int64_t __a, int16x8_t __b, int16x8_t __c) |
f9355dee | 8874 | { |
e3678b44 | 8875 | return __builtin_mve_vmlsldavaxq_sv8hi (__a, __b, __c); |
f9355dee SP |
8876 | } |
8877 | ||
e3678b44 | 8878 | __extension__ extern __inline int64_t |
f9355dee | 8879 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8880 | __arm_vmlaldavq_p_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) |
f9355dee | 8881 | { |
e3678b44 | 8882 | return __builtin_mve_vmlaldavq_p_sv8hi (__a, __b, __p); |
f9355dee SP |
8883 | } |
8884 | ||
e3678b44 | 8885 | __extension__ extern __inline int64_t |
f9355dee | 8886 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8887 | __arm_vmlaldavxq_p_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) |
f9355dee | 8888 | { |
e3678b44 | 8889 | return __builtin_mve_vmlaldavxq_p_sv8hi (__a, __b, __p); |
f9355dee SP |
8890 | } |
8891 | ||
e3678b44 | 8892 | __extension__ extern __inline int64_t |
f9355dee | 8893 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8894 | __arm_vmlsldavq_p_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) |
f9355dee | 8895 | { |
e3678b44 | 8896 | return __builtin_mve_vmlsldavq_p_sv8hi (__a, __b, __p); |
f9355dee SP |
8897 | } |
8898 | ||
e3678b44 | 8899 | __extension__ extern __inline int64_t |
f9355dee | 8900 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
e3678b44 | 8901 | __arm_vmlsldavxq_p_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) |
f9355dee | 8902 | { |
e3678b44 | 8903 | return __builtin_mve_vmlsldavxq_p_sv8hi (__a, __b, __p); |
f9355dee SP |
8904 | } |
8905 | ||
/* Predicated widen/narrow/reverse operations on signed 8/16-bit
   vectors.  The _m ("merging") forms take either an __inactive vector
   or a destination __a supplying the lanes not selected by __p.  */

/* vmovlbq: widen the bottom (even-numbered) s8 lanes to s16.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovlbq_m_s8 (int16x8_t __inactive, int8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vmovlbq_m_sv16qi (__inactive, __a, __p);
}

/* vmovltq: widen the top (odd-numbered) s8 lanes to s16.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovltq_m_s8 (int16x8_t __inactive, int8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vmovltq_m_sv16qi (__inactive, __a, __p);
}

/* vmovnbq: narrow s16 lanes of __b into the bottom half-lanes of __a.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovnbq_m_s16 (int8x16_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmovnbq_m_sv8hi (__a, __b, __p);
}

/* vmovntq: narrow s16 lanes of __b into the top half-lanes of __a.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovntq_m_s16 (int8x16_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmovntq_m_sv8hi (__a, __b, __p);
}

/* vqmovnbq: saturating narrow of __b into the bottom half-lanes of __a.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqmovnbq_m_s16 (int8x16_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqmovnbq_m_sv8hi (__a, __b, __p);
}

/* vqmovntq: saturating narrow of __b into the top half-lanes of __a.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqmovntq_m_s16 (int8x16_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqmovntq_m_sv8hi (__a, __b, __p);
}

/* vrev32q: reverse the order of the s8 elements within each 32-bit
   chunk, predicated on __p.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev32q_m_s8 (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrev32q_m_sv16qi (__inactive, __a, __p);
}
8954 | ||
/* Predicated immediate bitwise ops on u16 vectors, and
   signed-to-unsigned saturating narrowing right shifts (VQ[R]SHRUN
   family) from s16 to u8.  __imm is a compile-time immediate.  */

/* vmvnq_m_n: write the bitwise inverse of __imm to predicated lanes;
   other lanes come from __inactive.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_m_n_u16 (uint16x8_t __inactive, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vmvnq_m_n_uv8hi (__inactive, __imm, __p);
}

/* vorrq_m_n: bitwise OR __imm into the predicated lanes of __a.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_m_n_u16 (uint16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vorrq_m_n_uv8hi (__a, __imm, __p);
}

/* vqrshruntq_n: rounding saturating shift right by __imm, narrowing
   signed __b to unsigned, written to the top half-lanes of __a.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshruntq_n_s16 (uint8x16_t __a, int16x8_t __b, const int __imm)
{
  return __builtin_mve_vqrshruntq_n_sv8hi (__a, __b, __imm);
}

/* vqshrunbq_n: saturating shift right, signed-to-unsigned narrow,
   bottom half-lanes.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshrunbq_n_s16 (uint8x16_t __a, int16x8_t __b, const int __imm)
{
  return __builtin_mve_vqshrunbq_n_sv8hi (__a, __b, __imm);
}

/* vqshruntq_n: saturating shift right, signed-to-unsigned narrow,
   top half-lanes.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshruntq_n_s16 (uint8x16_t __a, int16x8_t __b, const int __imm)
{
  return __builtin_mve_vqshruntq_n_sv8hi (__a, __b, __imm);
}
8989 | ||
/* Predicated signed-to-unsigned saturating narrows (VQMOVUN) from s16,
   and the u16->u8 narrowing right-shift family (VQ[R]SHRN / V[R]SHRN).
   The *bq forms target the bottom half-lanes of __a, the *tq forms the
   top half-lanes.  */

/* vqmovunbq_m: predicated saturating narrow of signed __b to unsigned,
   bottom half-lanes.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqmovunbq_m_s16 (uint8x16_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqmovunbq_m_sv8hi (__a, __b, __p);
}

/* vqmovuntq_m: as above, top half-lanes.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqmovuntq_m_s16 (uint8x16_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqmovuntq_m_sv8hi (__a, __b, __p);
}

/* vqrshrntq_n: rounding saturating shift right by __imm, narrow, top.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshrntq_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm)
{
  return __builtin_mve_vqrshrntq_n_uv8hi (__a, __b, __imm);
}

/* vqshrnbq_n: saturating shift right, narrow, bottom.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshrnbq_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm)
{
  return __builtin_mve_vqshrnbq_n_uv8hi (__a, __b, __imm);
}

/* vqshrntq_n: saturating shift right, narrow, top.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshrntq_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm)
{
  return __builtin_mve_vqshrntq_n_uv8hi (__a, __b, __imm);
}

/* vrshrnbq_n: rounding shift right, narrow, bottom.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrnbq_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm)
{
  return __builtin_mve_vrshrnbq_n_uv8hi (__a, __b, __imm);
}

/* vrshrntq_n: rounding shift right, narrow, top.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrntq_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm)
{
  return __builtin_mve_vrshrntq_n_uv8hi (__a, __b, __imm);
}

/* vshrnbq_n: shift right, narrow, bottom.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrnbq_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm)
{
  return __builtin_mve_vshrnbq_n_uv8hi (__a, __b, __imm);
}

/* vshrntq_n: shift right, narrow, top.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrntq_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm)
{
  return __builtin_mve_vshrntq_n_uv8hi (__a, __b, __imm);
}
9052 | ||
/* Unsigned 16-bit long multiply-accumulate reductions, and the
   unsigned u8/u16 predicated widen/narrow/reverse operations.  */

/* vmlaldavaq: multiply-accumulate long across vector, added to the
   64-bit scalar accumulator __a.  */
__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaldavaq_u16 (uint64_t __a, uint16x8_t __b, uint16x8_t __c)
{
  return __builtin_mve_vmlaldavaq_uv8hi (__a, __b, __c);
}

/* vmlaldavq_p: predicated multiply-accumulate long reduction.  */
__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaldavq_p_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmlaldavq_p_uv8hi (__a, __b, __p);
}

/* vmovlbq_m: predicated widen of the bottom u8 lanes to u16.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovlbq_m_u8 (uint16x8_t __inactive, uint8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vmovlbq_m_uv16qi (__inactive, __a, __p);
}

/* vmovltq_m: predicated widen of the top u8 lanes to u16.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovltq_m_u8 (uint16x8_t __inactive, uint8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vmovltq_m_uv16qi (__inactive, __a, __p);
}

/* vmovnbq_m: predicated narrow of __b into the bottom half-lanes of __a.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovnbq_m_u16 (uint8x16_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmovnbq_m_uv8hi (__a, __b, __p);
}

/* vmovntq_m: predicated narrow of __b into the top half-lanes of __a.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovntq_m_u16 (uint8x16_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmovntq_m_uv8hi (__a, __b, __p);
}

/* vqmovnbq_m: predicated saturating narrow, bottom half-lanes.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqmovnbq_m_u16 (uint8x16_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqmovnbq_m_uv8hi (__a, __b, __p);
}

/* vqmovntq_m: predicated saturating narrow, top half-lanes.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqmovntq_m_u16 (uint8x16_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqmovntq_m_uv8hi (__a, __b, __p);
}

/* vrev32q_m: predicated reverse of u8 elements within 32-bit chunks.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev32q_m_u8 (uint8x16_t __inactive, uint8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrev32q_m_uv16qi (__inactive, __a, __p);
}
9115 | ||
/* Predicated immediate bitwise ops on s32 vectors, and the s32->s16
   narrowing right-shift family.  */

/* vmvnq_m_n: write the bitwise inverse of __imm to predicated lanes.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_m_n_s32 (int32x4_t __inactive, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vmvnq_m_n_sv4si (__inactive, __imm, __p);
}

/* vorrq_m_n: bitwise OR __imm into the predicated lanes of __a.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_m_n_s32 (int32x4_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vorrq_m_n_sv4si (__a, __imm, __p);
}

/* vqrshrntq_n: rounding saturating shift right by __imm, narrow, top.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshrntq_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm)
{
  return __builtin_mve_vqrshrntq_n_sv4si (__a, __b, __imm);
}

/* vqshrnbq_n: saturating shift right, narrow, bottom.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshrnbq_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm)
{
  return __builtin_mve_vqshrnbq_n_sv4si (__a, __b, __imm);
}

/* vqshrntq_n: saturating shift right, narrow, top.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshrntq_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm)
{
  return __builtin_mve_vqshrntq_n_sv4si (__a, __b, __imm);
}

/* vrshrnbq_n: rounding shift right, narrow, bottom.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrnbq_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm)
{
  return __builtin_mve_vrshrnbq_n_sv4si (__a, __b, __imm);
}

/* vrshrntq_n: rounding shift right, narrow, top.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrntq_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm)
{
  return __builtin_mve_vrshrntq_n_sv4si (__a, __b, __imm);
}

/* vshrnbq_n: shift right, narrow, bottom.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrnbq_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm)
{
  return __builtin_mve_vshrnbq_n_sv4si (__a, __b, __imm);
}

/* vshrntq_n: shift right, narrow, top.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrntq_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm)
{
  return __builtin_mve_vshrntq_n_sv4si (__a, __b, __imm);
}
9178 | ||
/* Signed 32-bit long multiply-accumulate reductions to a 64-bit
   scalar.  The *aq forms add to the scalar accumulator __a; the *q_p
   forms are predicated by __p; the *x* forms use exchanged pairing;
   the *ls* forms subtract instead of add.  */

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaldavaq_s32 (int64_t __a, int32x4_t __b, int32x4_t __c)
{
  return __builtin_mve_vmlaldavaq_sv4si (__a, __b, __c);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaldavaxq_s32 (int64_t __a, int32x4_t __b, int32x4_t __c)
{
  return __builtin_mve_vmlaldavaxq_sv4si (__a, __b, __c);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsldavaq_s32 (int64_t __a, int32x4_t __b, int32x4_t __c)
{
  return __builtin_mve_vmlsldavaq_sv4si (__a, __b, __c);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsldavaxq_s32 (int64_t __a, int32x4_t __b, int32x4_t __c)
{
  return __builtin_mve_vmlsldavaxq_sv4si (__a, __b, __c);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaldavq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmlaldavq_p_sv4si (__a, __b, __p);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaldavxq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmlaldavxq_p_sv4si (__a, __b, __p);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsldavq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmlsldavq_p_sv4si (__a, __b, __p);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsldavxq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmlsldavxq_p_sv4si (__a, __b, __p);
}
9234 | ||
/* Predicated widen/narrow/reverse operations on signed 16/32-bit
   vectors.  */

/* vmovlbq_m: predicated widen of the bottom s16 lanes to s32.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovlbq_m_s16 (int32x4_t __inactive, int16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vmovlbq_m_sv8hi (__inactive, __a, __p);
}

/* vmovltq_m: predicated widen of the top s16 lanes to s32.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovltq_m_s16 (int32x4_t __inactive, int16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vmovltq_m_sv8hi (__inactive, __a, __p);
}

/* vmovnbq_m: predicated narrow of __b into the bottom half-lanes of __a.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovnbq_m_s32 (int16x8_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmovnbq_m_sv4si (__a, __b, __p);
}

/* vmovntq_m: predicated narrow of __b into the top half-lanes of __a.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovntq_m_s32 (int16x8_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmovntq_m_sv4si (__a, __b, __p);
}

/* vqmovnbq_m: predicated saturating narrow, bottom half-lanes.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqmovnbq_m_s32 (int16x8_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqmovnbq_m_sv4si (__a, __b, __p);
}

/* vqmovntq_m: predicated saturating narrow, top half-lanes.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqmovntq_m_s32 (int16x8_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqmovntq_m_sv4si (__a, __b, __p);
}

/* vrev32q_m: predicated reverse of s16 elements within 32-bit chunks.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev32q_m_s16 (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrev32q_m_sv8hi (__inactive, __a, __p);
}
9283 | ||
/* Predicated immediate bitwise ops on u32 vectors, and
   signed-to-unsigned saturating narrowing right shifts from s32 to
   u16.  */

/* vmvnq_m_n: write the bitwise inverse of __imm to predicated lanes.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_m_n_u32 (uint32x4_t __inactive, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vmvnq_m_n_uv4si (__inactive, __imm, __p);
}

/* vorrq_m_n: bitwise OR __imm into the predicated lanes of __a.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_m_n_u32 (uint32x4_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vorrq_m_n_uv4si (__a, __imm, __p);
}

/* vqrshruntq_n: rounding saturating shift right by __imm, narrowing
   signed __b to unsigned, top half-lanes of __a.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshruntq_n_s32 (uint16x8_t __a, int32x4_t __b, const int __imm)
{
  return __builtin_mve_vqrshruntq_n_sv4si (__a, __b, __imm);
}

/* vqshrunbq_n: saturating shift right, signed-to-unsigned narrow,
   bottom half-lanes.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshrunbq_n_s32 (uint16x8_t __a, int32x4_t __b, const int __imm)
{
  return __builtin_mve_vqshrunbq_n_sv4si (__a, __b, __imm);
}

/* vqshruntq_n: saturating shift right, signed-to-unsigned narrow,
   top half-lanes.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshruntq_n_s32 (uint16x8_t __a, int32x4_t __b, const int __imm)
{
  return __builtin_mve_vqshruntq_n_sv4si (__a, __b, __imm);
}
9318 | ||
/* Predicated signed-to-unsigned saturating narrows from s32, and the
   u32->u16 narrowing right-shift family.  */

/* vqmovunbq_m: predicated saturating narrow of signed __b to unsigned,
   bottom half-lanes.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqmovunbq_m_s32 (uint16x8_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqmovunbq_m_sv4si (__a, __b, __p);
}

/* vqmovuntq_m: as above, top half-lanes.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqmovuntq_m_s32 (uint16x8_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqmovuntq_m_sv4si (__a, __b, __p);
}

/* vqrshrntq_n: rounding saturating shift right by __imm, narrow, top.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshrntq_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm)
{
  return __builtin_mve_vqrshrntq_n_uv4si (__a, __b, __imm);
}

/* vqshrnbq_n: saturating shift right, narrow, bottom.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshrnbq_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm)
{
  return __builtin_mve_vqshrnbq_n_uv4si (__a, __b, __imm);
}

/* vqshrntq_n: saturating shift right, narrow, top.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshrntq_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm)
{
  return __builtin_mve_vqshrntq_n_uv4si (__a, __b, __imm);
}

/* vrshrnbq_n: rounding shift right, narrow, bottom.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrnbq_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm)
{
  return __builtin_mve_vrshrnbq_n_uv4si (__a, __b, __imm);
}

/* vrshrntq_n: rounding shift right, narrow, top.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrntq_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm)
{
  return __builtin_mve_vrshrntq_n_uv4si (__a, __b, __imm);
}

/* vshrnbq_n: shift right, narrow, bottom.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrnbq_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm)
{
  return __builtin_mve_vshrnbq_n_uv4si (__a, __b, __imm);
}

/* vshrntq_n: shift right, narrow, top.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrntq_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm)
{
  return __builtin_mve_vshrntq_n_uv4si (__a, __b, __imm);
}
9381 | ||
/* Unsigned 32-bit long multiply-accumulate reductions, and the
   unsigned u16/u32 predicated widen/narrow/reverse operations.  */

/* vmlaldavaq: multiply-accumulate long across vector, added to the
   64-bit scalar accumulator __a.  */
__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaldavaq_u32 (uint64_t __a, uint32x4_t __b, uint32x4_t __c)
{
  return __builtin_mve_vmlaldavaq_uv4si (__a, __b, __c);
}

/* vmlaldavq_p: predicated multiply-accumulate long reduction.  */
__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaldavq_p_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmlaldavq_p_uv4si (__a, __b, __p);
}

/* vmovlbq_m: predicated widen of the bottom u16 lanes to u32.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovlbq_m_u16 (uint32x4_t __inactive, uint16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vmovlbq_m_uv8hi (__inactive, __a, __p);
}

/* vmovltq_m: predicated widen of the top u16 lanes to u32.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovltq_m_u16 (uint32x4_t __inactive, uint16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vmovltq_m_uv8hi (__inactive, __a, __p);
}

/* vmovnbq_m: predicated narrow of __b into the bottom half-lanes of __a.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovnbq_m_u32 (uint16x8_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmovnbq_m_uv4si (__a, __b, __p);
}

/* vmovntq_m: predicated narrow of __b into the top half-lanes of __a.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovntq_m_u32 (uint16x8_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmovntq_m_uv4si (__a, __b, __p);
}

/* vqmovnbq_m: predicated saturating narrow, bottom half-lanes.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqmovnbq_m_u32 (uint16x8_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqmovnbq_m_uv4si (__a, __b, __p);
}

/* vqmovntq_m: predicated saturating narrow, top half-lanes.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqmovntq_m_u32 (uint16x8_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqmovntq_m_uv4si (__a, __b, __p);
}

/* vrev32q_m: predicated reverse of u16 elements within 32-bit chunks.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev32q_m_u16 (uint16x8_t __inactive, uint16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrev32q_m_uv8hi (__inactive, __a, __p);
}
db5db9d2 SP |
9444 | |
/* Predicated 8-bit shift-insert, subtract, saturating-shift-left-
   unsigned, absolute-difference-accumulate and shift-left operations.
   The _m forms with __inactive merge unselected lanes from it; the
   other _m/_p forms leave unselected lanes of the first operand (or
   the scalar accumulator) unchanged.  */

/* vsriq_m_n: predicated shift right __b by __imm and insert into __a.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsriq_m_n_s8 (int8x16_t __a, int8x16_t __b, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vsriq_m_n_sv16qi (__a, __b, __imm, __p);
}

/* vsubq_m: predicated lane-wise subtract __a - __b.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vsubq_m_sv16qi (__inactive, __a, __b, __p);
}

/* vqshluq_m_n: predicated saturating shift left of signed __a by
   __imm, producing an unsigned result.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshluq_m_n_s8 (uint8x16_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vqshluq_m_n_sv16qi (__inactive, __a, __imm, __p);
}

/* vabavq_p: predicated absolute-difference of __b and __c accumulated
   into the 32-bit scalar __a.  */
__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabavq_p_s8 (uint32_t __a, int8x16_t __b, int8x16_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vabavq_p_sv16qi (__a, __b, __c, __p);
}

/* vsriq_m_n: unsigned variant of the predicated shift-right-insert.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsriq_m_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vsriq_m_n_uv16qi (__a, __b, __imm, __p);
}

/* vshlq_m: predicated shift of __a by the per-lane signed counts in __b.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vshlq_m_uv16qi (__inactive, __a, __b, __p);
}

/* vsubq_m: unsigned variant of the predicated subtract.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vsubq_m_uv16qi (__inactive, __a, __b, __p);
}

/* vabavq_p: unsigned variant of the predicated absolute-difference
   accumulate.  */
__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabavq_p_u8 (uint32_t __a, uint8x16_t __b, uint8x16_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vabavq_p_uv16qi (__a, __b, __c, __p);
}

/* vshlq_m: signed variant of the predicated per-lane shift.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vshlq_m_sv16qi (__inactive, __a, __b, __p);
}
9507 | ||
/* 16-bit variants of the predicated shift-insert, subtract,
   saturating-shift-left-unsigned and absolute-difference-accumulate
   operations above.  */

/* vsriq_m_n: predicated shift right __b by __imm and insert into __a.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsriq_m_n_s16 (int16x8_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vsriq_m_n_sv8hi (__a, __b, __imm, __p);
}

/* vsubq_m: predicated lane-wise subtract __a - __b.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vsubq_m_sv8hi (__inactive, __a, __b, __p);
}

/* vqshluq_m_n: predicated saturating shift left of signed __a by
   __imm, producing an unsigned result.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshluq_m_n_s16 (uint16x8_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vqshluq_m_n_sv8hi (__inactive, __a, __imm, __p);
}

/* vabavq_p: predicated absolute-difference of __b and __c accumulated
   into the 32-bit scalar __a.  */
__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabavq_p_s16 (uint32_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vabavq_p_sv8hi (__a, __b, __c, __p);
}

/* vsriq_m_n: unsigned variant of the predicated shift-right-insert.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsriq_m_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vsriq_m_n_uv8hi (__a, __b, __imm, __p);
}

/* vshlq_m: predicated shift of __a by the per-lane signed counts in __b.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vshlq_m_uv8hi (__inactive, __a, __b, __p);
}

/* vsubq_m: unsigned variant of the predicated subtract.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vsubq_m_uv8hi (__inactive, __a, __b, __p);
}

/* vabavq_p: unsigned variant of the predicated absolute-difference
   accumulate.  */
__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabavq_p_u16 (uint32_t __a, uint16x8_t __b, uint16x8_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vabavq_p_uv8hi (__a, __b, __c, __p);
}
9563 | ||
9564 | __extension__ extern __inline int16x8_t | |
9565 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9566 | __arm_vshlq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
9567 | { | |
9568 | return __builtin_mve_vshlq_m_sv8hi (__inactive, __a, __b, __p); | |
9569 | } | |
9570 | ||
9571 | __extension__ extern __inline int32x4_t | |
9572 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9573 | __arm_vsriq_m_n_s32 (int32x4_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p) | |
9574 | { | |
9575 | return __builtin_mve_vsriq_m_n_sv4si (__a, __b, __imm, __p); | |
9576 | } | |
9577 | ||
9578 | __extension__ extern __inline int32x4_t | |
9579 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9580 | __arm_vsubq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
9581 | { | |
9582 | return __builtin_mve_vsubq_m_sv4si (__inactive, __a, __b, __p); | |
9583 | } | |
9584 | ||
9585 | __extension__ extern __inline uint32x4_t | |
9586 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9587 | __arm_vqshluq_m_n_s32 (uint32x4_t __inactive, int32x4_t __a, const int __imm, mve_pred16_t __p) | |
9588 | { | |
9589 | return __builtin_mve_vqshluq_m_n_sv4si (__inactive, __a, __imm, __p); | |
9590 | } | |
9591 | ||
9592 | __extension__ extern __inline uint32_t | |
9593 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9594 | __arm_vabavq_p_s32 (uint32_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p) | |
9595 | { | |
9596 | return __builtin_mve_vabavq_p_sv4si (__a, __b, __c, __p); | |
9597 | } | |
9598 | ||
9599 | __extension__ extern __inline uint32x4_t | |
9600 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9601 | __arm_vsriq_m_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p) | |
9602 | { | |
9603 | return __builtin_mve_vsriq_m_n_uv4si (__a, __b, __imm, __p); | |
9604 | } | |
9605 | ||
9606 | __extension__ extern __inline uint32x4_t | |
9607 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9608 | __arm_vshlq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
9609 | { | |
9610 | return __builtin_mve_vshlq_m_uv4si (__inactive, __a, __b, __p); | |
9611 | } | |
9612 | ||
9613 | __extension__ extern __inline uint32x4_t | |
9614 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9615 | __arm_vsubq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
9616 | { | |
9617 | return __builtin_mve_vsubq_m_uv4si (__inactive, __a, __b, __p); | |
9618 | } | |
9619 | ||
9620 | __extension__ extern __inline uint32_t | |
9621 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9622 | __arm_vabavq_p_u32 (uint32_t __a, uint32x4_t __b, uint32x4_t __c, mve_pred16_t __p) | |
9623 | { | |
9624 | return __builtin_mve_vabavq_p_uv4si (__a, __b, __c, __p); | |
9625 | } | |
9626 | ||
9627 | __extension__ extern __inline int32x4_t | |
9628 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9629 | __arm_vshlq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
9630 | { | |
9631 | return __builtin_mve_vshlq_m_sv4si (__inactive, __a, __b, __p); | |
9632 | } | |
9633 | ||
8eb3b6b9 SP |
9634 | __extension__ extern __inline int8x16_t |
9635 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9636 | __arm_vabdq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
9637 | { | |
9638 | return __builtin_mve_vabdq_m_sv16qi (__inactive, __a, __b, __p); | |
9639 | } | |
9640 | ||
9641 | __extension__ extern __inline int32x4_t | |
9642 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9643 | __arm_vabdq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
9644 | { | |
9645 | return __builtin_mve_vabdq_m_sv4si (__inactive, __a, __b, __p); | |
9646 | } | |
9647 | ||
9648 | __extension__ extern __inline int16x8_t | |
9649 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9650 | __arm_vabdq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
9651 | { | |
9652 | return __builtin_mve_vabdq_m_sv8hi (__inactive, __a, __b, __p); | |
9653 | } | |
9654 | ||
9655 | __extension__ extern __inline uint8x16_t | |
9656 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9657 | __arm_vabdq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
9658 | { | |
9659 | return __builtin_mve_vabdq_m_uv16qi (__inactive, __a, __b, __p); | |
9660 | } | |
9661 | ||
9662 | __extension__ extern __inline uint32x4_t | |
9663 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9664 | __arm_vabdq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
9665 | { | |
9666 | return __builtin_mve_vabdq_m_uv4si (__inactive, __a, __b, __p); | |
9667 | } | |
9668 | ||
9669 | __extension__ extern __inline uint16x8_t | |
9670 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9671 | __arm_vabdq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
9672 | { | |
9673 | return __builtin_mve_vabdq_m_uv8hi (__inactive, __a, __b, __p); | |
9674 | } | |
9675 | ||
9676 | __extension__ extern __inline int8x16_t | |
9677 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
e0dd75fe | 9678 | __arm_vaddq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p) |
8eb3b6b9 SP |
9679 | { |
9680 | return __builtin_mve_vaddq_m_n_sv16qi (__inactive, __a, __b, __p); | |
9681 | } | |
9682 | ||
9683 | __extension__ extern __inline int32x4_t | |
9684 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
e0dd75fe | 9685 | __arm_vaddq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p) |
8eb3b6b9 SP |
9686 | { |
9687 | return __builtin_mve_vaddq_m_n_sv4si (__inactive, __a, __b, __p); | |
9688 | } | |
9689 | ||
9690 | __extension__ extern __inline int16x8_t | |
9691 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
e0dd75fe | 9692 | __arm_vaddq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p) |
8eb3b6b9 SP |
9693 | { |
9694 | return __builtin_mve_vaddq_m_n_sv8hi (__inactive, __a, __b, __p); | |
9695 | } | |
9696 | ||
9697 | __extension__ extern __inline uint8x16_t | |
9698 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
e0dd75fe | 9699 | __arm_vaddq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p) |
8eb3b6b9 SP |
9700 | { |
9701 | return __builtin_mve_vaddq_m_n_uv16qi (__inactive, __a, __b, __p); | |
9702 | } | |
9703 | ||
9704 | __extension__ extern __inline uint32x4_t | |
9705 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
e0dd75fe | 9706 | __arm_vaddq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p) |
8eb3b6b9 SP |
9707 | { |
9708 | return __builtin_mve_vaddq_m_n_uv4si (__inactive, __a, __b, __p); | |
9709 | } | |
9710 | ||
9711 | __extension__ extern __inline uint16x8_t | |
9712 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
e0dd75fe | 9713 | __arm_vaddq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p) |
8eb3b6b9 SP |
9714 | { |
9715 | return __builtin_mve_vaddq_m_n_uv8hi (__inactive, __a, __b, __p); | |
9716 | } | |
9717 | ||
9718 | __extension__ extern __inline int8x16_t | |
9719 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9720 | __arm_vaddq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
9721 | { | |
9722 | return __builtin_mve_vaddq_m_sv16qi (__inactive, __a, __b, __p); | |
9723 | } | |
9724 | ||
9725 | __extension__ extern __inline int32x4_t | |
9726 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9727 | __arm_vaddq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
9728 | { | |
9729 | return __builtin_mve_vaddq_m_sv4si (__inactive, __a, __b, __p); | |
9730 | } | |
9731 | ||
9732 | __extension__ extern __inline int16x8_t | |
9733 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9734 | __arm_vaddq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
9735 | { | |
9736 | return __builtin_mve_vaddq_m_sv8hi (__inactive, __a, __b, __p); | |
9737 | } | |
9738 | ||
9739 | __extension__ extern __inline uint8x16_t | |
9740 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9741 | __arm_vaddq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
9742 | { | |
9743 | return __builtin_mve_vaddq_m_uv16qi (__inactive, __a, __b, __p); | |
9744 | } | |
9745 | ||
9746 | __extension__ extern __inline uint32x4_t | |
9747 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9748 | __arm_vaddq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
9749 | { | |
9750 | return __builtin_mve_vaddq_m_uv4si (__inactive, __a, __b, __p); | |
9751 | } | |
9752 | ||
9753 | __extension__ extern __inline uint16x8_t | |
9754 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9755 | __arm_vaddq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
9756 | { | |
9757 | return __builtin_mve_vaddq_m_uv8hi (__inactive, __a, __b, __p); | |
9758 | } | |
9759 | ||
9760 | __extension__ extern __inline int8x16_t | |
9761 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9762 | __arm_vandq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
9763 | { | |
9764 | return __builtin_mve_vandq_m_sv16qi (__inactive, __a, __b, __p); | |
9765 | } | |
9766 | ||
9767 | __extension__ extern __inline int32x4_t | |
9768 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9769 | __arm_vandq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
9770 | { | |
9771 | return __builtin_mve_vandq_m_sv4si (__inactive, __a, __b, __p); | |
9772 | } | |
9773 | ||
9774 | __extension__ extern __inline int16x8_t | |
9775 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9776 | __arm_vandq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
9777 | { | |
9778 | return __builtin_mve_vandq_m_sv8hi (__inactive, __a, __b, __p); | |
9779 | } | |
9780 | ||
9781 | __extension__ extern __inline uint8x16_t | |
9782 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9783 | __arm_vandq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
9784 | { | |
9785 | return __builtin_mve_vandq_m_uv16qi (__inactive, __a, __b, __p); | |
9786 | } | |
9787 | ||
9788 | __extension__ extern __inline uint32x4_t | |
9789 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9790 | __arm_vandq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
9791 | { | |
9792 | return __builtin_mve_vandq_m_uv4si (__inactive, __a, __b, __p); | |
9793 | } | |
9794 | ||
9795 | __extension__ extern __inline uint16x8_t | |
9796 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9797 | __arm_vandq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
9798 | { | |
9799 | return __builtin_mve_vandq_m_uv8hi (__inactive, __a, __b, __p); | |
9800 | } | |
9801 | ||
9802 | __extension__ extern __inline int8x16_t | |
9803 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9804 | __arm_vbicq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
9805 | { | |
9806 | return __builtin_mve_vbicq_m_sv16qi (__inactive, __a, __b, __p); | |
9807 | } | |
9808 | ||
9809 | __extension__ extern __inline int32x4_t | |
9810 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9811 | __arm_vbicq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
9812 | { | |
9813 | return __builtin_mve_vbicq_m_sv4si (__inactive, __a, __b, __p); | |
9814 | } | |
9815 | ||
9816 | __extension__ extern __inline int16x8_t | |
9817 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9818 | __arm_vbicq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
9819 | { | |
9820 | return __builtin_mve_vbicq_m_sv8hi (__inactive, __a, __b, __p); | |
9821 | } | |
9822 | ||
9823 | __extension__ extern __inline uint8x16_t | |
9824 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9825 | __arm_vbicq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
9826 | { | |
9827 | return __builtin_mve_vbicq_m_uv16qi (__inactive, __a, __b, __p); | |
9828 | } | |
9829 | ||
9830 | __extension__ extern __inline uint32x4_t | |
9831 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9832 | __arm_vbicq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
9833 | { | |
9834 | return __builtin_mve_vbicq_m_uv4si (__inactive, __a, __b, __p); | |
9835 | } | |
9836 | ||
9837 | __extension__ extern __inline uint16x8_t | |
9838 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9839 | __arm_vbicq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
9840 | { | |
9841 | return __builtin_mve_vbicq_m_uv8hi (__inactive, __a, __b, __p); | |
9842 | } | |
9843 | ||
9844 | __extension__ extern __inline int8x16_t | |
9845 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9846 | __arm_vbrsrq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, int32_t __b, mve_pred16_t __p) | |
9847 | { | |
9848 | return __builtin_mve_vbrsrq_m_n_sv16qi (__inactive, __a, __b, __p); | |
9849 | } | |
9850 | ||
9851 | __extension__ extern __inline int32x4_t | |
9852 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9853 | __arm_vbrsrq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p) | |
9854 | { | |
9855 | return __builtin_mve_vbrsrq_m_n_sv4si (__inactive, __a, __b, __p); | |
9856 | } | |
9857 | ||
9858 | __extension__ extern __inline int16x8_t | |
9859 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9860 | __arm_vbrsrq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, int32_t __b, mve_pred16_t __p) | |
9861 | { | |
9862 | return __builtin_mve_vbrsrq_m_n_sv8hi (__inactive, __a, __b, __p); | |
9863 | } | |
9864 | ||
9865 | __extension__ extern __inline uint8x16_t | |
9866 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9867 | __arm_vbrsrq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, int32_t __b, mve_pred16_t __p) | |
9868 | { | |
9869 | return __builtin_mve_vbrsrq_m_n_uv16qi (__inactive, __a, __b, __p); | |
9870 | } | |
9871 | ||
9872 | __extension__ extern __inline uint32x4_t | |
9873 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9874 | __arm_vbrsrq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, int32_t __b, mve_pred16_t __p) | |
9875 | { | |
9876 | return __builtin_mve_vbrsrq_m_n_uv4si (__inactive, __a, __b, __p); | |
9877 | } | |
9878 | ||
9879 | __extension__ extern __inline uint16x8_t | |
9880 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9881 | __arm_vbrsrq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, int32_t __b, mve_pred16_t __p) | |
9882 | { | |
9883 | return __builtin_mve_vbrsrq_m_n_uv8hi (__inactive, __a, __b, __p); | |
9884 | } | |
9885 | ||
9886 | __extension__ extern __inline int8x16_t | |
9887 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9888 | __arm_vcaddq_rot270_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
9889 | { | |
9890 | return __builtin_mve_vcaddq_rot270_m_sv16qi (__inactive, __a, __b, __p); | |
9891 | } | |
9892 | ||
9893 | __extension__ extern __inline int32x4_t | |
9894 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9895 | __arm_vcaddq_rot270_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
9896 | { | |
9897 | return __builtin_mve_vcaddq_rot270_m_sv4si (__inactive, __a, __b, __p); | |
9898 | } | |
9899 | ||
9900 | __extension__ extern __inline int16x8_t | |
9901 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9902 | __arm_vcaddq_rot270_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
9903 | { | |
9904 | return __builtin_mve_vcaddq_rot270_m_sv8hi (__inactive, __a, __b, __p); | |
9905 | } | |
9906 | ||
9907 | __extension__ extern __inline uint8x16_t | |
9908 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9909 | __arm_vcaddq_rot270_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
9910 | { | |
9911 | return __builtin_mve_vcaddq_rot270_m_uv16qi (__inactive, __a, __b, __p); | |
9912 | } | |
9913 | ||
9914 | __extension__ extern __inline uint32x4_t | |
9915 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9916 | __arm_vcaddq_rot270_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
9917 | { | |
9918 | return __builtin_mve_vcaddq_rot270_m_uv4si (__inactive, __a, __b, __p); | |
9919 | } | |
9920 | ||
9921 | __extension__ extern __inline uint16x8_t | |
9922 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9923 | __arm_vcaddq_rot270_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
9924 | { | |
9925 | return __builtin_mve_vcaddq_rot270_m_uv8hi (__inactive, __a, __b, __p); | |
9926 | } | |
9927 | ||
9928 | __extension__ extern __inline int8x16_t | |
9929 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9930 | __arm_vcaddq_rot90_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
9931 | { | |
9932 | return __builtin_mve_vcaddq_rot90_m_sv16qi (__inactive, __a, __b, __p); | |
9933 | } | |
9934 | ||
9935 | __extension__ extern __inline int32x4_t | |
9936 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9937 | __arm_vcaddq_rot90_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
9938 | { | |
9939 | return __builtin_mve_vcaddq_rot90_m_sv4si (__inactive, __a, __b, __p); | |
9940 | } | |
9941 | ||
9942 | __extension__ extern __inline int16x8_t | |
9943 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9944 | __arm_vcaddq_rot90_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
9945 | { | |
9946 | return __builtin_mve_vcaddq_rot90_m_sv8hi (__inactive, __a, __b, __p); | |
9947 | } | |
9948 | ||
9949 | __extension__ extern __inline uint8x16_t | |
9950 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9951 | __arm_vcaddq_rot90_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
9952 | { | |
9953 | return __builtin_mve_vcaddq_rot90_m_uv16qi (__inactive, __a, __b, __p); | |
9954 | } | |
9955 | ||
9956 | __extension__ extern __inline uint32x4_t | |
9957 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9958 | __arm_vcaddq_rot90_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
9959 | { | |
9960 | return __builtin_mve_vcaddq_rot90_m_uv4si (__inactive, __a, __b, __p); | |
9961 | } | |
9962 | ||
9963 | __extension__ extern __inline uint16x8_t | |
9964 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9965 | __arm_vcaddq_rot90_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
9966 | { | |
9967 | return __builtin_mve_vcaddq_rot90_m_uv8hi (__inactive, __a, __b, __p); | |
9968 | } | |
9969 | ||
9970 | __extension__ extern __inline int8x16_t | |
9971 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9972 | __arm_veorq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
9973 | { | |
9974 | return __builtin_mve_veorq_m_sv16qi (__inactive, __a, __b, __p); | |
9975 | } | |
9976 | ||
9977 | __extension__ extern __inline int32x4_t | |
9978 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9979 | __arm_veorq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
9980 | { | |
9981 | return __builtin_mve_veorq_m_sv4si (__inactive, __a, __b, __p); | |
9982 | } | |
9983 | ||
9984 | __extension__ extern __inline int16x8_t | |
9985 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9986 | __arm_veorq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
9987 | { | |
9988 | return __builtin_mve_veorq_m_sv8hi (__inactive, __a, __b, __p); | |
9989 | } | |
9990 | ||
9991 | __extension__ extern __inline uint8x16_t | |
9992 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
9993 | __arm_veorq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
9994 | { | |
9995 | return __builtin_mve_veorq_m_uv16qi (__inactive, __a, __b, __p); | |
9996 | } | |
9997 | ||
9998 | __extension__ extern __inline uint32x4_t | |
9999 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10000 | __arm_veorq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
10001 | { | |
10002 | return __builtin_mve_veorq_m_uv4si (__inactive, __a, __b, __p); | |
10003 | } | |
10004 | ||
10005 | __extension__ extern __inline uint16x8_t | |
10006 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10007 | __arm_veorq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
10008 | { | |
10009 | return __builtin_mve_veorq_m_uv8hi (__inactive, __a, __b, __p); | |
10010 | } | |
10011 | ||
10012 | __extension__ extern __inline int8x16_t | |
10013 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10014 | __arm_vhaddq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p) | |
10015 | { | |
10016 | return __builtin_mve_vhaddq_m_n_sv16qi (__inactive, __a, __b, __p); | |
10017 | } | |
10018 | ||
10019 | __extension__ extern __inline int32x4_t | |
10020 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10021 | __arm_vhaddq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p) | |
10022 | { | |
10023 | return __builtin_mve_vhaddq_m_n_sv4si (__inactive, __a, __b, __p); | |
10024 | } | |
10025 | ||
10026 | __extension__ extern __inline int16x8_t | |
10027 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10028 | __arm_vhaddq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p) | |
10029 | { | |
10030 | return __builtin_mve_vhaddq_m_n_sv8hi (__inactive, __a, __b, __p); | |
10031 | } | |
10032 | ||
10033 | __extension__ extern __inline uint8x16_t | |
10034 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10035 | __arm_vhaddq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p) | |
10036 | { | |
10037 | return __builtin_mve_vhaddq_m_n_uv16qi (__inactive, __a, __b, __p); | |
10038 | } | |
10039 | ||
10040 | __extension__ extern __inline uint32x4_t | |
10041 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10042 | __arm_vhaddq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p) | |
10043 | { | |
10044 | return __builtin_mve_vhaddq_m_n_uv4si (__inactive, __a, __b, __p); | |
10045 | } | |
10046 | ||
10047 | __extension__ extern __inline uint16x8_t | |
10048 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10049 | __arm_vhaddq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p) | |
10050 | { | |
10051 | return __builtin_mve_vhaddq_m_n_uv8hi (__inactive, __a, __b, __p); | |
10052 | } | |
10053 | ||
10054 | __extension__ extern __inline int8x16_t | |
10055 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10056 | __arm_vhaddq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
10057 | { | |
10058 | return __builtin_mve_vhaddq_m_sv16qi (__inactive, __a, __b, __p); | |
10059 | } | |
10060 | ||
10061 | __extension__ extern __inline int32x4_t | |
10062 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10063 | __arm_vhaddq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
10064 | { | |
10065 | return __builtin_mve_vhaddq_m_sv4si (__inactive, __a, __b, __p); | |
10066 | } | |
10067 | ||
10068 | __extension__ extern __inline int16x8_t | |
10069 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10070 | __arm_vhaddq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
10071 | { | |
10072 | return __builtin_mve_vhaddq_m_sv8hi (__inactive, __a, __b, __p); | |
10073 | } | |
10074 | ||
10075 | __extension__ extern __inline uint8x16_t | |
10076 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10077 | __arm_vhaddq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
10078 | { | |
10079 | return __builtin_mve_vhaddq_m_uv16qi (__inactive, __a, __b, __p); | |
10080 | } | |
10081 | ||
10082 | __extension__ extern __inline uint32x4_t | |
10083 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10084 | __arm_vhaddq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
10085 | { | |
10086 | return __builtin_mve_vhaddq_m_uv4si (__inactive, __a, __b, __p); | |
10087 | } | |
10088 | ||
10089 | __extension__ extern __inline uint16x8_t | |
10090 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10091 | __arm_vhaddq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
10092 | { | |
10093 | return __builtin_mve_vhaddq_m_uv8hi (__inactive, __a, __b, __p); | |
10094 | } | |
10095 | ||
10096 | __extension__ extern __inline int8x16_t | |
10097 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10098 | __arm_vhcaddq_rot270_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
10099 | { | |
10100 | return __builtin_mve_vhcaddq_rot270_m_sv16qi (__inactive, __a, __b, __p); | |
10101 | } | |
10102 | ||
10103 | __extension__ extern __inline int32x4_t | |
10104 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10105 | __arm_vhcaddq_rot270_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
10106 | { | |
10107 | return __builtin_mve_vhcaddq_rot270_m_sv4si (__inactive, __a, __b, __p); | |
10108 | } | |
10109 | ||
10110 | __extension__ extern __inline int16x8_t | |
10111 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10112 | __arm_vhcaddq_rot270_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
10113 | { | |
10114 | return __builtin_mve_vhcaddq_rot270_m_sv8hi (__inactive, __a, __b, __p); | |
10115 | } | |
10116 | ||
10117 | __extension__ extern __inline int8x16_t | |
10118 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10119 | __arm_vhcaddq_rot90_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
10120 | { | |
10121 | return __builtin_mve_vhcaddq_rot90_m_sv16qi (__inactive, __a, __b, __p); | |
10122 | } | |
10123 | ||
10124 | __extension__ extern __inline int32x4_t | |
10125 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10126 | __arm_vhcaddq_rot90_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
10127 | { | |
10128 | return __builtin_mve_vhcaddq_rot90_m_sv4si (__inactive, __a, __b, __p); | |
10129 | } | |
10130 | ||
10131 | __extension__ extern __inline int16x8_t | |
10132 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
10133 | __arm_vhcaddq_rot90_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
10134 | { | |
10135 | return __builtin_mve_vhcaddq_rot90_m_sv8hi (__inactive, __a, __b, __p); | |
10136 | } | |
10137 | ||
/* Predicated halving subtract, vector-by-scalar "_m_n" forms (s8/s32/s16,
   u8/u32/u16).  Each wrapper forwards to the matching GCC builtin; lanes
   disabled by predicate __p take their value from __inactive (ACLE "_m"
   merging convention — NOTE(review): confirm against the Arm MVE spec).  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhsubq_m_n_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhsubq_m_n_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhsubq_m_n_sv8hi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhsubq_m_n_uv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhsubq_m_n_uv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhsubq_m_n_uv8hi (__inactive, __a, __b, __p);
}
10179 | ||
/* Predicated halving subtract, vector-by-vector "_m" forms (s8/s32/s16,
   u8/u32/u16).  Thin wrappers over the GCC builtins; __p selects active
   lanes and __inactive provides values for the rest (ACLE "_m" merging
   convention — NOTE(review): confirm against the Arm MVE spec).  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhsubq_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhsubq_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhsubq_m_sv8hi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhsubq_m_uv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhsubq_m_uv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhsubq_m_uv8hi (__inactive, __a, __b, __p);
}
10221 | ||
/* Predicated element-wise maximum, "_m" merging forms (s8/s32/s16,
   u8/u32/u16).  Forward to the GCC builtins; lanes predicated off by __p
   take their value from __inactive (ACLE "_m" convention — NOTE(review):
   confirm against the Arm MVE spec).  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxq_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxq_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxq_m_sv8hi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxq_m_uv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxq_m_uv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxq_m_uv8hi (__inactive, __a, __b, __p);
}
10263 | ||
/* Predicated element-wise minimum, "_m" merging forms (s8/s32/s16,
   u8/u32/u16).  Forward to the GCC builtins; lanes predicated off by __p
   take their value from __inactive (ACLE "_m" convention — NOTE(review):
   confirm against the Arm MVE spec).  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminq_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminq_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminq_m_sv8hi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminq_m_uv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminq_m_uv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminq_m_uv8hi (__inactive, __a, __b, __p);
}
10305 | ||
/* Predicated multiply-accumulate-across-vector with scalar accumulator
   ("_p" predicated forms, s8/s32/s16, u8/u32/u16): accumulator __a plus a
   dot-product-style reduction of __b and __c, computed by the builtin for
   lanes enabled by __p.  NOTE(review): exact reduction semantics per the
   Arm ACLE MVE spec.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavaq_p_s8 (int32_t __a, int8x16_t __b, int8x16_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmladavaq_p_sv16qi (__a, __b, __c, __p);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavaq_p_s32 (int32_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmladavaq_p_sv4si (__a, __b, __c, __p);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavaq_p_s16 (int32_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmladavaq_p_sv8hi (__a, __b, __c, __p);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavaq_p_u8 (uint32_t __a, uint8x16_t __b, uint8x16_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmladavaq_p_uv16qi (__a, __b, __c, __p);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavaq_p_u32 (uint32_t __a, uint32x4_t __b, uint32x4_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmladavaq_p_uv4si (__a, __b, __c, __p);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavaq_p_u16 (uint32_t __a, uint16x8_t __b, uint16x8_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmladavaq_p_uv8hi (__a, __b, __c, __p);
}
10347 | ||
/* Predicated exchanged multiply-accumulate-across-vector, signed only
   (s8/s32/s16).  Wrappers over the GCC builtins, gated by predicate __p.
   NOTE(review): the "x" (exchange) pairing of lanes is defined by the Arm
   ACLE MVE spec, not visible here.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavaxq_p_s8 (int32_t __a, int8x16_t __b, int8x16_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmladavaxq_p_sv16qi (__a, __b, __c, __p);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavaxq_p_s32 (int32_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmladavaxq_p_sv4si (__a, __b, __c, __p);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavaxq_p_s16 (int32_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmladavaxq_p_sv8hi (__a, __b, __c, __p);
}
10368 | ||
/* Predicated multiply-accumulate with scalar operand __c, "_m_n" forms
   (s8/s32/s16, u8/u32/u16).  Forward to the GCC builtins under predicate
   __p; __a doubles as accumulator input and result carrier.
   NOTE(review): per-lane behavior per the Arm ACLE MVE spec.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaq_m_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmlaq_m_n_sv16qi (__a, __b, __c, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaq_m_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmlaq_m_n_sv4si (__a, __b, __c, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaq_m_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmlaq_m_n_sv8hi (__a, __b, __c, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaq_m_n_u8 (uint8x16_t __a, uint8x16_t __b, uint8_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmlaq_m_n_uv16qi (__a, __b, __c, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaq_m_n_u32 (uint32x4_t __a, uint32x4_t __b, uint32_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmlaq_m_n_uv4si (__a, __b, __c, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaq_m_n_u16 (uint16x8_t __a, uint16x8_t __b, uint16_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmlaq_m_n_uv8hi (__a, __b, __c, __p);
}
10410 | ||
/* Predicated multiply-add-scalar ("vmlas"), "_m_n" forms (s8/s32/s16,
   u8/u32/u16).  Forward to the GCC builtins under predicate __p.
   NOTE(review): operand roles (which input multiplies, which adds) per
   the Arm ACLE MVE spec — not inferable from these wrappers.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlasq_m_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmlasq_m_n_sv16qi (__a, __b, __c, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlasq_m_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmlasq_m_n_sv4si (__a, __b, __c, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlasq_m_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmlasq_m_n_sv8hi (__a, __b, __c, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlasq_m_n_u8 (uint8x16_t __a, uint8x16_t __b, uint8_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmlasq_m_n_uv16qi (__a, __b, __c, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlasq_m_n_u32 (uint32x4_t __a, uint32x4_t __b, uint32_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmlasq_m_n_uv4si (__a, __b, __c, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlasq_m_n_u16 (uint16x8_t __a, uint16x8_t __b, uint16_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmlasq_m_n_uv8hi (__a, __b, __c, __p);
}
10452 | ||
/* Predicated multiply-subtract-accumulate across vector, signed only
   (s8/s32/s16).  Wrappers over the GCC builtins, gated by predicate __p.
   NOTE(review): reduction semantics per the Arm ACLE MVE spec.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavaq_p_s8 (int32_t __a, int8x16_t __b, int8x16_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmlsdavaq_p_sv16qi (__a, __b, __c, __p);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavaq_p_s32 (int32_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmlsdavaq_p_sv4si (__a, __b, __c, __p);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavaq_p_s16 (int32_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmlsdavaq_p_sv8hi (__a, __b, __c, __p);
}
10473 | ||
/* Predicated exchanged multiply-subtract-accumulate across vector, signed
   only (s8/s32/s16).  Wrappers over the GCC builtins, gated by __p.
   NOTE(review): lane-exchange semantics per the Arm ACLE MVE spec.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavaxq_p_s8 (int32_t __a, int8x16_t __b, int8x16_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmlsdavaxq_p_sv16qi (__a, __b, __c, __p);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavaxq_p_s32 (int32_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmlsdavaxq_p_sv4si (__a, __b, __c, __p);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavaxq_p_s16 (int32_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmlsdavaxq_p_sv8hi (__a, __b, __c, __p);
}
10494 | ||
/* Predicated multiply-returning-high-half, "_m" merging forms (s8/s32/s16,
   u8/u32/u16).  Forward to the GCC builtins; inactive lanes (per __p) take
   their value from __inactive (ACLE "_m" convention — NOTE(review):
   confirm against the Arm MVE spec).  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulhq_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulhq_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulhq_m_sv8hi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulhq_m_uv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulhq_m_uv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulhq_m_uv8hi (__inactive, __a, __b, __p);
}
10536 | ||
/* Predicated widening multiply of bottom (even) lanes, "_m" merging forms.
   Note the widened result/__inactive types: 8-bit inputs produce 16-bit
   vectors, 16->32, 32->64 — visible directly in the signatures below.
   Inactive lanes (per __p) take their value from __inactive (ACLE "_m"
   convention — NOTE(review): confirm against the Arm MVE spec).  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_int_m_s8 (int16x8_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmullbq_int_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_int_m_s32 (int64x2_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmullbq_int_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_int_m_s16 (int32x4_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmullbq_int_m_sv8hi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_int_m_u8 (uint16x8_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmullbq_int_m_uv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_int_m_u32 (uint64x2_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmullbq_int_m_uv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_int_m_u16 (uint32x4_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmullbq_int_m_uv8hi (__inactive, __a, __b, __p);
}
10578 | ||
/* Predicated widening multiply of top (odd) lanes, "_m" merging forms.
   Same widened result/__inactive typing as vmullbq_int_m (8->16, 16->32,
   32->64, visible in the signatures).  Inactive lanes (per __p) take
   their value from __inactive (ACLE "_m" convention — NOTE(review):
   confirm against the Arm MVE spec).  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int_m_s8 (int16x8_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulltq_int_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int_m_s32 (int64x2_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulltq_int_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int_m_s16 (int32x4_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulltq_int_m_sv8hi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int_m_u8 (uint16x8_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulltq_int_m_uv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int_m_u32 (uint64x2_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulltq_int_m_uv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int_m_u16 (uint32x4_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulltq_int_m_uv8hi (__inactive, __a, __b, __p);
}
10620 | ||
/* Predicated multiply by scalar, "_m_n" merging forms (s8/s32/s16,
   u8/u32/u16).  Forward to the GCC builtins; inactive lanes (per __p)
   take their value from __inactive (ACLE "_m" convention — NOTE(review):
   confirm against the Arm MVE spec).  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_n_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_n_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_n_sv8hi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_n_uv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_n_uv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_n_uv8hi (__inactive, __a, __b, __p);
}
10662 | ||
/* Predicated element-wise multiply, vector-by-vector "_m" merging forms
   (s8/s32/s16, u8/u32/u16).  Forward to the GCC builtins; inactive lanes
   (per __p) take their value from __inactive (ACLE "_m" convention —
   NOTE(review): confirm against the Arm MVE spec).  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_sv8hi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_uv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_uv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_uv8hi (__inactive, __a, __b, __p);
}
10704 | ||
/* Predicated bitwise OR-NOT (OR with complemented second operand), "_m"
   merging forms (s8/s32/s16, u8/u32/u16).  Forward to the GCC builtins;
   inactive lanes (per __p) take their value from __inactive (ACLE "_m"
   convention — NOTE(review): confirm against the Arm MVE spec).  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vornq_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vornq_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vornq_m_sv8hi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vornq_m_uv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vornq_m_uv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vornq_m_uv8hi (__inactive, __a, __b, __p);
}
10746 | ||
/* Predicated bitwise OR, "_m" merging forms (s8/s32/s16, u8/u32/u16).
   Forward to the GCC builtins; inactive lanes (per __p) take their value
   from __inactive (ACLE "_m" convention — NOTE(review): confirm against
   the Arm MVE spec).  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vorrq_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vorrq_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vorrq_m_sv8hi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vorrq_m_uv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vorrq_m_uv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vorrq_m_uv8hi (__inactive, __a, __b, __p);
}
10788 | ||
/* Predicated saturating add with scalar operand, "_m_n" merging forms
   (s8/s32/s16, u8/u32/u16).  Forward to the GCC builtins; inactive lanes
   (per __p) take their value from __inactive (ACLE "_m" convention —
   NOTE(review): saturation behavior per the Arm MVE spec).  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqaddq_m_n_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqaddq_m_n_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqaddq_m_n_sv8hi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqaddq_m_n_uv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqaddq_m_n_uv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqaddq_m_n_uv8hi (__inactive, __a, __b, __p);
}
10830 | ||
/* Predicated ("_m") saturating vector-vector add, one overload per
   element type.  Each wrapper forwards unchanged to its GCC builtin;
   __inactive supplies lanes where predicate __p is false.
   NOTE(review): merge semantics per the "_m"/__inactive convention of
   this header; behavior is defined by the builtin expansion.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqaddq_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqaddq_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqaddq_m_sv8hi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqaddq_m_uv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqaddq_m_uv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqaddq_m_uv8hi (__inactive, __a, __b, __p);
}
10872 | ||
/* Predicated ("_m") VQDMLADH (signed-only; s8/s32/s16 overloads):
   thin wrappers over the GCC builtins.  __inactive supplies lanes
   where predicate __p is false.
   NOTE(review): the doubling multiply-add semantics implied by the
   name are implemented entirely by the builtin — confirm details
   against the Arm MVE intrinsics specification.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmladhq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqdmladhq_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmladhq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqdmladhq_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmladhq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqdmladhq_m_sv8hi (__inactive, __a, __b, __p);
}
10893 | ||
/* Predicated ("_m") VQDMLADHX ("exchange" variant of vqdmladhq;
   signed-only): thin wrappers over the GCC builtins, with __inactive
   supplying lanes where predicate __p is false.
   NOTE(review): semantics defined by the builtin expansion.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmladhxq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqdmladhxq_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmladhxq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqdmladhxq_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmladhxq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqdmladhxq_m_sv8hi (__inactive, __a, __b, __p);
}
10914 | ||
/* Predicated ("_m") VQDMLAH with scalar operand ("_n"), signed-only.
   Unlike the __inactive-based wrappers above, the accumulator __a is
   the first argument — there is no separate inactive vector.
   NOTE(review): which value false-predicated lanes keep is decided by
   the builtin; confirm against the Arm MVE intrinsics specification.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlahq_m_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vqdmlahq_m_n_sv16qi (__a, __b, __c, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlahq_m_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vqdmlahq_m_n_sv4si (__a, __b, __c, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlahq_m_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vqdmlahq_m_n_sv8hi (__a, __b, __c, __p);
}
10935 | ||
/* Predicated ("_m") VQDMLSDH (signed-only): thin wrappers over the
   GCC builtins; __inactive supplies lanes where predicate __p is
   false.  NOTE(review): semantics defined by the builtin expansion.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlsdhq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqdmlsdhq_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlsdhq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqdmlsdhq_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlsdhq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqdmlsdhq_m_sv8hi (__inactive, __a, __b, __p);
}
10956 | ||
/* Predicated ("_m") VQDMLSDHX ("exchange" variant of vqdmlsdhq;
   signed-only): thin wrappers over the GCC builtins, with __inactive
   supplying lanes where predicate __p is false.
   NOTE(review): semantics defined by the builtin expansion.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlsdhxq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqdmlsdhxq_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlsdhxq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqdmlsdhxq_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlsdhxq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqdmlsdhxq_m_sv8hi (__inactive, __a, __b, __p);
}
10977 | ||
/* Predicated ("_m") saturating doubling multiply-high with a scalar
   operand ("_n"), signed-only: thin wrappers over the GCC builtins;
   __inactive supplies lanes where predicate __p is false.
   NOTE(review): semantics defined by the builtin expansion.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulhq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqdmulhq_m_n_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulhq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqdmulhq_m_n_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulhq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqdmulhq_m_n_sv8hi (__inactive, __a, __b, __p);
}
10998 | ||
/* Predicated ("_m") saturating doubling multiply-high, vector-vector
   form, signed-only: thin wrappers over the GCC builtins; __inactive
   supplies lanes where predicate __p is false.
   NOTE(review): semantics defined by the builtin expansion.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulhq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqdmulhq_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulhq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqdmulhq_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulhq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqdmulhq_m_sv8hi (__inactive, __a, __b, __p);
}
11019 | ||
/* Predicated ("_m") VQRDMLADH (rounding variant of vqdmladhq;
   signed-only): thin wrappers over the GCC builtins, with __inactive
   supplying lanes where predicate __p is false.
   NOTE(review): semantics defined by the builtin expansion.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmladhq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqrdmladhq_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmladhq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqrdmladhq_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmladhq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqrdmladhq_m_sv8hi (__inactive, __a, __b, __p);
}
11040 | ||
/* Predicated ("_m") VQRDMLADHX (rounding + exchange variant;
   signed-only): thin wrappers over the GCC builtins, with __inactive
   supplying lanes where predicate __p is false.
   NOTE(review): semantics defined by the builtin expansion.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmladhxq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqrdmladhxq_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmladhxq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqrdmladhxq_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmladhxq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqrdmladhxq_m_sv8hi (__inactive, __a, __b, __p);
}
11061 | ||
/* Predicated ("_m") VQRDMLAH with scalar operand ("_n"), signed-only.
   Accumulator __a is the first argument — no separate inactive vector.
   NOTE(review): which value false-predicated lanes keep is decided by
   the builtin; confirm against the Arm MVE intrinsics specification.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlahq_m_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vqrdmlahq_m_n_sv16qi (__a, __b, __c, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlahq_m_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vqrdmlahq_m_n_sv4si (__a, __b, __c, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlahq_m_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vqrdmlahq_m_n_sv8hi (__a, __b, __c, __p);
}
11082 | ||
/* Predicated ("_m") VQRDMLASH with scalar operand ("_n"), signed-only.
   Accumulator __a is the first argument — no separate inactive vector.
   NOTE(review): which value false-predicated lanes keep is decided by
   the builtin; confirm against the Arm MVE intrinsics specification.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlashq_m_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vqrdmlashq_m_n_sv16qi (__a, __b, __c, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlashq_m_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vqrdmlashq_m_n_sv4si (__a, __b, __c, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlashq_m_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vqrdmlashq_m_n_sv8hi (__a, __b, __c, __p);
}
11103 | ||
/* Predicated ("_m") VQDMLASH with scalar operand ("_n"), signed-only.
   Accumulator __a is the first argument — no separate inactive vector.
   (Overload order here is s8, s16, s32, unlike the neighbouring
   families — harmless, the functions are independent.)
   NOTE(review): which value false-predicated lanes keep is decided by
   the builtin; confirm against the Arm MVE intrinsics specification.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlashq_m_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vqdmlashq_m_n_sv16qi (__a, __b, __c, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlashq_m_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vqdmlashq_m_n_sv8hi (__a, __b, __c, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlashq_m_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vqdmlashq_m_n_sv4si (__a, __b, __c, __p);
}
11124 | ||
/* Predicated ("_m") VQRDMLSDH (rounding variant of vqdmlsdhq;
   signed-only): thin wrappers over the GCC builtins, with __inactive
   supplying lanes where predicate __p is false.
   NOTE(review): semantics defined by the builtin expansion.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlsdhq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqrdmlsdhq_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlsdhq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqrdmlsdhq_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlsdhq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqrdmlsdhq_m_sv8hi (__inactive, __a, __b, __p);
}
11145 | ||
/* Predicated ("_m") VQRDMLSDHX (rounding + exchange variant;
   signed-only): thin wrappers over the GCC builtins, with __inactive
   supplying lanes where predicate __p is false.
   NOTE(review): semantics defined by the builtin expansion.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlsdhxq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqrdmlsdhxq_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlsdhxq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqrdmlsdhxq_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlsdhxq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqrdmlsdhxq_m_sv8hi (__inactive, __a, __b, __p);
}
11166 | ||
/* Predicated ("_m") saturating rounding doubling multiply-high with a
   scalar operand ("_n"), signed-only: thin wrappers over the GCC
   builtins; __inactive supplies lanes where predicate __p is false.
   NOTE(review): semantics defined by the builtin expansion.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmulhq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqrdmulhq_m_n_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmulhq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqrdmulhq_m_n_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmulhq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqrdmulhq_m_n_sv8hi (__inactive, __a, __b, __p);
}
11187 | ||
/* Predicated ("_m") saturating rounding doubling multiply-high,
   vector-vector form, signed-only: thin wrappers over the GCC
   builtins; __inactive supplies lanes where predicate __p is false.
   NOTE(review): semantics defined by the builtin expansion.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmulhq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqrdmulhq_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmulhq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqrdmulhq_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmulhq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqrdmulhq_m_sv8hi (__inactive, __a, __b, __p);
}
11208 | ||
/* Predicated ("_m") saturating rounding shift-left by a vector of
   per-lane shift counts.  Note the shift-count vector __b is SIGNED in
   every overload, including the unsigned-element ones (visible in the
   signatures below).  __inactive supplies lanes where predicate __p is
   false.  NOTE(review): semantics defined by the builtin expansion.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqrshlq_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqrshlq_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqrshlq_m_sv8hi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqrshlq_m_uv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqrshlq_m_uv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqrshlq_m_uv8hi (__inactive, __a, __b, __p);
}
11250 | ||
/* Predicated ("_m") saturating shift-left by an immediate ("_n"):
   __imm must be a compile-time constant (hence `const int`; the
   builtin enforces the valid range).  __inactive supplies lanes where
   predicate __p is false.
   NOTE(review): semantics defined by the builtin expansion.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vqshlq_m_n_sv16qi (__inactive, __a, __imm, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vqshlq_m_n_sv4si (__inactive, __a, __imm, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vqshlq_m_n_sv8hi (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vqshlq_m_n_uv16qi (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vqshlq_m_n_uv4si (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vqshlq_m_n_uv8hi (__inactive, __a, __imm, __p);
}
11292 | ||
/* Predicated ("_m") saturating shift-left by a vector of per-lane
   shift counts (non-rounding counterpart of vqrshlq_m above).  The
   shift-count vector __b is SIGNED in all overloads.  __inactive
   supplies lanes where predicate __p is false.
   NOTE(review): semantics defined by the builtin expansion.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqshlq_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqshlq_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqshlq_m_sv8hi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqshlq_m_uv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqshlq_m_uv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqshlq_m_uv8hi (__inactive, __a, __b, __p);
}
11334 | ||
/* Predicated ("_m") saturating subtract of a scalar ("_n"), one
   overload per element type.  Each wrapper forwards unchanged to its
   GCC builtin; __inactive supplies lanes where predicate __p is false.
   NOTE(review): merge semantics per the "_m"/__inactive convention of
   this header; behavior is defined by the builtin expansion.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqsubq_m_n_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqsubq_m_n_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqsubq_m_n_sv8hi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqsubq_m_n_uv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqsubq_m_n_uv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqsubq_m_n_uv8hi (__inactive, __a, __b, __p);
}
11376 | ||
/* Predicated ("_m") saturating vector-vector subtract, one overload
   per element type.  Each wrapper forwards unchanged to its GCC
   builtin; __inactive supplies lanes where predicate __p is false.
   NOTE(review): merge semantics per the "_m"/__inactive convention of
   this header; behavior is defined by the builtin expansion.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqsubq_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqsubq_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqsubq_m_sv8hi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqsubq_m_uv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqsubq_m_uv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqsubq_m_uv8hi (__inactive, __a, __b, __p);
}
11418 | ||
/* Predicated ("_m") rounding halving add, one overload per element
   type.  Each wrapper forwards unchanged to its GCC builtin;
   __inactive supplies lanes where predicate __p is false.
   NOTE(review): merge semantics per the "_m"/__inactive convention of
   this header; behavior is defined by the builtin expansion.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrhaddq_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrhaddq_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrhaddq_m_sv8hi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrhaddq_m_uv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrhaddq_m_uv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrhaddq_m_uv8hi (__inactive, __a, __b, __p);
}
11460 | ||
11461 | __extension__ extern __inline int8x16_t | |
11462 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11463 | __arm_vrmulhq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
11464 | { | |
11465 | return __builtin_mve_vrmulhq_m_sv16qi (__inactive, __a, __b, __p); | |
11466 | } | |
11467 | ||
11468 | __extension__ extern __inline int32x4_t | |
11469 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11470 | __arm_vrmulhq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
11471 | { | |
11472 | return __builtin_mve_vrmulhq_m_sv4si (__inactive, __a, __b, __p); | |
11473 | } | |
11474 | ||
11475 | __extension__ extern __inline int16x8_t | |
11476 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11477 | __arm_vrmulhq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
11478 | { | |
11479 | return __builtin_mve_vrmulhq_m_sv8hi (__inactive, __a, __b, __p); | |
11480 | } | |
11481 | ||
11482 | __extension__ extern __inline uint8x16_t | |
11483 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11484 | __arm_vrmulhq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
11485 | { | |
11486 | return __builtin_mve_vrmulhq_m_uv16qi (__inactive, __a, __b, __p); | |
11487 | } | |
11488 | ||
11489 | __extension__ extern __inline uint32x4_t | |
11490 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11491 | __arm_vrmulhq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
11492 | { | |
11493 | return __builtin_mve_vrmulhq_m_uv4si (__inactive, __a, __b, __p); | |
11494 | } | |
11495 | ||
11496 | __extension__ extern __inline uint16x8_t | |
11497 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11498 | __arm_vrmulhq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
11499 | { | |
11500 | return __builtin_mve_vrmulhq_m_uv8hi (__inactive, __a, __b, __p); | |
11501 | } | |
11502 | ||
11503 | __extension__ extern __inline int8x16_t | |
11504 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11505 | __arm_vrshlq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
11506 | { | |
11507 | return __builtin_mve_vrshlq_m_sv16qi (__inactive, __a, __b, __p); | |
11508 | } | |
11509 | ||
11510 | __extension__ extern __inline int32x4_t | |
11511 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11512 | __arm_vrshlq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
11513 | { | |
11514 | return __builtin_mve_vrshlq_m_sv4si (__inactive, __a, __b, __p); | |
11515 | } | |
11516 | ||
11517 | __extension__ extern __inline int16x8_t | |
11518 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11519 | __arm_vrshlq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
11520 | { | |
11521 | return __builtin_mve_vrshlq_m_sv8hi (__inactive, __a, __b, __p); | |
11522 | } | |
11523 | ||
11524 | __extension__ extern __inline uint8x16_t | |
11525 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11526 | __arm_vrshlq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
11527 | { | |
11528 | return __builtin_mve_vrshlq_m_uv16qi (__inactive, __a, __b, __p); | |
11529 | } | |
11530 | ||
11531 | __extension__ extern __inline uint32x4_t | |
11532 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11533 | __arm_vrshlq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
11534 | { | |
11535 | return __builtin_mve_vrshlq_m_uv4si (__inactive, __a, __b, __p); | |
11536 | } | |
11537 | ||
11538 | __extension__ extern __inline uint16x8_t | |
11539 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11540 | __arm_vrshlq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
11541 | { | |
11542 | return __builtin_mve_vrshlq_m_uv8hi (__inactive, __a, __b, __p); | |
11543 | } | |
11544 | ||
11545 | __extension__ extern __inline int8x16_t | |
11546 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11547 | __arm_vrshrq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p) | |
11548 | { | |
11549 | return __builtin_mve_vrshrq_m_n_sv16qi (__inactive, __a, __imm, __p); | |
11550 | } | |
11551 | ||
11552 | __extension__ extern __inline int32x4_t | |
11553 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11554 | __arm_vrshrq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, const int __imm, mve_pred16_t __p) | |
11555 | { | |
11556 | return __builtin_mve_vrshrq_m_n_sv4si (__inactive, __a, __imm, __p); | |
11557 | } | |
11558 | ||
11559 | __extension__ extern __inline int16x8_t | |
11560 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11561 | __arm_vrshrq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p) | |
11562 | { | |
11563 | return __builtin_mve_vrshrq_m_n_sv8hi (__inactive, __a, __imm, __p); | |
11564 | } | |
11565 | ||
11566 | __extension__ extern __inline uint8x16_t | |
11567 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11568 | __arm_vrshrq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, const int __imm, mve_pred16_t __p) | |
11569 | { | |
11570 | return __builtin_mve_vrshrq_m_n_uv16qi (__inactive, __a, __imm, __p); | |
11571 | } | |
11572 | ||
11573 | __extension__ extern __inline uint32x4_t | |
11574 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11575 | __arm_vrshrq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, const int __imm, mve_pred16_t __p) | |
11576 | { | |
11577 | return __builtin_mve_vrshrq_m_n_uv4si (__inactive, __a, __imm, __p); | |
11578 | } | |
11579 | ||
11580 | __extension__ extern __inline uint16x8_t | |
11581 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11582 | __arm_vrshrq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, const int __imm, mve_pred16_t __p) | |
11583 | { | |
11584 | return __builtin_mve_vrshrq_m_n_uv8hi (__inactive, __a, __imm, __p); | |
11585 | } | |
11586 | ||
11587 | __extension__ extern __inline int8x16_t | |
11588 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11589 | __arm_vshlq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p) | |
11590 | { | |
11591 | return __builtin_mve_vshlq_m_n_sv16qi (__inactive, __a, __imm, __p); | |
11592 | } | |
11593 | ||
11594 | __extension__ extern __inline int32x4_t | |
11595 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11596 | __arm_vshlq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, const int __imm, mve_pred16_t __p) | |
11597 | { | |
11598 | return __builtin_mve_vshlq_m_n_sv4si (__inactive, __a, __imm, __p); | |
11599 | } | |
11600 | ||
11601 | __extension__ extern __inline int16x8_t | |
11602 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11603 | __arm_vshlq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p) | |
11604 | { | |
11605 | return __builtin_mve_vshlq_m_n_sv8hi (__inactive, __a, __imm, __p); | |
11606 | } | |
11607 | ||
11608 | __extension__ extern __inline uint8x16_t | |
11609 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11610 | __arm_vshlq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, const int __imm, mve_pred16_t __p) | |
11611 | { | |
11612 | return __builtin_mve_vshlq_m_n_uv16qi (__inactive, __a, __imm, __p); | |
11613 | } | |
11614 | ||
11615 | __extension__ extern __inline uint32x4_t | |
11616 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11617 | __arm_vshlq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, const int __imm, mve_pred16_t __p) | |
11618 | { | |
11619 | return __builtin_mve_vshlq_m_n_uv4si (__inactive, __a, __imm, __p); | |
11620 | } | |
11621 | ||
11622 | __extension__ extern __inline uint16x8_t | |
11623 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11624 | __arm_vshlq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, const int __imm, mve_pred16_t __p) | |
11625 | { | |
11626 | return __builtin_mve_vshlq_m_n_uv8hi (__inactive, __a, __imm, __p); | |
11627 | } | |
11628 | ||
11629 | __extension__ extern __inline int8x16_t | |
11630 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11631 | __arm_vshrq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p) | |
11632 | { | |
11633 | return __builtin_mve_vshrq_m_n_sv16qi (__inactive, __a, __imm, __p); | |
11634 | } | |
11635 | ||
11636 | __extension__ extern __inline int32x4_t | |
11637 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11638 | __arm_vshrq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, const int __imm, mve_pred16_t __p) | |
11639 | { | |
11640 | return __builtin_mve_vshrq_m_n_sv4si (__inactive, __a, __imm, __p); | |
11641 | } | |
11642 | ||
11643 | __extension__ extern __inline int16x8_t | |
11644 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11645 | __arm_vshrq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p) | |
11646 | { | |
11647 | return __builtin_mve_vshrq_m_n_sv8hi (__inactive, __a, __imm, __p); | |
11648 | } | |
11649 | ||
11650 | __extension__ extern __inline uint8x16_t | |
11651 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11652 | __arm_vshrq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, const int __imm, mve_pred16_t __p) | |
11653 | { | |
11654 | return __builtin_mve_vshrq_m_n_uv16qi (__inactive, __a, __imm, __p); | |
11655 | } | |
11656 | ||
11657 | __extension__ extern __inline uint32x4_t | |
11658 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11659 | __arm_vshrq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, const int __imm, mve_pred16_t __p) | |
11660 | { | |
11661 | return __builtin_mve_vshrq_m_n_uv4si (__inactive, __a, __imm, __p); | |
11662 | } | |
11663 | ||
11664 | __extension__ extern __inline uint16x8_t | |
11665 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11666 | __arm_vshrq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, const int __imm, mve_pred16_t __p) | |
11667 | { | |
11668 | return __builtin_mve_vshrq_m_n_uv8hi (__inactive, __a, __imm, __p); | |
11669 | } | |
11670 | ||
11671 | __extension__ extern __inline int8x16_t | |
11672 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11673 | __arm_vsliq_m_n_s8 (int8x16_t __a, int8x16_t __b, const int __imm, mve_pred16_t __p) | |
11674 | { | |
11675 | return __builtin_mve_vsliq_m_n_sv16qi (__a, __b, __imm, __p); | |
11676 | } | |
11677 | ||
11678 | __extension__ extern __inline int32x4_t | |
11679 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11680 | __arm_vsliq_m_n_s32 (int32x4_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p) | |
11681 | { | |
11682 | return __builtin_mve_vsliq_m_n_sv4si (__a, __b, __imm, __p); | |
11683 | } | |
11684 | ||
11685 | __extension__ extern __inline int16x8_t | |
11686 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11687 | __arm_vsliq_m_n_s16 (int16x8_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p) | |
11688 | { | |
11689 | return __builtin_mve_vsliq_m_n_sv8hi (__a, __b, __imm, __p); | |
11690 | } | |
11691 | ||
11692 | __extension__ extern __inline uint8x16_t | |
11693 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11694 | __arm_vsliq_m_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __imm, mve_pred16_t __p) | |
11695 | { | |
11696 | return __builtin_mve_vsliq_m_n_uv16qi (__a, __b, __imm, __p); | |
11697 | } | |
11698 | ||
11699 | __extension__ extern __inline uint32x4_t | |
11700 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11701 | __arm_vsliq_m_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p) | |
11702 | { | |
11703 | return __builtin_mve_vsliq_m_n_uv4si (__a, __b, __imm, __p); | |
11704 | } | |
11705 | ||
11706 | __extension__ extern __inline uint16x8_t | |
11707 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11708 | __arm_vsliq_m_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p) | |
11709 | { | |
11710 | return __builtin_mve_vsliq_m_n_uv8hi (__a, __b, __imm, __p); | |
11711 | } | |
11712 | ||
11713 | __extension__ extern __inline int8x16_t | |
11714 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11715 | __arm_vsubq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p) | |
11716 | { | |
11717 | return __builtin_mve_vsubq_m_n_sv16qi (__inactive, __a, __b, __p); | |
11718 | } | |
11719 | ||
11720 | __extension__ extern __inline int32x4_t | |
11721 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11722 | __arm_vsubq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p) | |
11723 | { | |
11724 | return __builtin_mve_vsubq_m_n_sv4si (__inactive, __a, __b, __p); | |
11725 | } | |
11726 | ||
11727 | __extension__ extern __inline int16x8_t | |
11728 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11729 | __arm_vsubq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p) | |
11730 | { | |
11731 | return __builtin_mve_vsubq_m_n_sv8hi (__inactive, __a, __b, __p); | |
11732 | } | |
11733 | ||
11734 | __extension__ extern __inline uint8x16_t | |
11735 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11736 | __arm_vsubq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p) | |
11737 | { | |
11738 | return __builtin_mve_vsubq_m_n_uv16qi (__inactive, __a, __b, __p); | |
11739 | } | |
11740 | ||
11741 | __extension__ extern __inline uint32x4_t | |
11742 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11743 | __arm_vsubq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p) | |
11744 | { | |
11745 | return __builtin_mve_vsubq_m_n_uv4si (__inactive, __a, __b, __p); | |
11746 | } | |
11747 | ||
11748 | __extension__ extern __inline uint16x8_t | |
11749 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11750 | __arm_vsubq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p) | |
11751 | { | |
11752 | return __builtin_mve_vsubq_m_n_uv8hi (__inactive, __a, __b, __p); | |
11753 | } | |
11754 | ||
f2170a37 SP |
11755 | __extension__ extern __inline int64_t |
11756 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11757 | __arm_vmlaldavaq_p_s32 (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p) | |
11758 | { | |
11759 | return __builtin_mve_vmlaldavaq_p_sv4si (__a, __b, __c, __p); | |
11760 | } | |
11761 | ||
11762 | __extension__ extern __inline int64_t | |
11763 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11764 | __arm_vmlaldavaq_p_s16 (int64_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p) | |
11765 | { | |
11766 | return __builtin_mve_vmlaldavaq_p_sv8hi (__a, __b, __c, __p); | |
11767 | } | |
11768 | ||
11769 | __extension__ extern __inline uint64_t | |
11770 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11771 | __arm_vmlaldavaq_p_u32 (uint64_t __a, uint32x4_t __b, uint32x4_t __c, mve_pred16_t __p) | |
11772 | { | |
11773 | return __builtin_mve_vmlaldavaq_p_uv4si (__a, __b, __c, __p); | |
11774 | } | |
11775 | ||
11776 | __extension__ extern __inline uint64_t | |
11777 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11778 | __arm_vmlaldavaq_p_u16 (uint64_t __a, uint16x8_t __b, uint16x8_t __c, mve_pred16_t __p) | |
11779 | { | |
11780 | return __builtin_mve_vmlaldavaq_p_uv8hi (__a, __b, __c, __p); | |
11781 | } | |
11782 | ||
11783 | __extension__ extern __inline int64_t | |
11784 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11785 | __arm_vmlaldavaxq_p_s32 (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p) | |
11786 | { | |
11787 | return __builtin_mve_vmlaldavaxq_p_sv4si (__a, __b, __c, __p); | |
11788 | } | |
11789 | ||
11790 | __extension__ extern __inline int64_t | |
11791 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11792 | __arm_vmlaldavaxq_p_s16 (int64_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p) | |
11793 | { | |
11794 | return __builtin_mve_vmlaldavaxq_p_sv8hi (__a, __b, __c, __p); | |
11795 | } | |
11796 | ||
f2170a37 SP |
11797 | __extension__ extern __inline int64_t |
11798 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11799 | __arm_vmlsldavaq_p_s32 (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p) | |
11800 | { | |
11801 | return __builtin_mve_vmlsldavaq_p_sv4si (__a, __b, __c, __p); | |
11802 | } | |
11803 | ||
11804 | __extension__ extern __inline int64_t | |
11805 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11806 | __arm_vmlsldavaq_p_s16 (int64_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p) | |
11807 | { | |
11808 | return __builtin_mve_vmlsldavaq_p_sv8hi (__a, __b, __c, __p); | |
11809 | } | |
11810 | ||
11811 | __extension__ extern __inline int64_t | |
11812 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11813 | __arm_vmlsldavaxq_p_s32 (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p) | |
11814 | { | |
11815 | return __builtin_mve_vmlsldavaxq_p_sv4si (__a, __b, __c, __p); | |
11816 | } | |
11817 | ||
11818 | __extension__ extern __inline int64_t | |
11819 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11820 | __arm_vmlsldavaxq_p_s16 (int64_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p) | |
11821 | { | |
11822 | return __builtin_mve_vmlsldavaxq_p_sv8hi (__a, __b, __c, __p); | |
11823 | } | |
11824 | ||
11825 | __extension__ extern __inline uint16x8_t | |
11826 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11827 | __arm_vmullbq_poly_m_p8 (uint16x8_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
11828 | { | |
11829 | return __builtin_mve_vmullbq_poly_m_pv16qi (__inactive, __a, __b, __p); | |
11830 | } | |
11831 | ||
11832 | __extension__ extern __inline uint32x4_t | |
11833 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11834 | __arm_vmullbq_poly_m_p16 (uint32x4_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
11835 | { | |
11836 | return __builtin_mve_vmullbq_poly_m_pv8hi (__inactive, __a, __b, __p); | |
11837 | } | |
11838 | ||
11839 | __extension__ extern __inline uint16x8_t | |
11840 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11841 | __arm_vmulltq_poly_m_p8 (uint16x8_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
11842 | { | |
11843 | return __builtin_mve_vmulltq_poly_m_pv16qi (__inactive, __a, __b, __p); | |
11844 | } | |
11845 | ||
11846 | __extension__ extern __inline uint32x4_t | |
11847 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11848 | __arm_vmulltq_poly_m_p16 (uint32x4_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
11849 | { | |
11850 | return __builtin_mve_vmulltq_poly_m_pv8hi (__inactive, __a, __b, __p); | |
11851 | } | |
11852 | ||
11853 | __extension__ extern __inline int64x2_t | |
11854 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11855 | __arm_vqdmullbq_m_n_s32 (int64x2_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p) | |
11856 | { | |
11857 | return __builtin_mve_vqdmullbq_m_n_sv4si (__inactive, __a, __b, __p); | |
11858 | } | |
11859 | ||
11860 | __extension__ extern __inline int32x4_t | |
11861 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11862 | __arm_vqdmullbq_m_n_s16 (int32x4_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p) | |
11863 | { | |
11864 | return __builtin_mve_vqdmullbq_m_n_sv8hi (__inactive, __a, __b, __p); | |
11865 | } | |
11866 | ||
11867 | __extension__ extern __inline int64x2_t | |
11868 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11869 | __arm_vqdmullbq_m_s32 (int64x2_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
11870 | { | |
11871 | return __builtin_mve_vqdmullbq_m_sv4si (__inactive, __a, __b, __p); | |
11872 | } | |
11873 | ||
11874 | __extension__ extern __inline int32x4_t | |
11875 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11876 | __arm_vqdmullbq_m_s16 (int32x4_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
11877 | { | |
11878 | return __builtin_mve_vqdmullbq_m_sv8hi (__inactive, __a, __b, __p); | |
11879 | } | |
11880 | ||
11881 | __extension__ extern __inline int64x2_t | |
11882 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11883 | __arm_vqdmulltq_m_n_s32 (int64x2_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p) | |
11884 | { | |
11885 | return __builtin_mve_vqdmulltq_m_n_sv4si (__inactive, __a, __b, __p); | |
11886 | } | |
11887 | ||
11888 | __extension__ extern __inline int32x4_t | |
11889 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11890 | __arm_vqdmulltq_m_n_s16 (int32x4_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p) | |
11891 | { | |
11892 | return __builtin_mve_vqdmulltq_m_n_sv8hi (__inactive, __a, __b, __p); | |
11893 | } | |
11894 | ||
11895 | __extension__ extern __inline int64x2_t | |
11896 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11897 | __arm_vqdmulltq_m_s32 (int64x2_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
11898 | { | |
11899 | return __builtin_mve_vqdmulltq_m_sv4si (__inactive, __a, __b, __p); | |
11900 | } | |
11901 | ||
11902 | __extension__ extern __inline int32x4_t | |
11903 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11904 | __arm_vqdmulltq_m_s16 (int32x4_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
11905 | { | |
11906 | return __builtin_mve_vqdmulltq_m_sv8hi (__inactive, __a, __b, __p); | |
11907 | } | |
11908 | ||
11909 | __extension__ extern __inline int16x8_t | |
11910 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11911 | __arm_vqrshrnbq_m_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p) | |
11912 | { | |
11913 | return __builtin_mve_vqrshrnbq_m_n_sv4si (__a, __b, __imm, __p); | |
11914 | } | |
11915 | ||
11916 | __extension__ extern __inline int8x16_t | |
11917 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11918 | __arm_vqrshrnbq_m_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p) | |
11919 | { | |
11920 | return __builtin_mve_vqrshrnbq_m_n_sv8hi (__a, __b, __imm, __p); | |
11921 | } | |
11922 | ||
11923 | __extension__ extern __inline uint16x8_t | |
11924 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11925 | __arm_vqrshrnbq_m_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p) | |
11926 | { | |
11927 | return __builtin_mve_vqrshrnbq_m_n_uv4si (__a, __b, __imm, __p); | |
11928 | } | |
11929 | ||
11930 | __extension__ extern __inline uint8x16_t | |
11931 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11932 | __arm_vqrshrnbq_m_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p) | |
11933 | { | |
11934 | return __builtin_mve_vqrshrnbq_m_n_uv8hi (__a, __b, __imm, __p); | |
11935 | } | |
11936 | ||
11937 | __extension__ extern __inline int16x8_t | |
11938 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11939 | __arm_vqrshrntq_m_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p) | |
11940 | { | |
11941 | return __builtin_mve_vqrshrntq_m_n_sv4si (__a, __b, __imm, __p); | |
11942 | } | |
11943 | ||
11944 | __extension__ extern __inline int8x16_t | |
11945 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11946 | __arm_vqrshrntq_m_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p) | |
11947 | { | |
11948 | return __builtin_mve_vqrshrntq_m_n_sv8hi (__a, __b, __imm, __p); | |
11949 | } | |
11950 | ||
11951 | __extension__ extern __inline uint16x8_t | |
11952 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11953 | __arm_vqrshrntq_m_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p) | |
11954 | { | |
11955 | return __builtin_mve_vqrshrntq_m_n_uv4si (__a, __b, __imm, __p); | |
11956 | } | |
11957 | ||
11958 | __extension__ extern __inline uint8x16_t | |
11959 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11960 | __arm_vqrshrntq_m_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p) | |
11961 | { | |
11962 | return __builtin_mve_vqrshrntq_m_n_uv8hi (__a, __b, __imm, __p); | |
11963 | } | |
11964 | ||
11965 | __extension__ extern __inline uint16x8_t | |
11966 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11967 | __arm_vqrshrunbq_m_n_s32 (uint16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p) | |
11968 | { | |
11969 | return __builtin_mve_vqrshrunbq_m_n_sv4si (__a, __b, __imm, __p); | |
11970 | } | |
11971 | ||
11972 | __extension__ extern __inline uint8x16_t | |
11973 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11974 | __arm_vqrshrunbq_m_n_s16 (uint8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p) | |
11975 | { | |
11976 | return __builtin_mve_vqrshrunbq_m_n_sv8hi (__a, __b, __imm, __p); | |
11977 | } | |
11978 | ||
11979 | __extension__ extern __inline uint16x8_t | |
11980 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11981 | __arm_vqrshruntq_m_n_s32 (uint16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p) | |
11982 | { | |
11983 | return __builtin_mve_vqrshruntq_m_n_sv4si (__a, __b, __imm, __p); | |
11984 | } | |
11985 | ||
11986 | __extension__ extern __inline uint8x16_t | |
11987 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11988 | __arm_vqrshruntq_m_n_s16 (uint8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p) | |
11989 | { | |
11990 | return __builtin_mve_vqrshruntq_m_n_sv8hi (__a, __b, __imm, __p); | |
11991 | } | |
11992 | ||
11993 | __extension__ extern __inline int16x8_t | |
11994 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
11995 | __arm_vqshrnbq_m_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p) | |
11996 | { | |
11997 | return __builtin_mve_vqshrnbq_m_n_sv4si (__a, __b, __imm, __p); | |
11998 | } | |
11999 | ||
12000 | __extension__ extern __inline int8x16_t | |
12001 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12002 | __arm_vqshrnbq_m_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p) | |
12003 | { | |
12004 | return __builtin_mve_vqshrnbq_m_n_sv8hi (__a, __b, __imm, __p); | |
12005 | } | |
12006 | ||
12007 | __extension__ extern __inline uint16x8_t | |
12008 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12009 | __arm_vqshrnbq_m_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p) | |
12010 | { | |
12011 | return __builtin_mve_vqshrnbq_m_n_uv4si (__a, __b, __imm, __p); | |
12012 | } | |
12013 | ||
12014 | __extension__ extern __inline uint8x16_t | |
12015 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12016 | __arm_vqshrnbq_m_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p) | |
12017 | { | |
12018 | return __builtin_mve_vqshrnbq_m_n_uv8hi (__a, __b, __imm, __p); | |
12019 | } | |
12020 | ||
12021 | __extension__ extern __inline int16x8_t | |
12022 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12023 | __arm_vqshrntq_m_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p) | |
12024 | { | |
12025 | return __builtin_mve_vqshrntq_m_n_sv4si (__a, __b, __imm, __p); | |
12026 | } | |
12027 | ||
12028 | __extension__ extern __inline int8x16_t | |
12029 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12030 | __arm_vqshrntq_m_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p) | |
12031 | { | |
12032 | return __builtin_mve_vqshrntq_m_n_sv8hi (__a, __b, __imm, __p); | |
12033 | } | |
12034 | ||
12035 | __extension__ extern __inline uint16x8_t | |
12036 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12037 | __arm_vqshrntq_m_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p) | |
12038 | { | |
12039 | return __builtin_mve_vqshrntq_m_n_uv4si (__a, __b, __imm, __p); | |
12040 | } | |
12041 | ||
12042 | __extension__ extern __inline uint8x16_t | |
12043 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12044 | __arm_vqshrntq_m_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p) | |
12045 | { | |
12046 | return __builtin_mve_vqshrntq_m_n_uv8hi (__a, __b, __imm, __p); | |
12047 | } | |
12048 | ||
12049 | __extension__ extern __inline uint16x8_t | |
12050 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12051 | __arm_vqshrunbq_m_n_s32 (uint16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p) | |
12052 | { | |
12053 | return __builtin_mve_vqshrunbq_m_n_sv4si (__a, __b, __imm, __p); | |
12054 | } | |
12055 | ||
12056 | __extension__ extern __inline uint8x16_t | |
12057 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12058 | __arm_vqshrunbq_m_n_s16 (uint8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p) | |
12059 | { | |
12060 | return __builtin_mve_vqshrunbq_m_n_sv8hi (__a, __b, __imm, __p); | |
12061 | } | |
12062 | ||
12063 | __extension__ extern __inline uint16x8_t | |
12064 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12065 | __arm_vqshruntq_m_n_s32 (uint16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p) | |
12066 | { | |
12067 | return __builtin_mve_vqshruntq_m_n_sv4si (__a, __b, __imm, __p); | |
12068 | } | |
12069 | ||
12070 | __extension__ extern __inline uint8x16_t | |
12071 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12072 | __arm_vqshruntq_m_n_s16 (uint8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p) | |
12073 | { | |
12074 | return __builtin_mve_vqshruntq_m_n_sv8hi (__a, __b, __imm, __p); | |
12075 | } | |
12076 | ||
/* Predicated ("_p") across-vector multiply-accumulate reductions producing
   a 64-bit scalar (VRMLALDAVHA / VRMLALDAVHAX / VRMLSLDAVHA / VRMLSLDAVHAX
   instruction family).  __a is the incoming 64-bit accumulator, __b and __c
   the vector operands, __p the lane predicate; each wrapper simply forwards
   to the corresponding builtin.  See the Arm MVE ACLE specification for the
   precise rounding/exchange/subtract behaviour encoded in each name.  */
12077 | __extension__ extern __inline int64_t | |
12078 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12079 | __arm_vrmlaldavhaq_p_s32 (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p) | |
12080 | { | |
12081 | return __builtin_mve_vrmlaldavhaq_p_sv4si (__a, __b, __c, __p); | |
12082 | } | |
12083 | ||
12084 | __extension__ extern __inline uint64_t | |
12085 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12086 | __arm_vrmlaldavhaq_p_u32 (uint64_t __a, uint32x4_t __b, uint32x4_t __c, mve_pred16_t __p) | |
12087 | { | |
12088 | return __builtin_mve_vrmlaldavhaq_p_uv4si (__a, __b, __c, __p); | |
12089 | } | |
12090 | ||
12091 | __extension__ extern __inline int64_t | |
12092 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12093 | __arm_vrmlaldavhaxq_p_s32 (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p) | |
12094 | { | |
12095 | return __builtin_mve_vrmlaldavhaxq_p_sv4si (__a, __b, __c, __p); | |
12096 | } | |
12097 | ||
12098 | __extension__ extern __inline int64_t | |
12099 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12100 | __arm_vrmlsldavhaq_p_s32 (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p) | |
12101 | { | |
12102 | return __builtin_mve_vrmlsldavhaq_p_sv4si (__a, __b, __c, __p); | |
12103 | } | |
12104 | ||
12105 | __extension__ extern __inline int64_t | |
12106 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12107 | __arm_vrmlsldavhaxq_p_s32 (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p) | |
12108 | { | |
12109 | return __builtin_mve_vrmlsldavhaxq_p_sv4si (__a, __b, __c, __p); | |
12110 | } | |
12111 | ||
/* Predicated rounding shift-right-narrow (VRSHRNB/VRSHRNT, "_m" merging
   variants).  The wide elements of __b are narrowed by the constant shift
   __imm into the bottom (vrshrnb) or top (vrshrnt) half-width lanes of __a;
   __p is the lane predicate.  Each wrapper forwards unchanged to the
   matching builtin — per-lane semantics are defined by the Arm MVE ACLE.  */
12112 | __extension__ extern __inline int16x8_t | |
12113 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12114 | __arm_vrshrnbq_m_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p) | |
12115 | { | |
12116 | return __builtin_mve_vrshrnbq_m_n_sv4si (__a, __b, __imm, __p); | |
12117 | } | |
12118 | ||
12119 | __extension__ extern __inline int8x16_t | |
12120 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12121 | __arm_vrshrnbq_m_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p) | |
12122 | { | |
12123 | return __builtin_mve_vrshrnbq_m_n_sv8hi (__a, __b, __imm, __p); | |
12124 | } | |
12125 | ||
12126 | __extension__ extern __inline uint16x8_t | |
12127 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12128 | __arm_vrshrnbq_m_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p) | |
12129 | { | |
12130 | return __builtin_mve_vrshrnbq_m_n_uv4si (__a, __b, __imm, __p); | |
12131 | } | |
12132 | ||
12133 | __extension__ extern __inline uint8x16_t | |
12134 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12135 | __arm_vrshrnbq_m_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p) | |
12136 | { | |
12137 | return __builtin_mve_vrshrnbq_m_n_uv8hi (__a, __b, __imm, __p); | |
12138 | } | |
12139 | ||
12140 | __extension__ extern __inline int16x8_t | |
12141 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12142 | __arm_vrshrntq_m_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p) | |
12143 | { | |
12144 | return __builtin_mve_vrshrntq_m_n_sv4si (__a, __b, __imm, __p); | |
12145 | } | |
12146 | ||
12147 | __extension__ extern __inline int8x16_t | |
12148 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12149 | __arm_vrshrntq_m_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p) | |
12150 | { | |
12151 | return __builtin_mve_vrshrntq_m_n_sv8hi (__a, __b, __imm, __p); | |
12152 | } | |
12153 | ||
12154 | __extension__ extern __inline uint16x8_t | |
12155 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12156 | __arm_vrshrntq_m_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p) | |
12157 | { | |
12158 | return __builtin_mve_vrshrntq_m_n_uv4si (__a, __b, __imm, __p); | |
12159 | } | |
12160 | ||
12161 | __extension__ extern __inline uint8x16_t | |
12162 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12163 | __arm_vrshrntq_m_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p) | |
12164 | { | |
12165 | return __builtin_mve_vrshrntq_m_n_uv8hi (__a, __b, __imm, __p); | |
12166 | } | |
12167 | ||
/* Predicated shift-left-long (widening) intrinsics, bottom/top halves
   (VSHLLB/VSHLLT, "_m" merging variants).  Unlike the narrowing forms
   above, these take an explicit __inactive vector: per the ACLE "_m"
   convention it supplies the result for lanes whose predicate bit in __p
   is clear.  __a is the narrow source widened by the constant shift
   __imm; each wrapper forwards unchanged to the matching builtin.  */
12168 | __extension__ extern __inline int16x8_t | |
12169 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12170 | __arm_vshllbq_m_n_s8 (int16x8_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p) | |
12171 | { | |
12172 | return __builtin_mve_vshllbq_m_n_sv16qi (__inactive, __a, __imm, __p); | |
12173 | } | |
12174 | ||
12175 | __extension__ extern __inline int32x4_t | |
12176 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12177 | __arm_vshllbq_m_n_s16 (int32x4_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p) | |
12178 | { | |
12179 | return __builtin_mve_vshllbq_m_n_sv8hi (__inactive, __a, __imm, __p); | |
12180 | } | |
12181 | ||
12182 | __extension__ extern __inline uint16x8_t | |
12183 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12184 | __arm_vshllbq_m_n_u8 (uint16x8_t __inactive, uint8x16_t __a, const int __imm, mve_pred16_t __p) | |
12185 | { | |
12186 | return __builtin_mve_vshllbq_m_n_uv16qi (__inactive, __a, __imm, __p); | |
12187 | } | |
12188 | ||
12189 | __extension__ extern __inline uint32x4_t | |
12190 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12191 | __arm_vshllbq_m_n_u16 (uint32x4_t __inactive, uint16x8_t __a, const int __imm, mve_pred16_t __p) | |
12192 | { | |
12193 | return __builtin_mve_vshllbq_m_n_uv8hi (__inactive, __a, __imm, __p); | |
12194 | } | |
12195 | ||
12196 | __extension__ extern __inline int16x8_t | |
12197 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12198 | __arm_vshlltq_m_n_s8 (int16x8_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p) | |
12199 | { | |
12200 | return __builtin_mve_vshlltq_m_n_sv16qi (__inactive, __a, __imm, __p); | |
12201 | } | |
12202 | ||
12203 | __extension__ extern __inline int32x4_t | |
12204 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12205 | __arm_vshlltq_m_n_s16 (int32x4_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p) | |
12206 | { | |
12207 | return __builtin_mve_vshlltq_m_n_sv8hi (__inactive, __a, __imm, __p); | |
12208 | } | |
12209 | ||
12210 | __extension__ extern __inline uint16x8_t | |
12211 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12212 | __arm_vshlltq_m_n_u8 (uint16x8_t __inactive, uint8x16_t __a, const int __imm, mve_pred16_t __p) | |
12213 | { | |
12214 | return __builtin_mve_vshlltq_m_n_uv16qi (__inactive, __a, __imm, __p); | |
12215 | } | |
12216 | ||
12217 | __extension__ extern __inline uint32x4_t | |
12218 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12219 | __arm_vshlltq_m_n_u16 (uint32x4_t __inactive, uint16x8_t __a, const int __imm, mve_pred16_t __p) | |
12220 | { | |
12221 | return __builtin_mve_vshlltq_m_n_uv8hi (__inactive, __a, __imm, __p); | |
12222 | } | |
12223 | ||
/* Predicated (non-rounding) shift-right-narrow (VSHRNB/VSHRNT, "_m"
   merging variants) — the truncating counterpart of the vrshrn* group
   above.  Wide elements of __b are narrowed by the constant shift __imm
   into the bottom (vshrnb) or top (vshrnt) half-width lanes of __a under
   predicate __p; each wrapper forwards unchanged to the builtin.  */
12224 | __extension__ extern __inline int16x8_t | |
12225 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12226 | __arm_vshrnbq_m_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p) | |
12227 | { | |
12228 | return __builtin_mve_vshrnbq_m_n_sv4si (__a, __b, __imm, __p); | |
12229 | } | |
12230 | ||
12231 | __extension__ extern __inline int8x16_t | |
12232 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12233 | __arm_vshrnbq_m_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p) | |
12234 | { | |
12235 | return __builtin_mve_vshrnbq_m_n_sv8hi (__a, __b, __imm, __p); | |
12236 | } | |
12237 | ||
12238 | __extension__ extern __inline uint16x8_t | |
12239 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12240 | __arm_vshrnbq_m_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p) | |
12241 | { | |
12242 | return __builtin_mve_vshrnbq_m_n_uv4si (__a, __b, __imm, __p); | |
12243 | } | |
12244 | ||
12245 | __extension__ extern __inline uint8x16_t | |
12246 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12247 | __arm_vshrnbq_m_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p) | |
12248 | { | |
12249 | return __builtin_mve_vshrnbq_m_n_uv8hi (__a, __b, __imm, __p); | |
12250 | } | |
12251 | ||
12252 | __extension__ extern __inline int16x8_t | |
12253 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12254 | __arm_vshrntq_m_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p) | |
12255 | { | |
12256 | return __builtin_mve_vshrntq_m_n_sv4si (__a, __b, __imm, __p); | |
12257 | } | |
12258 | ||
12259 | __extension__ extern __inline int8x16_t | |
12260 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12261 | __arm_vshrntq_m_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p) | |
12262 | { | |
12263 | return __builtin_mve_vshrntq_m_n_sv8hi (__a, __b, __imm, __p); | |
12264 | } | |
12265 | ||
12266 | __extension__ extern __inline uint16x8_t | |
12267 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12268 | __arm_vshrntq_m_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p) | |
12269 | { | |
12270 | return __builtin_mve_vshrntq_m_n_uv4si (__a, __b, __imm, __p); | |
12271 | } | |
12272 | ||
12273 | __extension__ extern __inline uint8x16_t | |
12274 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12275 | __arm_vshrntq_m_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p) | |
12276 | { | |
12277 | return __builtin_mve_vshrntq_m_n_uv8hi (__a, __b, __imm, __p); | |
12278 | } | |
12279 | ||
4ff68575 SP |
/* Unpredicated MVE store intrinsics:
   - vstrbq_scatter_offset_*: byte scatter-store — each element of __value
     is stored to __base + __offset[lane] (VSTRB; elements wider than a
     byte store their low byte, per the ACLE).
   - vstrbq_*: contiguous byte store of __value starting at __addr.
   - vstrwq_scatter_base_*: word scatter using a vector of base addresses
     in __addr plus a constant byte offset (VSTRW base form).
   The (__builtin_neon_qi *) casts only adapt the pointer type to the
   builtin's prototype; they do not change the access performed.  */
12280 | __extension__ extern __inline void |
12281 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12282 | __arm_vstrbq_scatter_offset_s8 (int8_t * __base, uint8x16_t __offset, int8x16_t __value) | |
12283 | { | |
12284 | __builtin_mve_vstrbq_scatter_offset_sv16qi ((__builtin_neon_qi *) __base, __offset, __value); | |
12285 | } | |
12286 | ||
12287 | __extension__ extern __inline void | |
12288 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12289 | __arm_vstrbq_scatter_offset_s32 (int8_t * __base, uint32x4_t __offset, int32x4_t __value) | |
12290 | { | |
12291 | __builtin_mve_vstrbq_scatter_offset_sv4si ((__builtin_neon_qi *) __base, __offset, __value); | |
12292 | } | |
12293 | ||
12294 | __extension__ extern __inline void | |
12295 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12296 | __arm_vstrbq_scatter_offset_s16 (int8_t * __base, uint16x8_t __offset, int16x8_t __value) | |
12297 | { | |
12298 | __builtin_mve_vstrbq_scatter_offset_sv8hi ((__builtin_neon_qi *) __base, __offset, __value); | |
12299 | } | |
12300 | ||
12301 | __extension__ extern __inline void | |
12302 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12303 | __arm_vstrbq_scatter_offset_u8 (uint8_t * __base, uint8x16_t __offset, uint8x16_t __value) | |
12304 | { | |
12305 | __builtin_mve_vstrbq_scatter_offset_uv16qi ((__builtin_neon_qi *) __base, __offset, __value); | |
12306 | } | |
12307 | ||
12308 | __extension__ extern __inline void | |
12309 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12310 | __arm_vstrbq_scatter_offset_u32 (uint8_t * __base, uint32x4_t __offset, uint32x4_t __value) | |
12311 | { | |
12312 | __builtin_mve_vstrbq_scatter_offset_uv4si ((__builtin_neon_qi *) __base, __offset, __value); | |
12313 | } | |
12314 | ||
12315 | __extension__ extern __inline void | |
12316 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12317 | __arm_vstrbq_scatter_offset_u16 (uint8_t * __base, uint16x8_t __offset, uint16x8_t __value) | |
12318 | { | |
12319 | __builtin_mve_vstrbq_scatter_offset_uv8hi ((__builtin_neon_qi *) __base, __offset, __value); | |
12320 | } | |
12321 | ||
12322 | __extension__ extern __inline void | |
12323 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12324 | __arm_vstrbq_s8 (int8_t * __addr, int8x16_t __value) | |
12325 | { | |
12326 | __builtin_mve_vstrbq_sv16qi ((__builtin_neon_qi *) __addr, __value); | |
12327 | } | |
12328 | ||
12329 | __extension__ extern __inline void | |
12330 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12331 | __arm_vstrbq_s32 (int8_t * __addr, int32x4_t __value) | |
12332 | { | |
12333 | __builtin_mve_vstrbq_sv4si ((__builtin_neon_qi *) __addr, __value); | |
12334 | } | |
12335 | ||
12336 | __extension__ extern __inline void | |
12337 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12338 | __arm_vstrbq_s16 (int8_t * __addr, int16x8_t __value) | |
12339 | { | |
12340 | __builtin_mve_vstrbq_sv8hi ((__builtin_neon_qi *) __addr, __value); | |
12341 | } | |
12342 | ||
12343 | __extension__ extern __inline void | |
12344 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12345 | __arm_vstrbq_u8 (uint8_t * __addr, uint8x16_t __value) | |
12346 | { | |
12347 | __builtin_mve_vstrbq_uv16qi ((__builtin_neon_qi *) __addr, __value); | |
12348 | } | |
12349 | ||
12350 | __extension__ extern __inline void | |
12351 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12352 | __arm_vstrbq_u32 (uint8_t * __addr, uint32x4_t __value) | |
12353 | { | |
12354 | __builtin_mve_vstrbq_uv4si ((__builtin_neon_qi *) __addr, __value); | |
12355 | } | |
12356 | ||
12357 | __extension__ extern __inline void | |
12358 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12359 | __arm_vstrbq_u16 (uint8_t * __addr, uint16x8_t __value) | |
12360 | { | |
12361 | __builtin_mve_vstrbq_uv8hi ((__builtin_neon_qi *) __addr, __value); | |
12362 | } | |
12363 | ||
12364 | __extension__ extern __inline void | |
12365 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12366 | __arm_vstrwq_scatter_base_s32 (uint32x4_t __addr, const int __offset, int32x4_t __value) | |
12367 | { | |
12368 | __builtin_mve_vstrwq_scatter_base_sv4si (__addr, __offset, __value); | |
12369 | } | |
12370 | ||
12371 | __extension__ extern __inline void | |
12372 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12373 | __arm_vstrwq_scatter_base_u32 (uint32x4_t __addr, const int __offset, uint32x4_t __value) | |
12374 | { | |
12375 | __builtin_mve_vstrwq_scatter_base_uv4si (__addr, __offset, __value); | |
12376 | } | |
535a8645 SP |
12377 | |
/* Unpredicated MVE load intrinsics — the mirror image of the stores above:
   - vldrbq_gather_offset_*: byte gather-load from __base + __offset[lane]
     (VLDRB; bytes are widened — sign- or zero-extended per the s/u suffix —
     when the destination element is wider than 8 bits, per the ACLE).
   - vldrbq_*: contiguous byte load starting at __base.
   - vldrwq_gather_base_*: word gather from a vector of base addresses in
     __addr plus a constant byte offset.
   The (__builtin_neon_qi *) casts adapt the pointer type to the builtin's
   prototype only.  */
12378 | __extension__ extern __inline uint8x16_t | |
12379 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12380 | __arm_vldrbq_gather_offset_u8 (uint8_t const * __base, uint8x16_t __offset) | |
12381 | { | |
12382 | return __builtin_mve_vldrbq_gather_offset_uv16qi ((__builtin_neon_qi *) __base, __offset); | |
12383 | } | |
12384 | ||
12385 | __extension__ extern __inline int8x16_t | |
12386 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12387 | __arm_vldrbq_gather_offset_s8 (int8_t const * __base, uint8x16_t __offset) | |
12388 | { | |
12389 | return __builtin_mve_vldrbq_gather_offset_sv16qi ((__builtin_neon_qi *) __base, __offset); | |
12390 | } | |
12391 | ||
12392 | __extension__ extern __inline int8x16_t | |
12393 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12394 | __arm_vldrbq_s8 (int8_t const * __base) | |
12395 | { | |
12396 | return __builtin_mve_vldrbq_sv16qi ((__builtin_neon_qi *) __base); | |
12397 | } | |
12398 | ||
12399 | __extension__ extern __inline uint8x16_t | |
12400 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12401 | __arm_vldrbq_u8 (uint8_t const * __base) | |
12402 | { | |
12403 | return __builtin_mve_vldrbq_uv16qi ((__builtin_neon_qi *) __base) ; | |
12404 | } | |
12405 | ||
12406 | __extension__ extern __inline uint16x8_t | |
12407 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12408 | __arm_vldrbq_gather_offset_u16 (uint8_t const * __base, uint16x8_t __offset) | |
12409 | { | |
12410 | return __builtin_mve_vldrbq_gather_offset_uv8hi ((__builtin_neon_qi *) __base, __offset); | |
12411 | } | |
12412 | ||
12413 | __extension__ extern __inline int16x8_t | |
12414 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12415 | __arm_vldrbq_gather_offset_s16 (int8_t const * __base, uint16x8_t __offset) | |
12416 | { | |
12417 | return __builtin_mve_vldrbq_gather_offset_sv8hi ((__builtin_neon_qi *) __base, __offset); | |
12418 | } | |
12419 | ||
12420 | __extension__ extern __inline int16x8_t | |
12421 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12422 | __arm_vldrbq_s16 (int8_t const * __base) | |
12423 | { | |
12424 | return __builtin_mve_vldrbq_sv8hi ((__builtin_neon_qi *) __base) ; | |
12425 | } | |
12426 | ||
12427 | __extension__ extern __inline uint16x8_t | |
12428 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12429 | __arm_vldrbq_u16 (uint8_t const * __base) | |
12430 | { | |
12431 | return __builtin_mve_vldrbq_uv8hi ((__builtin_neon_qi *) __base) ; | |
12432 | } | |
12433 | ||
12434 | __extension__ extern __inline uint32x4_t | |
12435 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12436 | __arm_vldrbq_gather_offset_u32 (uint8_t const * __base, uint32x4_t __offset) | |
12437 | { | |
12438 | return __builtin_mve_vldrbq_gather_offset_uv4si ((__builtin_neon_qi *) __base, __offset); | |
12439 | } | |
12440 | ||
12441 | __extension__ extern __inline int32x4_t | |
12442 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12443 | __arm_vldrbq_gather_offset_s32 (int8_t const * __base, uint32x4_t __offset) | |
12444 | { | |
12445 | return __builtin_mve_vldrbq_gather_offset_sv4si ((__builtin_neon_qi *) __base, __offset); | |
12446 | } | |
12447 | ||
12448 | __extension__ extern __inline int32x4_t | |
12449 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12450 | __arm_vldrbq_s32 (int8_t const * __base) | |
12451 | { | |
12452 | return __builtin_mve_vldrbq_sv4si ((__builtin_neon_qi *) __base) ; | |
12453 | } | |
12454 | ||
12455 | __extension__ extern __inline uint32x4_t | |
12456 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12457 | __arm_vldrbq_u32 (uint8_t const * __base) | |
12458 | { | |
12459 | return __builtin_mve_vldrbq_uv4si ((__builtin_neon_qi *) __base) ; | |
12460 | } | |
12461 | ||
12462 | __extension__ extern __inline int32x4_t | |
12463 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12464 | __arm_vldrwq_gather_base_s32 (uint32x4_t __addr, const int __offset) | |
12465 | { | |
12466 | return __builtin_mve_vldrwq_gather_base_sv4si (__addr, __offset); | |
12467 | } | |
12468 | ||
12469 | __extension__ extern __inline uint32x4_t | |
12470 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12471 | __arm_vldrwq_gather_base_u32 (uint32x4_t __addr, const int __offset) | |
12472 | { | |
12473 | return __builtin_mve_vldrwq_gather_base_uv4si (__addr, __offset); | |
12474 | } | |
12475 | ||
405e918c SP |
/* Predicated ("_p") variants of the stores defined above: identical
   operations with an extra trailing mve_pred16_t __p — per the ACLE,
   lanes whose predicate bit is clear are not stored.  Covers the
   contiguous byte stores (vstrbq_p), the byte scatter-stores
   (vstrbq_scatter_offset_p) and the word base-scatter stores
   (vstrwq_scatter_base_p).  Each wrapper forwards unchanged to the
   builtin; the (__builtin_neon_qi *) casts adapt the pointer type only.  */
12476 | __extension__ extern __inline void |
12477 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12478 | __arm_vstrbq_p_s8 (int8_t * __addr, int8x16_t __value, mve_pred16_t __p) | |
12479 | { | |
12480 | __builtin_mve_vstrbq_p_sv16qi ((__builtin_neon_qi *) __addr, __value, __p); | |
12481 | } | |
12482 | ||
12483 | __extension__ extern __inline void | |
12484 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12485 | __arm_vstrbq_p_s32 (int8_t * __addr, int32x4_t __value, mve_pred16_t __p) | |
12486 | { | |
12487 | __builtin_mve_vstrbq_p_sv4si ((__builtin_neon_qi *) __addr, __value, __p); | |
12488 | } | |
12489 | ||
12490 | __extension__ extern __inline void | |
12491 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12492 | __arm_vstrbq_p_s16 (int8_t * __addr, int16x8_t __value, mve_pred16_t __p) | |
12493 | { | |
12494 | __builtin_mve_vstrbq_p_sv8hi ((__builtin_neon_qi *) __addr, __value, __p); | |
12495 | } | |
12496 | ||
12497 | __extension__ extern __inline void | |
12498 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12499 | __arm_vstrbq_p_u8 (uint8_t * __addr, uint8x16_t __value, mve_pred16_t __p) | |
12500 | { | |
12501 | __builtin_mve_vstrbq_p_uv16qi ((__builtin_neon_qi *) __addr, __value, __p); | |
12502 | } | |
12503 | ||
12504 | __extension__ extern __inline void | |
12505 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12506 | __arm_vstrbq_p_u32 (uint8_t * __addr, uint32x4_t __value, mve_pred16_t __p) | |
12507 | { | |
12508 | __builtin_mve_vstrbq_p_uv4si ((__builtin_neon_qi *) __addr, __value, __p); | |
12509 | } | |
12510 | ||
12511 | __extension__ extern __inline void | |
12512 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12513 | __arm_vstrbq_p_u16 (uint8_t * __addr, uint16x8_t __value, mve_pred16_t __p) | |
12514 | { | |
12515 | __builtin_mve_vstrbq_p_uv8hi ((__builtin_neon_qi *) __addr, __value, __p); | |
12516 | } | |
12517 | ||
12518 | __extension__ extern __inline void | |
12519 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12520 | __arm_vstrbq_scatter_offset_p_s8 (int8_t * __base, uint8x16_t __offset, int8x16_t __value, mve_pred16_t __p) | |
12521 | { | |
12522 | __builtin_mve_vstrbq_scatter_offset_p_sv16qi ((__builtin_neon_qi *) __base, __offset, __value, __p); | |
12523 | } | |
12524 | ||
12525 | __extension__ extern __inline void | |
12526 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12527 | __arm_vstrbq_scatter_offset_p_s32 (int8_t * __base, uint32x4_t __offset, int32x4_t __value, mve_pred16_t __p) | |
12528 | { | |
12529 | __builtin_mve_vstrbq_scatter_offset_p_sv4si ((__builtin_neon_qi *) __base, __offset, __value, __p); | |
12530 | } | |
12531 | ||
12532 | __extension__ extern __inline void | |
12533 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12534 | __arm_vstrbq_scatter_offset_p_s16 (int8_t * __base, uint16x8_t __offset, int16x8_t __value, mve_pred16_t __p) | |
12535 | { | |
12536 | __builtin_mve_vstrbq_scatter_offset_p_sv8hi ((__builtin_neon_qi *) __base, __offset, __value, __p); | |
12537 | } | |
12538 | ||
12539 | __extension__ extern __inline void | |
12540 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12541 | __arm_vstrbq_scatter_offset_p_u8 (uint8_t * __base, uint8x16_t __offset, uint8x16_t __value, mve_pred16_t __p) | |
12542 | { | |
12543 | __builtin_mve_vstrbq_scatter_offset_p_uv16qi ((__builtin_neon_qi *) __base, __offset, __value, __p); | |
12544 | } | |
12545 | ||
12546 | __extension__ extern __inline void | |
12547 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12548 | __arm_vstrbq_scatter_offset_p_u32 (uint8_t * __base, uint32x4_t __offset, uint32x4_t __value, mve_pred16_t __p) | |
12549 | { | |
12550 | __builtin_mve_vstrbq_scatter_offset_p_uv4si ((__builtin_neon_qi *) __base, __offset, __value, __p); | |
12551 | } | |
12552 | ||
12553 | __extension__ extern __inline void | |
12554 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12555 | __arm_vstrbq_scatter_offset_p_u16 (uint8_t * __base, uint16x8_t __offset, uint16x8_t __value, mve_pred16_t __p) | |
12556 | { | |
12557 | __builtin_mve_vstrbq_scatter_offset_p_uv8hi ((__builtin_neon_qi *) __base, __offset, __value, __p); | |
12558 | } | |
12559 | ||
12560 | __extension__ extern __inline void | |
12561 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12562 | __arm_vstrwq_scatter_base_p_s32 (uint32x4_t __addr, const int __offset, int32x4_t __value, mve_pred16_t __p) | |
12563 | { | |
12564 | __builtin_mve_vstrwq_scatter_base_p_sv4si (__addr, __offset, __value, __p); | |
12565 | } | |
12566 | ||
12567 | __extension__ extern __inline void | |
12568 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12569 | __arm_vstrwq_scatter_base_p_u32 (uint32x4_t __addr, const int __offset, uint32x4_t __value, mve_pred16_t __p) | |
12570 | { | |
12571 | __builtin_mve_vstrwq_scatter_base_p_uv4si (__addr, __offset, __value, __p); | |
12572 | } | |
429d607b SP |
12573 | |
/* Zero-predicated ("_z") variants of the loads defined above: identical
   operations with an extra trailing mve_pred16_t __p — per the ACLE "_z"
   convention, lanes whose predicate bit is clear are set to zero instead
   of being loaded.  Covers the byte gather-loads
   (vldrbq_gather_offset_z), the contiguous byte loads (vldrbq_z) and the
   word base-gathers (vldrwq_gather_base_z).  Each wrapper forwards
   unchanged to the builtin.  */
12574 | __extension__ extern __inline int8x16_t | |
12575 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12576 | __arm_vldrbq_gather_offset_z_s8 (int8_t const * __base, uint8x16_t __offset, mve_pred16_t __p) | |
12577 | { | |
12578 | return __builtin_mve_vldrbq_gather_offset_z_sv16qi ((__builtin_neon_qi *) __base, __offset, __p); | |
12579 | } | |
12580 | ||
12581 | __extension__ extern __inline int32x4_t | |
12582 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12583 | __arm_vldrbq_gather_offset_z_s32 (int8_t const * __base, uint32x4_t __offset, mve_pred16_t __p) | |
12584 | { | |
12585 | return __builtin_mve_vldrbq_gather_offset_z_sv4si ((__builtin_neon_qi *) __base, __offset, __p); | |
12586 | } | |
12587 | ||
12588 | __extension__ extern __inline int16x8_t | |
12589 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12590 | __arm_vldrbq_gather_offset_z_s16 (int8_t const * __base, uint16x8_t __offset, mve_pred16_t __p) | |
12591 | { | |
12592 | return __builtin_mve_vldrbq_gather_offset_z_sv8hi ((__builtin_neon_qi *) __base, __offset, __p); | |
12593 | } | |
12594 | ||
12595 | __extension__ extern __inline uint8x16_t | |
12596 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12597 | __arm_vldrbq_gather_offset_z_u8 (uint8_t const * __base, uint8x16_t __offset, mve_pred16_t __p) | |
12598 | { | |
12599 | return __builtin_mve_vldrbq_gather_offset_z_uv16qi ((__builtin_neon_qi *) __base, __offset, __p); | |
12600 | } | |
12601 | ||
12602 | __extension__ extern __inline uint32x4_t | |
12603 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12604 | __arm_vldrbq_gather_offset_z_u32 (uint8_t const * __base, uint32x4_t __offset, mve_pred16_t __p) | |
12605 | { | |
12606 | return __builtin_mve_vldrbq_gather_offset_z_uv4si ((__builtin_neon_qi *) __base, __offset, __p); | |
12607 | } | |
12608 | ||
12609 | __extension__ extern __inline uint16x8_t | |
12610 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12611 | __arm_vldrbq_gather_offset_z_u16 (uint8_t const * __base, uint16x8_t __offset, mve_pred16_t __p) | |
12612 | { | |
12613 | return __builtin_mve_vldrbq_gather_offset_z_uv8hi ((__builtin_neon_qi *) __base, __offset, __p); | |
12614 | } | |
12615 | ||
12616 | __extension__ extern __inline int8x16_t | |
12617 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12618 | __arm_vldrbq_z_s8 (int8_t const * __base, mve_pred16_t __p) | |
12619 | { | |
12620 | return __builtin_mve_vldrbq_z_sv16qi ((__builtin_neon_qi *) __base, __p); | |
12621 | } | |
12622 | ||
12623 | __extension__ extern __inline int32x4_t | |
12624 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12625 | __arm_vldrbq_z_s32 (int8_t const * __base, mve_pred16_t __p) | |
12626 | { | |
12627 | return __builtin_mve_vldrbq_z_sv4si ((__builtin_neon_qi *) __base, __p); | |
12628 | } | |
12629 | ||
12630 | __extension__ extern __inline int16x8_t | |
12631 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12632 | __arm_vldrbq_z_s16 (int8_t const * __base, mve_pred16_t __p) | |
12633 | { | |
12634 | return __builtin_mve_vldrbq_z_sv8hi ((__builtin_neon_qi *) __base, __p); | |
12635 | } | |
12636 | ||
12637 | __extension__ extern __inline uint8x16_t | |
12638 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12639 | __arm_vldrbq_z_u8 (uint8_t const * __base, mve_pred16_t __p) | |
12640 | { | |
12641 | return __builtin_mve_vldrbq_z_uv16qi ((__builtin_neon_qi *) __base, __p); | |
12642 | } | |
12643 | ||
12644 | __extension__ extern __inline uint32x4_t | |
12645 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12646 | __arm_vldrbq_z_u32 (uint8_t const * __base, mve_pred16_t __p) | |
12647 | { | |
12648 | return __builtin_mve_vldrbq_z_uv4si ((__builtin_neon_qi *) __base, __p); | |
12649 | } | |
12650 | ||
12651 | __extension__ extern __inline uint16x8_t | |
12652 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12653 | __arm_vldrbq_z_u16 (uint8_t const * __base, mve_pred16_t __p) | |
12654 | { | |
12655 | return __builtin_mve_vldrbq_z_uv8hi ((__builtin_neon_qi *) __base, __p); | |
12656 | } | |
12657 | ||
12658 | __extension__ extern __inline int32x4_t | |
12659 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12660 | __arm_vldrwq_gather_base_z_s32 (uint32x4_t __addr, const int __offset, mve_pred16_t __p) | |
12661 | { | |
12662 | return __builtin_mve_vldrwq_gather_base_z_sv4si (__addr, __offset, __p); | |
12663 | } | |
12664 | ||
12665 | __extension__ extern __inline uint32x4_t | |
12666 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12667 | __arm_vldrwq_gather_base_z_u32 (uint32x4_t __addr, const int __offset, mve_pred16_t __p) | |
12668 | { | |
12669 | return __builtin_mve_vldrwq_gather_base_z_uv4si (__addr, __offset, __p); | |
12670 | } | |
12671 | ||
bf1e3d5a SP |
/* vld1q_*: contiguous full-vector loads from a naturally-typed pointer
   (the pointer casts to __builtin_neon_qi/hi/si merely adapt to the
   builtin's prototype).  Followed by vldrhq_gather_offset[_z]_*:
   halfword gather-loads from __base + __offset[lane] (VLDRH), with the
   "_z" forms taking a predicate __p that, per the ACLE, zeroes
   false-predicated lanes.  All are thin always-inline forwarders to the
   corresponding GCC builtins.  */
12672 | __extension__ extern __inline int8x16_t |
12673 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12674 | __arm_vld1q_s8 (int8_t const * __base) | |
12675 | { | |
12676 | return __builtin_mve_vld1q_sv16qi ((__builtin_neon_qi *) __base); | |
12677 | } | |
12678 | ||
12679 | __extension__ extern __inline int32x4_t | |
12680 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12681 | __arm_vld1q_s32 (int32_t const * __base) | |
12682 | { | |
12683 | return __builtin_mve_vld1q_sv4si ((__builtin_neon_si *) __base); | |
12684 | } | |
12685 | ||
12686 | __extension__ extern __inline int16x8_t | |
12687 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12688 | __arm_vld1q_s16 (int16_t const * __base) | |
12689 | { | |
12690 | return __builtin_mve_vld1q_sv8hi ((__builtin_neon_hi *) __base); | |
12691 | } | |
12692 | ||
12693 | __extension__ extern __inline uint8x16_t | |
12694 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12695 | __arm_vld1q_u8 (uint8_t const * __base) | |
12696 | { | |
12697 | return __builtin_mve_vld1q_uv16qi ((__builtin_neon_qi *) __base); | |
12698 | } | |
12699 | ||
12700 | __extension__ extern __inline uint32x4_t | |
12701 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12702 | __arm_vld1q_u32 (uint32_t const * __base) | |
12703 | { | |
12704 | return __builtin_mve_vld1q_uv4si ((__builtin_neon_si *) __base); | |
12705 | } | |
12706 | ||
12707 | __extension__ extern __inline uint16x8_t | |
12708 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12709 | __arm_vld1q_u16 (uint16_t const * __base) | |
12710 | { | |
12711 | return __builtin_mve_vld1q_uv8hi ((__builtin_neon_hi *) __base); | |
12712 | } | |
12713 | ||
12714 | __extension__ extern __inline int32x4_t | |
12715 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12716 | __arm_vldrhq_gather_offset_s32 (int16_t const * __base, uint32x4_t __offset) | |
12717 | { | |
12718 | return __builtin_mve_vldrhq_gather_offset_sv4si ((__builtin_neon_hi *) __base, __offset); | |
12719 | } | |
12720 | ||
12721 | __extension__ extern __inline int16x8_t | |
12722 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12723 | __arm_vldrhq_gather_offset_s16 (int16_t const * __base, uint16x8_t __offset) | |
12724 | { | |
12725 | return __builtin_mve_vldrhq_gather_offset_sv8hi ((__builtin_neon_hi *) __base, __offset); | |
12726 | } | |
12727 | ||
12728 | __extension__ extern __inline uint32x4_t | |
12729 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12730 | __arm_vldrhq_gather_offset_u32 (uint16_t const * __base, uint32x4_t __offset) | |
12731 | { | |
12732 | return __builtin_mve_vldrhq_gather_offset_uv4si ((__builtin_neon_hi *) __base, __offset); | |
12733 | } | |
12734 | ||
12735 | __extension__ extern __inline uint16x8_t | |
12736 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12737 | __arm_vldrhq_gather_offset_u16 (uint16_t const * __base, uint16x8_t __offset) | |
12738 | { | |
12739 | return __builtin_mve_vldrhq_gather_offset_uv8hi ((__builtin_neon_hi *) __base, __offset); | |
12740 | } | |
12741 | ||
12742 | __extension__ extern __inline int32x4_t | |
12743 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12744 | __arm_vldrhq_gather_offset_z_s32 (int16_t const * __base, uint32x4_t __offset, mve_pred16_t __p) | |
12745 | { | |
12746 | return __builtin_mve_vldrhq_gather_offset_z_sv4si ((__builtin_neon_hi *) __base, __offset, __p); | |
12747 | } | |
12748 | ||
12749 | __extension__ extern __inline int16x8_t | |
12750 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12751 | __arm_vldrhq_gather_offset_z_s16 (int16_t const * __base, uint16x8_t __offset, mve_pred16_t __p) | |
12752 | { | |
12753 | return __builtin_mve_vldrhq_gather_offset_z_sv8hi ((__builtin_neon_hi *) __base, __offset, __p); | |
12754 | } | |
12755 | ||
12756 | __extension__ extern __inline uint32x4_t | |
12757 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12758 | __arm_vldrhq_gather_offset_z_u32 (uint16_t const * __base, uint32x4_t __offset, mve_pred16_t __p) | |
12759 | { | |
12760 | return __builtin_mve_vldrhq_gather_offset_z_uv4si ((__builtin_neon_hi *) __base, __offset, __p); | |
12761 | } | |
12762 | ||
12763 | __extension__ extern __inline uint16x8_t | |
12764 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12765 | __arm_vldrhq_gather_offset_z_u16 (uint16_t const * __base, uint16x8_t __offset, mve_pred16_t __p) | |
12766 | { | |
12767 | return __builtin_mve_vldrhq_gather_offset_z_uv8hi ((__builtin_neon_hi *) __base, __offset, __p); | |
12768 | } | |
12769 | ||
12770 | __extension__ extern __inline int32x4_t | |
12771 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12772 | __arm_vldrhq_gather_shifted_offset_s32 (int16_t const * __base, uint32x4_t __offset) | |
12773 | { | |
12774 | return __builtin_mve_vldrhq_gather_shifted_offset_sv4si ((__builtin_neon_hi *) __base, __offset); | |
12775 | } | |
12776 | ||
12777 | __extension__ extern __inline int16x8_t | |
12778 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12779 | __arm_vldrhq_gather_shifted_offset_s16 (int16_t const * __base, uint16x8_t __offset) | |
12780 | { | |
12781 | return __builtin_mve_vldrhq_gather_shifted_offset_sv8hi ((__builtin_neon_hi *) __base, __offset); | |
12782 | } | |
12783 | ||
12784 | __extension__ extern __inline uint32x4_t | |
12785 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12786 | __arm_vldrhq_gather_shifted_offset_u32 (uint16_t const * __base, uint32x4_t __offset) | |
12787 | { | |
12788 | return __builtin_mve_vldrhq_gather_shifted_offset_uv4si ((__builtin_neon_hi *) __base, __offset); | |
12789 | } | |
12790 | ||
12791 | __extension__ extern __inline uint16x8_t | |
12792 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12793 | __arm_vldrhq_gather_shifted_offset_u16 (uint16_t const * __base, uint16x8_t __offset) | |
12794 | { | |
12795 | return __builtin_mve_vldrhq_gather_shifted_offset_uv8hi ((__builtin_neon_hi *) __base, __offset); | |
12796 | } | |
12797 | ||
12798 | __extension__ extern __inline int32x4_t | |
12799 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12800 | __arm_vldrhq_gather_shifted_offset_z_s32 (int16_t const * __base, uint32x4_t __offset, mve_pred16_t __p) | |
12801 | { | |
12802 | return __builtin_mve_vldrhq_gather_shifted_offset_z_sv4si ((__builtin_neon_hi *) __base, __offset, __p); | |
12803 | } | |
12804 | ||
12805 | __extension__ extern __inline int16x8_t | |
12806 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12807 | __arm_vldrhq_gather_shifted_offset_z_s16 (int16_t const * __base, uint16x8_t __offset, mve_pred16_t __p) | |
12808 | { | |
12809 | return __builtin_mve_vldrhq_gather_shifted_offset_z_sv8hi ((__builtin_neon_hi *) __base, __offset, __p); | |
12810 | } | |
12811 | ||
12812 | __extension__ extern __inline uint32x4_t | |
12813 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12814 | __arm_vldrhq_gather_shifted_offset_z_u32 (uint16_t const * __base, uint32x4_t __offset, mve_pred16_t __p) | |
12815 | { | |
12816 | return __builtin_mve_vldrhq_gather_shifted_offset_z_uv4si ((__builtin_neon_hi *) __base, __offset, __p); | |
12817 | } | |
12818 | ||
12819 | __extension__ extern __inline uint16x8_t | |
12820 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12821 | __arm_vldrhq_gather_shifted_offset_z_u16 (uint16_t const * __base, uint16x8_t __offset, mve_pred16_t __p) | |
12822 | { | |
12823 | return __builtin_mve_vldrhq_gather_shifted_offset_z_uv8hi ((__builtin_neon_hi *) __base, __offset, __p); | |
12824 | } | |
12825 | ||
12826 | __extension__ extern __inline int32x4_t | |
12827 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12828 | __arm_vldrhq_s32 (int16_t const * __base) | |
12829 | { | |
12830 | return __builtin_mve_vldrhq_sv4si ((__builtin_neon_hi *) __base); | |
12831 | } | |
12832 | ||
12833 | __extension__ extern __inline int16x8_t | |
12834 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12835 | __arm_vldrhq_s16 (int16_t const * __base) | |
12836 | { | |
12837 | return __builtin_mve_vldrhq_sv8hi ((__builtin_neon_hi *) __base); | |
12838 | } | |
12839 | ||
12840 | __extension__ extern __inline uint32x4_t | |
12841 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12842 | __arm_vldrhq_u32 (uint16_t const * __base) | |
12843 | { | |
12844 | return __builtin_mve_vldrhq_uv4si ((__builtin_neon_hi *) __base); | |
12845 | } | |
12846 | ||
12847 | __extension__ extern __inline uint16x8_t | |
12848 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12849 | __arm_vldrhq_u16 (uint16_t const * __base) | |
12850 | { | |
12851 | return __builtin_mve_vldrhq_uv8hi ((__builtin_neon_hi *) __base); | |
12852 | } | |
12853 | ||
12854 | __extension__ extern __inline int32x4_t | |
12855 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12856 | __arm_vldrhq_z_s32 (int16_t const * __base, mve_pred16_t __p) | |
12857 | { | |
12858 | return __builtin_mve_vldrhq_z_sv4si ((__builtin_neon_hi *) __base, __p); | |
12859 | } | |
12860 | ||
12861 | __extension__ extern __inline int16x8_t | |
12862 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12863 | __arm_vldrhq_z_s16 (int16_t const * __base, mve_pred16_t __p) | |
12864 | { | |
12865 | return __builtin_mve_vldrhq_z_sv8hi ((__builtin_neon_hi *) __base, __p); | |
12866 | } | |
12867 | ||
12868 | __extension__ extern __inline uint32x4_t | |
12869 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12870 | __arm_vldrhq_z_u32 (uint16_t const * __base, mve_pred16_t __p) | |
12871 | { | |
12872 | return __builtin_mve_vldrhq_z_uv4si ((__builtin_neon_hi *) __base, __p); | |
12873 | } | |
12874 | ||
12875 | __extension__ extern __inline uint16x8_t | |
12876 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12877 | __arm_vldrhq_z_u16 (uint16_t const * __base, mve_pred16_t __p) | |
12878 | { | |
12879 | return __builtin_mve_vldrhq_z_uv8hi ((__builtin_neon_hi *) __base, __p); | |
12880 | } | |
12881 | ||
12882 | __extension__ extern __inline int32x4_t | |
12883 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12884 | __arm_vldrwq_s32 (int32_t const * __base) | |
12885 | { | |
12886 | return __builtin_mve_vldrwq_sv4si ((__builtin_neon_si *) __base); | |
12887 | } | |
12888 | ||
12889 | __extension__ extern __inline uint32x4_t | |
12890 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12891 | __arm_vldrwq_u32 (uint32_t const * __base) | |
12892 | { | |
12893 | return __builtin_mve_vldrwq_uv4si ((__builtin_neon_si *) __base); | |
12894 | } | |
12895 | ||
12896 | ||
12897 | __extension__ extern __inline int32x4_t | |
12898 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12899 | __arm_vldrwq_z_s32 (int32_t const * __base, mve_pred16_t __p) | |
12900 | { | |
12901 | return __builtin_mve_vldrwq_z_sv4si ((__builtin_neon_si *) __base, __p); | |
12902 | } | |
12903 | ||
12904 | __extension__ extern __inline uint32x4_t | |
12905 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12906 | __arm_vldrwq_z_u32 (uint32_t const * __base, mve_pred16_t __p) | |
12907 | { | |
12908 | return __builtin_mve_vldrwq_z_uv4si ((__builtin_neon_si *) __base, __p); | |
12909 | } | |
12910 | ||
4cc23303 SP |
12911 | __extension__ extern __inline int64x2_t |
12912 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12913 | __arm_vldrdq_gather_base_s64 (uint64x2_t __addr, const int __offset) | |
12914 | { | |
12915 | return __builtin_mve_vldrdq_gather_base_sv2di (__addr, __offset); | |
12916 | } | |
12917 | ||
12918 | __extension__ extern __inline uint64x2_t | |
12919 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12920 | __arm_vldrdq_gather_base_u64 (uint64x2_t __addr, const int __offset) | |
12921 | { | |
12922 | return __builtin_mve_vldrdq_gather_base_uv2di (__addr, __offset); | |
12923 | } | |
12924 | ||
12925 | __extension__ extern __inline int64x2_t | |
12926 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12927 | __arm_vldrdq_gather_base_z_s64 (uint64x2_t __addr, const int __offset, mve_pred16_t __p) | |
12928 | { | |
12929 | return __builtin_mve_vldrdq_gather_base_z_sv2di (__addr, __offset, __p); | |
12930 | } | |
12931 | ||
12932 | __extension__ extern __inline uint64x2_t | |
12933 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12934 | __arm_vldrdq_gather_base_z_u64 (uint64x2_t __addr, const int __offset, mve_pred16_t __p) | |
12935 | { | |
12936 | return __builtin_mve_vldrdq_gather_base_z_uv2di (__addr, __offset, __p); | |
12937 | } | |
12938 | ||
12939 | __extension__ extern __inline int64x2_t | |
12940 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12941 | __arm_vldrdq_gather_offset_s64 (int64_t const * __base, uint64x2_t __offset) | |
12942 | { | |
12943 | return __builtin_mve_vldrdq_gather_offset_sv2di ((__builtin_neon_di *) __base, __offset); | |
12944 | } | |
12945 | ||
12946 | __extension__ extern __inline uint64x2_t | |
12947 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12948 | __arm_vldrdq_gather_offset_u64 (uint64_t const * __base, uint64x2_t __offset) | |
12949 | { | |
12950 | return __builtin_mve_vldrdq_gather_offset_uv2di ((__builtin_neon_di *) __base, __offset); | |
12951 | } | |
12952 | ||
12953 | __extension__ extern __inline int64x2_t | |
12954 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12955 | __arm_vldrdq_gather_offset_z_s64 (int64_t const * __base, uint64x2_t __offset, mve_pred16_t __p) | |
12956 | { | |
12957 | return __builtin_mve_vldrdq_gather_offset_z_sv2di ((__builtin_neon_di *) __base, __offset, __p); | |
12958 | } | |
12959 | ||
12960 | ||
12961 | __extension__ extern __inline uint64x2_t | |
12962 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12963 | __arm_vldrdq_gather_offset_z_u64 (uint64_t const * __base, uint64x2_t __offset, mve_pred16_t __p) | |
12964 | { | |
12965 | return __builtin_mve_vldrdq_gather_offset_z_uv2di ((__builtin_neon_di *) __base, __offset, __p); | |
12966 | } | |
12967 | ||
12968 | __extension__ extern __inline int64x2_t | |
12969 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12970 | __arm_vldrdq_gather_shifted_offset_s64 (int64_t const * __base, uint64x2_t __offset) | |
12971 | { | |
12972 | return __builtin_mve_vldrdq_gather_shifted_offset_sv2di ((__builtin_neon_di *) __base, __offset); | |
12973 | } | |
12974 | ||
12975 | __extension__ extern __inline uint64x2_t | |
12976 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12977 | __arm_vldrdq_gather_shifted_offset_u64 (uint64_t const * __base, uint64x2_t __offset) | |
12978 | { | |
12979 | return __builtin_mve_vldrdq_gather_shifted_offset_uv2di ((__builtin_neon_di *) __base, __offset); | |
12980 | } | |
12981 | ||
12982 | __extension__ extern __inline int64x2_t | |
12983 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12984 | __arm_vldrdq_gather_shifted_offset_z_s64 (int64_t const * __base, uint64x2_t __offset, mve_pred16_t __p) | |
12985 | { | |
12986 | return __builtin_mve_vldrdq_gather_shifted_offset_z_sv2di ((__builtin_neon_di *) __base, __offset, __p); | |
12987 | } | |
12988 | ||
12989 | __extension__ extern __inline uint64x2_t | |
12990 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12991 | __arm_vldrdq_gather_shifted_offset_z_u64 (uint64_t const * __base, uint64x2_t __offset, mve_pred16_t __p) | |
12992 | { | |
12993 | return __builtin_mve_vldrdq_gather_shifted_offset_z_uv2di ((__builtin_neon_di *) __base, __offset, __p); | |
12994 | } | |
12995 | ||
12996 | __extension__ extern __inline int32x4_t | |
12997 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
12998 | __arm_vldrwq_gather_offset_s32 (int32_t const * __base, uint32x4_t __offset) | |
12999 | { | |
13000 | return __builtin_mve_vldrwq_gather_offset_sv4si ((__builtin_neon_si *) __base, __offset); | |
13001 | } | |
13002 | ||
13003 | __extension__ extern __inline uint32x4_t | |
13004 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13005 | __arm_vldrwq_gather_offset_u32 (uint32_t const * __base, uint32x4_t __offset) | |
13006 | { | |
13007 | return __builtin_mve_vldrwq_gather_offset_uv4si ((__builtin_neon_si *) __base, __offset); | |
13008 | } | |
13009 | ||
13010 | __extension__ extern __inline int32x4_t | |
13011 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13012 | __arm_vldrwq_gather_offset_z_s32 (int32_t const * __base, uint32x4_t __offset, mve_pred16_t __p) | |
13013 | { | |
13014 | return __builtin_mve_vldrwq_gather_offset_z_sv4si ((__builtin_neon_si *) __base, __offset, __p); | |
13015 | } | |
13016 | ||
13017 | __extension__ extern __inline uint32x4_t | |
13018 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13019 | __arm_vldrwq_gather_offset_z_u32 (uint32_t const * __base, uint32x4_t __offset, mve_pred16_t __p) | |
13020 | { | |
13021 | return __builtin_mve_vldrwq_gather_offset_z_uv4si ((__builtin_neon_si *) __base, __offset, __p); | |
13022 | } | |
13023 | ||
13024 | __extension__ extern __inline int32x4_t | |
13025 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13026 | __arm_vldrwq_gather_shifted_offset_s32 (int32_t const * __base, uint32x4_t __offset) | |
13027 | { | |
13028 | return __builtin_mve_vldrwq_gather_shifted_offset_sv4si ((__builtin_neon_si *) __base, __offset); | |
13029 | } | |
13030 | ||
13031 | __extension__ extern __inline uint32x4_t | |
13032 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13033 | __arm_vldrwq_gather_shifted_offset_u32 (uint32_t const * __base, uint32x4_t __offset) | |
13034 | { | |
13035 | return __builtin_mve_vldrwq_gather_shifted_offset_uv4si ((__builtin_neon_si *) __base, __offset); | |
13036 | } | |
13037 | ||
13038 | __extension__ extern __inline int32x4_t | |
13039 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13040 | __arm_vldrwq_gather_shifted_offset_z_s32 (int32_t const * __base, uint32x4_t __offset, mve_pred16_t __p) | |
13041 | { | |
13042 | return __builtin_mve_vldrwq_gather_shifted_offset_z_sv4si ((__builtin_neon_si *) __base, __offset, __p); | |
13043 | } | |
13044 | ||
13045 | __extension__ extern __inline uint32x4_t | |
13046 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13047 | __arm_vldrwq_gather_shifted_offset_z_u32 (uint32_t const * __base, uint32x4_t __offset, mve_pred16_t __p) | |
13048 | { | |
13049 | return __builtin_mve_vldrwq_gather_shifted_offset_z_uv4si ((__builtin_neon_si *) __base, __offset, __p); | |
13050 | } | |
13051 | ||
5cad47e0 SP |
13052 | __extension__ extern __inline void |
13053 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13054 | __arm_vst1q_s8 (int8_t * __addr, int8x16_t __value) | |
13055 | { | |
13056 | __builtin_mve_vst1q_sv16qi ((__builtin_neon_qi *) __addr, __value); | |
13057 | } | |
13058 | ||
13059 | __extension__ extern __inline void | |
13060 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13061 | __arm_vst1q_s32 (int32_t * __addr, int32x4_t __value) | |
13062 | { | |
13063 | __builtin_mve_vst1q_sv4si ((__builtin_neon_si *) __addr, __value); | |
13064 | } | |
13065 | ||
13066 | __extension__ extern __inline void | |
13067 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13068 | __arm_vst1q_s16 (int16_t * __addr, int16x8_t __value) | |
13069 | { | |
13070 | __builtin_mve_vst1q_sv8hi ((__builtin_neon_hi *) __addr, __value); | |
13071 | } | |
13072 | ||
13073 | __extension__ extern __inline void | |
13074 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13075 | __arm_vst1q_u8 (uint8_t * __addr, uint8x16_t __value) | |
13076 | { | |
13077 | __builtin_mve_vst1q_uv16qi ((__builtin_neon_qi *) __addr, __value); | |
13078 | } | |
13079 | ||
13080 | __extension__ extern __inline void | |
13081 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13082 | __arm_vst1q_u32 (uint32_t * __addr, uint32x4_t __value) | |
13083 | { | |
13084 | __builtin_mve_vst1q_uv4si ((__builtin_neon_si *) __addr, __value); | |
13085 | } | |
13086 | ||
13087 | __extension__ extern __inline void | |
13088 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13089 | __arm_vst1q_u16 (uint16_t * __addr, uint16x8_t __value) | |
13090 | { | |
13091 | __builtin_mve_vst1q_uv8hi ((__builtin_neon_hi *) __addr, __value); | |
13092 | } | |
13093 | ||
13094 | __extension__ extern __inline void | |
13095 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13096 | __arm_vstrhq_scatter_offset_s32 (int16_t * __base, uint32x4_t __offset, int32x4_t __value) | |
13097 | { | |
13098 | __builtin_mve_vstrhq_scatter_offset_sv4si ((__builtin_neon_hi *) __base, __offset, __value); | |
13099 | } | |
13100 | ||
13101 | __extension__ extern __inline void | |
13102 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13103 | __arm_vstrhq_scatter_offset_s16 (int16_t * __base, uint16x8_t __offset, int16x8_t __value) | |
13104 | { | |
13105 | __builtin_mve_vstrhq_scatter_offset_sv8hi ((__builtin_neon_hi *) __base, __offset, __value); | |
13106 | } | |
13107 | ||
13108 | __extension__ extern __inline void | |
13109 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13110 | __arm_vstrhq_scatter_offset_u32 (uint16_t * __base, uint32x4_t __offset, uint32x4_t __value) | |
13111 | { | |
13112 | __builtin_mve_vstrhq_scatter_offset_uv4si ((__builtin_neon_hi *) __base, __offset, __value); | |
13113 | } | |
13114 | ||
13115 | __extension__ extern __inline void | |
13116 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13117 | __arm_vstrhq_scatter_offset_u16 (uint16_t * __base, uint16x8_t __offset, uint16x8_t __value) | |
13118 | { | |
13119 | __builtin_mve_vstrhq_scatter_offset_uv8hi ((__builtin_neon_hi *) __base, __offset, __value); | |
13120 | } | |
13121 | ||
13122 | __extension__ extern __inline void | |
13123 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13124 | __arm_vstrhq_scatter_offset_p_s32 (int16_t * __base, uint32x4_t __offset, int32x4_t __value, mve_pred16_t __p) | |
13125 | { | |
13126 | __builtin_mve_vstrhq_scatter_offset_p_sv4si ((__builtin_neon_hi *) __base, __offset, __value, __p); | |
13127 | } | |
13128 | ||
13129 | __extension__ extern __inline void | |
13130 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13131 | __arm_vstrhq_scatter_offset_p_s16 (int16_t * __base, uint16x8_t __offset, int16x8_t __value, mve_pred16_t __p) | |
13132 | { | |
13133 | __builtin_mve_vstrhq_scatter_offset_p_sv8hi ((__builtin_neon_hi *) __base, __offset, __value, __p); | |
13134 | } | |
13135 | ||
13136 | __extension__ extern __inline void | |
13137 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13138 | __arm_vstrhq_scatter_offset_p_u32 (uint16_t * __base, uint32x4_t __offset, uint32x4_t __value, mve_pred16_t __p) | |
13139 | { | |
13140 | __builtin_mve_vstrhq_scatter_offset_p_uv4si ((__builtin_neon_hi *) __base, __offset, __value, __p); | |
13141 | } | |
13142 | ||
13143 | __extension__ extern __inline void | |
13144 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13145 | __arm_vstrhq_scatter_offset_p_u16 (uint16_t * __base, uint16x8_t __offset, uint16x8_t __value, mve_pred16_t __p) | |
13146 | { | |
13147 | __builtin_mve_vstrhq_scatter_offset_p_uv8hi ((__builtin_neon_hi *) __base, __offset, __value, __p); | |
13148 | } | |
13149 | ||
13150 | __extension__ extern __inline void | |
13151 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13152 | __arm_vstrhq_scatter_shifted_offset_s32 (int16_t * __base, uint32x4_t __offset, int32x4_t __value) | |
13153 | { | |
13154 | __builtin_mve_vstrhq_scatter_shifted_offset_sv4si ((__builtin_neon_hi *) __base, __offset, __value); | |
13155 | } | |
13156 | ||
13157 | __extension__ extern __inline void | |
13158 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13159 | __arm_vstrhq_scatter_shifted_offset_s16 (int16_t * __base, uint16x8_t __offset, int16x8_t __value) | |
13160 | { | |
13161 | __builtin_mve_vstrhq_scatter_shifted_offset_sv8hi ((__builtin_neon_hi *) __base, __offset, __value); | |
13162 | } | |
13163 | ||
13164 | __extension__ extern __inline void | |
13165 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13166 | __arm_vstrhq_scatter_shifted_offset_u32 (uint16_t * __base, uint32x4_t __offset, uint32x4_t __value) | |
13167 | { | |
13168 | __builtin_mve_vstrhq_scatter_shifted_offset_uv4si ((__builtin_neon_hi *) __base, __offset, __value); | |
13169 | } | |
13170 | ||
13171 | __extension__ extern __inline void | |
13172 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13173 | __arm_vstrhq_scatter_shifted_offset_u16 (uint16_t * __base, uint16x8_t __offset, uint16x8_t __value) | |
13174 | { | |
13175 | __builtin_mve_vstrhq_scatter_shifted_offset_uv8hi ((__builtin_neon_hi *) __base, __offset, __value); | |
13176 | } | |
13177 | ||
13178 | __extension__ extern __inline void | |
13179 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13180 | __arm_vstrhq_scatter_shifted_offset_p_s32 (int16_t * __base, uint32x4_t __offset, int32x4_t __value, mve_pred16_t __p) | |
13181 | { | |
13182 | __builtin_mve_vstrhq_scatter_shifted_offset_p_sv4si ((__builtin_neon_hi *) __base, __offset, __value, __p); | |
13183 | } | |
13184 | ||
13185 | __extension__ extern __inline void | |
13186 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13187 | __arm_vstrhq_scatter_shifted_offset_p_s16 (int16_t * __base, uint16x8_t __offset, int16x8_t __value, mve_pred16_t __p) | |
13188 | { | |
13189 | __builtin_mve_vstrhq_scatter_shifted_offset_p_sv8hi ((__builtin_neon_hi *) __base, __offset, __value, __p); | |
13190 | } | |
13191 | ||
13192 | __extension__ extern __inline void | |
13193 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13194 | __arm_vstrhq_scatter_shifted_offset_p_u32 (uint16_t * __base, uint32x4_t __offset, uint32x4_t __value, mve_pred16_t __p) | |
13195 | { | |
13196 | __builtin_mve_vstrhq_scatter_shifted_offset_p_uv4si ((__builtin_neon_hi *) __base, __offset, __value, __p); | |
13197 | } | |
13198 | ||
13199 | __extension__ extern __inline void | |
13200 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13201 | __arm_vstrhq_scatter_shifted_offset_p_u16 (uint16_t * __base, uint16x8_t __offset, uint16x8_t __value, mve_pred16_t __p) | |
13202 | { | |
13203 | __builtin_mve_vstrhq_scatter_shifted_offset_p_uv8hi ((__builtin_neon_hi *) __base, __offset, __value, __p); | |
13204 | } | |
13205 | ||
13206 | __extension__ extern __inline void | |
13207 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13208 | __arm_vstrhq_s32 (int16_t * __addr, int32x4_t __value) | |
13209 | { | |
13210 | __builtin_mve_vstrhq_sv4si ((__builtin_neon_hi *) __addr, __value); | |
13211 | } | |
13212 | ||
13213 | __extension__ extern __inline void | |
13214 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13215 | __arm_vstrhq_s16 (int16_t * __addr, int16x8_t __value) | |
13216 | { | |
13217 | __builtin_mve_vstrhq_sv8hi ((__builtin_neon_hi *) __addr, __value); | |
13218 | } | |
13219 | ||
13220 | __extension__ extern __inline void | |
13221 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13222 | __arm_vstrhq_u32 (uint16_t * __addr, uint32x4_t __value) | |
13223 | { | |
13224 | __builtin_mve_vstrhq_uv4si ((__builtin_neon_hi *) __addr, __value); | |
13225 | } | |
13226 | ||
13227 | __extension__ extern __inline void | |
13228 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13229 | __arm_vstrhq_u16 (uint16_t * __addr, uint16x8_t __value) | |
13230 | { | |
13231 | __builtin_mve_vstrhq_uv8hi ((__builtin_neon_hi *) __addr, __value); | |
13232 | } | |
13233 | ||
13234 | __extension__ extern __inline void | |
13235 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13236 | __arm_vstrhq_p_s32 (int16_t * __addr, int32x4_t __value, mve_pred16_t __p) | |
13237 | { | |
13238 | __builtin_mve_vstrhq_p_sv4si ((__builtin_neon_hi *) __addr, __value, __p); | |
13239 | } | |
13240 | ||
13241 | __extension__ extern __inline void | |
13242 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13243 | __arm_vstrhq_p_s16 (int16_t * __addr, int16x8_t __value, mve_pred16_t __p) | |
13244 | { | |
13245 | __builtin_mve_vstrhq_p_sv8hi ((__builtin_neon_hi *) __addr, __value, __p); | |
13246 | } | |
13247 | ||
13248 | __extension__ extern __inline void | |
13249 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13250 | __arm_vstrhq_p_u32 (uint16_t * __addr, uint32x4_t __value, mve_pred16_t __p) | |
13251 | { | |
13252 | __builtin_mve_vstrhq_p_uv4si ((__builtin_neon_hi *) __addr, __value, __p); | |
13253 | } | |
13254 | ||
13255 | __extension__ extern __inline void | |
13256 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13257 | __arm_vstrhq_p_u16 (uint16_t * __addr, uint16x8_t __value, mve_pred16_t __p) | |
13258 | { | |
13259 | __builtin_mve_vstrhq_p_uv8hi ((__builtin_neon_hi *) __addr, __value, __p); | |
13260 | } | |
13261 | ||
13262 | __extension__ extern __inline void | |
13263 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13264 | __arm_vstrwq_s32 (int32_t * __addr, int32x4_t __value) | |
13265 | { | |
13266 | __builtin_mve_vstrwq_sv4si ((__builtin_neon_si *) __addr, __value); | |
13267 | } | |
13268 | ||
13269 | __extension__ extern __inline void | |
13270 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13271 | __arm_vstrwq_u32 (uint32_t * __addr, uint32x4_t __value) | |
13272 | { | |
13273 | __builtin_mve_vstrwq_uv4si ((__builtin_neon_si *) __addr, __value); | |
13274 | } | |
13275 | ||
13276 | __extension__ extern __inline void | |
13277 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13278 | __arm_vstrwq_p_s32 (int32_t * __addr, int32x4_t __value, mve_pred16_t __p) | |
13279 | { | |
13280 | __builtin_mve_vstrwq_p_sv4si ((__builtin_neon_si *) __addr, __value, __p); | |
13281 | } | |
13282 | ||
13283 | __extension__ extern __inline void | |
13284 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13285 | __arm_vstrwq_p_u32 (uint32_t * __addr, uint32x4_t __value, mve_pred16_t __p) | |
13286 | { | |
13287 | __builtin_mve_vstrwq_p_uv4si ((__builtin_neon_si *) __addr, __value, __p); | |
13288 | } | |
13289 | ||
7a5fffa5 SP |
13290 | __extension__ extern __inline void |
13291 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13292 | __arm_vstrdq_scatter_base_p_s64 (uint64x2_t __addr, const int __offset, int64x2_t __value, mve_pred16_t __p) | |
13293 | { | |
13294 | __builtin_mve_vstrdq_scatter_base_p_sv2di (__addr, __offset, __value, __p); | |
13295 | } | |
13296 | ||
13297 | __extension__ extern __inline void | |
13298 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13299 | __arm_vstrdq_scatter_base_p_u64 (uint64x2_t __addr, const int __offset, uint64x2_t __value, mve_pred16_t __p) | |
13300 | { | |
13301 | __builtin_mve_vstrdq_scatter_base_p_uv2di (__addr, __offset, __value, __p); | |
13302 | } | |
13303 | ||
13304 | __extension__ extern __inline void | |
13305 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13306 | __arm_vstrdq_scatter_base_s64 (uint64x2_t __addr, const int __offset, int64x2_t __value) | |
13307 | { | |
13308 | __builtin_mve_vstrdq_scatter_base_sv2di (__addr, __offset, __value); | |
13309 | } | |
13310 | ||
13311 | __extension__ extern __inline void | |
13312 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13313 | __arm_vstrdq_scatter_base_u64 (uint64x2_t __addr, const int __offset, uint64x2_t __value) | |
13314 | { | |
13315 | __builtin_mve_vstrdq_scatter_base_uv2di (__addr, __offset, __value); | |
13316 | } | |
13317 | ||
13318 | __extension__ extern __inline void | |
13319 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13320 | __arm_vstrdq_scatter_offset_p_s64 (int64_t * __base, uint64x2_t __offset, int64x2_t __value, mve_pred16_t __p) | |
13321 | { | |
ff0597dc | 13322 | __builtin_mve_vstrdq_scatter_offset_p_sv2di ((__builtin_neon_di *) __base, __offset, __value, __p); |
7a5fffa5 SP |
13323 | } |
13324 | ||
13325 | __extension__ extern __inline void | |
13326 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13327 | __arm_vstrdq_scatter_offset_p_u64 (uint64_t * __base, uint64x2_t __offset, uint64x2_t __value, mve_pred16_t __p) | |
13328 | { | |
ff0597dc | 13329 | __builtin_mve_vstrdq_scatter_offset_p_uv2di ((__builtin_neon_di *) __base, __offset, __value, __p); |
7a5fffa5 SP |
13330 | } |
13331 | ||
13332 | __extension__ extern __inline void | |
13333 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13334 | __arm_vstrdq_scatter_offset_s64 (int64_t * __base, uint64x2_t __offset, int64x2_t __value) | |
13335 | { | |
ff0597dc | 13336 | __builtin_mve_vstrdq_scatter_offset_sv2di ((__builtin_neon_di *) __base, __offset, __value); |
7a5fffa5 SP |
13337 | } |
13338 | ||
13339 | __extension__ extern __inline void | |
13340 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13341 | __arm_vstrdq_scatter_offset_u64 (uint64_t * __base, uint64x2_t __offset, uint64x2_t __value) | |
13342 | { | |
ff0597dc | 13343 | __builtin_mve_vstrdq_scatter_offset_uv2di ((__builtin_neon_di *) __base, __offset, __value); |
7a5fffa5 SP |
13344 | } |
13345 | ||
13346 | __extension__ extern __inline void | |
13347 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13348 | __arm_vstrdq_scatter_shifted_offset_p_s64 (int64_t * __base, uint64x2_t __offset, int64x2_t __value, mve_pred16_t __p) | |
13349 | { | |
ff0597dc | 13350 | __builtin_mve_vstrdq_scatter_shifted_offset_p_sv2di ((__builtin_neon_di *) __base, __offset, __value, __p); |
7a5fffa5 SP |
13351 | } |
13352 | ||
13353 | __extension__ extern __inline void | |
13354 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13355 | __arm_vstrdq_scatter_shifted_offset_p_u64 (uint64_t * __base, uint64x2_t __offset, uint64x2_t __value, mve_pred16_t __p) | |
13356 | { | |
ff0597dc | 13357 | __builtin_mve_vstrdq_scatter_shifted_offset_p_uv2di ((__builtin_neon_di *) __base, __offset, __value, __p); |
7a5fffa5 SP |
13358 | } |
13359 | ||
13360 | __extension__ extern __inline void | |
13361 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13362 | __arm_vstrdq_scatter_shifted_offset_s64 (int64_t * __base, uint64x2_t __offset, int64x2_t __value) | |
13363 | { | |
ff0597dc | 13364 | __builtin_mve_vstrdq_scatter_shifted_offset_sv2di ((__builtin_neon_di *) __base, __offset, __value); |
7a5fffa5 SP |
13365 | } |
13366 | ||
13367 | __extension__ extern __inline void | |
13368 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13369 | __arm_vstrdq_scatter_shifted_offset_u64 (uint64_t * __base, uint64x2_t __offset, uint64x2_t __value) | |
13370 | { | |
ff0597dc | 13371 | __builtin_mve_vstrdq_scatter_shifted_offset_uv2di ((__builtin_neon_di *) __base, __offset, __value); |
7a5fffa5 SP |
13372 | } |
13373 | ||
13374 | __extension__ extern __inline void | |
13375 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13376 | __arm_vstrwq_scatter_offset_p_s32 (int32_t * __base, uint32x4_t __offset, int32x4_t __value, mve_pred16_t __p) | |
13377 | { | |
13378 | __builtin_mve_vstrwq_scatter_offset_p_sv4si ((__builtin_neon_si *) __base, __offset, __value, __p); | |
13379 | } | |
13380 | ||
13381 | __extension__ extern __inline void | |
13382 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13383 | __arm_vstrwq_scatter_offset_p_u32 (uint32_t * __base, uint32x4_t __offset, uint32x4_t __value, mve_pred16_t __p) | |
13384 | { | |
13385 | __builtin_mve_vstrwq_scatter_offset_p_uv4si ((__builtin_neon_si *) __base, __offset, __value, __p); | |
13386 | } | |
13387 | ||
13388 | __extension__ extern __inline void | |
13389 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13390 | __arm_vstrwq_scatter_offset_s32 (int32_t * __base, uint32x4_t __offset, int32x4_t __value) | |
13391 | { | |
13392 | __builtin_mve_vstrwq_scatter_offset_sv4si ((__builtin_neon_si *) __base, __offset, __value); | |
13393 | } | |
13394 | ||
13395 | __extension__ extern __inline void | |
13396 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13397 | __arm_vstrwq_scatter_offset_u32 (uint32_t * __base, uint32x4_t __offset, uint32x4_t __value) | |
13398 | { | |
13399 | __builtin_mve_vstrwq_scatter_offset_uv4si ((__builtin_neon_si *) __base, __offset, __value); | |
13400 | } | |
13401 | ||
13402 | __extension__ extern __inline void | |
13403 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13404 | __arm_vstrwq_scatter_shifted_offset_p_s32 (int32_t * __base, uint32x4_t __offset, int32x4_t __value, mve_pred16_t __p) | |
13405 | { | |
13406 | __builtin_mve_vstrwq_scatter_shifted_offset_p_sv4si ((__builtin_neon_si *) __base, __offset, __value, __p); | |
13407 | } | |
13408 | ||
13409 | __extension__ extern __inline void | |
13410 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13411 | __arm_vstrwq_scatter_shifted_offset_p_u32 (uint32_t * __base, uint32x4_t __offset, uint32x4_t __value, mve_pred16_t __p) | |
13412 | { | |
13413 | __builtin_mve_vstrwq_scatter_shifted_offset_p_uv4si ((__builtin_neon_si *) __base, __offset, __value, __p); | |
13414 | } | |
13415 | ||
13416 | __extension__ extern __inline void | |
13417 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13418 | __arm_vstrwq_scatter_shifted_offset_s32 (int32_t * __base, uint32x4_t __offset, int32x4_t __value) | |
13419 | { | |
13420 | __builtin_mve_vstrwq_scatter_shifted_offset_sv4si ((__builtin_neon_si *) __base, __offset, __value); | |
13421 | } | |
13422 | ||
13423 | __extension__ extern __inline void | |
13424 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13425 | __arm_vstrwq_scatter_shifted_offset_u32 (uint32_t * __base, uint32x4_t __offset, uint32x4_t __value) | |
13426 | { | |
13427 | __builtin_mve_vstrwq_scatter_shifted_offset_uv4si ((__builtin_neon_si *) __base, __offset, __value); | |
13428 | } | |
13429 | ||
3eff57aa SP |
13430 | __extension__ extern __inline int8x16_t |
13431 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13432 | __arm_vaddq_s8 (int8x16_t __a, int8x16_t __b) | |
13433 | { | |
13434 | return __a + __b; | |
13435 | } | |
13436 | ||
13437 | __extension__ extern __inline int16x8_t | |
13438 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13439 | __arm_vaddq_s16 (int16x8_t __a, int16x8_t __b) | |
13440 | { | |
13441 | return __a + __b; | |
13442 | } | |
13443 | ||
13444 | __extension__ extern __inline int32x4_t | |
13445 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13446 | __arm_vaddq_s32 (int32x4_t __a, int32x4_t __b) | |
13447 | { | |
13448 | return __a + __b; | |
13449 | } | |
13450 | ||
13451 | __extension__ extern __inline uint8x16_t | |
13452 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13453 | __arm_vaddq_u8 (uint8x16_t __a, uint8x16_t __b) | |
13454 | { | |
13455 | return __a + __b; | |
13456 | } | |
13457 | ||
13458 | __extension__ extern __inline uint16x8_t | |
13459 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13460 | __arm_vaddq_u16 (uint16x8_t __a, uint16x8_t __b) | |
13461 | { | |
13462 | return __a + __b; | |
13463 | } | |
13464 | ||
13465 | __extension__ extern __inline uint32x4_t | |
13466 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13467 | __arm_vaddq_u32 (uint32x4_t __a, uint32x4_t __b) | |
13468 | { | |
13469 | return __a + __b; | |
13470 | } | |
13471 | ||
92f80065 SP |
13472 | __extension__ extern __inline uint8x16_t |
13473 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13474 | __arm_vddupq_m_n_u8 (uint8x16_t __inactive, uint32_t __a, const int __imm, mve_pred16_t __p) | |
13475 | { | |
13476 | return __builtin_mve_vddupq_m_n_uv16qi (__inactive, __a, __imm, __p); | |
13477 | } | |
13478 | ||
13479 | __extension__ extern __inline uint32x4_t | |
13480 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13481 | __arm_vddupq_m_n_u32 (uint32x4_t __inactive, uint32_t __a, const int __imm, mve_pred16_t __p) | |
13482 | { | |
13483 | return __builtin_mve_vddupq_m_n_uv4si (__inactive, __a, __imm, __p); | |
13484 | } | |
13485 | ||
13486 | __extension__ extern __inline uint16x8_t | |
13487 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13488 | __arm_vddupq_m_n_u16 (uint16x8_t __inactive, uint32_t __a, const int __imm, mve_pred16_t __p) | |
13489 | { | |
13490 | return __builtin_mve_vddupq_m_n_uv8hi (__inactive, __a, __imm, __p); | |
13491 | } | |
13492 | ||
13493 | __extension__ extern __inline uint8x16_t | |
13494 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13495 | __arm_vddupq_m_wb_u8 (uint8x16_t __inactive, uint32_t * __a, const int __imm, mve_pred16_t __p) | |
13496 | { | |
13497 | uint8x16_t __res = __builtin_mve_vddupq_m_n_uv16qi (__inactive, * __a, __imm, __p); | |
13498 | *__a -= __imm * 16u; | |
13499 | return __res; | |
13500 | } | |
13501 | ||
13502 | __extension__ extern __inline uint16x8_t | |
13503 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13504 | __arm_vddupq_m_wb_u16 (uint16x8_t __inactive, uint32_t * __a, const int __imm, mve_pred16_t __p) | |
13505 | { | |
13506 | uint16x8_t __res = __builtin_mve_vddupq_m_n_uv8hi (__inactive, *__a, __imm, __p); | |
13507 | *__a -= __imm * 8u; | |
13508 | return __res; | |
13509 | } | |
13510 | ||
13511 | __extension__ extern __inline uint32x4_t | |
13512 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13513 | __arm_vddupq_m_wb_u32 (uint32x4_t __inactive, uint32_t * __a, const int __imm, mve_pred16_t __p) | |
13514 | { | |
13515 | uint32x4_t __res = __builtin_mve_vddupq_m_n_uv4si (__inactive, *__a, __imm, __p); | |
13516 | *__a -= __imm * 4u; | |
13517 | return __res; | |
13518 | } | |
13519 | ||
13520 | __extension__ extern __inline uint8x16_t | |
13521 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13522 | __arm_vddupq_n_u8 (uint32_t __a, const int __imm) | |
13523 | { | |
13524 | return __builtin_mve_vddupq_n_uv16qi (__a, __imm); | |
13525 | } | |
13526 | ||
13527 | __extension__ extern __inline uint32x4_t | |
13528 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13529 | __arm_vddupq_n_u32 (uint32_t __a, const int __imm) | |
13530 | { | |
13531 | return __builtin_mve_vddupq_n_uv4si (__a, __imm); | |
13532 | } | |
13533 | ||
13534 | __extension__ extern __inline uint16x8_t | |
13535 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13536 | __arm_vddupq_n_u16 (uint32_t __a, const int __imm) | |
13537 | { | |
13538 | return __builtin_mve_vddupq_n_uv8hi (__a, __imm); | |
13539 | } | |
13540 | ||
13541 | __extension__ extern __inline uint8x16_t | |
13542 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13543 | __arm_vdwdupq_m_n_u8 (uint8x16_t __inactive, uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p) | |
13544 | { | |
9ce780ef ASDV |
13545 | uint64_t __c = ((uint64_t) __b) << 32; |
13546 | return __builtin_mve_vdwdupq_m_n_uv16qi (__inactive, __a, __c, __imm, __p); | |
92f80065 SP |
13547 | } |
13548 | ||
13549 | __extension__ extern __inline uint32x4_t | |
13550 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13551 | __arm_vdwdupq_m_n_u32 (uint32x4_t __inactive, uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p) | |
13552 | { | |
9ce780ef ASDV |
13553 | uint64_t __c = ((uint64_t) __b) << 32; |
13554 | return __builtin_mve_vdwdupq_m_n_uv4si (__inactive, __a, __c, __imm, __p); | |
92f80065 SP |
13555 | } |
13556 | ||
13557 | __extension__ extern __inline uint16x8_t | |
13558 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13559 | __arm_vdwdupq_m_n_u16 (uint16x8_t __inactive, uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p) | |
13560 | { | |
9ce780ef ASDV |
13561 | uint64_t __c = ((uint64_t) __b) << 32; |
13562 | return __builtin_mve_vdwdupq_m_n_uv8hi (__inactive, __a, __c, __imm, __p); | |
92f80065 SP |
13563 | } |
13564 | ||
13565 | __extension__ extern __inline uint8x16_t | |
13566 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13567 | __arm_vdwdupq_m_wb_u8 (uint8x16_t __inactive, uint32_t * __a, uint32_t __b, const int __imm, mve_pred16_t __p) | |
13568 | { | |
9ce780ef ASDV |
13569 | uint64_t __c = ((uint64_t) __b) << 32; |
13570 | uint8x16_t __res = __builtin_mve_vdwdupq_m_n_uv16qi (__inactive, *__a, __c, __imm, __p); | |
13571 | *__a = __builtin_mve_vdwdupq_m_wb_uv16qi (__inactive, *__a, __c, __imm, __p); | |
92f80065 SP |
13572 | return __res; |
13573 | } | |
13574 | ||
13575 | __extension__ extern __inline uint32x4_t | |
13576 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13577 | __arm_vdwdupq_m_wb_u32 (uint32x4_t __inactive, uint32_t * __a, uint32_t __b, const int __imm, mve_pred16_t __p) | |
13578 | { | |
9ce780ef ASDV |
13579 | uint64_t __c = ((uint64_t) __b) << 32; |
13580 | uint32x4_t __res = __builtin_mve_vdwdupq_m_n_uv4si (__inactive, *__a, __c, __imm, __p); | |
13581 | *__a = __builtin_mve_vdwdupq_m_wb_uv4si (__inactive, *__a, __c, __imm, __p); | |
92f80065 SP |
13582 | return __res; |
13583 | } | |
13584 | ||
13585 | __extension__ extern __inline uint16x8_t | |
13586 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13587 | __arm_vdwdupq_m_wb_u16 (uint16x8_t __inactive, uint32_t * __a, uint32_t __b, const int __imm, mve_pred16_t __p) | |
13588 | { | |
9ce780ef ASDV |
13589 | uint64_t __c = ((uint64_t) __b) << 32; |
13590 | uint16x8_t __res = __builtin_mve_vdwdupq_m_n_uv8hi (__inactive, *__a, __c, __imm, __p); | |
13591 | *__a = __builtin_mve_vdwdupq_m_wb_uv8hi (__inactive, *__a, __c, __imm, __p); | |
92f80065 SP |
13592 | return __res; |
13593 | } | |
13594 | ||
13595 | __extension__ extern __inline uint8x16_t | |
13596 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13597 | __arm_vdwdupq_n_u8 (uint32_t __a, uint32_t __b, const int __imm) | |
13598 | { | |
9ce780ef ASDV |
13599 | uint64_t __c = ((uint64_t) __b) << 32; |
13600 | return __builtin_mve_vdwdupq_n_uv16qi (__a, __c, __imm); | |
92f80065 SP |
13601 | } |
13602 | ||
13603 | __extension__ extern __inline uint32x4_t | |
13604 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13605 | __arm_vdwdupq_n_u32 (uint32_t __a, uint32_t __b, const int __imm) | |
13606 | { | |
9ce780ef ASDV |
13607 | uint64_t __c = ((uint64_t) __b) << 32; |
13608 | return __builtin_mve_vdwdupq_n_uv4si (__a, __c, __imm); | |
92f80065 SP |
13609 | } |
13610 | ||
13611 | __extension__ extern __inline uint16x8_t | |
13612 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13613 | __arm_vdwdupq_n_u16 (uint32_t __a, uint32_t __b, const int __imm) | |
13614 | { | |
9ce780ef ASDV |
13615 | uint64_t __c = ((uint64_t) __b) << 32; |
13616 | return __builtin_mve_vdwdupq_n_uv8hi (__a, __c, __imm); | |
92f80065 SP |
13617 | } |
13618 | ||
13619 | __extension__ extern __inline uint8x16_t | |
13620 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13621 | __arm_vdwdupq_wb_u8 (uint32_t * __a, uint32_t __b, const int __imm) | |
13622 | { | |
9ce780ef ASDV |
13623 | uint64_t __c = ((uint64_t) __b) << 32; |
13624 | uint8x16_t __res = __builtin_mve_vdwdupq_n_uv16qi (*__a, __c, __imm); | |
13625 | *__a = __builtin_mve_vdwdupq_wb_uv16qi (*__a, __c, __imm); | |
92f80065 SP |
13626 | return __res; |
13627 | } | |
13628 | ||
13629 | __extension__ extern __inline uint32x4_t | |
13630 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13631 | __arm_vdwdupq_wb_u32 (uint32_t * __a, uint32_t __b, const int __imm) | |
13632 | { | |
9ce780ef ASDV |
13633 | uint64_t __c = ((uint64_t) __b) << 32; |
13634 | uint32x4_t __res = __builtin_mve_vdwdupq_n_uv4si (*__a, __c, __imm); | |
13635 | *__a = __builtin_mve_vdwdupq_wb_uv4si (*__a, __c, __imm); | |
92f80065 SP |
13636 | return __res; |
13637 | } | |
13638 | ||
13639 | __extension__ extern __inline uint16x8_t | |
13640 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13641 | __arm_vdwdupq_wb_u16 (uint32_t * __a, uint32_t __b, const int __imm) | |
13642 | { | |
9ce780ef ASDV |
13643 | uint64_t __c = ((uint64_t) __b) << 32; |
13644 | uint16x8_t __res = __builtin_mve_vdwdupq_n_uv8hi (*__a, __c, __imm); | |
13645 | *__a = __builtin_mve_vdwdupq_wb_uv8hi (*__a, __c, __imm); | |
92f80065 SP |
13646 | return __res; |
13647 | } | |
13648 | ||
13649 | __extension__ extern __inline uint8x16_t | |
13650 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13651 | __arm_vidupq_m_n_u8 (uint8x16_t __inactive, uint32_t __a, const int __imm, mve_pred16_t __p) | |
13652 | { | |
13653 | return __builtin_mve_vidupq_m_n_uv16qi (__inactive, __a, __imm, __p); | |
13654 | } | |
13655 | ||
13656 | __extension__ extern __inline uint32x4_t | |
13657 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13658 | __arm_vidupq_m_n_u32 (uint32x4_t __inactive, uint32_t __a, const int __imm, mve_pred16_t __p) | |
13659 | { | |
13660 | return __builtin_mve_vidupq_m_n_uv4si (__inactive, __a, __imm, __p); | |
13661 | } | |
13662 | ||
13663 | __extension__ extern __inline uint16x8_t | |
13664 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13665 | __arm_vidupq_m_n_u16 (uint16x8_t __inactive, uint32_t __a, const int __imm, mve_pred16_t __p) | |
13666 | { | |
13667 | return __builtin_mve_vidupq_m_n_uv8hi (__inactive, __a, __imm, __p); | |
13668 | } | |
13669 | ||
13670 | __extension__ extern __inline uint8x16_t | |
13671 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13672 | __arm_vidupq_n_u8 (uint32_t __a, const int __imm) | |
13673 | { | |
13674 | return __builtin_mve_vidupq_n_uv16qi (__a, __imm); | |
13675 | } | |
13676 | ||
13677 | __extension__ extern __inline uint8x16_t | |
13678 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13679 | __arm_vidupq_m_wb_u8 (uint8x16_t __inactive, uint32_t * __a, const int __imm, mve_pred16_t __p) | |
13680 | { | |
13681 | uint8x16_t __res = __builtin_mve_vidupq_m_n_uv16qi (__inactive, *__a, __imm, __p); | |
13682 | *__a += __imm * 16u; | |
13683 | return __res; | |
13684 | } | |
13685 | ||
13686 | __extension__ extern __inline uint16x8_t | |
13687 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13688 | __arm_vidupq_m_wb_u16 (uint16x8_t __inactive, uint32_t * __a, const int __imm, mve_pred16_t __p) | |
13689 | { | |
13690 | uint16x8_t __res = __builtin_mve_vidupq_m_n_uv8hi (__inactive, *__a, __imm, __p); | |
13691 | *__a += __imm * 8u; | |
13692 | return __res; | |
13693 | } | |
13694 | ||
13695 | __extension__ extern __inline uint32x4_t | |
13696 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13697 | __arm_vidupq_m_wb_u32 (uint32x4_t __inactive, uint32_t * __a, const int __imm, mve_pred16_t __p) | |
13698 | { | |
13699 | uint32x4_t __res = __builtin_mve_vidupq_m_n_uv4si (__inactive, *__a, __imm, __p); | |
13700 | *__a += __imm * 4u; | |
13701 | return __res; | |
13702 | } | |
13703 | ||
13704 | __extension__ extern __inline uint32x4_t | |
13705 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13706 | __arm_vidupq_n_u32 (uint32_t __a, const int __imm) | |
13707 | { | |
13708 | return __builtin_mve_vidupq_n_uv4si (__a, __imm); | |
13709 | } | |
13710 | ||
13711 | __extension__ extern __inline uint16x8_t | |
13712 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13713 | __arm_vidupq_n_u16 (uint32_t __a, const int __imm) | |
13714 | { | |
13715 | return __builtin_mve_vidupq_n_uv8hi (__a, __imm); | |
13716 | } | |
13717 | ||
13718 | __extension__ extern __inline uint8x16_t | |
13719 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13720 | __arm_vidupq_wb_u8 (uint32_t * __a, const int __imm) | |
13721 | { | |
13722 | uint8x16_t __res = __builtin_mve_vidupq_n_uv16qi (*__a, __imm); | |
13723 | *__a += __imm * 16u; | |
13724 | return __res; | |
13725 | } | |
13726 | ||
13727 | __extension__ extern __inline uint16x8_t | |
13728 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13729 | __arm_vidupq_wb_u16 (uint32_t * __a, const int __imm) | |
13730 | { | |
13731 | uint16x8_t __res = __builtin_mve_vidupq_n_uv8hi (*__a, __imm); | |
13732 | *__a += __imm * 8u; | |
13733 | return __res; | |
13734 | } | |
13735 | ||
13736 | __extension__ extern __inline uint32x4_t | |
13737 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13738 | __arm_vidupq_wb_u32 (uint32_t * __a, const int __imm) | |
13739 | { | |
13740 | uint32x4_t __res = __builtin_mve_vidupq_n_uv4si (*__a, __imm); | |
13741 | *__a += __imm * 4u; | |
13742 | return __res; | |
13743 | } | |
13744 | ||
13745 | __extension__ extern __inline uint8x16_t | |
13746 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13747 | __arm_vddupq_wb_u8 (uint32_t * __a, const int __imm) | |
13748 | { | |
13749 | uint8x16_t __res = __builtin_mve_vddupq_n_uv16qi (*__a, __imm); | |
13750 | *__a -= __imm * 16u; | |
13751 | return __res; | |
13752 | } | |
13753 | ||
13754 | __extension__ extern __inline uint16x8_t | |
13755 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13756 | __arm_vddupq_wb_u16 (uint32_t * __a, const int __imm) | |
13757 | { | |
13758 | uint16x8_t __res = __builtin_mve_vddupq_n_uv8hi (*__a, __imm); | |
13759 | *__a -= __imm * 8u; | |
13760 | return __res; | |
13761 | } | |
13762 | ||
13763 | __extension__ extern __inline uint32x4_t | |
13764 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13765 | __arm_vddupq_wb_u32 (uint32_t * __a, const int __imm) | |
13766 | { | |
13767 | uint32x4_t __res = __builtin_mve_vddupq_n_uv4si (*__a, __imm); | |
13768 | *__a -= __imm * 4u; | |
13769 | return __res; | |
13770 | } | |
13771 | ||
13772 | __extension__ extern __inline uint8x16_t | |
13773 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13774 | __arm_viwdupq_m_n_u8 (uint8x16_t __inactive, uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p) | |
13775 | { | |
9ce780ef ASDV |
13776 | uint64_t __c = ((uint64_t) __b) << 32; |
13777 | return __builtin_mve_viwdupq_m_n_uv16qi (__inactive, __a, __c, __imm, __p); | |
92f80065 SP |
13778 | } |
13779 | ||
13780 | __extension__ extern __inline uint32x4_t | |
13781 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13782 | __arm_viwdupq_m_n_u32 (uint32x4_t __inactive, uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p) | |
13783 | { | |
9ce780ef ASDV |
13784 | uint64_t __c = ((uint64_t) __b) << 32; |
13785 | return __builtin_mve_viwdupq_m_n_uv4si (__inactive, __a, __c, __imm, __p); | |
92f80065 SP |
13786 | } |
13787 | ||
13788 | __extension__ extern __inline uint16x8_t | |
13789 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13790 | __arm_viwdupq_m_n_u16 (uint16x8_t __inactive, uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p) | |
13791 | { | |
9ce780ef ASDV |
13792 | uint64_t __c = ((uint64_t) __b) << 32; |
13793 | return __builtin_mve_viwdupq_m_n_uv8hi (__inactive, __a, __c, __imm, __p); | |
92f80065 SP |
13794 | } |
13795 | ||
13796 | __extension__ extern __inline uint8x16_t | |
13797 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13798 | __arm_viwdupq_m_wb_u8 (uint8x16_t __inactive, uint32_t * __a, uint32_t __b, const int __imm, mve_pred16_t __p) | |
13799 | { | |
9ce780ef ASDV |
13800 | uint64_t __c = ((uint64_t) __b) << 32; |
13801 | uint8x16_t __res = __builtin_mve_viwdupq_m_n_uv16qi (__inactive, *__a, __c, __imm, __p); | |
13802 | *__a = __builtin_mve_viwdupq_m_wb_uv16qi (__inactive, *__a, __c, __imm, __p); | |
92f80065 SP |
13803 | return __res; |
13804 | } | |
13805 | ||
13806 | __extension__ extern __inline uint32x4_t | |
13807 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13808 | __arm_viwdupq_m_wb_u32 (uint32x4_t __inactive, uint32_t * __a, uint32_t __b, const int __imm, mve_pred16_t __p) | |
13809 | { | |
9ce780ef ASDV |
13810 | uint64_t __c = ((uint64_t) __b) << 32; |
13811 | uint32x4_t __res = __builtin_mve_viwdupq_m_n_uv4si (__inactive, *__a, __c, __imm, __p); | |
13812 | *__a = __builtin_mve_viwdupq_m_wb_uv4si (__inactive, *__a, __c, __imm, __p); | |
92f80065 SP |
13813 | return __res; |
13814 | } | |
13815 | ||
13816 | __extension__ extern __inline uint16x8_t | |
13817 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13818 | __arm_viwdupq_m_wb_u16 (uint16x8_t __inactive, uint32_t * __a, uint32_t __b, const int __imm, mve_pred16_t __p) | |
13819 | { | |
9ce780ef ASDV |
13820 | uint64_t __c = ((uint64_t) __b) << 32; |
13821 | uint16x8_t __res = __builtin_mve_viwdupq_m_n_uv8hi (__inactive, *__a, __c, __imm, __p); | |
13822 | *__a = __builtin_mve_viwdupq_m_wb_uv8hi (__inactive, *__a, __c, __imm, __p); | |
92f80065 SP |
13823 | return __res; |
13824 | } | |
13825 | ||
13826 | __extension__ extern __inline uint8x16_t | |
13827 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13828 | __arm_viwdupq_n_u8 (uint32_t __a, uint32_t __b, const int __imm) | |
13829 | { | |
9ce780ef ASDV |
13830 | uint64_t __c = ((uint64_t) __b) << 32; |
13831 | return __builtin_mve_viwdupq_n_uv16qi (__a, __c, __imm); | |
92f80065 SP |
13832 | } |
13833 | ||
13834 | __extension__ extern __inline uint32x4_t | |
13835 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13836 | __arm_viwdupq_n_u32 (uint32_t __a, uint32_t __b, const int __imm) | |
13837 | { | |
9ce780ef ASDV |
13838 | uint64_t __c = ((uint64_t) __b) << 32; |
13839 | return __builtin_mve_viwdupq_n_uv4si (__a, __c, __imm); | |
92f80065 SP |
13840 | } |
13841 | ||
13842 | __extension__ extern __inline uint16x8_t | |
13843 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13844 | __arm_viwdupq_n_u16 (uint32_t __a, uint32_t __b, const int __imm) | |
13845 | { | |
9ce780ef ASDV |
13846 | uint64_t __c = ((uint64_t) __b) << 32; |
13847 | return __builtin_mve_viwdupq_n_uv8hi (__a, __c, __imm); | |
92f80065 SP |
13848 | } |
13849 | ||
13850 | __extension__ extern __inline uint8x16_t | |
13851 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13852 | __arm_viwdupq_wb_u8 (uint32_t * __a, uint32_t __b, const int __imm) | |
13853 | { | |
9ce780ef ASDV |
13854 | uint64_t __c = ((uint64_t) __b) << 32; |
13855 | uint8x16_t __res = __builtin_mve_viwdupq_n_uv16qi (*__a, __c, __imm); | |
13856 | *__a = __builtin_mve_viwdupq_wb_uv16qi (*__a, __c, __imm); | |
92f80065 SP |
13857 | return __res; |
13858 | } | |
13859 | ||
13860 | __extension__ extern __inline uint32x4_t | |
13861 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13862 | __arm_viwdupq_wb_u32 (uint32_t * __a, uint32_t __b, const int __imm) | |
13863 | { | |
9ce780ef ASDV |
13864 | uint64_t __c = ((uint64_t) __b) << 32; |
13865 | uint32x4_t __res = __builtin_mve_viwdupq_n_uv4si (*__a, __c, __imm); | |
13866 | *__a = __builtin_mve_viwdupq_wb_uv4si (*__a, __c, __imm); | |
92f80065 SP |
13867 | return __res; |
13868 | } | |
13869 | ||
13870 | __extension__ extern __inline uint16x8_t | |
13871 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13872 | __arm_viwdupq_wb_u16 (uint32_t * __a, uint32_t __b, const int __imm) | |
13873 | { | |
9ce780ef ASDV |
13874 | uint64_t __c = ((uint64_t) __b) << 32; |
13875 | uint16x8_t __res = __builtin_mve_viwdupq_n_uv8hi (*__a, __c, __imm); | |
13876 | *__a = __builtin_mve_viwdupq_wb_uv8hi (*__a, __c, __imm); | |
92f80065 SP |
13877 | return __res; |
13878 | } | |
13879 | ||
9ce780ef | 13880 | |
41e1a7ff SP |
13881 | __extension__ extern __inline int64x2_t |
13882 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13883 | __arm_vldrdq_gather_base_wb_s64 (uint64x2_t * __addr, const int __offset) | |
13884 | { | |
13885 | int64x2_t | |
ff825b81 SP |
13886 | result = __builtin_mve_vldrdq_gather_base_nowb_sv2di (*__addr, __offset); |
13887 | *__addr = __builtin_mve_vldrdq_gather_base_wb_sv2di (*__addr, __offset); | |
41e1a7ff SP |
13888 | return result; |
13889 | } | |
13890 | ||
13891 | __extension__ extern __inline uint64x2_t | |
13892 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13893 | __arm_vldrdq_gather_base_wb_u64 (uint64x2_t * __addr, const int __offset) | |
13894 | { | |
13895 | uint64x2_t | |
ff825b81 SP |
13896 | result = __builtin_mve_vldrdq_gather_base_nowb_uv2di (*__addr, __offset); |
13897 | *__addr = __builtin_mve_vldrdq_gather_base_wb_uv2di (*__addr, __offset); | |
41e1a7ff SP |
13898 | return result; |
13899 | } | |
13900 | ||
13901 | __extension__ extern __inline int64x2_t | |
13902 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13903 | __arm_vldrdq_gather_base_wb_z_s64 (uint64x2_t * __addr, const int __offset, mve_pred16_t __p) | |
13904 | { | |
13905 | int64x2_t | |
ff825b81 SP |
13906 | result = __builtin_mve_vldrdq_gather_base_nowb_z_sv2di (*__addr, __offset, __p); |
13907 | *__addr = __builtin_mve_vldrdq_gather_base_wb_z_sv2di (*__addr, __offset, __p); | |
41e1a7ff SP |
13908 | return result; |
13909 | } | |
13910 | ||
13911 | __extension__ extern __inline uint64x2_t | |
13912 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13913 | __arm_vldrdq_gather_base_wb_z_u64 (uint64x2_t * __addr, const int __offset, mve_pred16_t __p) | |
13914 | { | |
13915 | uint64x2_t | |
ff825b81 SP |
13916 | result = __builtin_mve_vldrdq_gather_base_nowb_z_uv2di (*__addr, __offset, __p); |
13917 | *__addr = __builtin_mve_vldrdq_gather_base_wb_z_uv2di (*__addr, __offset, __p); | |
41e1a7ff SP |
13918 | return result; |
13919 | } | |
13920 | ||
13921 | __extension__ extern __inline int32x4_t | |
13922 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13923 | __arm_vldrwq_gather_base_wb_s32 (uint32x4_t * __addr, const int __offset) | |
13924 | { | |
13925 | int32x4_t | |
ff825b81 SP |
13926 | result = __builtin_mve_vldrwq_gather_base_nowb_sv4si (*__addr, __offset); |
13927 | *__addr = __builtin_mve_vldrwq_gather_base_wb_sv4si (*__addr, __offset); | |
41e1a7ff SP |
13928 | return result; |
13929 | } | |
13930 | ||
13931 | __extension__ extern __inline uint32x4_t | |
13932 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13933 | __arm_vldrwq_gather_base_wb_u32 (uint32x4_t * __addr, const int __offset) | |
13934 | { | |
13935 | uint32x4_t | |
ff825b81 SP |
13936 | result = __builtin_mve_vldrwq_gather_base_nowb_uv4si (*__addr, __offset); |
13937 | *__addr = __builtin_mve_vldrwq_gather_base_wb_uv4si (*__addr, __offset); | |
41e1a7ff SP |
13938 | return result; |
13939 | } | |
13940 | ||
13941 | __extension__ extern __inline int32x4_t | |
13942 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13943 | __arm_vldrwq_gather_base_wb_z_s32 (uint32x4_t * __addr, const int __offset, mve_pred16_t __p) | |
13944 | { | |
13945 | int32x4_t | |
ff825b81 SP |
13946 | result = __builtin_mve_vldrwq_gather_base_nowb_z_sv4si (*__addr, __offset, __p); |
13947 | *__addr = __builtin_mve_vldrwq_gather_base_wb_z_sv4si (*__addr, __offset, __p); | |
41e1a7ff SP |
13948 | return result; |
13949 | } | |
13950 | ||
13951 | __extension__ extern __inline uint32x4_t | |
13952 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
13953 | __arm_vldrwq_gather_base_wb_z_u32 (uint32x4_t * __addr, const int __offset, mve_pred16_t __p) | |
13954 | { | |
13955 | uint32x4_t | |
ff825b81 SP |
13956 | result = __builtin_mve_vldrwq_gather_base_nowb_z_uv4si (*__addr, __offset, __p); |
13957 | *__addr = __builtin_mve_vldrwq_gather_base_wb_z_uv4si (*__addr, __offset, __p); | |
41e1a7ff SP |
13958 | return result; |
13959 | } | |
13960 | ||
/* Scatter-store doublewords relative to the base vector *__addr, with
   write-back: the builtin performs the store and returns the updated
   base, which is stored back through __addr.  "_p" variants take an
   additional predicate __p controlling which lanes are written.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrdq_scatter_base_wb_s64 (uint64x2_t * __addr, const int __offset, int64x2_t __value)
{
  *__addr = __builtin_mve_vstrdq_scatter_base_wb_sv2di (*__addr, __offset, __value);
}

/* Unsigned-element variant of the doubleword scatter with write-back.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrdq_scatter_base_wb_u64 (uint64x2_t * __addr, const int __offset, uint64x2_t __value)
{
  *__addr = __builtin_mve_vstrdq_scatter_base_wb_uv2di (*__addr, __offset, __value);
}

/* Predicated doubleword scatter with write-back (signed elements).  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrdq_scatter_base_wb_p_s64 (uint64x2_t * __addr, const int __offset, int64x2_t __value, mve_pred16_t __p)
{
  *__addr = __builtin_mve_vstrdq_scatter_base_wb_p_sv2di (*__addr, __offset, __value, __p);
}

/* Predicated doubleword scatter with write-back (unsigned elements).  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrdq_scatter_base_wb_p_u64 (uint64x2_t * __addr, const int __offset, uint64x2_t __value, mve_pred16_t __p)
{
  *__addr = __builtin_mve_vstrdq_scatter_base_wb_p_uv2di (*__addr, __offset, __value, __p);
}
13988 | ||
/* Word scatter-stores relative to the base vector *__addr, with
   write-back of the updated base.  Same pattern as the vstrdq family:
   the builtin returns the new base, which replaces *__addr.  "_p"
   variants additionally take a lane predicate __p.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_base_wb_p_s32 (uint32x4_t * __addr, const int __offset, int32x4_t __value, mve_pred16_t __p)
{
  *__addr = __builtin_mve_vstrwq_scatter_base_wb_p_sv4si (*__addr, __offset, __value, __p);
}

/* Predicated word scatter with write-back (unsigned elements).  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_base_wb_p_u32 (uint32x4_t * __addr, const int __offset, uint32x4_t __value, mve_pred16_t __p)
{
  *__addr = __builtin_mve_vstrwq_scatter_base_wb_p_uv4si (*__addr, __offset, __value, __p);
}

/* Unpredicated word scatter with write-back (signed elements).  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_base_wb_s32 (uint32x4_t * __addr, const int __offset, int32x4_t __value)
{
  *__addr = __builtin_mve_vstrwq_scatter_base_wb_sv4si (*__addr, __offset, __value);
}

/* Unpredicated word scatter with write-back (unsigned elements).  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_base_wb_u32 (uint32x4_t * __addr, const int __offset, uint32x4_t __value)
{
  *__addr = __builtin_mve_vstrwq_scatter_base_wb_uv4si (*__addr, __offset, __value);
}
14016 | ||
261014a1 SP |
/* "_x" (don't-care) predicated decrementing-duplicate: lanes enabled by
   __p are produced by the VDDUP operation starting at __a with step
   __imm; inactive lanes come from an uninitialized vector, i.e. their
   values are unspecified.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_x_n_u8 (uint32_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vddupq_m_n_uv16qi (__arm_vuninitializedq_u8 (), __a, __imm, __p);
}

/* 16-bit element variant of the don't-care predicated VDDUP.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_x_n_u16 (uint32_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vddupq_m_n_uv8hi (__arm_vuninitializedq_u16 (), __a, __imm, __p);
}

/* 32-bit element variant of the don't-care predicated VDDUP.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_x_n_u32 (uint32_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vddupq_m_n_uv4si (__arm_vuninitializedq_u32 (), __a, __imm, __p);
}
14037 | ||
261014a1 | 14038 | __extension__ extern __inline uint8x16_t |
f9355dee | 14039 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14040 | __arm_vddupq_x_wb_u8 (uint32_t *__a, const int __imm, mve_pred16_t __p) |
f9355dee | 14041 | { |
c431634b | 14042 | uint8x16_t __arg1 = __arm_vuninitializedq_u8 (); |
261014a1 SP |
14043 | uint8x16_t __res = __builtin_mve_vddupq_m_n_uv16qi (__arg1, * __a, __imm, __p); |
14044 | *__a -= __imm * 16u; | |
14045 | return __res; | |
f9355dee SP |
14046 | } |
14047 | ||
261014a1 | 14048 | __extension__ extern __inline uint16x8_t |
f9355dee | 14049 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14050 | __arm_vddupq_x_wb_u16 (uint32_t *__a, const int __imm, mve_pred16_t __p) |
f9355dee | 14051 | { |
c431634b | 14052 | uint16x8_t __arg1 = __arm_vuninitializedq_u16 (); |
261014a1 SP |
14053 | uint16x8_t __res = __builtin_mve_vddupq_m_n_uv8hi (__arg1, *__a, __imm, __p); |
14054 | *__a -= __imm * 8u; | |
14055 | return __res; | |
f9355dee SP |
14056 | } |
14057 | ||
261014a1 | 14058 | __extension__ extern __inline uint32x4_t |
f9355dee | 14059 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14060 | __arm_vddupq_x_wb_u32 (uint32_t *__a, const int __imm, mve_pred16_t __p) |
f9355dee | 14061 | { |
c431634b | 14062 | uint32x4_t __arg1 = __arm_vuninitializedq_u32 (); |
261014a1 SP |
14063 | uint32x4_t __res = __builtin_mve_vddupq_m_n_uv4si (__arg1, *__a, __imm, __p); |
14064 | *__a -= __imm * 4u; | |
14065 | return __res; | |
f9355dee SP |
14066 | } |
14067 | ||
/* Don't-care predicated wrapping decrementing-duplicate (VDWDUP): the
   wrap limit __b is packed into the top half of a 64-bit operand as the
   builtin expects; inactive lanes are unspecified (uninitialized merge
   source).  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_x_n_u8 (uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  /* Pack the wrap value into bits [63:32] for the builtin.  */
  uint64_t __c = ((uint64_t) __b) << 32;
  return __builtin_mve_vdwdupq_m_n_uv16qi (__arm_vuninitializedq_u8 (), __a, __c, __imm, __p);
}

/* 16-bit element variant of the don't-care predicated VDWDUP.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_x_n_u16 (uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  uint64_t __c = ((uint64_t) __b) << 32;
  return __builtin_mve_vdwdupq_m_n_uv8hi (__arm_vuninitializedq_u16 (), __a, __c, __imm, __p);
}

/* 32-bit element variant of the don't-care predicated VDWDUP.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_x_n_u32 (uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  uint64_t __c = ((uint64_t) __b) << 32;
  return __builtin_mve_vdwdupq_m_n_uv4si (__arm_vuninitializedq_u32 (), __a, __c, __imm, __p);
}
14091 | ||
/* Don't-care predicated VDWDUP with scalar write-back.  The "_n"
   builtin yields the vector result and the "_wb" builtin yields the
   updated (wrapped) scalar start value; both take the ORIGINAL *__a,
   so the write-back happens after the first call.  NOTE(review):
   presumably the compiler combines the identical-operand pair into a
   single VDWDUP — confirm against the MVE builtin expanders.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_x_wb_u8 (uint32_t *__a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  /* Wrap limit packed into bits [63:32].  */
  uint64_t __c = ((uint64_t) __b) << 32;
  uint8x16_t __arg1 = __arm_vuninitializedq_u8 ();
  uint8x16_t __res = __builtin_mve_vdwdupq_m_n_uv16qi (__arg1, *__a, __c, __imm, __p);
  *__a = __builtin_mve_vdwdupq_m_wb_uv16qi (__arg1, *__a, __c, __imm, __p);
  return __res;
}

/* 16-bit element variant of the write-back VDWDUP.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_x_wb_u16 (uint32_t *__a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  uint64_t __c = ((uint64_t) __b) << 32;
  uint16x8_t __arg1 = __arm_vuninitializedq_u16 ();
  uint16x8_t __res = __builtin_mve_vdwdupq_m_n_uv8hi (__arg1, *__a, __c, __imm, __p);
  *__a = __builtin_mve_vdwdupq_m_wb_uv8hi (__arg1, *__a, __c, __imm, __p);
  return __res;
}

/* 32-bit element variant of the write-back VDWDUP.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_x_wb_u32 (uint32_t *__a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  uint64_t __c = ((uint64_t) __b) << 32;
  uint32x4_t __arg1 = __arm_vuninitializedq_u32 ();
  uint32x4_t __res = __builtin_mve_vdwdupq_m_n_uv4si (__arg1, *__a, __c, __imm, __p);
  *__a = __builtin_mve_vdwdupq_m_wb_uv4si (__arg1, *__a, __c, __imm, __p);
  return __res;
}
14124 | ||
/* "_x" (don't-care) predicated incrementing-duplicate (VIDUP): start at
   __a, step by __imm; inactive lanes take unspecified values from an
   uninitialized merge source.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_x_n_u8 (uint32_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vidupq_m_n_uv16qi (__arm_vuninitializedq_u8 (), __a, __imm, __p);
}

/* 16-bit element variant of the don't-care predicated VIDUP.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_x_n_u16 (uint32_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vidupq_m_n_uv8hi (__arm_vuninitializedq_u16 (), __a, __imm, __p);
}

/* 32-bit element variant of the don't-care predicated VIDUP.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_x_n_u32 (uint32_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vidupq_m_n_uv4si (__arm_vuninitializedq_u32 (), __a, __imm, __p);
}
14145 | ||
261014a1 | 14146 | __extension__ extern __inline uint8x16_t |
f9355dee | 14147 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14148 | __arm_vidupq_x_wb_u8 (uint32_t *__a, const int __imm, mve_pred16_t __p) |
f9355dee | 14149 | { |
c431634b | 14150 | uint8x16_t __arg1 = __arm_vuninitializedq_u8 (); |
261014a1 SP |
14151 | uint8x16_t __res = __builtin_mve_vidupq_m_n_uv16qi (__arg1, *__a, __imm, __p); |
14152 | *__a += __imm * 16u; | |
14153 | return __res; | |
f9355dee SP |
14154 | } |
14155 | ||
261014a1 | 14156 | __extension__ extern __inline uint16x8_t |
f9355dee | 14157 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14158 | __arm_vidupq_x_wb_u16 (uint32_t *__a, const int __imm, mve_pred16_t __p) |
f9355dee | 14159 | { |
c431634b | 14160 | uint16x8_t __arg1 = __arm_vuninitializedq_u16 (); |
261014a1 SP |
14161 | uint16x8_t __res = __builtin_mve_vidupq_m_n_uv8hi (__arg1, *__a, __imm, __p); |
14162 | *__a += __imm * 8u; | |
14163 | return __res; | |
f9355dee SP |
14164 | } |
14165 | ||
261014a1 | 14166 | __extension__ extern __inline uint32x4_t |
f9355dee | 14167 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 14168 | __arm_vidupq_x_wb_u32 (uint32_t *__a, const int __imm, mve_pred16_t __p) |
f9355dee | 14169 | { |
c431634b | 14170 | uint32x4_t __arg1 = __arm_vuninitializedq_u32 (); |
261014a1 SP |
14171 | uint32x4_t __res = __builtin_mve_vidupq_m_n_uv4si (__arg1, *__a, __imm, __p); |
14172 | *__a += __imm * 4u; | |
14173 | return __res; | |
f9355dee SP |
14174 | } |
14175 | ||
/* Don't-care predicated wrapping incrementing-duplicate (VIWDUP): the
   wrap limit __b is packed into the top half of a 64-bit operand as the
   builtin expects; inactive lanes are unspecified.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_x_n_u8 (uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  /* Pack the wrap value into bits [63:32] for the builtin.  */
  uint64_t __c = ((uint64_t) __b) << 32;
  return __builtin_mve_viwdupq_m_n_uv16qi (__arm_vuninitializedq_u8 (), __a, __c, __imm, __p);
}

/* 16-bit element variant of the don't-care predicated VIWDUP.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_x_n_u16 (uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  uint64_t __c = ((uint64_t) __b) << 32;
  return __builtin_mve_viwdupq_m_n_uv8hi (__arm_vuninitializedq_u16 (), __a, __c, __imm, __p);
}

/* 32-bit element variant of the don't-care predicated VIWDUP.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_x_n_u32 (uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  uint64_t __c = ((uint64_t) __b) << 32;
  return __builtin_mve_viwdupq_m_n_uv4si (__arm_vuninitializedq_u32 (), __a, __c, __imm, __p);
}
14199 | ||
/* Don't-care predicated VIWDUP with scalar write-back.  The "_n"
   builtin yields the vector result and the "_wb" builtin yields the
   updated (wrapped) scalar start value; both take the ORIGINAL *__a,
   so the write-back happens after the first call.  NOTE(review):
   presumably the compiler combines the identical-operand pair into a
   single VIWDUP — confirm against the MVE builtin expanders.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_x_wb_u8 (uint32_t *__a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  /* Wrap limit packed into bits [63:32].  */
  uint64_t __c = ((uint64_t) __b) << 32;
  uint8x16_t __arg1 = __arm_vuninitializedq_u8 ();
  uint8x16_t __res = __builtin_mve_viwdupq_m_n_uv16qi (__arg1, *__a, __c, __imm, __p);
  *__a = __builtin_mve_viwdupq_m_wb_uv16qi (__arg1, *__a, __c, __imm, __p);
  return __res;
}

/* 16-bit element variant of the write-back VIWDUP.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_x_wb_u16 (uint32_t *__a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  uint64_t __c = ((uint64_t) __b) << 32;
  uint16x8_t __arg1 = __arm_vuninitializedq_u16 ();
  uint16x8_t __res = __builtin_mve_viwdupq_m_n_uv8hi (__arg1, *__a, __c, __imm, __p);
  *__a = __builtin_mve_viwdupq_m_wb_uv8hi (__arg1, *__a, __c, __imm, __p);
  return __res;
}

/* 32-bit element variant of the write-back VIWDUP.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_x_wb_u32 (uint32_t *__a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  uint64_t __c = ((uint64_t) __b) << 32;
  uint32x4_t __arg1 = __arm_vuninitializedq_u32 ();
  uint32x4_t __res = __builtin_mve_viwdupq_m_n_uv4si (__arg1, *__a, __c, __imm, __p);
  *__a = __builtin_mve_viwdupq_m_wb_uv4si (__arg1, *__a, __c, __imm, __p);
  return __res;
}
14232 | ||
/* "_x" (don't-care) predicated scalar-duplicate (VDUP): lanes enabled
   by __p receive __a; inactive lanes take unspecified values from an
   uninitialized merge source.  One wrapper per element type.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdupq_x_n_s8 (int8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vdupq_m_n_sv16qi (__arm_vuninitializedq_s8 (), __a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdupq_x_n_s16 (int16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vdupq_m_n_sv8hi (__arm_vuninitializedq_s16 (), __a, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdupq_x_n_s32 (int32_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vdupq_m_n_sv4si (__arm_vuninitializedq_s32 (), __a, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdupq_x_n_u8 (uint8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vdupq_m_n_uv16qi (__arm_vuninitializedq_u8 (), __a, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdupq_x_n_u16 (uint16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vdupq_m_n_uv8hi (__arm_vuninitializedq_u16 (), __a, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdupq_x_n_u32 (uint32_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vdupq_m_n_uv4si (__arm_vuninitializedq_u32 (), __a, __p);
}
14274 | ||
/* "_x" (don't-care) predicated lane-wise minimum (VMIN): enabled lanes
   get min(__a, __b); inactive lanes are unspecified (uninitialized
   merge source).  One wrapper per element type.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
}
14316 | ||
/* "_x" (don't-care) predicated lane-wise maximum (VMAX): enabled lanes
   get max(__a, __b); inactive lanes are unspecified (uninitialized
   merge source).  One wrapper per element type.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
}
14358 | ||
/* "_x" (don't-care) predicated absolute difference (VABD): enabled
   lanes get |__a - __b|; inactive lanes are unspecified (uninitialized
   merge source).  One wrapper per element type.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vabdq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vabdq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vabdq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vabdq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vabdq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vabdq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
}
14400 | ||
/* "_x" (don't-care) predicated absolute value (VABS), signed element
   types only; inactive lanes are unspecified (uninitialized merge
   source).  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabsq_x_s8 (int8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vabsq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabsq_x_s16 (int16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vabsq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabsq_x_s32 (int32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vabsq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __p);
}
14421 | ||
/* "_x" (don't-care) predicated addition (VADD): vector+vector and
   vector+scalar ("_n") forms for each element type.  Enabled lanes get
   the sum; inactive lanes are unspecified (uninitialized merge
   source).  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vaddq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vaddq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vaddq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
}

/* Vector + scalar forms: __b is broadcast across the lanes.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_x_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vaddq_m_n_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_x_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vaddq_m_n_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_x_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vaddq_m_n_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vaddq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vaddq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vaddq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_x_n_u8 (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vaddq_m_n_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_x_n_u16 (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vaddq_m_n_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_x_n_u32 (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vaddq_m_n_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
}
14505 | ||
/* "_x" (don't-care) predicated count-leading-sign-bits (VCLS), signed
   element types only; inactive lanes are unspecified (uninitialized
   merge source).  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclsq_x_s8 (int8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vclsq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclsq_x_s16 (int16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vclsq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclsq_x_s32 (int32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vclsq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __p);
}
14526 | ||
/* "_x" (don't-care) predicated count-leading-zeros (VCLZ) for signed
   and unsigned element types; inactive lanes are unspecified
   (uninitialized merge source).  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclzq_x_s8 (int8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vclzq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclzq_x_s16 (int16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vclzq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclzq_x_s32 (int32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vclzq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclzq_x_u8 (uint8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vclzq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclzq_x_u16 (uint16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vclzq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclzq_x_u32 (uint32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vclzq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __p);
}
14568 | ||
/* Predicated "_x" forms of vnegq: per-lane negation of signed vectors.
   False-predicated result lanes are unspecified (merge source is an
   uninitialized vector).  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vnegq_x_s8 (int8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vnegq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vnegq_x_s16 (int16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vnegq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vnegq_x_s32 (int32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vnegq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __p);
}
14589 | ||
/* Predicated "_x" forms of vmulhq (multiply returning high half, per Arm
   MVE naming — result type equals operand type).  False-predicated result
   lanes are unspecified (merge source is an uninitialized vector).  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulhq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulhq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulhq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulhq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulhq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulhq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
}
14631 | ||
/* Predicated "_x" forms of vmullbq_poly: widening polynomial multiply —
   note the result type is double the operand element width (p8 -> u16,
   p16 -> u32).  The "b" suffix denotes the bottom element pairs per the
   Arm MVE naming convention.  False-predicated result lanes are
   unspecified (merge source is an uninitialized vector).  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_poly_x_p8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmullbq_poly_m_pv16qi (__arm_vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_poly_x_p16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmullbq_poly_m_pv8hi (__arm_vuninitializedq_u32 (), __a, __b, __p);
}
14645 | ||
/* Predicated "_x" forms of vmullbq_int: widening integer multiply of the
   bottom element pairs (per Arm MVE naming) — result element width is
   double the operand width (8 -> 16, 16 -> 32, 32 -> 64).  False-predicated
   result lanes are unspecified (merge source is an uninitialized vector).  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_int_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmullbq_int_m_sv16qi (__arm_vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_int_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmullbq_int_m_sv8hi (__arm_vuninitializedq_s32 (), __a, __b, __p);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_int_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmullbq_int_m_sv4si (__arm_vuninitializedq_s64 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_int_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmullbq_int_m_uv16qi (__arm_vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_int_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmullbq_int_m_uv8hi (__arm_vuninitializedq_u32 (), __a, __b, __p);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_int_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmullbq_int_m_uv4si (__arm_vuninitializedq_u64 (), __a, __b, __p);
}
14687 | ||
/* Predicated "_x" forms of vmulltq_poly: widening polynomial multiply of
   the top element pairs (per Arm MVE naming) — result type is double the
   operand element width (p8 -> u16, p16 -> u32).  False-predicated result
   lanes are unspecified (merge source is an uninitialized vector).  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_poly_x_p8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulltq_poly_m_pv16qi (__arm_vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_poly_x_p16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulltq_poly_m_pv8hi (__arm_vuninitializedq_u32 (), __a, __b, __p);
}
14701 | ||
/* Predicated "_x" forms of vmulltq_int: widening integer multiply of the
   top element pairs (per Arm MVE naming) — result element width is double
   the operand width (8 -> 16, 16 -> 32, 32 -> 64).  False-predicated result
   lanes are unspecified (merge source is an uninitialized vector).  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulltq_int_m_sv16qi (__arm_vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulltq_int_m_sv8hi (__arm_vuninitializedq_s32 (), __a, __b, __p);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulltq_int_m_sv4si (__arm_vuninitializedq_s64 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulltq_int_m_uv16qi (__arm_vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulltq_int_m_uv8hi (__arm_vuninitializedq_u32 (), __a, __b, __p);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulltq_int_m_uv4si (__arm_vuninitializedq_u64 (), __a, __b, __p);
}
14743 | ||
/* Predicated "_x" forms of vmulq: per-lane multiply, in both
   vector-by-vector and vector-by-scalar ("_n") variants, for signed and
   unsigned element types.  False-predicated result lanes are unspecified
   (merge source is an uninitialized vector).  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
}

/* Vector-by-scalar variants: __b is broadcast across all lanes.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_x_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_n_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_x_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_n_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_x_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_n_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_x_n_u8 (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_n_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_x_n_u16 (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_n_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_x_n_u32 (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_n_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
}
14827 | ||
/* Predicated "_x" forms of vsubq: per-lane subtraction (__a - __b), in
   both vector-by-vector and vector-by-scalar ("_n") variants, for signed
   and unsigned element types.  False-predicated result lanes are
   unspecified (merge source is an uninitialized vector).  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vsubq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vsubq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vsubq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
}

/* Vector-by-scalar variants: __b is broadcast across all lanes.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_x_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vsubq_m_n_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_x_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vsubq_m_n_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_x_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vsubq_m_n_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vsubq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vsubq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vsubq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_x_n_u8 (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vsubq_m_n_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_x_n_u16 (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vsubq_m_n_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_x_n_u32 (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vsubq_m_n_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
}
14911 | ||
/* Predicated "_x" forms of vcaddq_rot90: complex add with 90-degree
   rotation of the second operand (per Arm MVE naming), for signed and
   unsigned element types.  False-predicated result lanes are unspecified
   (merge source is an uninitialized vector).  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcaddq_rot90_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcaddq_rot90_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcaddq_rot90_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcaddq_rot90_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcaddq_rot90_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcaddq_rot90_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
}
14953 | ||
/* Predicated "_x" forms of vcaddq_rot270: complex add with 270-degree
   rotation of the second operand (per Arm MVE naming), for signed and
   unsigned element types.  False-predicated result lanes are unspecified
   (merge source is an uninitialized vector).  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcaddq_rot270_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcaddq_rot270_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcaddq_rot270_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcaddq_rot270_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcaddq_rot270_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcaddq_rot270_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
}
14995 | ||
/* Predicated "_x" vector-by-scalar forms of vhaddq (halving add, per Arm
   MVE naming): __b is broadcast across all lanes.  False-predicated
   result lanes are unspecified (merge source is an uninitialized
   vector).  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_x_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhaddq_m_n_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_x_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhaddq_m_n_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_x_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhaddq_m_n_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_x_n_u8 (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhaddq_m_n_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_x_n_u16 (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhaddq_m_n_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_x_n_u32 (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhaddq_m_n_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
}
15037 | ||
/* Predicated "_x" vector-by-vector forms of vhaddq (halving add, per Arm
   MVE naming), for signed and unsigned element types.  False-predicated
   result lanes are unspecified (merge source is an uninitialized
   vector).  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhaddq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhaddq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhaddq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhaddq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhaddq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhaddq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
}
15079 | ||
/* Predicated "_x" forms of vhcaddq_rot90: halving complex add with
   90-degree rotation (per Arm MVE naming), signed element types only.
   False-predicated result lanes are unspecified (merge source is an
   uninitialized vector).  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhcaddq_rot90_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhcaddq_rot90_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhcaddq_rot90_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhcaddq_rot90_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhcaddq_rot90_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhcaddq_rot90_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
}
15100 | ||
261014a1 | 15101 | __extension__ extern __inline int8x16_t |
e3678b44 | 15102 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15103 | __arm_vhcaddq_rot270_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) |
e3678b44 | 15104 | { |
c431634b | 15105 | return __builtin_mve_vhcaddq_rot270_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p); |
e3678b44 SP |
15106 | } |
15107 | ||
261014a1 | 15108 | __extension__ extern __inline int16x8_t |
e3678b44 | 15109 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15110 | __arm_vhcaddq_rot270_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) |
e3678b44 | 15111 | { |
c431634b | 15112 | return __builtin_mve_vhcaddq_rot270_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p); |
e3678b44 SP |
15113 | } |
15114 | ||
261014a1 | 15115 | __extension__ extern __inline int32x4_t |
e3678b44 | 15116 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15117 | __arm_vhcaddq_rot270_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) |
e3678b44 | 15118 | { |
c431634b | 15119 | return __builtin_mve_vhcaddq_rot270_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p); |
e3678b44 SP |
15120 | } |
15121 | ||
261014a1 | 15122 | __extension__ extern __inline int8x16_t |
e3678b44 | 15123 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15124 | __arm_vhsubq_x_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p) |
e3678b44 | 15125 | { |
c431634b | 15126 | return __builtin_mve_vhsubq_m_n_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p); |
e3678b44 SP |
15127 | } |
15128 | ||
261014a1 | 15129 | __extension__ extern __inline int16x8_t |
e3678b44 | 15130 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15131 | __arm_vhsubq_x_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p) |
e3678b44 | 15132 | { |
c431634b | 15133 | return __builtin_mve_vhsubq_m_n_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p); |
e3678b44 SP |
15134 | } |
15135 | ||
261014a1 | 15136 | __extension__ extern __inline int32x4_t |
e3678b44 | 15137 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15138 | __arm_vhsubq_x_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p) |
e3678b44 | 15139 | { |
c431634b | 15140 | return __builtin_mve_vhsubq_m_n_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p); |
e3678b44 SP |
15141 | } |
15142 | ||
261014a1 | 15143 | __extension__ extern __inline uint8x16_t |
e3678b44 | 15144 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15145 | __arm_vhsubq_x_n_u8 (uint8x16_t __a, uint8_t __b, mve_pred16_t __p) |
e3678b44 | 15146 | { |
c431634b | 15147 | return __builtin_mve_vhsubq_m_n_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p); |
e3678b44 SP |
15148 | } |
15149 | ||
261014a1 | 15150 | __extension__ extern __inline uint16x8_t |
e3678b44 | 15151 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15152 | __arm_vhsubq_x_n_u16 (uint16x8_t __a, uint16_t __b, mve_pred16_t __p) |
e3678b44 | 15153 | { |
c431634b | 15154 | return __builtin_mve_vhsubq_m_n_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p); |
e3678b44 SP |
15155 | } |
15156 | ||
261014a1 | 15157 | __extension__ extern __inline uint32x4_t |
e3678b44 | 15158 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15159 | __arm_vhsubq_x_n_u32 (uint32x4_t __a, uint32_t __b, mve_pred16_t __p) |
e3678b44 | 15160 | { |
c431634b | 15161 | return __builtin_mve_vhsubq_m_n_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p); |
e3678b44 SP |
15162 | } |
15163 | ||
261014a1 | 15164 | __extension__ extern __inline int8x16_t |
e3678b44 | 15165 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15166 | __arm_vhsubq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) |
e3678b44 | 15167 | { |
c431634b | 15168 | return __builtin_mve_vhsubq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p); |
e3678b44 SP |
15169 | } |
15170 | ||
261014a1 | 15171 | __extension__ extern __inline int16x8_t |
e3678b44 | 15172 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15173 | __arm_vhsubq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) |
e3678b44 | 15174 | { |
c431634b | 15175 | return __builtin_mve_vhsubq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p); |
e3678b44 SP |
15176 | } |
15177 | ||
261014a1 | 15178 | __extension__ extern __inline int32x4_t |
e3678b44 | 15179 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15180 | __arm_vhsubq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) |
e3678b44 | 15181 | { |
c431634b | 15182 | return __builtin_mve_vhsubq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p); |
e3678b44 SP |
15183 | } |
15184 | ||
261014a1 | 15185 | __extension__ extern __inline uint8x16_t |
e3678b44 | 15186 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15187 | __arm_vhsubq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) |
e3678b44 | 15188 | { |
c431634b | 15189 | return __builtin_mve_vhsubq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p); |
e3678b44 SP |
15190 | } |
15191 | ||
261014a1 | 15192 | __extension__ extern __inline uint16x8_t |
e3678b44 | 15193 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15194 | __arm_vhsubq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) |
e3678b44 | 15195 | { |
c431634b | 15196 | return __builtin_mve_vhsubq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p); |
e3678b44 SP |
15197 | } |
15198 | ||
261014a1 | 15199 | __extension__ extern __inline uint32x4_t |
e3678b44 | 15200 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15201 | __arm_vhsubq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) |
e3678b44 | 15202 | { |
c431634b | 15203 | return __builtin_mve_vhsubq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p); |
e3678b44 SP |
15204 | } |
15205 | ||
261014a1 | 15206 | __extension__ extern __inline int8x16_t |
e3678b44 | 15207 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15208 | __arm_vrhaddq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) |
e3678b44 | 15209 | { |
c431634b | 15210 | return __builtin_mve_vrhaddq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p); |
e3678b44 SP |
15211 | } |
15212 | ||
15213 | __extension__ extern __inline int16x8_t | |
15214 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 15215 | __arm_vrhaddq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) |
e3678b44 | 15216 | { |
c431634b | 15217 | return __builtin_mve_vrhaddq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p); |
e3678b44 SP |
15218 | } |
15219 | ||
261014a1 | 15220 | __extension__ extern __inline int32x4_t |
e3678b44 | 15221 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15222 | __arm_vrhaddq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) |
e3678b44 | 15223 | { |
c431634b | 15224 | return __builtin_mve_vrhaddq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p); |
e3678b44 SP |
15225 | } |
15226 | ||
261014a1 | 15227 | __extension__ extern __inline uint8x16_t |
e3678b44 | 15228 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15229 | __arm_vrhaddq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) |
e3678b44 | 15230 | { |
c431634b | 15231 | return __builtin_mve_vrhaddq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p); |
e3678b44 SP |
15232 | } |
15233 | ||
261014a1 | 15234 | __extension__ extern __inline uint16x8_t |
e3678b44 | 15235 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15236 | __arm_vrhaddq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) |
e3678b44 | 15237 | { |
c431634b | 15238 | return __builtin_mve_vrhaddq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p); |
e3678b44 SP |
15239 | } |
15240 | ||
261014a1 | 15241 | __extension__ extern __inline uint32x4_t |
e3678b44 | 15242 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15243 | __arm_vrhaddq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) |
e3678b44 | 15244 | { |
c431634b | 15245 | return __builtin_mve_vrhaddq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p); |
e3678b44 SP |
15246 | } |
15247 | ||
261014a1 | 15248 | __extension__ extern __inline int8x16_t |
e3678b44 | 15249 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15250 | __arm_vrmulhq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) |
e3678b44 | 15251 | { |
c431634b | 15252 | return __builtin_mve_vrmulhq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p); |
e3678b44 SP |
15253 | } |
15254 | ||
261014a1 | 15255 | __extension__ extern __inline int16x8_t |
e3678b44 | 15256 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15257 | __arm_vrmulhq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) |
e3678b44 | 15258 | { |
c431634b | 15259 | return __builtin_mve_vrmulhq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p); |
e3678b44 SP |
15260 | } |
15261 | ||
261014a1 | 15262 | __extension__ extern __inline int32x4_t |
e3678b44 | 15263 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15264 | __arm_vrmulhq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) |
e3678b44 | 15265 | { |
c431634b | 15266 | return __builtin_mve_vrmulhq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p); |
e3678b44 SP |
15267 | } |
15268 | ||
261014a1 | 15269 | __extension__ extern __inline uint8x16_t |
e3678b44 | 15270 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15271 | __arm_vrmulhq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) |
e3678b44 | 15272 | { |
c431634b | 15273 | return __builtin_mve_vrmulhq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p); |
e3678b44 SP |
15274 | } |
15275 | ||
261014a1 | 15276 | __extension__ extern __inline uint16x8_t |
e3678b44 | 15277 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15278 | __arm_vrmulhq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) |
e3678b44 | 15279 | { |
c431634b | 15280 | return __builtin_mve_vrmulhq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p); |
e3678b44 SP |
15281 | } |
15282 | ||
261014a1 | 15283 | __extension__ extern __inline uint32x4_t |
e3678b44 | 15284 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15285 | __arm_vrmulhq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) |
e3678b44 | 15286 | { |
c431634b | 15287 | return __builtin_mve_vrmulhq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p); |
e3678b44 SP |
15288 | } |
15289 | ||
261014a1 | 15290 | __extension__ extern __inline int8x16_t |
e3678b44 | 15291 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15292 | __arm_vandq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) |
e3678b44 | 15293 | { |
c431634b | 15294 | return __builtin_mve_vandq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p); |
e3678b44 SP |
15295 | } |
15296 | ||
261014a1 | 15297 | __extension__ extern __inline int16x8_t |
e3678b44 | 15298 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15299 | __arm_vandq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) |
e3678b44 | 15300 | { |
c431634b | 15301 | return __builtin_mve_vandq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p); |
e3678b44 SP |
15302 | } |
15303 | ||
261014a1 | 15304 | __extension__ extern __inline int32x4_t |
e3678b44 | 15305 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15306 | __arm_vandq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) |
e3678b44 | 15307 | { |
c431634b | 15308 | return __builtin_mve_vandq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p); |
e3678b44 SP |
15309 | } |
15310 | ||
261014a1 | 15311 | __extension__ extern __inline uint8x16_t |
e3678b44 | 15312 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15313 | __arm_vandq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) |
e3678b44 | 15314 | { |
c431634b | 15315 | return __builtin_mve_vandq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p); |
e3678b44 SP |
15316 | } |
15317 | ||
261014a1 | 15318 | __extension__ extern __inline uint16x8_t |
e3678b44 | 15319 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15320 | __arm_vandq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) |
e3678b44 | 15321 | { |
c431634b | 15322 | return __builtin_mve_vandq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p); |
e3678b44 SP |
15323 | } |
15324 | ||
261014a1 | 15325 | __extension__ extern __inline uint32x4_t |
e3678b44 | 15326 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15327 | __arm_vandq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) |
e3678b44 | 15328 | { |
c431634b | 15329 | return __builtin_mve_vandq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p); |
e3678b44 SP |
15330 | } |
15331 | ||
261014a1 | 15332 | __extension__ extern __inline int8x16_t |
e3678b44 | 15333 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15334 | __arm_vbicq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) |
e3678b44 | 15335 | { |
c431634b | 15336 | return __builtin_mve_vbicq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p); |
e3678b44 SP |
15337 | } |
15338 | ||
261014a1 | 15339 | __extension__ extern __inline int16x8_t |
e3678b44 | 15340 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15341 | __arm_vbicq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) |
e3678b44 | 15342 | { |
c431634b | 15343 | return __builtin_mve_vbicq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p); |
e3678b44 SP |
15344 | } |
15345 | ||
261014a1 | 15346 | __extension__ extern __inline int32x4_t |
e3678b44 | 15347 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15348 | __arm_vbicq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) |
e3678b44 | 15349 | { |
c431634b | 15350 | return __builtin_mve_vbicq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p); |
e3678b44 SP |
15351 | } |
15352 | ||
261014a1 | 15353 | __extension__ extern __inline uint8x16_t |
e3678b44 | 15354 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15355 | __arm_vbicq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) |
e3678b44 | 15356 | { |
c431634b | 15357 | return __builtin_mve_vbicq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p); |
e3678b44 SP |
15358 | } |
15359 | ||
261014a1 | 15360 | __extension__ extern __inline uint16x8_t |
e3678b44 | 15361 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15362 | __arm_vbicq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) |
e3678b44 | 15363 | { |
c431634b | 15364 | return __builtin_mve_vbicq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p); |
e3678b44 SP |
15365 | } |
15366 | ||
261014a1 | 15367 | __extension__ extern __inline uint32x4_t |
e3678b44 | 15368 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15369 | __arm_vbicq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) |
e3678b44 | 15370 | { |
c431634b | 15371 | return __builtin_mve_vbicq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p); |
e3678b44 SP |
15372 | } |
15373 | ||
261014a1 | 15374 | __extension__ extern __inline int8x16_t |
e3678b44 | 15375 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15376 | __arm_vbrsrq_x_n_s8 (int8x16_t __a, int32_t __b, mve_pred16_t __p) |
e3678b44 | 15377 | { |
c431634b | 15378 | return __builtin_mve_vbrsrq_m_n_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p); |
e3678b44 SP |
15379 | } |
15380 | ||
261014a1 | 15381 | __extension__ extern __inline int16x8_t |
e3678b44 | 15382 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15383 | __arm_vbrsrq_x_n_s16 (int16x8_t __a, int32_t __b, mve_pred16_t __p) |
e3678b44 | 15384 | { |
c431634b | 15385 | return __builtin_mve_vbrsrq_m_n_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p); |
e3678b44 SP |
15386 | } |
15387 | ||
261014a1 | 15388 | __extension__ extern __inline int32x4_t |
e3678b44 | 15389 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15390 | __arm_vbrsrq_x_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p) |
e3678b44 | 15391 | { |
c431634b | 15392 | return __builtin_mve_vbrsrq_m_n_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p); |
e3678b44 SP |
15393 | } |
15394 | ||
261014a1 | 15395 | __extension__ extern __inline uint8x16_t |
e3678b44 | 15396 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15397 | __arm_vbrsrq_x_n_u8 (uint8x16_t __a, int32_t __b, mve_pred16_t __p) |
e3678b44 | 15398 | { |
c431634b | 15399 | return __builtin_mve_vbrsrq_m_n_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p); |
e3678b44 SP |
15400 | } |
15401 | ||
261014a1 | 15402 | __extension__ extern __inline uint16x8_t |
e3678b44 | 15403 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15404 | __arm_vbrsrq_x_n_u16 (uint16x8_t __a, int32_t __b, mve_pred16_t __p) |
e3678b44 | 15405 | { |
c431634b | 15406 | return __builtin_mve_vbrsrq_m_n_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p); |
e3678b44 SP |
15407 | } |
15408 | ||
261014a1 | 15409 | __extension__ extern __inline uint32x4_t |
e3678b44 | 15410 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15411 | __arm_vbrsrq_x_n_u32 (uint32x4_t __a, int32_t __b, mve_pred16_t __p) |
e3678b44 | 15412 | { |
c431634b | 15413 | return __builtin_mve_vbrsrq_m_n_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p); |
e3678b44 SP |
15414 | } |
15415 | ||
261014a1 | 15416 | __extension__ extern __inline int8x16_t |
e3678b44 | 15417 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15418 | __arm_veorq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) |
e3678b44 | 15419 | { |
c431634b | 15420 | return __builtin_mve_veorq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p); |
e3678b44 SP |
15421 | } |
15422 | ||
261014a1 | 15423 | __extension__ extern __inline int16x8_t |
e3678b44 | 15424 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15425 | __arm_veorq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) |
e3678b44 | 15426 | { |
c431634b | 15427 | return __builtin_mve_veorq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p); |
e3678b44 SP |
15428 | } |
15429 | ||
261014a1 | 15430 | __extension__ extern __inline int32x4_t |
e3678b44 | 15431 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15432 | __arm_veorq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) |
e3678b44 | 15433 | { |
c431634b | 15434 | return __builtin_mve_veorq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p); |
e3678b44 SP |
15435 | } |
15436 | ||
261014a1 | 15437 | __extension__ extern __inline uint8x16_t |
e3678b44 | 15438 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15439 | __arm_veorq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) |
e3678b44 | 15440 | { |
c431634b | 15441 | return __builtin_mve_veorq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p); |
e3678b44 SP |
15442 | } |
15443 | ||
15444 | __extension__ extern __inline uint16x8_t | |
15445 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 15446 | __arm_veorq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) |
e3678b44 | 15447 | { |
c431634b | 15448 | return __builtin_mve_veorq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p); |
e3678b44 SP |
15449 | } |
15450 | ||
261014a1 | 15451 | __extension__ extern __inline uint32x4_t |
e3678b44 | 15452 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15453 | __arm_veorq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) |
e3678b44 | 15454 | { |
c431634b | 15455 | return __builtin_mve_veorq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p); |
e3678b44 SP |
15456 | } |
15457 | ||
261014a1 | 15458 | __extension__ extern __inline int16x8_t |
e3678b44 | 15459 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15460 | __arm_vmovlbq_x_s8 (int8x16_t __a, mve_pred16_t __p) |
e3678b44 | 15461 | { |
c431634b | 15462 | return __builtin_mve_vmovlbq_m_sv16qi (__arm_vuninitializedq_s16 (), __a, __p); |
e3678b44 SP |
15463 | } |
15464 | ||
261014a1 | 15465 | __extension__ extern __inline int32x4_t |
e3678b44 | 15466 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15467 | __arm_vmovlbq_x_s16 (int16x8_t __a, mve_pred16_t __p) |
e3678b44 | 15468 | { |
c431634b | 15469 | return __builtin_mve_vmovlbq_m_sv8hi (__arm_vuninitializedq_s32 (), __a, __p); |
e3678b44 SP |
15470 | } |
15471 | ||
261014a1 | 15472 | __extension__ extern __inline uint16x8_t |
e3678b44 | 15473 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15474 | __arm_vmovlbq_x_u8 (uint8x16_t __a, mve_pred16_t __p) |
e3678b44 | 15475 | { |
c431634b | 15476 | return __builtin_mve_vmovlbq_m_uv16qi (__arm_vuninitializedq_u16 (), __a, __p); |
e3678b44 SP |
15477 | } |
15478 | ||
261014a1 | 15479 | __extension__ extern __inline uint32x4_t |
e3678b44 | 15480 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15481 | __arm_vmovlbq_x_u16 (uint16x8_t __a, mve_pred16_t __p) |
e3678b44 | 15482 | { |
c431634b | 15483 | return __builtin_mve_vmovlbq_m_uv8hi (__arm_vuninitializedq_u32 (), __a, __p); |
e3678b44 SP |
15484 | } |
15485 | ||
261014a1 | 15486 | __extension__ extern __inline int16x8_t |
e3678b44 | 15487 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15488 | __arm_vmovltq_x_s8 (int8x16_t __a, mve_pred16_t __p) |
e3678b44 | 15489 | { |
c431634b | 15490 | return __builtin_mve_vmovltq_m_sv16qi (__arm_vuninitializedq_s16 (), __a, __p); |
e3678b44 SP |
15491 | } |
15492 | ||
261014a1 | 15493 | __extension__ extern __inline int32x4_t |
e3678b44 | 15494 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15495 | __arm_vmovltq_x_s16 (int16x8_t __a, mve_pred16_t __p) |
e3678b44 | 15496 | { |
c431634b | 15497 | return __builtin_mve_vmovltq_m_sv8hi (__arm_vuninitializedq_s32 (), __a, __p); |
e3678b44 SP |
15498 | } |
15499 | ||
261014a1 | 15500 | __extension__ extern __inline uint16x8_t |
e3678b44 | 15501 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15502 | __arm_vmovltq_x_u8 (uint8x16_t __a, mve_pred16_t __p) |
e3678b44 | 15503 | { |
c431634b | 15504 | return __builtin_mve_vmovltq_m_uv16qi (__arm_vuninitializedq_u16 (), __a, __p); |
e3678b44 SP |
15505 | } |
15506 | ||
261014a1 | 15507 | __extension__ extern __inline uint32x4_t |
e3678b44 | 15508 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15509 | __arm_vmovltq_x_u16 (uint16x8_t __a, mve_pred16_t __p) |
e3678b44 | 15510 | { |
c431634b | 15511 | return __builtin_mve_vmovltq_m_uv8hi (__arm_vuninitializedq_u32 (), __a, __p); |
e3678b44 SP |
15512 | } |
15513 | ||
261014a1 | 15514 | __extension__ extern __inline int8x16_t |
e3678b44 | 15515 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15516 | __arm_vmvnq_x_s8 (int8x16_t __a, mve_pred16_t __p) |
e3678b44 | 15517 | { |
c431634b | 15518 | return __builtin_mve_vmvnq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __p); |
e3678b44 SP |
15519 | } |
15520 | ||
261014a1 | 15521 | __extension__ extern __inline int16x8_t |
e3678b44 | 15522 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15523 | __arm_vmvnq_x_s16 (int16x8_t __a, mve_pred16_t __p) |
e3678b44 | 15524 | { |
c431634b | 15525 | return __builtin_mve_vmvnq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __p); |
e3678b44 SP |
15526 | } |
15527 | ||
15528 | __extension__ extern __inline int32x4_t | |
15529 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 15530 | __arm_vmvnq_x_s32 (int32x4_t __a, mve_pred16_t __p) |
e3678b44 | 15531 | { |
c431634b | 15532 | return __builtin_mve_vmvnq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __p); |
e3678b44 SP |
15533 | } |
15534 | ||
261014a1 | 15535 | __extension__ extern __inline uint8x16_t |
e3678b44 | 15536 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15537 | __arm_vmvnq_x_u8 (uint8x16_t __a, mve_pred16_t __p) |
e3678b44 | 15538 | { |
c431634b | 15539 | return __builtin_mve_vmvnq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __p); |
e3678b44 SP |
15540 | } |
15541 | ||
261014a1 | 15542 | __extension__ extern __inline uint16x8_t |
e3678b44 | 15543 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15544 | __arm_vmvnq_x_u16 (uint16x8_t __a, mve_pred16_t __p) |
e3678b44 | 15545 | { |
c431634b | 15546 | return __builtin_mve_vmvnq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __p); |
e3678b44 SP |
15547 | } |
15548 | ||
261014a1 | 15549 | __extension__ extern __inline uint32x4_t |
e3678b44 | 15550 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15551 | __arm_vmvnq_x_u32 (uint32x4_t __a, mve_pred16_t __p) |
e3678b44 | 15552 | { |
c431634b | 15553 | return __builtin_mve_vmvnq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __p); |
e3678b44 SP |
15554 | } |
15555 | ||
261014a1 | 15556 | __extension__ extern __inline int16x8_t |
e3678b44 | 15557 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15558 | __arm_vmvnq_x_n_s16 (const int __imm, mve_pred16_t __p) |
e3678b44 | 15559 | { |
c431634b | 15560 | return __builtin_mve_vmvnq_m_n_sv8hi (__arm_vuninitializedq_s16 (), __imm, __p); |
e3678b44 SP |
15561 | } |
15562 | ||
261014a1 | 15563 | __extension__ extern __inline int32x4_t |
e3678b44 | 15564 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15565 | __arm_vmvnq_x_n_s32 (const int __imm, mve_pred16_t __p) |
e3678b44 | 15566 | { |
c431634b | 15567 | return __builtin_mve_vmvnq_m_n_sv4si (__arm_vuninitializedq_s32 (), __imm, __p); |
e3678b44 SP |
15568 | } |
15569 | ||
261014a1 | 15570 | __extension__ extern __inline uint16x8_t |
e3678b44 | 15571 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15572 | __arm_vmvnq_x_n_u16 (const int __imm, mve_pred16_t __p) |
e3678b44 | 15573 | { |
c431634b | 15574 | return __builtin_mve_vmvnq_m_n_uv8hi (__arm_vuninitializedq_u16 (), __imm, __p); |
e3678b44 SP |
15575 | } |
15576 | ||
261014a1 | 15577 | __extension__ extern __inline uint32x4_t |
e3678b44 | 15578 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15579 | __arm_vmvnq_x_n_u32 (const int __imm, mve_pred16_t __p) |
e3678b44 | 15580 | { |
c431634b | 15581 | return __builtin_mve_vmvnq_m_n_uv4si (__arm_vuninitializedq_u32 (), __imm, __p); |
e3678b44 SP |
15582 | } |
15583 | ||
261014a1 | 15584 | __extension__ extern __inline int8x16_t |
e3678b44 | 15585 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15586 | __arm_vornq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) |
e3678b44 | 15587 | { |
c431634b | 15588 | return __builtin_mve_vornq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p); |
e3678b44 SP |
15589 | } |
15590 | ||
261014a1 | 15591 | __extension__ extern __inline int16x8_t |
e3678b44 | 15592 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15593 | __arm_vornq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) |
e3678b44 | 15594 | { |
c431634b | 15595 | return __builtin_mve_vornq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p); |
e3678b44 SP |
15596 | } |
15597 | ||
261014a1 | 15598 | __extension__ extern __inline int32x4_t |
e3678b44 | 15599 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15600 | __arm_vornq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) |
e3678b44 | 15601 | { |
c431634b | 15602 | return __builtin_mve_vornq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p); |
e3678b44 SP |
15603 | } |
15604 | ||
261014a1 | 15605 | __extension__ extern __inline uint8x16_t |
e3678b44 | 15606 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15607 | __arm_vornq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) |
e3678b44 | 15608 | { |
c431634b | 15609 | return __builtin_mve_vornq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p); |
e3678b44 SP |
15610 | } |
15611 | ||
261014a1 | 15612 | __extension__ extern __inline uint16x8_t |
e3678b44 | 15613 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15614 | __arm_vornq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) |
e3678b44 | 15615 | { |
c431634b | 15616 | return __builtin_mve_vornq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p); |
e3678b44 SP |
15617 | } |
15618 | ||
261014a1 | 15619 | __extension__ extern __inline uint32x4_t |
e3678b44 | 15620 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15621 | __arm_vornq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) |
e3678b44 | 15622 | { |
c431634b | 15623 | return __builtin_mve_vornq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p); |
e3678b44 SP |
15624 | } |
15625 | ||
261014a1 | 15626 | __extension__ extern __inline int8x16_t |
e3678b44 | 15627 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15628 | __arm_vorrq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) |
e3678b44 | 15629 | { |
c431634b | 15630 | return __builtin_mve_vorrq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p); |
e3678b44 SP |
15631 | } |
15632 | ||
261014a1 | 15633 | __extension__ extern __inline int16x8_t |
e3678b44 | 15634 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15635 | __arm_vorrq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) |
e3678b44 | 15636 | { |
c431634b | 15637 | return __builtin_mve_vorrq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p); |
e3678b44 SP |
15638 | } |
15639 | ||
261014a1 | 15640 | __extension__ extern __inline int32x4_t |
e3678b44 | 15641 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15642 | __arm_vorrq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) |
e3678b44 | 15643 | { |
c431634b | 15644 | return __builtin_mve_vorrq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p); |
e3678b44 SP |
15645 | } |
15646 | ||
261014a1 | 15647 | __extension__ extern __inline uint8x16_t |
e3678b44 | 15648 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15649 | __arm_vorrq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) |
e3678b44 | 15650 | { |
c431634b | 15651 | return __builtin_mve_vorrq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p); |
e3678b44 SP |
15652 | } |
15653 | ||
261014a1 | 15654 | __extension__ extern __inline uint16x8_t |
e3678b44 | 15655 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15656 | __arm_vorrq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) |
e3678b44 | 15657 | { |
c431634b | 15658 | return __builtin_mve_vorrq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p); |
e3678b44 SP |
15659 | } |
15660 | ||
261014a1 | 15661 | __extension__ extern __inline uint32x4_t |
e3678b44 | 15662 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15663 | __arm_vorrq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) |
e3678b44 | 15664 | { |
c431634b | 15665 | return __builtin_mve_vorrq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p); |
e3678b44 SP |
15666 | } |
15667 | ||
261014a1 | 15668 | __extension__ extern __inline int8x16_t |
e3678b44 | 15669 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15670 | __arm_vrev16q_x_s8 (int8x16_t __a, mve_pred16_t __p) |
e3678b44 | 15671 | { |
c431634b | 15672 | return __builtin_mve_vrev16q_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __p); |
e3678b44 SP |
15673 | } |
15674 | ||
261014a1 | 15675 | __extension__ extern __inline uint8x16_t |
e3678b44 | 15676 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15677 | __arm_vrev16q_x_u8 (uint8x16_t __a, mve_pred16_t __p) |
e3678b44 | 15678 | { |
c431634b | 15679 | return __builtin_mve_vrev16q_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __p); |
e3678b44 SP |
15680 | } |
15681 | ||
261014a1 | 15682 | __extension__ extern __inline int8x16_t |
e3678b44 | 15683 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15684 | __arm_vrev32q_x_s8 (int8x16_t __a, mve_pred16_t __p) |
e3678b44 | 15685 | { |
c431634b | 15686 | return __builtin_mve_vrev32q_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __p); |
e3678b44 SP |
15687 | } |
15688 | ||
261014a1 | 15689 | __extension__ extern __inline int16x8_t |
e3678b44 | 15690 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15691 | __arm_vrev32q_x_s16 (int16x8_t __a, mve_pred16_t __p) |
e3678b44 | 15692 | { |
c431634b | 15693 | return __builtin_mve_vrev32q_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __p); |
e3678b44 SP |
15694 | } |
15695 | ||
261014a1 | 15696 | __extension__ extern __inline uint8x16_t |
e3678b44 | 15697 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15698 | __arm_vrev32q_x_u8 (uint8x16_t __a, mve_pred16_t __p) |
e3678b44 | 15699 | { |
c431634b | 15700 | return __builtin_mve_vrev32q_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __p); |
e3678b44 SP |
15701 | } |
15702 | ||
261014a1 | 15703 | __extension__ extern __inline uint16x8_t |
e3678b44 | 15704 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15705 | __arm_vrev32q_x_u16 (uint16x8_t __a, mve_pred16_t __p) |
e3678b44 | 15706 | { |
c431634b | 15707 | return __builtin_mve_vrev32q_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __p); |
e3678b44 SP |
15708 | } |
15709 | ||
261014a1 | 15710 | __extension__ extern __inline int8x16_t |
e3678b44 | 15711 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15712 | __arm_vrev64q_x_s8 (int8x16_t __a, mve_pred16_t __p) |
e3678b44 | 15713 | { |
c431634b | 15714 | return __builtin_mve_vrev64q_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __p); |
e3678b44 SP |
15715 | } |
15716 | ||
261014a1 | 15717 | __extension__ extern __inline int16x8_t |
e3678b44 | 15718 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15719 | __arm_vrev64q_x_s16 (int16x8_t __a, mve_pred16_t __p) |
e3678b44 | 15720 | { |
c431634b | 15721 | return __builtin_mve_vrev64q_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __p); |
e3678b44 SP |
15722 | } |
15723 | ||
261014a1 | 15724 | __extension__ extern __inline int32x4_t |
e3678b44 | 15725 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15726 | __arm_vrev64q_x_s32 (int32x4_t __a, mve_pred16_t __p) |
e3678b44 | 15727 | { |
c431634b | 15728 | return __builtin_mve_vrev64q_m_sv4si (__arm_vuninitializedq_s32 (), __a, __p); |
e3678b44 SP |
15729 | } |
15730 | ||
261014a1 | 15731 | __extension__ extern __inline uint8x16_t |
e3678b44 | 15732 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15733 | __arm_vrev64q_x_u8 (uint8x16_t __a, mve_pred16_t __p) |
e3678b44 | 15734 | { |
c431634b | 15735 | return __builtin_mve_vrev64q_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __p); |
e3678b44 SP |
15736 | } |
15737 | ||
261014a1 | 15738 | __extension__ extern __inline uint16x8_t |
e3678b44 | 15739 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15740 | __arm_vrev64q_x_u16 (uint16x8_t __a, mve_pred16_t __p) |
e3678b44 | 15741 | { |
c431634b | 15742 | return __builtin_mve_vrev64q_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __p); |
e3678b44 SP |
15743 | } |
15744 | ||
15745 | __extension__ extern __inline uint32x4_t | |
15746 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 15747 | __arm_vrev64q_x_u32 (uint32x4_t __a, mve_pred16_t __p) |
e3678b44 | 15748 | { |
c431634b | 15749 | return __builtin_mve_vrev64q_m_uv4si (__arm_vuninitializedq_u32 (), __a, __p); |
e3678b44 SP |
15750 | } |
15751 | ||
261014a1 | 15752 | __extension__ extern __inline int8x16_t |
e3678b44 | 15753 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15754 | __arm_vrshlq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) |
e3678b44 | 15755 | { |
c431634b | 15756 | return __builtin_mve_vrshlq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p); |
e3678b44 SP |
15757 | } |
15758 | ||
261014a1 | 15759 | __extension__ extern __inline int16x8_t |
db5db9d2 | 15760 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15761 | __arm_vrshlq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) |
db5db9d2 | 15762 | { |
c431634b | 15763 | return __builtin_mve_vrshlq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p); |
db5db9d2 SP |
15764 | } |
15765 | ||
261014a1 | 15766 | __extension__ extern __inline int32x4_t |
db5db9d2 | 15767 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15768 | __arm_vrshlq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) |
db5db9d2 | 15769 | { |
c431634b | 15770 | return __builtin_mve_vrshlq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p); |
db5db9d2 SP |
15771 | } |
15772 | ||
261014a1 | 15773 | __extension__ extern __inline uint8x16_t |
db5db9d2 | 15774 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15775 | __arm_vrshlq_x_u8 (uint8x16_t __a, int8x16_t __b, mve_pred16_t __p) |
db5db9d2 | 15776 | { |
c431634b | 15777 | return __builtin_mve_vrshlq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p); |
db5db9d2 SP |
15778 | } |
15779 | ||
261014a1 | 15780 | __extension__ extern __inline uint16x8_t |
db5db9d2 | 15781 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15782 | __arm_vrshlq_x_u16 (uint16x8_t __a, int16x8_t __b, mve_pred16_t __p) |
db5db9d2 | 15783 | { |
c431634b | 15784 | return __builtin_mve_vrshlq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p); |
db5db9d2 SP |
15785 | } |
15786 | ||
261014a1 | 15787 | __extension__ extern __inline uint32x4_t |
532e9e24 | 15788 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15789 | __arm_vrshlq_x_u32 (uint32x4_t __a, int32x4_t __b, mve_pred16_t __p) |
532e9e24 | 15790 | { |
c431634b | 15791 | return __builtin_mve_vrshlq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p); |
532e9e24 SP |
15792 | } |
15793 | ||
261014a1 | 15794 | __extension__ extern __inline int16x8_t |
532e9e24 | 15795 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15796 | __arm_vshllbq_x_n_s8 (int8x16_t __a, const int __imm, mve_pred16_t __p) |
532e9e24 | 15797 | { |
c431634b | 15798 | return __builtin_mve_vshllbq_m_n_sv16qi (__arm_vuninitializedq_s16 (), __a, __imm, __p); |
532e9e24 SP |
15799 | } |
15800 | ||
261014a1 | 15801 | __extension__ extern __inline int32x4_t |
532e9e24 | 15802 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15803 | __arm_vshllbq_x_n_s16 (int16x8_t __a, const int __imm, mve_pred16_t __p) |
532e9e24 | 15804 | { |
c431634b | 15805 | return __builtin_mve_vshllbq_m_n_sv8hi (__arm_vuninitializedq_s32 (), __a, __imm, __p); |
532e9e24 SP |
15806 | } |
15807 | ||
261014a1 | 15808 | __extension__ extern __inline uint16x8_t |
532e9e24 | 15809 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15810 | __arm_vshllbq_x_n_u8 (uint8x16_t __a, const int __imm, mve_pred16_t __p) |
532e9e24 | 15811 | { |
c431634b | 15812 | return __builtin_mve_vshllbq_m_n_uv16qi (__arm_vuninitializedq_u16 (), __a, __imm, __p); |
532e9e24 SP |
15813 | } |
15814 | ||
261014a1 | 15815 | __extension__ extern __inline uint32x4_t |
532e9e24 | 15816 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15817 | __arm_vshllbq_x_n_u16 (uint16x8_t __a, const int __imm, mve_pred16_t __p) |
532e9e24 | 15818 | { |
c431634b | 15819 | return __builtin_mve_vshllbq_m_n_uv8hi (__arm_vuninitializedq_u32 (), __a, __imm, __p); |
532e9e24 SP |
15820 | } |
15821 | ||
261014a1 | 15822 | __extension__ extern __inline int16x8_t |
532e9e24 | 15823 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15824 | __arm_vshlltq_x_n_s8 (int8x16_t __a, const int __imm, mve_pred16_t __p) |
532e9e24 | 15825 | { |
c431634b | 15826 | return __builtin_mve_vshlltq_m_n_sv16qi (__arm_vuninitializedq_s16 (), __a, __imm, __p); |
532e9e24 SP |
15827 | } |
15828 | ||
261014a1 | 15829 | __extension__ extern __inline int32x4_t |
532e9e24 | 15830 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15831 | __arm_vshlltq_x_n_s16 (int16x8_t __a, const int __imm, mve_pred16_t __p) |
532e9e24 | 15832 | { |
c431634b | 15833 | return __builtin_mve_vshlltq_m_n_sv8hi (__arm_vuninitializedq_s32 (), __a, __imm, __p); |
532e9e24 SP |
15834 | } |
15835 | ||
261014a1 | 15836 | __extension__ extern __inline uint16x8_t |
532e9e24 | 15837 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15838 | __arm_vshlltq_x_n_u8 (uint8x16_t __a, const int __imm, mve_pred16_t __p) |
532e9e24 | 15839 | { |
c431634b | 15840 | return __builtin_mve_vshlltq_m_n_uv16qi (__arm_vuninitializedq_u16 (), __a, __imm, __p); |
532e9e24 SP |
15841 | } |
15842 | ||
261014a1 | 15843 | __extension__ extern __inline uint32x4_t |
532e9e24 | 15844 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15845 | __arm_vshlltq_x_n_u16 (uint16x8_t __a, const int __imm, mve_pred16_t __p) |
532e9e24 | 15846 | { |
c431634b | 15847 | return __builtin_mve_vshlltq_m_n_uv8hi (__arm_vuninitializedq_u32 (), __a, __imm, __p); |
532e9e24 SP |
15848 | } |
15849 | ||
261014a1 | 15850 | __extension__ extern __inline int8x16_t |
532e9e24 | 15851 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 15852 | __arm_vshlq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) |
532e9e24 | 15853 | { |
c431634b | 15854 | return __builtin_mve_vshlq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p); |
261014a1 SP |
15855 | } |
15856 | ||
15857 | __extension__ extern __inline int16x8_t | |
15858 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
15859 | __arm_vshlq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
15860 | { | |
c431634b | 15861 | return __builtin_mve_vshlq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p); |
261014a1 SP |
15862 | } |
15863 | ||
15864 | __extension__ extern __inline int32x4_t | |
15865 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
15866 | __arm_vshlq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
15867 | { | |
c431634b | 15868 | return __builtin_mve_vshlq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p); |
261014a1 SP |
15869 | } |
15870 | ||
15871 | __extension__ extern __inline uint8x16_t | |
15872 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
15873 | __arm_vshlq_x_u8 (uint8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
15874 | { | |
c431634b | 15875 | return __builtin_mve_vshlq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p); |
261014a1 SP |
15876 | } |
15877 | ||
15878 | __extension__ extern __inline uint16x8_t | |
15879 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
15880 | __arm_vshlq_x_u16 (uint16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
15881 | { | |
c431634b | 15882 | return __builtin_mve_vshlq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p); |
261014a1 SP |
15883 | } |
15884 | ||
15885 | __extension__ extern __inline uint32x4_t | |
15886 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
15887 | __arm_vshlq_x_u32 (uint32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
15888 | { | |
c431634b | 15889 | return __builtin_mve_vshlq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p); |
261014a1 SP |
15890 | } |
15891 | ||
15892 | __extension__ extern __inline int8x16_t | |
15893 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
15894 | __arm_vshlq_x_n_s8 (int8x16_t __a, const int __imm, mve_pred16_t __p) | |
15895 | { | |
c431634b | 15896 | return __builtin_mve_vshlq_m_n_sv16qi (__arm_vuninitializedq_s8 (), __a, __imm, __p); |
261014a1 SP |
15897 | } |
15898 | ||
15899 | __extension__ extern __inline int16x8_t | |
15900 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
15901 | __arm_vshlq_x_n_s16 (int16x8_t __a, const int __imm, mve_pred16_t __p) | |
15902 | { | |
c431634b | 15903 | return __builtin_mve_vshlq_m_n_sv8hi (__arm_vuninitializedq_s16 (), __a, __imm, __p); |
261014a1 SP |
15904 | } |
15905 | ||
15906 | __extension__ extern __inline int32x4_t | |
15907 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
15908 | __arm_vshlq_x_n_s32 (int32x4_t __a, const int __imm, mve_pred16_t __p) | |
15909 | { | |
c431634b | 15910 | return __builtin_mve_vshlq_m_n_sv4si (__arm_vuninitializedq_s32 (), __a, __imm, __p); |
261014a1 SP |
15911 | } |
15912 | ||
15913 | __extension__ extern __inline uint8x16_t | |
15914 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
15915 | __arm_vshlq_x_n_u8 (uint8x16_t __a, const int __imm, mve_pred16_t __p) | |
15916 | { | |
c431634b | 15917 | return __builtin_mve_vshlq_m_n_uv16qi (__arm_vuninitializedq_u8 (), __a, __imm, __p); |
261014a1 SP |
15918 | } |
15919 | ||
15920 | __extension__ extern __inline uint16x8_t | |
15921 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
15922 | __arm_vshlq_x_n_u16 (uint16x8_t __a, const int __imm, mve_pred16_t __p) | |
15923 | { | |
c431634b | 15924 | return __builtin_mve_vshlq_m_n_uv8hi (__arm_vuninitializedq_u16 (), __a, __imm, __p); |
261014a1 SP |
15925 | } |
15926 | ||
15927 | __extension__ extern __inline uint32x4_t | |
15928 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
15929 | __arm_vshlq_x_n_u32 (uint32x4_t __a, const int __imm, mve_pred16_t __p) | |
15930 | { | |
c431634b | 15931 | return __builtin_mve_vshlq_m_n_uv4si (__arm_vuninitializedq_u32 (), __a, __imm, __p); |
261014a1 SP |
15932 | } |
15933 | ||
15934 | __extension__ extern __inline int8x16_t | |
15935 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
15936 | __arm_vrshrq_x_n_s8 (int8x16_t __a, const int __imm, mve_pred16_t __p) | |
15937 | { | |
c431634b | 15938 | return __builtin_mve_vrshrq_m_n_sv16qi (__arm_vuninitializedq_s8 (), __a, __imm, __p); |
261014a1 SP |
15939 | } |
15940 | ||
15941 | __extension__ extern __inline int16x8_t | |
15942 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
15943 | __arm_vrshrq_x_n_s16 (int16x8_t __a, const int __imm, mve_pred16_t __p) | |
15944 | { | |
c431634b | 15945 | return __builtin_mve_vrshrq_m_n_sv8hi (__arm_vuninitializedq_s16 (), __a, __imm, __p); |
261014a1 SP |
15946 | } |
15947 | ||
15948 | __extension__ extern __inline int32x4_t | |
15949 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
15950 | __arm_vrshrq_x_n_s32 (int32x4_t __a, const int __imm, mve_pred16_t __p) | |
15951 | { | |
c431634b | 15952 | return __builtin_mve_vrshrq_m_n_sv4si (__arm_vuninitializedq_s32 (), __a, __imm, __p); |
261014a1 SP |
15953 | } |
15954 | ||
15955 | __extension__ extern __inline uint8x16_t | |
15956 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
15957 | __arm_vrshrq_x_n_u8 (uint8x16_t __a, const int __imm, mve_pred16_t __p) | |
15958 | { | |
c431634b | 15959 | return __builtin_mve_vrshrq_m_n_uv16qi (__arm_vuninitializedq_u8 (), __a, __imm, __p); |
261014a1 SP |
15960 | } |
15961 | ||
15962 | __extension__ extern __inline uint16x8_t | |
15963 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
15964 | __arm_vrshrq_x_n_u16 (uint16x8_t __a, const int __imm, mve_pred16_t __p) | |
15965 | { | |
c431634b | 15966 | return __builtin_mve_vrshrq_m_n_uv8hi (__arm_vuninitializedq_u16 (), __a, __imm, __p); |
261014a1 SP |
15967 | } |
15968 | ||
15969 | __extension__ extern __inline uint32x4_t | |
15970 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
15971 | __arm_vrshrq_x_n_u32 (uint32x4_t __a, const int __imm, mve_pred16_t __p) | |
15972 | { | |
c431634b | 15973 | return __builtin_mve_vrshrq_m_n_uv4si (__arm_vuninitializedq_u32 (), __a, __imm, __p); |
261014a1 SP |
15974 | } |
15975 | ||
15976 | __extension__ extern __inline int8x16_t | |
15977 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
15978 | __arm_vshrq_x_n_s8 (int8x16_t __a, const int __imm, mve_pred16_t __p) | |
15979 | { | |
c431634b | 15980 | return __builtin_mve_vshrq_m_n_sv16qi (__arm_vuninitializedq_s8 (), __a, __imm, __p); |
261014a1 SP |
15981 | } |
15982 | ||
15983 | __extension__ extern __inline int16x8_t | |
15984 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
15985 | __arm_vshrq_x_n_s16 (int16x8_t __a, const int __imm, mve_pred16_t __p) | |
15986 | { | |
c431634b | 15987 | return __builtin_mve_vshrq_m_n_sv8hi (__arm_vuninitializedq_s16 (), __a, __imm, __p); |
261014a1 SP |
15988 | } |
15989 | ||
15990 | __extension__ extern __inline int32x4_t | |
15991 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
15992 | __arm_vshrq_x_n_s32 (int32x4_t __a, const int __imm, mve_pred16_t __p) | |
15993 | { | |
c431634b | 15994 | return __builtin_mve_vshrq_m_n_sv4si (__arm_vuninitializedq_s32 (), __a, __imm, __p); |
261014a1 SP |
15995 | } |
15996 | ||
15997 | __extension__ extern __inline uint8x16_t | |
15998 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
15999 | __arm_vshrq_x_n_u8 (uint8x16_t __a, const int __imm, mve_pred16_t __p) | |
16000 | { | |
c431634b | 16001 | return __builtin_mve_vshrq_m_n_uv16qi (__arm_vuninitializedq_u8 (), __a, __imm, __p); |
261014a1 SP |
16002 | } |
16003 | ||
16004 | __extension__ extern __inline uint16x8_t | |
16005 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16006 | __arm_vshrq_x_n_u16 (uint16x8_t __a, const int __imm, mve_pred16_t __p) | |
16007 | { | |
c431634b | 16008 | return __builtin_mve_vshrq_m_n_uv8hi (__arm_vuninitializedq_u16 (), __a, __imm, __p); |
261014a1 SP |
16009 | } |
16010 | ||
16011 | __extension__ extern __inline uint32x4_t | |
16012 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16013 | __arm_vshrq_x_n_u32 (uint32x4_t __a, const int __imm, mve_pred16_t __p) | |
16014 | { | |
c431634b | 16015 | return __builtin_mve_vshrq_m_n_uv4si (__arm_vuninitializedq_u32 (), __a, __imm, __p); |
261014a1 SP |
16016 | } |
16017 | ||
c3562f81 SP |
16018 | __extension__ extern __inline int32x4_t |
16019 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16020 | __arm_vadciq_s32 (int32x4_t __a, int32x4_t __b, unsigned * __carry_out) | |
16021 | { | |
16022 | int32x4_t __res = __builtin_mve_vadciq_sv4si (__a, __b); | |
16023 | *__carry_out = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u; | |
16024 | return __res; | |
16025 | } | |
16026 | ||
16027 | __extension__ extern __inline uint32x4_t | |
16028 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16029 | __arm_vadciq_u32 (uint32x4_t __a, uint32x4_t __b, unsigned * __carry_out) | |
16030 | { | |
16031 | uint32x4_t __res = __builtin_mve_vadciq_uv4si (__a, __b); | |
16032 | *__carry_out = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u; | |
16033 | return __res; | |
16034 | } | |
16035 | ||
16036 | __extension__ extern __inline int32x4_t | |
16037 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16038 | __arm_vadciq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, unsigned * __carry_out, mve_pred16_t __p) | |
16039 | { | |
16040 | int32x4_t __res = __builtin_mve_vadciq_m_sv4si (__inactive, __a, __b, __p); | |
16041 | *__carry_out = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u; | |
16042 | return __res; | |
16043 | } | |
16044 | ||
16045 | __extension__ extern __inline uint32x4_t | |
16046 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16047 | __arm_vadciq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, unsigned * __carry_out, mve_pred16_t __p) | |
16048 | { | |
16049 | uint32x4_t __res = __builtin_mve_vadciq_m_uv4si (__inactive, __a, __b, __p); | |
16050 | *__carry_out = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u; | |
16051 | return __res; | |
16052 | } | |
16053 | ||
16054 | __extension__ extern __inline int32x4_t | |
16055 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16056 | __arm_vadcq_s32 (int32x4_t __a, int32x4_t __b, unsigned * __carry) | |
16057 | { | |
16058 | __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () & ~0x20000000u) | (*__carry << 29)); | |
16059 | int32x4_t __res = __builtin_mve_vadcq_sv4si (__a, __b); | |
16060 | *__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u; | |
16061 | return __res; | |
16062 | } | |
16063 | ||
16064 | __extension__ extern __inline uint32x4_t | |
16065 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16066 | __arm_vadcq_u32 (uint32x4_t __a, uint32x4_t __b, unsigned * __carry) | |
16067 | { | |
16068 | __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () & ~0x20000000u) | (*__carry << 29)); | |
16069 | uint32x4_t __res = __builtin_mve_vadcq_uv4si (__a, __b); | |
16070 | *__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u; | |
16071 | return __res; | |
16072 | } | |
16073 | ||
16074 | __extension__ extern __inline int32x4_t | |
16075 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16076 | __arm_vadcq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, unsigned * __carry, mve_pred16_t __p) | |
16077 | { | |
16078 | __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () & ~0x20000000u) | (*__carry << 29)); | |
16079 | int32x4_t __res = __builtin_mve_vadcq_m_sv4si (__inactive, __a, __b, __p); | |
16080 | *__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u; | |
16081 | return __res; | |
16082 | } | |
16083 | ||
16084 | __extension__ extern __inline uint32x4_t | |
16085 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16086 | __arm_vadcq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, unsigned * __carry, mve_pred16_t __p) | |
16087 | { | |
16088 | __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () & ~0x20000000u) | (*__carry << 29)); | |
16089 | uint32x4_t __res = __builtin_mve_vadcq_m_uv4si (__inactive, __a, __b, __p); | |
16090 | *__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u; | |
16091 | return __res; | |
16092 | } | |
16093 | ||
16094 | __extension__ extern __inline int32x4_t | |
16095 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16096 | __arm_vsbciq_s32 (int32x4_t __a, int32x4_t __b, unsigned * __carry_out) | |
16097 | { | |
16098 | int32x4_t __res = __builtin_mve_vsbciq_sv4si (__a, __b); | |
16099 | *__carry_out = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u; | |
16100 | return __res; | |
16101 | } | |
16102 | ||
16103 | __extension__ extern __inline uint32x4_t | |
16104 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16105 | __arm_vsbciq_u32 (uint32x4_t __a, uint32x4_t __b, unsigned * __carry_out) | |
16106 | { | |
16107 | uint32x4_t __res = __builtin_mve_vsbciq_uv4si (__a, __b); | |
16108 | *__carry_out = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u; | |
16109 | return __res; | |
16110 | } | |
16111 | ||
16112 | __extension__ extern __inline int32x4_t | |
16113 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16114 | __arm_vsbciq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, unsigned * __carry_out, mve_pred16_t __p) | |
16115 | { | |
16116 | int32x4_t __res = __builtin_mve_vsbciq_m_sv4si (__inactive, __a, __b, __p); | |
16117 | *__carry_out = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u; | |
16118 | return __res; | |
16119 | } | |
16120 | ||
16121 | __extension__ extern __inline uint32x4_t | |
16122 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16123 | __arm_vsbciq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, unsigned * __carry_out, mve_pred16_t __p) | |
16124 | { | |
16125 | uint32x4_t __res = __builtin_mve_vsbciq_m_uv4si (__inactive, __a, __b, __p); | |
16126 | *__carry_out = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u; | |
16127 | return __res; | |
16128 | } | |
16129 | ||
16130 | __extension__ extern __inline int32x4_t | |
16131 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16132 | __arm_vsbcq_s32 (int32x4_t __a, int32x4_t __b, unsigned * __carry) | |
16133 | { | |
16134 | __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () & ~0x20000000u) | (*__carry << 29)); | |
16135 | int32x4_t __res = __builtin_mve_vsbcq_sv4si (__a, __b); | |
16136 | *__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u; | |
16137 | return __res; | |
16138 | } | |
16139 | ||
16140 | __extension__ extern __inline uint32x4_t | |
16141 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16142 | __arm_vsbcq_u32 (uint32x4_t __a, uint32x4_t __b, unsigned * __carry) | |
16143 | { | |
16144 | __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () & ~0x20000000u) | (*__carry << 29)); | |
16145 | uint32x4_t __res = __builtin_mve_vsbcq_uv4si (__a, __b); | |
16146 | *__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u; | |
16147 | return __res; | |
16148 | } | |
16149 | ||
16150 | __extension__ extern __inline int32x4_t | |
16151 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16152 | __arm_vsbcq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, unsigned * __carry, mve_pred16_t __p) | |
16153 | { | |
16154 | __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () & ~0x20000000u) | (*__carry << 29)); | |
16155 | int32x4_t __res = __builtin_mve_vsbcq_m_sv4si (__inactive, __a, __b, __p); | |
16156 | *__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u; | |
16157 | return __res; | |
16158 | } | |
16159 | ||
16160 | __extension__ extern __inline uint32x4_t | |
16161 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16162 | __arm_vsbcq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, unsigned * __carry, mve_pred16_t __p) | |
16163 | { | |
16164 | __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () & ~0x20000000u) | (*__carry << 29)); | |
16165 | uint32x4_t __res = __builtin_mve_vsbcq_m_uv4si (__inactive, __a, __b, __p); | |
16166 | *__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u; | |
16167 | return __res; | |
16168 | } | |
16169 | ||
1dfcc3b5 SP |
16170 | __extension__ extern __inline void |
16171 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16172 | __arm_vst1q_p_u8 (uint8_t * __addr, uint8x16_t __value, mve_pred16_t __p) | |
16173 | { | |
16174 | return vstrbq_p_u8 (__addr, __value, __p); | |
16175 | } | |
16176 | ||
16177 | __extension__ extern __inline void | |
16178 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16179 | __arm_vst1q_p_s8 (int8_t * __addr, int8x16_t __value, mve_pred16_t __p) | |
16180 | { | |
16181 | return vstrbq_p_s8 (__addr, __value, __p); | |
16182 | } | |
16183 | ||
16184 | __extension__ extern __inline void | |
16185 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16186 | __arm_vst2q_s8 (int8_t * __addr, int8x16x2_t __value) | |
16187 | { | |
16188 | union { int8x16x2_t __i; __builtin_neon_oi __o; } __rv; | |
16189 | __rv.__i = __value; | |
16190 | __builtin_mve_vst2qv16qi ((__builtin_neon_qi *) __addr, __rv.__o); | |
16191 | } | |
16192 | ||
16193 | __extension__ extern __inline void | |
16194 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16195 | __arm_vst2q_u8 (uint8_t * __addr, uint8x16x2_t __value) | |
16196 | { | |
16197 | union { uint8x16x2_t __i; __builtin_neon_oi __o; } __rv; | |
16198 | __rv.__i = __value; | |
16199 | __builtin_mve_vst2qv16qi ((__builtin_neon_qi *) __addr, __rv.__o); | |
16200 | } | |
16201 | ||
16202 | __extension__ extern __inline uint8x16_t | |
16203 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16204 | __arm_vld1q_z_u8 (uint8_t const *__base, mve_pred16_t __p) | |
16205 | { | |
16206 | return vldrbq_z_u8 ( __base, __p); | |
16207 | } | |
16208 | ||
16209 | __extension__ extern __inline int8x16_t | |
16210 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16211 | __arm_vld1q_z_s8 (int8_t const *__base, mve_pred16_t __p) | |
16212 | { | |
16213 | return vldrbq_z_s8 ( __base, __p); | |
16214 | } | |
16215 | ||
16216 | __extension__ extern __inline int8x16x2_t | |
16217 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16218 | __arm_vld2q_s8 (int8_t const * __addr) | |
16219 | { | |
16220 | union { int8x16x2_t __i; __builtin_neon_oi __o; } __rv; | |
16221 | __rv.__o = __builtin_mve_vld2qv16qi ((__builtin_neon_qi *) __addr); | |
16222 | return __rv.__i; | |
16223 | } | |
16224 | ||
16225 | __extension__ extern __inline uint8x16x2_t | |
16226 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16227 | __arm_vld2q_u8 (uint8_t const * __addr) | |
16228 | { | |
16229 | union { uint8x16x2_t __i; __builtin_neon_oi __o; } __rv; | |
16230 | __rv.__o = __builtin_mve_vld2qv16qi ((__builtin_neon_qi *) __addr); | |
16231 | return __rv.__i; | |
16232 | } | |
16233 | ||
16234 | __extension__ extern __inline int8x16x4_t | |
16235 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16236 | __arm_vld4q_s8 (int8_t const * __addr) | |
16237 | { | |
16238 | union { int8x16x4_t __i; __builtin_neon_xi __o; } __rv; | |
16239 | __rv.__o = __builtin_mve_vld4qv16qi ((__builtin_neon_qi *) __addr); | |
16240 | return __rv.__i; | |
16241 | } | |
16242 | ||
16243 | __extension__ extern __inline uint8x16x4_t | |
16244 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16245 | __arm_vld4q_u8 (uint8_t const * __addr) | |
16246 | { | |
16247 | union { uint8x16x4_t __i; __builtin_neon_xi __o; } __rv; | |
16248 | __rv.__o = __builtin_mve_vld4qv16qi ((__builtin_neon_qi *) __addr); | |
16249 | return __rv.__i; | |
16250 | } | |
16251 | ||
16252 | __extension__ extern __inline void | |
16253 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16254 | __arm_vst1q_p_u16 (uint16_t * __addr, uint16x8_t __value, mve_pred16_t __p) | |
16255 | { | |
16256 | return vstrhq_p_u16 (__addr, __value, __p); | |
16257 | } | |
16258 | ||
16259 | __extension__ extern __inline void | |
16260 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16261 | __arm_vst1q_p_s16 (int16_t * __addr, int16x8_t __value, mve_pred16_t __p) | |
16262 | { | |
16263 | return vstrhq_p_s16 (__addr, __value, __p); | |
16264 | } | |
16265 | ||
16266 | __extension__ extern __inline void | |
16267 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16268 | __arm_vst2q_s16 (int16_t * __addr, int16x8x2_t __value) | |
16269 | { | |
16270 | union { int16x8x2_t __i; __builtin_neon_oi __o; } __rv; | |
16271 | __rv.__i = __value; | |
16272 | __builtin_mve_vst2qv8hi ((__builtin_neon_hi *) __addr, __rv.__o); | |
16273 | } | |
16274 | ||
16275 | __extension__ extern __inline void | |
16276 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16277 | __arm_vst2q_u16 (uint16_t * __addr, uint16x8x2_t __value) | |
16278 | { | |
16279 | union { uint16x8x2_t __i; __builtin_neon_oi __o; } __rv; | |
16280 | __rv.__i = __value; | |
16281 | __builtin_mve_vst2qv8hi ((__builtin_neon_hi *) __addr, __rv.__o); | |
16282 | } | |
16283 | ||
16284 | __extension__ extern __inline uint16x8_t | |
16285 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16286 | __arm_vld1q_z_u16 (uint16_t const *__base, mve_pred16_t __p) | |
16287 | { | |
16288 | return vldrhq_z_u16 ( __base, __p); | |
16289 | } | |
16290 | ||
16291 | __extension__ extern __inline int16x8_t | |
16292 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16293 | __arm_vld1q_z_s16 (int16_t const *__base, mve_pred16_t __p) | |
16294 | { | |
16295 | return vldrhq_z_s16 ( __base, __p); | |
16296 | } | |
16297 | ||
16298 | __extension__ extern __inline int16x8x2_t | |
16299 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16300 | __arm_vld2q_s16 (int16_t const * __addr) | |
16301 | { | |
16302 | union { int16x8x2_t __i; __builtin_neon_oi __o; } __rv; | |
16303 | __rv.__o = __builtin_mve_vld2qv8hi ((__builtin_neon_hi *) __addr); | |
16304 | return __rv.__i; | |
16305 | } | |
16306 | ||
16307 | __extension__ extern __inline uint16x8x2_t | |
16308 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16309 | __arm_vld2q_u16 (uint16_t const * __addr) | |
16310 | { | |
16311 | union { uint16x8x2_t __i; __builtin_neon_oi __o; } __rv; | |
16312 | __rv.__o = __builtin_mve_vld2qv8hi ((__builtin_neon_hi *) __addr); | |
16313 | return __rv.__i; | |
16314 | } | |
16315 | ||
16316 | __extension__ extern __inline int16x8x4_t | |
16317 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16318 | __arm_vld4q_s16 (int16_t const * __addr) | |
16319 | { | |
16320 | union { int16x8x4_t __i; __builtin_neon_xi __o; } __rv; | |
16321 | __rv.__o = __builtin_mve_vld4qv8hi ((__builtin_neon_hi *) __addr); | |
16322 | return __rv.__i; | |
16323 | } | |
16324 | ||
16325 | __extension__ extern __inline uint16x8x4_t | |
16326 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16327 | __arm_vld4q_u16 (uint16_t const * __addr) | |
16328 | { | |
16329 | union { uint16x8x4_t __i; __builtin_neon_xi __o; } __rv; | |
16330 | __rv.__o = __builtin_mve_vld4qv8hi ((__builtin_neon_hi *) __addr); | |
16331 | return __rv.__i; | |
16332 | } | |
16333 | ||
16334 | __extension__ extern __inline void | |
16335 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16336 | __arm_vst1q_p_u32 (uint32_t * __addr, uint32x4_t __value, mve_pred16_t __p) | |
16337 | { | |
16338 | return vstrwq_p_u32 (__addr, __value, __p); | |
16339 | } | |
16340 | ||
16341 | __extension__ extern __inline void | |
16342 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16343 | __arm_vst1q_p_s32 (int32_t * __addr, int32x4_t __value, mve_pred16_t __p) | |
16344 | { | |
16345 | return vstrwq_p_s32 (__addr, __value, __p); | |
16346 | } | |
16347 | ||
16348 | __extension__ extern __inline void | |
16349 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16350 | __arm_vst2q_s32 (int32_t * __addr, int32x4x2_t __value) | |
16351 | { | |
16352 | union { int32x4x2_t __i; __builtin_neon_oi __o; } __rv; | |
16353 | __rv.__i = __value; | |
16354 | __builtin_mve_vst2qv4si ((__builtin_neon_si *) __addr, __rv.__o); | |
16355 | } | |
16356 | ||
16357 | __extension__ extern __inline void | |
16358 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16359 | __arm_vst2q_u32 (uint32_t * __addr, uint32x4x2_t __value) | |
16360 | { | |
16361 | union { uint32x4x2_t __i; __builtin_neon_oi __o; } __rv; | |
16362 | __rv.__i = __value; | |
16363 | __builtin_mve_vst2qv4si ((__builtin_neon_si *) __addr, __rv.__o); | |
16364 | } | |
16365 | ||
16366 | __extension__ extern __inline uint32x4_t | |
16367 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16368 | __arm_vld1q_z_u32 (uint32_t const *__base, mve_pred16_t __p) | |
16369 | { | |
16370 | return vldrwq_z_u32 ( __base, __p); | |
16371 | } | |
16372 | ||
16373 | __extension__ extern __inline int32x4_t | |
16374 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16375 | __arm_vld1q_z_s32 (int32_t const *__base, mve_pred16_t __p) | |
16376 | { | |
16377 | return vldrwq_z_s32 ( __base, __p); | |
16378 | } | |
16379 | ||
16380 | __extension__ extern __inline int32x4x2_t | |
16381 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16382 | __arm_vld2q_s32 (int32_t const * __addr) | |
16383 | { | |
16384 | union { int32x4x2_t __i; __builtin_neon_oi __o; } __rv; | |
16385 | __rv.__o = __builtin_mve_vld2qv4si ((__builtin_neon_si *) __addr); | |
16386 | return __rv.__i; | |
16387 | } | |
16388 | ||
16389 | __extension__ extern __inline uint32x4x2_t | |
16390 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16391 | __arm_vld2q_u32 (uint32_t const * __addr) | |
16392 | { | |
16393 | union { uint32x4x2_t __i; __builtin_neon_oi __o; } __rv; | |
16394 | __rv.__o = __builtin_mve_vld2qv4si ((__builtin_neon_si *) __addr); | |
16395 | return __rv.__i; | |
16396 | } | |
16397 | ||
16398 | __extension__ extern __inline int32x4x4_t | |
16399 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16400 | __arm_vld4q_s32 (int32_t const * __addr) | |
16401 | { | |
16402 | union { int32x4x4_t __i; __builtin_neon_xi __o; } __rv; | |
16403 | __rv.__o = __builtin_mve_vld4qv4si ((__builtin_neon_si *) __addr); | |
16404 | return __rv.__i; | |
16405 | } | |
16406 | ||
16407 | __extension__ extern __inline uint32x4x4_t | |
16408 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16409 | __arm_vld4q_u32 (uint32_t const * __addr) | |
16410 | { | |
16411 | union { uint32x4x4_t __i; __builtin_neon_xi __o; } __rv; | |
16412 | __rv.__o = __builtin_mve_vld4qv4si ((__builtin_neon_si *) __addr); | |
16413 | return __rv.__i; | |
16414 | } | |
16415 | ||
1a5c27b1 SP |
16416 | __extension__ extern __inline int16x8_t |
16417 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16418 | __arm_vsetq_lane_s16 (int16_t __a, int16x8_t __b, const int __idx) | |
16419 | { | |
16420 | __ARM_CHECK_LANEQ (__b, __idx); | |
16421 | __b[__ARM_LANEQ(__b,__idx)] = __a; | |
16422 | return __b; | |
16423 | } | |
16424 | ||
16425 | __extension__ extern __inline int32x4_t | |
16426 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16427 | __arm_vsetq_lane_s32 (int32_t __a, int32x4_t __b, const int __idx) | |
16428 | { | |
16429 | __ARM_CHECK_LANEQ (__b, __idx); | |
16430 | __b[__ARM_LANEQ(__b,__idx)] = __a; | |
16431 | return __b; | |
16432 | } | |
16433 | ||
16434 | __extension__ extern __inline int8x16_t | |
16435 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16436 | __arm_vsetq_lane_s8 (int8_t __a, int8x16_t __b, const int __idx) | |
16437 | { | |
16438 | __ARM_CHECK_LANEQ (__b, __idx); | |
16439 | __b[__ARM_LANEQ(__b,__idx)] = __a; | |
16440 | return __b; | |
16441 | } | |
16442 | ||
16443 | __extension__ extern __inline int64x2_t | |
16444 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16445 | __arm_vsetq_lane_s64 (int64_t __a, int64x2_t __b, const int __idx) | |
16446 | { | |
16447 | __ARM_CHECK_LANEQ (__b, __idx); | |
16448 | __b[__ARM_LANEQ(__b,__idx)] = __a; | |
16449 | return __b; | |
16450 | } | |
16451 | ||
16452 | __extension__ extern __inline uint8x16_t | |
16453 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16454 | __arm_vsetq_lane_u8 (uint8_t __a, uint8x16_t __b, const int __idx) | |
16455 | { | |
16456 | __ARM_CHECK_LANEQ (__b, __idx); | |
16457 | __b[__ARM_LANEQ(__b,__idx)] = __a; | |
16458 | return __b; | |
16459 | } | |
16460 | ||
16461 | __extension__ extern __inline uint16x8_t | |
16462 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16463 | __arm_vsetq_lane_u16 (uint16_t __a, uint16x8_t __b, const int __idx) | |
16464 | { | |
16465 | __ARM_CHECK_LANEQ (__b, __idx); | |
16466 | __b[__ARM_LANEQ(__b,__idx)] = __a; | |
16467 | return __b; | |
16468 | } | |
16469 | ||
16470 | __extension__ extern __inline uint32x4_t | |
16471 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16472 | __arm_vsetq_lane_u32 (uint32_t __a, uint32x4_t __b, const int __idx) | |
16473 | { | |
16474 | __ARM_CHECK_LANEQ (__b, __idx); | |
16475 | __b[__ARM_LANEQ(__b,__idx)] = __a; | |
16476 | return __b; | |
16477 | } | |
16478 | ||
16479 | __extension__ extern __inline uint64x2_t | |
16480 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16481 | __arm_vsetq_lane_u64 (uint64_t __a, uint64x2_t __b, const int __idx) | |
16482 | { | |
16483 | __ARM_CHECK_LANEQ (__b, __idx); | |
16484 | __b[__ARM_LANEQ(__b,__idx)] = __a; | |
16485 | return __b; | |
16486 | } | |
16487 | ||
16488 | __extension__ extern __inline int16_t | |
16489 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16490 | __arm_vgetq_lane_s16 (int16x8_t __a, const int __idx) | |
16491 | { | |
16492 | __ARM_CHECK_LANEQ (__a, __idx); | |
16493 | return __a[__ARM_LANEQ(__a,__idx)]; | |
16494 | } | |
16495 | ||
16496 | __extension__ extern __inline int32_t | |
16497 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16498 | __arm_vgetq_lane_s32 (int32x4_t __a, const int __idx) | |
16499 | { | |
16500 | __ARM_CHECK_LANEQ (__a, __idx); | |
16501 | return __a[__ARM_LANEQ(__a,__idx)]; | |
16502 | } | |
16503 | ||
16504 | __extension__ extern __inline int8_t | |
16505 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16506 | __arm_vgetq_lane_s8 (int8x16_t __a, const int __idx) | |
16507 | { | |
16508 | __ARM_CHECK_LANEQ (__a, __idx); | |
16509 | return __a[__ARM_LANEQ(__a,__idx)]; | |
16510 | } | |
16511 | ||
16512 | __extension__ extern __inline int64_t | |
16513 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16514 | __arm_vgetq_lane_s64 (int64x2_t __a, const int __idx) | |
16515 | { | |
16516 | __ARM_CHECK_LANEQ (__a, __idx); | |
16517 | return __a[__ARM_LANEQ(__a,__idx)]; | |
16518 | } | |
16519 | ||
16520 | __extension__ extern __inline uint8_t | |
16521 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16522 | __arm_vgetq_lane_u8 (uint8x16_t __a, const int __idx) | |
16523 | { | |
16524 | __ARM_CHECK_LANEQ (__a, __idx); | |
16525 | return __a[__ARM_LANEQ(__a,__idx)]; | |
16526 | } | |
16527 | ||
16528 | __extension__ extern __inline uint16_t | |
16529 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16530 | __arm_vgetq_lane_u16 (uint16x8_t __a, const int __idx) | |
16531 | { | |
16532 | __ARM_CHECK_LANEQ (__a, __idx); | |
16533 | return __a[__ARM_LANEQ(__a,__idx)]; | |
16534 | } | |
16535 | ||
16536 | __extension__ extern __inline uint32_t | |
16537 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16538 | __arm_vgetq_lane_u32 (uint32x4_t __a, const int __idx) | |
16539 | { | |
16540 | __ARM_CHECK_LANEQ (__a, __idx); | |
16541 | return __a[__ARM_LANEQ(__a,__idx)]; | |
16542 | } | |
16543 | ||
16544 | __extension__ extern __inline uint64_t | |
16545 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16546 | __arm_vgetq_lane_u64 (uint64x2_t __a, const int __idx) | |
16547 | { | |
16548 | __ARM_CHECK_LANEQ (__a, __idx); | |
16549 | return __a[__ARM_LANEQ(__a,__idx)]; | |
16550 | } | |
16551 | ||
85244449 SP |
16552 | __extension__ extern __inline uint64_t |
16553 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16554 | __arm_lsll (uint64_t value, int32_t shift) | |
16555 | { | |
16556 | return (value << shift); | |
16557 | } | |
16558 | ||
16559 | __extension__ extern __inline int64_t | |
16560 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16561 | __arm_asrl (int64_t value, int32_t shift) | |
16562 | { | |
16563 | return (value >> shift); | |
16564 | } | |
16565 | ||
16566 | __extension__ extern __inline uint64_t | |
16567 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16568 | __arm_uqrshll (uint64_t value, int32_t shift) | |
16569 | { | |
16570 | return __builtin_mve_uqrshll_sat64_di (value, shift); | |
16571 | } | |
16572 | ||
16573 | __extension__ extern __inline uint64_t | |
16574 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16575 | __arm_uqrshll_sat48 (uint64_t value, int32_t shift) | |
16576 | { | |
16577 | return __builtin_mve_uqrshll_sat48_di (value, shift); | |
16578 | } | |
16579 | ||
16580 | __extension__ extern __inline int64_t | |
16581 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16582 | __arm_sqrshrl (int64_t value, int32_t shift) | |
16583 | { | |
16584 | return __builtin_mve_sqrshrl_sat64_di (value, shift); | |
16585 | } | |
16586 | ||
16587 | __extension__ extern __inline int64_t | |
16588 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16589 | __arm_sqrshrl_sat48 (int64_t value, int32_t shift) | |
16590 | { | |
16591 | return __builtin_mve_sqrshrl_sat48_di (value, shift); | |
16592 | } | |
16593 | ||
16594 | __extension__ extern __inline uint64_t | |
16595 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16596 | __arm_uqshll (uint64_t value, const int shift) | |
16597 | { | |
16598 | return __builtin_mve_uqshll_di (value, shift); | |
16599 | } | |
16600 | ||
16601 | __extension__ extern __inline uint64_t | |
16602 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16603 | __arm_urshrl (uint64_t value, const int shift) | |
16604 | { | |
16605 | return __builtin_mve_urshrl_di (value, shift); | |
16606 | } | |
16607 | ||
16608 | __extension__ extern __inline int64_t | |
16609 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16610 | __arm_srshrl (int64_t value, const int shift) | |
16611 | { | |
16612 | return __builtin_mve_srshrl_di (value, shift); | |
16613 | } | |
16614 | ||
16615 | __extension__ extern __inline int64_t | |
16616 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16617 | __arm_sqshll (int64_t value, const int shift) | |
16618 | { | |
16619 | return __builtin_mve_sqshll_di (value, shift); | |
16620 | } | |
16621 | ||
16622 | __extension__ extern __inline uint32_t | |
16623 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16624 | __arm_uqrshl (uint32_t value, int32_t shift) | |
16625 | { | |
16626 | return __builtin_mve_uqrshl_si (value, shift); | |
16627 | } | |
16628 | ||
16629 | __extension__ extern __inline int32_t | |
16630 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16631 | __arm_sqrshr (int32_t value, int32_t shift) | |
16632 | { | |
16633 | return __builtin_mve_sqrshr_si (value, shift); | |
16634 | } | |
16635 | ||
16636 | __extension__ extern __inline uint32_t | |
16637 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16638 | __arm_uqshl (uint32_t value, const int shift) | |
16639 | { | |
16640 | return __builtin_mve_uqshl_si (value, shift); | |
16641 | } | |
16642 | ||
16643 | __extension__ extern __inline uint32_t | |
16644 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16645 | __arm_urshr (uint32_t value, const int shift) | |
16646 | { | |
16647 | return __builtin_mve_urshr_si (value, shift); | |
16648 | } | |
16649 | ||
16650 | __extension__ extern __inline int32_t | |
16651 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16652 | __arm_sqshl (int32_t value, const int shift) | |
16653 | { | |
16654 | return __builtin_mve_sqshl_si (value, shift); | |
16655 | } | |
16656 | ||
16657 | __extension__ extern __inline int32_t | |
16658 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16659 | __arm_srshr (int32_t value, const int shift) | |
16660 | { | |
16661 | return __builtin_mve_srshr_si (value, shift); | |
16662 | } | |
16663 | ||
88c9a831 SP |
16664 | __extension__ extern __inline int8x16_t |
16665 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16666 | __arm_vshlcq_m_s8 (int8x16_t __a, uint32_t * __b, const int __imm, mve_pred16_t __p) | |
16667 | { | |
16668 | int8x16_t __res = __builtin_mve_vshlcq_m_vec_sv16qi (__a, *__b, __imm, __p); | |
16669 | *__b = __builtin_mve_vshlcq_m_carry_sv16qi (__a, *__b, __imm, __p); | |
16670 | return __res; | |
16671 | } | |
16672 | ||
16673 | __extension__ extern __inline uint8x16_t | |
16674 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16675 | __arm_vshlcq_m_u8 (uint8x16_t __a, uint32_t * __b, const int __imm, mve_pred16_t __p) | |
16676 | { | |
16677 | uint8x16_t __res = __builtin_mve_vshlcq_m_vec_uv16qi (__a, *__b, __imm, __p); | |
16678 | *__b = __builtin_mve_vshlcq_m_carry_uv16qi (__a, *__b, __imm, __p); | |
16679 | return __res; | |
16680 | } | |
16681 | ||
16682 | __extension__ extern __inline int16x8_t | |
16683 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16684 | __arm_vshlcq_m_s16 (int16x8_t __a, uint32_t * __b, const int __imm, mve_pred16_t __p) | |
16685 | { | |
16686 | int16x8_t __res = __builtin_mve_vshlcq_m_vec_sv8hi (__a, *__b, __imm, __p); | |
16687 | *__b = __builtin_mve_vshlcq_m_carry_sv8hi (__a, *__b, __imm, __p); | |
16688 | return __res; | |
16689 | } | |
16690 | ||
16691 | __extension__ extern __inline uint16x8_t | |
16692 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16693 | __arm_vshlcq_m_u16 (uint16x8_t __a, uint32_t * __b, const int __imm, mve_pred16_t __p) | |
16694 | { | |
16695 | uint16x8_t __res = __builtin_mve_vshlcq_m_vec_uv8hi (__a, *__b, __imm, __p); | |
16696 | *__b = __builtin_mve_vshlcq_m_carry_uv8hi (__a, *__b, __imm, __p); | |
16697 | return __res; | |
16698 | } | |
16699 | ||
16700 | __extension__ extern __inline int32x4_t | |
16701 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16702 | __arm_vshlcq_m_s32 (int32x4_t __a, uint32_t * __b, const int __imm, mve_pred16_t __p) | |
16703 | { | |
16704 | int32x4_t __res = __builtin_mve_vshlcq_m_vec_sv4si (__a, *__b, __imm, __p); | |
16705 | *__b = __builtin_mve_vshlcq_m_carry_sv4si (__a, *__b, __imm, __p); | |
16706 | return __res; | |
16707 | } | |
16708 | ||
16709 | __extension__ extern __inline uint32x4_t | |
16710 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16711 | __arm_vshlcq_m_u32 (uint32x4_t __a, uint32_t * __b, const int __imm, mve_pred16_t __p) | |
16712 | { | |
16713 | uint32x4_t __res = __builtin_mve_vshlcq_m_vec_uv4si (__a, *__b, __imm, __p); | |
16714 | *__b = __builtin_mve_vshlcq_m_carry_uv4si (__a, *__b, __imm, __p); | |
16715 | return __res; | |
16716 | } | |
16717 | ||
261014a1 SP |
16718 | #if (__ARM_FEATURE_MVE & 2) /* MVE Floating point. */ |
16719 | ||
16720 | __extension__ extern __inline void | |
16721 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16722 | __arm_vst4q_f16 (float16_t * __addr, float16x8x4_t __value) | |
16723 | { | |
16724 | union { float16x8x4_t __i; __builtin_neon_xi __o; } __rv; | |
16725 | __rv.__i = __value; | |
16726 | __builtin_mve_vst4qv8hf (__addr, __rv.__o); | |
16727 | } | |
16728 | ||
16729 | __extension__ extern __inline void | |
16730 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16731 | __arm_vst4q_f32 (float32_t * __addr, float32x4x4_t __value) | |
16732 | { | |
16733 | union { float32x4x4_t __i; __builtin_neon_xi __o; } __rv; | |
16734 | __rv.__i = __value; | |
16735 | __builtin_mve_vst4qv4sf (__addr, __rv.__o); | |
16736 | } | |
16737 | ||
16738 | __extension__ extern __inline float16x8_t | |
16739 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16740 | __arm_vrndxq_f16 (float16x8_t __a) | |
16741 | { | |
16742 | return __builtin_mve_vrndxq_fv8hf (__a); | |
532e9e24 SP |
16743 | } |
16744 | ||
16745 | __extension__ extern __inline float32x4_t | |
16746 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 16747 | __arm_vrndxq_f32 (float32x4_t __a) |
532e9e24 | 16748 | { |
261014a1 | 16749 | return __builtin_mve_vrndxq_fv4sf (__a); |
532e9e24 SP |
16750 | } |
16751 | ||
16752 | __extension__ extern __inline float16x8_t | |
16753 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 16754 | __arm_vrndq_f16 (float16x8_t __a) |
532e9e24 | 16755 | { |
261014a1 | 16756 | return __builtin_mve_vrndq_fv8hf (__a); |
532e9e24 SP |
16757 | } |
16758 | ||
16759 | __extension__ extern __inline float32x4_t | |
16760 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 16761 | __arm_vrndq_f32 (float32x4_t __a) |
532e9e24 | 16762 | { |
261014a1 | 16763 | return __builtin_mve_vrndq_fv4sf (__a); |
532e9e24 SP |
16764 | } |
16765 | ||
16766 | __extension__ extern __inline float16x8_t | |
16767 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 16768 | __arm_vrndpq_f16 (float16x8_t __a) |
532e9e24 | 16769 | { |
261014a1 | 16770 | return __builtin_mve_vrndpq_fv8hf (__a); |
532e9e24 SP |
16771 | } |
16772 | ||
16773 | __extension__ extern __inline float32x4_t | |
16774 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 16775 | __arm_vrndpq_f32 (float32x4_t __a) |
532e9e24 | 16776 | { |
261014a1 | 16777 | return __builtin_mve_vrndpq_fv4sf (__a); |
532e9e24 SP |
16778 | } |
16779 | ||
16780 | __extension__ extern __inline float16x8_t | |
16781 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 16782 | __arm_vrndnq_f16 (float16x8_t __a) |
532e9e24 | 16783 | { |
261014a1 | 16784 | return __builtin_mve_vrndnq_fv8hf (__a); |
532e9e24 SP |
16785 | } |
16786 | ||
16787 | __extension__ extern __inline float32x4_t | |
16788 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 16789 | __arm_vrndnq_f32 (float32x4_t __a) |
532e9e24 | 16790 | { |
261014a1 | 16791 | return __builtin_mve_vrndnq_fv4sf (__a); |
532e9e24 SP |
16792 | } |
16793 | ||
16794 | __extension__ extern __inline float16x8_t | |
16795 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 16796 | __arm_vrndmq_f16 (float16x8_t __a) |
532e9e24 | 16797 | { |
261014a1 | 16798 | return __builtin_mve_vrndmq_fv8hf (__a); |
532e9e24 SP |
16799 | } |
16800 | ||
16801 | __extension__ extern __inline float32x4_t | |
16802 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 16803 | __arm_vrndmq_f32 (float32x4_t __a) |
532e9e24 | 16804 | { |
261014a1 | 16805 | return __builtin_mve_vrndmq_fv4sf (__a); |
532e9e24 SP |
16806 | } |
16807 | ||
16808 | __extension__ extern __inline float16x8_t | |
16809 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 16810 | __arm_vrndaq_f16 (float16x8_t __a) |
532e9e24 | 16811 | { |
261014a1 | 16812 | return __builtin_mve_vrndaq_fv8hf (__a); |
532e9e24 SP |
16813 | } |
16814 | ||
16815 | __extension__ extern __inline float32x4_t | |
16816 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 16817 | __arm_vrndaq_f32 (float32x4_t __a) |
532e9e24 | 16818 | { |
261014a1 | 16819 | return __builtin_mve_vrndaq_fv4sf (__a); |
532e9e24 SP |
16820 | } |
16821 | ||
16822 | __extension__ extern __inline float16x8_t | |
16823 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 16824 | __arm_vrev64q_f16 (float16x8_t __a) |
532e9e24 | 16825 | { |
261014a1 | 16826 | return __builtin_mve_vrev64q_fv8hf (__a); |
532e9e24 SP |
16827 | } |
16828 | ||
16829 | __extension__ extern __inline float32x4_t | |
16830 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 16831 | __arm_vrev64q_f32 (float32x4_t __a) |
532e9e24 | 16832 | { |
261014a1 | 16833 | return __builtin_mve_vrev64q_fv4sf (__a); |
532e9e24 SP |
16834 | } |
16835 | ||
16836 | __extension__ extern __inline float16x8_t | |
16837 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 16838 | __arm_vnegq_f16 (float16x8_t __a) |
532e9e24 | 16839 | { |
261014a1 SP |
16840 | return __builtin_mve_vnegq_fv8hf (__a); |
16841 | } | |
16842 | ||
16843 | __extension__ extern __inline float32x4_t | |
16844 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16845 | __arm_vnegq_f32 (float32x4_t __a) | |
16846 | { | |
16847 | return __builtin_mve_vnegq_fv4sf (__a); | |
16848 | } | |
16849 | ||
16850 | __extension__ extern __inline float16x8_t | |
16851 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16852 | __arm_vdupq_n_f16 (float16_t __a) | |
16853 | { | |
16854 | return __builtin_mve_vdupq_n_fv8hf (__a); | |
16855 | } | |
16856 | ||
16857 | __extension__ extern __inline float32x4_t | |
16858 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16859 | __arm_vdupq_n_f32 (float32_t __a) | |
16860 | { | |
16861 | return __builtin_mve_vdupq_n_fv4sf (__a); | |
16862 | } | |
16863 | ||
16864 | __extension__ extern __inline float16x8_t | |
16865 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16866 | __arm_vabsq_f16 (float16x8_t __a) | |
16867 | { | |
16868 | return __builtin_mve_vabsq_fv8hf (__a); | |
16869 | } | |
16870 | ||
16871 | __extension__ extern __inline float32x4_t | |
16872 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16873 | __arm_vabsq_f32 (float32x4_t __a) | |
16874 | { | |
16875 | return __builtin_mve_vabsq_fv4sf (__a); | |
16876 | } | |
16877 | ||
16878 | __extension__ extern __inline float16x8_t | |
16879 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16880 | __arm_vrev32q_f16 (float16x8_t __a) | |
16881 | { | |
16882 | return __builtin_mve_vrev32q_fv8hf (__a); | |
532e9e24 SP |
16883 | } |
16884 | ||
16885 | __extension__ extern __inline float32x4_t | |
16886 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 SP |
16887 | __arm_vcvttq_f32_f16 (float16x8_t __a) |
16888 | { | |
16889 | return __builtin_mve_vcvttq_f32_f16v4sf (__a); | |
16890 | } | |
16891 | ||
16892 | __extension__ extern __inline float32x4_t | |
16893 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16894 | __arm_vcvtbq_f32_f16 (float16x8_t __a) | |
16895 | { | |
16896 | return __builtin_mve_vcvtbq_f32_f16v4sf (__a); | |
16897 | } | |
16898 | ||
16899 | __extension__ extern __inline float16x8_t | |
16900 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16901 | __arm_vcvtq_f16_s16 (int16x8_t __a) | |
16902 | { | |
16903 | return __builtin_mve_vcvtq_to_f_sv8hf (__a); | |
16904 | } | |
16905 | ||
16906 | __extension__ extern __inline float32x4_t | |
16907 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16908 | __arm_vcvtq_f32_s32 (int32x4_t __a) | |
16909 | { | |
16910 | return __builtin_mve_vcvtq_to_f_sv4sf (__a); | |
16911 | } | |
16912 | ||
16913 | __extension__ extern __inline float16x8_t | |
16914 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16915 | __arm_vcvtq_f16_u16 (uint16x8_t __a) | |
16916 | { | |
16917 | return __builtin_mve_vcvtq_to_f_uv8hf (__a); | |
16918 | } | |
16919 | ||
16920 | __extension__ extern __inline float32x4_t | |
16921 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16922 | __arm_vcvtq_f32_u32 (uint32x4_t __a) | |
16923 | { | |
16924 | return __builtin_mve_vcvtq_to_f_uv4sf (__a); | |
16925 | } | |
16926 | ||
16927 | __extension__ extern __inline int16x8_t | |
16928 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16929 | __arm_vcvtq_s16_f16 (float16x8_t __a) | |
16930 | { | |
16931 | return __builtin_mve_vcvtq_from_f_sv8hi (__a); | |
16932 | } | |
16933 | ||
16934 | __extension__ extern __inline int32x4_t | |
16935 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16936 | __arm_vcvtq_s32_f32 (float32x4_t __a) | |
16937 | { | |
16938 | return __builtin_mve_vcvtq_from_f_sv4si (__a); | |
16939 | } | |
16940 | ||
16941 | __extension__ extern __inline uint16x8_t | |
16942 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16943 | __arm_vcvtq_u16_f16 (float16x8_t __a) | |
16944 | { | |
16945 | return __builtin_mve_vcvtq_from_f_uv8hi (__a); | |
16946 | } | |
16947 | ||
16948 | __extension__ extern __inline uint32x4_t | |
16949 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16950 | __arm_vcvtq_u32_f32 (float32x4_t __a) | |
16951 | { | |
16952 | return __builtin_mve_vcvtq_from_f_uv4si (__a); | |
16953 | } | |
16954 | ||
16955 | __extension__ extern __inline uint16x8_t | |
16956 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16957 | __arm_vcvtpq_u16_f16 (float16x8_t __a) | |
16958 | { | |
16959 | return __builtin_mve_vcvtpq_uv8hi (__a); | |
16960 | } | |
16961 | ||
16962 | __extension__ extern __inline uint32x4_t | |
16963 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16964 | __arm_vcvtpq_u32_f32 (float32x4_t __a) | |
16965 | { | |
16966 | return __builtin_mve_vcvtpq_uv4si (__a); | |
16967 | } | |
16968 | ||
16969 | __extension__ extern __inline uint16x8_t | |
16970 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16971 | __arm_vcvtnq_u16_f16 (float16x8_t __a) | |
16972 | { | |
16973 | return __builtin_mve_vcvtnq_uv8hi (__a); | |
16974 | } | |
16975 | ||
5a448362 CL |
16976 | __extension__ extern __inline uint32x4_t |
16977 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16978 | __arm_vcvtnq_u32_f32 (float32x4_t __a) | |
16979 | { | |
16980 | return __builtin_mve_vcvtnq_uv4si (__a); | |
16981 | } | |
16982 | ||
261014a1 SP |
16983 | __extension__ extern __inline uint16x8_t |
16984 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16985 | __arm_vcvtmq_u16_f16 (float16x8_t __a) | |
16986 | { | |
16987 | return __builtin_mve_vcvtmq_uv8hi (__a); | |
16988 | } | |
16989 | ||
16990 | __extension__ extern __inline uint32x4_t | |
16991 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16992 | __arm_vcvtmq_u32_f32 (float32x4_t __a) | |
16993 | { | |
16994 | return __builtin_mve_vcvtmq_uv4si (__a); | |
16995 | } | |
16996 | ||
16997 | __extension__ extern __inline uint16x8_t | |
16998 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
16999 | __arm_vcvtaq_u16_f16 (float16x8_t __a) | |
17000 | { | |
17001 | return __builtin_mve_vcvtaq_uv8hi (__a); | |
17002 | } | |
17003 | ||
17004 | __extension__ extern __inline uint32x4_t | |
17005 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17006 | __arm_vcvtaq_u32_f32 (float32x4_t __a) | |
17007 | { | |
17008 | return __builtin_mve_vcvtaq_uv4si (__a); | |
17009 | } | |
17010 | ||
17011 | __extension__ extern __inline int16x8_t | |
17012 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17013 | __arm_vcvtaq_s16_f16 (float16x8_t __a) | |
17014 | { | |
17015 | return __builtin_mve_vcvtaq_sv8hi (__a); | |
17016 | } | |
17017 | ||
17018 | __extension__ extern __inline int32x4_t | |
17019 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17020 | __arm_vcvtaq_s32_f32 (float32x4_t __a) | |
17021 | { | |
17022 | return __builtin_mve_vcvtaq_sv4si (__a); | |
17023 | } | |
17024 | ||
17025 | __extension__ extern __inline int16x8_t | |
17026 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17027 | __arm_vcvtnq_s16_f16 (float16x8_t __a) | |
17028 | { | |
17029 | return __builtin_mve_vcvtnq_sv8hi (__a); | |
17030 | } | |
17031 | ||
17032 | __extension__ extern __inline int32x4_t | |
17033 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17034 | __arm_vcvtnq_s32_f32 (float32x4_t __a) | |
17035 | { | |
17036 | return __builtin_mve_vcvtnq_sv4si (__a); | |
17037 | } | |
17038 | ||
17039 | __extension__ extern __inline int16x8_t | |
17040 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17041 | __arm_vcvtpq_s16_f16 (float16x8_t __a) | |
17042 | { | |
17043 | return __builtin_mve_vcvtpq_sv8hi (__a); | |
17044 | } | |
17045 | ||
17046 | __extension__ extern __inline int32x4_t | |
17047 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17048 | __arm_vcvtpq_s32_f32 (float32x4_t __a) | |
17049 | { | |
17050 | return __builtin_mve_vcvtpq_sv4si (__a); | |
17051 | } | |
17052 | ||
17053 | __extension__ extern __inline int16x8_t | |
17054 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17055 | __arm_vcvtmq_s16_f16 (float16x8_t __a) | |
17056 | { | |
17057 | return __builtin_mve_vcvtmq_sv8hi (__a); | |
17058 | } | |
17059 | ||
17060 | __extension__ extern __inline int32x4_t | |
17061 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17062 | __arm_vcvtmq_s32_f32 (float32x4_t __a) | |
17063 | { | |
17064 | return __builtin_mve_vcvtmq_sv4si (__a); | |
17065 | } | |
17066 | ||
17067 | __extension__ extern __inline float16x8_t | |
17068 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17069 | __arm_vsubq_n_f16 (float16x8_t __a, float16_t __b) | |
17070 | { | |
17071 | return __builtin_mve_vsubq_n_fv8hf (__a, __b); | |
17072 | } | |
17073 | ||
17074 | __extension__ extern __inline float32x4_t | |
17075 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17076 | __arm_vsubq_n_f32 (float32x4_t __a, float32_t __b) | |
17077 | { | |
17078 | return __builtin_mve_vsubq_n_fv4sf (__a, __b); | |
17079 | } | |
17080 | ||
17081 | __extension__ extern __inline float16x8_t | |
17082 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17083 | __arm_vbrsrq_n_f16 (float16x8_t __a, int32_t __b) | |
17084 | { | |
17085 | return __builtin_mve_vbrsrq_n_fv8hf (__a, __b); | |
17086 | } | |
17087 | ||
17088 | __extension__ extern __inline float32x4_t | |
17089 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17090 | __arm_vbrsrq_n_f32 (float32x4_t __a, int32_t __b) | |
17091 | { | |
17092 | return __builtin_mve_vbrsrq_n_fv4sf (__a, __b); | |
17093 | } | |
17094 | ||
17095 | __extension__ extern __inline float16x8_t | |
17096 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17097 | __arm_vcvtq_n_f16_s16 (int16x8_t __a, const int __imm6) | |
17098 | { | |
17099 | return __builtin_mve_vcvtq_n_to_f_sv8hf (__a, __imm6); | |
17100 | } | |
17101 | ||
17102 | __extension__ extern __inline float32x4_t | |
17103 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17104 | __arm_vcvtq_n_f32_s32 (int32x4_t __a, const int __imm6) | |
17105 | { | |
17106 | return __builtin_mve_vcvtq_n_to_f_sv4sf (__a, __imm6); | |
17107 | } | |
17108 | ||
17109 | __extension__ extern __inline float16x8_t | |
17110 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17111 | __arm_vcvtq_n_f16_u16 (uint16x8_t __a, const int __imm6) | |
17112 | { | |
17113 | return __builtin_mve_vcvtq_n_to_f_uv8hf (__a, __imm6); | |
17114 | } | |
17115 | ||
17116 | __extension__ extern __inline float32x4_t | |
17117 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17118 | __arm_vcvtq_n_f32_u32 (uint32x4_t __a, const int __imm6) | |
17119 | { | |
17120 | return __builtin_mve_vcvtq_n_to_f_uv4sf (__a, __imm6); | |
17121 | } | |
17122 | ||
17123 | __extension__ extern __inline float16x8_t | |
17124 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17125 | __arm_vcreateq_f16 (uint64_t __a, uint64_t __b) | |
17126 | { | |
17127 | return __builtin_mve_vcreateq_fv8hf (__a, __b); | |
17128 | } | |
17129 | ||
17130 | __extension__ extern __inline float32x4_t | |
17131 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17132 | __arm_vcreateq_f32 (uint64_t __a, uint64_t __b) | |
17133 | { | |
17134 | return __builtin_mve_vcreateq_fv4sf (__a, __b); | |
17135 | } | |
17136 | ||
17137 | __extension__ extern __inline int16x8_t | |
17138 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17139 | __arm_vcvtq_n_s16_f16 (float16x8_t __a, const int __imm6) | |
17140 | { | |
17141 | return __builtin_mve_vcvtq_n_from_f_sv8hi (__a, __imm6); | |
17142 | } | |
17143 | ||
17144 | __extension__ extern __inline int32x4_t | |
17145 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17146 | __arm_vcvtq_n_s32_f32 (float32x4_t __a, const int __imm6) | |
17147 | { | |
17148 | return __builtin_mve_vcvtq_n_from_f_sv4si (__a, __imm6); | |
17149 | } | |
17150 | ||
17151 | __extension__ extern __inline uint16x8_t | |
17152 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17153 | __arm_vcvtq_n_u16_f16 (float16x8_t __a, const int __imm6) | |
17154 | { | |
17155 | return __builtin_mve_vcvtq_n_from_f_uv8hi (__a, __imm6); | |
17156 | } | |
17157 | ||
17158 | __extension__ extern __inline uint32x4_t | |
17159 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17160 | __arm_vcvtq_n_u32_f32 (float32x4_t __a, const int __imm6) | |
17161 | { | |
17162 | return __builtin_mve_vcvtq_n_from_f_uv4si (__a, __imm6); | |
17163 | } | |
17164 | ||
17165 | __extension__ extern __inline mve_pred16_t | |
17166 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17167 | __arm_vcmpneq_n_f16 (float16x8_t __a, float16_t __b) | |
17168 | { | |
17169 | return __builtin_mve_vcmpneq_n_fv8hf (__a, __b); | |
17170 | } | |
17171 | ||
17172 | __extension__ extern __inline mve_pred16_t | |
17173 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17174 | __arm_vcmpneq_f16 (float16x8_t __a, float16x8_t __b) | |
17175 | { | |
17176 | return __builtin_mve_vcmpneq_fv8hf (__a, __b); | |
17177 | } | |
17178 | ||
17179 | __extension__ extern __inline mve_pred16_t | |
17180 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17181 | __arm_vcmpltq_n_f16 (float16x8_t __a, float16_t __b) | |
17182 | { | |
17183 | return __builtin_mve_vcmpltq_n_fv8hf (__a, __b); | |
17184 | } | |
17185 | ||
17186 | __extension__ extern __inline mve_pred16_t | |
17187 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17188 | __arm_vcmpltq_f16 (float16x8_t __a, float16x8_t __b) | |
17189 | { | |
17190 | return __builtin_mve_vcmpltq_fv8hf (__a, __b); | |
17191 | } | |
17192 | ||
17193 | __extension__ extern __inline mve_pred16_t | |
17194 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17195 | __arm_vcmpleq_n_f16 (float16x8_t __a, float16_t __b) | |
17196 | { | |
17197 | return __builtin_mve_vcmpleq_n_fv8hf (__a, __b); | |
17198 | } | |
17199 | ||
17200 | __extension__ extern __inline mve_pred16_t | |
17201 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17202 | __arm_vcmpleq_f16 (float16x8_t __a, float16x8_t __b) | |
17203 | { | |
17204 | return __builtin_mve_vcmpleq_fv8hf (__a, __b); | |
17205 | } | |
17206 | ||
17207 | __extension__ extern __inline mve_pred16_t | |
17208 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17209 | __arm_vcmpgtq_n_f16 (float16x8_t __a, float16_t __b) | |
17210 | { | |
17211 | return __builtin_mve_vcmpgtq_n_fv8hf (__a, __b); | |
17212 | } | |
17213 | ||
17214 | __extension__ extern __inline mve_pred16_t | |
17215 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17216 | __arm_vcmpgtq_f16 (float16x8_t __a, float16x8_t __b) | |
17217 | { | |
17218 | return __builtin_mve_vcmpgtq_fv8hf (__a, __b); | |
17219 | } | |
17220 | ||
17221 | __extension__ extern __inline mve_pred16_t | |
17222 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17223 | __arm_vcmpgeq_n_f16 (float16x8_t __a, float16_t __b) | |
17224 | { | |
17225 | return __builtin_mve_vcmpgeq_n_fv8hf (__a, __b); | |
17226 | } | |
17227 | ||
17228 | __extension__ extern __inline mve_pred16_t | |
17229 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17230 | __arm_vcmpgeq_f16 (float16x8_t __a, float16x8_t __b) | |
17231 | { | |
17232 | return __builtin_mve_vcmpgeq_fv8hf (__a, __b); | |
17233 | } | |
17234 | ||
17235 | __extension__ extern __inline mve_pred16_t | |
17236 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17237 | __arm_vcmpeqq_n_f16 (float16x8_t __a, float16_t __b) | |
17238 | { | |
17239 | return __builtin_mve_vcmpeqq_n_fv8hf (__a, __b); | |
17240 | } | |
17241 | ||
17242 | __extension__ extern __inline mve_pred16_t | |
17243 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17244 | __arm_vcmpeqq_f16 (float16x8_t __a, float16x8_t __b) | |
17245 | { | |
17246 | return __builtin_mve_vcmpeqq_fv8hf (__a, __b); | |
17247 | } | |
17248 | ||
17249 | __extension__ extern __inline float16x8_t | |
17250 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17251 | __arm_vsubq_f16 (float16x8_t __a, float16x8_t __b) | |
17252 | { | |
17253 | return __builtin_mve_vsubq_fv8hf (__a, __b); | |
17254 | } | |
17255 | ||
17256 | __extension__ extern __inline float16x8_t | |
17257 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17258 | __arm_vorrq_f16 (float16x8_t __a, float16x8_t __b) | |
17259 | { | |
17260 | return __builtin_mve_vorrq_fv8hf (__a, __b); | |
17261 | } | |
17262 | ||
17263 | __extension__ extern __inline float16x8_t | |
17264 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17265 | __arm_vornq_f16 (float16x8_t __a, float16x8_t __b) | |
17266 | { | |
17267 | return __builtin_mve_vornq_fv8hf (__a, __b); | |
17268 | } | |
17269 | ||
17270 | __extension__ extern __inline float16x8_t | |
17271 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17272 | __arm_vmulq_n_f16 (float16x8_t __a, float16_t __b) | |
17273 | { | |
17274 | return __builtin_mve_vmulq_n_fv8hf (__a, __b); | |
17275 | } | |
17276 | ||
17277 | __extension__ extern __inline float16x8_t | |
17278 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17279 | __arm_vmulq_f16 (float16x8_t __a, float16x8_t __b) | |
17280 | { | |
17281 | return __builtin_mve_vmulq_fv8hf (__a, __b); | |
17282 | } | |
17283 | ||
17284 | __extension__ extern __inline float16_t | |
17285 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17286 | __arm_vminnmvq_f16 (float16_t __a, float16x8_t __b) | |
17287 | { | |
17288 | return __builtin_mve_vminnmvq_fv8hf (__a, __b); | |
17289 | } | |
17290 | ||
17291 | __extension__ extern __inline float16x8_t | |
17292 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17293 | __arm_vminnmq_f16 (float16x8_t __a, float16x8_t __b) | |
17294 | { | |
17295 | return __builtin_mve_vminnmq_fv8hf (__a, __b); | |
17296 | } | |
17297 | ||
17298 | __extension__ extern __inline float16_t | |
17299 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17300 | __arm_vminnmavq_f16 (float16_t __a, float16x8_t __b) | |
17301 | { | |
17302 | return __builtin_mve_vminnmavq_fv8hf (__a, __b); | |
17303 | } | |
17304 | ||
17305 | __extension__ extern __inline float16x8_t | |
17306 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17307 | __arm_vminnmaq_f16 (float16x8_t __a, float16x8_t __b) | |
17308 | { | |
17309 | return __builtin_mve_vminnmaq_fv8hf (__a, __b); | |
17310 | } | |
17311 | ||
17312 | __extension__ extern __inline float16_t | |
17313 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17314 | __arm_vmaxnmvq_f16 (float16_t __a, float16x8_t __b) | |
17315 | { | |
17316 | return __builtin_mve_vmaxnmvq_fv8hf (__a, __b); | |
17317 | } | |
17318 | ||
17319 | __extension__ extern __inline float16x8_t | |
17320 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17321 | __arm_vmaxnmq_f16 (float16x8_t __a, float16x8_t __b) | |
17322 | { | |
17323 | return __builtin_mve_vmaxnmq_fv8hf (__a, __b); | |
17324 | } | |
17325 | ||
17326 | __extension__ extern __inline float16_t | |
17327 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17328 | __arm_vmaxnmavq_f16 (float16_t __a, float16x8_t __b) | |
17329 | { | |
17330 | return __builtin_mve_vmaxnmavq_fv8hf (__a, __b); | |
17331 | } | |
17332 | ||
17333 | __extension__ extern __inline float16x8_t | |
17334 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17335 | __arm_vmaxnmaq_f16 (float16x8_t __a, float16x8_t __b) | |
17336 | { | |
17337 | return __builtin_mve_vmaxnmaq_fv8hf (__a, __b); | |
17338 | } | |
17339 | ||
17340 | __extension__ extern __inline float16x8_t | |
17341 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17342 | __arm_veorq_f16 (float16x8_t __a, float16x8_t __b) | |
17343 | { | |
17344 | return __builtin_mve_veorq_fv8hf (__a, __b); | |
17345 | } | |
17346 | ||
17347 | __extension__ extern __inline float16x8_t | |
17348 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17349 | __arm_vcmulq_rot90_f16 (float16x8_t __a, float16x8_t __b) | |
17350 | { | |
db253e8b | 17351 | return __builtin_mve_vcmulq_rot90v8hf (__a, __b); |
261014a1 SP |
17352 | } |
17353 | ||
17354 | __extension__ extern __inline float16x8_t | |
17355 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17356 | __arm_vcmulq_rot270_f16 (float16x8_t __a, float16x8_t __b) | |
17357 | { | |
db253e8b | 17358 | return __builtin_mve_vcmulq_rot270v8hf (__a, __b); |
261014a1 SP |
17359 | } |
17360 | ||
17361 | __extension__ extern __inline float16x8_t | |
17362 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17363 | __arm_vcmulq_rot180_f16 (float16x8_t __a, float16x8_t __b) | |
17364 | { | |
db253e8b | 17365 | return __builtin_mve_vcmulq_rot180v8hf (__a, __b); |
261014a1 SP |
17366 | } |
17367 | ||
17368 | __extension__ extern __inline float16x8_t | |
17369 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17370 | __arm_vcmulq_f16 (float16x8_t __a, float16x8_t __b) | |
17371 | { | |
db253e8b | 17372 | return __builtin_mve_vcmulqv8hf (__a, __b); |
261014a1 SP |
17373 | } |
17374 | ||
17375 | __extension__ extern __inline float16x8_t | |
17376 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17377 | __arm_vcaddq_rot90_f16 (float16x8_t __a, float16x8_t __b) | |
17378 | { | |
9732dc85 | 17379 | return __builtin_mve_vcaddq_rot90v8hf (__a, __b); |
261014a1 SP |
17380 | } |
17381 | ||
17382 | __extension__ extern __inline float16x8_t | |
17383 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17384 | __arm_vcaddq_rot270_f16 (float16x8_t __a, float16x8_t __b) | |
17385 | { | |
9732dc85 | 17386 | return __builtin_mve_vcaddq_rot270v8hf (__a, __b); |
261014a1 SP |
17387 | } |
17388 | ||
17389 | __extension__ extern __inline float16x8_t | |
17390 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17391 | __arm_vbicq_f16 (float16x8_t __a, float16x8_t __b) | |
17392 | { | |
17393 | return __builtin_mve_vbicq_fv8hf (__a, __b); | |
17394 | } | |
17395 | ||
17396 | __extension__ extern __inline float16x8_t | |
17397 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17398 | __arm_vandq_f16 (float16x8_t __a, float16x8_t __b) | |
17399 | { | |
17400 | return __builtin_mve_vandq_fv8hf (__a, __b); | |
17401 | } | |
17402 | ||
17403 | __extension__ extern __inline float16x8_t | |
17404 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17405 | __arm_vaddq_n_f16 (float16x8_t __a, float16_t __b) | |
17406 | { | |
17407 | return __builtin_mve_vaddq_n_fv8hf (__a, __b); | |
17408 | } | |
17409 | ||
17410 | __extension__ extern __inline float16x8_t | |
17411 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17412 | __arm_vabdq_f16 (float16x8_t __a, float16x8_t __b) | |
17413 | { | |
17414 | return __builtin_mve_vabdq_fv8hf (__a, __b); | |
17415 | } | |
17416 | ||
17417 | __extension__ extern __inline mve_pred16_t | |
17418 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17419 | __arm_vcmpneq_n_f32 (float32x4_t __a, float32_t __b) | |
17420 | { | |
17421 | return __builtin_mve_vcmpneq_n_fv4sf (__a, __b); | |
17422 | } | |
17423 | ||
17424 | __extension__ extern __inline mve_pred16_t | |
17425 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17426 | __arm_vcmpneq_f32 (float32x4_t __a, float32x4_t __b) | |
17427 | { | |
17428 | return __builtin_mve_vcmpneq_fv4sf (__a, __b); | |
17429 | } | |
17430 | ||
17431 | __extension__ extern __inline mve_pred16_t | |
17432 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17433 | __arm_vcmpltq_n_f32 (float32x4_t __a, float32_t __b) | |
17434 | { | |
17435 | return __builtin_mve_vcmpltq_n_fv4sf (__a, __b); | |
17436 | } | |
17437 | ||
17438 | __extension__ extern __inline mve_pred16_t | |
17439 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17440 | __arm_vcmpltq_f32 (float32x4_t __a, float32x4_t __b) | |
17441 | { | |
17442 | return __builtin_mve_vcmpltq_fv4sf (__a, __b); | |
17443 | } | |
17444 | ||
17445 | __extension__ extern __inline mve_pred16_t | |
17446 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17447 | __arm_vcmpleq_n_f32 (float32x4_t __a, float32_t __b) | |
17448 | { | |
17449 | return __builtin_mve_vcmpleq_n_fv4sf (__a, __b); | |
17450 | } | |
17451 | ||
17452 | __extension__ extern __inline mve_pred16_t | |
17453 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17454 | __arm_vcmpleq_f32 (float32x4_t __a, float32x4_t __b) | |
17455 | { | |
17456 | return __builtin_mve_vcmpleq_fv4sf (__a, __b); | |
17457 | } | |
17458 | ||
17459 | __extension__ extern __inline mve_pred16_t | |
17460 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17461 | __arm_vcmpgtq_n_f32 (float32x4_t __a, float32_t __b) | |
17462 | { | |
17463 | return __builtin_mve_vcmpgtq_n_fv4sf (__a, __b); | |
17464 | } | |
17465 | ||
17466 | __extension__ extern __inline mve_pred16_t | |
17467 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17468 | __arm_vcmpgtq_f32 (float32x4_t __a, float32x4_t __b) | |
17469 | { | |
17470 | return __builtin_mve_vcmpgtq_fv4sf (__a, __b); | |
17471 | } | |
17472 | ||
17473 | __extension__ extern __inline mve_pred16_t | |
17474 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17475 | __arm_vcmpgeq_n_f32 (float32x4_t __a, float32_t __b) | |
17476 | { | |
17477 | return __builtin_mve_vcmpgeq_n_fv4sf (__a, __b); | |
17478 | } | |
17479 | ||
17480 | __extension__ extern __inline mve_pred16_t | |
17481 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17482 | __arm_vcmpgeq_f32 (float32x4_t __a, float32x4_t __b) | |
17483 | { | |
17484 | return __builtin_mve_vcmpgeq_fv4sf (__a, __b); | |
17485 | } | |
17486 | ||
17487 | __extension__ extern __inline mve_pred16_t | |
17488 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17489 | __arm_vcmpeqq_n_f32 (float32x4_t __a, float32_t __b) | |
17490 | { | |
17491 | return __builtin_mve_vcmpeqq_n_fv4sf (__a, __b); | |
17492 | } | |
17493 | ||
17494 | __extension__ extern __inline mve_pred16_t | |
17495 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17496 | __arm_vcmpeqq_f32 (float32x4_t __a, float32x4_t __b) | |
17497 | { | |
17498 | return __builtin_mve_vcmpeqq_fv4sf (__a, __b); | |
17499 | } | |
17500 | ||
17501 | __extension__ extern __inline float32x4_t | |
17502 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17503 | __arm_vsubq_f32 (float32x4_t __a, float32x4_t __b) | |
17504 | { | |
17505 | return __builtin_mve_vsubq_fv4sf (__a, __b); | |
17506 | } | |
17507 | ||
17508 | __extension__ extern __inline float32x4_t | |
17509 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17510 | __arm_vorrq_f32 (float32x4_t __a, float32x4_t __b) | |
17511 | { | |
17512 | return __builtin_mve_vorrq_fv4sf (__a, __b); | |
17513 | } | |
17514 | ||
17515 | __extension__ extern __inline float32x4_t | |
17516 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17517 | __arm_vornq_f32 (float32x4_t __a, float32x4_t __b) | |
17518 | { | |
17519 | return __builtin_mve_vornq_fv4sf (__a, __b); | |
17520 | } | |
17521 | ||
17522 | __extension__ extern __inline float32x4_t | |
17523 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17524 | __arm_vmulq_n_f32 (float32x4_t __a, float32_t __b) | |
17525 | { | |
17526 | return __builtin_mve_vmulq_n_fv4sf (__a, __b); | |
17527 | } | |
17528 | ||
17529 | __extension__ extern __inline float32x4_t | |
17530 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17531 | __arm_vmulq_f32 (float32x4_t __a, float32x4_t __b) | |
17532 | { | |
17533 | return __builtin_mve_vmulq_fv4sf (__a, __b); | |
17534 | } | |
17535 | ||
17536 | __extension__ extern __inline float32_t | |
17537 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17538 | __arm_vminnmvq_f32 (float32_t __a, float32x4_t __b) | |
17539 | { | |
17540 | return __builtin_mve_vminnmvq_fv4sf (__a, __b); | |
17541 | } | |
17542 | ||
17543 | __extension__ extern __inline float32x4_t | |
17544 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17545 | __arm_vminnmq_f32 (float32x4_t __a, float32x4_t __b) | |
17546 | { | |
17547 | return __builtin_mve_vminnmq_fv4sf (__a, __b); | |
17548 | } | |
17549 | ||
17550 | __extension__ extern __inline float32_t | |
17551 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17552 | __arm_vminnmavq_f32 (float32_t __a, float32x4_t __b) | |
17553 | { | |
17554 | return __builtin_mve_vminnmavq_fv4sf (__a, __b); | |
17555 | } | |
17556 | ||
17557 | __extension__ extern __inline float32x4_t | |
17558 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17559 | __arm_vminnmaq_f32 (float32x4_t __a, float32x4_t __b) | |
17560 | { | |
17561 | return __builtin_mve_vminnmaq_fv4sf (__a, __b); | |
17562 | } | |
17563 | ||
17564 | __extension__ extern __inline float32_t | |
17565 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17566 | __arm_vmaxnmvq_f32 (float32_t __a, float32x4_t __b) | |
17567 | { | |
17568 | return __builtin_mve_vmaxnmvq_fv4sf (__a, __b); | |
17569 | } | |
17570 | ||
17571 | __extension__ extern __inline float32x4_t | |
17572 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17573 | __arm_vmaxnmq_f32 (float32x4_t __a, float32x4_t __b) | |
17574 | { | |
17575 | return __builtin_mve_vmaxnmq_fv4sf (__a, __b); | |
17576 | } | |
17577 | ||
17578 | __extension__ extern __inline float32_t | |
17579 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17580 | __arm_vmaxnmavq_f32 (float32_t __a, float32x4_t __b) | |
17581 | { | |
17582 | return __builtin_mve_vmaxnmavq_fv4sf (__a, __b); | |
17583 | } | |
17584 | ||
17585 | __extension__ extern __inline float32x4_t | |
17586 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17587 | __arm_vmaxnmaq_f32 (float32x4_t __a, float32x4_t __b) | |
17588 | { | |
17589 | return __builtin_mve_vmaxnmaq_fv4sf (__a, __b); | |
17590 | } | |
17591 | ||
17592 | __extension__ extern __inline float32x4_t | |
17593 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17594 | __arm_veorq_f32 (float32x4_t __a, float32x4_t __b) | |
17595 | { | |
17596 | return __builtin_mve_veorq_fv4sf (__a, __b); | |
17597 | } | |
17598 | ||
17599 | __extension__ extern __inline float32x4_t | |
17600 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17601 | __arm_vcmulq_rot90_f32 (float32x4_t __a, float32x4_t __b) | |
17602 | { | |
db253e8b | 17603 | return __builtin_mve_vcmulq_rot90v4sf (__a, __b); |
261014a1 SP |
17604 | } |
17605 | ||
17606 | __extension__ extern __inline float32x4_t | |
17607 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17608 | __arm_vcmulq_rot270_f32 (float32x4_t __a, float32x4_t __b) | |
17609 | { | |
db253e8b | 17610 | return __builtin_mve_vcmulq_rot270v4sf (__a, __b); |
261014a1 SP |
17611 | } |
17612 | ||
17613 | __extension__ extern __inline float32x4_t | |
17614 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17615 | __arm_vcmulq_rot180_f32 (float32x4_t __a, float32x4_t __b) | |
17616 | { | |
db253e8b | 17617 | return __builtin_mve_vcmulq_rot180v4sf (__a, __b); |
261014a1 SP |
17618 | } |
17619 | ||
17620 | __extension__ extern __inline float32x4_t | |
17621 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17622 | __arm_vcmulq_f32 (float32x4_t __a, float32x4_t __b) | |
17623 | { | |
db253e8b | 17624 | return __builtin_mve_vcmulqv4sf (__a, __b); |
261014a1 SP |
17625 | } |
17626 | ||
17627 | __extension__ extern __inline float32x4_t | |
17628 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17629 | __arm_vcaddq_rot90_f32 (float32x4_t __a, float32x4_t __b) | |
17630 | { | |
9732dc85 | 17631 | return __builtin_mve_vcaddq_rot90v4sf (__a, __b); |
261014a1 SP |
17632 | } |
17633 | ||
17634 | __extension__ extern __inline float32x4_t | |
17635 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17636 | __arm_vcaddq_rot270_f32 (float32x4_t __a, float32x4_t __b) | |
17637 | { | |
9732dc85 | 17638 | return __builtin_mve_vcaddq_rot270v4sf (__a, __b); |
261014a1 SP |
17639 | } |
17640 | ||
17641 | __extension__ extern __inline float32x4_t | |
17642 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17643 | __arm_vbicq_f32 (float32x4_t __a, float32x4_t __b) | |
17644 | { | |
17645 | return __builtin_mve_vbicq_fv4sf (__a, __b); | |
17646 | } | |
17647 | ||
17648 | __extension__ extern __inline float32x4_t | |
17649 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17650 | __arm_vandq_f32 (float32x4_t __a, float32x4_t __b) | |
17651 | { | |
17652 | return __builtin_mve_vandq_fv4sf (__a, __b); | |
17653 | } | |
17654 | ||
17655 | __extension__ extern __inline float32x4_t | |
17656 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17657 | __arm_vaddq_n_f32 (float32x4_t __a, float32_t __b) | |
17658 | { | |
17659 | return __builtin_mve_vaddq_n_fv4sf (__a, __b); | |
17660 | } | |
17661 | ||
17662 | __extension__ extern __inline float32x4_t | |
17663 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17664 | __arm_vabdq_f32 (float32x4_t __a, float32x4_t __b) | |
17665 | { | |
17666 | return __builtin_mve_vabdq_fv4sf (__a, __b); | |
17667 | } | |
17668 | ||
17669 | __extension__ extern __inline float16x8_t | |
17670 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17671 | __arm_vcvttq_f16_f32 (float16x8_t __a, float32x4_t __b) | |
17672 | { | |
17673 | return __builtin_mve_vcvttq_f16_f32v8hf (__a, __b); | |
17674 | } | |
17675 | ||
17676 | __extension__ extern __inline float16x8_t | |
17677 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17678 | __arm_vcvtbq_f16_f32 (float16x8_t __a, float32x4_t __b) | |
17679 | { | |
17680 | return __builtin_mve_vcvtbq_f16_f32v8hf (__a, __b); | |
17681 | } | |
17682 | ||
17683 | __extension__ extern __inline mve_pred16_t | |
17684 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17685 | __arm_vcmpeqq_m_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p) | |
17686 | { | |
17687 | return __builtin_mve_vcmpeqq_m_fv8hf (__a, __b, __p); | |
17688 | } | |
17689 | ||
17690 | __extension__ extern __inline mve_pred16_t | |
17691 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17692 | __arm_vcmpeqq_m_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p) | |
17693 | { | |
17694 | return __builtin_mve_vcmpeqq_m_fv4sf (__a, __b, __p); | |
17695 | } | |
17696 | ||
17697 | __extension__ extern __inline int16x8_t | |
17698 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
17699 | __arm_vcvtaq_m_s16_f16 (int16x8_t __inactive, float16x8_t __a, mve_pred16_t __p) | |
17700 | { | |
17701 | return __builtin_mve_vcvtaq_m_sv8hi (__inactive, __a, __p); | |
17702 | } | |
17703 | ||
/* Predicated ("_m") conversion wrappers: float->integer (vcvtaq) and
   integer->float (vcvtq) for 16- and 32-bit element modes.  Each
   wrapper forwards unchanged to the mode-specific GCC builtin; the
   conversion direction and element types are encoded in the name
   suffixes.  NOTE(review): lanes masked out by __p presumably take
   their value from __inactive -- confirm against the Arm MVE
   intrinsics reference.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtaq_m_u16_f16 (uint16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtaq_m_uv8hi (__inactive, __a, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtaq_m_s32_f32 (int32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtaq_m_sv4si (__inactive, __a, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtaq_m_u32_f32 (uint32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtaq_m_uv4si (__inactive, __a, __p);
}

/* Integer -> float conversions ("to_f" builtins).  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_m_f16_s16 (float16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_to_f_sv8hf (__inactive, __a, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_m_f16_u16 (float16x8_t __inactive, uint16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_to_f_uv8hf (__inactive, __a, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_m_f32_s32 (float32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_to_f_sv4sf (__inactive, __a, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_m_f32_u32 (float32x4_t __inactive, uint32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_to_f_uv4sf (__inactive, __a, __p);
}
17752 | ||
17753 | ||
/* Predicated f16<->f32 half-width conversions (bottom/top lane
   variants) plus the predicated 32-bit reversal of f16 lanes.  Note
   the narrowing f32->f16 forms take the destination vector __a as
   first operand rather than an __inactive vector, matching the
   builtin's signature.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtbq_m_f16_f32 (float16x8_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcvtbq_m_f16_f32v8hf (__a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtbq_m_f32_f16 (float32x4_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtbq_m_f32_f16v4sf (__inactive, __a, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvttq_m_f16_f32 (float16x8_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcvttq_m_f16_f32v8hf (__a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvttq_m_f32_f16 (float32x4_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvttq_m_f32_f16v4sf (__inactive, __a, __p);
}

/* Predicated reversal of f16 elements within each 32-bit container.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev32q_m_f16 (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrev32q_m_fv8hf (__inactive, __a, __p);
}
17788 | ||
/* f16 complex multiply-accumulate (vcmlaq, with 0/90/180/270 degree
   rotation variants) and fused multiply-add/subtract (vfmaq/vfmasq/
   vfmsq, including "_n" scalar-operand forms).  All are unpredicated
   three-operand wrappers around the corresponding builtins.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmlaq_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c)
{
  return __builtin_mve_vcmlaqv8hf (__a, __b, __c);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmlaq_rot180_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c)
{
  return __builtin_mve_vcmlaq_rot180v8hf (__a, __b, __c);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmlaq_rot270_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c)
{
  return __builtin_mve_vcmlaq_rot270v8hf (__a, __b, __c);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmlaq_rot90_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c)
{
  return __builtin_mve_vcmlaq_rot90v8hf (__a, __b, __c);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vfmaq_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c)
{
  return __builtin_mve_vfmaq_fv8hf (__a, __b, __c);
}

/* "_n" forms: the final multiplicand/addend is a scalar float16_t.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vfmaq_n_f16 (float16x8_t __a, float16x8_t __b, float16_t __c)
{
  return __builtin_mve_vfmaq_n_fv8hf (__a, __b, __c);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vfmasq_n_f16 (float16x8_t __a, float16x8_t __b, float16_t __c)
{
  return __builtin_mve_vfmasq_n_fv8hf (__a, __b, __c);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vfmsq_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c)
{
  return __builtin_mve_vfmsq_fv8hf (__a, __b, __c);
}
17844 | ||
/* Predicated f16 absolute value, and predicated f16 -> signed 16-bit
   conversions for the VCVTM/VCVTN/VCVTP/VCVT rounding variants.  Each
   is a direct forward to the v8hf/v8hi builtin.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabsq_m_f16 (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vabsq_m_fv8hf (__inactive, __a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtmq_m_s16_f16 (int16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtmq_m_sv8hi (__inactive, __a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtnq_m_s16_f16 (int16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtnq_m_sv8hi (__inactive, __a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtpq_m_s16_f16 (int16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtpq_m_sv8hi (__inactive, __a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_m_s16_f16 (int16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_from_f_sv8hi (__inactive, __a, __p);
}
17879 | ||
/* Predicated f16 scalar broadcast (vdupq_m_n) and the NaN-propagation-
   aware max/min family: vector forms return a vector, the "av"/"v"
   across-vector forms reduce to a scalar float16_t seeded with __a.
   NOTE(review): NMa/NMav/NMv semantics (absolute/across-vector) taken
   from the intrinsic naming convention -- confirm against the Arm MVE
   intrinsics reference.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdupq_m_n_f16 (float16x8_t __inactive, float16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vdupq_m_n_fv8hf (__inactive, __a, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmaq_m_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxnmaq_m_fv8hf (__a, __b, __p);
}

__extension__ extern __inline float16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmavq_p_f16 (float16_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxnmavq_p_fv8hf (__a, __b, __p);
}

__extension__ extern __inline float16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmvq_p_f16 (float16_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxnmvq_p_fv8hf (__a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmaq_m_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminnmaq_m_fv8hf (__a, __b, __p);
}

__extension__ extern __inline float16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmavq_p_f16 (float16_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminnmavq_p_fv8hf (__a, __b, __p);
}

__extension__ extern __inline float16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmvq_p_f16 (float16_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminnmvq_p_fv8hf (__a, __b, __p);
}
17928 | ||
/* Predicated f16 negate, predicate-based lane select (vpselq),
   64-bit-container lane reversal, and the round-to-integral family
   (vrnda/vrndm/vrndn/vrndp/vrnd/vrndx).  All forward directly to
   their v8hf builtins.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vnegq_m_f16 (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vnegq_m_fv8hf (__inactive, __a, __p);
}

/* Select lanes from __a or __b according to predicate __p.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vpselq_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vpselq_fv8hf (__a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_m_f16 (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrev64q_m_fv8hf (__inactive, __a, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndaq_m_f16 (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrndaq_m_fv8hf (__inactive, __a, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndmq_m_f16 (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrndmq_m_fv8hf (__inactive, __a, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndnq_m_f16 (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrndnq_m_fv8hf (__inactive, __a, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndpq_m_f16 (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrndpq_m_fv8hf (__inactive, __a, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndq_m_f16 (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrndq_m_fv8hf (__inactive, __a, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndxq_m_f16 (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrndxq_m_fv8hf (__inactive, __a, __p);
}
17991 | ||
/* Predicated f16 comparisons, producing an mve_pred16_t result mask.
   Vector-vector forms compare __a against __b; "_n" forms compare
   each lane of __a against the scalar __b.  Covers EQ, GE, GT, LE,
   LT and NE.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_m_n_f16 (float16x8_t __a, float16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpeqq_m_n_fv8hf (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq_m_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpgeq_m_fv8hf (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq_m_n_f16 (float16x8_t __a, float16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpgeq_m_n_fv8hf (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq_m_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpgtq_m_fv8hf (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq_m_n_f16 (float16x8_t __a, float16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpgtq_m_n_fv8hf (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq_m_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpleq_m_fv8hf (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq_m_n_f16 (float16x8_t __a, float16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpleq_m_n_fv8hf (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq_m_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpltq_m_fv8hf (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq_m_n_f16 (float16x8_t __a, float16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpltq_m_n_fv8hf (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_m_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpneq_m_fv8hf (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_m_n_f16 (float16x8_t __a, float16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpneq_m_n_fv8hf (__a, __b, __p);
}
18068 | ||
/* Predicated f16 -> unsigned 16-bit conversions for the
   VCVTM/VCVTN/VCVTP/VCVT rounding variants ("uv8hi" builtins).  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtmq_m_u16_f16 (uint16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtmq_m_uv8hi (__inactive, __a, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtnq_m_u16_f16 (uint16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtnq_m_uv8hi (__inactive, __a, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtpq_m_u16_f16 (uint16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtpq_m_uv8hi (__inactive, __a, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_m_u16_f16 (uint16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_from_f_uv8hi (__inactive, __a, __p);
}
18096 | ||
/* f32 counterparts of the f16 complex multiply-accumulate and fused
   multiply-add/subtract wrappers above: vcmlaq (0/90/180/270 degree
   rotations), vfmaq/vfmaq_n/vfmasq_n/vfmsq, all mapped onto the
   v4sf builtins.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmlaq_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
{
  return __builtin_mve_vcmlaqv4sf (__a, __b, __c);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmlaq_rot180_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
{
  return __builtin_mve_vcmlaq_rot180v4sf (__a, __b, __c);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmlaq_rot270_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
{
  return __builtin_mve_vcmlaq_rot270v4sf (__a, __b, __c);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmlaq_rot90_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
{
  return __builtin_mve_vcmlaq_rot90v4sf (__a, __b, __c);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vfmaq_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
{
  return __builtin_mve_vfmaq_fv4sf (__a, __b, __c);
}

/* "_n" forms: the final operand is a scalar float32_t.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vfmaq_n_f32 (float32x4_t __a, float32x4_t __b, float32_t __c)
{
  return __builtin_mve_vfmaq_n_fv4sf (__a, __b, __c);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vfmasq_n_f32 (float32x4_t __a, float32x4_t __b, float32_t __c)
{
  return __builtin_mve_vfmasq_n_fv4sf (__a, __b, __c);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vfmsq_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
{
  return __builtin_mve_vfmsq_fv4sf (__a, __b, __c);
}
18152 | ||
/* Predicated f32 absolute value, and predicated f32 -> signed 32-bit
   conversions for the VCVTM/VCVTN/VCVTP/VCVT rounding variants.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabsq_m_f32 (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vabsq_m_fv4sf (__inactive, __a, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtmq_m_s32_f32 (int32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtmq_m_sv4si (__inactive, __a, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtnq_m_s32_f32 (int32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtnq_m_sv4si (__inactive, __a, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtpq_m_s32_f32 (int32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtpq_m_sv4si (__inactive, __a, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_m_s32_f32 (int32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_from_f_sv4si (__inactive, __a, __p);
}
18187 | ||
/* Predicated f32 scalar broadcast and the f32 max/min NM family;
   across-vector ("av"/"v") forms reduce to a scalar float32_t seeded
   with __a.  Mirrors the f16 group above.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdupq_m_n_f32 (float32x4_t __inactive, float32_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vdupq_m_n_fv4sf (__inactive, __a, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmaq_m_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxnmaq_m_fv4sf (__a, __b, __p);
}

__extension__ extern __inline float32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmavq_p_f32 (float32_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxnmavq_p_fv4sf (__a, __b, __p);
}

__extension__ extern __inline float32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmvq_p_f32 (float32_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxnmvq_p_fv4sf (__a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmaq_m_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminnmaq_m_fv4sf (__a, __b, __p);
}

__extension__ extern __inline float32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmavq_p_f32 (float32_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminnmavq_p_fv4sf (__a, __b, __p);
}

__extension__ extern __inline float32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmvq_p_f32 (float32_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminnmvq_p_fv4sf (__a, __b, __p);
}
18236 | ||
/* Predicated f32 negate, predicate-based lane select, 64-bit-container
   lane reversal, and the round-to-integral family; f32 counterparts
   of the f16 group above, mapped onto v4sf builtins.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vnegq_m_f32 (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vnegq_m_fv4sf (__inactive, __a, __p);
}

/* Select lanes from __a or __b according to predicate __p.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vpselq_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vpselq_fv4sf (__a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_m_f32 (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrev64q_m_fv4sf (__inactive, __a, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndaq_m_f32 (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrndaq_m_fv4sf (__inactive, __a, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndmq_m_f32 (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrndmq_m_fv4sf (__inactive, __a, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndnq_m_f32 (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrndnq_m_fv4sf (__inactive, __a, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndpq_m_f32 (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrndpq_m_fv4sf (__inactive, __a, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndq_m_f32 (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrndq_m_fv4sf (__inactive, __a, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndxq_m_f32 (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrndxq_m_fv4sf (__inactive, __a, __p);
}
18299 | ||
/* Predicated f32 comparisons producing an mve_pred16_t mask; vector-
   vector and "_n" vector-scalar forms for EQ, GE, GT, LE, LT and NE.
   Mirrors the f16 comparison group above.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_m_n_f32 (float32x4_t __a, float32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpeqq_m_n_fv4sf (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq_m_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpgeq_m_fv4sf (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq_m_n_f32 (float32x4_t __a, float32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpgeq_m_n_fv4sf (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq_m_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpgtq_m_fv4sf (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq_m_n_f32 (float32x4_t __a, float32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpgtq_m_n_fv4sf (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq_m_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpleq_m_fv4sf (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq_m_n_f32 (float32x4_t __a, float32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpleq_m_n_fv4sf (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq_m_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpltq_m_fv4sf (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq_m_n_f32 (float32x4_t __a, float32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpltq_m_n_fv4sf (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_m_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpneq_m_fv4sf (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_m_n_f32 (float32x4_t __a, float32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpneq_m_n_fv4sf (__a, __b, __p);
}
18376 | ||
/* Predicated f32 -> unsigned 32-bit conversions for the
   VCVTM/VCVTN/VCVTP/VCVT rounding variants ("uv4si" builtins).  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtmq_m_u32_f32 (uint32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtmq_m_uv4si (__inactive, __a, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtnq_m_u32_f32 (uint32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtnq_m_uv4si (__inactive, __a, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtpq_m_u32_f32 (uint32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtpq_m_uv4si (__inactive, __a, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_m_u32_f32 (uint32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_from_f_uv4si (__inactive, __a, __p);
}
18404 | ||
/* Predicated fixed-point integer -> float conversions ("_n" forms).
   __imm6 is a compile-time immediate passed straight through to the
   builtin.  NOTE(review): valid immediate range (typically 1..16 for
   16-bit, 1..32 for 32-bit lanes) is enforced by the builtin, not
   here -- confirm against the Arm MVE intrinsics reference.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_m_n_f16_u16 (float16x8_t __inactive, uint16x8_t __a, const int __imm6, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_n_to_f_uv8hf (__inactive, __a, __imm6, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_m_n_f16_s16 (float16x8_t __inactive, int16x8_t __a, const int __imm6, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_n_to_f_sv8hf (__inactive, __a, __imm6, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_m_n_f32_u32 (float32x4_t __inactive, uint32x4_t __a, const int __imm6, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_n_to_f_uv4sf (__inactive, __a, __imm6, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_m_n_f32_s32 (float32x4_t __inactive, int32x4_t __a, const int __imm6, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_n_to_f_sv4sf (__inactive, __a, __imm6, __p);
}
18432 | ||
18433 | __extension__ extern __inline float32x4_t | |
18434 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18435 | __arm_vabdq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p) | |
18436 | { | |
18437 | return __builtin_mve_vabdq_m_fv4sf (__inactive, __a, __b, __p); | |
18438 | } | |
18439 | ||
18440 | __extension__ extern __inline float16x8_t | |
18441 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18442 | __arm_vabdq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p) | |
18443 | { | |
18444 | return __builtin_mve_vabdq_m_fv8hf (__inactive, __a, __b, __p); | |
18445 | } | |
18446 | ||
18447 | __extension__ extern __inline float32x4_t | |
18448 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18449 | __arm_vaddq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p) | |
18450 | { | |
18451 | return __builtin_mve_vaddq_m_fv4sf (__inactive, __a, __b, __p); | |
18452 | } | |
18453 | ||
18454 | __extension__ extern __inline float16x8_t | |
18455 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18456 | __arm_vaddq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p) | |
18457 | { | |
18458 | return __builtin_mve_vaddq_m_fv8hf (__inactive, __a, __b, __p); | |
18459 | } | |
18460 | ||
18461 | __extension__ extern __inline float32x4_t | |
18462 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18463 | __arm_vaddq_m_n_f32 (float32x4_t __inactive, float32x4_t __a, float32_t __b, mve_pred16_t __p) | |
18464 | { | |
18465 | return __builtin_mve_vaddq_m_n_fv4sf (__inactive, __a, __b, __p); | |
18466 | } | |
18467 | ||
18468 | __extension__ extern __inline float16x8_t | |
18469 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18470 | __arm_vaddq_m_n_f16 (float16x8_t __inactive, float16x8_t __a, float16_t __b, mve_pred16_t __p) | |
18471 | { | |
18472 | return __builtin_mve_vaddq_m_n_fv8hf (__inactive, __a, __b, __p); | |
18473 | } | |
18474 | ||
18475 | __extension__ extern __inline float32x4_t | |
18476 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18477 | __arm_vandq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p) | |
18478 | { | |
18479 | return __builtin_mve_vandq_m_fv4sf (__inactive, __a, __b, __p); | |
18480 | } | |
18481 | ||
18482 | __extension__ extern __inline float16x8_t | |
18483 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18484 | __arm_vandq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p) | |
18485 | { | |
18486 | return __builtin_mve_vandq_m_fv8hf (__inactive, __a, __b, __p); | |
18487 | } | |
18488 | ||
18489 | __extension__ extern __inline float32x4_t | |
18490 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18491 | __arm_vbicq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p) | |
18492 | { | |
18493 | return __builtin_mve_vbicq_m_fv4sf (__inactive, __a, __b, __p); | |
18494 | } | |
18495 | ||
18496 | __extension__ extern __inline float16x8_t | |
18497 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18498 | __arm_vbicq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p) | |
18499 | { | |
18500 | return __builtin_mve_vbicq_m_fv8hf (__inactive, __a, __b, __p); | |
18501 | } | |
18502 | ||
18503 | __extension__ extern __inline float32x4_t | |
18504 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18505 | __arm_vbrsrq_m_n_f32 (float32x4_t __inactive, float32x4_t __a, int32_t __b, mve_pred16_t __p) | |
18506 | { | |
18507 | return __builtin_mve_vbrsrq_m_n_fv4sf (__inactive, __a, __b, __p); | |
18508 | } | |
18509 | ||
18510 | __extension__ extern __inline float16x8_t | |
18511 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18512 | __arm_vbrsrq_m_n_f16 (float16x8_t __inactive, float16x8_t __a, int32_t __b, mve_pred16_t __p) | |
18513 | { | |
18514 | return __builtin_mve_vbrsrq_m_n_fv8hf (__inactive, __a, __b, __p); | |
18515 | } | |
18516 | ||
18517 | __extension__ extern __inline float32x4_t | |
18518 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18519 | __arm_vcaddq_rot270_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p) | |
18520 | { | |
18521 | return __builtin_mve_vcaddq_rot270_m_fv4sf (__inactive, __a, __b, __p); | |
18522 | } | |
18523 | ||
18524 | __extension__ extern __inline float16x8_t | |
18525 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18526 | __arm_vcaddq_rot270_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p) | |
18527 | { | |
18528 | return __builtin_mve_vcaddq_rot270_m_fv8hf (__inactive, __a, __b, __p); | |
18529 | } | |
18530 | ||
18531 | __extension__ extern __inline float32x4_t | |
18532 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18533 | __arm_vcaddq_rot90_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p) | |
18534 | { | |
18535 | return __builtin_mve_vcaddq_rot90_m_fv4sf (__inactive, __a, __b, __p); | |
18536 | } | |
18537 | ||
18538 | __extension__ extern __inline float16x8_t | |
18539 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18540 | __arm_vcaddq_rot90_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p) | |
18541 | { | |
18542 | return __builtin_mve_vcaddq_rot90_m_fv8hf (__inactive, __a, __b, __p); | |
18543 | } | |
18544 | ||
18545 | __extension__ extern __inline float32x4_t | |
18546 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18547 | __arm_vcmlaq_m_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c, mve_pred16_t __p) | |
18548 | { | |
18549 | return __builtin_mve_vcmlaq_m_fv4sf (__a, __b, __c, __p); | |
18550 | } | |
18551 | ||
18552 | __extension__ extern __inline float16x8_t | |
18553 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18554 | __arm_vcmlaq_m_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c, mve_pred16_t __p) | |
18555 | { | |
18556 | return __builtin_mve_vcmlaq_m_fv8hf (__a, __b, __c, __p); | |
18557 | } | |
18558 | ||
18559 | __extension__ extern __inline float32x4_t | |
18560 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18561 | __arm_vcmlaq_rot180_m_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c, mve_pred16_t __p) | |
18562 | { | |
18563 | return __builtin_mve_vcmlaq_rot180_m_fv4sf (__a, __b, __c, __p); | |
18564 | } | |
18565 | ||
18566 | __extension__ extern __inline float16x8_t | |
18567 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18568 | __arm_vcmlaq_rot180_m_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c, mve_pred16_t __p) | |
18569 | { | |
18570 | return __builtin_mve_vcmlaq_rot180_m_fv8hf (__a, __b, __c, __p); | |
18571 | } | |
18572 | ||
18573 | __extension__ extern __inline float32x4_t | |
18574 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18575 | __arm_vcmlaq_rot270_m_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c, mve_pred16_t __p) | |
18576 | { | |
18577 | return __builtin_mve_vcmlaq_rot270_m_fv4sf (__a, __b, __c, __p); | |
18578 | } | |
18579 | ||
18580 | __extension__ extern __inline float16x8_t | |
18581 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18582 | __arm_vcmlaq_rot270_m_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c, mve_pred16_t __p) | |
18583 | { | |
18584 | return __builtin_mve_vcmlaq_rot270_m_fv8hf (__a, __b, __c, __p); | |
18585 | } | |
18586 | ||
18587 | __extension__ extern __inline float32x4_t | |
18588 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18589 | __arm_vcmlaq_rot90_m_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c, mve_pred16_t __p) | |
18590 | { | |
18591 | return __builtin_mve_vcmlaq_rot90_m_fv4sf (__a, __b, __c, __p); | |
18592 | } | |
18593 | ||
18594 | __extension__ extern __inline float16x8_t | |
18595 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18596 | __arm_vcmlaq_rot90_m_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c, mve_pred16_t __p) | |
18597 | { | |
18598 | return __builtin_mve_vcmlaq_rot90_m_fv8hf (__a, __b, __c, __p); | |
18599 | } | |
18600 | ||
18601 | __extension__ extern __inline float32x4_t | |
18602 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18603 | __arm_vcmulq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p) | |
18604 | { | |
18605 | return __builtin_mve_vcmulq_m_fv4sf (__inactive, __a, __b, __p); | |
18606 | } | |
18607 | ||
18608 | __extension__ extern __inline float16x8_t | |
18609 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18610 | __arm_vcmulq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p) | |
18611 | { | |
18612 | return __builtin_mve_vcmulq_m_fv8hf (__inactive, __a, __b, __p); | |
18613 | } | |
18614 | ||
18615 | __extension__ extern __inline float32x4_t | |
18616 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18617 | __arm_vcmulq_rot180_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p) | |
18618 | { | |
18619 | return __builtin_mve_vcmulq_rot180_m_fv4sf (__inactive, __a, __b, __p); | |
18620 | } | |
18621 | ||
18622 | __extension__ extern __inline float16x8_t | |
18623 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18624 | __arm_vcmulq_rot180_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p) | |
18625 | { | |
18626 | return __builtin_mve_vcmulq_rot180_m_fv8hf (__inactive, __a, __b, __p); | |
18627 | } | |
18628 | ||
18629 | __extension__ extern __inline float32x4_t | |
18630 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18631 | __arm_vcmulq_rot270_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p) | |
18632 | { | |
18633 | return __builtin_mve_vcmulq_rot270_m_fv4sf (__inactive, __a, __b, __p); | |
18634 | } | |
18635 | ||
18636 | __extension__ extern __inline float16x8_t | |
18637 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18638 | __arm_vcmulq_rot270_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p) | |
18639 | { | |
18640 | return __builtin_mve_vcmulq_rot270_m_fv8hf (__inactive, __a, __b, __p); | |
18641 | } | |
18642 | ||
18643 | __extension__ extern __inline float32x4_t | |
18644 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18645 | __arm_vcmulq_rot90_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p) | |
18646 | { | |
18647 | return __builtin_mve_vcmulq_rot90_m_fv4sf (__inactive, __a, __b, __p); | |
18648 | } | |
18649 | ||
18650 | __extension__ extern __inline float16x8_t | |
18651 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18652 | __arm_vcmulq_rot90_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p) | |
18653 | { | |
18654 | return __builtin_mve_vcmulq_rot90_m_fv8hf (__inactive, __a, __b, __p); | |
18655 | } | |
18656 | ||
18657 | __extension__ extern __inline int32x4_t | |
18658 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18659 | __arm_vcvtq_m_n_s32_f32 (int32x4_t __inactive, float32x4_t __a, const int __imm6, mve_pred16_t __p) | |
18660 | { | |
18661 | return __builtin_mve_vcvtq_m_n_from_f_sv4si (__inactive, __a, __imm6, __p); | |
18662 | } | |
18663 | ||
18664 | __extension__ extern __inline int16x8_t | |
18665 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18666 | __arm_vcvtq_m_n_s16_f16 (int16x8_t __inactive, float16x8_t __a, const int __imm6, mve_pred16_t __p) | |
18667 | { | |
18668 | return __builtin_mve_vcvtq_m_n_from_f_sv8hi (__inactive, __a, __imm6, __p); | |
18669 | } | |
18670 | ||
18671 | __extension__ extern __inline uint32x4_t | |
18672 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18673 | __arm_vcvtq_m_n_u32_f32 (uint32x4_t __inactive, float32x4_t __a, const int __imm6, mve_pred16_t __p) | |
18674 | { | |
18675 | return __builtin_mve_vcvtq_m_n_from_f_uv4si (__inactive, __a, __imm6, __p); | |
18676 | } | |
18677 | ||
18678 | __extension__ extern __inline uint16x8_t | |
18679 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18680 | __arm_vcvtq_m_n_u16_f16 (uint16x8_t __inactive, float16x8_t __a, const int __imm6, mve_pred16_t __p) | |
18681 | { | |
18682 | return __builtin_mve_vcvtq_m_n_from_f_uv8hi (__inactive, __a, __imm6, __p); | |
18683 | } | |
18684 | ||
18685 | __extension__ extern __inline float32x4_t | |
18686 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18687 | __arm_veorq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p) | |
18688 | { | |
18689 | return __builtin_mve_veorq_m_fv4sf (__inactive, __a, __b, __p); | |
18690 | } | |
18691 | ||
18692 | __extension__ extern __inline float16x8_t | |
18693 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18694 | __arm_veorq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p) | |
18695 | { | |
18696 | return __builtin_mve_veorq_m_fv8hf (__inactive, __a, __b, __p); | |
18697 | } | |
18698 | ||
18699 | __extension__ extern __inline float32x4_t | |
18700 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18701 | __arm_vfmaq_m_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c, mve_pred16_t __p) | |
18702 | { | |
18703 | return __builtin_mve_vfmaq_m_fv4sf (__a, __b, __c, __p); | |
18704 | } | |
18705 | ||
18706 | __extension__ extern __inline float16x8_t | |
18707 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18708 | __arm_vfmaq_m_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c, mve_pred16_t __p) | |
18709 | { | |
18710 | return __builtin_mve_vfmaq_m_fv8hf (__a, __b, __c, __p); | |
18711 | } | |
18712 | ||
18713 | __extension__ extern __inline float32x4_t | |
18714 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18715 | __arm_vfmaq_m_n_f32 (float32x4_t __a, float32x4_t __b, float32_t __c, mve_pred16_t __p) | |
18716 | { | |
18717 | return __builtin_mve_vfmaq_m_n_fv4sf (__a, __b, __c, __p); | |
18718 | } | |
18719 | ||
18720 | __extension__ extern __inline float16x8_t | |
18721 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18722 | __arm_vfmaq_m_n_f16 (float16x8_t __a, float16x8_t __b, float16_t __c, mve_pred16_t __p) | |
18723 | { | |
18724 | return __builtin_mve_vfmaq_m_n_fv8hf (__a, __b, __c, __p); | |
18725 | } | |
18726 | ||
18727 | __extension__ extern __inline float32x4_t | |
18728 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18729 | __arm_vfmasq_m_n_f32 (float32x4_t __a, float32x4_t __b, float32_t __c, mve_pred16_t __p) | |
18730 | { | |
18731 | return __builtin_mve_vfmasq_m_n_fv4sf (__a, __b, __c, __p); | |
18732 | } | |
18733 | ||
18734 | __extension__ extern __inline float16x8_t | |
18735 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18736 | __arm_vfmasq_m_n_f16 (float16x8_t __a, float16x8_t __b, float16_t __c, mve_pred16_t __p) | |
18737 | { | |
18738 | return __builtin_mve_vfmasq_m_n_fv8hf (__a, __b, __c, __p); | |
18739 | } | |
18740 | ||
18741 | __extension__ extern __inline float32x4_t | |
18742 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18743 | __arm_vfmsq_m_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c, mve_pred16_t __p) | |
18744 | { | |
18745 | return __builtin_mve_vfmsq_m_fv4sf (__a, __b, __c, __p); | |
18746 | } | |
18747 | ||
18748 | __extension__ extern __inline float16x8_t | |
18749 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18750 | __arm_vfmsq_m_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c, mve_pred16_t __p) | |
18751 | { | |
18752 | return __builtin_mve_vfmsq_m_fv8hf (__a, __b, __c, __p); | |
18753 | } | |
18754 | ||
18755 | __extension__ extern __inline float32x4_t | |
18756 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18757 | __arm_vmaxnmq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p) | |
18758 | { | |
18759 | return __builtin_mve_vmaxnmq_m_fv4sf (__inactive, __a, __b, __p); | |
18760 | } | |
18761 | ||
18762 | __extension__ extern __inline float16x8_t | |
18763 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18764 | __arm_vmaxnmq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p) | |
18765 | { | |
18766 | return __builtin_mve_vmaxnmq_m_fv8hf (__inactive, __a, __b, __p); | |
18767 | } | |
18768 | ||
18769 | __extension__ extern __inline float32x4_t | |
18770 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18771 | __arm_vminnmq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p) | |
18772 | { | |
18773 | return __builtin_mve_vminnmq_m_fv4sf (__inactive, __a, __b, __p); | |
18774 | } | |
18775 | ||
18776 | __extension__ extern __inline float16x8_t | |
18777 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18778 | __arm_vminnmq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p) | |
18779 | { | |
18780 | return __builtin_mve_vminnmq_m_fv8hf (__inactive, __a, __b, __p); | |
18781 | } | |
18782 | ||
18783 | __extension__ extern __inline float32x4_t | |
18784 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18785 | __arm_vmulq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p) | |
18786 | { | |
18787 | return __builtin_mve_vmulq_m_fv4sf (__inactive, __a, __b, __p); | |
18788 | } | |
18789 | ||
18790 | __extension__ extern __inline float16x8_t | |
18791 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18792 | __arm_vmulq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p) | |
18793 | { | |
18794 | return __builtin_mve_vmulq_m_fv8hf (__inactive, __a, __b, __p); | |
18795 | } | |
18796 | ||
18797 | __extension__ extern __inline float32x4_t | |
18798 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18799 | __arm_vmulq_m_n_f32 (float32x4_t __inactive, float32x4_t __a, float32_t __b, mve_pred16_t __p) | |
18800 | { | |
18801 | return __builtin_mve_vmulq_m_n_fv4sf (__inactive, __a, __b, __p); | |
18802 | } | |
18803 | ||
18804 | __extension__ extern __inline float16x8_t | |
18805 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18806 | __arm_vmulq_m_n_f16 (float16x8_t __inactive, float16x8_t __a, float16_t __b, mve_pred16_t __p) | |
18807 | { | |
18808 | return __builtin_mve_vmulq_m_n_fv8hf (__inactive, __a, __b, __p); | |
18809 | } | |
18810 | ||
18811 | __extension__ extern __inline float32x4_t | |
18812 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18813 | __arm_vornq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p) | |
18814 | { | |
18815 | return __builtin_mve_vornq_m_fv4sf (__inactive, __a, __b, __p); | |
18816 | } | |
18817 | ||
18818 | __extension__ extern __inline float16x8_t | |
18819 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18820 | __arm_vornq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p) | |
18821 | { | |
18822 | return __builtin_mve_vornq_m_fv8hf (__inactive, __a, __b, __p); | |
18823 | } | |
18824 | ||
18825 | __extension__ extern __inline float32x4_t | |
18826 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18827 | __arm_vorrq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p) | |
18828 | { | |
18829 | return __builtin_mve_vorrq_m_fv4sf (__inactive, __a, __b, __p); | |
18830 | } | |
18831 | ||
18832 | __extension__ extern __inline float16x8_t | |
18833 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18834 | __arm_vorrq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p) | |
18835 | { | |
18836 | return __builtin_mve_vorrq_m_fv8hf (__inactive, __a, __b, __p); | |
18837 | } | |
18838 | ||
18839 | __extension__ extern __inline float32x4_t | |
18840 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18841 | __arm_vsubq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p) | |
18842 | { | |
18843 | return __builtin_mve_vsubq_m_fv4sf (__inactive, __a, __b, __p); | |
18844 | } | |
18845 | ||
18846 | __extension__ extern __inline float16x8_t | |
18847 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18848 | __arm_vsubq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p) | |
18849 | { | |
18850 | return __builtin_mve_vsubq_m_fv8hf (__inactive, __a, __b, __p); | |
18851 | } | |
18852 | ||
18853 | __extension__ extern __inline float32x4_t | |
18854 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18855 | __arm_vsubq_m_n_f32 (float32x4_t __inactive, float32x4_t __a, float32_t __b, mve_pred16_t __p) | |
18856 | { | |
18857 | return __builtin_mve_vsubq_m_n_fv4sf (__inactive, __a, __b, __p); | |
18858 | } | |
18859 | ||
18860 | __extension__ extern __inline float16x8_t | |
18861 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18862 | __arm_vsubq_m_n_f16 (float16x8_t __inactive, float16x8_t __a, float16_t __b, mve_pred16_t __p) | |
18863 | { | |
18864 | return __builtin_mve_vsubq_m_n_fv8hf (__inactive, __a, __b, __p); | |
18865 | } | |
18866 | ||
18867 | __extension__ extern __inline float32x4_t | |
18868 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18869 | __arm_vld1q_f32 (float32_t const * __base) | |
18870 | { | |
18871 | return __builtin_mve_vld1q_fv4sf((__builtin_neon_si *) __base); | |
18872 | } | |
18873 | ||
18874 | __extension__ extern __inline float16x8_t | |
18875 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18876 | __arm_vld1q_f16 (float16_t const * __base) | |
18877 | { | |
18878 | return __builtin_mve_vld1q_fv8hf((__builtin_neon_hi *) __base); | |
18879 | } | |
18880 | ||
18881 | __extension__ extern __inline float32x4_t | |
18882 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18883 | __arm_vldrwq_f32 (float32_t const * __base) | |
18884 | { | |
18885 | return __builtin_mve_vldrwq_fv4sf((__builtin_neon_si *) __base); | |
18886 | } | |
18887 | ||
18888 | __extension__ extern __inline float32x4_t | |
18889 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18890 | __arm_vldrwq_z_f32 (float32_t const * __base, mve_pred16_t __p) | |
18891 | { | |
18892 | return __builtin_mve_vldrwq_z_fv4sf((__builtin_neon_si *) __base, __p); | |
18893 | } | |
18894 | ||
18895 | __extension__ extern __inline float16x8_t | |
18896 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18897 | __arm_vldrhq_z_f16 (float16_t const * __base, mve_pred16_t __p) | |
18898 | { | |
18899 | return __builtin_mve_vldrhq_z_fv8hf((__builtin_neon_hi *) __base, __p); | |
18900 | } | |
18901 | ||
18902 | __extension__ extern __inline float16x8_t | |
18903 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18904 | __arm_vldrhq_f16 (float16_t const * __base) | |
18905 | { | |
18906 | return __builtin_mve_vldrhq_fv8hf((__builtin_neon_hi *) __base); | |
18907 | } | |
18908 | ||
18909 | __extension__ extern __inline float16x8_t | |
18910 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18911 | __arm_vldrhq_gather_offset_f16 (float16_t const * __base, uint16x8_t __offset) | |
18912 | { | |
18913 | return __builtin_mve_vldrhq_gather_offset_fv8hf((__builtin_neon_hi *) __base, __offset); | |
18914 | } | |
18915 | ||
18916 | __extension__ extern __inline float16x8_t | |
18917 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18918 | __arm_vldrhq_gather_offset_z_f16 (float16_t const * __base, uint16x8_t __offset, mve_pred16_t __p) | |
18919 | { | |
18920 | return __builtin_mve_vldrhq_gather_offset_z_fv8hf((__builtin_neon_hi *) __base, __offset, __p); | |
18921 | } | |
18922 | ||
18923 | __extension__ extern __inline float16x8_t | |
18924 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18925 | __arm_vldrhq_gather_shifted_offset_f16 (float16_t const * __base, uint16x8_t __offset) | |
18926 | { | |
ff0597dc | 18927 | return __builtin_mve_vldrhq_gather_shifted_offset_fv8hf ((__builtin_neon_hi *) __base, __offset); |
261014a1 SP |
18928 | } |
18929 | ||
18930 | __extension__ extern __inline float16x8_t | |
18931 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18932 | __arm_vldrhq_gather_shifted_offset_z_f16 (float16_t const * __base, uint16x8_t __offset, mve_pred16_t __p) | |
18933 | { | |
ff0597dc | 18934 | return __builtin_mve_vldrhq_gather_shifted_offset_z_fv8hf ((__builtin_neon_hi *) __base, __offset, __p); |
261014a1 SP |
18935 | } |
18936 | ||
18937 | __extension__ extern __inline float32x4_t | |
18938 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18939 | __arm_vldrwq_gather_base_f32 (uint32x4_t __addr, const int __offset) | |
18940 | { | |
18941 | return __builtin_mve_vldrwq_gather_base_fv4sf (__addr, __offset); | |
18942 | } | |
18943 | ||
18944 | __extension__ extern __inline float32x4_t | |
18945 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18946 | __arm_vldrwq_gather_base_z_f32 (uint32x4_t __addr, const int __offset, mve_pred16_t __p) | |
18947 | { | |
18948 | return __builtin_mve_vldrwq_gather_base_z_fv4sf (__addr, __offset, __p); | |
18949 | } | |
18950 | ||
18951 | __extension__ extern __inline float32x4_t | |
18952 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18953 | __arm_vldrwq_gather_offset_f32 (float32_t const * __base, uint32x4_t __offset) | |
18954 | { | |
18955 | return __builtin_mve_vldrwq_gather_offset_fv4sf((__builtin_neon_si *) __base, __offset); | |
18956 | } | |
18957 | ||
18958 | __extension__ extern __inline float32x4_t | |
18959 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18960 | __arm_vldrwq_gather_offset_z_f32 (float32_t const * __base, uint32x4_t __offset, mve_pred16_t __p) | |
18961 | { | |
18962 | return __builtin_mve_vldrwq_gather_offset_z_fv4sf((__builtin_neon_si *) __base, __offset, __p); | |
18963 | } | |
18964 | ||
18965 | __extension__ extern __inline float32x4_t | |
18966 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18967 | __arm_vldrwq_gather_shifted_offset_f32 (float32_t const * __base, uint32x4_t __offset) | |
18968 | { | |
ff0597dc | 18969 | return __builtin_mve_vldrwq_gather_shifted_offset_fv4sf ((__builtin_neon_si *) __base, __offset); |
261014a1 SP |
18970 | } |
18971 | ||
18972 | __extension__ extern __inline float32x4_t | |
18973 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18974 | __arm_vldrwq_gather_shifted_offset_z_f32 (float32_t const * __base, uint32x4_t __offset, mve_pred16_t __p) | |
18975 | { | |
ff0597dc | 18976 | return __builtin_mve_vldrwq_gather_shifted_offset_z_fv4sf ((__builtin_neon_si *) __base, __offset, __p); |
261014a1 SP |
18977 | } |
18978 | ||
18979 | __extension__ extern __inline void | |
18980 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18981 | __arm_vstrwq_p_f32 (float32_t * __addr, float32x4_t __value, mve_pred16_t __p) | |
18982 | { | |
ff0597dc | 18983 | __builtin_mve_vstrwq_p_fv4sf ((__builtin_neon_si *) __addr, __value, __p); |
261014a1 SP |
18984 | } |
18985 | ||
18986 | __extension__ extern __inline void | |
18987 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18988 | __arm_vstrwq_f32 (float32_t * __addr, float32x4_t __value) | |
18989 | { | |
ff0597dc | 18990 | __builtin_mve_vstrwq_fv4sf ((__builtin_neon_si *) __addr, __value); |
261014a1 SP |
18991 | } |
18992 | ||
18993 | __extension__ extern __inline void | |
18994 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
18995 | __arm_vst1q_f32 (float32_t * __addr, float32x4_t __value) | |
18996 | { | |
ff0597dc | 18997 | __builtin_mve_vst1q_fv4sf ((__builtin_neon_si *) __addr, __value); |
261014a1 SP |
18998 | } |
18999 | ||
19000 | __extension__ extern __inline void | |
19001 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
19002 | __arm_vst1q_f16 (float16_t * __addr, float16x8_t __value) | |
19003 | { | |
ff0597dc | 19004 | __builtin_mve_vst1q_fv8hf ((__builtin_neon_hi *) __addr, __value); |
261014a1 SP |
19005 | } |
19006 | ||
/* Halfword stores of a float16x8_t vector (MVE VSTRH.16 forms).
   The (__builtin_neon_hi *) casts only retype the pointer for the
   builtin's prototype; the access itself is unchanged.
   _p variants take a per-lane predicate __p: per ACLE, only the
   active lanes are stored.  */

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_f16 (float16_t * __addr, float16x8_t __value)
{
  /* Contiguous store of all eight half-precision lanes at __addr.  */
  __builtin_mve_vstrhq_fv8hf ((__builtin_neon_hi *) __addr, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_p_f16 (float16_t * __addr, float16x8_t __value, mve_pred16_t __p)
{
  /* Predicated contiguous store.  */
  __builtin_mve_vstrhq_p_fv8hf ((__builtin_neon_hi *) __addr, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_scatter_offset_f16 (float16_t * __base, uint16x8_t __offset, float16x8_t __value)
{
  /* Scatter store: lane i goes to __base plus byte offset __offset[i].  */
  __builtin_mve_vstrhq_scatter_offset_fv8hf ((__builtin_neon_hi *) __base, __offset, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_scatter_offset_p_f16 (float16_t * __base, uint16x8_t __offset, float16x8_t __value, mve_pred16_t __p)
{
  /* Predicated scatter store with byte offsets.  */
  __builtin_mve_vstrhq_scatter_offset_p_fv8hf ((__builtin_neon_hi *) __base, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_scatter_shifted_offset_f16 (float16_t * __base, uint16x8_t __offset, float16x8_t __value)
{
  /* Scatter store with offsets scaled by the element size (offset << 1).  */
  __builtin_mve_vstrhq_scatter_shifted_offset_fv8hf ((__builtin_neon_hi *) __base, __offset, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_scatter_shifted_offset_p_f16 (float16_t * __base, uint16x8_t __offset, float16x8_t __value, mve_pred16_t __p)
{
  /* Predicated scatter store with scaled offsets.  */
  __builtin_mve_vstrhq_scatter_shifted_offset_p_fv8hf ((__builtin_neon_hi *) __base, __offset, __value, __p);
}
19048 | ||
/* Word scatter stores of a float32x4_t vector (MVE VSTRW.32 forms).
   scatter_base forms address through a vector of 32-bit base addresses
   plus an immediate offset; scatter_offset forms address through a
   scalar base plus a vector of offsets.  _p variants store only the
   lanes enabled by __p (ACLE predicated semantics).  */

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_base_f32 (uint32x4_t __addr, const int __offset, float32x4_t __value)
{
  /* Lane i is stored at __addr[i] + __offset.  */
  __builtin_mve_vstrwq_scatter_base_fv4sf (__addr, __offset, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_base_p_f32 (uint32x4_t __addr, const int __offset, float32x4_t __value, mve_pred16_t __p)
{
  /* Predicated vector-base scatter store.  */
  __builtin_mve_vstrwq_scatter_base_p_fv4sf (__addr, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_offset_f32 (float32_t * __base, uint32x4_t __offset, float32x4_t __value)
{
  /* Lane i is stored at __base plus byte offset __offset[i]; the cast
     only retypes the pointer for the builtin.  */
  __builtin_mve_vstrwq_scatter_offset_fv4sf ((__builtin_neon_si *) __base, __offset, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_offset_p_f32 (float32_t * __base, uint32x4_t __offset, float32x4_t __value, mve_pred16_t __p)
{
  /* Predicated scatter store with byte offsets.  */
  __builtin_mve_vstrwq_scatter_offset_p_fv4sf ((__builtin_neon_si *) __base, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_shifted_offset_f32 (float32_t * __base, uint32x4_t __offset, float32x4_t __value)
{
  /* Scatter store with offsets scaled by the element size (offset << 2).  */
  __builtin_mve_vstrwq_scatter_shifted_offset_fv4sf ((__builtin_neon_si *) __base, __offset, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_shifted_offset_p_f32 (float32_t * __base, uint32x4_t __offset, float32x4_t __value, mve_pred16_t __p)
{
  /* Predicated scatter store with scaled offsets.  */
  __builtin_mve_vstrwq_scatter_shifted_offset_p_fv4sf ((__builtin_neon_si *) __base, __offset, __value, __p);
}
19090 | ||
/* Unpredicated floating-point vector addition (MVE VADD).  Expressed as
   plain C vector addition so GCC's generic vector lowering emits the
   instruction directly.  */

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_f16 (float16x8_t __a, float16x8_t __b)
{
  /* Lane-wise __a[i] + __b[i].  */
  return __a + __b;
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_f32 (float32x4_t __a, float32x4_t __b)
{
  /* Lane-wise __a[i] + __b[i].  */
  return __a + __b;
}
19104 | ||
/* Write-back gather loads and scatter stores through a vector of base
   addresses.  The write-back forms update *__addr with the
   post-incremented address vector.  For the gather load, the value is
   produced by the "nowb" builtin and the address update by the "wb"
   builtin, keeping the two effects as separate builtin calls.  */

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrwq_gather_base_wb_f32 (uint32x4_t * __addr, const int __offset)
{
  /* Gather lanes from *__addr + __offset, then write the updated
     address vector back through __addr.  */
  float32x4_t
  result = __builtin_mve_vldrwq_gather_base_nowb_fv4sf (*__addr, __offset);
  *__addr = __builtin_mve_vldrwq_gather_base_wb_fv4sf (*__addr, __offset);
  return result;
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrwq_gather_base_wb_z_f32 (uint32x4_t * __addr, const int __offset, mve_pred16_t __p)
{
  /* Zeroing-predicated variant: per ACLE, inactive result lanes are
     zero; the same two-builtin split as the unpredicated form.  */
  float32x4_t
  result = __builtin_mve_vldrwq_gather_base_nowb_z_fv4sf (*__addr, __offset, __p);
  *__addr = __builtin_mve_vldrwq_gather_base_wb_z_fv4sf (*__addr, __offset, __p);
  return result;
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_base_wb_f32 (uint32x4_t * __addr, const int __offset, float32x4_t __value)
{
  /* Scatter __value through *__addr + __offset; the builtin returns
     the updated address vector, stored back through __addr.  */
  *__addr = __builtin_mve_vstrwq_scatter_base_wb_fv4sf (*__addr, __offset, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_base_wb_p_f32 (uint32x4_t * __addr, const int __offset, float32x4_t __value, mve_pred16_t __p)
{
  /* Predicated write-back scatter store.  */
  *__addr = __builtin_mve_vstrwq_scatter_base_wb_p_fv4sf (*__addr, __offset, __value, __p);
}
19138 | ||
/* Predicated "don't care" (_x) scalar duplicate: active lanes get __a;
   inactive lanes are undefined, implemented by passing an
   uninitialized vector as the merge source of the _m builtin.  */

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdupq_x_n_f16 (float16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vdupq_m_n_fv8hf (__arm_vuninitializedq_f16 (), __a, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdupq_x_n_f32 (float32_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vdupq_m_n_fv4sf (__arm_vuninitializedq_f32 (), __a, __p);
}
19152 | ||
/* _x (don't-care predicated) binary float operations: NaN-aware
   minimum/maximum (VMINNM/VMAXNM) and absolute difference (VABD).
   Each forwards to the merging (_m) builtin with an uninitialized
   merge source, so inactive result lanes are undefined.  */

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminnmq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminnmq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxnmq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxnmq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vabdq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vabdq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
}
19194 | ||
/* _x (don't-care predicated) float absolute value, addition (vector
   and vector-by-scalar _n forms) and negation.  All forward to the
   merging (_m) builtins with an uninitialized merge source, so
   inactive result lanes are undefined.  */

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabsq_x_f16 (float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vabsq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabsq_x_f32 (float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vabsq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vaddq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vaddq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_x_n_f16 (float16x8_t __a, float16_t __b, mve_pred16_t __p)
{
  /* _n form: scalar __b is added to every active lane of __a.  */
  return __builtin_mve_vaddq_m_n_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_x_n_f32 (float32x4_t __a, float32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vaddq_m_n_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vnegq_x_f16 (float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vnegq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vnegq_x_f32 (float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vnegq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __p);
}
19250 | ||
/* _x (don't-care predicated) float multiplication and subtraction,
   in vector/vector and vector/scalar (_n) forms.  All forward to the
   merging (_m) builtins with an uninitialized merge source, so
   inactive result lanes are undefined.  */

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_x_n_f16 (float16x8_t __a, float16_t __b, mve_pred16_t __p)
{
  /* _n form: every active lane of __a is multiplied by scalar __b.  */
  return __builtin_mve_vmulq_m_n_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_x_n_f32 (float32x4_t __a, float32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_n_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vsubq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vsubq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_x_n_f16 (float16x8_t __a, float16_t __b, mve_pred16_t __p)
{
  /* _n form: scalar __b is subtracted from every active lane of __a.  */
  return __builtin_mve_vsubq_m_n_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_x_n_f32 (float32x4_t __a, float32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vsubq_m_n_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
}
19306 | ||
/* _x (don't-care predicated) complex addition with rotation (MVE
   VCADD #90 / #270).  Forward to the merging (_m) builtins with an
   uninitialized merge source; inactive result lanes are undefined.  */

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcaddq_rot90_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcaddq_rot90_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcaddq_rot270_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcaddq_rot270_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
}
19334 | ||
/* _x (don't-care predicated) complex multiplication with rotation
   (MVE VCMUL #0/#90/#180/#270).  Forward to the merging (_m)
   builtins with an uninitialized merge source; inactive result lanes
   are undefined.  */

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmulq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmulq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_rot90_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmulq_rot90_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_rot90_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmulq_rot90_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_rot180_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmulq_rot180_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_rot180_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmulq_rot180_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_rot270_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmulq_rot270_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_rot270_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmulq_rot270_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
}
19390 | ||
/* _x (don't-care predicated) float-to-integer conversions with an
   explicit rounding mode (MVE VCVTA/VCVTN/VCVTP/VCVTM):
     vcvtaq - round to nearest, ties away from zero
     vcvtnq - round to nearest, ties to even
     vcvtpq - round toward plus infinity
     vcvtmq - round toward minus infinity
   Each forwards to the merging (_m) builtin with an uninitialized
   merge source; inactive result lanes are undefined.  */

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtaq_x_s16_f16 (float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtaq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtaq_x_s32_f32 (float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtaq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtaq_x_u16_f16 (float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtaq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtaq_x_u32_f32 (float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtaq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtnq_x_s16_f16 (float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtnq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtnq_x_s32_f32 (float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtnq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtnq_x_u16_f16 (float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtnq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtnq_x_u32_f32 (float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtnq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtpq_x_s16_f16 (float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtpq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtpq_x_s32_f32 (float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtpq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtpq_x_u16_f16 (float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtpq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtpq_x_u32_f32 (float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtpq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtmq_x_s16_f16 (float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtmq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtmq_x_s32_f32 (float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtmq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtmq_x_u16_f16 (float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtmq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtmq_x_u32_f32 (float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtmq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __p);
}
19502 | ||
/* _x (don't-care predicated) conversions producing floating point:
   vcvtbq/vcvttq widen the bottom/top half-precision elements of __a
   to single precision (MVE VCVTB/VCVTT); vcvtq converts integer
   vectors to float.  All forward to the merging (_m) builtins with
   an uninitialized merge source; inactive result lanes are
   undefined.  */

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtbq_x_f32_f16 (float16x8_t __a, mve_pred16_t __p)
{
  /* Widen the even-numbered (bottom) f16 lanes to f32.  */
  return __builtin_mve_vcvtbq_m_f32_f16v4sf (__arm_vuninitializedq_f32 (), __a, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvttq_x_f32_f16 (float16x8_t __a, mve_pred16_t __p)
{
  /* Widen the odd-numbered (top) f16 lanes to f32.  */
  return __builtin_mve_vcvttq_m_f32_f16v4sf (__arm_vuninitializedq_f32 (), __a, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_x_f16_u16 (uint16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_to_f_uv8hf (__arm_vuninitializedq_f16 (), __a, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_x_f16_s16 (int16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_to_f_sv8hf (__arm_vuninitializedq_f16 (), __a, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_x_f32_s32 (int32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_to_f_sv4sf (__arm_vuninitializedq_f32 (), __a, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_x_f32_u32 (uint32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_to_f_uv4sf (__arm_vuninitializedq_f32 (), __a, __p);
}
19544 | ||
/* _x (don't-care predicated) fixed-point to float conversions: __imm6
   is the number of fractional bits (an immediate, per ACLE in
   1..element width).  Merging builtins with an uninitialized merge
   source; inactive result lanes are undefined.  */

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_x_n_f16_s16 (int16x8_t __a, const int __imm6, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_n_to_f_sv8hf (__arm_vuninitializedq_f16 (), __a, __imm6, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_x_n_f16_u16 (uint16x8_t __a, const int __imm6, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_n_to_f_uv8hf (__arm_vuninitializedq_f16 (), __a, __imm6, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_x_n_f32_s32 (int32x4_t __a, const int __imm6, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_n_to_f_sv4sf (__arm_vuninitializedq_f32 (), __a, __imm6, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_x_n_f32_u32 (uint32x4_t __a, const int __imm6, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_n_to_f_uv4sf (__arm_vuninitializedq_f32 (), __a, __imm6, __p);
}
19572 | ||
/* _x (don't-care predicated) float to integer conversions (MVE VCVT,
   default round-toward-zero mode per ACLE).  Merging builtins with an
   uninitialized merge source; inactive result lanes are undefined.  */

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_x_s16_f16 (float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_from_f_sv8hi (__arm_vuninitializedq_s16 (), __a, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_x_s32_f32 (float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_from_f_sv4si (__arm_vuninitializedq_s32 (), __a, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_x_u16_f16 (float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_from_f_uv8hi (__arm_vuninitializedq_u16 (), __a, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_x_u32_f32 (float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_from_f_uv4si (__arm_vuninitializedq_u32 (), __a, __p);
}
19600 | ||
/* _x (don't-care predicated) float to fixed-point conversions: __imm6
   is the number of fractional bits in the result (an immediate, per
   ACLE in 1..element width).  Merging builtins with an uninitialized
   merge source; inactive result lanes are undefined.  */

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_x_n_s16_f16 (float16x8_t __a, const int __imm6, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_n_from_f_sv8hi (__arm_vuninitializedq_s16 (), __a, __imm6, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_x_n_s32_f32 (float32x4_t __a, const int __imm6, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_n_from_f_sv4si (__arm_vuninitializedq_s32 (), __a, __imm6, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_x_n_u16_f16 (float16x8_t __a, const int __imm6, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_n_from_f_uv8hi (__arm_vuninitializedq_u16 (), __a, __imm6, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_x_n_u32_f32 (float32x4_t __a, const int __imm6, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_n_from_f_uv4si (__arm_vuninitializedq_u32 (), __a, __imm6, __p);
}
19628 | ||
261014a1 | 19629 | __extension__ extern __inline float16x8_t |
85a94e87 | 19630 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 19631 | __arm_vrndq_x_f16 (float16x8_t __a, mve_pred16_t __p) |
85a94e87 | 19632 | { |
c431634b | 19633 | return __builtin_mve_vrndq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __p); |
85a94e87 SP |
19634 | } |
19635 | ||
261014a1 | 19636 | __extension__ extern __inline float32x4_t |
85a94e87 | 19637 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 19638 | __arm_vrndq_x_f32 (float32x4_t __a, mve_pred16_t __p) |
85a94e87 | 19639 | { |
c431634b | 19640 | return __builtin_mve_vrndq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __p); |
85a94e87 SP |
19641 | } |
19642 | ||
261014a1 | 19643 | __extension__ extern __inline float16x8_t |
85a94e87 | 19644 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 19645 | __arm_vrndnq_x_f16 (float16x8_t __a, mve_pred16_t __p) |
85a94e87 | 19646 | { |
c431634b | 19647 | return __builtin_mve_vrndnq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __p); |
85a94e87 SP |
19648 | } |
19649 | ||
261014a1 | 19650 | __extension__ extern __inline float32x4_t |
85a94e87 | 19651 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 19652 | __arm_vrndnq_x_f32 (float32x4_t __a, mve_pred16_t __p) |
85a94e87 | 19653 | { |
c431634b | 19654 | return __builtin_mve_vrndnq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __p); |
85a94e87 SP |
19655 | } |
19656 | ||
261014a1 | 19657 | __extension__ extern __inline float16x8_t |
85a94e87 | 19658 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 19659 | __arm_vrndmq_x_f16 (float16x8_t __a, mve_pred16_t __p) |
85a94e87 | 19660 | { |
c431634b | 19661 | return __builtin_mve_vrndmq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __p); |
85a94e87 SP |
19662 | } |
19663 | ||
261014a1 | 19664 | __extension__ extern __inline float32x4_t |
85a94e87 | 19665 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 19666 | __arm_vrndmq_x_f32 (float32x4_t __a, mve_pred16_t __p) |
85a94e87 | 19667 | { |
c431634b | 19668 | return __builtin_mve_vrndmq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __p); |
85a94e87 SP |
19669 | } |
19670 | ||
19671 | __extension__ extern __inline float16x8_t | |
19672 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 19673 | __arm_vrndpq_x_f16 (float16x8_t __a, mve_pred16_t __p) |
85a94e87 | 19674 | { |
c431634b | 19675 | return __builtin_mve_vrndpq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __p); |
85a94e87 SP |
19676 | } |
19677 | ||
261014a1 | 19678 | __extension__ extern __inline float32x4_t |
85a94e87 | 19679 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 19680 | __arm_vrndpq_x_f32 (float32x4_t __a, mve_pred16_t __p) |
85a94e87 | 19681 | { |
c431634b | 19682 | return __builtin_mve_vrndpq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __p); |
85a94e87 SP |
19683 | } |
19684 | ||
19685 | __extension__ extern __inline float16x8_t | |
19686 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 19687 | __arm_vrndaq_x_f16 (float16x8_t __a, mve_pred16_t __p) |
85a94e87 | 19688 | { |
c431634b | 19689 | return __builtin_mve_vrndaq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __p); |
85a94e87 SP |
19690 | } |
19691 | ||
261014a1 | 19692 | __extension__ extern __inline float32x4_t |
85a94e87 | 19693 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 19694 | __arm_vrndaq_x_f32 (float32x4_t __a, mve_pred16_t __p) |
85a94e87 | 19695 | { |
c431634b | 19696 | return __builtin_mve_vrndaq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __p); |
85a94e87 SP |
19697 | } |
19698 | ||
19699 | __extension__ extern __inline float16x8_t | |
19700 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 19701 | __arm_vrndxq_x_f16 (float16x8_t __a, mve_pred16_t __p) |
85a94e87 | 19702 | { |
c431634b | 19703 | return __builtin_mve_vrndxq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __p); |
85a94e87 SP |
19704 | } |
19705 | ||
261014a1 | 19706 | __extension__ extern __inline float32x4_t |
85a94e87 | 19707 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 19708 | __arm_vrndxq_x_f32 (float32x4_t __a, mve_pred16_t __p) |
85a94e87 | 19709 | { |
c431634b | 19710 | return __builtin_mve_vrndxq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __p); |
85a94e87 SP |
19711 | } |
19712 | ||
19713 | __extension__ extern __inline float16x8_t | |
19714 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 19715 | __arm_vandq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p) |
85a94e87 | 19716 | { |
c431634b | 19717 | return __builtin_mve_vandq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p); |
85a94e87 SP |
19718 | } |
19719 | ||
261014a1 | 19720 | __extension__ extern __inline float32x4_t |
85a94e87 | 19721 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 19722 | __arm_vandq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p) |
85a94e87 | 19723 | { |
c431634b | 19724 | return __builtin_mve_vandq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p); |
85a94e87 SP |
19725 | } |
19726 | ||
261014a1 | 19727 | __extension__ extern __inline float16x8_t |
85a94e87 | 19728 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 19729 | __arm_vbicq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p) |
85a94e87 | 19730 | { |
c431634b | 19731 | return __builtin_mve_vbicq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p); |
85a94e87 SP |
19732 | } |
19733 | ||
19734 | __extension__ extern __inline float32x4_t | |
19735 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 19736 | __arm_vbicq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p) |
85a94e87 | 19737 | { |
c431634b | 19738 | return __builtin_mve_vbicq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p); |
85a94e87 SP |
19739 | } |
19740 | ||
261014a1 | 19741 | __extension__ extern __inline float16x8_t |
85a94e87 | 19742 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 19743 | __arm_vbrsrq_x_n_f16 (float16x8_t __a, int32_t __b, mve_pred16_t __p) |
85a94e87 | 19744 | { |
c431634b | 19745 | return __builtin_mve_vbrsrq_m_n_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p); |
85a94e87 SP |
19746 | } |
19747 | ||
19748 | __extension__ extern __inline float32x4_t | |
19749 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 19750 | __arm_vbrsrq_x_n_f32 (float32x4_t __a, int32_t __b, mve_pred16_t __p) |
85a94e87 | 19751 | { |
c431634b | 19752 | return __builtin_mve_vbrsrq_m_n_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p); |
85a94e87 SP |
19753 | } |
19754 | ||
261014a1 | 19755 | __extension__ extern __inline float16x8_t |
85a94e87 | 19756 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 19757 | __arm_veorq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p) |
85a94e87 | 19758 | { |
c431634b | 19759 | return __builtin_mve_veorq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p); |
85a94e87 SP |
19760 | } |
19761 | ||
19762 | __extension__ extern __inline float32x4_t | |
19763 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 19764 | __arm_veorq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p) |
85a94e87 | 19765 | { |
c431634b | 19766 | return __builtin_mve_veorq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p); |
85a94e87 SP |
19767 | } |
19768 | ||
261014a1 | 19769 | __extension__ extern __inline float16x8_t |
85a94e87 | 19770 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 19771 | __arm_vornq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p) |
85a94e87 | 19772 | { |
c431634b | 19773 | return __builtin_mve_vornq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p); |
85a94e87 SP |
19774 | } |
19775 | ||
19776 | __extension__ extern __inline float32x4_t | |
19777 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 19778 | __arm_vornq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p) |
85a94e87 | 19779 | { |
c431634b | 19780 | return __builtin_mve_vornq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p); |
85a94e87 SP |
19781 | } |
19782 | ||
261014a1 | 19783 | __extension__ extern __inline float16x8_t |
85a94e87 | 19784 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 19785 | __arm_vorrq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p) |
85a94e87 | 19786 | { |
c431634b | 19787 | return __builtin_mve_vorrq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p); |
85a94e87 SP |
19788 | } |
19789 | ||
41e1a7ff SP |
19790 | __extension__ extern __inline float32x4_t |
19791 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
261014a1 | 19792 | __arm_vorrq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p) |
41e1a7ff | 19793 | { |
c431634b | 19794 | return __builtin_mve_vorrq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p); |
41e1a7ff SP |
19795 | } |
19796 | ||
261014a1 | 19797 | __extension__ extern __inline float16x8_t |
41e1a7ff | 19798 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 19799 | __arm_vrev32q_x_f16 (float16x8_t __a, mve_pred16_t __p) |
41e1a7ff | 19800 | { |
c431634b | 19801 | return __builtin_mve_vrev32q_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __p); |
41e1a7ff SP |
19802 | } |
19803 | ||
261014a1 | 19804 | __extension__ extern __inline float16x8_t |
41e1a7ff | 19805 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 19806 | __arm_vrev64q_x_f16 (float16x8_t __a, mve_pred16_t __p) |
41e1a7ff | 19807 | { |
c431634b | 19808 | return __builtin_mve_vrev64q_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __p); |
41e1a7ff SP |
19809 | } |
19810 | ||
261014a1 | 19811 | __extension__ extern __inline float32x4_t |
41e1a7ff | 19812 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
261014a1 | 19813 | __arm_vrev64q_x_f32 (float32x4_t __a, mve_pred16_t __p) |
41e1a7ff | 19814 | { |
c431634b | 19815 | return __builtin_mve_vrev64q_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __p); |
41e1a7ff SP |
19816 | } |
19817 | ||
1dfcc3b5 SP |
19818 | __extension__ extern __inline float16x8x4_t |
19819 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
19820 | __arm_vld4q_f16 (float16_t const * __addr) | |
19821 | { | |
19822 | union { float16x8x4_t __i; __builtin_neon_xi __o; } __rv; | |
19823 | __rv.__o = __builtin_mve_vld4qv8hf (__addr); | |
19824 | return __rv.__i; | |
19825 | } | |
19826 | ||
19827 | __extension__ extern __inline float16x8x2_t | |
19828 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
19829 | __arm_vld2q_f16 (float16_t const * __addr) | |
19830 | { | |
19831 | union { float16x8x2_t __i; __builtin_neon_oi __o; } __rv; | |
19832 | __rv.__o = __builtin_mve_vld2qv8hf (__addr); | |
19833 | return __rv.__i; | |
19834 | } | |
19835 | ||
19836 | __extension__ extern __inline float16x8_t | |
19837 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
19838 | __arm_vld1q_z_f16 (float16_t const *__base, mve_pred16_t __p) | |
19839 | { | |
ff0597dc | 19840 | return vldrhq_z_f16 (__base, __p); |
1dfcc3b5 SP |
19841 | } |
19842 | ||
19843 | __extension__ extern __inline void | |
19844 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
19845 | __arm_vst2q_f16 (float16_t * __addr, float16x8x2_t __value) | |
19846 | { | |
19847 | union { float16x8x2_t __i; __builtin_neon_oi __o; } __rv; | |
19848 | __rv.__i = __value; | |
19849 | __builtin_mve_vst2qv8hf (__addr, __rv.__o); | |
19850 | } | |
19851 | ||
19852 | __extension__ extern __inline void | |
19853 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
19854 | __arm_vst1q_p_f16 (float16_t * __addr, float16x8_t __value, mve_pred16_t __p) | |
19855 | { | |
19856 | return vstrhq_p_f16 (__addr, __value, __p); | |
19857 | } | |
19858 | ||
19859 | __extension__ extern __inline float32x4x4_t | |
19860 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
19861 | __arm_vld4q_f32 (float32_t const * __addr) | |
19862 | { | |
19863 | union { float32x4x4_t __i; __builtin_neon_xi __o; } __rv; | |
19864 | __rv.__o = __builtin_mve_vld4qv4sf (__addr); | |
19865 | return __rv.__i; | |
19866 | } | |
19867 | ||
19868 | __extension__ extern __inline float32x4x2_t | |
19869 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
19870 | __arm_vld2q_f32 (float32_t const * __addr) | |
19871 | { | |
19872 | union { float32x4x2_t __i; __builtin_neon_oi __o; } __rv; | |
19873 | __rv.__o = __builtin_mve_vld2qv4sf (__addr); | |
19874 | return __rv.__i; | |
19875 | } | |
19876 | ||
19877 | __extension__ extern __inline float32x4_t | |
19878 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
19879 | __arm_vld1q_z_f32 (float32_t const *__base, mve_pred16_t __p) | |
19880 | { | |
ff0597dc | 19881 | return vldrwq_z_f32 (__base, __p); |
1dfcc3b5 SP |
19882 | } |
19883 | ||
19884 | __extension__ extern __inline void | |
19885 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
19886 | __arm_vst2q_f32 (float32_t * __addr, float32x4x2_t __value) | |
19887 | { | |
19888 | union { float32x4x2_t __i; __builtin_neon_oi __o; } __rv; | |
19889 | __rv.__i = __value; | |
19890 | __builtin_mve_vst2qv4sf (__addr, __rv.__o); | |
19891 | } | |
19892 | ||
19893 | __extension__ extern __inline void | |
19894 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
19895 | __arm_vst1q_p_f32 (float32_t * __addr, float32x4_t __value, mve_pred16_t __p) | |
19896 | { | |
19897 | return vstrwq_p_f32 (__addr, __value, __p); | |
19898 | } | |
19899 | ||
1a5c27b1 SP |
19900 | __extension__ extern __inline float16x8_t |
19901 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
19902 | __arm_vsetq_lane_f16 (float16_t __a, float16x8_t __b, const int __idx) | |
19903 | { | |
19904 | __ARM_CHECK_LANEQ (__b, __idx); | |
19905 | __b[__ARM_LANEQ(__b,__idx)] = __a; | |
19906 | return __b; | |
19907 | } | |
19908 | ||
19909 | __extension__ extern __inline float32x4_t | |
19910 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
19911 | __arm_vsetq_lane_f32 (float32_t __a, float32x4_t __b, const int __idx) | |
19912 | { | |
19913 | __ARM_CHECK_LANEQ (__b, __idx); | |
19914 | __b[__ARM_LANEQ(__b,__idx)] = __a; | |
19915 | return __b; | |
19916 | } | |
19917 | ||
19918 | __extension__ extern __inline float16_t | |
19919 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
19920 | __arm_vgetq_lane_f16 (float16x8_t __a, const int __idx) | |
19921 | { | |
19922 | __ARM_CHECK_LANEQ (__a, __idx); | |
19923 | return __a[__ARM_LANEQ(__a,__idx)]; | |
19924 | } | |
19925 | ||
19926 | __extension__ extern __inline float32_t | |
19927 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
19928 | __arm_vgetq_lane_f32 (float32x4_t __a, const int __idx) | |
19929 | { | |
19930 | __ARM_CHECK_LANEQ (__a, __idx); | |
19931 | return __a[__ARM_LANEQ(__a,__idx)]; | |
19932 | } | |
e3678b44 SP |
19933 | #endif |
19934 | ||
6a90680b ASDV |
19935 | #ifdef __cplusplus |
19936 | __extension__ extern __inline void | |
19937 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
19938 | __arm_vst4q (int8_t * __addr, int8x16x4_t __value) | |
19939 | { | |
19940 | __arm_vst4q_s8 (__addr, __value); | |
19941 | } | |
14782c81 | 19942 | |
6a90680b ASDV |
19943 | __extension__ extern __inline void |
19944 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
19945 | __arm_vst4q (int16_t * __addr, int16x8x4_t __value) | |
19946 | { | |
19947 | __arm_vst4q_s16 (__addr, __value); | |
19948 | } | |
e3678b44 | 19949 | |
6a90680b ASDV |
19950 | __extension__ extern __inline void |
19951 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
19952 | __arm_vst4q (int32_t * __addr, int32x4x4_t __value) | |
19953 | { | |
19954 | __arm_vst4q_s32 (__addr, __value); | |
19955 | } | |
e3678b44 | 19956 | |
6a90680b ASDV |
19957 | __extension__ extern __inline void |
19958 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
19959 | __arm_vst4q (uint8_t * __addr, uint8x16x4_t __value) | |
19960 | { | |
19961 | __arm_vst4q_u8 (__addr, __value); | |
19962 | } | |
e3678b44 | 19963 | |
6a90680b ASDV |
19964 | __extension__ extern __inline void |
19965 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
19966 | __arm_vst4q (uint16_t * __addr, uint16x8x4_t __value) | |
19967 | { | |
19968 | __arm_vst4q_u16 (__addr, __value); | |
19969 | } | |
e3678b44 | 19970 | |
6a90680b ASDV |
19971 | __extension__ extern __inline void |
19972 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
19973 | __arm_vst4q (uint32_t * __addr, uint32x4x4_t __value) | |
19974 | { | |
19975 | __arm_vst4q_u32 (__addr, __value); | |
19976 | } | |
e3678b44 | 19977 | |
6a90680b ASDV |
19978 | __extension__ extern __inline int8x16_t |
19979 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
19980 | __arm_vdupq_n (int8_t __a) | |
19981 | { | |
19982 | return __arm_vdupq_n_s8 (__a); | |
19983 | } | |
e3678b44 | 19984 | |
6a90680b ASDV |
19985 | __extension__ extern __inline int16x8_t |
19986 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
19987 | __arm_vdupq_n (int16_t __a) | |
19988 | { | |
19989 | return __arm_vdupq_n_s16 (__a); | |
19990 | } | |
e3678b44 | 19991 | |
6a90680b ASDV |
19992 | __extension__ extern __inline int32x4_t |
19993 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
19994 | __arm_vdupq_n (int32_t __a) | |
19995 | { | |
19996 | return __arm_vdupq_n_s32 (__a); | |
19997 | } | |
e3678b44 | 19998 | |
6a90680b ASDV |
19999 | __extension__ extern __inline int8x16_t |
20000 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20001 | __arm_vabsq (int8x16_t __a) | |
20002 | { | |
20003 | return __arm_vabsq_s8 (__a); | |
20004 | } | |
e3678b44 | 20005 | |
6a90680b ASDV |
20006 | __extension__ extern __inline int16x8_t |
20007 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20008 | __arm_vabsq (int16x8_t __a) | |
20009 | { | |
20010 | return __arm_vabsq_s16 (__a); | |
20011 | } | |
20012 | ||
20013 | __extension__ extern __inline int32x4_t | |
20014 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20015 | __arm_vabsq (int32x4_t __a) | |
20016 | { | |
20017 | return __arm_vabsq_s32 (__a); | |
20018 | } | |
20019 | ||
20020 | __extension__ extern __inline int8x16_t | |
20021 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20022 | __arm_vclsq (int8x16_t __a) | |
20023 | { | |
20024 | return __arm_vclsq_s8 (__a); | |
20025 | } | |
20026 | ||
20027 | __extension__ extern __inline int16x8_t | |
20028 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20029 | __arm_vclsq (int16x8_t __a) | |
20030 | { | |
20031 | return __arm_vclsq_s16 (__a); | |
20032 | } | |
20033 | ||
20034 | __extension__ extern __inline int32x4_t | |
20035 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20036 | __arm_vclsq (int32x4_t __a) | |
20037 | { | |
20038 | return __arm_vclsq_s32 (__a); | |
20039 | } | |
20040 | ||
20041 | __extension__ extern __inline int8x16_t | |
20042 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20043 | __arm_vclzq (int8x16_t __a) | |
20044 | { | |
20045 | return __arm_vclzq_s8 (__a); | |
20046 | } | |
20047 | ||
20048 | __extension__ extern __inline int16x8_t | |
20049 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20050 | __arm_vclzq (int16x8_t __a) | |
20051 | { | |
20052 | return __arm_vclzq_s16 (__a); | |
20053 | } | |
20054 | ||
20055 | __extension__ extern __inline int32x4_t | |
20056 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20057 | __arm_vclzq (int32x4_t __a) | |
20058 | { | |
20059 | return __arm_vclzq_s32 (__a); | |
20060 | } | |
20061 | ||
20062 | __extension__ extern __inline int8x16_t | |
20063 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20064 | __arm_vnegq (int8x16_t __a) | |
20065 | { | |
20066 | return __arm_vnegq_s8 (__a); | |
20067 | } | |
20068 | ||
20069 | __extension__ extern __inline int16x8_t | |
20070 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20071 | __arm_vnegq (int16x8_t __a) | |
20072 | { | |
20073 | return __arm_vnegq_s16 (__a); | |
20074 | } | |
20075 | ||
20076 | __extension__ extern __inline int32x4_t | |
20077 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20078 | __arm_vnegq (int32x4_t __a) | |
20079 | { | |
20080 | return __arm_vnegq_s32 (__a); | |
20081 | } | |
20082 | ||
20083 | __extension__ extern __inline int64_t | |
20084 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20085 | __arm_vaddlvq (int32x4_t __a) | |
20086 | { | |
20087 | return __arm_vaddlvq_s32 (__a); | |
20088 | } | |
20089 | ||
20090 | __extension__ extern __inline int32_t | |
20091 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20092 | __arm_vaddvq (int8x16_t __a) | |
20093 | { | |
20094 | return __arm_vaddvq_s8 (__a); | |
20095 | } | |
20096 | ||
20097 | __extension__ extern __inline int32_t | |
20098 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20099 | __arm_vaddvq (int16x8_t __a) | |
20100 | { | |
20101 | return __arm_vaddvq_s16 (__a); | |
20102 | } | |
20103 | ||
20104 | __extension__ extern __inline int32_t | |
20105 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20106 | __arm_vaddvq (int32x4_t __a) | |
20107 | { | |
20108 | return __arm_vaddvq_s32 (__a); | |
20109 | } | |
20110 | ||
20111 | __extension__ extern __inline int16x8_t | |
20112 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20113 | __arm_vmovlbq (int8x16_t __a) | |
20114 | { | |
20115 | return __arm_vmovlbq_s8 (__a); | |
20116 | } | |
20117 | ||
20118 | __extension__ extern __inline int32x4_t | |
20119 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20120 | __arm_vmovlbq (int16x8_t __a) | |
20121 | { | |
20122 | return __arm_vmovlbq_s16 (__a); | |
20123 | } | |
20124 | ||
20125 | __extension__ extern __inline int16x8_t | |
20126 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20127 | __arm_vmovltq (int8x16_t __a) | |
20128 | { | |
20129 | return __arm_vmovltq_s8 (__a); | |
20130 | } | |
20131 | ||
20132 | __extension__ extern __inline int32x4_t | |
20133 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20134 | __arm_vmovltq (int16x8_t __a) | |
20135 | { | |
20136 | return __arm_vmovltq_s16 (__a); | |
20137 | } | |
20138 | ||
20139 | __extension__ extern __inline int8x16_t | |
20140 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20141 | __arm_vmvnq (int8x16_t __a) | |
20142 | { | |
20143 | return __arm_vmvnq_s8 (__a); | |
20144 | } | |
20145 | ||
20146 | __extension__ extern __inline int16x8_t | |
20147 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20148 | __arm_vmvnq (int16x8_t __a) | |
20149 | { | |
20150 | return __arm_vmvnq_s16 (__a); | |
20151 | } | |
20152 | ||
20153 | __extension__ extern __inline int32x4_t | |
20154 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20155 | __arm_vmvnq (int32x4_t __a) | |
20156 | { | |
20157 | return __arm_vmvnq_s32 (__a); | |
20158 | } | |
20159 | ||
20160 | __extension__ extern __inline int8x16_t | |
20161 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20162 | __arm_vrev16q (int8x16_t __a) | |
20163 | { | |
20164 | return __arm_vrev16q_s8 (__a); | |
20165 | } | |
20166 | ||
20167 | __extension__ extern __inline int8x16_t | |
20168 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20169 | __arm_vrev32q (int8x16_t __a) | |
20170 | { | |
20171 | return __arm_vrev32q_s8 (__a); | |
20172 | } | |
20173 | ||
20174 | __extension__ extern __inline int16x8_t | |
20175 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20176 | __arm_vrev32q (int16x8_t __a) | |
20177 | { | |
20178 | return __arm_vrev32q_s16 (__a); | |
20179 | } | |
20180 | ||
20181 | __extension__ extern __inline int8x16_t | |
20182 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20183 | __arm_vrev64q (int8x16_t __a) | |
20184 | { | |
20185 | return __arm_vrev64q_s8 (__a); | |
20186 | } | |
20187 | ||
20188 | __extension__ extern __inline int16x8_t | |
20189 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20190 | __arm_vrev64q (int16x8_t __a) | |
20191 | { | |
20192 | return __arm_vrev64q_s16 (__a); | |
20193 | } | |
20194 | ||
20195 | __extension__ extern __inline int32x4_t | |
20196 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20197 | __arm_vrev64q (int32x4_t __a) | |
20198 | { | |
20199 | return __arm_vrev64q_s32 (__a); | |
20200 | } | |
20201 | ||
20202 | __extension__ extern __inline int8x16_t | |
20203 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20204 | __arm_vqabsq (int8x16_t __a) | |
20205 | { | |
20206 | return __arm_vqabsq_s8 (__a); | |
20207 | } | |
20208 | ||
20209 | __extension__ extern __inline int16x8_t | |
20210 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20211 | __arm_vqabsq (int16x8_t __a) | |
20212 | { | |
20213 | return __arm_vqabsq_s16 (__a); | |
20214 | } | |
20215 | ||
20216 | __extension__ extern __inline int32x4_t | |
20217 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20218 | __arm_vqabsq (int32x4_t __a) | |
20219 | { | |
20220 | return __arm_vqabsq_s32 (__a); | |
20221 | } | |
20222 | ||
20223 | __extension__ extern __inline int8x16_t | |
20224 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20225 | __arm_vqnegq (int8x16_t __a) | |
20226 | { | |
20227 | return __arm_vqnegq_s8 (__a); | |
20228 | } | |
20229 | ||
20230 | __extension__ extern __inline int16x8_t | |
20231 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20232 | __arm_vqnegq (int16x8_t __a) | |
20233 | { | |
20234 | return __arm_vqnegq_s16 (__a); | |
20235 | } | |
20236 | ||
20237 | __extension__ extern __inline int32x4_t | |
20238 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20239 | __arm_vqnegq (int32x4_t __a) | |
20240 | { | |
20241 | return __arm_vqnegq_s32 (__a); | |
20242 | } | |
20243 | ||
20244 | __extension__ extern __inline uint8x16_t | |
20245 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20246 | __arm_vrev64q (uint8x16_t __a) | |
20247 | { | |
20248 | return __arm_vrev64q_u8 (__a); | |
20249 | } | |
20250 | ||
20251 | __extension__ extern __inline uint16x8_t | |
20252 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20253 | __arm_vrev64q (uint16x8_t __a) | |
20254 | { | |
20255 | return __arm_vrev64q_u16 (__a); | |
20256 | } | |
20257 | ||
20258 | __extension__ extern __inline uint32x4_t | |
20259 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20260 | __arm_vrev64q (uint32x4_t __a) | |
20261 | { | |
20262 | return __arm_vrev64q_u32 (__a); | |
20263 | } | |
20264 | ||
20265 | __extension__ extern __inline uint8x16_t | |
20266 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20267 | __arm_vmvnq (uint8x16_t __a) | |
20268 | { | |
20269 | return __arm_vmvnq_u8 (__a); | |
20270 | } | |
20271 | ||
20272 | __extension__ extern __inline uint16x8_t | |
20273 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20274 | __arm_vmvnq (uint16x8_t __a) | |
20275 | { | |
20276 | return __arm_vmvnq_u16 (__a); | |
20277 | } | |
20278 | ||
20279 | __extension__ extern __inline uint32x4_t | |
20280 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20281 | __arm_vmvnq (uint32x4_t __a) | |
20282 | { | |
20283 | return __arm_vmvnq_u32 (__a); | |
20284 | } | |
20285 | ||
20286 | __extension__ extern __inline uint8x16_t | |
20287 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20288 | __arm_vdupq_n (uint8_t __a) | |
20289 | { | |
20290 | return __arm_vdupq_n_u8 (__a); | |
20291 | } | |
20292 | ||
20293 | __extension__ extern __inline uint16x8_t | |
20294 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20295 | __arm_vdupq_n (uint16_t __a) | |
20296 | { | |
20297 | return __arm_vdupq_n_u16 (__a); | |
20298 | } | |
20299 | ||
20300 | __extension__ extern __inline uint32x4_t | |
20301 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20302 | __arm_vdupq_n (uint32_t __a) | |
20303 | { | |
20304 | return __arm_vdupq_n_u32 (__a); | |
20305 | } | |
20306 | ||
20307 | __extension__ extern __inline uint8x16_t | |
20308 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20309 | __arm_vclzq (uint8x16_t __a) | |
20310 | { | |
20311 | return __arm_vclzq_u8 (__a); | |
20312 | } | |
20313 | ||
20314 | __extension__ extern __inline uint16x8_t | |
20315 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20316 | __arm_vclzq (uint16x8_t __a) | |
20317 | { | |
20318 | return __arm_vclzq_u16 (__a); | |
20319 | } | |
20320 | ||
20321 | __extension__ extern __inline uint32x4_t | |
20322 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20323 | __arm_vclzq (uint32x4_t __a) | |
20324 | { | |
20325 | return __arm_vclzq_u32 (__a); | |
20326 | } | |
20327 | ||
20328 | __extension__ extern __inline uint32_t | |
20329 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20330 | __arm_vaddvq (uint8x16_t __a) | |
20331 | { | |
20332 | return __arm_vaddvq_u8 (__a); | |
20333 | } | |
20334 | ||
20335 | __extension__ extern __inline uint32_t | |
20336 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20337 | __arm_vaddvq (uint16x8_t __a) | |
20338 | { | |
20339 | return __arm_vaddvq_u16 (__a); | |
20340 | } | |
20341 | ||
20342 | __extension__ extern __inline uint32_t | |
20343 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20344 | __arm_vaddvq (uint32x4_t __a) | |
20345 | { | |
20346 | return __arm_vaddvq_u32 (__a); | |
20347 | } | |
20348 | ||
20349 | __extension__ extern __inline uint8x16_t | |
20350 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20351 | __arm_vrev32q (uint8x16_t __a) | |
20352 | { | |
20353 | return __arm_vrev32q_u8 (__a); | |
20354 | } | |
20355 | ||
20356 | __extension__ extern __inline uint16x8_t | |
20357 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20358 | __arm_vrev32q (uint16x8_t __a) | |
20359 | { | |
20360 | return __arm_vrev32q_u16 (__a); | |
20361 | } | |
20362 | ||
20363 | __extension__ extern __inline uint16x8_t | |
20364 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20365 | __arm_vmovltq (uint8x16_t __a) | |
20366 | { | |
20367 | return __arm_vmovltq_u8 (__a); | |
20368 | } | |
20369 | ||
20370 | __extension__ extern __inline uint32x4_t | |
20371 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20372 | __arm_vmovltq (uint16x8_t __a) | |
20373 | { | |
20374 | return __arm_vmovltq_u16 (__a); | |
20375 | } | |
20376 | ||
20377 | __extension__ extern __inline uint16x8_t | |
20378 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20379 | __arm_vmovlbq (uint8x16_t __a) | |
20380 | { | |
20381 | return __arm_vmovlbq_u8 (__a); | |
20382 | } | |
20383 | ||
20384 | __extension__ extern __inline uint32x4_t | |
20385 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20386 | __arm_vmovlbq (uint16x8_t __a) | |
20387 | { | |
20388 | return __arm_vmovlbq_u16 (__a); | |
20389 | } | |
20390 | ||
20391 | __extension__ extern __inline uint8x16_t | |
20392 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20393 | __arm_vrev16q (uint8x16_t __a) | |
20394 | { | |
20395 | return __arm_vrev16q_u8 (__a); | |
20396 | } | |
20397 | ||
20398 | __extension__ extern __inline uint64_t | |
20399 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20400 | __arm_vaddlvq (uint32x4_t __a) | |
20401 | { | |
20402 | return __arm_vaddlvq_u32 (__a); | |
20403 | } | |
20404 | ||
20405 | __extension__ extern __inline int8x16_t | |
20406 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20407 | __arm_vshrq (int8x16_t __a, const int __imm) | |
20408 | { | |
20409 | return __arm_vshrq_n_s8 (__a, __imm); | |
20410 | } | |
20411 | ||
20412 | __extension__ extern __inline int16x8_t | |
20413 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20414 | __arm_vshrq (int16x8_t __a, const int __imm) | |
20415 | { | |
20416 | return __arm_vshrq_n_s16 (__a, __imm); | |
20417 | } | |
20418 | ||
20419 | __extension__ extern __inline int32x4_t | |
20420 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20421 | __arm_vshrq (int32x4_t __a, const int __imm) | |
20422 | { | |
20423 | return __arm_vshrq_n_s32 (__a, __imm); | |
20424 | } | |
20425 | ||
20426 | __extension__ extern __inline uint8x16_t | |
20427 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20428 | __arm_vshrq (uint8x16_t __a, const int __imm) | |
20429 | { | |
20430 | return __arm_vshrq_n_u8 (__a, __imm); | |
20431 | } | |
20432 | ||
20433 | __extension__ extern __inline uint16x8_t | |
20434 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20435 | __arm_vshrq (uint16x8_t __a, const int __imm) | |
20436 | { | |
20437 | return __arm_vshrq_n_u16 (__a, __imm); | |
20438 | } | |
20439 | ||
20440 | __extension__ extern __inline uint32x4_t | |
20441 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20442 | __arm_vshrq (uint32x4_t __a, const int __imm) | |
20443 | { | |
20444 | return __arm_vshrq_n_u32 (__a, __imm); | |
20445 | } | |
20446 | ||
20447 | __extension__ extern __inline int64_t | |
20448 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20449 | __arm_vaddlvq_p (int32x4_t __a, mve_pred16_t __p) | |
20450 | { | |
20451 | return __arm_vaddlvq_p_s32 (__a, __p); | |
20452 | } | |
20453 | ||
20454 | __extension__ extern __inline uint64_t | |
20455 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20456 | __arm_vaddlvq_p (uint32x4_t __a, mve_pred16_t __p) | |
20457 | { | |
20458 | return __arm_vaddlvq_p_u32 (__a, __p); | |
20459 | } | |
20460 | ||
20461 | __extension__ extern __inline int32_t | |
20462 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20463 | __arm_vcmpneq (int8x16_t __a, int8x16_t __b) | |
20464 | { | |
20465 | return __arm_vcmpneq_s8 (__a, __b); | |
20466 | } | |
20467 | ||
20468 | __extension__ extern __inline mve_pred16_t | |
20469 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20470 | __arm_vcmpneq (int16x8_t __a, int16x8_t __b) | |
20471 | { | |
20472 | return __arm_vcmpneq_s16 (__a, __b); | |
20473 | } | |
20474 | ||
20475 | __extension__ extern __inline mve_pred16_t | |
20476 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20477 | __arm_vcmpneq (int32x4_t __a, int32x4_t __b) | |
20478 | { | |
20479 | return __arm_vcmpneq_s32 (__a, __b); | |
20480 | } | |
20481 | ||
20482 | __extension__ extern __inline mve_pred16_t | |
20483 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20484 | __arm_vcmpneq (uint8x16_t __a, uint8x16_t __b) | |
20485 | { | |
20486 | return __arm_vcmpneq_u8 (__a, __b); | |
20487 | } | |
20488 | ||
20489 | __extension__ extern __inline mve_pred16_t | |
20490 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20491 | __arm_vcmpneq (uint16x8_t __a, uint16x8_t __b) | |
20492 | { | |
20493 | return __arm_vcmpneq_u16 (__a, __b); | |
20494 | } | |
20495 | ||
20496 | __extension__ extern __inline mve_pred16_t | |
20497 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20498 | __arm_vcmpneq (uint32x4_t __a, uint32x4_t __b) | |
20499 | { | |
20500 | return __arm_vcmpneq_u32 (__a, __b); | |
20501 | } | |
20502 | ||
20503 | __extension__ extern __inline int8x16_t | |
20504 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20505 | __arm_vshlq (int8x16_t __a, int8x16_t __b) | |
20506 | { | |
20507 | return __arm_vshlq_s8 (__a, __b); | |
20508 | } | |
20509 | ||
20510 | __extension__ extern __inline int16x8_t | |
20511 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20512 | __arm_vshlq (int16x8_t __a, int16x8_t __b) | |
20513 | { | |
20514 | return __arm_vshlq_s16 (__a, __b); | |
20515 | } | |
20516 | ||
20517 | __extension__ extern __inline int32x4_t | |
20518 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20519 | __arm_vshlq (int32x4_t __a, int32x4_t __b) | |
20520 | { | |
20521 | return __arm_vshlq_s32 (__a, __b); | |
20522 | } | |
20523 | ||
20524 | __extension__ extern __inline uint8x16_t | |
20525 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20526 | __arm_vshlq (uint8x16_t __a, int8x16_t __b) | |
20527 | { | |
20528 | return __arm_vshlq_u8 (__a, __b); | |
20529 | } | |
20530 | ||
20531 | __extension__ extern __inline uint16x8_t | |
20532 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20533 | __arm_vshlq (uint16x8_t __a, int16x8_t __b) | |
20534 | { | |
20535 | return __arm_vshlq_u16 (__a, __b); | |
20536 | } | |
20537 | ||
20538 | __extension__ extern __inline uint32x4_t | |
20539 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20540 | __arm_vshlq (uint32x4_t __a, int32x4_t __b) | |
20541 | { | |
20542 | return __arm_vshlq_u32 (__a, __b); | |
20543 | } | |
20544 | ||
20545 | __extension__ extern __inline uint8x16_t | |
20546 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20547 | __arm_vsubq (uint8x16_t __a, uint8x16_t __b) | |
20548 | { | |
20549 | return __arm_vsubq_u8 (__a, __b); | |
20550 | } | |
20551 | ||
20552 | __extension__ extern __inline uint8x16_t | |
20553 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20554 | __arm_vsubq (uint8x16_t __a, uint8_t __b) | |
20555 | { | |
20556 | return __arm_vsubq_n_u8 (__a, __b); | |
20557 | } | |
20558 | ||
20559 | __extension__ extern __inline uint8x16_t | |
20560 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20561 | __arm_vrmulhq (uint8x16_t __a, uint8x16_t __b) | |
20562 | { | |
20563 | return __arm_vrmulhq_u8 (__a, __b); | |
20564 | } | |
20565 | ||
20566 | __extension__ extern __inline uint8x16_t | |
20567 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20568 | __arm_vrhaddq (uint8x16_t __a, uint8x16_t __b) | |
20569 | { | |
20570 | return __arm_vrhaddq_u8 (__a, __b); | |
20571 | } | |
20572 | ||
20573 | __extension__ extern __inline uint8x16_t | |
20574 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20575 | __arm_vqsubq (uint8x16_t __a, uint8x16_t __b) | |
20576 | { | |
20577 | return __arm_vqsubq_u8 (__a, __b); | |
20578 | } | |
20579 | ||
20580 | __extension__ extern __inline uint8x16_t | |
20581 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20582 | __arm_vqsubq (uint8x16_t __a, uint8_t __b) | |
20583 | { | |
20584 | return __arm_vqsubq_n_u8 (__a, __b); | |
20585 | } | |
20586 | ||
20587 | __extension__ extern __inline uint8x16_t | |
20588 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20589 | __arm_vqaddq (uint8x16_t __a, uint8x16_t __b) | |
20590 | { | |
20591 | return __arm_vqaddq_u8 (__a, __b); | |
20592 | } | |
20593 | ||
20594 | __extension__ extern __inline uint8x16_t | |
20595 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20596 | __arm_vqaddq (uint8x16_t __a, uint8_t __b) | |
20597 | { | |
20598 | return __arm_vqaddq_n_u8 (__a, __b); | |
20599 | } | |
20600 | ||
20601 | __extension__ extern __inline uint8x16_t | |
20602 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20603 | __arm_vorrq (uint8x16_t __a, uint8x16_t __b) | |
20604 | { | |
20605 | return __arm_vorrq_u8 (__a, __b); | |
20606 | } | |
20607 | ||
20608 | __extension__ extern __inline uint8x16_t | |
20609 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20610 | __arm_vornq (uint8x16_t __a, uint8x16_t __b) | |
20611 | { | |
20612 | return __arm_vornq_u8 (__a, __b); | |
20613 | } | |
20614 | ||
20615 | __extension__ extern __inline uint8x16_t | |
20616 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20617 | __arm_vmulq (uint8x16_t __a, uint8x16_t __b) | |
20618 | { | |
20619 | return __arm_vmulq_u8 (__a, __b); | |
20620 | } | |
20621 | ||
20622 | __extension__ extern __inline uint8x16_t | |
20623 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20624 | __arm_vmulq (uint8x16_t __a, uint8_t __b) | |
20625 | { | |
20626 | return __arm_vmulq_n_u8 (__a, __b); | |
20627 | } | |
20628 | ||
20629 | __extension__ extern __inline uint16x8_t | |
20630 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20631 | __arm_vmulltq_int (uint8x16_t __a, uint8x16_t __b) | |
20632 | { | |
20633 | return __arm_vmulltq_int_u8 (__a, __b); | |
20634 | } | |
20635 | ||
20636 | __extension__ extern __inline uint16x8_t | |
20637 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20638 | __arm_vmullbq_int (uint8x16_t __a, uint8x16_t __b) | |
20639 | { | |
20640 | return __arm_vmullbq_int_u8 (__a, __b); | |
20641 | } | |
20642 | ||
20643 | __extension__ extern __inline uint8x16_t | |
20644 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20645 | __arm_vmulhq (uint8x16_t __a, uint8x16_t __b) | |
20646 | { | |
20647 | return __arm_vmulhq_u8 (__a, __b); | |
20648 | } | |
20649 | ||
20650 | __extension__ extern __inline uint32_t | |
20651 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20652 | __arm_vmladavq (uint8x16_t __a, uint8x16_t __b) | |
20653 | { | |
20654 | return __arm_vmladavq_u8 (__a, __b); | |
20655 | } | |
20656 | ||
20657 | __extension__ extern __inline uint8_t | |
20658 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20659 | __arm_vminvq (uint8_t __a, uint8x16_t __b) | |
20660 | { | |
20661 | return __arm_vminvq_u8 (__a, __b); | |
20662 | } | |
20663 | ||
20664 | __extension__ extern __inline uint8x16_t | |
20665 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20666 | __arm_vminq (uint8x16_t __a, uint8x16_t __b) | |
20667 | { | |
20668 | return __arm_vminq_u8 (__a, __b); | |
20669 | } | |
20670 | ||
20671 | __extension__ extern __inline uint8_t | |
20672 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20673 | __arm_vmaxvq (uint8_t __a, uint8x16_t __b) | |
20674 | { | |
20675 | return __arm_vmaxvq_u8 (__a, __b); | |
20676 | } | |
20677 | ||
20678 | __extension__ extern __inline uint8x16_t | |
20679 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20680 | __arm_vmaxq (uint8x16_t __a, uint8x16_t __b) | |
20681 | { | |
20682 | return __arm_vmaxq_u8 (__a, __b); | |
20683 | } | |
20684 | ||
20685 | __extension__ extern __inline uint8x16_t | |
20686 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20687 | __arm_vhsubq (uint8x16_t __a, uint8x16_t __b) | |
20688 | { | |
20689 | return __arm_vhsubq_u8 (__a, __b); | |
20690 | } | |
20691 | ||
20692 | __extension__ extern __inline uint8x16_t | |
20693 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20694 | __arm_vhsubq (uint8x16_t __a, uint8_t __b) | |
20695 | { | |
20696 | return __arm_vhsubq_n_u8 (__a, __b); | |
20697 | } | |
20698 | ||
20699 | __extension__ extern __inline uint8x16_t | |
20700 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20701 | __arm_vhaddq (uint8x16_t __a, uint8x16_t __b) | |
20702 | { | |
20703 | return __arm_vhaddq_u8 (__a, __b); | |
20704 | } | |
20705 | ||
20706 | __extension__ extern __inline uint8x16_t | |
20707 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20708 | __arm_vhaddq (uint8x16_t __a, uint8_t __b) | |
20709 | { | |
20710 | return __arm_vhaddq_n_u8 (__a, __b); | |
20711 | } | |
20712 | ||
20713 | __extension__ extern __inline uint8x16_t | |
20714 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20715 | __arm_veorq (uint8x16_t __a, uint8x16_t __b) | |
20716 | { | |
20717 | return __arm_veorq_u8 (__a, __b); | |
20718 | } | |
20719 | ||
20720 | __extension__ extern __inline mve_pred16_t | |
20721 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20722 | __arm_vcmpneq (uint8x16_t __a, uint8_t __b) | |
20723 | { | |
20724 | return __arm_vcmpneq_n_u8 (__a, __b); | |
20725 | } | |
20726 | ||
20727 | __extension__ extern __inline mve_pred16_t | |
20728 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20729 | __arm_vcmphiq (uint8x16_t __a, uint8x16_t __b) | |
20730 | { | |
20731 | return __arm_vcmphiq_u8 (__a, __b); | |
20732 | } | |
20733 | ||
20734 | __extension__ extern __inline mve_pred16_t | |
20735 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20736 | __arm_vcmphiq (uint8x16_t __a, uint8_t __b) | |
20737 | { | |
20738 | return __arm_vcmphiq_n_u8 (__a, __b); | |
20739 | } | |
20740 | ||
20741 | __extension__ extern __inline mve_pred16_t | |
20742 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20743 | __arm_vcmpeqq (uint8x16_t __a, uint8x16_t __b) | |
20744 | { | |
20745 | return __arm_vcmpeqq_u8 (__a, __b); | |
20746 | } | |
20747 | ||
20748 | __extension__ extern __inline mve_pred16_t | |
20749 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20750 | __arm_vcmpeqq (uint8x16_t __a, uint8_t __b) | |
20751 | { | |
20752 | return __arm_vcmpeqq_n_u8 (__a, __b); | |
20753 | } | |
20754 | ||
20755 | __extension__ extern __inline mve_pred16_t | |
20756 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20757 | __arm_vcmpcsq (uint8x16_t __a, uint8x16_t __b) | |
20758 | { | |
20759 | return __arm_vcmpcsq_u8 (__a, __b); | |
20760 | } | |
20761 | ||
20762 | __extension__ extern __inline mve_pred16_t | |
20763 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20764 | __arm_vcmpcsq (uint8x16_t __a, uint8_t __b) | |
20765 | { | |
20766 | return __arm_vcmpcsq_n_u8 (__a, __b); | |
20767 | } | |
20768 | ||
20769 | __extension__ extern __inline uint8x16_t | |
20770 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20771 | __arm_vcaddq_rot90 (uint8x16_t __a, uint8x16_t __b) | |
20772 | { | |
20773 | return __arm_vcaddq_rot90_u8 (__a, __b); | |
20774 | } | |
20775 | ||
20776 | __extension__ extern __inline uint8x16_t | |
20777 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20778 | __arm_vcaddq_rot270 (uint8x16_t __a, uint8x16_t __b) | |
20779 | { | |
20780 | return __arm_vcaddq_rot270_u8 (__a, __b); | |
20781 | } | |
20782 | ||
20783 | __extension__ extern __inline uint8x16_t | |
20784 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20785 | __arm_vbicq (uint8x16_t __a, uint8x16_t __b) | |
20786 | { | |
20787 | return __arm_vbicq_u8 (__a, __b); | |
20788 | } | |
20789 | ||
20790 | __extension__ extern __inline uint8x16_t | |
20791 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20792 | __arm_vandq (uint8x16_t __a, uint8x16_t __b) | |
20793 | { | |
20794 | return __arm_vandq_u8 (__a, __b); | |
20795 | } | |
20796 | ||
20797 | __extension__ extern __inline uint32_t | |
20798 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20799 | __arm_vaddvq_p (uint8x16_t __a, mve_pred16_t __p) | |
20800 | { | |
20801 | return __arm_vaddvq_p_u8 (__a, __p); | |
20802 | } | |
20803 | ||
20804 | __extension__ extern __inline uint32_t | |
20805 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20806 | __arm_vaddvaq (uint32_t __a, uint8x16_t __b) | |
20807 | { | |
20808 | return __arm_vaddvaq_u8 (__a, __b); | |
20809 | } | |
20810 | ||
20811 | __extension__ extern __inline uint8x16_t | |
20812 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20813 | __arm_vaddq (uint8x16_t __a, uint8_t __b) | |
20814 | { | |
20815 | return __arm_vaddq_n_u8 (__a, __b); | |
20816 | } | |
20817 | ||
20818 | __extension__ extern __inline uint8x16_t | |
20819 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20820 | __arm_vabdq (uint8x16_t __a, uint8x16_t __b) | |
20821 | { | |
20822 | return __arm_vabdq_u8 (__a, __b); | |
20823 | } | |
20824 | ||
20825 | __extension__ extern __inline uint8x16_t | |
20826 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20827 | __arm_vshlq_r (uint8x16_t __a, int32_t __b) | |
20828 | { | |
20829 | return __arm_vshlq_r_u8 (__a, __b); | |
20830 | } | |
20831 | ||
20832 | __extension__ extern __inline uint8x16_t | |
20833 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20834 | __arm_vrshlq (uint8x16_t __a, int8x16_t __b) | |
20835 | { | |
20836 | return __arm_vrshlq_u8 (__a, __b); | |
20837 | } | |
20838 | ||
20839 | __extension__ extern __inline uint8x16_t | |
20840 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20841 | __arm_vrshlq (uint8x16_t __a, int32_t __b) | |
20842 | { | |
20843 | return __arm_vrshlq_n_u8 (__a, __b); | |
20844 | } | |
20845 | ||
20846 | __extension__ extern __inline uint8x16_t | |
20847 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20848 | __arm_vqshlq (uint8x16_t __a, int8x16_t __b) | |
20849 | { | |
20850 | return __arm_vqshlq_u8 (__a, __b); | |
20851 | } | |
20852 | ||
20853 | __extension__ extern __inline uint8x16_t | |
20854 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20855 | __arm_vqshlq_r (uint8x16_t __a, int32_t __b) | |
20856 | { | |
20857 | return __arm_vqshlq_r_u8 (__a, __b); | |
20858 | } | |
20859 | ||
20860 | __extension__ extern __inline uint8x16_t | |
20861 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20862 | __arm_vqrshlq (uint8x16_t __a, int8x16_t __b) | |
20863 | { | |
20864 | return __arm_vqrshlq_u8 (__a, __b); | |
20865 | } | |
20866 | ||
20867 | __extension__ extern __inline uint8x16_t | |
20868 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20869 | __arm_vqrshlq (uint8x16_t __a, int32_t __b) | |
20870 | { | |
20871 | return __arm_vqrshlq_n_u8 (__a, __b); | |
20872 | } | |
20873 | ||
20874 | __extension__ extern __inline uint8_t | |
20875 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20876 | __arm_vminavq (uint8_t __a, int8x16_t __b) | |
20877 | { | |
20878 | return __arm_vminavq_s8 (__a, __b); | |
20879 | } | |
20880 | ||
20881 | __extension__ extern __inline uint8x16_t | |
20882 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20883 | __arm_vminaq (uint8x16_t __a, int8x16_t __b) | |
20884 | { | |
20885 | return __arm_vminaq_s8 (__a, __b); | |
20886 | } | |
20887 | ||
20888 | __extension__ extern __inline uint8_t | |
20889 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20890 | __arm_vmaxavq (uint8_t __a, int8x16_t __b) | |
20891 | { | |
20892 | return __arm_vmaxavq_s8 (__a, __b); | |
20893 | } | |
20894 | ||
20895 | __extension__ extern __inline uint8x16_t | |
20896 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20897 | __arm_vmaxaq (uint8x16_t __a, int8x16_t __b) | |
20898 | { | |
20899 | return __arm_vmaxaq_s8 (__a, __b); | |
20900 | } | |
20901 | ||
20902 | __extension__ extern __inline uint8x16_t | |
20903 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20904 | __arm_vbrsrq (uint8x16_t __a, int32_t __b) | |
20905 | { | |
20906 | return __arm_vbrsrq_n_u8 (__a, __b); | |
20907 | } | |
20908 | ||
20909 | __extension__ extern __inline uint8x16_t | |
20910 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20911 | __arm_vshlq_n (uint8x16_t __a, const int __imm) | |
20912 | { | |
20913 | return __arm_vshlq_n_u8 (__a, __imm); | |
20914 | } | |
20915 | ||
20916 | __extension__ extern __inline uint8x16_t | |
20917 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20918 | __arm_vrshrq (uint8x16_t __a, const int __imm) | |
20919 | { | |
20920 | return __arm_vrshrq_n_u8 (__a, __imm); | |
20921 | } | |
20922 | ||
20923 | __extension__ extern __inline uint8x16_t | |
20924 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20925 | __arm_vqshlq_n (uint8x16_t __a, const int __imm) | |
20926 | { | |
20927 | return __arm_vqshlq_n_u8 (__a, __imm); | |
20928 | } | |
20929 | ||
20930 | __extension__ extern __inline mve_pred16_t | |
20931 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20932 | __arm_vcmpneq (int8x16_t __a, int8_t __b) | |
20933 | { | |
20934 | return __arm_vcmpneq_n_s8 (__a, __b); | |
20935 | } | |
20936 | ||
20937 | __extension__ extern __inline mve_pred16_t | |
20938 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20939 | __arm_vcmpltq (int8x16_t __a, int8x16_t __b) | |
20940 | { | |
20941 | return __arm_vcmpltq_s8 (__a, __b); | |
20942 | } | |
20943 | ||
20944 | __extension__ extern __inline mve_pred16_t | |
20945 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20946 | __arm_vcmpltq (int8x16_t __a, int8_t __b) | |
20947 | { | |
20948 | return __arm_vcmpltq_n_s8 (__a, __b); | |
20949 | } | |
20950 | ||
20951 | __extension__ extern __inline mve_pred16_t | |
20952 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20953 | __arm_vcmpleq (int8x16_t __a, int8x16_t __b) | |
20954 | { | |
20955 | return __arm_vcmpleq_s8 (__a, __b); | |
20956 | } | |
20957 | ||
20958 | __extension__ extern __inline mve_pred16_t | |
20959 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20960 | __arm_vcmpleq (int8x16_t __a, int8_t __b) | |
20961 | { | |
20962 | return __arm_vcmpleq_n_s8 (__a, __b); | |
20963 | } | |
20964 | ||
20965 | __extension__ extern __inline mve_pred16_t | |
20966 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20967 | __arm_vcmpgtq (int8x16_t __a, int8x16_t __b) | |
20968 | { | |
20969 | return __arm_vcmpgtq_s8 (__a, __b); | |
20970 | } | |
20971 | ||
20972 | __extension__ extern __inline mve_pred16_t | |
20973 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20974 | __arm_vcmpgtq (int8x16_t __a, int8_t __b) | |
20975 | { | |
20976 | return __arm_vcmpgtq_n_s8 (__a, __b); | |
20977 | } | |
20978 | ||
20979 | __extension__ extern __inline mve_pred16_t | |
20980 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20981 | __arm_vcmpgeq (int8x16_t __a, int8x16_t __b) | |
20982 | { | |
20983 | return __arm_vcmpgeq_s8 (__a, __b); | |
20984 | } | |
20985 | ||
20986 | __extension__ extern __inline mve_pred16_t | |
20987 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20988 | __arm_vcmpgeq (int8x16_t __a, int8_t __b) | |
20989 | { | |
20990 | return __arm_vcmpgeq_n_s8 (__a, __b); | |
20991 | } | |
20992 | ||
20993 | __extension__ extern __inline mve_pred16_t | |
20994 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
20995 | __arm_vcmpeqq (int8x16_t __a, int8x16_t __b) | |
20996 | { | |
20997 | return __arm_vcmpeqq_s8 (__a, __b); | |
20998 | } | |
20999 | ||
21000 | __extension__ extern __inline mve_pred16_t | |
21001 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
21002 | __arm_vcmpeqq (int8x16_t __a, int8_t __b) | |
21003 | { | |
21004 | return __arm_vcmpeqq_n_s8 (__a, __b); | |
21005 | } | |
21006 | ||
21007 | __extension__ extern __inline uint8x16_t | |
21008 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
21009 | __arm_vqshluq (int8x16_t __a, const int __imm) | |
21010 | { | |
21011 | return __arm_vqshluq_n_s8 (__a, __imm); | |
21012 | } | |
21013 | ||
21014 | __extension__ extern __inline int32_t | |
21015 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
21016 | __arm_vaddvq_p (int8x16_t __a, mve_pred16_t __p) | |
21017 | { | |
21018 | return __arm_vaddvq_p_s8 (__a, __p); | |
21019 | } | |
21020 | ||
21021 | __extension__ extern __inline int8x16_t | |
21022 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
21023 | __arm_vsubq (int8x16_t __a, int8x16_t __b) | |
21024 | { | |
21025 | return __arm_vsubq_s8 (__a, __b); | |
21026 | } | |
21027 | ||
21028 | __extension__ extern __inline int8x16_t | |
21029 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
21030 | __arm_vsubq (int8x16_t __a, int8_t __b) | |
21031 | { | |
21032 | return __arm_vsubq_n_s8 (__a, __b); | |
21033 | } | |
21034 | ||
21035 | __extension__ extern __inline int8x16_t | |
21036 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
21037 | __arm_vshlq_r (int8x16_t __a, int32_t __b) | |
21038 | { | |
21039 | return __arm_vshlq_r_s8 (__a, __b); | |
21040 | } | |
21041 | ||
21042 | __extension__ extern __inline int8x16_t | |
21043 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
21044 | __arm_vrshlq (int8x16_t __a, int8x16_t __b) | |
21045 | { | |
21046 | return __arm_vrshlq_s8 (__a, __b); | |
21047 | } | |
21048 | ||
21049 | __extension__ extern __inline int8x16_t | |
21050 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
21051 | __arm_vrshlq (int8x16_t __a, int32_t __b) | |
21052 | { | |
21053 | return __arm_vrshlq_n_s8 (__a, __b); | |
21054 | } | |
21055 | ||
21056 | __extension__ extern __inline int8x16_t | |
21057 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
21058 | __arm_vrmulhq (int8x16_t __a, int8x16_t __b) | |
21059 | { | |
21060 | return __arm_vrmulhq_s8 (__a, __b); | |
21061 | } | |
21062 | ||
21063 | __extension__ extern __inline int8x16_t | |
21064 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
21065 | __arm_vrhaddq (int8x16_t __a, int8x16_t __b) | |
21066 | { | |
21067 | return __arm_vrhaddq_s8 (__a, __b); | |
21068 | } | |
21069 | ||
21070 | __extension__ extern __inline int8x16_t | |
21071 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
21072 | __arm_vqsubq (int8x16_t __a, int8x16_t __b) | |
21073 | { | |
21074 | return __arm_vqsubq_s8 (__a, __b); | |
21075 | } | |
21076 | ||
21077 | __extension__ extern __inline int8x16_t | |
21078 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
21079 | __arm_vqsubq (int8x16_t __a, int8_t __b) | |
21080 | { | |
21081 | return __arm_vqsubq_n_s8 (__a, __b); | |
21082 | } | |
21083 | ||
21084 | __extension__ extern __inline int8x16_t | |
21085 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
21086 | __arm_vqshlq (int8x16_t __a, int8x16_t __b) | |
21087 | { | |
21088 | return __arm_vqshlq_s8 (__a, __b); | |
21089 | } | |
21090 | ||
21091 | __extension__ extern __inline int8x16_t | |
21092 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
21093 | __arm_vqshlq_r (int8x16_t __a, int32_t __b) | |
21094 | { | |
21095 | return __arm_vqshlq_r_s8 (__a, __b); | |
21096 | } | |
21097 | ||
21098 | __extension__ extern __inline int8x16_t | |
21099 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
21100 | __arm_vqrshlq (int8x16_t __a, int8x16_t __b) | |
21101 | { | |
21102 | return __arm_vqrshlq_s8 (__a, __b); | |
21103 | } | |
21104 | ||
21105 | __extension__ extern __inline int8x16_t | |
21106 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
21107 | __arm_vqrshlq (int8x16_t __a, int32_t __b) | |
21108 | { | |
21109 | return __arm_vqrshlq_n_s8 (__a, __b); | |
21110 | } | |
21111 | ||
/* Polymorphic wrappers for MVE intrinsics on int8x16_t operands.
   Each wrapper only forwards its arguments unchanged to the matching
   type-suffixed intrinsic; the _n_ suffix marks the vector/scalar
   form.  NOTE(review): the same-name definitions below rely on
   function overloading, so this region is presumably guarded for C++
   compilation — confirm against the enclosing #ifdef.  */

/* vqrdmulhq: vector/vector and vector/scalar forms, s8.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmulhq (int8x16_t __a, int8x16_t __b)
{
  return __arm_vqrdmulhq_s8 (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmulhq (int8x16_t __a, int8_t __b)
{
  return __arm_vqrdmulhq_n_s8 (__a, __b);
}

/* vqdmulhq: vector/vector and vector/scalar forms, s8.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulhq (int8x16_t __a, int8x16_t __b)
{
  return __arm_vqdmulhq_s8 (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulhq (int8x16_t __a, int8_t __b)
{
  return __arm_vqdmulhq_n_s8 (__a, __b);
}

/* vqaddq: vector/vector and vector/scalar forms, s8.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq (int8x16_t __a, int8x16_t __b)
{
  return __arm_vqaddq_s8 (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq (int8x16_t __a, int8_t __b)
{
  return __arm_vqaddq_n_s8 (__a, __b);
}

/* vorrq / vornq: bitwise forms, s8.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq (int8x16_t __a, int8x16_t __b)
{
  return __arm_vorrq_s8 (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq (int8x16_t __a, int8x16_t __b)
{
  return __arm_vornq_s8 (__a, __b);
}

/* vmulq: vector/vector and vector/scalar forms, s8.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq (int8x16_t __a, int8x16_t __b)
{
  return __arm_vmulq_s8 (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq (int8x16_t __a, int8_t __b)
{
  return __arm_vmulq_n_s8 (__a, __b);
}

/* vmulltq_int / vmullbq_int: widening multiplies, s8 -> int16x8_t.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int (int8x16_t __a, int8x16_t __b)
{
  return __arm_vmulltq_int_s8 (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_int (int8x16_t __a, int8x16_t __b)
{
  return __arm_vmullbq_int_s8 (__a, __b);
}

/* vmulhq: high-half multiply, s8.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq (int8x16_t __a, int8x16_t __b)
{
  return __arm_vmulhq_s8 (__a, __b);
}

/* vmlsdav[x]q / vmladav[x]q: scalar (int32_t) reductions, s8.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavxq (int8x16_t __a, int8x16_t __b)
{
  return __arm_vmlsdavxq_s8 (__a, __b);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavq (int8x16_t __a, int8x16_t __b)
{
  return __arm_vmlsdavq_s8 (__a, __b);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavxq (int8x16_t __a, int8x16_t __b)
{
  return __arm_vmladavxq_s8 (__a, __b);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavq (int8x16_t __a, int8x16_t __b)
{
  return __arm_vmladavq_s8 (__a, __b);
}

/* vminvq / vmaxvq: across-vector reductions seeded with scalar __a;
   vminq / vmaxq: element-wise forms, s8.  */
__extension__ extern __inline int8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminvq (int8_t __a, int8x16_t __b)
{
  return __arm_vminvq_s8 (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq (int8x16_t __a, int8x16_t __b)
{
  return __arm_vminq_s8 (__a, __b);
}

__extension__ extern __inline int8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxvq (int8_t __a, int8x16_t __b)
{
  return __arm_vmaxvq_s8 (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq (int8x16_t __a, int8x16_t __b)
{
  return __arm_vmaxq_s8 (__a, __b);
}
21258 | ||
/* Polymorphic wrappers for MVE halving/bitwise/add/shift intrinsics on
   int8x16_t.  Each wrapper forwards its arguments unchanged to the
   type-suffixed intrinsic; _n_ marks the scalar-second-operand or
   immediate form.  */

/* vhsubq: vector/vector and vector/scalar forms, s8.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq (int8x16_t __a, int8x16_t __b)
{
  return __arm_vhsubq_s8 (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq (int8x16_t __a, int8_t __b)
{
  return __arm_vhsubq_n_s8 (__a, __b);
}

/* vhcaddq_rot90 / vhcaddq_rot270: halving complex add forms, s8.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhcaddq_rot90 (int8x16_t __a, int8x16_t __b)
{
  return __arm_vhcaddq_rot90_s8 (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhcaddq_rot270 (int8x16_t __a, int8x16_t __b)
{
  return __arm_vhcaddq_rot270_s8 (__a, __b);
}

/* vhaddq: vector/vector and vector/scalar forms, s8.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq (int8x16_t __a, int8x16_t __b)
{
  return __arm_vhaddq_s8 (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq (int8x16_t __a, int8_t __b)
{
  return __arm_vhaddq_n_s8 (__a, __b);
}

/* veorq: bitwise form, s8.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq (int8x16_t __a, int8x16_t __b)
{
  return __arm_veorq_s8 (__a, __b);
}

/* vcaddq_rot90 / vcaddq_rot270: complex add forms, s8.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90 (int8x16_t __a, int8x16_t __b)
{
  return __arm_vcaddq_rot90_s8 (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270 (int8x16_t __a, int8x16_t __b)
{
  return __arm_vcaddq_rot270_s8 (__a, __b);
}

/* vbrsrq: scalar __b selects the _n_ form, s8.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq (int8x16_t __a, int32_t __b)
{
  return __arm_vbrsrq_n_s8 (__a, __b);
}

/* vbicq / vandq: bitwise forms, s8.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq (int8x16_t __a, int8x16_t __b)
{
  return __arm_vbicq_s8 (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq (int8x16_t __a, int8x16_t __b)
{
  return __arm_vandq_s8 (__a, __b);
}

/* vaddvaq: across-vector add accumulated into scalar __a, s8.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvaq (int32_t __a, int8x16_t __b)
{
  return __arm_vaddvaq_s8 (__a, __b);
}

/* vaddq: vector/scalar form, s8.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq (int8x16_t __a, int8_t __b)
{
  return __arm_vaddq_n_s8 (__a, __b);
}

/* vabdq: absolute difference, s8.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq (int8x16_t __a, int8x16_t __b)
{
  return __arm_vabdq_s8 (__a, __b);
}

/* vshlq_n / vrshrq / vqshlq_n: immediate-count shift forms, s8.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_n (int8x16_t __a, const int __imm)
{
  return __arm_vshlq_n_s8 (__a, __imm);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrq (int8x16_t __a, const int __imm)
{
  return __arm_vrshrq_n_s8 (__a, __imm);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_n (int8x16_t __a, const int __imm)
{
  return __arm_vqshlq_n_s8 (__a, __imm);
}
21384 | ||
/* Polymorphic wrappers for MVE arithmetic intrinsics on uint16x8_t.
   Each wrapper forwards its arguments unchanged to the type-suffixed
   intrinsic; _n_ marks the vector/scalar form.  */

/* vsubq: vector/vector and vector/scalar forms, u16.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq (uint16x8_t __a, uint16x8_t __b)
{
  return __arm_vsubq_u16 (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq (uint16x8_t __a, uint16_t __b)
{
  return __arm_vsubq_n_u16 (__a, __b);
}

/* vrmulhq / vrhaddq: rounding forms, u16.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq (uint16x8_t __a, uint16x8_t __b)
{
  return __arm_vrmulhq_u16 (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq (uint16x8_t __a, uint16x8_t __b)
{
  return __arm_vrhaddq_u16 (__a, __b);
}

/* vqsubq: vector/vector and vector/scalar forms, u16.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq (uint16x8_t __a, uint16x8_t __b)
{
  return __arm_vqsubq_u16 (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq (uint16x8_t __a, uint16_t __b)
{
  return __arm_vqsubq_n_u16 (__a, __b);
}

/* vqaddq: vector/vector and vector/scalar forms, u16.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq (uint16x8_t __a, uint16x8_t __b)
{
  return __arm_vqaddq_u16 (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq (uint16x8_t __a, uint16_t __b)
{
  return __arm_vqaddq_n_u16 (__a, __b);
}

/* vorrq / vornq: bitwise forms, u16.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq (uint16x8_t __a, uint16x8_t __b)
{
  return __arm_vorrq_u16 (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq (uint16x8_t __a, uint16x8_t __b)
{
  return __arm_vornq_u16 (__a, __b);
}

/* vmulq: vector/vector and vector/scalar forms, u16.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq (uint16x8_t __a, uint16x8_t __b)
{
  return __arm_vmulq_u16 (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq (uint16x8_t __a, uint16_t __b)
{
  return __arm_vmulq_n_u16 (__a, __b);
}

/* vmulltq_int / vmullbq_int: widening multiplies, u16 -> uint32x4_t.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int (uint16x8_t __a, uint16x8_t __b)
{
  return __arm_vmulltq_int_u16 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_int (uint16x8_t __a, uint16x8_t __b)
{
  return __arm_vmullbq_int_u16 (__a, __b);
}

/* vmulhq: high-half multiply, u16.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq (uint16x8_t __a, uint16x8_t __b)
{
  return __arm_vmulhq_u16 (__a, __b);
}

/* vmladavq: scalar (uint32_t) reduction, u16.  */
__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavq (uint16x8_t __a, uint16x8_t __b)
{
  return __arm_vmladavq_u16 (__a, __b);
}

/* vminvq / vmaxvq: across-vector reductions seeded with scalar __a;
   vminq / vmaxq: element-wise forms, u16.  */
__extension__ extern __inline uint16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminvq (uint16_t __a, uint16x8_t __b)
{
  return __arm_vminvq_u16 (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq (uint16x8_t __a, uint16x8_t __b)
{
  return __arm_vminq_u16 (__a, __b);
}

__extension__ extern __inline uint16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxvq (uint16_t __a, uint16x8_t __b)
{
  return __arm_vmaxvq_u16 (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq (uint16x8_t __a, uint16x8_t __b)
{
  return __arm_vmaxq_u16 (__a, __b);
}

/* vhsubq: vector/vector and vector/scalar forms, u16.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq (uint16x8_t __a, uint16x8_t __b)
{
  return __arm_vhsubq_u16 (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq (uint16x8_t __a, uint16_t __b)
{
  return __arm_vhsubq_n_u16 (__a, __b);
}

/* vhaddq: vector/vector and vector/scalar forms, u16.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq (uint16x8_t __a, uint16x8_t __b)
{
  return __arm_vhaddq_u16 (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq (uint16x8_t __a, uint16_t __b)
{
  return __arm_vhaddq_n_u16 (__a, __b);
}

/* veorq: bitwise form, u16.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq (uint16x8_t __a, uint16x8_t __b)
{
  return __arm_veorq_u16 (__a, __b);
}
21559 | ||
/* Polymorphic wrappers for MVE compare/bitwise/reduction intrinsics on
   uint16x8_t.  Compares return an mve_pred16_t predicate mask; each
   wrapper forwards its arguments unchanged to the type-suffixed
   intrinsic, _n_ marking the vector/scalar form.  */

/* vcmpneq: vector/scalar form only here, u16.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq (uint16x8_t __a, uint16_t __b)
{
  return __arm_vcmpneq_n_u16 (__a, __b);
}

/* vcmphiq: vector/vector and vector/scalar forms, u16.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmphiq (uint16x8_t __a, uint16x8_t __b)
{
  return __arm_vcmphiq_u16 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmphiq (uint16x8_t __a, uint16_t __b)
{
  return __arm_vcmphiq_n_u16 (__a, __b);
}

/* vcmpeqq: vector/vector and vector/scalar forms, u16.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq (uint16x8_t __a, uint16x8_t __b)
{
  return __arm_vcmpeqq_u16 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq (uint16x8_t __a, uint16_t __b)
{
  return __arm_vcmpeqq_n_u16 (__a, __b);
}

/* vcmpcsq: vector/vector and vector/scalar forms, u16.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpcsq (uint16x8_t __a, uint16x8_t __b)
{
  return __arm_vcmpcsq_u16 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpcsq (uint16x8_t __a, uint16_t __b)
{
  return __arm_vcmpcsq_n_u16 (__a, __b);
}

/* vcaddq_rot90 / vcaddq_rot270: complex add forms, u16.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90 (uint16x8_t __a, uint16x8_t __b)
{
  return __arm_vcaddq_rot90_u16 (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270 (uint16x8_t __a, uint16x8_t __b)
{
  return __arm_vcaddq_rot270_u16 (__a, __b);
}

/* vbicq / vandq: bitwise forms, u16.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq (uint16x8_t __a, uint16x8_t __b)
{
  return __arm_vbicq_u16 (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq (uint16x8_t __a, uint16x8_t __b)
{
  return __arm_vandq_u16 (__a, __b);
}

/* vaddvq_p: predicated across-vector add (__p is the predicate);
   vaddvaq: across-vector add accumulated into scalar __a, u16.  */
__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvq_p (uint16x8_t __a, mve_pred16_t __p)
{
  return __arm_vaddvq_p_u16 (__a, __p);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvaq (uint32_t __a, uint16x8_t __b)
{
  return __arm_vaddvaq_u16 (__a, __b);
}

/* vaddq: vector/scalar form, u16.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq (uint16x8_t __a, uint16_t __b)
{
  return __arm_vaddq_n_u16 (__a, __b);
}

/* vabdq: absolute difference, u16.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq (uint16x8_t __a, uint16x8_t __b)
{
  return __arm_vabdq_u16 (__a, __b);
}
21664 | ||
/* Polymorphic wrappers: u16 shift intrinsics (shift counts are signed —
   int16x8_t per-lane or int32_t scalar), unsigned-result min/max-across
   wrappers over signed vectors (vmina/vmaxa family), s16 compares, and
   vqshluq.  Each wrapper forwards its arguments unchanged to the
   type-suffixed intrinsic (_n_ = scalar/immediate, _r_ = register
   scalar form).  */

/* vshlq_r: register-scalar shift, u16.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_r (uint16x8_t __a, int32_t __b)
{
  return __arm_vshlq_r_u16 (__a, __b);
}

/* vrshlq: per-lane-vector and scalar (_n_) forms, u16.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq (uint16x8_t __a, int16x8_t __b)
{
  return __arm_vrshlq_u16 (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq (uint16x8_t __a, int32_t __b)
{
  return __arm_vrshlq_n_u16 (__a, __b);
}

/* vqshlq / vqshlq_r: saturating shifts, u16.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq (uint16x8_t __a, int16x8_t __b)
{
  return __arm_vqshlq_u16 (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_r (uint16x8_t __a, int32_t __b)
{
  return __arm_vqshlq_r_u16 (__a, __b);
}

/* vqrshlq: per-lane-vector and scalar (_n_) forms, u16.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq (uint16x8_t __a, int16x8_t __b)
{
  return __arm_vqrshlq_u16 (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq (uint16x8_t __a, int32_t __b)
{
  return __arm_vqrshlq_n_u16 (__a, __b);
}

/* vminavq / vminaq / vmaxavq / vmaxaq: unsigned accumulator __a with a
   signed int16x8_t operand — note these dispatch on the _s16 suffix.  */
__extension__ extern __inline uint16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminavq (uint16_t __a, int16x8_t __b)
{
  return __arm_vminavq_s16 (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminaq (uint16x8_t __a, int16x8_t __b)
{
  return __arm_vminaq_s16 (__a, __b);
}

__extension__ extern __inline uint16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxavq (uint16_t __a, int16x8_t __b)
{
  return __arm_vmaxavq_s16 (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxaq (uint16x8_t __a, int16x8_t __b)
{
  return __arm_vmaxaq_s16 (__a, __b);
}

/* vbrsrq: scalar __b selects the _n_ form, u16.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq (uint16x8_t __a, int32_t __b)
{
  return __arm_vbrsrq_n_u16 (__a, __b);
}

/* vshlq_n / vrshrq / vqshlq_n: immediate-count shift forms, u16.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_n (uint16x8_t __a, const int __imm)
{
  return __arm_vshlq_n_u16 (__a, __imm);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrq (uint16x8_t __a, const int __imm)
{
  return __arm_vrshrq_n_u16 (__a, __imm);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_n (uint16x8_t __a, const int __imm)
{
  return __arm_vqshlq_n_u16 (__a, __imm);
}

/* vcmpneq: vector/scalar form only here, s16.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq (int16x8_t __a, int16_t __b)
{
  return __arm_vcmpneq_n_s16 (__a, __b);
}

/* vcmpltq: vector/vector and vector/scalar forms, s16.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq (int16x8_t __a, int16x8_t __b)
{
  return __arm_vcmpltq_s16 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq (int16x8_t __a, int16_t __b)
{
  return __arm_vcmpltq_n_s16 (__a, __b);
}

/* vcmpleq: vector/vector and vector/scalar forms, s16.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq (int16x8_t __a, int16x8_t __b)
{
  return __arm_vcmpleq_s16 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq (int16x8_t __a, int16_t __b)
{
  return __arm_vcmpleq_n_s16 (__a, __b);
}

/* vcmpgtq: vector/vector and vector/scalar forms, s16.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq (int16x8_t __a, int16x8_t __b)
{
  return __arm_vcmpgtq_s16 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq (int16x8_t __a, int16_t __b)
{
  return __arm_vcmpgtq_n_s16 (__a, __b);
}

/* vcmpgeq: vector/vector and vector/scalar forms, s16.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq (int16x8_t __a, int16x8_t __b)
{
  return __arm_vcmpgeq_s16 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq (int16x8_t __a, int16_t __b)
{
  return __arm_vcmpgeq_n_s16 (__a, __b);
}

/* vcmpeqq: vector/vector and vector/scalar forms, s16.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq (int16x8_t __a, int16x8_t __b)
{
  return __arm_vcmpeqq_s16 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq (int16x8_t __a, int16_t __b)
{
  return __arm_vcmpeqq_n_s16 (__a, __b);
}

/* vqshluq: signed input, unsigned result; immediate shift count.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshluq (int16x8_t __a, const int __imm)
{
  return __arm_vqshluq_n_s16 (__a, __imm);
}
21853 | ||
/* Polymorphic wrappers for MVE arithmetic/shift/saturating intrinsics on
   int16x8_t.  Each wrapper forwards its arguments unchanged to the
   type-suffixed intrinsic (_n_ = scalar operand, _r_ = register scalar
   shift, _p_ = predicated form).  */

/* vaddvq_p: predicated across-vector add (__p is the predicate), s16.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvq_p (int16x8_t __a, mve_pred16_t __p)
{
  return __arm_vaddvq_p_s16 (__a, __p);
}

/* vsubq: vector/vector and vector/scalar forms, s16.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq (int16x8_t __a, int16x8_t __b)
{
  return __arm_vsubq_s16 (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq (int16x8_t __a, int16_t __b)
{
  return __arm_vsubq_n_s16 (__a, __b);
}

/* vshlq_r: register-scalar shift, s16.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_r (int16x8_t __a, int32_t __b)
{
  return __arm_vshlq_r_s16 (__a, __b);
}

/* vrshlq: per-lane-vector and scalar (_n_) forms, s16.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq (int16x8_t __a, int16x8_t __b)
{
  return __arm_vrshlq_s16 (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq (int16x8_t __a, int32_t __b)
{
  return __arm_vrshlq_n_s16 (__a, __b);
}

/* vrmulhq / vrhaddq: rounding forms, s16.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq (int16x8_t __a, int16x8_t __b)
{
  return __arm_vrmulhq_s16 (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq (int16x8_t __a, int16x8_t __b)
{
  return __arm_vrhaddq_s16 (__a, __b);
}

/* vqsubq: vector/vector and vector/scalar forms, s16.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq (int16x8_t __a, int16x8_t __b)
{
  return __arm_vqsubq_s16 (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq (int16x8_t __a, int16_t __b)
{
  return __arm_vqsubq_n_s16 (__a, __b);
}

/* vqshlq / vqshlq_r: saturating shifts, s16.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq (int16x8_t __a, int16x8_t __b)
{
  return __arm_vqshlq_s16 (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_r (int16x8_t __a, int32_t __b)
{
  return __arm_vqshlq_r_s16 (__a, __b);
}

/* vqrshlq: per-lane-vector and scalar (_n_) forms, s16.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq (int16x8_t __a, int16x8_t __b)
{
  return __arm_vqrshlq_s16 (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq (int16x8_t __a, int32_t __b)
{
  return __arm_vqrshlq_n_s16 (__a, __b);
}

/* vqrdmulhq: vector/vector and vector/scalar forms, s16.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmulhq (int16x8_t __a, int16x8_t __b)
{
  return __arm_vqrdmulhq_s16 (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmulhq (int16x8_t __a, int16_t __b)
{
  return __arm_vqrdmulhq_n_s16 (__a, __b);
}

/* vqdmulhq: vector/vector and vector/scalar forms, s16.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulhq (int16x8_t __a, int16x8_t __b)
{
  return __arm_vqdmulhq_s16 (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulhq (int16x8_t __a, int16_t __b)
{
  return __arm_vqdmulhq_n_s16 (__a, __b);
}

/* vqaddq: vector/vector and vector/scalar forms, s16.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq (int16x8_t __a, int16x8_t __b)
{
  return __arm_vqaddq_s16 (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq (int16x8_t __a, int16_t __b)
{
  return __arm_vqaddq_n_s16 (__a, __b);
}

/* vorrq / vornq: bitwise forms, s16.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq (int16x8_t __a, int16x8_t __b)
{
  return __arm_vorrq_s16 (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq (int16x8_t __a, int16x8_t __b)
{
  return __arm_vornq_s16 (__a, __b);
}

/* vmulq: vector/vector form, s16.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq (int16x8_t __a, int16x8_t __b)
{
  return __arm_vmulq_s16 (__a, __b);
}
22014 | ||
22015 | __extension__ extern __inline int16x8_t | |
22016 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
22017 | __arm_vmulq (int16x8_t __a, int16_t __b) | |
22018 | { | |
22019 | return __arm_vmulq_n_s16 (__a, __b); | |
22020 | } | |
22021 | ||
22022 | __extension__ extern __inline int32x4_t | |
22023 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
22024 | __arm_vmulltq_int (int16x8_t __a, int16x8_t __b) | |
22025 | { | |
22026 | return __arm_vmulltq_int_s16 (__a, __b); | |
22027 | } | |
22028 | ||
22029 | __extension__ extern __inline int32x4_t | |
22030 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
22031 | __arm_vmullbq_int (int16x8_t __a, int16x8_t __b) | |
22032 | { | |
22033 | return __arm_vmullbq_int_s16 (__a, __b); | |
22034 | } | |
22035 | ||
22036 | __extension__ extern __inline int16x8_t | |
22037 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
22038 | __arm_vmulhq (int16x8_t __a, int16x8_t __b) | |
22039 | { | |
22040 | return __arm_vmulhq_s16 (__a, __b); | |
22041 | } | |
22042 | ||
22043 | __extension__ extern __inline int32_t | |
22044 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
22045 | __arm_vmlsdavxq (int16x8_t __a, int16x8_t __b) | |
22046 | { | |
22047 | return __arm_vmlsdavxq_s16 (__a, __b); | |
22048 | } | |
22049 | ||
22050 | __extension__ extern __inline int32_t | |
22051 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
22052 | __arm_vmlsdavq (int16x8_t __a, int16x8_t __b) | |
22053 | { | |
22054 | return __arm_vmlsdavq_s16 (__a, __b); | |
22055 | } | |
22056 | ||
22057 | __extension__ extern __inline int32_t | |
22058 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
22059 | __arm_vmladavxq (int16x8_t __a, int16x8_t __b) | |
22060 | { | |
22061 | return __arm_vmladavxq_s16 (__a, __b); | |
22062 | } | |
22063 | ||
22064 | __extension__ extern __inline int32_t | |
22065 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
22066 | __arm_vmladavq (int16x8_t __a, int16x8_t __b) | |
22067 | { | |
22068 | return __arm_vmladavq_s16 (__a, __b); | |
22069 | } | |
22070 | ||
22071 | __extension__ extern __inline int16_t | |
22072 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
22073 | __arm_vminvq (int16_t __a, int16x8_t __b) | |
22074 | { | |
22075 | return __arm_vminvq_s16 (__a, __b); | |
22076 | } | |
22077 | ||
22078 | __extension__ extern __inline int16x8_t | |
22079 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
22080 | __arm_vminq (int16x8_t __a, int16x8_t __b) | |
22081 | { | |
22082 | return __arm_vminq_s16 (__a, __b); | |
22083 | } | |
22084 | ||
22085 | __extension__ extern __inline int16_t | |
22086 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
22087 | __arm_vmaxvq (int16_t __a, int16x8_t __b) | |
22088 | { | |
22089 | return __arm_vmaxvq_s16 (__a, __b); | |
22090 | } | |
22091 | ||
22092 | __extension__ extern __inline int16x8_t | |
22093 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
22094 | __arm_vmaxq (int16x8_t __a, int16x8_t __b) | |
22095 | { | |
22096 | return __arm_vmaxq_s16 (__a, __b); | |
22097 | } | |
22098 | ||
22099 | __extension__ extern __inline int16x8_t | |
22100 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
22101 | __arm_vhsubq (int16x8_t __a, int16x8_t __b) | |
22102 | { | |
22103 | return __arm_vhsubq_s16 (__a, __b); | |
22104 | } | |
22105 | ||
22106 | __extension__ extern __inline int16x8_t | |
22107 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
22108 | __arm_vhsubq (int16x8_t __a, int16_t __b) | |
22109 | { | |
22110 | return __arm_vhsubq_n_s16 (__a, __b); | |
22111 | } | |
22112 | ||
22113 | __extension__ extern __inline int16x8_t | |
22114 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
22115 | __arm_vhcaddq_rot90 (int16x8_t __a, int16x8_t __b) | |
22116 | { | |
22117 | return __arm_vhcaddq_rot90_s16 (__a, __b); | |
22118 | } | |
22119 | ||
22120 | __extension__ extern __inline int16x8_t | |
22121 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
22122 | __arm_vhcaddq_rot270 (int16x8_t __a, int16x8_t __b) | |
22123 | { | |
22124 | return __arm_vhcaddq_rot270_s16 (__a, __b); | |
22125 | } | |
22126 | ||
22127 | __extension__ extern __inline int16x8_t | |
22128 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
22129 | __arm_vhaddq (int16x8_t __a, int16x8_t __b) | |
22130 | { | |
22131 | return __arm_vhaddq_s16 (__a, __b); | |
22132 | } | |
22133 | ||
22134 | __extension__ extern __inline int16x8_t | |
22135 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
22136 | __arm_vhaddq (int16x8_t __a, int16_t __b) | |
22137 | { | |
22138 | return __arm_vhaddq_n_s16 (__a, __b); | |
22139 | } | |
22140 | ||
22141 | __extension__ extern __inline int16x8_t | |
22142 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
22143 | __arm_veorq (int16x8_t __a, int16x8_t __b) | |
22144 | { | |
22145 | return __arm_veorq_s16 (__a, __b); | |
22146 | } | |
22147 | ||
22148 | __extension__ extern __inline int16x8_t | |
22149 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
22150 | __arm_vcaddq_rot90 (int16x8_t __a, int16x8_t __b) | |
22151 | { | |
22152 | return __arm_vcaddq_rot90_s16 (__a, __b); | |
22153 | } | |
22154 | ||
22155 | __extension__ extern __inline int16x8_t | |
22156 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
22157 | __arm_vcaddq_rot270 (int16x8_t __a, int16x8_t __b) | |
22158 | { | |
22159 | return __arm_vcaddq_rot270_s16 (__a, __b); | |
22160 | } | |
22161 | ||
22162 | __extension__ extern __inline int16x8_t | |
22163 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
22164 | __arm_vbrsrq (int16x8_t __a, int32_t __b) | |
22165 | { | |
22166 | return __arm_vbrsrq_n_s16 (__a, __b); | |
22167 | } | |
22168 | ||
22169 | __extension__ extern __inline int16x8_t | |
22170 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
22171 | __arm_vbicq (int16x8_t __a, int16x8_t __b) | |
22172 | { | |
22173 | return __arm_vbicq_s16 (__a, __b); | |
22174 | } | |
22175 | ||
22176 | __extension__ extern __inline int16x8_t | |
22177 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
22178 | __arm_vandq (int16x8_t __a, int16x8_t __b) | |
22179 | { | |
22180 | return __arm_vandq_s16 (__a, __b); | |
22181 | } | |
22182 | ||
22183 | __extension__ extern __inline int32_t | |
22184 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
22185 | __arm_vaddvaq (int32_t __a, int16x8_t __b) | |
22186 | { | |
22187 | return __arm_vaddvaq_s16 (__a, __b); | |
22188 | } | |
22189 | ||
22190 | __extension__ extern __inline int16x8_t | |
22191 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
22192 | __arm_vaddq (int16x8_t __a, int16_t __b) | |
22193 | { | |
22194 | return __arm_vaddq_n_s16 (__a, __b); | |
22195 | } | |
22196 | ||
22197 | __extension__ extern __inline int16x8_t | |
22198 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
22199 | __arm_vabdq (int16x8_t __a, int16x8_t __b) | |
22200 | { | |
22201 | return __arm_vabdq_s16 (__a, __b); | |
22202 | } | |
22203 | ||
22204 | __extension__ extern __inline int16x8_t | |
22205 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
22206 | __arm_vshlq_n (int16x8_t __a, const int __imm) | |
22207 | { | |
22208 | return __arm_vshlq_n_s16 (__a, __imm); | |
22209 | } | |
22210 | ||
22211 | __extension__ extern __inline int16x8_t | |
22212 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
22213 | __arm_vrshrq (int16x8_t __a, const int __imm) | |
22214 | { | |
22215 | return __arm_vrshrq_n_s16 (__a, __imm); | |
22216 | } | |
22217 | ||
22218 | __extension__ extern __inline int16x8_t | |
22219 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
22220 | __arm_vqshlq_n (int16x8_t __a, const int __imm) | |
22221 | { | |
22222 | return __arm_vqshlq_n_s16 (__a, __imm); | |
22223 | } | |
22224 | ||
/* Polymorphic MVE overloads for uint32x4_t operands.

   Trivial forwarders to the _u32-suffixed implementations; scalar second
   operands select the _n_ variant.  Comparison overloads (vcmpneq,
   vcmphiq, vcmpeqq, vcmpcsq) return an mve_pred16_t lane mask.  Shift
   counts are signed (int32x4_t / int32_t / const int) even for unsigned
   data.  The vmina/vmaxa/vminav/vmaxav group mixes an unsigned
   accumulator with a signed int32x4_t input and forwards to the _s32
   implementation, mirroring the intrinsic naming.  vaddvq_p is the
   predicated across-vector add.  */

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq (uint32x4_t __a, uint32x4_t __b)
{
  return __arm_vsubq_u32 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq (uint32x4_t __a, uint32_t __b)
{
  return __arm_vsubq_n_u32 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq (uint32x4_t __a, uint32x4_t __b)
{
  return __arm_vrmulhq_u32 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq (uint32x4_t __a, uint32x4_t __b)
{
  return __arm_vrhaddq_u32 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq (uint32x4_t __a, uint32x4_t __b)
{
  return __arm_vqsubq_u32 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq (uint32x4_t __a, uint32_t __b)
{
  return __arm_vqsubq_n_u32 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq (uint32x4_t __a, uint32x4_t __b)
{
  return __arm_vqaddq_u32 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq (uint32x4_t __a, uint32_t __b)
{
  return __arm_vqaddq_n_u32 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq (uint32x4_t __a, uint32x4_t __b)
{
  return __arm_vorrq_u32 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq (uint32x4_t __a, uint32x4_t __b)
{
  return __arm_vornq_u32 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq (uint32x4_t __a, uint32x4_t __b)
{
  return __arm_vmulq_u32 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq (uint32x4_t __a, uint32_t __b)
{
  return __arm_vmulq_n_u32 (__a, __b);
}

/* Widening multiplies: result element width doubles to 64 bits.  */
__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int (uint32x4_t __a, uint32x4_t __b)
{
  return __arm_vmulltq_int_u32 (__a, __b);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_int (uint32x4_t __a, uint32x4_t __b)
{
  return __arm_vmullbq_int_u32 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq (uint32x4_t __a, uint32x4_t __b)
{
  return __arm_vmulhq_u32 (__a, __b);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavq (uint32x4_t __a, uint32x4_t __b)
{
  return __arm_vmladavq_u32 (__a, __b);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminvq (uint32_t __a, uint32x4_t __b)
{
  return __arm_vminvq_u32 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq (uint32x4_t __a, uint32x4_t __b)
{
  return __arm_vminq_u32 (__a, __b);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxvq (uint32_t __a, uint32x4_t __b)
{
  return __arm_vmaxvq_u32 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq (uint32x4_t __a, uint32x4_t __b)
{
  return __arm_vmaxq_u32 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq (uint32x4_t __a, uint32x4_t __b)
{
  return __arm_vhsubq_u32 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq (uint32x4_t __a, uint32_t __b)
{
  return __arm_vhsubq_n_u32 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq (uint32x4_t __a, uint32x4_t __b)
{
  return __arm_vhaddq_u32 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq (uint32x4_t __a, uint32_t __b)
{
  return __arm_vhaddq_n_u32 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq (uint32x4_t __a, uint32x4_t __b)
{
  return __arm_veorq_u32 (__a, __b);
}

/* Unsigned comparisons, producing a 16-bit lane-predicate mask.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq (uint32x4_t __a, uint32_t __b)
{
  return __arm_vcmpneq_n_u32 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmphiq (uint32x4_t __a, uint32x4_t __b)
{
  return __arm_vcmphiq_u32 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmphiq (uint32x4_t __a, uint32_t __b)
{
  return __arm_vcmphiq_n_u32 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq (uint32x4_t __a, uint32x4_t __b)
{
  return __arm_vcmpeqq_u32 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq (uint32x4_t __a, uint32_t __b)
{
  return __arm_vcmpeqq_n_u32 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpcsq (uint32x4_t __a, uint32x4_t __b)
{
  return __arm_vcmpcsq_u32 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpcsq (uint32x4_t __a, uint32_t __b)
{
  return __arm_vcmpcsq_n_u32 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90 (uint32x4_t __a, uint32x4_t __b)
{
  return __arm_vcaddq_rot90_u32 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270 (uint32x4_t __a, uint32x4_t __b)
{
  return __arm_vcaddq_rot270_u32 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq (uint32x4_t __a, uint32x4_t __b)
{
  return __arm_vbicq_u32 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq (uint32x4_t __a, uint32x4_t __b)
{
  return __arm_vandq_u32 (__a, __b);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvq_p (uint32x4_t __a, mve_pred16_t __p)
{
  return __arm_vaddvq_p_u32 (__a, __p);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvaq (uint32_t __a, uint32x4_t __b)
{
  return __arm_vaddvaq_u32 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq (uint32x4_t __a, uint32_t __b)
{
  return __arm_vaddq_n_u32 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq (uint32x4_t __a, uint32x4_t __b)
{
  return __arm_vabdq_u32 (__a, __b);
}

/* Shifts of unsigned data still take signed shift counts.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_r (uint32x4_t __a, int32_t __b)
{
  return __arm_vshlq_r_u32 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq (uint32x4_t __a, int32x4_t __b)
{
  return __arm_vrshlq_u32 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq (uint32x4_t __a, int32_t __b)
{
  return __arm_vrshlq_n_u32 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq (uint32x4_t __a, int32x4_t __b)
{
  return __arm_vqshlq_u32 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_r (uint32x4_t __a, int32_t __b)
{
  return __arm_vqshlq_r_u32 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq (uint32x4_t __a, int32x4_t __b)
{
  return __arm_vqrshlq_u32 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq (uint32x4_t __a, int32_t __b)
{
  return __arm_vqrshlq_n_u32 (__a, __b);
}

/* Mixed-sign min/max-absolute family: unsigned result, signed input,
   forwarded to the _s32 implementations.  */
__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminavq (uint32_t __a, int32x4_t __b)
{
  return __arm_vminavq_s32 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminaq (uint32x4_t __a, int32x4_t __b)
{
  return __arm_vminaq_s32 (__a, __b);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxavq (uint32_t __a, int32x4_t __b)
{
  return __arm_vmaxavq_s32 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxaq (uint32x4_t __a, int32x4_t __b)
{
  return __arm_vmaxaq_s32 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq (uint32x4_t __a, int32_t __b)
{
  return __arm_vbrsrq_n_u32 (__a, __b);
}

/* Immediate-count shifts: __imm must be a compile-time constant.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_n (uint32x4_t __a, const int __imm)
{
  return __arm_vshlq_n_u32 (__a, __imm);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrq (uint32x4_t __a, const int __imm)
{
  return __arm_vrshrq_n_u32 (__a, __imm);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_n (uint32x4_t __a, const int __imm)
{
  return __arm_vqshlq_n_u32 (__a, __imm);
}
22609 | ||
/* Polymorphic MVE overloads for int32x4_t operands.

   Same forwarding pattern as above, _s32 suffix.  The comparison group
   (vcmpneq/vcmpltq/vcmpleq/vcmpgtq/vcmpgeq/vcmpeqq) returns an
   mve_pred16_t lane mask; each has a vector/vector and a vector/scalar
   (_n_) form except vcmpneq, which only shows its _n_ form here.
   vqshluq is the signed-input, unsigned-result saturating shift and so
   returns uint32x4_t; vaddvq_p is the predicated across-vector add.  */

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq (int32x4_t __a, int32_t __b)
{
  return __arm_vcmpneq_n_s32 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq (int32x4_t __a, int32x4_t __b)
{
  return __arm_vcmpltq_s32 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq (int32x4_t __a, int32_t __b)
{
  return __arm_vcmpltq_n_s32 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq (int32x4_t __a, int32x4_t __b)
{
  return __arm_vcmpleq_s32 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq (int32x4_t __a, int32_t __b)
{
  return __arm_vcmpleq_n_s32 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq (int32x4_t __a, int32x4_t __b)
{
  return __arm_vcmpgtq_s32 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq (int32x4_t __a, int32_t __b)
{
  return __arm_vcmpgtq_n_s32 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq (int32x4_t __a, int32x4_t __b)
{
  return __arm_vcmpgeq_s32 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq (int32x4_t __a, int32_t __b)
{
  return __arm_vcmpgeq_n_s32 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq (int32x4_t __a, int32x4_t __b)
{
  return __arm_vcmpeqq_s32 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq (int32x4_t __a, int32_t __b)
{
  return __arm_vcmpeqq_n_s32 (__a, __b);
}

/* Signed input, unsigned saturated result.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshluq (int32x4_t __a, const int __imm)
{
  return __arm_vqshluq_n_s32 (__a, __imm);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvq_p (int32x4_t __a, mve_pred16_t __p)
{
  return __arm_vaddvq_p_s32 (__a, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq (int32x4_t __a, int32x4_t __b)
{
  return __arm_vsubq_s32 (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq (int32x4_t __a, int32_t __b)
{
  return __arm_vsubq_n_s32 (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_r (int32x4_t __a, int32_t __b)
{
  return __arm_vshlq_r_s32 (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq (int32x4_t __a, int32x4_t __b)
{
  return __arm_vrshlq_s32 (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq (int32x4_t __a, int32_t __b)
{
  return __arm_vrshlq_n_s32 (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq (int32x4_t __a, int32x4_t __b)
{
  return __arm_vrmulhq_s32 (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq (int32x4_t __a, int32x4_t __b)
{
  return __arm_vrhaddq_s32 (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq (int32x4_t __a, int32x4_t __b)
{
  return __arm_vqsubq_s32 (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq (int32x4_t __a, int32_t __b)
{
  return __arm_vqsubq_n_s32 (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq (int32x4_t __a, int32x4_t __b)
{
  return __arm_vqshlq_s32 (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_r (int32x4_t __a, int32_t __b)
{
  return __arm_vqshlq_r_s32 (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq (int32x4_t __a, int32x4_t __b)
{
  return __arm_vqrshlq_s32 (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq (int32x4_t __a, int32_t __b)
{
  return __arm_vqrshlq_n_s32 (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmulhq (int32x4_t __a, int32x4_t __b)
{
  return __arm_vqrdmulhq_s32 (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmulhq (int32x4_t __a, int32_t __b)
{
  return __arm_vqrdmulhq_n_s32 (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulhq (int32x4_t __a, int32x4_t __b)
{
  return __arm_vqdmulhq_s32 (__a, __b);
}
22812 | ||
22813 | __extension__ extern __inline int32x4_t | |
22814 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
22815 | __arm_vqdmulhq (int32x4_t __a, int32_t __b) | |
22816 | { | |
22817 | return __arm_vqdmulhq_n_s32 (__a, __b); | |
22818 | } | |
22819 | ||
22820 | __extension__ extern __inline int32x4_t | |
22821 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
22822 | __arm_vqaddq (int32x4_t __a, int32x4_t __b) | |
22823 | { | |
22824 | return __arm_vqaddq_s32 (__a, __b); | |
22825 | } | |
22826 | ||
22827 | __extension__ extern __inline int32x4_t | |
22828 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
22829 | __arm_vqaddq (int32x4_t __a, int32_t __b) | |
22830 | { | |
22831 | return __arm_vqaddq_n_s32 (__a, __b); | |
22832 | } | |
22833 | ||
22834 | __extension__ extern __inline int32x4_t | |
22835 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
22836 | __arm_vorrq (int32x4_t __a, int32x4_t __b) | |
22837 | { | |
22838 | return __arm_vorrq_s32 (__a, __b); | |
22839 | } | |
22840 | ||
22841 | __extension__ extern __inline int32x4_t | |
22842 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
22843 | __arm_vornq (int32x4_t __a, int32x4_t __b) | |
22844 | { | |
22845 | return __arm_vornq_s32 (__a, __b); | |
22846 | } | |
22847 | ||
22848 | __extension__ extern __inline int32x4_t | |
22849 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
22850 | __arm_vmulq (int32x4_t __a, int32x4_t __b) | |
22851 | { | |
22852 | return __arm_vmulq_s32 (__a, __b); | |
22853 | } | |
22854 | ||
22855 | __extension__ extern __inline int32x4_t | |
22856 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
22857 | __arm_vmulq (int32x4_t __a, int32_t __b) | |
22858 | { | |
22859 | return __arm_vmulq_n_s32 (__a, __b); | |
22860 | } | |
22861 | ||
22862 | __extension__ extern __inline int64x2_t | |
22863 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
22864 | __arm_vmulltq_int (int32x4_t __a, int32x4_t __b) | |
22865 | { | |
22866 | return __arm_vmulltq_int_s32 (__a, __b); | |
22867 | } | |
22868 | ||
22869 | __extension__ extern __inline int64x2_t | |
22870 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
22871 | __arm_vmullbq_int (int32x4_t __a, int32x4_t __b) | |
22872 | { | |
22873 | return __arm_vmullbq_int_s32 (__a, __b); | |
22874 | } | |
22875 | ||
22876 | __extension__ extern __inline int32x4_t | |
22877 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
22878 | __arm_vmulhq (int32x4_t __a, int32x4_t __b) | |
22879 | { | |
22880 | return __arm_vmulhq_s32 (__a, __b); | |
22881 | } | |
22882 | ||
22883 | __extension__ extern __inline int32_t | |
22884 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
22885 | __arm_vmlsdavxq (int32x4_t __a, int32x4_t __b) | |
22886 | { | |
22887 | return __arm_vmlsdavxq_s32 (__a, __b); | |
22888 | } | |
22889 | ||
22890 | __extension__ extern __inline int32_t | |
22891 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
22892 | __arm_vmlsdavq (int32x4_t __a, int32x4_t __b) | |
22893 | { | |
22894 | return __arm_vmlsdavq_s32 (__a, __b); | |
22895 | } | |
22896 | ||
22897 | __extension__ extern __inline int32_t | |
22898 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
22899 | __arm_vmladavxq (int32x4_t __a, int32x4_t __b) | |
22900 | { | |
22901 | return __arm_vmladavxq_s32 (__a, __b); | |
22902 | } | |
22903 | ||
22904 | __extension__ extern __inline int32_t | |
22905 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
22906 | __arm_vmladavq (int32x4_t __a, int32x4_t __b) | |
22907 | { | |
22908 | return __arm_vmladavq_s32 (__a, __b); | |
22909 | } | |
22910 | ||
22911 | __extension__ extern __inline int32_t | |
22912 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
22913 | __arm_vminvq (int32_t __a, int32x4_t __b) | |
22914 | { | |
22915 | return __arm_vminvq_s32 (__a, __b); | |
22916 | } | |
22917 | ||
22918 | __extension__ extern __inline int32x4_t | |
22919 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
22920 | __arm_vminq (int32x4_t __a, int32x4_t __b) | |
22921 | { | |
22922 | return __arm_vminq_s32 (__a, __b); | |
22923 | } | |
22924 | ||
22925 | __extension__ extern __inline int32_t | |
22926 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
22927 | __arm_vmaxvq (int32_t __a, int32x4_t __b) | |
22928 | { | |
22929 | return __arm_vmaxvq_s32 (__a, __b); | |
22930 | } | |
22931 | ||
22932 | __extension__ extern __inline int32x4_t | |
22933 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
22934 | __arm_vmaxq (int32x4_t __a, int32x4_t __b) | |
22935 | { | |
22936 | return __arm_vmaxq_s32 (__a, __b); | |
22937 | } | |
22938 | ||
22939 | __extension__ extern __inline int32x4_t | |
22940 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
22941 | __arm_vhsubq (int32x4_t __a, int32x4_t __b) | |
22942 | { | |
22943 | return __arm_vhsubq_s32 (__a, __b); | |
22944 | } | |
22945 | ||
22946 | __extension__ extern __inline int32x4_t | |
22947 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
22948 | __arm_vhsubq (int32x4_t __a, int32_t __b) | |
22949 | { | |
22950 | return __arm_vhsubq_n_s32 (__a, __b); | |
22951 | } | |
22952 | ||
22953 | __extension__ extern __inline int32x4_t | |
22954 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
22955 | __arm_vhcaddq_rot90 (int32x4_t __a, int32x4_t __b) | |
22956 | { | |
22957 | return __arm_vhcaddq_rot90_s32 (__a, __b); | |
22958 | } | |
22959 | ||
22960 | __extension__ extern __inline int32x4_t | |
22961 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
22962 | __arm_vhcaddq_rot270 (int32x4_t __a, int32x4_t __b) | |
22963 | { | |
22964 | return __arm_vhcaddq_rot270_s32 (__a, __b); | |
22965 | } | |
22966 | ||
22967 | __extension__ extern __inline int32x4_t | |
22968 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
22969 | __arm_vhaddq (int32x4_t __a, int32x4_t __b) | |
22970 | { | |
22971 | return __arm_vhaddq_s32 (__a, __b); | |
22972 | } | |
22973 | ||
22974 | __extension__ extern __inline int32x4_t | |
22975 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
22976 | __arm_vhaddq (int32x4_t __a, int32_t __b) | |
22977 | { | |
22978 | return __arm_vhaddq_n_s32 (__a, __b); | |
22979 | } | |
22980 | ||
22981 | __extension__ extern __inline int32x4_t | |
22982 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
22983 | __arm_veorq (int32x4_t __a, int32x4_t __b) | |
22984 | { | |
22985 | return __arm_veorq_s32 (__a, __b); | |
22986 | } | |
22987 | ||
22988 | __extension__ extern __inline int32x4_t | |
22989 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
22990 | __arm_vcaddq_rot90 (int32x4_t __a, int32x4_t __b) | |
22991 | { | |
22992 | return __arm_vcaddq_rot90_s32 (__a, __b); | |
22993 | } | |
22994 | ||
22995 | __extension__ extern __inline int32x4_t | |
22996 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
22997 | __arm_vcaddq_rot270 (int32x4_t __a, int32x4_t __b) | |
22998 | { | |
22999 | return __arm_vcaddq_rot270_s32 (__a, __b); | |
23000 | } | |
23001 | ||
23002 | __extension__ extern __inline int32x4_t | |
23003 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
23004 | __arm_vbrsrq (int32x4_t __a, int32_t __b) | |
23005 | { | |
23006 | return __arm_vbrsrq_n_s32 (__a, __b); | |
23007 | } | |
23008 | ||
23009 | __extension__ extern __inline int32x4_t | |
23010 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
23011 | __arm_vbicq (int32x4_t __a, int32x4_t __b) | |
23012 | { | |
23013 | return __arm_vbicq_s32 (__a, __b); | |
23014 | } | |
23015 | ||
23016 | __extension__ extern __inline int32x4_t | |
23017 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
23018 | __arm_vandq (int32x4_t __a, int32x4_t __b) | |
23019 | { | |
23020 | return __arm_vandq_s32 (__a, __b); | |
23021 | } | |
23022 | ||
23023 | __extension__ extern __inline int32_t | |
23024 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
23025 | __arm_vaddvaq (int32_t __a, int32x4_t __b) | |
23026 | { | |
23027 | return __arm_vaddvaq_s32 (__a, __b); | |
23028 | } | |
23029 | ||
23030 | __extension__ extern __inline int32x4_t | |
23031 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
23032 | __arm_vaddq (int32x4_t __a, int32_t __b) | |
23033 | { | |
23034 | return __arm_vaddq_n_s32 (__a, __b); | |
23035 | } | |
23036 | ||
23037 | __extension__ extern __inline int32x4_t | |
23038 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
23039 | __arm_vabdq (int32x4_t __a, int32x4_t __b) | |
23040 | { | |
23041 | return __arm_vabdq_s32 (__a, __b); | |
23042 | } | |
23043 | ||
23044 | __extension__ extern __inline int32x4_t | |
23045 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
23046 | __arm_vshlq_n (int32x4_t __a, const int __imm) | |
23047 | { | |
23048 | return __arm_vshlq_n_s32 (__a, __imm); | |
23049 | } | |
23050 | ||
23051 | __extension__ extern __inline int32x4_t | |
23052 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
23053 | __arm_vrshrq (int32x4_t __a, const int __imm) | |
23054 | { | |
23055 | return __arm_vrshrq_n_s32 (__a, __imm); | |
23056 | } | |
23057 | ||
23058 | __extension__ extern __inline int32x4_t | |
23059 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
23060 | __arm_vqshlq_n (int32x4_t __a, const int __imm) | |
23061 | { | |
23062 | return __arm_vqshlq_n_s32 (__a, __imm); | |
23063 | } | |
23064 | ||
/* Overload wrappers for the unsigned 16-bit element group: narrowing
   moves u16 -> u8 (top/bottom lanes of the destination), polynomial
   widening multiplies on p8, widening shifts u8 -> u16, and
   immediate-operand bitwise OR/BIC on uint16x8_t.  */

/* Saturating narrow of each u16 lane into the top ("t") or bottom
   ("b") u8 lanes of __a.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqmovntq (uint8x16_t __a, uint16x8_t __b)
{
  return __arm_vqmovntq_u16 (__a, __b);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqmovnbq (uint8x16_t __a, uint16x8_t __b)
{
  return __arm_vqmovnbq_u16 (__a, __b);
}

/* Polynomial (carry-less) widening multiply of top/bottom p8 lanes.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_poly (uint8x16_t __a, uint8x16_t __b)
{
  return __arm_vmulltq_poly_p8 (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_poly (uint8x16_t __a, uint8x16_t __b)
{
  return __arm_vmullbq_poly_p8 (__a, __b);
}

/* Non-saturating narrowing moves into top/bottom lanes.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovntq (uint8x16_t __a, uint16x8_t __b)
{
  return __arm_vmovntq_u16 (__a, __b);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovnbq (uint8x16_t __a, uint16x8_t __b)
{
  return __arm_vmovnbq_u16 (__a, __b);
}

/* Long multiply-accumulate reduction across u16 lanes.  */
__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaldavq (uint16x8_t __a, uint16x8_t __b)
{
  return __arm_vmlaldavq_u16 (__a, __b);
}

/* Saturating narrow of SIGNED s16 lanes to UNSIGNED u8 lanes.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqmovuntq (uint8x16_t __a, int16x8_t __b)
{
  return __arm_vqmovuntq_s16 (__a, __b);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqmovunbq (uint8x16_t __a, int16x8_t __b)
{
  return __arm_vqmovunbq_s16 (__a, __b);
}

/* Widening shift left by immediate of top/bottom u8 lanes.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlltq (uint8x16_t __a, const int __imm)
{
  return __arm_vshlltq_n_u8 (__a, __imm);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshllbq (uint8x16_t __a, const int __imm)
{
  return __arm_vshllbq_n_u8 (__a, __imm);
}

/* Bitwise OR / bit-clear with an immediate operand.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq (uint16x8_t __a, const int __imm)
{
  return __arm_vorrq_n_u16 (__a, __imm);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq (uint16x8_t __a, const int __imm)
{
  return __arm_vbicq_n_u16 (__a, __imm);
}
23155 | ||
/* Overload wrappers for the signed 16-bit element group: narrowing
   moves s16 -> s8, saturating doubling widening multiplies
   s16 -> s32, long multiply-accumulate reductions, widening shifts
   s8 -> s16, and immediate-operand bitwise OR/BIC on int16x8_t.  */

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqmovntq (int8x16_t __a, int16x8_t __b)
{
  return __arm_vqmovntq_s16 (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqmovnbq (int8x16_t __a, int16x8_t __b)
{
  return __arm_vqmovnbq_s16 (__a, __b);
}

/* Saturating doubling widening multiply of top/bottom lanes; the
   "_n" overloads take a scalar multiplier.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulltq (int16x8_t __a, int16x8_t __b)
{
  return __arm_vqdmulltq_s16 (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulltq (int16x8_t __a, int16_t __b)
{
  return __arm_vqdmulltq_n_s16 (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmullbq (int16x8_t __a, int16x8_t __b)
{
  return __arm_vqdmullbq_s16 (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmullbq (int16x8_t __a, int16_t __b)
{
  return __arm_vqdmullbq_n_s16 (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovntq (int8x16_t __a, int16x8_t __b)
{
  return __arm_vmovntq_s16 (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovnbq (int8x16_t __a, int16x8_t __b)
{
  return __arm_vmovnbq_s16 (__a, __b);
}

/* Long (64-bit result) multiply-accumulate/subtract reductions; "x"
   variants exchange adjacent lane pairs of the second operand.  */
__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsldavxq (int16x8_t __a, int16x8_t __b)
{
  return __arm_vmlsldavxq_s16 (__a, __b);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsldavq (int16x8_t __a, int16x8_t __b)
{
  return __arm_vmlsldavq_s16 (__a, __b);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaldavxq (int16x8_t __a, int16x8_t __b)
{
  return __arm_vmlaldavxq_s16 (__a, __b);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaldavq (int16x8_t __a, int16x8_t __b)
{
  return __arm_vmlaldavq_s16 (__a, __b);
}

/* Widening shift left by immediate of top/bottom s8 lanes.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlltq (int8x16_t __a, const int __imm)
{
  return __arm_vshlltq_n_s8 (__a, __imm);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshllbq (int8x16_t __a, const int __imm)
{
  return __arm_vshllbq_n_s8 (__a, __imm);
}

/* Bitwise OR / bit-clear with an immediate operand.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq (int16x8_t __a, const int __imm)
{
  return __arm_vorrq_n_s16 (__a, __imm);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq (int16x8_t __a, const int __imm)
{
  return __arm_vbicq_n_s16 (__a, __imm);
}
23267 | ||
/* Overload wrappers for the unsigned 32-bit element group: narrowing
   moves u32 -> u16, polynomial widening multiplies on p16, widening
   shifts u16 -> u32, and immediate-operand bitwise OR/BIC on
   uint32x4_t.  */

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqmovntq (uint16x8_t __a, uint32x4_t __b)
{
  return __arm_vqmovntq_u32 (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqmovnbq (uint16x8_t __a, uint32x4_t __b)
{
  return __arm_vqmovnbq_u32 (__a, __b);
}

/* Polynomial (carry-less) widening multiply of top/bottom p16 lanes.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_poly (uint16x8_t __a, uint16x8_t __b)
{
  return __arm_vmulltq_poly_p16 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_poly (uint16x8_t __a, uint16x8_t __b)
{
  return __arm_vmullbq_poly_p16 (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovntq (uint16x8_t __a, uint32x4_t __b)
{
  return __arm_vmovntq_u32 (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovnbq (uint16x8_t __a, uint32x4_t __b)
{
  return __arm_vmovnbq_u32 (__a, __b);
}

/* Long multiply-accumulate reduction across u32 lanes.  */
__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaldavq (uint32x4_t __a, uint32x4_t __b)
{
  return __arm_vmlaldavq_u32 (__a, __b);
}

/* Saturating narrow of SIGNED s32 lanes to UNSIGNED u16 lanes.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqmovuntq (uint16x8_t __a, int32x4_t __b)
{
  return __arm_vqmovuntq_s32 (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqmovunbq (uint16x8_t __a, int32x4_t __b)
{
  return __arm_vqmovunbq_s32 (__a, __b);
}

/* Widening shift left by immediate of top/bottom u16 lanes.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlltq (uint16x8_t __a, const int __imm)
{
  return __arm_vshlltq_n_u16 (__a, __imm);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshllbq (uint16x8_t __a, const int __imm)
{
  return __arm_vshllbq_n_u16 (__a, __imm);
}

/* Bitwise OR / bit-clear with an immediate operand.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq (uint32x4_t __a, const int __imm)
{
  return __arm_vorrq_n_u32 (__a, __imm);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq (uint32x4_t __a, const int __imm)
{
  return __arm_vbicq_n_u32 (__a, __imm);
}
23358 | ||
/* Overload wrappers for the signed 32-bit element group: narrowing
   moves s32 -> s16, saturating doubling widening multiplies
   s32 -> s64, long multiply-accumulate reductions, widening shifts
   s16 -> s32, and immediate-operand bitwise OR/BIC on int32x4_t.  */

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqmovntq (int16x8_t __a, int32x4_t __b)
{
  return __arm_vqmovntq_s32 (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqmovnbq (int16x8_t __a, int32x4_t __b)
{
  return __arm_vqmovnbq_s32 (__a, __b);
}

/* Saturating doubling widening multiply of top/bottom lanes; the
   "_n" overloads take a scalar multiplier.  */
__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulltq (int32x4_t __a, int32x4_t __b)
{
  return __arm_vqdmulltq_s32 (__a, __b);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulltq (int32x4_t __a, int32_t __b)
{
  return __arm_vqdmulltq_n_s32 (__a, __b);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmullbq (int32x4_t __a, int32x4_t __b)
{
  return __arm_vqdmullbq_s32 (__a, __b);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmullbq (int32x4_t __a, int32_t __b)
{
  return __arm_vqdmullbq_n_s32 (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovntq (int16x8_t __a, int32x4_t __b)
{
  return __arm_vmovntq_s32 (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovnbq (int16x8_t __a, int32x4_t __b)
{
  return __arm_vmovnbq_s32 (__a, __b);
}

/* Long (64-bit result) multiply-accumulate/subtract reductions; "x"
   variants exchange adjacent lane pairs of the second operand.  */
__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsldavxq (int32x4_t __a, int32x4_t __b)
{
  return __arm_vmlsldavxq_s32 (__a, __b);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsldavq (int32x4_t __a, int32x4_t __b)
{
  return __arm_vmlsldavq_s32 (__a, __b);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaldavxq (int32x4_t __a, int32x4_t __b)
{
  return __arm_vmlaldavxq_s32 (__a, __b);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaldavq (int32x4_t __a, int32x4_t __b)
{
  return __arm_vmlaldavq_s32 (__a, __b);
}

/* Widening shift left by immediate of top/bottom s16 lanes.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlltq (int16x8_t __a, const int __imm)
{
  return __arm_vshlltq_n_s16 (__a, __imm);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshllbq (int16x8_t __a, const int __imm)
{
  return __arm_vshllbq_n_s16 (__a, __imm);
}

/* Bitwise OR / bit-clear with an immediate operand.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq (int32x4_t __a, const int __imm)
{
  return __arm_vorrq_n_s32 (__a, __imm);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq (int32x4_t __a, const int __imm)
{
  return __arm_vbicq_n_s32 (__a, __imm);
}
23470 | ||
/* Overload wrappers for 64-bit accumulating reductions over 32-bit
   lanes: the "rml*davh" family (rounding long multiply-accumulate/
   subtract returning the high part) and long add-accumulate
   ("addlva") folding a vector sum onto a 64-bit scalar.  */

__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmlaldavhq (uint32x4_t __a, uint32x4_t __b)
{
  return __arm_vrmlaldavhq_u32 (__a, __b);
}

__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddlvaq (uint64_t __a, uint32x4_t __b)
{
  return __arm_vaddlvaq_u32 (__a, __b);
}

/* Signed variants; "x" forms exchange adjacent lane pairs of the
   second operand.  */
__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmlsldavhxq (int32x4_t __a, int32x4_t __b)
{
  return __arm_vrmlsldavhxq_s32 (__a, __b);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmlsldavhq (int32x4_t __a, int32x4_t __b)
{
  return __arm_vrmlsldavhq_s32 (__a, __b);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmlaldavhxq (int32x4_t __a, int32x4_t __b)
{
  return __arm_vrmlaldavhxq_s32 (__a, __b);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmlaldavhq (int32x4_t __a, int32x4_t __b)
{
  return __arm_vrmlaldavhq_s32 (__a, __b);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddlvaq (int64_t __a, int32x4_t __b)
{
  return __arm_vaddlvaq_s32 (__a, __b);
}
23519 | ||
/* Overload wrappers for vabavq (absolute-difference-and-accumulate
   across lanes): one overload per element type, all accumulating
   onto a uint32_t initial value __a.  */

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabavq (uint32_t __a, int8x16_t __b, int8x16_t __c)
{
  return __arm_vabavq_s8 (__a, __b, __c);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabavq (uint32_t __a, int16x8_t __b, int16x8_t __c)
{
  return __arm_vabavq_s16 (__a, __b, __c);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabavq (uint32_t __a, int32x4_t __b, int32x4_t __c)
{
  return __arm_vabavq_s32 (__a, __b, __c);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabavq (uint32_t __a, uint8x16_t __b, uint8x16_t __c)
{
  return __arm_vabavq_u8 (__a, __b, __c);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabavq (uint32_t __a, uint16x8_t __b, uint16x8_t __c)
{
  return __arm_vabavq_u16 (__a, __b, __c);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabavq (uint32_t __a, uint32x4_t __b, uint32x4_t __c)
{
  return __arm_vabavq_u32 (__a, __b, __c);
}
23561 | ||
/* Overload wrappers for predicated immediate bit-clear (vbicq_m_n):
   lanes selected by predicate __p are updated, others keep their
   previous value from __a.  */

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_m_n (int16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vbicq_m_n_s16 (__a, __imm, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_m_n (int32x4_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vbicq_m_n_s32 (__a, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_m_n (uint16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vbicq_m_n_u16 (__a, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_m_n (uint32x4_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vbicq_m_n_u32 (__a, __imm, __p);
}
23589 | ||
/* __arm_vqrshrnbq / __arm_vqrshrunbq: polymorphic narrowing-shift wrappers.
   __a supplies the narrow destination vector, __b the wide source; each
   overload forwards to the _n_<suffix> intrinsic chosen by the types of
   the pair.  The vqrshrunbq forms take a signed wide source and an
   unsigned narrow destination.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshrnbq (int8x16_t __a, int16x8_t __b, const int __imm)
{
  return __arm_vqrshrnbq_n_s16 (__a, __b, __imm);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshrnbq (uint8x16_t __a, uint16x8_t __b, const int __imm)
{
  return __arm_vqrshrnbq_n_u16 (__a, __b, __imm);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshrnbq (int16x8_t __a, int32x4_t __b, const int __imm)
{
  return __arm_vqrshrnbq_n_s32 (__a, __b, __imm);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshrnbq (uint16x8_t __a, uint32x4_t __b, const int __imm)
{
  return __arm_vqrshrnbq_n_u32 (__a, __b, __imm);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshrunbq (uint8x16_t __a, int16x8_t __b, const int __imm)
{
  return __arm_vqrshrunbq_n_s16 (__a, __b, __imm);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshrunbq (uint16x8_t __a, int32x4_t __b, const int __imm)
{
  return __arm_vqrshrunbq_n_s32 (__a, __b, __imm);
}
23631 | ||
/* __arm_vrmlaldavhaq: polymorphic wrappers taking a 64-bit accumulator
   and two 32-bit-lane vectors; signedness of the accumulator/vectors
   selects the _s32 or _u32 intrinsic.  */
__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmlaldavhaq (int64_t __a, int32x4_t __b, int32x4_t __c)
{
  return __arm_vrmlaldavhaq_s32 (__a, __b, __c);
}

__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmlaldavhaq (uint64_t __a, uint32x4_t __b, uint32x4_t __c)
{
  return __arm_vrmlaldavhaq_u32 (__a, __b, __c);
}
23645 | ||
/* __arm_vshlcq: polymorphic wrappers for the carry-variant shift.
   __b points to a uint32_t carry value that the underlying intrinsic
   reads and updates in place (note the pointer parameter); __imm must
   be a compile-time constant.  Dispatch is on the vector type of __a.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlcq (int8x16_t __a, uint32_t * __b, const int __imm)
{
  return __arm_vshlcq_s8 (__a, __b, __imm);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlcq (uint8x16_t __a, uint32_t * __b, const int __imm)
{
  return __arm_vshlcq_u8 (__a, __b, __imm);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlcq (int16x8_t __a, uint32_t * __b, const int __imm)
{
  return __arm_vshlcq_s16 (__a, __b, __imm);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlcq (uint16x8_t __a, uint32_t * __b, const int __imm)
{
  return __arm_vshlcq_u16 (__a, __b, __imm);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlcq (int32x4_t __a, uint32_t * __b, const int __imm)
{
  return __arm_vshlcq_s32 (__a, __b, __imm);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlcq (uint32x4_t __a, uint32_t * __b, const int __imm)
{
  return __arm_vshlcq_u32 (__a, __b, __imm);
}
23687 | ||
/* Polymorphic 8-bit-lane predicated wrappers: vpselq (predicate select
   between __a and __b) and vrev64q_m (predicated variant taking an
   __inactive vector for unselected lanes).  Each forwards unchanged to
   the type-suffixed intrinsic.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vpselq (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_vpselq_u8 (__a, __b, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vpselq (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vpselq_s8 (__a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_m (uint8x16_t __inactive, uint8x16_t __a, mve_pred16_t __p)
{
  return __arm_vrev64q_m_u8 (__inactive, __a, __p);
}
23708 | ||
6a90680b ASDV |
/* Polymorphic u8 wrappers.  _m forms take an __inactive vector or use a
   predicate __p; _n forms (vmlasq/vmlaq/vdupq_m) take a scalar __c/__a
   operand, hence the _n_u8 suffix on the target intrinsic.  All are pure
   dispatch — no extra behavior here.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_m (uint8x16_t __inactive, uint8x16_t __a, mve_pred16_t __p)
{
  return __arm_vmvnq_m_u8 (__inactive, __a, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlasq (uint8x16_t __a, uint8x16_t __b, uint8_t __c)
{
  return __arm_vmlasq_n_u8 (__a, __b, __c);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaq (uint8x16_t __a, uint8x16_t __b, uint8_t __c)
{
  return __arm_vmlaq_n_u8 (__a, __b, __c);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavq_p (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_vmladavq_p_u8 (__a, __b, __p);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavaq (uint32_t __a, uint8x16_t __b, uint8x16_t __c)
{
  return __arm_vmladavaq_u8 (__a, __b, __c);
}

__extension__ extern __inline uint8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminvq_p (uint8_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_vminvq_p_u8 (__a, __b, __p);
}

__extension__ extern __inline uint8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxvq_p (uint8_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_vmaxvq_p_u8 (__a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdupq_m (uint8x16_t __inactive, uint8_t __a, mve_pred16_t __p)
{
  return __arm_vdupq_m_n_u8 (__inactive, __a, __p);
}
23764 | ||
/* Polymorphic predicated u8 comparisons, all yielding an mve_pred16_t.
   Each comparison has two overloads: vector-vs-vector (suffix _u8) and
   vector-vs-scalar (suffix _n_u8) — overload resolution on __b picks
   the right one.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_m (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_vcmpneq_m_u8 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_m (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
{
  return __arm_vcmpneq_m_n_u8 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmphiq_m (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_vcmphiq_m_u8 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmphiq_m (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
{
  return __arm_vcmphiq_m_n_u8 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_m (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_vcmpeqq_m_u8 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_m (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
{
  return __arm_vcmpeqq_m_n_u8 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpcsq_m (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_vcmpcsq_m_u8 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpcsq_m (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
{
  return __arm_vcmpcsq_m_n_u8 (__a, __b, __p);
}
23820 | ||
/* Polymorphic u8 wrappers: predicated unary/reduction ops and the
   shift-insert/register-shift forms.  Note vsriq/vsliq forward to the
   _n_u8 intrinsics (immediate shift), while the *_m_r/*_m_n register
   shifts take an int32_t shift amount __b.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclzq_m (uint8x16_t __inactive, uint8x16_t __a, mve_pred16_t __p)
{
  return __arm_vclzq_m_u8 (__inactive, __a, __p);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvaq_p (uint32_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_vaddvaq_p_u8 (__a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsriq (uint8x16_t __a, uint8x16_t __b, const int __imm)
{
  return __arm_vsriq_n_u8 (__a, __b, __imm);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsliq (uint8x16_t __a, uint8x16_t __b, const int __imm)
{
  return __arm_vsliq_n_u8 (__a, __b, __imm);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_m_r (uint8x16_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vshlq_m_r_u8 (__a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_m_n (uint8x16_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vrshlq_m_n_u8 (__a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_r (uint8x16_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vqshlq_m_r_u8 (__a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_m_n (uint8x16_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vqrshlq_m_n_u8 (__a, __b, __p);
}
23876 | ||
/* Polymorphic wrappers for the signed-input/unsigned-output min/max-abs
   family: the argument vectors are int8x16_t but the result (scalar or
   vector) is unsigned, so dispatch lands on the _s8 intrinsics.  */
__extension__ extern __inline uint8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminavq_p (uint8_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vminavq_p_s8 (__a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminaq_m (uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vminaq_m_s8 (__a, __b, __p);
}

__extension__ extern __inline uint8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxavq_p (uint8_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vmaxavq_p_s8 (__a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxaq_m (uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vmaxaq_m_s8 (__a, __b, __p);
}
23904 | ||
/* Polymorphic predicated s8 comparisons, yielding an mve_pred16_t.
   As with the unsigned set, each comparison has a vector-vs-vector
   (_s8) and a vector-vs-scalar (_n_s8) overload.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_m (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vcmpneq_m_s8 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_m (int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __arm_vcmpneq_m_n_s8 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq_m (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vcmpltq_m_s8 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq_m (int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __arm_vcmpltq_m_n_s8 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq_m (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vcmpleq_m_s8 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq_m (int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __arm_vcmpleq_m_n_s8 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq_m (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vcmpgtq_m_s8 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq_m (int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __arm_vcmpgtq_m_n_s8 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq_m (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vcmpgeq_m_s8 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq_m (int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __arm_vcmpgeq_m_n_s8 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_m (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vcmpeqq_m_s8 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_m (int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __arm_vcmpeqq_m_n_s8 (__a, __b, __p);
}
23988 | ||
/* Polymorphic s8 wrappers: predicated register shifts, predicated unary
   ops (taking an __inactive vector for unselected lanes), predicated
   reductions (vml{a,s}dav*_p, vaddvaq_p, vminvq_p/vmaxvq_p) and vdupq_m
   (scalar input, hence the _n_s8 suffix).  Pure type dispatch only.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_m_r (int8x16_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vshlq_m_r_s8 (__a, __b, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_m_n (int8x16_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vrshlq_m_n_s8 (__a, __b, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_m (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
{
  return __arm_vrev64q_m_s8 (__inactive, __a, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_r (int8x16_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vqshlq_m_r_s8 (__a, __b, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_m_n (int8x16_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vqrshlq_m_n_s8 (__a, __b, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqnegq_m (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
{
  return __arm_vqnegq_m_s8 (__inactive, __a, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqabsq_m (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
{
  return __arm_vqabsq_m_s8 (__inactive, __a, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vnegq_m (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
{
  return __arm_vnegq_m_s8 (__inactive, __a, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_m (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
{
  return __arm_vmvnq_m_s8 (__inactive, __a, __p);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavxq_p (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vmlsdavxq_p_s8 (__a, __b, __p);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavq_p (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vmlsdavq_p_s8 (__a, __b, __p);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavxq_p (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vmladavxq_p_s8 (__a, __b, __p);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavq_p (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vmladavq_p_s8 (__a, __b, __p);
}

__extension__ extern __inline int8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminvq_p (int8_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vminvq_p_s8 (__a, __b, __p);
}

__extension__ extern __inline int8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxvq_p (int8_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vmaxvq_p_s8 (__a, __b, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdupq_m (int8x16_t __inactive, int8_t __a, mve_pred16_t __p)
{
  return __arm_vdupq_m_n_s8 (__inactive, __a, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclzq_m (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
{
  return __arm_vclzq_m_s8 (__inactive, __a, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclsq_m (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
{
  return __arm_vclsq_m_s8 (__inactive, __a, __p);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvaq_p (int32_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vaddvaq_p_s8 (__a, __b, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabsq_m (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
{
  return __arm_vabsq_m_s8 (__inactive, __a, __p);
}
24128 | ||
/* Polymorphic s8 saturating doubling multiply family.  Ternary vector
   forms take (__inactive, __a, __b) and forward to the _s8 intrinsic;
   the vq(r)dml{a,as}hq forms take a scalar third operand __c and
   forward to the _n_s8 intrinsic.  Pure dispatch only.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlsdhxq (int8x16_t __inactive, int8x16_t __a, int8x16_t __b)
{
  return __arm_vqrdmlsdhxq_s8 (__inactive, __a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlsdhq (int8x16_t __inactive, int8x16_t __a, int8x16_t __b)
{
  return __arm_vqrdmlsdhq_s8 (__inactive, __a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlashq (int8x16_t __a, int8x16_t __b, int8_t __c)
{
  return __arm_vqrdmlashq_n_s8 (__a, __b, __c);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlashq (int8x16_t __a, int8x16_t __b, int8_t __c)
{
  return __arm_vqdmlashq_n_s8 (__a, __b, __c);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlahq (int8x16_t __a, int8x16_t __b, int8_t __c)
{
  return __arm_vqrdmlahq_n_s8 (__a, __b, __c);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmladhxq (int8x16_t __inactive, int8x16_t __a, int8x16_t __b)
{
  return __arm_vqrdmladhxq_s8 (__inactive, __a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmladhq (int8x16_t __inactive, int8x16_t __a, int8x16_t __b)
{
  return __arm_vqrdmladhq_s8 (__inactive, __a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlsdhxq (int8x16_t __inactive, int8x16_t __a, int8x16_t __b)
{
  return __arm_vqdmlsdhxq_s8 (__inactive, __a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlsdhq (int8x16_t __inactive, int8x16_t __a, int8x16_t __b)
{
  return __arm_vqdmlsdhq_s8 (__inactive, __a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlahq (int8x16_t __a, int8x16_t __b, int8_t __c)
{
  return __arm_vqdmlahq_n_s8 (__a, __b, __c);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmladhxq (int8x16_t __inactive, int8x16_t __a, int8x16_t __b)
{
  return __arm_vqdmladhxq_s8 (__inactive, __a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmladhq (int8x16_t __inactive, int8x16_t __a, int8x16_t __b)
{
  return __arm_vqdmladhq_s8 (__inactive, __a, __b);
}
24212 | ||
/* Polymorphic s8 accumulating dot-product wrappers (int32_t accumulator
   __a), scalar multiply-accumulate (_n_s8 suffix for the scalar __c),
   and immediate shift-insert (vsriq/vsliq, _n_s8 for the constant
   __imm).  Pure dispatch only.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavaxq (int32_t __a, int8x16_t __b, int8x16_t __c)
{
  return __arm_vmlsdavaxq_s8 (__a, __b, __c);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavaq (int32_t __a, int8x16_t __b, int8x16_t __c)
{
  return __arm_vmlsdavaq_s8 (__a, __b, __c);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlasq (int8x16_t __a, int8x16_t __b, int8_t __c)
{
  return __arm_vmlasq_n_s8 (__a, __b, __c);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaq (int8x16_t __a, int8x16_t __b, int8_t __c)
{
  return __arm_vmlaq_n_s8 (__a, __b, __c);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavaxq (int32_t __a, int8x16_t __b, int8x16_t __c)
{
  return __arm_vmladavaxq_s8 (__a, __b, __c);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavaq (int32_t __a, int8x16_t __b, int8x16_t __c)
{
  return __arm_vmladavaq_s8 (__a, __b, __c);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsriq (int8x16_t __a, int8x16_t __b, const int __imm)
{
  return __arm_vsriq_n_s8 (__a, __b, __imm);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsliq (int8x16_t __a, int8x16_t __b, const int __imm)
{
  return __arm_vsliq_n_s8 (__a, __b, __imm);
}
24268 | ||
/* Polymorphic 16-bit-lane predicated wrappers: vpselq (predicate select
   between __a and __b) and vrev64q_m (predicated, with __inactive
   supplying unselected lanes).  Pure dispatch only.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vpselq (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __arm_vpselq_u16 (__a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vpselq (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vpselq_s16 (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_m (uint16x8_t __inactive, uint16x8_t __a, mve_pred16_t __p)
{
  return __arm_vrev64q_m_u16 (__inactive, __a, __p);
}
24289 | ||
6a90680b ASDV |
/* Polymorphic u16 wrappers, mirroring the u8 set above: predicated
   vmvnq_m, scalar multiply-accumulate (_n_u16 for the scalar __c),
   predicated/accumulating dot products, and predicated scalar min/max
   reductions.  Pure dispatch only.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_m (uint16x8_t __inactive, uint16x8_t __a, mve_pred16_t __p)
{
  return __arm_vmvnq_m_u16 (__inactive, __a, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlasq (uint16x8_t __a, uint16x8_t __b, uint16_t __c)
{
  return __arm_vmlasq_n_u16 (__a, __b, __c);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaq (uint16x8_t __a, uint16x8_t __b, uint16_t __c)
{
  return __arm_vmlaq_n_u16 (__a, __b, __c);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavq_p (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __arm_vmladavq_p_u16 (__a, __b, __p);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavaq (uint32_t __a, uint16x8_t __b, uint16x8_t __c)
{
  return __arm_vmladavaq_u16 (__a, __b, __c);
}

__extension__ extern __inline uint16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminvq_p (uint16_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __arm_vminvq_p_u16 (__a, __b, __p);
}

__extension__ extern __inline uint16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxvq_p (uint16_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __arm_vmaxvq_p_u16 (__a, __b, __p);
}
24338 | ||
24339 | __extension__ extern __inline uint16x8_t | |
24340 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
24341 | __arm_vdupq_m (uint16x8_t __inactive, uint16_t __a, mve_pred16_t __p) | |
24342 | { | |
24343 | return __arm_vdupq_m_n_u16 (__inactive, __a, __p); | |
24344 | } | |
24345 | ||
24346 | __extension__ extern __inline mve_pred16_t | |
24347 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
24348 | __arm_vcmpneq_m (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
24349 | { | |
24350 | return __arm_vcmpneq_m_u16 (__a, __b, __p); | |
24351 | } | |
24352 | ||
24353 | __extension__ extern __inline mve_pred16_t | |
24354 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
24355 | __arm_vcmpneq_m (uint16x8_t __a, uint16_t __b, mve_pred16_t __p) | |
24356 | { | |
24357 | return __arm_vcmpneq_m_n_u16 (__a, __b, __p); | |
24358 | } | |
24359 | ||
24360 | __extension__ extern __inline mve_pred16_t | |
24361 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
24362 | __arm_vcmphiq_m (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
24363 | { | |
24364 | return __arm_vcmphiq_m_u16 (__a, __b, __p); | |
24365 | } | |
24366 | ||
24367 | __extension__ extern __inline mve_pred16_t | |
24368 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
24369 | __arm_vcmphiq_m (uint16x8_t __a, uint16_t __b, mve_pred16_t __p) | |
24370 | { | |
24371 | return __arm_vcmphiq_m_n_u16 (__a, __b, __p); | |
24372 | } | |
24373 | ||
24374 | __extension__ extern __inline mve_pred16_t | |
24375 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
24376 | __arm_vcmpeqq_m (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
24377 | { | |
24378 | return __arm_vcmpeqq_m_u16 (__a, __b, __p); | |
24379 | } | |
24380 | ||
24381 | __extension__ extern __inline mve_pred16_t | |
24382 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
24383 | __arm_vcmpeqq_m (uint16x8_t __a, uint16_t __b, mve_pred16_t __p) | |
24384 | { | |
24385 | return __arm_vcmpeqq_m_n_u16 (__a, __b, __p); | |
24386 | } | |
24387 | ||
24388 | __extension__ extern __inline mve_pred16_t | |
24389 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
24390 | __arm_vcmpcsq_m (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
24391 | { | |
24392 | return __arm_vcmpcsq_m_u16 (__a, __b, __p); | |
24393 | } | |
24394 | ||
24395 | __extension__ extern __inline mve_pred16_t | |
24396 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
24397 | __arm_vcmpcsq_m (uint16x8_t __a, uint16_t __b, mve_pred16_t __p) | |
24398 | { | |
24399 | return __arm_vcmpcsq_m_n_u16 (__a, __b, __p); | |
24400 | } | |
24401 | ||
24402 | __extension__ extern __inline uint16x8_t | |
24403 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
24404 | __arm_vclzq_m (uint16x8_t __inactive, uint16x8_t __a, mve_pred16_t __p) | |
24405 | { | |
24406 | return __arm_vclzq_m_u16 (__inactive, __a, __p); | |
24407 | } | |
24408 | ||
24409 | __extension__ extern __inline uint32_t | |
24410 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
24411 | __arm_vaddvaq_p (uint32_t __a, uint16x8_t __b, mve_pred16_t __p) | |
24412 | { | |
24413 | return __arm_vaddvaq_p_u16 (__a, __b, __p); | |
24414 | } | |
24415 | ||
24416 | __extension__ extern __inline uint16x8_t | |
24417 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
24418 | __arm_vsriq (uint16x8_t __a, uint16x8_t __b, const int __imm) | |
24419 | { | |
24420 | return __arm_vsriq_n_u16 (__a, __b, __imm); | |
24421 | } | |
24422 | ||
24423 | __extension__ extern __inline uint16x8_t | |
24424 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
24425 | __arm_vsliq (uint16x8_t __a, uint16x8_t __b, const int __imm) | |
24426 | { | |
24427 | return __arm_vsliq_n_u16 (__a, __b, __imm); | |
24428 | } | |
24429 | ||
/* Predicated register-shift wrappers for uint16x8_t: the shift amount
   __b is a signed scalar (int32_t) even for unsigned vectors; each
   forwards to the "_u16"-suffixed intrinsic ("q" = saturating,
   "r" = rounding per the target intrinsic names).  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_m_r (uint16x8_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vshlq_m_r_u16 (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_m_n (uint16x8_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vrshlq_m_n_u16 (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_r (uint16x8_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vqshlq_m_r_u16 (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_m_n (uint16x8_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vqrshlq_m_n_u16 (__a, __b, __p);
}
24457 | ||
/* Predicated min/max-absolute wrappers: signed int16x8_t input, unsigned
   accumulator/result (the "a" in the name presumably means "absolute
   value" per ACLE -- confirm); each forwards to the "_s16" intrinsic.  */
__extension__ extern __inline uint16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminavq_p (uint16_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vminavq_p_s16 (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminaq_m (uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vminaq_m_s16 (__a, __b, __p);
}

__extension__ extern __inline uint16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxavq_p (uint16_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vmaxavq_p_s16 (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxaq_m (uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vmaxaq_m_s16 (__a, __b, __p);
}
24485 | ||
/* Predicated signed 16-bit compares: each comparison has a
   vector/vector overload and a vector/scalar ("_n") overload, both
   forwarding to the "_s16"-suffixed intrinsic and returning a
   predicate mask.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_m (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vcmpneq_m_s16 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_m (int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __arm_vcmpneq_m_n_s16 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq_m (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vcmpltq_m_s16 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq_m (int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __arm_vcmpltq_m_n_s16 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq_m (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vcmpleq_m_s16 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq_m (int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __arm_vcmpleq_m_n_s16 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq_m (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vcmpgtq_m_s16 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq_m (int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __arm_vcmpgtq_m_n_s16 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq_m (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vcmpgeq_m_s16 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq_m (int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __arm_vcmpgeq_m_n_s16 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_m (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vcmpeqq_m_s16 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_m (int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __arm_vcmpeqq_m_n_s16 (__a, __b, __p);
}
24569 | ||
/* Overloaded wrappers for int16x8_t operands: each function forwards
   unchanged to the matching "_s16"-suffixed intrinsic.  "_m" forms
   take an __inactive vector plus predicate (apparently merging
   semantics -- confirm against ACLE); "_p" forms are predicated
   reductions into a scalar accumulator.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_m_r (int16x8_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vshlq_m_r_s16 (__a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_m_n (int16x8_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vrshlq_m_n_s16 (__a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_m (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
{
  return __arm_vrev64q_m_s16 (__inactive, __a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_r (int16x8_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vqshlq_m_r_s16 (__a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_m_n (int16x8_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vqrshlq_m_n_s16 (__a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqnegq_m (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
{
  return __arm_vqnegq_m_s16 (__inactive, __a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqabsq_m (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
{
  return __arm_vqabsq_m_s16 (__inactive, __a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vnegq_m (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
{
  return __arm_vnegq_m_s16 (__inactive, __a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_m (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
{
  return __arm_vmvnq_m_s16 (__inactive, __a, __p);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavxq_p (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vmlsdavxq_p_s16 (__a, __b, __p);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavq_p (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vmlsdavq_p_s16 (__a, __b, __p);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavxq_p (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vmladavxq_p_s16 (__a, __b, __p);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavq_p (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vmladavq_p_s16 (__a, __b, __p);
}

__extension__ extern __inline int16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminvq_p (int16_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vminvq_p_s16 (__a, __b, __p);
}

__extension__ extern __inline int16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxvq_p (int16_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vmaxvq_p_s16 (__a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdupq_m (int16x8_t __inactive, int16_t __a, mve_pred16_t __p)
{
  return __arm_vdupq_m_n_s16 (__inactive, __a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclzq_m (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
{
  return __arm_vclzq_m_s16 (__inactive, __a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclsq_m (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
{
  return __arm_vclsq_m_s16 (__inactive, __a, __p);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvaq_p (int32_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vaddvaq_p_s16 (__a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabsq_m (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
{
  return __arm_vabsq_m_s16 (__inactive, __a, __p);
}
24709 | ||
/* Saturating rounding doubling multiply-accumulate wrappers for
   int16x8_t; each forwards unchanged to the "_s16" intrinsic
   ("_n" = scalar third operand).  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlsdhxq (int16x8_t __inactive, int16x8_t __a, int16x8_t __b)
{
  return __arm_vqrdmlsdhxq_s16 (__inactive, __a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlsdhq (int16x8_t __inactive, int16x8_t __a, int16x8_t __b)
{
  return __arm_vqrdmlsdhq_s16 (__inactive, __a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlashq (int16x8_t __a, int16x8_t __b, int16_t __c)
{
  return __arm_vqrdmlashq_n_s16 (__a, __b, __c);
}
24730 | ||
afb198ee CL |
/* Overloaded wrapper forwarding to the scalar-operand ("_n")
   int16x8_t saturating doubling multiply-accumulate intrinsic.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlashq (int16x8_t __a, int16x8_t __b, int16_t __c)
{
  return __arm_vqdmlashq_n_s16 (__a, __b, __c);
}
24737 | ||
6a90680b ASDV |
/* Multiply-accumulate and shift-insert wrappers for int16x8_t: each
   forwards unchanged to the "_s16"-suffixed intrinsic.  "_n" marks a
   scalar (or, for vsriq/vsliq, immediate) last operand; the vmladava*
   / vmlsdava* reductions accumulate into an int32_t.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlahq (int16x8_t __a, int16x8_t __b, int16_t __c)
{
  return __arm_vqrdmlahq_n_s16 (__a, __b, __c);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmladhxq (int16x8_t __inactive, int16x8_t __a, int16x8_t __b)
{
  return __arm_vqrdmladhxq_s16 (__inactive, __a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmladhq (int16x8_t __inactive, int16x8_t __a, int16x8_t __b)
{
  return __arm_vqrdmladhq_s16 (__inactive, __a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlsdhxq (int16x8_t __inactive, int16x8_t __a, int16x8_t __b)
{
  return __arm_vqdmlsdhxq_s16 (__inactive, __a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlsdhq (int16x8_t __inactive, int16x8_t __a, int16x8_t __b)
{
  return __arm_vqdmlsdhq_s16 (__inactive, __a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlahq (int16x8_t __a, int16x8_t __b, int16_t __c)
{
  return __arm_vqdmlahq_n_s16 (__a, __b, __c);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmladhxq (int16x8_t __inactive, int16x8_t __a, int16x8_t __b)
{
  return __arm_vqdmladhxq_s16 (__inactive, __a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmladhq (int16x8_t __inactive, int16x8_t __a, int16x8_t __b)
{
  return __arm_vqdmladhq_s16 (__inactive, __a, __b);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavaxq (int32_t __a, int16x8_t __b, int16x8_t __c)
{
  return __arm_vmlsdavaxq_s16 (__a, __b, __c);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavaq (int32_t __a, int16x8_t __b, int16x8_t __c)
{
  return __arm_vmlsdavaq_s16 (__a, __b, __c);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlasq (int16x8_t __a, int16x8_t __b, int16_t __c)
{
  return __arm_vmlasq_n_s16 (__a, __b, __c);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaq (int16x8_t __a, int16x8_t __b, int16_t __c)
{
  return __arm_vmlaq_n_s16 (__a, __b, __c);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavaxq (int32_t __a, int16x8_t __b, int16x8_t __c)
{
  return __arm_vmladavaxq_s16 (__a, __b, __c);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavaq (int32_t __a, int16x8_t __b, int16x8_t __c)
{
  return __arm_vmladavaq_s16 (__a, __b, __c);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsriq (int16x8_t __a, int16x8_t __b, const int __imm)
{
  return __arm_vsriq_n_s16 (__a, __b, __imm);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsliq (int16x8_t __a, int16x8_t __b, const int __imm)
{
  return __arm_vsliq_n_s16 (__a, __b, __imm);
}
24849 | ||
/* Overloaded wrappers for 32-bit-lane predicated select and reverse:
   each forwards unchanged to the type-suffixed intrinsic.  The "_m"
   form takes an __inactive vector plus predicate __p -- apparently
   merging semantics; confirm against the ACLE specification.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vpselq (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __arm_vpselq_u32 (__a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vpselq (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vpselq_s32 (__a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_m (uint32x4_t __inactive, uint32x4_t __a, mve_pred16_t __p)
{
  return __arm_vrev64q_m_u32 (__inactive, __a, __p);
}
24870 | ||
6a90680b ASDV |
/* Overloaded wrappers for uint32x4_t operands, mirroring the u16 set
   above: each function forwards unchanged to the matching "_u32"
   intrinsic ("_n" = scalar/immediate last operand, "_m" = predicated,
   "_p" = predicated reduction).  NOTE(review): same-named overloads
   require C++ overload resolution -- presumably compiled only under
   __cplusplus; confirm in the full header.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_m (uint32x4_t __inactive, uint32x4_t __a, mve_pred16_t __p)
{
  return __arm_vmvnq_m_u32 (__inactive, __a, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlasq (uint32x4_t __a, uint32x4_t __b, uint32_t __c)
{
  return __arm_vmlasq_n_u32 (__a, __b, __c);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaq (uint32x4_t __a, uint32x4_t __b, uint32_t __c)
{
  return __arm_vmlaq_n_u32 (__a, __b, __c);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavq_p (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __arm_vmladavq_p_u32 (__a, __b, __p);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavaq (uint32_t __a, uint32x4_t __b, uint32x4_t __c)
{
  return __arm_vmladavaq_u32 (__a, __b, __c);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminvq_p (uint32_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __arm_vminvq_p_u32 (__a, __b, __p);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxvq_p (uint32_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __arm_vmaxvq_p_u32 (__a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdupq_m (uint32x4_t __inactive, uint32_t __a, mve_pred16_t __p)
{
  return __arm_vdupq_m_n_u32 (__inactive, __a, __p);
}

/* Predicated compares: vector/vector and vector/scalar ("_n")
   overloads, each returning a predicate mask.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_m (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __arm_vcmpneq_m_u32 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_m (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
{
  return __arm_vcmpneq_m_n_u32 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmphiq_m (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __arm_vcmphiq_m_u32 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmphiq_m (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
{
  return __arm_vcmphiq_m_n_u32 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_m (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __arm_vcmpeqq_m_u32 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_m (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
{
  return __arm_vcmpeqq_m_n_u32 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpcsq_m (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __arm_vcmpcsq_m_u32 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpcsq_m (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
{
  return __arm_vcmpcsq_m_n_u32 (__a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclzq_m (uint32x4_t __inactive, uint32x4_t __a, mve_pred16_t __p)
{
  return __arm_vclzq_m_u32 (__inactive, __a, __p);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvaq_p (uint32_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __arm_vaddvaq_p_u32 (__a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsriq (uint32x4_t __a, uint32x4_t __b, const int __imm)
{
  return __arm_vsriq_n_u32 (__a, __b, __imm);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsliq (uint32x4_t __a, uint32x4_t __b, const int __imm)
{
  return __arm_vsliq_n_u32 (__a, __b, __imm);
}
25010 | ||
/* Predicated register-shift wrappers for uint32x4_t: the shift amount
   __b is a signed scalar (int32_t) even for unsigned vectors; each
   forwards to the "_u32"-suffixed intrinsic.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_m_r (uint32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vshlq_m_r_u32 (__a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_m_n (uint32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vrshlq_m_n_u32 (__a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_r (uint32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vqshlq_m_r_u32 (__a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_m_n (uint32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vqrshlq_m_n_u32 (__a, __b, __p);
}
25038 | ||
25039 | __extension__ extern __inline uint32_t | |
25040 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25041 | __arm_vminavq_p (uint32_t __a, int32x4_t __b, mve_pred16_t __p) | |
25042 | { | |
25043 | return __arm_vminavq_p_s32 (__a, __b, __p); | |
25044 | } | |
25045 | ||
25046 | __extension__ extern __inline uint32x4_t | |
25047 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25048 | __arm_vminaq_m (uint32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
25049 | { | |
25050 | return __arm_vminaq_m_s32 (__a, __b, __p); | |
25051 | } | |
25052 | ||
25053 | __extension__ extern __inline uint32_t | |
25054 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25055 | __arm_vmaxavq_p (uint32_t __a, int32x4_t __b, mve_pred16_t __p) | |
25056 | { | |
25057 | return __arm_vmaxavq_p_s32 (__a, __b, __p); | |
25058 | } | |
25059 | ||
25060 | __extension__ extern __inline uint32x4_t | |
25061 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25062 | __arm_vmaxaq_m (uint32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
25063 | { | |
25064 | return __arm_vmaxaq_m_s32 (__a, __b, __p); | |
25065 | } | |
25066 | ||
25067 | __extension__ extern __inline mve_pred16_t | |
25068 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25069 | __arm_vcmpneq_m (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
25070 | { | |
25071 | return __arm_vcmpneq_m_s32 (__a, __b, __p); | |
25072 | } | |
25073 | ||
25074 | __extension__ extern __inline mve_pred16_t | |
25075 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25076 | __arm_vcmpneq_m (int32x4_t __a, int32_t __b, mve_pred16_t __p) | |
25077 | { | |
25078 | return __arm_vcmpneq_m_n_s32 (__a, __b, __p); | |
25079 | } | |
25080 | ||
25081 | __extension__ extern __inline mve_pred16_t | |
25082 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25083 | __arm_vcmpltq_m (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
25084 | { | |
25085 | return __arm_vcmpltq_m_s32 (__a, __b, __p); | |
25086 | } | |
25087 | ||
25088 | __extension__ extern __inline mve_pred16_t | |
25089 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25090 | __arm_vcmpltq_m (int32x4_t __a, int32_t __b, mve_pred16_t __p) | |
25091 | { | |
25092 | return __arm_vcmpltq_m_n_s32 (__a, __b, __p); | |
25093 | } | |
25094 | ||
25095 | __extension__ extern __inline mve_pred16_t | |
25096 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25097 | __arm_vcmpleq_m (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
25098 | { | |
25099 | return __arm_vcmpleq_m_s32 (__a, __b, __p); | |
25100 | } | |
25101 | ||
25102 | __extension__ extern __inline mve_pred16_t | |
25103 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25104 | __arm_vcmpleq_m (int32x4_t __a, int32_t __b, mve_pred16_t __p) | |
25105 | { | |
25106 | return __arm_vcmpleq_m_n_s32 (__a, __b, __p); | |
25107 | } | |
25108 | ||
25109 | __extension__ extern __inline mve_pred16_t | |
25110 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25111 | __arm_vcmpgtq_m (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
25112 | { | |
25113 | return __arm_vcmpgtq_m_s32 (__a, __b, __p); | |
25114 | } | |
25115 | ||
25116 | __extension__ extern __inline mve_pred16_t | |
25117 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25118 | __arm_vcmpgtq_m (int32x4_t __a, int32_t __b, mve_pred16_t __p) | |
25119 | { | |
25120 | return __arm_vcmpgtq_m_n_s32 (__a, __b, __p); | |
25121 | } | |
25122 | ||
25123 | __extension__ extern __inline mve_pred16_t | |
25124 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25125 | __arm_vcmpgeq_m (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
25126 | { | |
25127 | return __arm_vcmpgeq_m_s32 (__a, __b, __p); | |
25128 | } | |
25129 | ||
25130 | __extension__ extern __inline mve_pred16_t | |
25131 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25132 | __arm_vcmpgeq_m (int32x4_t __a, int32_t __b, mve_pred16_t __p) | |
25133 | { | |
25134 | return __arm_vcmpgeq_m_n_s32 (__a, __b, __p); | |
25135 | } | |
25136 | ||
25137 | __extension__ extern __inline mve_pred16_t | |
25138 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25139 | __arm_vcmpeqq_m (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
25140 | { | |
25141 | return __arm_vcmpeqq_m_s32 (__a, __b, __p); | |
25142 | } | |
25143 | ||
25144 | __extension__ extern __inline mve_pred16_t | |
25145 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25146 | __arm_vcmpeqq_m (int32x4_t __a, int32_t __b, mve_pred16_t __p) | |
25147 | { | |
25148 | return __arm_vcmpeqq_m_n_s32 (__a, __b, __p); | |
25149 | } | |
25150 | ||
25151 | __extension__ extern __inline int32x4_t | |
25152 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25153 | __arm_vshlq_m_r (int32x4_t __a, int32_t __b, mve_pred16_t __p) | |
25154 | { | |
25155 | return __arm_vshlq_m_r_s32 (__a, __b, __p); | |
25156 | } | |
25157 | ||
25158 | __extension__ extern __inline int32x4_t | |
25159 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25160 | __arm_vrshlq_m_n (int32x4_t __a, int32_t __b, mve_pred16_t __p) | |
25161 | { | |
25162 | return __arm_vrshlq_m_n_s32 (__a, __b, __p); | |
25163 | } | |
25164 | ||
25165 | __extension__ extern __inline int32x4_t | |
25166 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25167 | __arm_vrev64q_m (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p) | |
25168 | { | |
25169 | return __arm_vrev64q_m_s32 (__inactive, __a, __p); | |
25170 | } | |
25171 | ||
25172 | __extension__ extern __inline int32x4_t | |
25173 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25174 | __arm_vqshlq_m_r (int32x4_t __a, int32_t __b, mve_pred16_t __p) | |
25175 | { | |
25176 | return __arm_vqshlq_m_r_s32 (__a, __b, __p); | |
25177 | } | |
25178 | ||
25179 | __extension__ extern __inline int32x4_t | |
25180 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25181 | __arm_vqrshlq_m_n (int32x4_t __a, int32_t __b, mve_pred16_t __p) | |
25182 | { | |
25183 | return __arm_vqrshlq_m_n_s32 (__a, __b, __p); | |
25184 | } | |
25185 | ||
25186 | __extension__ extern __inline int32x4_t | |
25187 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25188 | __arm_vqnegq_m (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p) | |
25189 | { | |
25190 | return __arm_vqnegq_m_s32 (__inactive, __a, __p); | |
25191 | } | |
25192 | ||
25193 | __extension__ extern __inline int32x4_t | |
25194 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25195 | __arm_vqabsq_m (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p) | |
25196 | { | |
25197 | return __arm_vqabsq_m_s32 (__inactive, __a, __p); | |
25198 | } | |
25199 | ||
25200 | __extension__ extern __inline int32x4_t | |
25201 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25202 | __arm_vnegq_m (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p) | |
25203 | { | |
25204 | return __arm_vnegq_m_s32 (__inactive, __a, __p); | |
25205 | } | |
25206 | ||
25207 | __extension__ extern __inline int32x4_t | |
25208 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25209 | __arm_vmvnq_m (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p) | |
25210 | { | |
25211 | return __arm_vmvnq_m_s32 (__inactive, __a, __p); | |
25212 | } | |
25213 | ||
25214 | __extension__ extern __inline int32_t | |
25215 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25216 | __arm_vmlsdavxq_p (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
25217 | { | |
25218 | return __arm_vmlsdavxq_p_s32 (__a, __b, __p); | |
25219 | } | |
25220 | ||
25221 | __extension__ extern __inline int32_t | |
25222 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25223 | __arm_vmlsdavq_p (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
25224 | { | |
25225 | return __arm_vmlsdavq_p_s32 (__a, __b, __p); | |
25226 | } | |
25227 | ||
25228 | __extension__ extern __inline int32_t | |
25229 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25230 | __arm_vmladavxq_p (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
25231 | { | |
25232 | return __arm_vmladavxq_p_s32 (__a, __b, __p); | |
25233 | } | |
25234 | ||
25235 | __extension__ extern __inline int32_t | |
25236 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25237 | __arm_vmladavq_p (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
25238 | { | |
25239 | return __arm_vmladavq_p_s32 (__a, __b, __p); | |
25240 | } | |
25241 | ||
25242 | __extension__ extern __inline int32_t | |
25243 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25244 | __arm_vminvq_p (int32_t __a, int32x4_t __b, mve_pred16_t __p) | |
25245 | { | |
25246 | return __arm_vminvq_p_s32 (__a, __b, __p); | |
25247 | } | |
25248 | ||
25249 | __extension__ extern __inline int32_t | |
25250 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25251 | __arm_vmaxvq_p (int32_t __a, int32x4_t __b, mve_pred16_t __p) | |
25252 | { | |
25253 | return __arm_vmaxvq_p_s32 (__a, __b, __p); | |
25254 | } | |
25255 | ||
25256 | __extension__ extern __inline int32x4_t | |
25257 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25258 | __arm_vdupq_m (int32x4_t __inactive, int32_t __a, mve_pred16_t __p) | |
25259 | { | |
25260 | return __arm_vdupq_m_n_s32 (__inactive, __a, __p); | |
25261 | } | |
25262 | ||
25263 | __extension__ extern __inline int32x4_t | |
25264 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25265 | __arm_vclzq_m (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p) | |
25266 | { | |
25267 | return __arm_vclzq_m_s32 (__inactive, __a, __p); | |
25268 | } | |
25269 | ||
25270 | __extension__ extern __inline int32x4_t | |
25271 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25272 | __arm_vclsq_m (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p) | |
25273 | { | |
25274 | return __arm_vclsq_m_s32 (__inactive, __a, __p); | |
25275 | } | |
25276 | ||
25277 | __extension__ extern __inline int32_t | |
25278 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25279 | __arm_vaddvaq_p (int32_t __a, int32x4_t __b, mve_pred16_t __p) | |
25280 | { | |
25281 | return __arm_vaddvaq_p_s32 (__a, __b, __p); | |
25282 | } | |
25283 | ||
25284 | __extension__ extern __inline int32x4_t | |
25285 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25286 | __arm_vabsq_m (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p) | |
25287 | { | |
25288 | return __arm_vabsq_m_s32 (__inactive, __a, __p); | |
25289 | } | |
25290 | ||
25291 | __extension__ extern __inline int32x4_t | |
25292 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25293 | __arm_vqrdmlsdhxq (int32x4_t __inactive, int32x4_t __a, int32x4_t __b) | |
25294 | { | |
25295 | return __arm_vqrdmlsdhxq_s32 (__inactive, __a, __b); | |
25296 | } | |
25297 | ||
25298 | __extension__ extern __inline int32x4_t | |
25299 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25300 | __arm_vqrdmlsdhq (int32x4_t __inactive, int32x4_t __a, int32x4_t __b) | |
25301 | { | |
25302 | return __arm_vqrdmlsdhq_s32 (__inactive, __a, __b); | |
25303 | } | |
25304 | ||
25305 | __extension__ extern __inline int32x4_t | |
25306 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25307 | __arm_vqrdmlashq (int32x4_t __a, int32x4_t __b, int32_t __c) | |
25308 | { | |
25309 | return __arm_vqrdmlashq_n_s32 (__a, __b, __c); | |
25310 | } | |
25311 | ||
afb198ee CL |
25312 | __extension__ extern __inline int32x4_t |
25313 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25314 | __arm_vqdmlashq (int32x4_t __a, int32x4_t __b, int32_t __c) | |
25315 | { | |
25316 | return __arm_vqdmlashq_n_s32 (__a, __b, __c); | |
25317 | } | |
25318 | ||
6a90680b ASDV |
25319 | __extension__ extern __inline int32x4_t |
25320 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25321 | __arm_vqrdmlahq (int32x4_t __a, int32x4_t __b, int32_t __c) | |
25322 | { | |
25323 | return __arm_vqrdmlahq_n_s32 (__a, __b, __c); | |
25324 | } | |
25325 | ||
25326 | __extension__ extern __inline int32x4_t | |
25327 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25328 | __arm_vqrdmladhxq (int32x4_t __inactive, int32x4_t __a, int32x4_t __b) | |
25329 | { | |
25330 | return __arm_vqrdmladhxq_s32 (__inactive, __a, __b); | |
25331 | } | |
25332 | ||
25333 | __extension__ extern __inline int32x4_t | |
25334 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25335 | __arm_vqrdmladhq (int32x4_t __inactive, int32x4_t __a, int32x4_t __b) | |
25336 | { | |
25337 | return __arm_vqrdmladhq_s32 (__inactive, __a, __b); | |
25338 | } | |
25339 | ||
25340 | __extension__ extern __inline int32x4_t | |
25341 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25342 | __arm_vqdmlsdhxq (int32x4_t __inactive, int32x4_t __a, int32x4_t __b) | |
25343 | { | |
25344 | return __arm_vqdmlsdhxq_s32 (__inactive, __a, __b); | |
25345 | } | |
25346 | ||
25347 | __extension__ extern __inline int32x4_t | |
25348 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25349 | __arm_vqdmlsdhq (int32x4_t __inactive, int32x4_t __a, int32x4_t __b) | |
25350 | { | |
25351 | return __arm_vqdmlsdhq_s32 (__inactive, __a, __b); | |
25352 | } | |
25353 | ||
25354 | __extension__ extern __inline int32x4_t | |
25355 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25356 | __arm_vqdmlahq (int32x4_t __a, int32x4_t __b, int32_t __c) | |
25357 | { | |
25358 | return __arm_vqdmlahq_n_s32 (__a, __b, __c); | |
25359 | } | |
25360 | ||
25361 | __extension__ extern __inline int32x4_t | |
25362 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25363 | __arm_vqdmladhxq (int32x4_t __inactive, int32x4_t __a, int32x4_t __b) | |
25364 | { | |
25365 | return __arm_vqdmladhxq_s32 (__inactive, __a, __b); | |
25366 | } | |
25367 | ||
25368 | __extension__ extern __inline int32x4_t | |
25369 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25370 | __arm_vqdmladhq (int32x4_t __inactive, int32x4_t __a, int32x4_t __b) | |
25371 | { | |
25372 | return __arm_vqdmladhq_s32 (__inactive, __a, __b); | |
25373 | } | |
25374 | ||
25375 | __extension__ extern __inline int32_t | |
25376 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25377 | __arm_vmlsdavaxq (int32_t __a, int32x4_t __b, int32x4_t __c) | |
25378 | { | |
25379 | return __arm_vmlsdavaxq_s32 (__a, __b, __c); | |
25380 | } | |
25381 | ||
25382 | __extension__ extern __inline int32_t | |
25383 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25384 | __arm_vmlsdavaq (int32_t __a, int32x4_t __b, int32x4_t __c) | |
25385 | { | |
25386 | return __arm_vmlsdavaq_s32 (__a, __b, __c); | |
25387 | } | |
25388 | ||
25389 | __extension__ extern __inline int32x4_t | |
25390 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25391 | __arm_vmlasq (int32x4_t __a, int32x4_t __b, int32_t __c) | |
25392 | { | |
25393 | return __arm_vmlasq_n_s32 (__a, __b, __c); | |
25394 | } | |
25395 | ||
25396 | __extension__ extern __inline int32x4_t | |
25397 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25398 | __arm_vmlaq (int32x4_t __a, int32x4_t __b, int32_t __c) | |
25399 | { | |
25400 | return __arm_vmlaq_n_s32 (__a, __b, __c); | |
25401 | } | |
25402 | ||
25403 | __extension__ extern __inline int32_t | |
25404 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25405 | __arm_vmladavaxq (int32_t __a, int32x4_t __b, int32x4_t __c) | |
25406 | { | |
25407 | return __arm_vmladavaxq_s32 (__a, __b, __c); | |
25408 | } | |
25409 | ||
25410 | __extension__ extern __inline int32_t | |
25411 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25412 | __arm_vmladavaq (int32_t __a, int32x4_t __b, int32x4_t __c) | |
25413 | { | |
25414 | return __arm_vmladavaq_s32 (__a, __b, __c); | |
25415 | } | |
25416 | ||
25417 | __extension__ extern __inline int32x4_t | |
25418 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25419 | __arm_vsriq (int32x4_t __a, int32x4_t __b, const int __imm) | |
25420 | { | |
25421 | return __arm_vsriq_n_s32 (__a, __b, __imm); | |
25422 | } | |
25423 | ||
25424 | __extension__ extern __inline int32x4_t | |
25425 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25426 | __arm_vsliq (int32x4_t __a, int32x4_t __b, const int __imm) | |
25427 | { | |
25428 | return __arm_vsliq_n_s32 (__a, __b, __imm); | |
25429 | } | |
25430 | ||
25431 | __extension__ extern __inline uint64x2_t | |
25432 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25433 | __arm_vpselq (uint64x2_t __a, uint64x2_t __b, mve_pred16_t __p) | |
25434 | { | |
25435 | return __arm_vpselq_u64 (__a, __b, __p); | |
25436 | } | |
25437 | ||
25438 | __extension__ extern __inline int64x2_t | |
25439 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25440 | __arm_vpselq (int64x2_t __a, int64x2_t __b, mve_pred16_t __p) | |
25441 | { | |
25442 | return __arm_vpselq_s64 (__a, __b, __p); | |
25443 | } | |
25444 | ||
25445 | __extension__ extern __inline int64_t | |
25446 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25447 | __arm_vrmlaldavhaxq (int64_t __a, int32x4_t __b, int32x4_t __c) | |
25448 | { | |
25449 | return __arm_vrmlaldavhaxq_s32 (__a, __b, __c); | |
25450 | } | |
25451 | ||
25452 | __extension__ extern __inline int64_t | |
25453 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25454 | __arm_vrmlsldavhaq (int64_t __a, int32x4_t __b, int32x4_t __c) | |
25455 | { | |
25456 | return __arm_vrmlsldavhaq_s32 (__a, __b, __c); | |
25457 | } | |
25458 | ||
25459 | __extension__ extern __inline int64_t | |
25460 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25461 | __arm_vrmlsldavhaxq (int64_t __a, int32x4_t __b, int32x4_t __c) | |
25462 | { | |
25463 | return __arm_vrmlsldavhaxq_s32 (__a, __b, __c); | |
25464 | } | |
25465 | ||
25466 | __extension__ extern __inline int64_t | |
25467 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25468 | __arm_vaddlvaq_p (int64_t __a, int32x4_t __b, mve_pred16_t __p) | |
25469 | { | |
25470 | return __arm_vaddlvaq_p_s32 (__a, __b, __p); | |
25471 | } | |
25472 | ||
25473 | __extension__ extern __inline int8x16_t | |
25474 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25475 | __arm_vrev16q_m (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p) | |
25476 | { | |
25477 | return __arm_vrev16q_m_s8 (__inactive, __a, __p); | |
25478 | } | |
25479 | ||
25480 | __extension__ extern __inline int64_t | |
25481 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25482 | __arm_vrmlaldavhq_p (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
25483 | { | |
25484 | return __arm_vrmlaldavhq_p_s32 (__a, __b, __p); | |
25485 | } | |
25486 | ||
25487 | __extension__ extern __inline int64_t | |
25488 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25489 | __arm_vrmlaldavhxq_p (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
25490 | { | |
25491 | return __arm_vrmlaldavhxq_p_s32 (__a, __b, __p); | |
25492 | } | |
25493 | ||
25494 | __extension__ extern __inline int64_t | |
25495 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25496 | __arm_vrmlsldavhq_p (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
25497 | { | |
25498 | return __arm_vrmlsldavhq_p_s32 (__a, __b, __p); | |
25499 | } | |
25500 | ||
25501 | __extension__ extern __inline int64_t | |
25502 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25503 | __arm_vrmlsldavhxq_p (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
25504 | { | |
25505 | return __arm_vrmlsldavhxq_p_s32 (__a, __b, __p); | |
25506 | } | |
25507 | ||
25508 | __extension__ extern __inline uint64_t | |
25509 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25510 | __arm_vaddlvaq_p (uint64_t __a, uint32x4_t __b, mve_pred16_t __p) | |
25511 | { | |
25512 | return __arm_vaddlvaq_p_u32 (__a, __b, __p); | |
25513 | } | |
25514 | ||
25515 | __extension__ extern __inline uint8x16_t | |
25516 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25517 | __arm_vrev16q_m (uint8x16_t __inactive, uint8x16_t __a, mve_pred16_t __p) | |
25518 | { | |
25519 | return __arm_vrev16q_m_u8 (__inactive, __a, __p); | |
25520 | } | |
25521 | ||
25522 | __extension__ extern __inline uint64_t | |
25523 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25524 | __arm_vrmlaldavhq_p (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
25525 | { | |
25526 | return __arm_vrmlaldavhq_p_u32 (__a, __b, __p); | |
25527 | } | |
25528 | ||
25529 | __extension__ extern __inline int16x8_t | |
25530 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25531 | __arm_vmvnq_m (int16x8_t __inactive, const int __imm, mve_pred16_t __p) | |
25532 | { | |
25533 | return __arm_vmvnq_m_n_s16 (__inactive, __imm, __p); | |
25534 | } | |
25535 | ||
25536 | __extension__ extern __inline int16x8_t | |
25537 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25538 | __arm_vorrq_m_n (int16x8_t __a, const int __imm, mve_pred16_t __p) | |
25539 | { | |
25540 | return __arm_vorrq_m_n_s16 (__a, __imm, __p); | |
25541 | } | |
25542 | ||
25543 | __extension__ extern __inline int8x16_t | |
25544 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25545 | __arm_vqrshrntq (int8x16_t __a, int16x8_t __b, const int __imm) | |
25546 | { | |
25547 | return __arm_vqrshrntq_n_s16 (__a, __b, __imm); | |
25548 | } | |
25549 | ||
25550 | __extension__ extern __inline int8x16_t | |
25551 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25552 | __arm_vqshrnbq (int8x16_t __a, int16x8_t __b, const int __imm) | |
25553 | { | |
25554 | return __arm_vqshrnbq_n_s16 (__a, __b, __imm); | |
25555 | } | |
25556 | ||
25557 | __extension__ extern __inline int8x16_t | |
25558 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25559 | __arm_vqshrntq (int8x16_t __a, int16x8_t __b, const int __imm) | |
25560 | { | |
25561 | return __arm_vqshrntq_n_s16 (__a, __b, __imm); | |
25562 | } | |
25563 | ||
25564 | __extension__ extern __inline int8x16_t | |
25565 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25566 | __arm_vrshrnbq (int8x16_t __a, int16x8_t __b, const int __imm) | |
25567 | { | |
25568 | return __arm_vrshrnbq_n_s16 (__a, __b, __imm); | |
25569 | } | |
25570 | ||
25571 | __extension__ extern __inline int8x16_t | |
25572 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25573 | __arm_vrshrntq (int8x16_t __a, int16x8_t __b, const int __imm) | |
25574 | { | |
25575 | return __arm_vrshrntq_n_s16 (__a, __b, __imm); | |
25576 | } | |
25577 | ||
25578 | __extension__ extern __inline int8x16_t | |
25579 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25580 | __arm_vshrnbq (int8x16_t __a, int16x8_t __b, const int __imm) | |
25581 | { | |
25582 | return __arm_vshrnbq_n_s16 (__a, __b, __imm); | |
25583 | } | |
25584 | ||
25585 | __extension__ extern __inline int8x16_t | |
25586 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25587 | __arm_vshrntq (int8x16_t __a, int16x8_t __b, const int __imm) | |
25588 | { | |
25589 | return __arm_vshrntq_n_s16 (__a, __b, __imm); | |
25590 | } | |
25591 | ||
25592 | __extension__ extern __inline int64_t | |
25593 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25594 | __arm_vmlaldavaq (int64_t __a, int16x8_t __b, int16x8_t __c) | |
25595 | { | |
25596 | return __arm_vmlaldavaq_s16 (__a, __b, __c); | |
25597 | } | |
25598 | ||
25599 | __extension__ extern __inline int64_t | |
25600 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25601 | __arm_vmlaldavaxq (int64_t __a, int16x8_t __b, int16x8_t __c) | |
25602 | { | |
25603 | return __arm_vmlaldavaxq_s16 (__a, __b, __c); | |
25604 | } | |
25605 | ||
25606 | __extension__ extern __inline int64_t | |
25607 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25608 | __arm_vmlsldavaq (int64_t __a, int16x8_t __b, int16x8_t __c) | |
25609 | { | |
25610 | return __arm_vmlsldavaq_s16 (__a, __b, __c); | |
25611 | } | |
25612 | ||
25613 | __extension__ extern __inline int64_t | |
25614 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25615 | __arm_vmlsldavaxq (int64_t __a, int16x8_t __b, int16x8_t __c) | |
25616 | { | |
25617 | return __arm_vmlsldavaxq_s16 (__a, __b, __c); | |
25618 | } | |
25619 | ||
25620 | __extension__ extern __inline int64_t | |
25621 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25622 | __arm_vmlaldavq_p (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
25623 | { | |
25624 | return __arm_vmlaldavq_p_s16 (__a, __b, __p); | |
25625 | } | |
25626 | ||
25627 | __extension__ extern __inline int64_t | |
25628 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25629 | __arm_vmlaldavxq_p (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
25630 | { | |
25631 | return __arm_vmlaldavxq_p_s16 (__a, __b, __p); | |
25632 | } | |
25633 | ||
25634 | __extension__ extern __inline int64_t | |
25635 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25636 | __arm_vmlsldavq_p (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
25637 | { | |
25638 | return __arm_vmlsldavq_p_s16 (__a, __b, __p); | |
25639 | } | |
25640 | ||
25641 | __extension__ extern __inline int64_t | |
25642 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25643 | __arm_vmlsldavxq_p (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
25644 | { | |
25645 | return __arm_vmlsldavxq_p_s16 (__a, __b, __p); | |
25646 | } | |
25647 | ||
25648 | __extension__ extern __inline int16x8_t | |
25649 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25650 | __arm_vmovlbq_m (int16x8_t __inactive, int8x16_t __a, mve_pred16_t __p) | |
25651 | { | |
25652 | return __arm_vmovlbq_m_s8 (__inactive, __a, __p); | |
25653 | } | |
25654 | ||
25655 | __extension__ extern __inline int16x8_t | |
25656 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25657 | __arm_vmovltq_m (int16x8_t __inactive, int8x16_t __a, mve_pred16_t __p) | |
25658 | { | |
25659 | return __arm_vmovltq_m_s8 (__inactive, __a, __p); | |
25660 | } | |
25661 | ||
25662 | __extension__ extern __inline int8x16_t | |
25663 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25664 | __arm_vmovnbq_m (int8x16_t __a, int16x8_t __b, mve_pred16_t __p) | |
25665 | { | |
25666 | return __arm_vmovnbq_m_s16 (__a, __b, __p); | |
25667 | } | |
25668 | ||
25669 | __extension__ extern __inline int8x16_t | |
25670 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25671 | __arm_vmovntq_m (int8x16_t __a, int16x8_t __b, mve_pred16_t __p) | |
25672 | { | |
25673 | return __arm_vmovntq_m_s16 (__a, __b, __p); | |
25674 | } | |
25675 | ||
25676 | __extension__ extern __inline int8x16_t | |
25677 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25678 | __arm_vqmovnbq_m (int8x16_t __a, int16x8_t __b, mve_pred16_t __p) | |
25679 | { | |
25680 | return __arm_vqmovnbq_m_s16 (__a, __b, __p); | |
25681 | } | |
25682 | ||
25683 | __extension__ extern __inline int8x16_t | |
25684 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25685 | __arm_vqmovntq_m (int8x16_t __a, int16x8_t __b, mve_pred16_t __p) | |
25686 | { | |
25687 | return __arm_vqmovntq_m_s16 (__a, __b, __p); | |
25688 | } | |
25689 | ||
25690 | __extension__ extern __inline int8x16_t | |
25691 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25692 | __arm_vrev32q_m (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p) | |
25693 | { | |
25694 | return __arm_vrev32q_m_s8 (__inactive, __a, __p); | |
25695 | } | |
25696 | ||
25697 | __extension__ extern __inline uint16x8_t | |
25698 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25699 | __arm_vmvnq_m (uint16x8_t __inactive, const int __imm, mve_pred16_t __p) | |
25700 | { | |
25701 | return __arm_vmvnq_m_n_u16 (__inactive, __imm, __p); | |
25702 | } | |
25703 | ||
25704 | __extension__ extern __inline uint16x8_t | |
25705 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25706 | __arm_vorrq_m_n (uint16x8_t __a, const int __imm, mve_pred16_t __p) | |
25707 | { | |
25708 | return __arm_vorrq_m_n_u16 (__a, __imm, __p); | |
25709 | } | |
25710 | ||
25711 | __extension__ extern __inline uint8x16_t | |
25712 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25713 | __arm_vqrshruntq (uint8x16_t __a, int16x8_t __b, const int __imm) | |
25714 | { | |
25715 | return __arm_vqrshruntq_n_s16 (__a, __b, __imm); | |
25716 | } | |
25717 | ||
25718 | __extension__ extern __inline uint8x16_t | |
25719 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25720 | __arm_vqshrunbq (uint8x16_t __a, int16x8_t __b, const int __imm) | |
25721 | { | |
25722 | return __arm_vqshrunbq_n_s16 (__a, __b, __imm); | |
25723 | } | |
25724 | ||
25725 | __extension__ extern __inline uint8x16_t | |
25726 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25727 | __arm_vqshruntq (uint8x16_t __a, int16x8_t __b, const int __imm) | |
25728 | { | |
25729 | return __arm_vqshruntq_n_s16 (__a, __b, __imm); | |
25730 | } | |
25731 | ||
25732 | __extension__ extern __inline uint8x16_t | |
25733 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25734 | __arm_vqmovunbq_m (uint8x16_t __a, int16x8_t __b, mve_pred16_t __p) | |
25735 | { | |
25736 | return __arm_vqmovunbq_m_s16 (__a, __b, __p); | |
25737 | } | |
25738 | ||
25739 | __extension__ extern __inline uint8x16_t | |
25740 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25741 | __arm_vqmovuntq_m (uint8x16_t __a, int16x8_t __b, mve_pred16_t __p) | |
25742 | { | |
25743 | return __arm_vqmovuntq_m_s16 (__a, __b, __p); | |
25744 | } | |
25745 | ||
25746 | __extension__ extern __inline uint8x16_t | |
25747 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25748 | __arm_vqrshrntq (uint8x16_t __a, uint16x8_t __b, const int __imm) | |
25749 | { | |
25750 | return __arm_vqrshrntq_n_u16 (__a, __b, __imm); | |
25751 | } | |
25752 | ||
25753 | __extension__ extern __inline uint8x16_t | |
25754 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25755 | __arm_vqshrnbq (uint8x16_t __a, uint16x8_t __b, const int __imm) | |
25756 | { | |
25757 | return __arm_vqshrnbq_n_u16 (__a, __b, __imm); | |
25758 | } | |
25759 | ||
25760 | __extension__ extern __inline uint8x16_t | |
25761 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25762 | __arm_vqshrntq (uint8x16_t __a, uint16x8_t __b, const int __imm) | |
25763 | { | |
25764 | return __arm_vqshrntq_n_u16 (__a, __b, __imm); | |
25765 | } | |
25766 | ||
25767 | __extension__ extern __inline uint8x16_t | |
25768 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25769 | __arm_vrshrnbq (uint8x16_t __a, uint16x8_t __b, const int __imm) | |
25770 | { | |
25771 | return __arm_vrshrnbq_n_u16 (__a, __b, __imm); | |
25772 | } | |
25773 | ||
25774 | __extension__ extern __inline uint8x16_t | |
25775 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25776 | __arm_vrshrntq (uint8x16_t __a, uint16x8_t __b, const int __imm) | |
25777 | { | |
25778 | return __arm_vrshrntq_n_u16 (__a, __b, __imm); | |
25779 | } | |
25780 | ||
25781 | __extension__ extern __inline uint8x16_t | |
25782 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25783 | __arm_vshrnbq (uint8x16_t __a, uint16x8_t __b, const int __imm) | |
25784 | { | |
25785 | return __arm_vshrnbq_n_u16 (__a, __b, __imm); | |
25786 | } | |
25787 | ||
25788 | __extension__ extern __inline uint8x16_t | |
25789 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25790 | __arm_vshrntq (uint8x16_t __a, uint16x8_t __b, const int __imm) | |
25791 | { | |
25792 | return __arm_vshrntq_n_u16 (__a, __b, __imm); | |
25793 | } | |
25794 | ||
25795 | __extension__ extern __inline uint64_t | |
25796 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25797 | __arm_vmlaldavaq (uint64_t __a, uint16x8_t __b, uint16x8_t __c) | |
25798 | { | |
25799 | return __arm_vmlaldavaq_u16 (__a, __b, __c); | |
25800 | } | |
25801 | ||
25802 | __extension__ extern __inline uint64_t | |
25803 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25804 | __arm_vmlaldavq_p (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
25805 | { | |
25806 | return __arm_vmlaldavq_p_u16 (__a, __b, __p); | |
25807 | } | |
25808 | ||
25809 | __extension__ extern __inline uint16x8_t | |
25810 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25811 | __arm_vmovlbq_m (uint16x8_t __inactive, uint8x16_t __a, mve_pred16_t __p) | |
25812 | { | |
25813 | return __arm_vmovlbq_m_u8 (__inactive, __a, __p); | |
25814 | } | |
25815 | ||
25816 | __extension__ extern __inline uint16x8_t | |
25817 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25818 | __arm_vmovltq_m (uint16x8_t __inactive, uint8x16_t __a, mve_pred16_t __p) | |
25819 | { | |
25820 | return __arm_vmovltq_m_u8 (__inactive, __a, __p); | |
25821 | } | |
25822 | ||
25823 | __extension__ extern __inline uint8x16_t | |
25824 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25825 | __arm_vmovnbq_m (uint8x16_t __a, uint16x8_t __b, mve_pred16_t __p) | |
25826 | { | |
25827 | return __arm_vmovnbq_m_u16 (__a, __b, __p); | |
25828 | } | |
25829 | ||
25830 | __extension__ extern __inline uint8x16_t | |
25831 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25832 | __arm_vmovntq_m (uint8x16_t __a, uint16x8_t __b, mve_pred16_t __p) | |
25833 | { | |
25834 | return __arm_vmovntq_m_u16 (__a, __b, __p); | |
25835 | } | |
25836 | ||
25837 | __extension__ extern __inline uint8x16_t | |
25838 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25839 | __arm_vqmovnbq_m (uint8x16_t __a, uint16x8_t __b, mve_pred16_t __p) | |
25840 | { | |
25841 | return __arm_vqmovnbq_m_u16 (__a, __b, __p); | |
25842 | } | |
25843 | ||
25844 | __extension__ extern __inline uint8x16_t | |
25845 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25846 | __arm_vqmovntq_m (uint8x16_t __a, uint16x8_t __b, mve_pred16_t __p) | |
25847 | { | |
25848 | return __arm_vqmovntq_m_u16 (__a, __b, __p); | |
25849 | } | |
25850 | ||
25851 | __extension__ extern __inline uint8x16_t | |
25852 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25853 | __arm_vrev32q_m (uint8x16_t __inactive, uint8x16_t __a, mve_pred16_t __p) | |
25854 | { | |
25855 | return __arm_vrev32q_m_u8 (__inactive, __a, __p); | |
25856 | } | |
25857 | ||
25858 | __extension__ extern __inline int32x4_t | |
25859 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25860 | __arm_vmvnq_m (int32x4_t __inactive, const int __imm, mve_pred16_t __p) | |
25861 | { | |
25862 | return __arm_vmvnq_m_n_s32 (__inactive, __imm, __p); | |
25863 | } | |
25864 | ||
25865 | __extension__ extern __inline int32x4_t | |
25866 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25867 | __arm_vorrq_m_n (int32x4_t __a, const int __imm, mve_pred16_t __p) | |
25868 | { | |
25869 | return __arm_vorrq_m_n_s32 (__a, __imm, __p); | |
25870 | } | |
25871 | ||
25872 | __extension__ extern __inline int16x8_t | |
25873 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25874 | __arm_vqrshrntq (int16x8_t __a, int32x4_t __b, const int __imm) | |
25875 | { | |
25876 | return __arm_vqrshrntq_n_s32 (__a, __b, __imm); | |
25877 | } | |
25878 | ||
25879 | __extension__ extern __inline int16x8_t | |
25880 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25881 | __arm_vqshrnbq (int16x8_t __a, int32x4_t __b, const int __imm) | |
25882 | { | |
25883 | return __arm_vqshrnbq_n_s32 (__a, __b, __imm); | |
25884 | } | |
25885 | ||
25886 | __extension__ extern __inline int16x8_t | |
25887 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25888 | __arm_vqshrntq (int16x8_t __a, int32x4_t __b, const int __imm) | |
25889 | { | |
25890 | return __arm_vqshrntq_n_s32 (__a, __b, __imm); | |
25891 | } | |
25892 | ||
25893 | __extension__ extern __inline int16x8_t | |
25894 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25895 | __arm_vrshrnbq (int16x8_t __a, int32x4_t __b, const int __imm) | |
25896 | { | |
25897 | return __arm_vrshrnbq_n_s32 (__a, __b, __imm); | |
25898 | } | |
25899 | ||
25900 | __extension__ extern __inline int16x8_t | |
25901 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25902 | __arm_vrshrntq (int16x8_t __a, int32x4_t __b, const int __imm) | |
25903 | { | |
25904 | return __arm_vrshrntq_n_s32 (__a, __b, __imm); | |
25905 | } | |
25906 | ||
25907 | __extension__ extern __inline int16x8_t | |
25908 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25909 | __arm_vshrnbq (int16x8_t __a, int32x4_t __b, const int __imm) | |
25910 | { | |
25911 | return __arm_vshrnbq_n_s32 (__a, __b, __imm); | |
25912 | } | |
25913 | ||
25914 | __extension__ extern __inline int16x8_t | |
25915 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25916 | __arm_vshrntq (int16x8_t __a, int32x4_t __b, const int __imm) | |
25917 | { | |
25918 | return __arm_vshrntq_n_s32 (__a, __b, __imm); | |
25919 | } | |
25920 | ||
25921 | __extension__ extern __inline int64_t | |
25922 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25923 | __arm_vmlaldavaq (int64_t __a, int32x4_t __b, int32x4_t __c) | |
25924 | { | |
25925 | return __arm_vmlaldavaq_s32 (__a, __b, __c); | |
25926 | } | |
25927 | ||
25928 | __extension__ extern __inline int64_t | |
25929 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25930 | __arm_vmlaldavaxq (int64_t __a, int32x4_t __b, int32x4_t __c) | |
25931 | { | |
25932 | return __arm_vmlaldavaxq_s32 (__a, __b, __c); | |
25933 | } | |
25934 | ||
25935 | __extension__ extern __inline int64_t | |
25936 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25937 | __arm_vmlsldavaq (int64_t __a, int32x4_t __b, int32x4_t __c) | |
25938 | { | |
25939 | return __arm_vmlsldavaq_s32 (__a, __b, __c); | |
25940 | } | |
25941 | ||
25942 | __extension__ extern __inline int64_t | |
25943 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25944 | __arm_vmlsldavaxq (int64_t __a, int32x4_t __b, int32x4_t __c) | |
25945 | { | |
25946 | return __arm_vmlsldavaxq_s32 (__a, __b, __c); | |
25947 | } | |
25948 | ||
25949 | __extension__ extern __inline int64_t | |
25950 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25951 | __arm_vmlaldavq_p (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
25952 | { | |
25953 | return __arm_vmlaldavq_p_s32 (__a, __b, __p); | |
25954 | } | |
25955 | ||
25956 | __extension__ extern __inline int64_t | |
25957 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25958 | __arm_vmlaldavxq_p (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
25959 | { | |
25960 | return __arm_vmlaldavxq_p_s32 (__a, __b, __p); | |
25961 | } | |
25962 | ||
25963 | __extension__ extern __inline int64_t | |
25964 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25965 | __arm_vmlsldavq_p (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
25966 | { | |
25967 | return __arm_vmlsldavq_p_s32 (__a, __b, __p); | |
25968 | } | |
25969 | ||
25970 | __extension__ extern __inline int64_t | |
25971 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25972 | __arm_vmlsldavxq_p (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
25973 | { | |
25974 | return __arm_vmlsldavxq_p_s32 (__a, __b, __p); | |
25975 | } | |
25976 | ||
25977 | __extension__ extern __inline int32x4_t | |
25978 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25979 | __arm_vmovlbq_m (int32x4_t __inactive, int16x8_t __a, mve_pred16_t __p) | |
25980 | { | |
25981 | return __arm_vmovlbq_m_s16 (__inactive, __a, __p); | |
25982 | } | |
25983 | ||
25984 | __extension__ extern __inline int32x4_t | |
25985 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25986 | __arm_vmovltq_m (int32x4_t __inactive, int16x8_t __a, mve_pred16_t __p) | |
25987 | { | |
25988 | return __arm_vmovltq_m_s16 (__inactive, __a, __p); | |
25989 | } | |
25990 | ||
25991 | __extension__ extern __inline int16x8_t | |
25992 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
25993 | __arm_vmovnbq_m (int16x8_t __a, int32x4_t __b, mve_pred16_t __p) | |
25994 | { | |
25995 | return __arm_vmovnbq_m_s32 (__a, __b, __p); | |
25996 | } | |
25997 | ||
25998 | __extension__ extern __inline int16x8_t | |
25999 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26000 | __arm_vmovntq_m (int16x8_t __a, int32x4_t __b, mve_pred16_t __p) | |
26001 | { | |
26002 | return __arm_vmovntq_m_s32 (__a, __b, __p); | |
26003 | } | |
26004 | ||
26005 | __extension__ extern __inline int16x8_t | |
26006 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26007 | __arm_vqmovnbq_m (int16x8_t __a, int32x4_t __b, mve_pred16_t __p) | |
26008 | { | |
26009 | return __arm_vqmovnbq_m_s32 (__a, __b, __p); | |
26010 | } | |
26011 | ||
26012 | __extension__ extern __inline int16x8_t | |
26013 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26014 | __arm_vqmovntq_m (int16x8_t __a, int32x4_t __b, mve_pred16_t __p) | |
26015 | { | |
26016 | return __arm_vqmovntq_m_s32 (__a, __b, __p); | |
26017 | } | |
26018 | ||
26019 | __extension__ extern __inline int16x8_t | |
26020 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26021 | __arm_vrev32q_m (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p) | |
26022 | { | |
26023 | return __arm_vrev32q_m_s16 (__inactive, __a, __p); | |
26024 | } | |
26025 | ||
26026 | __extension__ extern __inline uint32x4_t | |
26027 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26028 | __arm_vmvnq_m (uint32x4_t __inactive, const int __imm, mve_pred16_t __p) | |
26029 | { | |
26030 | return __arm_vmvnq_m_n_u32 (__inactive, __imm, __p); | |
26031 | } | |
26032 | ||
26033 | __extension__ extern __inline uint32x4_t | |
26034 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26035 | __arm_vorrq_m_n (uint32x4_t __a, const int __imm, mve_pred16_t __p) | |
26036 | { | |
26037 | return __arm_vorrq_m_n_u32 (__a, __imm, __p); | |
26038 | } | |
26039 | ||
26040 | __extension__ extern __inline uint16x8_t | |
26041 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26042 | __arm_vqrshruntq (uint16x8_t __a, int32x4_t __b, const int __imm) | |
26043 | { | |
26044 | return __arm_vqrshruntq_n_s32 (__a, __b, __imm); | |
26045 | } | |
26046 | ||
26047 | __extension__ extern __inline uint16x8_t | |
26048 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26049 | __arm_vqshrunbq (uint16x8_t __a, int32x4_t __b, const int __imm) | |
26050 | { | |
26051 | return __arm_vqshrunbq_n_s32 (__a, __b, __imm); | |
26052 | } | |
26053 | ||
26054 | __extension__ extern __inline uint16x8_t | |
26055 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26056 | __arm_vqshruntq (uint16x8_t __a, int32x4_t __b, const int __imm) | |
26057 | { | |
26058 | return __arm_vqshruntq_n_s32 (__a, __b, __imm); | |
26059 | } | |
26060 | ||
26061 | __extension__ extern __inline uint16x8_t | |
26062 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26063 | __arm_vqmovunbq_m (uint16x8_t __a, int32x4_t __b, mve_pred16_t __p) | |
26064 | { | |
26065 | return __arm_vqmovunbq_m_s32 (__a, __b, __p); | |
26066 | } | |
26067 | ||
26068 | __extension__ extern __inline uint16x8_t | |
26069 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26070 | __arm_vqmovuntq_m (uint16x8_t __a, int32x4_t __b, mve_pred16_t __p) | |
26071 | { | |
26072 | return __arm_vqmovuntq_m_s32 (__a, __b, __p); | |
26073 | } | |
26074 | ||
26075 | __extension__ extern __inline uint16x8_t | |
26076 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26077 | __arm_vqrshrntq (uint16x8_t __a, uint32x4_t __b, const int __imm) | |
26078 | { | |
26079 | return __arm_vqrshrntq_n_u32 (__a, __b, __imm); | |
26080 | } | |
26081 | ||
26082 | __extension__ extern __inline uint16x8_t | |
26083 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26084 | __arm_vqshrnbq (uint16x8_t __a, uint32x4_t __b, const int __imm) | |
26085 | { | |
26086 | return __arm_vqshrnbq_n_u32 (__a, __b, __imm); | |
26087 | } | |
26088 | ||
26089 | __extension__ extern __inline uint16x8_t | |
26090 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26091 | __arm_vqshrntq (uint16x8_t __a, uint32x4_t __b, const int __imm) | |
26092 | { | |
26093 | return __arm_vqshrntq_n_u32 (__a, __b, __imm); | |
26094 | } | |
26095 | ||
26096 | __extension__ extern __inline uint16x8_t | |
26097 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26098 | __arm_vrshrnbq (uint16x8_t __a, uint32x4_t __b, const int __imm) | |
26099 | { | |
26100 | return __arm_vrshrnbq_n_u32 (__a, __b, __imm); | |
26101 | } | |
26102 | ||
26103 | __extension__ extern __inline uint16x8_t | |
26104 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26105 | __arm_vrshrntq (uint16x8_t __a, uint32x4_t __b, const int __imm) | |
26106 | { | |
26107 | return __arm_vrshrntq_n_u32 (__a, __b, __imm); | |
26108 | } | |
26109 | ||
26110 | __extension__ extern __inline uint16x8_t | |
26111 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26112 | __arm_vshrnbq (uint16x8_t __a, uint32x4_t __b, const int __imm) | |
26113 | { | |
26114 | return __arm_vshrnbq_n_u32 (__a, __b, __imm); | |
26115 | } | |
26116 | ||
26117 | __extension__ extern __inline uint16x8_t | |
26118 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26119 | __arm_vshrntq (uint16x8_t __a, uint32x4_t __b, const int __imm) | |
26120 | { | |
26121 | return __arm_vshrntq_n_u32 (__a, __b, __imm); | |
26122 | } | |
26123 | ||
26124 | __extension__ extern __inline uint64_t | |
26125 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26126 | __arm_vmlaldavaq (uint64_t __a, uint32x4_t __b, uint32x4_t __c) | |
26127 | { | |
26128 | return __arm_vmlaldavaq_u32 (__a, __b, __c); | |
26129 | } | |
26130 | ||
26131 | __extension__ extern __inline uint64_t | |
26132 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26133 | __arm_vmlaldavq_p (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
26134 | { | |
26135 | return __arm_vmlaldavq_p_u32 (__a, __b, __p); | |
26136 | } | |
26137 | ||
26138 | __extension__ extern __inline uint32x4_t | |
26139 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26140 | __arm_vmovlbq_m (uint32x4_t __inactive, uint16x8_t __a, mve_pred16_t __p) | |
26141 | { | |
26142 | return __arm_vmovlbq_m_u16 (__inactive, __a, __p); | |
26143 | } | |
26144 | ||
26145 | __extension__ extern __inline uint32x4_t | |
26146 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26147 | __arm_vmovltq_m (uint32x4_t __inactive, uint16x8_t __a, mve_pred16_t __p) | |
26148 | { | |
26149 | return __arm_vmovltq_m_u16 (__inactive, __a, __p); | |
26150 | } | |
26151 | ||
26152 | __extension__ extern __inline uint16x8_t | |
26153 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26154 | __arm_vmovnbq_m (uint16x8_t __a, uint32x4_t __b, mve_pred16_t __p) | |
26155 | { | |
26156 | return __arm_vmovnbq_m_u32 (__a, __b, __p); | |
26157 | } | |
26158 | ||
26159 | __extension__ extern __inline uint16x8_t | |
26160 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26161 | __arm_vmovntq_m (uint16x8_t __a, uint32x4_t __b, mve_pred16_t __p) | |
26162 | { | |
26163 | return __arm_vmovntq_m_u32 (__a, __b, __p); | |
26164 | } | |
26165 | ||
26166 | __extension__ extern __inline uint16x8_t | |
26167 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26168 | __arm_vqmovnbq_m (uint16x8_t __a, uint32x4_t __b, mve_pred16_t __p) | |
26169 | { | |
26170 | return __arm_vqmovnbq_m_u32 (__a, __b, __p); | |
26171 | } | |
26172 | ||
26173 | __extension__ extern __inline uint16x8_t | |
26174 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26175 | __arm_vqmovntq_m (uint16x8_t __a, uint32x4_t __b, mve_pred16_t __p) | |
26176 | { | |
26177 | return __arm_vqmovntq_m_u32 (__a, __b, __p); | |
26178 | } | |
26179 | ||
26180 | __extension__ extern __inline uint16x8_t | |
26181 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26182 | __arm_vrev32q_m (uint16x8_t __inactive, uint16x8_t __a, mve_pred16_t __p) | |
26183 | { | |
26184 | return __arm_vrev32q_m_u16 (__inactive, __a, __p); | |
26185 | } | |
26186 | ||
26187 | __extension__ extern __inline int8x16_t | |
26188 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26189 | __arm_vsriq_m (int8x16_t __a, int8x16_t __b, const int __imm, mve_pred16_t __p) | |
26190 | { | |
26191 | return __arm_vsriq_m_n_s8 (__a, __b, __imm, __p); | |
26192 | } | |
26193 | ||
26194 | __extension__ extern __inline int8x16_t | |
26195 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26196 | __arm_vsubq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
26197 | { | |
26198 | return __arm_vsubq_m_s8 (__inactive, __a, __b, __p); | |
26199 | } | |
26200 | ||
26201 | __extension__ extern __inline uint8x16_t | |
26202 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26203 | __arm_vqshluq_m (uint8x16_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p) | |
26204 | { | |
26205 | return __arm_vqshluq_m_n_s8 (__inactive, __a, __imm, __p); | |
26206 | } | |
26207 | ||
26208 | __extension__ extern __inline uint32_t | |
26209 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26210 | __arm_vabavq_p (uint32_t __a, int8x16_t __b, int8x16_t __c, mve_pred16_t __p) | |
26211 | { | |
26212 | return __arm_vabavq_p_s8 (__a, __b, __c, __p); | |
26213 | } | |
26214 | ||
26215 | __extension__ extern __inline uint8x16_t | |
26216 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26217 | __arm_vsriq_m (uint8x16_t __a, uint8x16_t __b, const int __imm, mve_pred16_t __p) | |
26218 | { | |
26219 | return __arm_vsriq_m_n_u8 (__a, __b, __imm, __p); | |
26220 | } | |
26221 | ||
26222 | __extension__ extern __inline uint8x16_t | |
26223 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26224 | __arm_vshlq_m (uint8x16_t __inactive, uint8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
26225 | { | |
26226 | return __arm_vshlq_m_u8 (__inactive, __a, __b, __p); | |
26227 | } | |
26228 | ||
26229 | __extension__ extern __inline uint8x16_t | |
26230 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26231 | __arm_vsubq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
26232 | { | |
26233 | return __arm_vsubq_m_u8 (__inactive, __a, __b, __p); | |
26234 | } | |
26235 | ||
26236 | __extension__ extern __inline uint32_t | |
26237 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26238 | __arm_vabavq_p (uint32_t __a, uint8x16_t __b, uint8x16_t __c, mve_pred16_t __p) | |
26239 | { | |
26240 | return __arm_vabavq_p_u8 (__a, __b, __c, __p); | |
26241 | } | |
26242 | ||
26243 | __extension__ extern __inline int8x16_t | |
26244 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26245 | __arm_vshlq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
26246 | { | |
26247 | return __arm_vshlq_m_s8 (__inactive, __a, __b, __p); | |
26248 | } | |
26249 | ||
26250 | __extension__ extern __inline int16x8_t | |
26251 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26252 | __arm_vsriq_m (int16x8_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p) | |
26253 | { | |
26254 | return __arm_vsriq_m_n_s16 (__a, __b, __imm, __p); | |
26255 | } | |
26256 | ||
26257 | __extension__ extern __inline int16x8_t | |
26258 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26259 | __arm_vsubq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
26260 | { | |
26261 | return __arm_vsubq_m_s16 (__inactive, __a, __b, __p); | |
26262 | } | |
26263 | ||
26264 | __extension__ extern __inline uint16x8_t | |
26265 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26266 | __arm_vqshluq_m (uint16x8_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p) | |
26267 | { | |
26268 | return __arm_vqshluq_m_n_s16 (__inactive, __a, __imm, __p); | |
26269 | } | |
26270 | ||
26271 | __extension__ extern __inline uint32_t | |
26272 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26273 | __arm_vabavq_p (uint32_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p) | |
26274 | { | |
26275 | return __arm_vabavq_p_s16 (__a, __b, __c, __p); | |
26276 | } | |
26277 | ||
26278 | __extension__ extern __inline uint16x8_t | |
26279 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26280 | __arm_vsriq_m (uint16x8_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p) | |
26281 | { | |
26282 | return __arm_vsriq_m_n_u16 (__a, __b, __imm, __p); | |
26283 | } | |
26284 | ||
26285 | __extension__ extern __inline uint16x8_t | |
26286 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26287 | __arm_vshlq_m (uint16x8_t __inactive, uint16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
26288 | { | |
26289 | return __arm_vshlq_m_u16 (__inactive, __a, __b, __p); | |
26290 | } | |
26291 | ||
26292 | __extension__ extern __inline uint16x8_t | |
26293 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26294 | __arm_vsubq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
26295 | { | |
26296 | return __arm_vsubq_m_u16 (__inactive, __a, __b, __p); | |
26297 | } | |
26298 | ||
26299 | __extension__ extern __inline uint32_t | |
26300 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26301 | __arm_vabavq_p (uint32_t __a, uint16x8_t __b, uint16x8_t __c, mve_pred16_t __p) | |
26302 | { | |
26303 | return __arm_vabavq_p_u16 (__a, __b, __c, __p); | |
26304 | } | |
26305 | ||
26306 | __extension__ extern __inline int16x8_t | |
26307 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26308 | __arm_vshlq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
26309 | { | |
26310 | return __arm_vshlq_m_s16 (__inactive, __a, __b, __p); | |
26311 | } | |
26312 | ||
26313 | __extension__ extern __inline int32x4_t | |
26314 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26315 | __arm_vsriq_m (int32x4_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p) | |
26316 | { | |
26317 | return __arm_vsriq_m_n_s32 (__a, __b, __imm, __p); | |
26318 | } | |
26319 | ||
26320 | __extension__ extern __inline int32x4_t | |
26321 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26322 | __arm_vsubq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
26323 | { | |
26324 | return __arm_vsubq_m_s32 (__inactive, __a, __b, __p); | |
26325 | } | |
26326 | ||
26327 | __extension__ extern __inline uint32x4_t | |
26328 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26329 | __arm_vqshluq_m (uint32x4_t __inactive, int32x4_t __a, const int __imm, mve_pred16_t __p) | |
26330 | { | |
26331 | return __arm_vqshluq_m_n_s32 (__inactive, __a, __imm, __p); | |
26332 | } | |
26333 | ||
26334 | __extension__ extern __inline uint32_t | |
26335 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26336 | __arm_vabavq_p (uint32_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p) | |
26337 | { | |
26338 | return __arm_vabavq_p_s32 (__a, __b, __c, __p); | |
26339 | } | |
26340 | ||
26341 | __extension__ extern __inline uint32x4_t | |
26342 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26343 | __arm_vsriq_m (uint32x4_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p) | |
26344 | { | |
26345 | return __arm_vsriq_m_n_u32 (__a, __b, __imm, __p); | |
26346 | } | |
26347 | ||
26348 | __extension__ extern __inline uint32x4_t | |
26349 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26350 | __arm_vshlq_m (uint32x4_t __inactive, uint32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
26351 | { | |
26352 | return __arm_vshlq_m_u32 (__inactive, __a, __b, __p); | |
26353 | } | |
26354 | ||
26355 | __extension__ extern __inline uint32x4_t | |
26356 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26357 | __arm_vsubq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
26358 | { | |
26359 | return __arm_vsubq_m_u32 (__inactive, __a, __b, __p); | |
26360 | } | |
26361 | ||
26362 | __extension__ extern __inline uint32_t | |
26363 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26364 | __arm_vabavq_p (uint32_t __a, uint32x4_t __b, uint32x4_t __c, mve_pred16_t __p) | |
26365 | { | |
26366 | return __arm_vabavq_p_u32 (__a, __b, __c, __p); | |
26367 | } | |
26368 | ||
26369 | __extension__ extern __inline int32x4_t | |
26370 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26371 | __arm_vshlq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
26372 | { | |
26373 | return __arm_vshlq_m_s32 (__inactive, __a, __b, __p); | |
26374 | } | |
26375 | ||
26376 | __extension__ extern __inline int8x16_t | |
26377 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26378 | __arm_vabdq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
26379 | { | |
26380 | return __arm_vabdq_m_s8 (__inactive, __a, __b, __p); | |
26381 | } | |
26382 | ||
26383 | __extension__ extern __inline int32x4_t | |
26384 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26385 | __arm_vabdq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
26386 | { | |
26387 | return __arm_vabdq_m_s32 (__inactive, __a, __b, __p); | |
26388 | } | |
26389 | ||
26390 | __extension__ extern __inline int16x8_t | |
26391 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26392 | __arm_vabdq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
26393 | { | |
26394 | return __arm_vabdq_m_s16 (__inactive, __a, __b, __p); | |
26395 | } | |
26396 | ||
26397 | __extension__ extern __inline uint8x16_t | |
26398 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26399 | __arm_vabdq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
26400 | { | |
26401 | return __arm_vabdq_m_u8 (__inactive, __a, __b, __p); | |
26402 | } | |
26403 | ||
26404 | __extension__ extern __inline uint32x4_t | |
26405 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26406 | __arm_vabdq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
26407 | { | |
26408 | return __arm_vabdq_m_u32 (__inactive, __a, __b, __p); | |
26409 | } | |
26410 | ||
26411 | __extension__ extern __inline uint16x8_t | |
26412 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26413 | __arm_vabdq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
26414 | { | |
26415 | return __arm_vabdq_m_u16 (__inactive, __a, __b, __p); | |
26416 | } | |
26417 | ||
26418 | __extension__ extern __inline int8x16_t | |
26419 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
e0dd75fe | 26420 | __arm_vaddq_m (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p) |
6a90680b ASDV |
26421 | { |
26422 | return __arm_vaddq_m_n_s8 (__inactive, __a, __b, __p); | |
26423 | } | |
26424 | ||
26425 | __extension__ extern __inline int32x4_t | |
26426 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
e0dd75fe | 26427 | __arm_vaddq_m (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p) |
6a90680b ASDV |
26428 | { |
26429 | return __arm_vaddq_m_n_s32 (__inactive, __a, __b, __p); | |
26430 | } | |
26431 | ||
26432 | __extension__ extern __inline int16x8_t | |
26433 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
e0dd75fe | 26434 | __arm_vaddq_m (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p) |
6a90680b ASDV |
26435 | { |
26436 | return __arm_vaddq_m_n_s16 (__inactive, __a, __b, __p); | |
26437 | } | |
26438 | ||
26439 | __extension__ extern __inline uint8x16_t | |
26440 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
e0dd75fe | 26441 | __arm_vaddq_m (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p) |
6a90680b ASDV |
26442 | { |
26443 | return __arm_vaddq_m_n_u8 (__inactive, __a, __b, __p); | |
26444 | } | |
26445 | ||
26446 | __extension__ extern __inline uint32x4_t | |
26447 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
e0dd75fe | 26448 | __arm_vaddq_m (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p) |
6a90680b ASDV |
26449 | { |
26450 | return __arm_vaddq_m_n_u32 (__inactive, __a, __b, __p); | |
26451 | } | |
26452 | ||
26453 | __extension__ extern __inline uint16x8_t | |
26454 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
e0dd75fe | 26455 | __arm_vaddq_m (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p) |
6a90680b ASDV |
26456 | { |
26457 | return __arm_vaddq_m_n_u16 (__inactive, __a, __b, __p); | |
26458 | } | |
26459 | ||
26460 | __extension__ extern __inline int8x16_t | |
26461 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26462 | __arm_vaddq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
26463 | { | |
26464 | return __arm_vaddq_m_s8 (__inactive, __a, __b, __p); | |
26465 | } | |
26466 | ||
26467 | __extension__ extern __inline int32x4_t | |
26468 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26469 | __arm_vaddq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
26470 | { | |
26471 | return __arm_vaddq_m_s32 (__inactive, __a, __b, __p); | |
26472 | } | |
26473 | ||
26474 | __extension__ extern __inline int16x8_t | |
26475 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26476 | __arm_vaddq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
26477 | { | |
26478 | return __arm_vaddq_m_s16 (__inactive, __a, __b, __p); | |
26479 | } | |
26480 | ||
26481 | __extension__ extern __inline uint8x16_t | |
26482 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26483 | __arm_vaddq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
26484 | { | |
26485 | return __arm_vaddq_m_u8 (__inactive, __a, __b, __p); | |
26486 | } | |
26487 | ||
26488 | __extension__ extern __inline uint32x4_t | |
26489 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26490 | __arm_vaddq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
26491 | { | |
26492 | return __arm_vaddq_m_u32 (__inactive, __a, __b, __p); | |
26493 | } | |
26494 | ||
26495 | __extension__ extern __inline uint16x8_t | |
26496 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26497 | __arm_vaddq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
26498 | { | |
26499 | return __arm_vaddq_m_u16 (__inactive, __a, __b, __p); | |
26500 | } | |
26501 | ||
26502 | __extension__ extern __inline int8x16_t | |
26503 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26504 | __arm_vandq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
26505 | { | |
26506 | return __arm_vandq_m_s8 (__inactive, __a, __b, __p); | |
26507 | } | |
26508 | ||
26509 | __extension__ extern __inline int32x4_t | |
26510 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26511 | __arm_vandq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
26512 | { | |
26513 | return __arm_vandq_m_s32 (__inactive, __a, __b, __p); | |
26514 | } | |
26515 | ||
26516 | __extension__ extern __inline int16x8_t | |
26517 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26518 | __arm_vandq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
26519 | { | |
26520 | return __arm_vandq_m_s16 (__inactive, __a, __b, __p); | |
26521 | } | |
26522 | ||
26523 | __extension__ extern __inline uint8x16_t | |
26524 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26525 | __arm_vandq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
26526 | { | |
26527 | return __arm_vandq_m_u8 (__inactive, __a, __b, __p); | |
26528 | } | |
26529 | ||
26530 | __extension__ extern __inline uint32x4_t | |
26531 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26532 | __arm_vandq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
26533 | { | |
26534 | return __arm_vandq_m_u32 (__inactive, __a, __b, __p); | |
26535 | } | |
26536 | ||
26537 | __extension__ extern __inline uint16x8_t | |
26538 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26539 | __arm_vandq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
26540 | { | |
26541 | return __arm_vandq_m_u16 (__inactive, __a, __b, __p); | |
26542 | } | |
26543 | ||
26544 | __extension__ extern __inline int8x16_t | |
26545 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26546 | __arm_vbicq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
26547 | { | |
26548 | return __arm_vbicq_m_s8 (__inactive, __a, __b, __p); | |
26549 | } | |
26550 | ||
26551 | __extension__ extern __inline int32x4_t | |
26552 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26553 | __arm_vbicq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
26554 | { | |
26555 | return __arm_vbicq_m_s32 (__inactive, __a, __b, __p); | |
26556 | } | |
26557 | ||
26558 | __extension__ extern __inline int16x8_t | |
26559 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26560 | __arm_vbicq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
26561 | { | |
26562 | return __arm_vbicq_m_s16 (__inactive, __a, __b, __p); | |
26563 | } | |
26564 | ||
26565 | __extension__ extern __inline uint8x16_t | |
26566 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26567 | __arm_vbicq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
26568 | { | |
26569 | return __arm_vbicq_m_u8 (__inactive, __a, __b, __p); | |
26570 | } | |
26571 | ||
26572 | __extension__ extern __inline uint32x4_t | |
26573 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26574 | __arm_vbicq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
26575 | { | |
26576 | return __arm_vbicq_m_u32 (__inactive, __a, __b, __p); | |
26577 | } | |
26578 | ||
26579 | __extension__ extern __inline uint16x8_t | |
26580 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26581 | __arm_vbicq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
26582 | { | |
26583 | return __arm_vbicq_m_u16 (__inactive, __a, __b, __p); | |
26584 | } | |
26585 | ||
26586 | __extension__ extern __inline int8x16_t | |
26587 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26588 | __arm_vbrsrq_m (int8x16_t __inactive, int8x16_t __a, int32_t __b, mve_pred16_t __p) | |
26589 | { | |
26590 | return __arm_vbrsrq_m_n_s8 (__inactive, __a, __b, __p); | |
26591 | } | |
26592 | ||
26593 | __extension__ extern __inline int32x4_t | |
26594 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26595 | __arm_vbrsrq_m (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p) | |
26596 | { | |
26597 | return __arm_vbrsrq_m_n_s32 (__inactive, __a, __b, __p); | |
26598 | } | |
26599 | ||
26600 | __extension__ extern __inline int16x8_t | |
26601 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26602 | __arm_vbrsrq_m (int16x8_t __inactive, int16x8_t __a, int32_t __b, mve_pred16_t __p) | |
26603 | { | |
26604 | return __arm_vbrsrq_m_n_s16 (__inactive, __a, __b, __p); | |
26605 | } | |
26606 | ||
26607 | __extension__ extern __inline uint8x16_t | |
26608 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26609 | __arm_vbrsrq_m (uint8x16_t __inactive, uint8x16_t __a, int32_t __b, mve_pred16_t __p) | |
26610 | { | |
26611 | return __arm_vbrsrq_m_n_u8 (__inactive, __a, __b, __p); | |
26612 | } | |
26613 | ||
26614 | __extension__ extern __inline uint32x4_t | |
26615 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26616 | __arm_vbrsrq_m (uint32x4_t __inactive, uint32x4_t __a, int32_t __b, mve_pred16_t __p) | |
26617 | { | |
26618 | return __arm_vbrsrq_m_n_u32 (__inactive, __a, __b, __p); | |
26619 | } | |
26620 | ||
26621 | __extension__ extern __inline uint16x8_t | |
26622 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26623 | __arm_vbrsrq_m (uint16x8_t __inactive, uint16x8_t __a, int32_t __b, mve_pred16_t __p) | |
26624 | { | |
26625 | return __arm_vbrsrq_m_n_u16 (__inactive, __a, __b, __p); | |
26626 | } | |
26627 | ||
26628 | __extension__ extern __inline int8x16_t | |
26629 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26630 | __arm_vcaddq_rot270_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
26631 | { | |
26632 | return __arm_vcaddq_rot270_m_s8 (__inactive, __a, __b, __p); | |
26633 | } | |
26634 | ||
26635 | __extension__ extern __inline int32x4_t | |
26636 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26637 | __arm_vcaddq_rot270_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
26638 | { | |
26639 | return __arm_vcaddq_rot270_m_s32 (__inactive, __a, __b, __p); | |
26640 | } | |
26641 | ||
26642 | __extension__ extern __inline int16x8_t | |
26643 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26644 | __arm_vcaddq_rot270_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
26645 | { | |
26646 | return __arm_vcaddq_rot270_m_s16 (__inactive, __a, __b, __p); | |
26647 | } | |
26648 | ||
26649 | __extension__ extern __inline uint8x16_t | |
26650 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26651 | __arm_vcaddq_rot270_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
26652 | { | |
26653 | return __arm_vcaddq_rot270_m_u8 (__inactive, __a, __b, __p); | |
26654 | } | |
26655 | ||
26656 | __extension__ extern __inline uint32x4_t | |
26657 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26658 | __arm_vcaddq_rot270_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
26659 | { | |
26660 | return __arm_vcaddq_rot270_m_u32 (__inactive, __a, __b, __p); | |
26661 | } | |
26662 | ||
26663 | __extension__ extern __inline uint16x8_t | |
26664 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26665 | __arm_vcaddq_rot270_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
26666 | { | |
26667 | return __arm_vcaddq_rot270_m_u16 (__inactive, __a, __b, __p); | |
26668 | } | |
26669 | ||
26670 | __extension__ extern __inline int8x16_t | |
26671 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26672 | __arm_vcaddq_rot90_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
26673 | { | |
26674 | return __arm_vcaddq_rot90_m_s8 (__inactive, __a, __b, __p); | |
26675 | } | |
26676 | ||
26677 | __extension__ extern __inline int32x4_t | |
26678 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26679 | __arm_vcaddq_rot90_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
26680 | { | |
26681 | return __arm_vcaddq_rot90_m_s32 (__inactive, __a, __b, __p); | |
26682 | } | |
26683 | ||
26684 | __extension__ extern __inline int16x8_t | |
26685 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26686 | __arm_vcaddq_rot90_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
26687 | { | |
26688 | return __arm_vcaddq_rot90_m_s16 (__inactive, __a, __b, __p); | |
26689 | } | |
26690 | ||
26691 | __extension__ extern __inline uint8x16_t | |
26692 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26693 | __arm_vcaddq_rot90_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
26694 | { | |
26695 | return __arm_vcaddq_rot90_m_u8 (__inactive, __a, __b, __p); | |
26696 | } | |
26697 | ||
26698 | __extension__ extern __inline uint32x4_t | |
26699 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26700 | __arm_vcaddq_rot90_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
26701 | { | |
26702 | return __arm_vcaddq_rot90_m_u32 (__inactive, __a, __b, __p); | |
26703 | } | |
26704 | ||
26705 | __extension__ extern __inline uint16x8_t | |
26706 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26707 | __arm_vcaddq_rot90_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
26708 | { | |
26709 | return __arm_vcaddq_rot90_m_u16 (__inactive, __a, __b, __p); | |
26710 | } | |
26711 | ||
26712 | __extension__ extern __inline int8x16_t | |
26713 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26714 | __arm_veorq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
26715 | { | |
26716 | return __arm_veorq_m_s8 (__inactive, __a, __b, __p); | |
26717 | } | |
26718 | ||
26719 | __extension__ extern __inline int32x4_t | |
26720 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26721 | __arm_veorq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
26722 | { | |
26723 | return __arm_veorq_m_s32 (__inactive, __a, __b, __p); | |
26724 | } | |
26725 | ||
26726 | __extension__ extern __inline int16x8_t | |
26727 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26728 | __arm_veorq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
26729 | { | |
26730 | return __arm_veorq_m_s16 (__inactive, __a, __b, __p); | |
26731 | } | |
26732 | ||
26733 | __extension__ extern __inline uint8x16_t | |
26734 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26735 | __arm_veorq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
26736 | { | |
26737 | return __arm_veorq_m_u8 (__inactive, __a, __b, __p); | |
26738 | } | |
26739 | ||
26740 | __extension__ extern __inline uint32x4_t | |
26741 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26742 | __arm_veorq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
26743 | { | |
26744 | return __arm_veorq_m_u32 (__inactive, __a, __b, __p); | |
26745 | } | |
26746 | ||
26747 | __extension__ extern __inline uint16x8_t | |
26748 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26749 | __arm_veorq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
26750 | { | |
26751 | return __arm_veorq_m_u16 (__inactive, __a, __b, __p); | |
26752 | } | |
26753 | ||
26754 | __extension__ extern __inline int8x16_t | |
26755 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26756 | __arm_vhaddq_m (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p) | |
26757 | { | |
26758 | return __arm_vhaddq_m_n_s8 (__inactive, __a, __b, __p); | |
26759 | } | |
26760 | ||
26761 | __extension__ extern __inline int32x4_t | |
26762 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26763 | __arm_vhaddq_m (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p) | |
26764 | { | |
26765 | return __arm_vhaddq_m_n_s32 (__inactive, __a, __b, __p); | |
26766 | } | |
26767 | ||
26768 | __extension__ extern __inline int16x8_t | |
26769 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26770 | __arm_vhaddq_m (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p) | |
26771 | { | |
26772 | return __arm_vhaddq_m_n_s16 (__inactive, __a, __b, __p); | |
26773 | } | |
26774 | ||
26775 | __extension__ extern __inline uint8x16_t | |
26776 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26777 | __arm_vhaddq_m (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p) | |
26778 | { | |
26779 | return __arm_vhaddq_m_n_u8 (__inactive, __a, __b, __p); | |
26780 | } | |
26781 | ||
26782 | __extension__ extern __inline uint32x4_t | |
26783 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26784 | __arm_vhaddq_m (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p) | |
26785 | { | |
26786 | return __arm_vhaddq_m_n_u32 (__inactive, __a, __b, __p); | |
26787 | } | |
26788 | ||
26789 | __extension__ extern __inline uint16x8_t | |
26790 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26791 | __arm_vhaddq_m (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p) | |
26792 | { | |
26793 | return __arm_vhaddq_m_n_u16 (__inactive, __a, __b, __p); | |
26794 | } | |
26795 | ||
26796 | __extension__ extern __inline int8x16_t | |
26797 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26798 | __arm_vhaddq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
26799 | { | |
26800 | return __arm_vhaddq_m_s8 (__inactive, __a, __b, __p); | |
26801 | } | |
26802 | ||
26803 | __extension__ extern __inline int32x4_t | |
26804 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26805 | __arm_vhaddq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
26806 | { | |
26807 | return __arm_vhaddq_m_s32 (__inactive, __a, __b, __p); | |
26808 | } | |
26809 | ||
26810 | __extension__ extern __inline int16x8_t | |
26811 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26812 | __arm_vhaddq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
26813 | { | |
26814 | return __arm_vhaddq_m_s16 (__inactive, __a, __b, __p); | |
26815 | } | |
26816 | ||
26817 | __extension__ extern __inline uint8x16_t | |
26818 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26819 | __arm_vhaddq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
26820 | { | |
26821 | return __arm_vhaddq_m_u8 (__inactive, __a, __b, __p); | |
26822 | } | |
26823 | ||
26824 | __extension__ extern __inline uint32x4_t | |
26825 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26826 | __arm_vhaddq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
26827 | { | |
26828 | return __arm_vhaddq_m_u32 (__inactive, __a, __b, __p); | |
26829 | } | |
26830 | ||
26831 | __extension__ extern __inline uint16x8_t | |
26832 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26833 | __arm_vhaddq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
26834 | { | |
26835 | return __arm_vhaddq_m_u16 (__inactive, __a, __b, __p); | |
26836 | } | |
26837 | ||
26838 | __extension__ extern __inline int8x16_t | |
26839 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26840 | __arm_vhcaddq_rot270_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
26841 | { | |
26842 | return __arm_vhcaddq_rot270_m_s8 (__inactive, __a, __b, __p); | |
26843 | } | |
26844 | ||
26845 | __extension__ extern __inline int32x4_t | |
26846 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26847 | __arm_vhcaddq_rot270_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
26848 | { | |
26849 | return __arm_vhcaddq_rot270_m_s32 (__inactive, __a, __b, __p); | |
26850 | } | |
26851 | ||
26852 | __extension__ extern __inline int16x8_t | |
26853 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26854 | __arm_vhcaddq_rot270_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
26855 | { | |
26856 | return __arm_vhcaddq_rot270_m_s16 (__inactive, __a, __b, __p); | |
26857 | } | |
26858 | ||
26859 | __extension__ extern __inline int8x16_t | |
26860 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26861 | __arm_vhcaddq_rot90_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
26862 | { | |
26863 | return __arm_vhcaddq_rot90_m_s8 (__inactive, __a, __b, __p); | |
26864 | } | |
26865 | ||
26866 | __extension__ extern __inline int32x4_t | |
26867 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26868 | __arm_vhcaddq_rot90_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
26869 | { | |
26870 | return __arm_vhcaddq_rot90_m_s32 (__inactive, __a, __b, __p); | |
26871 | } | |
26872 | ||
26873 | __extension__ extern __inline int16x8_t | |
26874 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26875 | __arm_vhcaddq_rot90_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
26876 | { | |
26877 | return __arm_vhcaddq_rot90_m_s16 (__inactive, __a, __b, __p); | |
26878 | } | |
26879 | ||
26880 | __extension__ extern __inline int8x16_t | |
26881 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26882 | __arm_vhsubq_m (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p) | |
26883 | { | |
26884 | return __arm_vhsubq_m_n_s8 (__inactive, __a, __b, __p); | |
26885 | } | |
26886 | ||
26887 | __extension__ extern __inline int32x4_t | |
26888 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26889 | __arm_vhsubq_m (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p) | |
26890 | { | |
26891 | return __arm_vhsubq_m_n_s32 (__inactive, __a, __b, __p); | |
26892 | } | |
26893 | ||
26894 | __extension__ extern __inline int16x8_t | |
26895 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26896 | __arm_vhsubq_m (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p) | |
26897 | { | |
26898 | return __arm_vhsubq_m_n_s16 (__inactive, __a, __b, __p); | |
26899 | } | |
26900 | ||
26901 | __extension__ extern __inline uint8x16_t | |
26902 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26903 | __arm_vhsubq_m (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p) | |
26904 | { | |
26905 | return __arm_vhsubq_m_n_u8 (__inactive, __a, __b, __p); | |
26906 | } | |
26907 | ||
26908 | __extension__ extern __inline uint32x4_t | |
26909 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26910 | __arm_vhsubq_m (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p) | |
26911 | { | |
26912 | return __arm_vhsubq_m_n_u32 (__inactive, __a, __b, __p); | |
26913 | } | |
26914 | ||
26915 | __extension__ extern __inline uint16x8_t | |
26916 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26917 | __arm_vhsubq_m (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p) | |
26918 | { | |
26919 | return __arm_vhsubq_m_n_u16 (__inactive, __a, __b, __p); | |
26920 | } | |
26921 | ||
26922 | __extension__ extern __inline int8x16_t | |
26923 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26924 | __arm_vhsubq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
26925 | { | |
26926 | return __arm_vhsubq_m_s8 (__inactive, __a, __b, __p); | |
26927 | } | |
26928 | ||
26929 | __extension__ extern __inline int32x4_t | |
26930 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26931 | __arm_vhsubq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
26932 | { | |
26933 | return __arm_vhsubq_m_s32 (__inactive, __a, __b, __p); | |
26934 | } | |
26935 | ||
26936 | __extension__ extern __inline int16x8_t | |
26937 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26938 | __arm_vhsubq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
26939 | { | |
26940 | return __arm_vhsubq_m_s16 (__inactive, __a, __b, __p); | |
26941 | } | |
26942 | ||
26943 | __extension__ extern __inline uint8x16_t | |
26944 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26945 | __arm_vhsubq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
26946 | { | |
26947 | return __arm_vhsubq_m_u8 (__inactive, __a, __b, __p); | |
26948 | } | |
26949 | ||
26950 | __extension__ extern __inline uint32x4_t | |
26951 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26952 | __arm_vhsubq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
26953 | { | |
26954 | return __arm_vhsubq_m_u32 (__inactive, __a, __b, __p); | |
26955 | } | |
26956 | ||
26957 | __extension__ extern __inline uint16x8_t | |
26958 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26959 | __arm_vhsubq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
26960 | { | |
26961 | return __arm_vhsubq_m_u16 (__inactive, __a, __b, __p); | |
26962 | } | |
26963 | ||
26964 | __extension__ extern __inline int8x16_t | |
26965 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26966 | __arm_vmaxq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
26967 | { | |
26968 | return __arm_vmaxq_m_s8 (__inactive, __a, __b, __p); | |
26969 | } | |
26970 | ||
26971 | __extension__ extern __inline int32x4_t | |
26972 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26973 | __arm_vmaxq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
26974 | { | |
26975 | return __arm_vmaxq_m_s32 (__inactive, __a, __b, __p); | |
26976 | } | |
26977 | ||
26978 | __extension__ extern __inline int16x8_t | |
26979 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26980 | __arm_vmaxq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
26981 | { | |
26982 | return __arm_vmaxq_m_s16 (__inactive, __a, __b, __p); | |
26983 | } | |
26984 | ||
26985 | __extension__ extern __inline uint8x16_t | |
26986 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26987 | __arm_vmaxq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
26988 | { | |
26989 | return __arm_vmaxq_m_u8 (__inactive, __a, __b, __p); | |
26990 | } | |
26991 | ||
26992 | __extension__ extern __inline uint32x4_t | |
26993 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
26994 | __arm_vmaxq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
26995 | { | |
26996 | return __arm_vmaxq_m_u32 (__inactive, __a, __b, __p); | |
26997 | } | |
26998 | ||
26999 | __extension__ extern __inline uint16x8_t | |
27000 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27001 | __arm_vmaxq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
27002 | { | |
27003 | return __arm_vmaxq_m_u16 (__inactive, __a, __b, __p); | |
27004 | } | |
27005 | ||
27006 | __extension__ extern __inline int8x16_t | |
27007 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27008 | __arm_vminq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
27009 | { | |
27010 | return __arm_vminq_m_s8 (__inactive, __a, __b, __p); | |
27011 | } | |
27012 | ||
27013 | __extension__ extern __inline int32x4_t | |
27014 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27015 | __arm_vminq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
27016 | { | |
27017 | return __arm_vminq_m_s32 (__inactive, __a, __b, __p); | |
27018 | } | |
27019 | ||
27020 | __extension__ extern __inline int16x8_t | |
27021 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27022 | __arm_vminq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
27023 | { | |
27024 | return __arm_vminq_m_s16 (__inactive, __a, __b, __p); | |
27025 | } | |
27026 | ||
27027 | __extension__ extern __inline uint8x16_t | |
27028 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27029 | __arm_vminq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
27030 | { | |
27031 | return __arm_vminq_m_u8 (__inactive, __a, __b, __p); | |
27032 | } | |
27033 | ||
27034 | __extension__ extern __inline uint32x4_t | |
27035 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27036 | __arm_vminq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
27037 | { | |
27038 | return __arm_vminq_m_u32 (__inactive, __a, __b, __p); | |
27039 | } | |
27040 | ||
27041 | __extension__ extern __inline uint16x8_t | |
27042 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27043 | __arm_vminq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
27044 | { | |
27045 | return __arm_vminq_m_u16 (__inactive, __a, __b, __p); | |
27046 | } | |
27047 | ||
27048 | __extension__ extern __inline int32_t | |
27049 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27050 | __arm_vmladavaq_p (int32_t __a, int8x16_t __b, int8x16_t __c, mve_pred16_t __p) | |
27051 | { | |
27052 | return __arm_vmladavaq_p_s8 (__a, __b, __c, __p); | |
27053 | } | |
27054 | ||
27055 | __extension__ extern __inline int32_t | |
27056 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27057 | __arm_vmladavaq_p (int32_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p) | |
27058 | { | |
27059 | return __arm_vmladavaq_p_s32 (__a, __b, __c, __p); | |
27060 | } | |
27061 | ||
27062 | __extension__ extern __inline int32_t | |
27063 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27064 | __arm_vmladavaq_p (int32_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p) | |
27065 | { | |
27066 | return __arm_vmladavaq_p_s16 (__a, __b, __c, __p); | |
27067 | } | |
27068 | ||
27069 | __extension__ extern __inline uint32_t | |
27070 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27071 | __arm_vmladavaq_p (uint32_t __a, uint8x16_t __b, uint8x16_t __c, mve_pred16_t __p) | |
27072 | { | |
27073 | return __arm_vmladavaq_p_u8 (__a, __b, __c, __p); | |
27074 | } | |
27075 | ||
27076 | __extension__ extern __inline uint32_t | |
27077 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27078 | __arm_vmladavaq_p (uint32_t __a, uint32x4_t __b, uint32x4_t __c, mve_pred16_t __p) | |
27079 | { | |
27080 | return __arm_vmladavaq_p_u32 (__a, __b, __c, __p); | |
27081 | } | |
27082 | ||
27083 | __extension__ extern __inline uint32_t | |
27084 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27085 | __arm_vmladavaq_p (uint32_t __a, uint16x8_t __b, uint16x8_t __c, mve_pred16_t __p) | |
27086 | { | |
27087 | return __arm_vmladavaq_p_u16 (__a, __b, __c, __p); | |
27088 | } | |
27089 | ||
27090 | __extension__ extern __inline int32_t | |
27091 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27092 | __arm_vmladavaxq_p (int32_t __a, int8x16_t __b, int8x16_t __c, mve_pred16_t __p) | |
27093 | { | |
27094 | return __arm_vmladavaxq_p_s8 (__a, __b, __c, __p); | |
27095 | } | |
27096 | ||
27097 | __extension__ extern __inline int32_t | |
27098 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27099 | __arm_vmladavaxq_p (int32_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p) | |
27100 | { | |
27101 | return __arm_vmladavaxq_p_s32 (__a, __b, __c, __p); | |
27102 | } | |
27103 | ||
27104 | __extension__ extern __inline int32_t | |
27105 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27106 | __arm_vmladavaxq_p (int32_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p) | |
27107 | { | |
27108 | return __arm_vmladavaxq_p_s16 (__a, __b, __c, __p); | |
27109 | } | |
27110 | ||
27111 | __extension__ extern __inline int8x16_t | |
27112 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27113 | __arm_vmlaq_m (int8x16_t __a, int8x16_t __b, int8_t __c, mve_pred16_t __p) | |
27114 | { | |
27115 | return __arm_vmlaq_m_n_s8 (__a, __b, __c, __p); | |
27116 | } | |
27117 | ||
27118 | __extension__ extern __inline int32x4_t | |
27119 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27120 | __arm_vmlaq_m (int32x4_t __a, int32x4_t __b, int32_t __c, mve_pred16_t __p) | |
27121 | { | |
27122 | return __arm_vmlaq_m_n_s32 (__a, __b, __c, __p); | |
27123 | } | |
27124 | ||
27125 | __extension__ extern __inline int16x8_t | |
27126 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27127 | __arm_vmlaq_m (int16x8_t __a, int16x8_t __b, int16_t __c, mve_pred16_t __p) | |
27128 | { | |
27129 | return __arm_vmlaq_m_n_s16 (__a, __b, __c, __p); | |
27130 | } | |
27131 | ||
27132 | __extension__ extern __inline uint8x16_t | |
27133 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27134 | __arm_vmlaq_m (uint8x16_t __a, uint8x16_t __b, uint8_t __c, mve_pred16_t __p) | |
27135 | { | |
27136 | return __arm_vmlaq_m_n_u8 (__a, __b, __c, __p); | |
27137 | } | |
27138 | ||
27139 | __extension__ extern __inline uint32x4_t | |
27140 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27141 | __arm_vmlaq_m (uint32x4_t __a, uint32x4_t __b, uint32_t __c, mve_pred16_t __p) | |
27142 | { | |
27143 | return __arm_vmlaq_m_n_u32 (__a, __b, __c, __p); | |
27144 | } | |
27145 | ||
27146 | __extension__ extern __inline uint16x8_t | |
27147 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27148 | __arm_vmlaq_m (uint16x8_t __a, uint16x8_t __b, uint16_t __c, mve_pred16_t __p) | |
27149 | { | |
27150 | return __arm_vmlaq_m_n_u16 (__a, __b, __c, __p); | |
27151 | } | |
27152 | ||
27153 | __extension__ extern __inline int8x16_t | |
27154 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27155 | __arm_vmlasq_m (int8x16_t __a, int8x16_t __b, int8_t __c, mve_pred16_t __p) | |
27156 | { | |
27157 | return __arm_vmlasq_m_n_s8 (__a, __b, __c, __p); | |
27158 | } | |
27159 | ||
27160 | __extension__ extern __inline int32x4_t | |
27161 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27162 | __arm_vmlasq_m (int32x4_t __a, int32x4_t __b, int32_t __c, mve_pred16_t __p) | |
27163 | { | |
27164 | return __arm_vmlasq_m_n_s32 (__a, __b, __c, __p); | |
27165 | } | |
27166 | ||
27167 | __extension__ extern __inline int16x8_t | |
27168 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27169 | __arm_vmlasq_m (int16x8_t __a, int16x8_t __b, int16_t __c, mve_pred16_t __p) | |
27170 | { | |
27171 | return __arm_vmlasq_m_n_s16 (__a, __b, __c, __p); | |
27172 | } | |
27173 | ||
27174 | __extension__ extern __inline uint8x16_t | |
27175 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27176 | __arm_vmlasq_m (uint8x16_t __a, uint8x16_t __b, uint8_t __c, mve_pred16_t __p) | |
27177 | { | |
27178 | return __arm_vmlasq_m_n_u8 (__a, __b, __c, __p); | |
27179 | } | |
27180 | ||
27181 | __extension__ extern __inline uint32x4_t | |
27182 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27183 | __arm_vmlasq_m (uint32x4_t __a, uint32x4_t __b, uint32_t __c, mve_pred16_t __p) | |
27184 | { | |
27185 | return __arm_vmlasq_m_n_u32 (__a, __b, __c, __p); | |
27186 | } | |
27187 | ||
27188 | __extension__ extern __inline uint16x8_t | |
27189 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27190 | __arm_vmlasq_m (uint16x8_t __a, uint16x8_t __b, uint16_t __c, mve_pred16_t __p) | |
27191 | { | |
27192 | return __arm_vmlasq_m_n_u16 (__a, __b, __c, __p); | |
27193 | } | |
27194 | ||
27195 | __extension__ extern __inline int32_t | |
27196 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27197 | __arm_vmlsdavaq_p (int32_t __a, int8x16_t __b, int8x16_t __c, mve_pred16_t __p) | |
27198 | { | |
27199 | return __arm_vmlsdavaq_p_s8 (__a, __b, __c, __p); | |
27200 | } | |
27201 | ||
27202 | __extension__ extern __inline int32_t | |
27203 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27204 | __arm_vmlsdavaq_p (int32_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p) | |
27205 | { | |
27206 | return __arm_vmlsdavaq_p_s32 (__a, __b, __c, __p); | |
27207 | } | |
27208 | ||
27209 | __extension__ extern __inline int32_t | |
27210 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27211 | __arm_vmlsdavaq_p (int32_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p) | |
27212 | { | |
27213 | return __arm_vmlsdavaq_p_s16 (__a, __b, __c, __p); | |
27214 | } | |
27215 | ||
27216 | __extension__ extern __inline int32_t | |
27217 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27218 | __arm_vmlsdavaxq_p (int32_t __a, int8x16_t __b, int8x16_t __c, mve_pred16_t __p) | |
27219 | { | |
27220 | return __arm_vmlsdavaxq_p_s8 (__a, __b, __c, __p); | |
27221 | } | |
27222 | ||
27223 | __extension__ extern __inline int32_t | |
27224 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27225 | __arm_vmlsdavaxq_p (int32_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p) | |
27226 | { | |
27227 | return __arm_vmlsdavaxq_p_s32 (__a, __b, __c, __p); | |
27228 | } | |
27229 | ||
27230 | __extension__ extern __inline int32_t | |
27231 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27232 | __arm_vmlsdavaxq_p (int32_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p) | |
27233 | { | |
27234 | return __arm_vmlsdavaxq_p_s16 (__a, __b, __c, __p); | |
27235 | } | |
27236 | ||
27237 | __extension__ extern __inline int8x16_t | |
27238 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27239 | __arm_vmulhq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
27240 | { | |
27241 | return __arm_vmulhq_m_s8 (__inactive, __a, __b, __p); | |
27242 | } | |
27243 | ||
27244 | __extension__ extern __inline int32x4_t | |
27245 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27246 | __arm_vmulhq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
27247 | { | |
27248 | return __arm_vmulhq_m_s32 (__inactive, __a, __b, __p); | |
27249 | } | |
27250 | ||
27251 | __extension__ extern __inline int16x8_t | |
27252 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27253 | __arm_vmulhq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
27254 | { | |
27255 | return __arm_vmulhq_m_s16 (__inactive, __a, __b, __p); | |
27256 | } | |
27257 | ||
27258 | __extension__ extern __inline uint8x16_t | |
27259 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27260 | __arm_vmulhq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
27261 | { | |
27262 | return __arm_vmulhq_m_u8 (__inactive, __a, __b, __p); | |
27263 | } | |
27264 | ||
27265 | __extension__ extern __inline uint32x4_t | |
27266 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27267 | __arm_vmulhq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
27268 | { | |
27269 | return __arm_vmulhq_m_u32 (__inactive, __a, __b, __p); | |
27270 | } | |
27271 | ||
27272 | __extension__ extern __inline uint16x8_t | |
27273 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27274 | __arm_vmulhq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
27275 | { | |
27276 | return __arm_vmulhq_m_u16 (__inactive, __a, __b, __p); | |
27277 | } | |
27278 | ||
27279 | __extension__ extern __inline int16x8_t | |
27280 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27281 | __arm_vmullbq_int_m (int16x8_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
27282 | { | |
27283 | return __arm_vmullbq_int_m_s8 (__inactive, __a, __b, __p); | |
27284 | } | |
27285 | ||
27286 | __extension__ extern __inline int64x2_t | |
27287 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27288 | __arm_vmullbq_int_m (int64x2_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
27289 | { | |
27290 | return __arm_vmullbq_int_m_s32 (__inactive, __a, __b, __p); | |
27291 | } | |
27292 | ||
27293 | __extension__ extern __inline int32x4_t | |
27294 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27295 | __arm_vmullbq_int_m (int32x4_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
27296 | { | |
27297 | return __arm_vmullbq_int_m_s16 (__inactive, __a, __b, __p); | |
27298 | } | |
27299 | ||
27300 | __extension__ extern __inline uint16x8_t | |
27301 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27302 | __arm_vmullbq_int_m (uint16x8_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
27303 | { | |
27304 | return __arm_vmullbq_int_m_u8 (__inactive, __a, __b, __p); | |
27305 | } | |
27306 | ||
27307 | __extension__ extern __inline uint64x2_t | |
27308 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27309 | __arm_vmullbq_int_m (uint64x2_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
27310 | { | |
27311 | return __arm_vmullbq_int_m_u32 (__inactive, __a, __b, __p); | |
27312 | } | |
27313 | ||
27314 | __extension__ extern __inline uint32x4_t | |
27315 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27316 | __arm_vmullbq_int_m (uint32x4_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
27317 | { | |
27318 | return __arm_vmullbq_int_m_u16 (__inactive, __a, __b, __p); | |
27319 | } | |
27320 | ||
27321 | __extension__ extern __inline int16x8_t | |
27322 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27323 | __arm_vmulltq_int_m (int16x8_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
27324 | { | |
27325 | return __arm_vmulltq_int_m_s8 (__inactive, __a, __b, __p); | |
27326 | } | |
27327 | ||
27328 | __extension__ extern __inline int64x2_t | |
27329 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27330 | __arm_vmulltq_int_m (int64x2_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
27331 | { | |
27332 | return __arm_vmulltq_int_m_s32 (__inactive, __a, __b, __p); | |
27333 | } | |
27334 | ||
27335 | __extension__ extern __inline int32x4_t | |
27336 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27337 | __arm_vmulltq_int_m (int32x4_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
27338 | { | |
27339 | return __arm_vmulltq_int_m_s16 (__inactive, __a, __b, __p); | |
27340 | } | |
27341 | ||
27342 | __extension__ extern __inline uint16x8_t | |
27343 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27344 | __arm_vmulltq_int_m (uint16x8_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
27345 | { | |
27346 | return __arm_vmulltq_int_m_u8 (__inactive, __a, __b, __p); | |
27347 | } | |
27348 | ||
27349 | __extension__ extern __inline uint64x2_t | |
27350 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27351 | __arm_vmulltq_int_m (uint64x2_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
27352 | { | |
27353 | return __arm_vmulltq_int_m_u32 (__inactive, __a, __b, __p); | |
27354 | } | |
27355 | ||
27356 | __extension__ extern __inline uint32x4_t | |
27357 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27358 | __arm_vmulltq_int_m (uint32x4_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
27359 | { | |
27360 | return __arm_vmulltq_int_m_u16 (__inactive, __a, __b, __p); | |
27361 | } | |
27362 | ||
27363 | __extension__ extern __inline int8x16_t | |
27364 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27365 | __arm_vmulq_m (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p) | |
27366 | { | |
27367 | return __arm_vmulq_m_n_s8 (__inactive, __a, __b, __p); | |
27368 | } | |
27369 | ||
27370 | __extension__ extern __inline int32x4_t | |
27371 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27372 | __arm_vmulq_m (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p) | |
27373 | { | |
27374 | return __arm_vmulq_m_n_s32 (__inactive, __a, __b, __p); | |
27375 | } | |
27376 | ||
27377 | __extension__ extern __inline int16x8_t | |
27378 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27379 | __arm_vmulq_m (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p) | |
27380 | { | |
27381 | return __arm_vmulq_m_n_s16 (__inactive, __a, __b, __p); | |
27382 | } | |
27383 | ||
27384 | __extension__ extern __inline uint8x16_t | |
27385 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27386 | __arm_vmulq_m (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p) | |
27387 | { | |
27388 | return __arm_vmulq_m_n_u8 (__inactive, __a, __b, __p); | |
27389 | } | |
27390 | ||
27391 | __extension__ extern __inline uint32x4_t | |
27392 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27393 | __arm_vmulq_m (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p) | |
27394 | { | |
27395 | return __arm_vmulq_m_n_u32 (__inactive, __a, __b, __p); | |
27396 | } | |
27397 | ||
27398 | __extension__ extern __inline uint16x8_t | |
27399 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27400 | __arm_vmulq_m (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p) | |
27401 | { | |
27402 | return __arm_vmulq_m_n_u16 (__inactive, __a, __b, __p); | |
27403 | } | |
27404 | ||
27405 | __extension__ extern __inline int8x16_t | |
27406 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27407 | __arm_vmulq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
27408 | { | |
27409 | return __arm_vmulq_m_s8 (__inactive, __a, __b, __p); | |
27410 | } | |
27411 | ||
27412 | __extension__ extern __inline int32x4_t | |
27413 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27414 | __arm_vmulq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
27415 | { | |
27416 | return __arm_vmulq_m_s32 (__inactive, __a, __b, __p); | |
27417 | } | |
27418 | ||
27419 | __extension__ extern __inline int16x8_t | |
27420 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27421 | __arm_vmulq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
27422 | { | |
27423 | return __arm_vmulq_m_s16 (__inactive, __a, __b, __p); | |
27424 | } | |
27425 | ||
27426 | __extension__ extern __inline uint8x16_t | |
27427 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27428 | __arm_vmulq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
27429 | { | |
27430 | return __arm_vmulq_m_u8 (__inactive, __a, __b, __p); | |
27431 | } | |
27432 | ||
27433 | __extension__ extern __inline uint32x4_t | |
27434 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27435 | __arm_vmulq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
27436 | { | |
27437 | return __arm_vmulq_m_u32 (__inactive, __a, __b, __p); | |
27438 | } | |
27439 | ||
27440 | __extension__ extern __inline uint16x8_t | |
27441 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27442 | __arm_vmulq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
27443 | { | |
27444 | return __arm_vmulq_m_u16 (__inactive, __a, __b, __p); | |
27445 | } | |
27446 | ||
27447 | __extension__ extern __inline int8x16_t | |
27448 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27449 | __arm_vornq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
27450 | { | |
27451 | return __arm_vornq_m_s8 (__inactive, __a, __b, __p); | |
27452 | } | |
27453 | ||
27454 | __extension__ extern __inline int32x4_t | |
27455 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27456 | __arm_vornq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
27457 | { | |
27458 | return __arm_vornq_m_s32 (__inactive, __a, __b, __p); | |
27459 | } | |
27460 | ||
27461 | __extension__ extern __inline int16x8_t | |
27462 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27463 | __arm_vornq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
27464 | { | |
27465 | return __arm_vornq_m_s16 (__inactive, __a, __b, __p); | |
27466 | } | |
27467 | ||
27468 | __extension__ extern __inline uint8x16_t | |
27469 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27470 | __arm_vornq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
27471 | { | |
27472 | return __arm_vornq_m_u8 (__inactive, __a, __b, __p); | |
27473 | } | |
27474 | ||
27475 | __extension__ extern __inline uint32x4_t | |
27476 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27477 | __arm_vornq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
27478 | { | |
27479 | return __arm_vornq_m_u32 (__inactive, __a, __b, __p); | |
27480 | } | |
27481 | ||
27482 | __extension__ extern __inline uint16x8_t | |
27483 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27484 | __arm_vornq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
27485 | { | |
27486 | return __arm_vornq_m_u16 (__inactive, __a, __b, __p); | |
27487 | } | |
27488 | ||
27489 | __extension__ extern __inline int8x16_t | |
27490 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27491 | __arm_vorrq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
27492 | { | |
27493 | return __arm_vorrq_m_s8 (__inactive, __a, __b, __p); | |
27494 | } | |
27495 | ||
27496 | __extension__ extern __inline int32x4_t | |
27497 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27498 | __arm_vorrq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
27499 | { | |
27500 | return __arm_vorrq_m_s32 (__inactive, __a, __b, __p); | |
27501 | } | |
27502 | ||
27503 | __extension__ extern __inline int16x8_t | |
27504 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27505 | __arm_vorrq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
27506 | { | |
27507 | return __arm_vorrq_m_s16 (__inactive, __a, __b, __p); | |
27508 | } | |
27509 | ||
27510 | __extension__ extern __inline uint8x16_t | |
27511 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27512 | __arm_vorrq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
27513 | { | |
27514 | return __arm_vorrq_m_u8 (__inactive, __a, __b, __p); | |
27515 | } | |
27516 | ||
27517 | __extension__ extern __inline uint32x4_t | |
27518 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27519 | __arm_vorrq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
27520 | { | |
27521 | return __arm_vorrq_m_u32 (__inactive, __a, __b, __p); | |
27522 | } | |
27523 | ||
27524 | __extension__ extern __inline uint16x8_t | |
27525 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27526 | __arm_vorrq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
27527 | { | |
27528 | return __arm_vorrq_m_u16 (__inactive, __a, __b, __p); | |
27529 | } | |
27530 | ||
27531 | __extension__ extern __inline int8x16_t | |
27532 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27533 | __arm_vqaddq_m (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p) | |
27534 | { | |
27535 | return __arm_vqaddq_m_n_s8 (__inactive, __a, __b, __p); | |
27536 | } | |
27537 | ||
27538 | __extension__ extern __inline int32x4_t | |
27539 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27540 | __arm_vqaddq_m (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p) | |
27541 | { | |
27542 | return __arm_vqaddq_m_n_s32 (__inactive, __a, __b, __p); | |
27543 | } | |
27544 | ||
27545 | __extension__ extern __inline int16x8_t | |
27546 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27547 | __arm_vqaddq_m (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p) | |
27548 | { | |
27549 | return __arm_vqaddq_m_n_s16 (__inactive, __a, __b, __p); | |
27550 | } | |
27551 | ||
27552 | __extension__ extern __inline uint8x16_t | |
27553 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27554 | __arm_vqaddq_m (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p) | |
27555 | { | |
27556 | return __arm_vqaddq_m_n_u8 (__inactive, __a, __b, __p); | |
27557 | } | |
27558 | ||
27559 | __extension__ extern __inline uint32x4_t | |
27560 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27561 | __arm_vqaddq_m (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p) | |
27562 | { | |
27563 | return __arm_vqaddq_m_n_u32 (__inactive, __a, __b, __p); | |
27564 | } | |
27565 | ||
27566 | __extension__ extern __inline uint16x8_t | |
27567 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27568 | __arm_vqaddq_m (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p) | |
27569 | { | |
27570 | return __arm_vqaddq_m_n_u16 (__inactive, __a, __b, __p); | |
27571 | } | |
27572 | ||
27573 | __extension__ extern __inline int8x16_t | |
27574 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27575 | __arm_vqaddq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
27576 | { | |
27577 | return __arm_vqaddq_m_s8 (__inactive, __a, __b, __p); | |
27578 | } | |
27579 | ||
27580 | __extension__ extern __inline int32x4_t | |
27581 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27582 | __arm_vqaddq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
27583 | { | |
27584 | return __arm_vqaddq_m_s32 (__inactive, __a, __b, __p); | |
27585 | } | |
27586 | ||
27587 | __extension__ extern __inline int16x8_t | |
27588 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27589 | __arm_vqaddq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
27590 | { | |
27591 | return __arm_vqaddq_m_s16 (__inactive, __a, __b, __p); | |
27592 | } | |
27593 | ||
27594 | __extension__ extern __inline uint8x16_t | |
27595 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27596 | __arm_vqaddq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
27597 | { | |
27598 | return __arm_vqaddq_m_u8 (__inactive, __a, __b, __p); | |
27599 | } | |
27600 | ||
27601 | __extension__ extern __inline uint32x4_t | |
27602 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27603 | __arm_vqaddq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
27604 | { | |
27605 | return __arm_vqaddq_m_u32 (__inactive, __a, __b, __p); | |
27606 | } | |
27607 | ||
27608 | __extension__ extern __inline uint16x8_t | |
27609 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27610 | __arm_vqaddq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
27611 | { | |
27612 | return __arm_vqaddq_m_u16 (__inactive, __a, __b, __p); | |
27613 | } | |
27614 | ||
27615 | __extension__ extern __inline int8x16_t | |
27616 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27617 | __arm_vqdmladhq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
27618 | { | |
27619 | return __arm_vqdmladhq_m_s8 (__inactive, __a, __b, __p); | |
27620 | } | |
27621 | ||
27622 | __extension__ extern __inline int32x4_t | |
27623 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27624 | __arm_vqdmladhq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
27625 | { | |
27626 | return __arm_vqdmladhq_m_s32 (__inactive, __a, __b, __p); | |
27627 | } | |
27628 | ||
27629 | __extension__ extern __inline int16x8_t | |
27630 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27631 | __arm_vqdmladhq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
27632 | { | |
27633 | return __arm_vqdmladhq_m_s16 (__inactive, __a, __b, __p); | |
27634 | } | |
27635 | ||
27636 | __extension__ extern __inline int8x16_t | |
27637 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27638 | __arm_vqdmladhxq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
27639 | { | |
27640 | return __arm_vqdmladhxq_m_s8 (__inactive, __a, __b, __p); | |
27641 | } | |
27642 | ||
27643 | __extension__ extern __inline int32x4_t | |
27644 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27645 | __arm_vqdmladhxq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
27646 | { | |
27647 | return __arm_vqdmladhxq_m_s32 (__inactive, __a, __b, __p); | |
27648 | } | |
27649 | ||
27650 | __extension__ extern __inline int16x8_t | |
27651 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27652 | __arm_vqdmladhxq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
27653 | { | |
27654 | return __arm_vqdmladhxq_m_s16 (__inactive, __a, __b, __p); | |
27655 | } | |
27656 | ||
27657 | __extension__ extern __inline int8x16_t | |
27658 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27659 | __arm_vqdmlahq_m (int8x16_t __a, int8x16_t __b, int8_t __c, mve_pred16_t __p) | |
27660 | { | |
27661 | return __arm_vqdmlahq_m_n_s8 (__a, __b, __c, __p); | |
27662 | } | |
27663 | ||
27664 | __extension__ extern __inline int32x4_t | |
27665 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27666 | __arm_vqdmlahq_m (int32x4_t __a, int32x4_t __b, int32_t __c, mve_pred16_t __p) | |
27667 | { | |
27668 | return __arm_vqdmlahq_m_n_s32 (__a, __b, __c, __p); | |
27669 | } | |
27670 | ||
27671 | __extension__ extern __inline int16x8_t | |
27672 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27673 | __arm_vqdmlahq_m (int16x8_t __a, int16x8_t __b, int16_t __c, mve_pred16_t __p) | |
27674 | { | |
27675 | return __arm_vqdmlahq_m_n_s16 (__a, __b, __c, __p); | |
27676 | } | |
27677 | ||
27678 | __extension__ extern __inline int8x16_t | |
27679 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27680 | __arm_vqdmlsdhq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
27681 | { | |
27682 | return __arm_vqdmlsdhq_m_s8 (__inactive, __a, __b, __p); | |
27683 | } | |
27684 | ||
27685 | __extension__ extern __inline int32x4_t | |
27686 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27687 | __arm_vqdmlsdhq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
27688 | { | |
27689 | return __arm_vqdmlsdhq_m_s32 (__inactive, __a, __b, __p); | |
27690 | } | |
27691 | ||
27692 | __extension__ extern __inline int16x8_t | |
27693 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27694 | __arm_vqdmlsdhq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
27695 | { | |
27696 | return __arm_vqdmlsdhq_m_s16 (__inactive, __a, __b, __p); | |
27697 | } | |
27698 | ||
27699 | __extension__ extern __inline int8x16_t | |
27700 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27701 | __arm_vqdmlsdhxq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
27702 | { | |
27703 | return __arm_vqdmlsdhxq_m_s8 (__inactive, __a, __b, __p); | |
27704 | } | |
27705 | ||
27706 | __extension__ extern __inline int32x4_t | |
27707 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27708 | __arm_vqdmlsdhxq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
27709 | { | |
27710 | return __arm_vqdmlsdhxq_m_s32 (__inactive, __a, __b, __p); | |
27711 | } | |
27712 | ||
27713 | __extension__ extern __inline int16x8_t | |
27714 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27715 | __arm_vqdmlsdhxq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
27716 | { | |
27717 | return __arm_vqdmlsdhxq_m_s16 (__inactive, __a, __b, __p); | |
27718 | } | |
27719 | ||
27720 | __extension__ extern __inline int8x16_t | |
27721 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27722 | __arm_vqdmulhq_m (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p) | |
27723 | { | |
27724 | return __arm_vqdmulhq_m_n_s8 (__inactive, __a, __b, __p); | |
27725 | } | |
27726 | ||
27727 | __extension__ extern __inline int32x4_t | |
27728 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27729 | __arm_vqdmulhq_m (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p) | |
27730 | { | |
27731 | return __arm_vqdmulhq_m_n_s32 (__inactive, __a, __b, __p); | |
27732 | } | |
27733 | ||
27734 | __extension__ extern __inline int16x8_t | |
27735 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27736 | __arm_vqdmulhq_m (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p) | |
27737 | { | |
27738 | return __arm_vqdmulhq_m_n_s16 (__inactive, __a, __b, __p); | |
27739 | } | |
27740 | ||
27741 | __extension__ extern __inline int8x16_t | |
27742 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27743 | __arm_vqdmulhq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
27744 | { | |
27745 | return __arm_vqdmulhq_m_s8 (__inactive, __a, __b, __p); | |
27746 | } | |
27747 | ||
27748 | __extension__ extern __inline int32x4_t | |
27749 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27750 | __arm_vqdmulhq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
27751 | { | |
27752 | return __arm_vqdmulhq_m_s32 (__inactive, __a, __b, __p); | |
27753 | } | |
27754 | ||
27755 | __extension__ extern __inline int16x8_t | |
27756 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27757 | __arm_vqdmulhq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
27758 | { | |
27759 | return __arm_vqdmulhq_m_s16 (__inactive, __a, __b, __p); | |
27760 | } | |
27761 | ||
27762 | __extension__ extern __inline int8x16_t | |
27763 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27764 | __arm_vqrdmladhq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
27765 | { | |
27766 | return __arm_vqrdmladhq_m_s8 (__inactive, __a, __b, __p); | |
27767 | } | |
27768 | ||
27769 | __extension__ extern __inline int32x4_t | |
27770 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27771 | __arm_vqrdmladhq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
27772 | { | |
27773 | return __arm_vqrdmladhq_m_s32 (__inactive, __a, __b, __p); | |
27774 | } | |
27775 | ||
27776 | __extension__ extern __inline int16x8_t | |
27777 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
27778 | __arm_vqrdmladhq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
27779 | { | |
27780 | return __arm_vqrdmladhq_m_s16 (__inactive, __a, __b, __p); | |
27781 | } | |
27782 | ||
/* __arm_vqrdmladhxq_m: dispatch for the "exchange" (x) form of the
   predicated saturating rounding doubling multiply-add-dual intrinsic
   (signed types only).  Forwards to the type-suffixed variant; lanes
   disabled by __p keep the value from __inactive.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmladhxq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vqrdmladhxq_m_s8 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmladhxq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vqrdmladhxq_m_s32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmladhxq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vqrdmladhxq_m_s16 (__inactive, __a, __b, __p);
}
27803 | ||
/* __arm_vqrdmlahq_m: dispatch for the predicated saturating rounding
   doubling multiply-accumulate intrinsic with a scalar operand __c
   (hence the "_n_" suffix on the callees).  __a acts as the
   accumulator; per the ACLE "_m" convention, lanes disabled by __p
   keep __a's value.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlahq_m (int8x16_t __a, int8x16_t __b, int8_t __c, mve_pred16_t __p)
{
  return __arm_vqrdmlahq_m_n_s8 (__a, __b, __c, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlahq_m (int32x4_t __a, int32x4_t __b, int32_t __c, mve_pred16_t __p)
{
  return __arm_vqrdmlahq_m_n_s32 (__a, __b, __c, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlahq_m (int16x8_t __a, int16x8_t __b, int16_t __c, mve_pred16_t __p)
{
  return __arm_vqrdmlahq_m_n_s16 (__a, __b, __c, __p);
}
27824 | ||
/* __arm_vqrdmlashq_m: dispatch for the VQRDMLASH variant of the
   predicated saturating rounding doubling multiply-accumulate with a
   scalar operand (see the Arm MVE intrinsics reference for how the
   operand roles differ from vqrdmlahq).  Lanes disabled by __p keep
   __a's value (ACLE "_m" convention).  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlashq_m (int8x16_t __a, int8x16_t __b, int8_t __c, mve_pred16_t __p)
{
  return __arm_vqrdmlashq_m_n_s8 (__a, __b, __c, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlashq_m (int32x4_t __a, int32x4_t __b, int32_t __c, mve_pred16_t __p)
{
  return __arm_vqrdmlashq_m_n_s32 (__a, __b, __c, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlashq_m (int16x8_t __a, int16x8_t __b, int16_t __c, mve_pred16_t __p)
{
  return __arm_vqrdmlashq_m_n_s16 (__a, __b, __c, __p);
}
27845 | ||
afb198ee CL |
/* __arm_vqdmlashq_m: dispatch for the VQDMLASH (non-rounding) variant
   of the predicated saturating doubling multiply-accumulate with a
   scalar operand.  NOTE: the overloads are listed s8, s16, s32 here,
   unlike the s8/s32/s16 order used by neighbouring families — purely
   cosmetic.  Lanes disabled by __p keep __a's value.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlashq_m (int8x16_t __a, int8x16_t __b, int8_t __c, mve_pred16_t __p)
{
  return __arm_vqdmlashq_m_n_s8 (__a, __b, __c, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlashq_m (int16x8_t __a, int16x8_t __b, int16_t __c, mve_pred16_t __p)
{
  return __arm_vqdmlashq_m_n_s16 (__a, __b, __c, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlashq_m (int32x4_t __a, int32x4_t __b, int32_t __c, mve_pred16_t __p)
{
  return __arm_vqdmlashq_m_n_s32 (__a, __b, __c, __p);
}
27866 | ||
6a90680b ASDV |
/* __arm_vqrdmlsdhq_m: dispatch for the predicated saturating rounding
   doubling multiply-subtract-dual intrinsic (signed types only).
   Forwards to the type-suffixed variant; lanes disabled by __p keep
   the value from __inactive.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlsdhq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vqrdmlsdhq_m_s8 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlsdhq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vqrdmlsdhq_m_s32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlsdhq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vqrdmlsdhq_m_s16 (__inactive, __a, __b, __p);
}
27887 | ||
/* __arm_vqrdmlsdhxq_m: dispatch for the "exchange" (x) form of the
   predicated saturating rounding doubling multiply-subtract-dual
   intrinsic (signed types only).  Lanes disabled by __p keep the
   value from __inactive.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlsdhxq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vqrdmlsdhxq_m_s8 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlsdhxq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vqrdmlsdhxq_m_s32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlsdhxq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vqrdmlsdhxq_m_s16 (__inactive, __a, __b, __p);
}
27908 | ||
/* __arm_vqrdmulhq_m (scalar-operand overloads): predicated saturating
   rounding doubling multiply-high with a scalar second operand __b
   (hence the "_n_" suffix on the callees).  Lanes disabled by __p
   keep the value from __inactive.  The vector-operand overloads of
   the same name follow below.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmulhq_m (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __arm_vqrdmulhq_m_n_s8 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmulhq_m (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vqrdmulhq_m_n_s32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmulhq_m (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __arm_vqrdmulhq_m_n_s16 (__inactive, __a, __b, __p);
}
27929 | ||
/* __arm_vqrdmulhq_m (vector-operand overloads): predicated saturating
   rounding doubling multiply-high of two vectors.  Forwards to the
   type-suffixed variant; lanes disabled by __p keep the value from
   __inactive.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmulhq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vqrdmulhq_m_s8 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmulhq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vqrdmulhq_m_s32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmulhq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vqrdmulhq_m_s16 (__inactive, __a, __b, __p);
}
27950 | ||
/* __arm_vqrshlq_m: dispatch for the predicated saturating rounding
   shift-left intrinsic.  The shift-amount vector __b is signed for
   every element type, including the unsigned overloads (negative
   elements shift right, per the Arm shift-register convention).
   Lanes disabled by __p keep the value from __inactive.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vqrshlq_m_s8 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vqrshlq_m_s32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vqrshlq_m_s16 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_m (uint8x16_t __inactive, uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vqrshlq_m_u8 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_m (uint32x4_t __inactive, uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vqrshlq_m_u32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_m (uint16x8_t __inactive, uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vqrshlq_m_u16 (__inactive, __a, __b, __p);
}
27992 | ||
/* __arm_vqshlq_m_n: dispatch for the predicated saturating shift-left
   by an immediate __imm (must be a compile-time constant, as enforced
   by the underlying builtin).  Lanes disabled by __p keep the value
   from __inactive.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_n (int8x16_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vqshlq_m_n_s8 (__inactive, __a, __imm, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_n (int32x4_t __inactive, int32x4_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vqshlq_m_n_s32 (__inactive, __a, __imm, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_n (int16x8_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vqshlq_m_n_s16 (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_n (uint8x16_t __inactive, uint8x16_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vqshlq_m_n_u8 (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_n (uint32x4_t __inactive, uint32x4_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vqshlq_m_n_u32 (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_n (uint16x8_t __inactive, uint16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vqshlq_m_n_u16 (__inactive, __a, __imm, __p);
}
28034 | ||
/* __arm_vqshlq_m: dispatch for the predicated saturating shift-left
   with a per-lane shift-amount vector __b (always a signed vector,
   even for the unsigned overloads).  Lanes disabled by __p keep the
   value from __inactive.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vqshlq_m_s8 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vqshlq_m_s32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vqshlq_m_s16 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m (uint8x16_t __inactive, uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vqshlq_m_u8 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m (uint32x4_t __inactive, uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vqshlq_m_u32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m (uint16x8_t __inactive, uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vqshlq_m_u16 (__inactive, __a, __b, __p);
}
28076 | ||
/* __arm_vqsubq_m (scalar-operand overloads): predicated saturating
   subtract of a scalar __b from each lane of __a (the "_n_" callees).
   Lanes disabled by __p keep the value from __inactive.  The
   vector-operand overloads of the same name follow below.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_m (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __arm_vqsubq_m_n_s8 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_m (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vqsubq_m_n_s32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_m (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __arm_vqsubq_m_n_s16 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_m (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
{
  return __arm_vqsubq_m_n_u8 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_m (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
{
  return __arm_vqsubq_m_n_u32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_m (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
{
  return __arm_vqsubq_m_n_u16 (__inactive, __a, __b, __p);
}
28118 | ||
/* __arm_vqsubq_m (vector-operand overloads): predicated saturating
   subtract of two vectors.  Forwards to the type-suffixed variant;
   lanes disabled by __p keep the value from __inactive.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vqsubq_m_s8 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vqsubq_m_s32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vqsubq_m_s16 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_vqsubq_m_u8 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __arm_vqsubq_m_u32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __arm_vqsubq_m_u16 (__inactive, __a, __b, __p);
}
28160 | ||
/* __arm_vrhaddq_m: dispatch for the predicated rounding halving add
   intrinsic.  Forwards to the type-suffixed variant; lanes disabled
   by __p keep the value from __inactive.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vrhaddq_m_s8 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vrhaddq_m_s32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vrhaddq_m_s16 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_vrhaddq_m_u8 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __arm_vrhaddq_m_u32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __arm_vrhaddq_m_u16 (__inactive, __a, __b, __p);
}
28202 | ||
/* __arm_vrmulhq_m: dispatch for the predicated rounding
   multiply-returning-high-half intrinsic.  Forwards to the
   type-suffixed variant; lanes disabled by __p keep the value from
   __inactive.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vrmulhq_m_s8 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vrmulhq_m_s32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vrmulhq_m_s16 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_vrmulhq_m_u8 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __arm_vrmulhq_m_u32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __arm_vrmulhq_m_u16 (__inactive, __a, __b, __p);
}
28244 | ||
/* __arm_vrshlq_m: dispatch for the predicated rounding shift-left
   intrinsic.  The shift-amount vector __b is signed for every element
   type, including the unsigned overloads.  Lanes disabled by __p keep
   the value from __inactive.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vrshlq_m_s8 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vrshlq_m_s32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vrshlq_m_s16 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_m (uint8x16_t __inactive, uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vrshlq_m_u8 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_m (uint32x4_t __inactive, uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vrshlq_m_u32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_m (uint16x8_t __inactive, uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vrshlq_m_u16 (__inactive, __a, __b, __p);
}
28286 | ||
/* __arm_vrshrq_m: dispatch for the predicated rounding shift-right by
   an immediate __imm (compile-time constant, range-checked by the
   underlying builtin).  Lanes disabled by __p keep the value from
   __inactive.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrq_m (int8x16_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vrshrq_m_n_s8 (__inactive, __a, __imm, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrq_m (int32x4_t __inactive, int32x4_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vrshrq_m_n_s32 (__inactive, __a, __imm, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrq_m (int16x8_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vrshrq_m_n_s16 (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrq_m (uint8x16_t __inactive, uint8x16_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vrshrq_m_n_u8 (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrq_m (uint32x4_t __inactive, uint32x4_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vrshrq_m_n_u32 (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrq_m (uint16x8_t __inactive, uint16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vrshrq_m_n_u16 (__inactive, __a, __imm, __p);
}
28328 | ||
/* __arm_vshlq_m_n: dispatch for the predicated shift-left by an
   immediate __imm (compile-time constant).  Lanes disabled by __p
   keep the value from __inactive.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_m_n (int8x16_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vshlq_m_n_s8 (__inactive, __a, __imm, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_m_n (int32x4_t __inactive, int32x4_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vshlq_m_n_s32 (__inactive, __a, __imm, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_m_n (int16x8_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vshlq_m_n_s16 (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_m_n (uint8x16_t __inactive, uint8x16_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vshlq_m_n_u8 (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_m_n (uint32x4_t __inactive, uint32x4_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vshlq_m_n_u32 (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_m_n (uint16x8_t __inactive, uint16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vshlq_m_n_u16 (__inactive, __a, __imm, __p);
}
28370 | ||
/* __arm_vshrq_m: dispatch for the predicated shift-right by an
   immediate __imm (compile-time constant).  Lanes disabled by __p
   keep the value from __inactive.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrq_m (int8x16_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vshrq_m_n_s8 (__inactive, __a, __imm, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrq_m (int32x4_t __inactive, int32x4_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vshrq_m_n_s32 (__inactive, __a, __imm, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrq_m (int16x8_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vshrq_m_n_s16 (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrq_m (uint8x16_t __inactive, uint8x16_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vshrq_m_n_u8 (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrq_m (uint32x4_t __inactive, uint32x4_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vshrq_m_n_u32 (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrq_m (uint16x8_t __inactive, uint16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vshrq_m_n_u16 (__inactive, __a, __imm, __p);
}
28412 | ||
/* __arm_vsliq_m: dispatch for the predicated shift-left-and-insert
   intrinsic (VSLI).  Note the signature differs from the merging
   families above: __a is both an input and the destination pattern
   (no separate __inactive operand).  __imm is a compile-time
   constant.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsliq_m (int8x16_t __a, int8x16_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vsliq_m_n_s8 (__a, __b, __imm, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsliq_m (int32x4_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vsliq_m_n_s32 (__a, __b, __imm, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsliq_m (int16x8_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vsliq_m_n_s16 (__a, __b, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsliq_m (uint8x16_t __a, uint8x16_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vsliq_m_n_u8 (__a, __b, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsliq_m (uint32x4_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vsliq_m_n_u32 (__a, __b, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsliq_m (uint16x8_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vsliq_m_n_u16 (__a, __b, __imm, __p);
}
28454 | ||
/* __arm_vsubq_m (vector, scalar forms): predicated subtract of a scalar
   __b from vector __a, merging with __inactive under predicate __p.
   Each overload forwards to the matching _n_<type> wrapper.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_m (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __arm_vsubq_m_n_s8 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_m (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vsubq_m_n_s32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_m (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __arm_vsubq_m_n_s16 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_m (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
{
  return __arm_vsubq_m_n_u8 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_m (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
{
  return __arm_vsubq_m_n_u32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_m (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
{
  return __arm_vsubq_m_n_u16 (__inactive, __a, __b, __p);
}
28496 | ||
/* __arm_vmlaldavaq_p / __arm_vmlaldavaxq_p: predicated multiply-accumulate
   long across vector, with 64-bit accumulator __a.  Overloads forward to
   the type-suffixed wrappers (the "x" variants use the exchanged-operand
   form).  */
__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaldavaq_p (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
{
  return __arm_vmlaldavaq_p_s32 (__a, __b, __c, __p);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaldavaq_p (int64_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
{
  return __arm_vmlaldavaq_p_s16 (__a, __b, __c, __p);
}

__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaldavaq_p (uint64_t __a, uint32x4_t __b, uint32x4_t __c, mve_pred16_t __p)
{
  return __arm_vmlaldavaq_p_u32 (__a, __b, __c, __p);
}

__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaldavaq_p (uint64_t __a, uint16x8_t __b, uint16x8_t __c, mve_pred16_t __p)
{
  return __arm_vmlaldavaq_p_u16 (__a, __b, __c, __p);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaldavaxq_p (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
{
  return __arm_vmlaldavaxq_p_s32 (__a, __b, __c, __p);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaldavaxq_p (int64_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
{
  return __arm_vmlaldavaxq_p_s16 (__a, __b, __c, __p);
}
6a90680b ASDV |
/* __arm_vmlsldavaq_p / __arm_vmlsldavaxq_p: predicated multiply-subtract
   long across vector with 64-bit accumulator; signed element types only.
   Overloads forward to the type-suffixed wrappers.  */
__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsldavaq_p (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
{
  return __arm_vmlsldavaq_p_s32 (__a, __b, __c, __p);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsldavaq_p (int64_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
{
  return __arm_vmlsldavaq_p_s16 (__a, __b, __c, __p);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsldavaxq_p (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
{
  return __arm_vmlsldavaxq_p_s32 (__a, __b, __c, __p);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsldavaxq_p (int64_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
{
  return __arm_vmlsldavaxq_p_s16 (__a, __b, __c, __p);
}
28566 | ||
/* __arm_vmullbq_poly_m / __arm_vmulltq_poly_m: predicated polynomial
   multiply-long of bottom/top halves, widening the element size; result
   merged with __inactive under __p.  Overloads select the _p8/_p16
   wrapper from the argument widths.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_poly_m (uint16x8_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_vmullbq_poly_m_p8 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_poly_m (uint32x4_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __arm_vmullbq_poly_m_p16 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_poly_m (uint16x8_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_vmulltq_poly_m_p8 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_poly_m (uint32x4_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __arm_vmulltq_poly_m_p16 (__inactive, __a, __b, __p);
}
28594 | ||
/* __arm_vqdmullbq_m / __arm_vqdmulltq_m: predicated saturating doubling
   multiply-long of bottom/top halves.  Overloads with a scalar __b go
   through the _n_ wrappers; vector-by-vector overloads use the plain
   type-suffixed wrappers.  */
__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmullbq_m (int64x2_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vqdmullbq_m_n_s32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmullbq_m (int32x4_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __arm_vqdmullbq_m_n_s16 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmullbq_m (int64x2_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vqdmullbq_m_s32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmullbq_m (int32x4_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vqdmullbq_m_s16 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulltq_m (int64x2_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vqdmulltq_m_n_s32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulltq_m (int32x4_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __arm_vqdmulltq_m_n_s16 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulltq_m (int64x2_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vqdmulltq_m_s32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulltq_m (int32x4_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vqdmulltq_m_s16 (__inactive, __a, __b, __p);
}
28650 | ||
/* __arm_vqrshrnbq_m / __arm_vqrshrntq_m / __arm_vqrshrunbq_m /
   __arm_vqrshruntq_m: predicated saturating rounding shift-right-narrow,
   writing the bottom ("b") or top ("t") half of the narrower result
   vector __a; the "u" forms narrow a signed source to an unsigned
   result.  Each overload forwards to the _n_<type> wrapper selected by
   the argument types.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshrnbq_m (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vqrshrnbq_m_n_s32 (__a, __b, __imm, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshrnbq_m (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vqrshrnbq_m_n_s16 (__a, __b, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshrnbq_m (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vqrshrnbq_m_n_u32 (__a, __b, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshrnbq_m (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vqrshrnbq_m_n_u16 (__a, __b, __imm, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshrntq_m (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vqrshrntq_m_n_s32 (__a, __b, __imm, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshrntq_m (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vqrshrntq_m_n_s16 (__a, __b, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshrntq_m (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vqrshrntq_m_n_u32 (__a, __b, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshrntq_m (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vqrshrntq_m_n_u16 (__a, __b, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshrunbq_m (uint16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vqrshrunbq_m_n_s32 (__a, __b, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshrunbq_m (uint8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vqrshrunbq_m_n_s16 (__a, __b, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshruntq_m (uint16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vqrshruntq_m_n_s32 (__a, __b, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshruntq_m (uint8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vqrshruntq_m_n_s16 (__a, __b, __imm, __p);
}
28734 | ||
/* __arm_vqshrnbq_m / __arm_vqshrntq_m / __arm_vqshrunbq_m /
   __arm_vqshruntq_m: predicated saturating (non-rounding)
   shift-right-narrow into the bottom/top half of __a; the "u" forms
   narrow signed sources to unsigned results.  Overloads forward to the
   _n_<type> wrapper selected by argument types.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshrnbq_m (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vqshrnbq_m_n_s32 (__a, __b, __imm, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshrnbq_m (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vqshrnbq_m_n_s16 (__a, __b, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshrnbq_m (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vqshrnbq_m_n_u32 (__a, __b, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshrnbq_m (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vqshrnbq_m_n_u16 (__a, __b, __imm, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshrntq_m (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vqshrntq_m_n_s32 (__a, __b, __imm, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshrntq_m (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vqshrntq_m_n_s16 (__a, __b, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshrntq_m (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vqshrntq_m_n_u32 (__a, __b, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshrntq_m (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vqshrntq_m_n_u16 (__a, __b, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshrunbq_m (uint16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vqshrunbq_m_n_s32 (__a, __b, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshrunbq_m (uint8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vqshrunbq_m_n_s16 (__a, __b, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshruntq_m (uint16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vqshruntq_m_n_s32 (__a, __b, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshruntq_m (uint8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vqshruntq_m_n_s16 (__a, __b, __imm, __p);
}
28818 | ||
/* __arm_vrmlaldavhaq_p family: predicated rounding multiply-accumulate/
   subtract long-high across a 32-bit element vector, with 64-bit
   accumulator __a.  Only 32-bit element forms exist; overloads forward
   to the _s32/_u32 wrappers ("x" = exchanged-operand variants).  */
__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmlaldavhaq_p (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
{
  return __arm_vrmlaldavhaq_p_s32 (__a, __b, __c, __p);
}

__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmlaldavhaq_p (uint64_t __a, uint32x4_t __b, uint32x4_t __c, mve_pred16_t __p)
{
  return __arm_vrmlaldavhaq_p_u32 (__a, __b, __c, __p);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmlaldavhaxq_p (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
{
  return __arm_vrmlaldavhaxq_p_s32 (__a, __b, __c, __p);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmlsldavhaq_p (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
{
  return __arm_vrmlsldavhaq_p_s32 (__a, __b, __c, __p);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmlsldavhaxq_p (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
{
  return __arm_vrmlsldavhaxq_p_s32 (__a, __b, __c, __p);
}
28853 | ||
/* __arm_vrshrnbq_m / __arm_vrshrntq_m: predicated rounding
   shift-right-narrow into the bottom/top half of __a.  Overloads forward
   to the _n_<type> wrapper selected by argument types.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrnbq_m (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vrshrnbq_m_n_s32 (__a, __b, __imm, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrnbq_m (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vrshrnbq_m_n_s16 (__a, __b, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrnbq_m (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vrshrnbq_m_n_u32 (__a, __b, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrnbq_m (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vrshrnbq_m_n_u16 (__a, __b, __imm, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrntq_m (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vrshrntq_m_n_s32 (__a, __b, __imm, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrntq_m (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vrshrntq_m_n_s16 (__a, __b, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrntq_m (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vrshrntq_m_n_u32 (__a, __b, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrntq_m (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vrshrntq_m_n_u16 (__a, __b, __imm, __p);
}
28909 | ||
/* __arm_vshllbq_m / __arm_vshlltq_m: predicated shift-left-long of the
   bottom/top half of __a, widening the element size; result merged with
   __inactive under __p.  Overloads forward to the _n_<type> wrapper.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshllbq_m (int16x8_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vshllbq_m_n_s8 (__inactive, __a, __imm, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshllbq_m (int32x4_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vshllbq_m_n_s16 (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshllbq_m (uint16x8_t __inactive, uint8x16_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vshllbq_m_n_u8 (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshllbq_m (uint32x4_t __inactive, uint16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vshllbq_m_n_u16 (__inactive, __a, __imm, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlltq_m (int16x8_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vshlltq_m_n_s8 (__inactive, __a, __imm, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlltq_m (int32x4_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vshlltq_m_n_s16 (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlltq_m (uint16x8_t __inactive, uint8x16_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vshlltq_m_n_u8 (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlltq_m (uint32x4_t __inactive, uint16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vshlltq_m_n_u16 (__inactive, __a, __imm, __p);
}
28965 | ||
/* __arm_vshrnbq_m / __arm_vshrntq_m: predicated plain shift-right-narrow
   into the bottom/top half of __a.  Overloads forward to the _n_<type>
   wrapper selected by argument types.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrnbq_m (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vshrnbq_m_n_s32 (__a, __b, __imm, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrnbq_m (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vshrnbq_m_n_s16 (__a, __b, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrnbq_m (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vshrnbq_m_n_u32 (__a, __b, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrnbq_m (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vshrnbq_m_n_u16 (__a, __b, __imm, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrntq_m (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vshrntq_m_n_s32 (__a, __b, __imm, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrntq_m (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vshrntq_m_n_s16 (__a, __b, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrntq_m (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vshrntq_m_n_u32 (__a, __b, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrntq_m (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vshrntq_m_n_u16 (__a, __b, __imm, __p);
}
29021 | ||
/* __arm_vstrbq_scatter_offset: store each element of __value as a byte
   at __base + the per-lane byte offset in __offset.  Returns void; each
   overload forwards to the wrapper suffixed by the value's element
   type.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrbq_scatter_offset (int8_t * __base, uint8x16_t __offset, int8x16_t __value)
{
  __arm_vstrbq_scatter_offset_s8 (__base, __offset, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrbq_scatter_offset (int8_t * __base, uint32x4_t __offset, int32x4_t __value)
{
  __arm_vstrbq_scatter_offset_s32 (__base, __offset, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrbq_scatter_offset (int8_t * __base, uint16x8_t __offset, int16x8_t __value)
{
  __arm_vstrbq_scatter_offset_s16 (__base, __offset, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrbq_scatter_offset (uint8_t * __base, uint8x16_t __offset, uint8x16_t __value)
{
  __arm_vstrbq_scatter_offset_u8 (__base, __offset, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrbq_scatter_offset (uint8_t * __base, uint32x4_t __offset, uint32x4_t __value)
{
  __arm_vstrbq_scatter_offset_u32 (__base, __offset, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrbq_scatter_offset (uint8_t * __base, uint16x8_t __offset, uint16x8_t __value)
{
  __arm_vstrbq_scatter_offset_u16 (__base, __offset, __value);
}
29063 | ||
/* __arm_vstrbq: contiguous byte store of __value at __addr (wider
   element types are narrowed to bytes by the underlying intrinsic —
   see the type-suffixed callees).  Overloads forward by value type.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrbq (int8_t * __addr, int8x16_t __value)
{
  __arm_vstrbq_s8 (__addr, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrbq (int8_t * __addr, int32x4_t __value)
{
  __arm_vstrbq_s32 (__addr, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrbq (int8_t * __addr, int16x8_t __value)
{
  __arm_vstrbq_s16 (__addr, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrbq (uint8_t * __addr, uint8x16_t __value)
{
  __arm_vstrbq_u8 (__addr, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrbq (uint8_t * __addr, uint32x4_t __value)
{
  __arm_vstrbq_u32 (__addr, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrbq (uint8_t * __addr, uint16x8_t __value)
{
  __arm_vstrbq_u16 (__addr, __value);
}
29105 | ||
/* __arm_vstrwq_scatter_base: word scatter-store of __value using the
   per-lane base addresses in __addr plus the immediate __offset.
   Overloads forward to the _s32/_u32 wrapper by value type.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_base (uint32x4_t __addr, const int __offset, int32x4_t __value)
{
  __arm_vstrwq_scatter_base_s32 (__addr, __offset, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_base (uint32x4_t __addr, const int __offset, uint32x4_t __value)
{
  __arm_vstrwq_scatter_base_u32 (__addr, __offset, __value);
}
29119 | ||
/* __arm_vldrbq_gather_offset: gather-load of bytes from __base + the
   per-lane byte offsets in __offset.  The result element width follows
   the offset vector's width; overloads forward to the type-suffixed
   wrapper chosen from the base pointer signedness and offset type.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrbq_gather_offset (uint8_t const * __base, uint8x16_t __offset)
{
  return __arm_vldrbq_gather_offset_u8 (__base, __offset);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrbq_gather_offset (int8_t const * __base, uint8x16_t __offset)
{
  return __arm_vldrbq_gather_offset_s8 (__base, __offset);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrbq_gather_offset (uint8_t const * __base, uint16x8_t __offset)
{
  return __arm_vldrbq_gather_offset_u16 (__base, __offset);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrbq_gather_offset (int8_t const * __base, uint16x8_t __offset)
{
  return __arm_vldrbq_gather_offset_s16 (__base, __offset);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrbq_gather_offset (uint8_t const * __base, uint32x4_t __offset)
{
  return __arm_vldrbq_gather_offset_u32 (__base, __offset);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrbq_gather_offset (int8_t const * __base, uint32x4_t __offset)
{
  return __arm_vldrbq_gather_offset_s32 (__base, __offset);
}
29161 | ||
/* Overloads of __arm_vstrbq_p: byte-store wrappers that also forward the
   mve_pred16_t predicate __p to the type-suffixed _p intrinsics.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrbq_p (int8_t * __addr, int8x16_t __value, mve_pred16_t __p)
{
  __arm_vstrbq_p_s8 (__addr, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrbq_p (int8_t * __addr, int32x4_t __value, mve_pred16_t __p)
{
  __arm_vstrbq_p_s32 (__addr, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrbq_p (int8_t * __addr, int16x8_t __value, mve_pred16_t __p)
{
  __arm_vstrbq_p_s16 (__addr, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrbq_p (uint8_t * __addr, uint8x16_t __value, mve_pred16_t __p)
{
  __arm_vstrbq_p_u8 (__addr, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrbq_p (uint8_t * __addr, uint32x4_t __value, mve_pred16_t __p)
{
  __arm_vstrbq_p_u32 (__addr, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrbq_p (uint8_t * __addr, uint16x8_t __value, mve_pred16_t __p)
{
  __arm_vstrbq_p_u16 (__addr, __value, __p);
}
29203 | ||
/* Overloads of __arm_vstrbq_scatter_offset_p: predicated byte
   scatter-store dispatch; argument types pick the suffixed intrinsic.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrbq_scatter_offset_p (int8_t * __base, uint8x16_t __offset, int8x16_t __value, mve_pred16_t __p)
{
  __arm_vstrbq_scatter_offset_p_s8 (__base, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrbq_scatter_offset_p (int8_t * __base, uint32x4_t __offset, int32x4_t __value, mve_pred16_t __p)
{
  __arm_vstrbq_scatter_offset_p_s32 (__base, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrbq_scatter_offset_p (int8_t * __base, uint16x8_t __offset, int16x8_t __value, mve_pred16_t __p)
{
  __arm_vstrbq_scatter_offset_p_s16 (__base, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrbq_scatter_offset_p (uint8_t * __base, uint8x16_t __offset, uint8x16_t __value, mve_pred16_t __p)
{
  __arm_vstrbq_scatter_offset_p_u8 (__base, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrbq_scatter_offset_p (uint8_t * __base, uint32x4_t __offset, uint32x4_t __value, mve_pred16_t __p)
{
  __arm_vstrbq_scatter_offset_p_u32 (__base, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrbq_scatter_offset_p (uint8_t * __base, uint16x8_t __offset, uint16x8_t __value, mve_pred16_t __p)
{
  __arm_vstrbq_scatter_offset_p_u16 (__base, __offset, __value, __p);
}
29245 | ||
/* Overloads of __arm_vstrwq_scatter_base_p: predicated variant of
   __arm_vstrwq_scatter_base; signedness of __value picks _s32/_u32.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_base_p (uint32x4_t __addr, const int __offset, int32x4_t __value, mve_pred16_t __p)
{
  __arm_vstrwq_scatter_base_p_s32 (__addr, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_base_p (uint32x4_t __addr, const int __offset, uint32x4_t __value, mve_pred16_t __p)
{
  __arm_vstrwq_scatter_base_p_u32 (__addr, __offset, __value, __p);
}
29259 | ||
/* Overloads of __arm_vldrbq_gather_offset_z: predicated (_z) variant of
   the byte gather-load dispatch; __p is forwarded unchanged.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrbq_gather_offset_z (int8_t const * __base, uint8x16_t __offset, mve_pred16_t __p)
{
  return __arm_vldrbq_gather_offset_z_s8 (__base, __offset, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrbq_gather_offset_z (int8_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
{
  return __arm_vldrbq_gather_offset_z_s32 (__base, __offset, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrbq_gather_offset_z (int8_t const * __base, uint16x8_t __offset, mve_pred16_t __p)
{
  return __arm_vldrbq_gather_offset_z_s16 (__base, __offset, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrbq_gather_offset_z (uint8_t const * __base, uint8x16_t __offset, mve_pred16_t __p)
{
  return __arm_vldrbq_gather_offset_z_u8 (__base, __offset, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrbq_gather_offset_z (uint8_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
{
  return __arm_vldrbq_gather_offset_z_u32 (__base, __offset, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrbq_gather_offset_z (uint8_t const * __base, uint16x8_t __offset, mve_pred16_t __p)
{
  return __arm_vldrbq_gather_offset_z_u16 (__base, __offset, __p);
}
29301 | ||
/* Overloads of __arm_vld1q (contiguous vector load): the pointee type of
   __base selects the suffixed intrinsic and the returned vector type.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vld1q (int8_t const * __base)
{
  return __arm_vld1q_s8 (__base);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vld1q (int32_t const * __base)
{
  return __arm_vld1q_s32 (__base);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vld1q (int16_t const * __base)
{
  return __arm_vld1q_s16 (__base);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vld1q (uint8_t const * __base)
{
  return __arm_vld1q_u8 (__base);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vld1q (uint32_t const * __base)
{
  return __arm_vld1q_u32 (__base);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vld1q (uint16_t const * __base)
{
  return __arm_vld1q_u16 (__base);
}
29343 | ||
/* Overloads of __arm_vldrhq_gather_offset: halfword gather-load dispatch;
   (base type, offset width) pair selects the suffixed intrinsic.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_gather_offset (int16_t const * __base, uint32x4_t __offset)
{
  return __arm_vldrhq_gather_offset_s32 (__base, __offset);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_gather_offset (int16_t const * __base, uint16x8_t __offset)
{
  return __arm_vldrhq_gather_offset_s16 (__base, __offset);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_gather_offset (uint16_t const * __base, uint32x4_t __offset)
{
  return __arm_vldrhq_gather_offset_u32 (__base, __offset);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_gather_offset (uint16_t const * __base, uint16x8_t __offset)
{
  return __arm_vldrhq_gather_offset_u16 (__base, __offset);
}
29371 | ||
/* Overloads of __arm_vldrhq_gather_offset_z: predicated (_z) halfword
   gather-load dispatch; __p is forwarded unchanged.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_gather_offset_z (int16_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
{
  return __arm_vldrhq_gather_offset_z_s32 (__base, __offset, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_gather_offset_z (int16_t const * __base, uint16x8_t __offset, mve_pred16_t __p)
{
  return __arm_vldrhq_gather_offset_z_s16 (__base, __offset, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_gather_offset_z (uint16_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
{
  return __arm_vldrhq_gather_offset_z_u32 (__base, __offset, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_gather_offset_z (uint16_t const * __base, uint16x8_t __offset, mve_pred16_t __p)
{
  return __arm_vldrhq_gather_offset_z_u16 (__base, __offset, __p);
}
29399 | ||
/* Overloads of __arm_vldrhq_gather_shifted_offset: shifted-offset variant
   of the halfword gather-load dispatch.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_gather_shifted_offset (int16_t const * __base, uint32x4_t __offset)
{
  return __arm_vldrhq_gather_shifted_offset_s32 (__base, __offset);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_gather_shifted_offset (int16_t const * __base, uint16x8_t __offset)
{
  return __arm_vldrhq_gather_shifted_offset_s16 (__base, __offset);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_gather_shifted_offset (uint16_t const * __base, uint32x4_t __offset)
{
  return __arm_vldrhq_gather_shifted_offset_u32 (__base, __offset);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_gather_shifted_offset (uint16_t const * __base, uint16x8_t __offset)
{
  return __arm_vldrhq_gather_shifted_offset_u16 (__base, __offset);
}
29427 | ||
/* Overloads of __arm_vldrhq_gather_shifted_offset_z: predicated (_z)
   shifted-offset halfword gather-load dispatch.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_gather_shifted_offset_z (int16_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
{
  return __arm_vldrhq_gather_shifted_offset_z_s32 (__base, __offset, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_gather_shifted_offset_z (int16_t const * __base, uint16x8_t __offset, mve_pred16_t __p)
{
  return __arm_vldrhq_gather_shifted_offset_z_s16 (__base, __offset, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_gather_shifted_offset_z (uint16_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
{
  return __arm_vldrhq_gather_shifted_offset_z_u32 (__base, __offset, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_gather_shifted_offset_z (uint16_t const * __base, uint16x8_t __offset, mve_pred16_t __p)
{
  return __arm_vldrhq_gather_shifted_offset_z_u16 (__base, __offset, __p);
}
29455 | ||
/* Overloads of __arm_vldrdq_gather_offset: doubleword gather-load
   dispatch on the signedness of the pointee of __base.  */
__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrdq_gather_offset (int64_t const * __base, uint64x2_t __offset)
{
  return __arm_vldrdq_gather_offset_s64 (__base, __offset);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrdq_gather_offset (uint64_t const * __base, uint64x2_t __offset)
{
  return __arm_vldrdq_gather_offset_u64 (__base, __offset);
}
29469 | ||
/* Overloads of __arm_vldrdq_gather_offset_z: predicated (_z) doubleword
   gather-load dispatch; __p is forwarded unchanged.  */
__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrdq_gather_offset_z (int64_t const * __base, uint64x2_t __offset, mve_pred16_t __p)
{
  return __arm_vldrdq_gather_offset_z_s64 (__base, __offset, __p);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrdq_gather_offset_z (uint64_t const * __base, uint64x2_t __offset, mve_pred16_t __p)
{
  return __arm_vldrdq_gather_offset_z_u64 (__base, __offset, __p);
}
29483 | ||
/* Overloads of __arm_vldrdq_gather_shifted_offset: shifted-offset variant
   of the doubleword gather-load dispatch.  */
__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrdq_gather_shifted_offset (int64_t const * __base, uint64x2_t __offset)
{
  return __arm_vldrdq_gather_shifted_offset_s64 (__base, __offset);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrdq_gather_shifted_offset (uint64_t const * __base, uint64x2_t __offset)
{
  return __arm_vldrdq_gather_shifted_offset_u64 (__base, __offset);
}
29497 | ||
/* Overloads of __arm_vldrdq_gather_shifted_offset_z: predicated (_z)
   shifted-offset doubleword gather-load dispatch.  */
__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrdq_gather_shifted_offset_z (int64_t const * __base, uint64x2_t __offset, mve_pred16_t __p)
{
  return __arm_vldrdq_gather_shifted_offset_z_s64 (__base, __offset, __p);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrdq_gather_shifted_offset_z (uint64_t const * __base, uint64x2_t __offset, mve_pred16_t __p)
{
  return __arm_vldrdq_gather_shifted_offset_z_u64 (__base, __offset, __p);
}
29511 | ||
/* Overloads of __arm_vldrwq_gather_offset: word gather-load dispatch on
   the signedness of the pointee of __base.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrwq_gather_offset (int32_t const * __base, uint32x4_t __offset)
{
  return __arm_vldrwq_gather_offset_s32 (__base, __offset);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrwq_gather_offset (uint32_t const * __base, uint32x4_t __offset)
{
  return __arm_vldrwq_gather_offset_u32 (__base, __offset);
}
29525 | ||
/* Overloads of __arm_vldrwq_gather_offset_z: predicated (_z) word
   gather-load dispatch; __p is forwarded unchanged.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrwq_gather_offset_z (int32_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
{
  return __arm_vldrwq_gather_offset_z_s32 (__base, __offset, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrwq_gather_offset_z (uint32_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
{
  return __arm_vldrwq_gather_offset_z_u32 (__base, __offset, __p);
}
29539 | ||
/* Overloads of __arm_vldrwq_gather_shifted_offset: shifted-offset variant
   of the word gather-load dispatch.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrwq_gather_shifted_offset (int32_t const * __base, uint32x4_t __offset)
{
  return __arm_vldrwq_gather_shifted_offset_s32 (__base, __offset);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrwq_gather_shifted_offset (uint32_t const * __base, uint32x4_t __offset)
{
  return __arm_vldrwq_gather_shifted_offset_u32 (__base, __offset);
}
29553 | ||
/* Overloads of __arm_vldrwq_gather_shifted_offset_z: predicated (_z)
   shifted-offset word gather-load dispatch.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrwq_gather_shifted_offset_z (int32_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
{
  return __arm_vldrwq_gather_shifted_offset_z_s32 (__base, __offset, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrwq_gather_shifted_offset_z (uint32_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
{
  return __arm_vldrwq_gather_shifted_offset_z_u32 (__base, __offset, __p);
}
29567 | ||
/* Overloads of __arm_vst1q (contiguous vector store): the pointee type of
   __addr and the vector type of __value select the suffixed intrinsic.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst1q (int8_t * __addr, int8x16_t __value)
{
  __arm_vst1q_s8 (__addr, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst1q (int32_t * __addr, int32x4_t __value)
{
  __arm_vst1q_s32 (__addr, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst1q (int16_t * __addr, int16x8_t __value)
{
  __arm_vst1q_s16 (__addr, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst1q (uint8_t * __addr, uint8x16_t __value)
{
  __arm_vst1q_u8 (__addr, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst1q (uint32_t * __addr, uint32x4_t __value)
{
  __arm_vst1q_u32 (__addr, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst1q (uint16_t * __addr, uint16x8_t __value)
{
  __arm_vst1q_u16 (__addr, __value);
}
29609 | ||
/* Overloads of __arm_vstrhq_scatter_offset: halfword scatter-store
   dispatch; (base type, value width) pair selects the suffixed intrinsic.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_scatter_offset (int16_t * __base, uint32x4_t __offset, int32x4_t __value)
{
  __arm_vstrhq_scatter_offset_s32 (__base, __offset, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_scatter_offset (int16_t * __base, uint16x8_t __offset, int16x8_t __value)
{
  __arm_vstrhq_scatter_offset_s16 (__base, __offset, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_scatter_offset (uint16_t * __base, uint32x4_t __offset, uint32x4_t __value)
{
  __arm_vstrhq_scatter_offset_u32 (__base, __offset, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_scatter_offset (uint16_t * __base, uint16x8_t __offset, uint16x8_t __value)
{
  __arm_vstrhq_scatter_offset_u16 (__base, __offset, __value);
}
29637 | ||
/* Overloads of __arm_vstrhq_scatter_offset_p: predicated halfword
   scatter-store dispatch; __p is forwarded unchanged.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_scatter_offset_p (int16_t * __base, uint32x4_t __offset, int32x4_t __value, mve_pred16_t __p)
{
  __arm_vstrhq_scatter_offset_p_s32 (__base, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_scatter_offset_p (int16_t * __base, uint16x8_t __offset, int16x8_t __value, mve_pred16_t __p)
{
  __arm_vstrhq_scatter_offset_p_s16 (__base, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_scatter_offset_p (uint16_t * __base, uint32x4_t __offset, uint32x4_t __value, mve_pred16_t __p)
{
  __arm_vstrhq_scatter_offset_p_u32 (__base, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_scatter_offset_p (uint16_t * __base, uint16x8_t __offset, uint16x8_t __value, mve_pred16_t __p)
{
  __arm_vstrhq_scatter_offset_p_u16 (__base, __offset, __value, __p);
}
29665 | ||
/* Overloads of __arm_vstrhq_scatter_shifted_offset: shifted-offset
   variant of the halfword scatter-store dispatch.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_scatter_shifted_offset (int16_t * __base, uint32x4_t __offset, int32x4_t __value)
{
  __arm_vstrhq_scatter_shifted_offset_s32 (__base, __offset, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_scatter_shifted_offset (int16_t * __base, uint16x8_t __offset, int16x8_t __value)
{
  __arm_vstrhq_scatter_shifted_offset_s16 (__base, __offset, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_scatter_shifted_offset (uint16_t * __base, uint32x4_t __offset, uint32x4_t __value)
{
  __arm_vstrhq_scatter_shifted_offset_u32 (__base, __offset, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_scatter_shifted_offset (uint16_t * __base, uint16x8_t __offset, uint16x8_t __value)
{
  __arm_vstrhq_scatter_shifted_offset_u16 (__base, __offset, __value);
}
29693 | ||
/* Overloads of __arm_vstrhq_scatter_shifted_offset_p: predicated
   shifted-offset halfword scatter-store dispatch.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_scatter_shifted_offset_p (int16_t * __base, uint32x4_t __offset, int32x4_t __value, mve_pred16_t __p)
{
  __arm_vstrhq_scatter_shifted_offset_p_s32 (__base, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_scatter_shifted_offset_p (int16_t * __base, uint16x8_t __offset, int16x8_t __value, mve_pred16_t __p)
{
  __arm_vstrhq_scatter_shifted_offset_p_s16 (__base, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_scatter_shifted_offset_p (uint16_t * __base, uint32x4_t __offset, uint32x4_t __value, mve_pred16_t __p)
{
  __arm_vstrhq_scatter_shifted_offset_p_u32 (__base, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_scatter_shifted_offset_p (uint16_t * __base, uint16x8_t __offset, uint16x8_t __value, mve_pred16_t __p)
{
  __arm_vstrhq_scatter_shifted_offset_p_u16 (__base, __offset, __value, __p);
}
29721 | ||
/* Overloads of __arm_vstrhq (narrowing halfword store): dispatch on the
   pointer/vector argument types to the suffixed intrinsics.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq (int16_t * __addr, int32x4_t __value)
{
  __arm_vstrhq_s32 (__addr, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq (int16_t * __addr, int16x8_t __value)
{
  __arm_vstrhq_s16 (__addr, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq (uint16_t * __addr, uint32x4_t __value)
{
  __arm_vstrhq_u32 (__addr, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq (uint16_t * __addr, uint16x8_t __value)
{
  __arm_vstrhq_u16 (__addr, __value);
}
29749 | ||
/* Overloads of __arm_vstrhq_p: predicated halfword store dispatch;
   __p is forwarded unchanged to the suffixed _p intrinsics.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_p (int16_t * __addr, int32x4_t __value, mve_pred16_t __p)
{
  __arm_vstrhq_p_s32 (__addr, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_p (int16_t * __addr, int16x8_t __value, mve_pred16_t __p)
{
  __arm_vstrhq_p_s16 (__addr, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_p (uint16_t * __addr, uint32x4_t __value, mve_pred16_t __p)
{
  __arm_vstrhq_p_u32 (__addr, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_p (uint16_t * __addr, uint16x8_t __value, mve_pred16_t __p)
{
  __arm_vstrhq_p_u16 (__addr, __value, __p);
}
29777 | ||
/* Overloads of __arm_vstrwq (word store): dispatch on signedness of
   __value to the _s32/_u32 intrinsics.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq (int32_t * __addr, int32x4_t __value)
{
  __arm_vstrwq_s32 (__addr, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq (uint32_t * __addr, uint32x4_t __value)
{
  __arm_vstrwq_u32 (__addr, __value);
}
29791 | ||
/* Overloads of __arm_vstrwq_p: predicated word store dispatch;
   __p is forwarded unchanged.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_p (int32_t * __addr, int32x4_t __value, mve_pred16_t __p)
{
  __arm_vstrwq_p_s32 (__addr, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_p (uint32_t * __addr, uint32x4_t __value, mve_pred16_t __p)
{
  __arm_vstrwq_p_u32 (__addr, __value, __p);
}
29805 | ||
/* Overloaded 64-bit scatter stores.  The _base forms take a vector of
   base addresses (uint64x2_t) plus an immediate offset; the _offset
   and _shifted_offset forms take a scalar base pointer plus a vector
   of offsets.  NOTE(review): the scaling applied to __offset in the
   shifted forms is implied by the intrinsic name — confirm against the
   ACLE MVE specification.  The _p variants are lane-predicated.  All
   overloads simply forward to the type-suffixed implementations.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrdq_scatter_base_p (uint64x2_t __addr, const int __offset, int64x2_t __value, mve_pred16_t __p)
{
  __arm_vstrdq_scatter_base_p_s64 (__addr, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrdq_scatter_base_p (uint64x2_t __addr, const int __offset, uint64x2_t __value, mve_pred16_t __p)
{
  __arm_vstrdq_scatter_base_p_u64 (__addr, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrdq_scatter_base (uint64x2_t __addr, const int __offset, int64x2_t __value)
{
  __arm_vstrdq_scatter_base_s64 (__addr, __offset, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrdq_scatter_base (uint64x2_t __addr, const int __offset, uint64x2_t __value)
{
  __arm_vstrdq_scatter_base_u64 (__addr, __offset, __value);
}

/* Scalar base + vector of byte offsets.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrdq_scatter_offset_p (int64_t * __base, uint64x2_t __offset, int64x2_t __value, mve_pred16_t __p)
{
  __arm_vstrdq_scatter_offset_p_s64 (__base, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrdq_scatter_offset_p (uint64_t * __base, uint64x2_t __offset, uint64x2_t __value, mve_pred16_t __p)
{
  __arm_vstrdq_scatter_offset_p_u64 (__base, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrdq_scatter_offset (int64_t * __base, uint64x2_t __offset, int64x2_t __value)
{
  __arm_vstrdq_scatter_offset_s64 (__base, __offset, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrdq_scatter_offset (uint64_t * __base, uint64x2_t __offset, uint64x2_t __value)
{
  __arm_vstrdq_scatter_offset_u64 (__base, __offset, __value);
}

/* Scalar base + vector of element (shifted) offsets.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrdq_scatter_shifted_offset_p (int64_t * __base, uint64x2_t __offset, int64x2_t __value, mve_pred16_t __p)
{
  __arm_vstrdq_scatter_shifted_offset_p_s64 (__base, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrdq_scatter_shifted_offset_p (uint64_t * __base, uint64x2_t __offset, uint64x2_t __value, mve_pred16_t __p)
{
  __arm_vstrdq_scatter_shifted_offset_p_u64 (__base, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrdq_scatter_shifted_offset (int64_t * __base, uint64x2_t __offset, int64x2_t __value)
{
  __arm_vstrdq_scatter_shifted_offset_s64 (__base, __offset, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrdq_scatter_shifted_offset (uint64x2_t __offset, uint64x2_t __value);
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrdq_scatter_shifted_offset (uint64_t * __base, uint64x2_t __offset, uint64x2_t __value)
{
  __arm_vstrdq_scatter_shifted_offset_u64 (__base, __offset, __value);
}
29889 | ||
/* Overloaded 32-bit scatter stores: scalar base pointer plus a vector
   of offsets (_offset = byte offsets, _shifted_offset = element
   offsets; NOTE(review): scaling implied by the name — confirm in the
   ACLE spec).  The _p variants are lane-predicated.  Each overload
   forwards to the type-suffixed implementation.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_offset_p (int32_t * __base, uint32x4_t __offset, int32x4_t __value, mve_pred16_t __p)
{
  __arm_vstrwq_scatter_offset_p_s32 (__base, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_offset_p (uint32_t * __base, uint32x4_t __offset, uint32x4_t __value, mve_pred16_t __p)
{
  __arm_vstrwq_scatter_offset_p_u32 (__base, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_offset (int32_t * __base, uint32x4_t __offset, int32x4_t __value)
{
  __arm_vstrwq_scatter_offset_s32 (__base, __offset, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_offset (uint32_t * __base, uint32x4_t __offset, uint32x4_t __value)
{
  __arm_vstrwq_scatter_offset_u32 (__base, __offset, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_shifted_offset_p (int32_t * __base, uint32x4_t __offset, int32x4_t __value, mve_pred16_t __p)
{
  __arm_vstrwq_scatter_shifted_offset_p_s32 (__base, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_shifted_offset_p (uint32_t * __base, uint32x4_t __offset, uint32x4_t __value, mve_pred16_t __p)
{
  __arm_vstrwq_scatter_shifted_offset_p_u32 (__base, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_shifted_offset (int32_t * __base, uint32x4_t __offset, int32x4_t __value)
{
  __arm_vstrwq_scatter_shifted_offset_s32 (__base, __offset, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_shifted_offset (uint32_t * __base, uint32x4_t __offset, uint32x4_t __value)
{
  __arm_vstrwq_scatter_shifted_offset_u32 (__base, __offset, __value);
}
29945 | ||
/* Overloaded element-wise vector addition: each overload returns the
   result of the type-suffixed __arm_vaddq_<type> intrinsic for its
   argument element type and width.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq (int8x16_t __a, int8x16_t __b)
{
  return __arm_vaddq_s8 (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq (int16x8_t __a, int16x8_t __b)
{
  return __arm_vaddq_s16 (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq (int32x4_t __a, int32x4_t __b)
{
  return __arm_vaddq_s32 (__a, __b);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq (uint8x16_t __a, uint8x16_t __b)
{
  return __arm_vaddq_u8 (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq (uint16x8_t __a, uint16x8_t __b)
{
  return __arm_vaddq_u16 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq (uint32x4_t __a, uint32x4_t __b)
{
  return __arm_vaddq_u32 (__a, __b);
}
29987 | ||
6a90680b ASDV |
/* Overloaded vddup (decrementing duplicate) intrinsics.  _m forms
   merge into __inactive under predicate __p.  Overloads taking a
   uint32_t value dispatch to the _n_ (scalar start) implementations;
   overloads taking uint32_t * dispatch to the _wb_ implementations
   (NOTE(review): _wb presumably writes the updated start value back
   through __a — confirm against ACLE).  __imm must be a compile-time
   immediate step.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_m (uint8x16_t __inactive, uint32_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vddupq_m_n_u8 (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_m (uint32x4_t __inactive, uint32_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vddupq_m_n_u32 (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_m (uint16x8_t __inactive, uint32_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vddupq_m_n_u16 (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_m (uint8x16_t __inactive, uint32_t * __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vddupq_m_wb_u8 (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_m (uint16x8_t __inactive, uint32_t * __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vddupq_m_wb_u16 (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_m (uint32x4_t __inactive, uint32_t * __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vddupq_m_wb_u32 (__inactive, __a, __imm, __p);
}

/* Unpredicated scalar-start forms; result element width is encoded in
   the wrapper name because the arguments alone cannot select it.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_u8 (uint32_t __a, const int __imm)
{
  return __arm_vddupq_n_u8 (__a, __imm);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_u32 (uint32_t __a, const int __imm)
{
  return __arm_vddupq_n_u32 (__a, __imm);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_u16 (uint32_t __a, const int __imm)
{
  return __arm_vddupq_n_u16 (__a, __imm);
}
30050 | ||
/* Overloaded vdwdup (decrementing duplicate with wrap) intrinsics.
   __a is the start value (scalar → _n_ forms, pointer → _wb_
   write-back forms), __b the wrap limit, __imm the immediate step;
   _m forms merge into __inactive under predicate __p.  All overloads
   forward to the type-suffixed implementations.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_m (uint8x16_t __inactive, uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vdwdupq_m_n_u8 (__inactive, __a, __b, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_m (uint32x4_t __inactive, uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vdwdupq_m_n_u32 (__inactive, __a, __b, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_m (uint16x8_t __inactive, uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vdwdupq_m_n_u16 (__inactive, __a, __b, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_m (uint8x16_t __inactive, uint32_t * __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vdwdupq_m_wb_u8 (__inactive, __a, __b, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_m (uint32x4_t __inactive, uint32_t * __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vdwdupq_m_wb_u32 (__inactive, __a, __b, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_m (uint16x8_t __inactive, uint32_t * __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vdwdupq_m_wb_u16 (__inactive, __a, __b, __imm, __p);
}

/* Unpredicated forms; element width is encoded in the wrapper name.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_u8 (uint32_t __a, uint32_t __b, const int __imm)
{
  return __arm_vdwdupq_n_u8 (__a, __b, __imm);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_u32 (uint32_t __a, uint32_t __b, const int __imm)
{
  return __arm_vdwdupq_n_u32 (__a, __b, __imm);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_u16 (uint32_t __a, uint32_t __b, const int __imm)
{
  return __arm_vdwdupq_n_u16 (__a, __b, __imm);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_u8 (uint32_t * __a, uint32_t __b, const int __imm)
{
  return __arm_vdwdupq_wb_u8 (__a, __b, __imm);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_u32 (uint32_t * __a, uint32_t __b, const int __imm)
{
  return __arm_vdwdupq_wb_u32 (__a, __b, __imm);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_u16 (uint32_t * __a, uint32_t __b, const int __imm)
{
  return __arm_vdwdupq_wb_u16 (__a, __b, __imm);
}
30134 | ||
/* Overloaded vidup (incrementing duplicate) intrinsics.  Scalar-start
   overloads dispatch to _n_ implementations, pointer-start overloads
   to _wb_ (write-back) implementations; _m forms merge into
   __inactive under predicate __p.  __imm is the immediate step.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_m (uint8x16_t __inactive, uint32_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vidupq_m_n_u8 (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_m (uint32x4_t __inactive, uint32_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vidupq_m_n_u32 (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_m (uint16x8_t __inactive, uint32_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vidupq_m_n_u16 (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_u8 (uint32_t __a, const int __imm)
{
  return __arm_vidupq_n_u8 (__a, __imm);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_m (uint8x16_t __inactive, uint32_t * __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vidupq_m_wb_u8 (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_m (uint16x8_t __inactive, uint32_t * __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vidupq_m_wb_u16 (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_m (uint32x4_t __inactive, uint32_t * __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vidupq_m_wb_u32 (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_u32 (uint32_t __a, const int __imm)
{
  return __arm_vidupq_n_u32 (__a, __imm);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_u16 (uint32_t __a, const int __imm)
{
  return __arm_vidupq_n_u16 (__a, __imm);
}

/* Pointer-start (write-back) unpredicated forms.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_u8 (uint32_t * __a, const int __imm)
{
  return __arm_vidupq_wb_u8 (__a, __imm);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_u16 (uint32_t * __a, const int __imm)
{
  return __arm_vidupq_wb_u16 (__a, __imm);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_u32 (uint32_t * __a, const int __imm)
{
  return __arm_vidupq_wb_u32 (__a, __imm);
}
30218 | ||
/* Pointer-start (write-back) unpredicated vddup forms; these overload
   the scalar-start __arm_vddupq_uN wrappers defined earlier and
   forward to the _wb_ implementations.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_u8 (uint32_t * __a, const int __imm)
{
  return __arm_vddupq_wb_u8 (__a, __imm);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_u16 (uint32_t * __a, const int __imm)
{
  return __arm_vddupq_wb_u16 (__a, __imm);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_u32 (uint32_t * __a, const int __imm)
{
  return __arm_vddupq_wb_u32 (__a, __imm);
}
30239 | ||
/* Overloaded viwdup (incrementing duplicate with wrap) intrinsics.
   __a is the start value (scalar → _n_ forms, pointer → _wb_
   write-back forms), __b the wrap limit, __imm the immediate step;
   _m forms merge into __inactive under predicate __p.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_m (uint8x16_t __inactive, uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_viwdupq_m_n_u8 (__inactive, __a, __b, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_m (uint32x4_t __inactive, uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_viwdupq_m_n_u32 (__inactive, __a, __b, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_m (uint16x8_t __inactive, uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_viwdupq_m_n_u16 (__inactive, __a, __b, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_m (uint8x16_t __inactive, uint32_t * __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_viwdupq_m_wb_u8 (__inactive, __a, __b, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_m (uint32x4_t __inactive, uint32_t * __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_viwdupq_m_wb_u32 (__inactive, __a, __b, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_m (uint16x8_t __inactive, uint32_t * __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_viwdupq_m_wb_u16 (__inactive, __a, __b, __imm, __p);
}

/* Unpredicated forms; element width is encoded in the wrapper name.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_u8 (uint32_t __a, uint32_t __b, const int __imm)
{
  return __arm_viwdupq_n_u8 (__a, __b, __imm);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_u32 (uint32_t __a, uint32_t __b, const int __imm)
{
  return __arm_viwdupq_n_u32 (__a, __b, __imm);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_u16 (uint32_t __a, uint32_t __b, const int __imm)
{
  return __arm_viwdupq_n_u16 (__a, __b, __imm);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_u8 (uint32_t * __a, uint32_t __b, const int __imm)
{
  return __arm_viwdupq_wb_u8 (__a, __b, __imm);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_u32 (uint32_t * __a, uint32_t __b, const int __imm)
{
  return __arm_viwdupq_wb_u32 (__a, __b, __imm);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_u16 (uint32_t * __a, uint32_t __b, const int __imm)
{
  return __arm_viwdupq_wb_u16 (__a, __b, __imm);
}
30323 | ||
/* Overloaded scatter-store-with-write-back forms: the vector of base
   addresses is passed by pointer and updated by the underlying _wb_
   intrinsic (hence uint64x2_t * / uint32x4_t * __addr).  The _p
   variants are lane-predicated.  Each overload forwards to the
   type-suffixed implementation.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrdq_scatter_base_wb (uint64x2_t * __addr, const int __offset, int64x2_t __value)
{
  __arm_vstrdq_scatter_base_wb_s64 (__addr, __offset, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrdq_scatter_base_wb (uint64x2_t * __addr, const int __offset, uint64x2_t __value)
{
  __arm_vstrdq_scatter_base_wb_u64 (__addr, __offset, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrdq_scatter_base_wb_p (uint64x2_t * __addr, const int __offset, int64x2_t __value, mve_pred16_t __p)
{
  __arm_vstrdq_scatter_base_wb_p_s64 (__addr, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrdq_scatter_base_wb_p (uint64x2_t * __addr, const int __offset, uint64x2_t __value, mve_pred16_t __p)
{
  __arm_vstrdq_scatter_base_wb_p_u64 (__addr, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_base_wb_p (uint32x4_t * __addr, const int __offset, int32x4_t __value, mve_pred16_t __p)
{
  __arm_vstrwq_scatter_base_wb_p_s32 (__addr, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_base_wb_p (uint32x4_t * __addr, const int __offset, uint32x4_t __value, mve_pred16_t __p)
{
  __arm_vstrwq_scatter_base_wb_p_u32 (__addr, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_base_wb (uint32x4_t * __addr, const int __offset, int32x4_t __value)
{
  __arm_vstrwq_scatter_base_wb_s32 (__addr, __offset, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_base_wb (uint32x4_t * __addr, const int __offset, uint32x4_t __value)
{
  __arm_vstrwq_scatter_base_wb_u32 (__addr, __offset, __value);
}
30379 | ||
/* Predicated-with-undefined-inactive (_x) vddup forms: like the _m
   forms but with no __inactive merge operand.  Scalar-start wrappers
   dispatch to _x_n_, pointer-start wrappers to _x_wb_.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_x_u8 (uint32_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vddupq_x_n_u8 (__a, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_x_u16 (uint32_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vddupq_x_n_u16 (__a, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_x_u32 (uint32_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vddupq_x_n_u32 (__a, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_x_u8 (uint32_t *__a, const int __imm, mve_pred16_t __p)
{
  return __arm_vddupq_x_wb_u8 (__a, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_x_u16 (uint32_t *__a, const int __imm, mve_pred16_t __p)
{
  return __arm_vddupq_x_wb_u16 (__a, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_x_u32 (uint32_t *__a, const int __imm, mve_pred16_t __p)
{
  return __arm_vddupq_x_wb_u32 (__a, __imm, __p);
}
30421 | ||
/* Predicated-with-undefined-inactive (_x) vdwdup forms.  __a is the
   start (scalar → _x_n_, pointer → _x_wb_), __b the wrap limit,
   __imm the immediate step, __p the lane predicate.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_x_u8 (uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vdwdupq_x_n_u8 (__a, __b, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_x_u16 (uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vdwdupq_x_n_u16 (__a, __b, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_x_u32 (uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vdwdupq_x_n_u32 (__a, __b, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_x_u8 (uint32_t *__a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vdwdupq_x_wb_u8 (__a, __b, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_x_u16 (uint32_t *__a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vdwdupq_x_wb_u16 (__a, __b, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_x_u32 (uint32_t *__a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vdwdupq_x_wb_u32 (__a, __b, __imm, __p);
}
30463 | ||
/* Overload wrappers: predicated (_x) vidup, immediate start-value (n)
   forms; each forwards unchanged to the explicitly-suffixed intrinsic.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_x_u8 (uint32_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vidupq_x_n_u8 (__a, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_x_u16 (uint32_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vidupq_x_n_u16 (__a, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_x_u32 (uint32_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vidupq_x_n_u32 (__a, __imm, __p);
}
30484 | ||
/* Overload wrappers: predicated (_x) vidup, write-back (wb) forms,
   selected by the uint32_t * first argument.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_x_u8 (uint32_t *__a, const int __imm, mve_pred16_t __p)
{
  return __arm_vidupq_x_wb_u8 (__a, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_x_u16 (uint32_t *__a, const int __imm, mve_pred16_t __p)
{
  return __arm_vidupq_x_wb_u16 (__a, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_x_u32 (uint32_t *__a, const int __imm, mve_pred16_t __p)
{
  return __arm_vidupq_x_wb_u32 (__a, __imm, __p);
}
30505 | ||
/* Overload wrappers: predicated (_x) viwdup, immediate start-value (n)
   forms; each forwards unchanged to the explicitly-suffixed intrinsic.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_x_u8 (uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_viwdupq_x_n_u8 (__a, __b, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_x_u16 (uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_viwdupq_x_n_u16 (__a, __b, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_x_u32 (uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_viwdupq_x_n_u32 (__a, __b, __imm, __p);
}
30526 | ||
/* Overload wrappers: predicated (_x) viwdup, write-back (wb) forms,
   selected by the uint32_t * first argument.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_x_u8 (uint32_t *__a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_viwdupq_x_wb_u8 (__a, __b, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_x_u16 (uint32_t *__a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_viwdupq_x_wb_u16 (__a, __b, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_x_u32 (uint32_t *__a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_viwdupq_x_wb_u32 (__a, __b, __imm, __p);
}
30547 | ||
/* Overload wrappers: predicated (_x) vmin for all six integer element
   types; each forwards unchanged to the explicitly-suffixed intrinsic.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vminq_x_s8 (__a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vminq_x_s16 (__a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vminq_x_s32 (__a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_vminq_x_u8 (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __arm_vminq_x_u16 (__a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __arm_vminq_x_u32 (__a, __b, __p);
}
30589 | ||
/* Overload wrappers: predicated (_x) vmax for all six integer element
   types; each forwards unchanged to the explicitly-suffixed intrinsic.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vmaxq_x_s8 (__a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vmaxq_x_s16 (__a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vmaxq_x_s32 (__a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_vmaxq_x_u8 (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __arm_vmaxq_x_u16 (__a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __arm_vmaxq_x_u32 (__a, __b, __p);
}
30631 | ||
/* Overload wrappers: predicated (_x) vabd (absolute difference) for all
   six integer element types.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vabdq_x_s8 (__a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vabdq_x_s16 (__a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vabdq_x_s32 (__a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_vabdq_x_u8 (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __arm_vabdq_x_u16 (__a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __arm_vabdq_x_u32 (__a, __b, __p);
}
30673 | ||
/* Overload wrappers: predicated (_x) vabs — signed element types only.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabsq_x (int8x16_t __a, mve_pred16_t __p)
{
  return __arm_vabsq_x_s8 (__a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabsq_x (int16x8_t __a, mve_pred16_t __p)
{
  return __arm_vabsq_x_s16 (__a, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabsq_x (int32x4_t __a, mve_pred16_t __p)
{
  return __arm_vabsq_x_s32 (__a, __p);
}
30694 | ||
/* Overload wrappers: predicated (_x) vadd.  Vector+vector forms forward
   to the plain suffixed intrinsics; vector+scalar forms (scalar second
   argument) forward to the _n variants.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vaddq_x_s8 (__a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vaddq_x_s16 (__a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vaddq_x_s32 (__a, __b, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_x (int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __arm_vaddq_x_n_s8 (__a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_x (int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __arm_vaddq_x_n_s16 (__a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_x (int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vaddq_x_n_s32 (__a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_vaddq_x_u8 (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __arm_vaddq_x_u16 (__a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __arm_vaddq_x_u32 (__a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_x (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
{
  return __arm_vaddq_x_n_u8 (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_x (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
{
  return __arm_vaddq_x_n_u16 (__a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_x (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
{
  return __arm_vaddq_x_n_u32 (__a, __b, __p);
}
30778 | ||
/* Overload wrappers: predicated (_x) vcls — signed element types only.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclsq_x (int8x16_t __a, mve_pred16_t __p)
{
  return __arm_vclsq_x_s8 (__a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclsq_x (int16x8_t __a, mve_pred16_t __p)
{
  return __arm_vclsq_x_s16 (__a, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclsq_x (int32x4_t __a, mve_pred16_t __p)
{
  return __arm_vclsq_x_s32 (__a, __p);
}
30799 | ||
/* Overload wrappers: predicated (_x) vclz for all six integer element
   types.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclzq_x (int8x16_t __a, mve_pred16_t __p)
{
  return __arm_vclzq_x_s8 (__a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclzq_x (int16x8_t __a, mve_pred16_t __p)
{
  return __arm_vclzq_x_s16 (__a, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclzq_x (int32x4_t __a, mve_pred16_t __p)
{
  return __arm_vclzq_x_s32 (__a, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclzq_x (uint8x16_t __a, mve_pred16_t __p)
{
  return __arm_vclzq_x_u8 (__a, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclzq_x (uint16x8_t __a, mve_pred16_t __p)
{
  return __arm_vclzq_x_u16 (__a, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclzq_x (uint32x4_t __a, mve_pred16_t __p)
{
  return __arm_vclzq_x_u32 (__a, __p);
}
30841 | ||
/* Overload wrappers: predicated (_x) vneg — signed element types only.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vnegq_x (int8x16_t __a, mve_pred16_t __p)
{
  return __arm_vnegq_x_s8 (__a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vnegq_x (int16x8_t __a, mve_pred16_t __p)
{
  return __arm_vnegq_x_s16 (__a, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vnegq_x (int32x4_t __a, mve_pred16_t __p)
{
  return __arm_vnegq_x_s32 (__a, __p);
}
30862 | ||
/* Overload wrappers: predicated (_x) vmulh for all six integer element
   types.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vmulhq_x_s8 (__a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vmulhq_x_s16 (__a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vmulhq_x_s32 (__a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_vmulhq_x_u8 (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __arm_vmulhq_x_u16 (__a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __arm_vmulhq_x_u32 (__a, __b, __p);
}
30904 | ||
/* Overload wrappers: predicated (_x) vmullb polynomial forms.  Results
   widen: p8 inputs yield uint16x8_t, p16 inputs yield uint32x4_t.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_poly_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_vmullbq_poly_x_p8 (__a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_poly_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __arm_vmullbq_poly_x_p16 (__a, __b, __p);
}
30918 | ||
/* Overload wrappers: predicated (_x) vmullb integer forms.  Results
   widen to double-width elements (e.g. s8 -> int16x8_t, s32 ->
   int64x2_t).  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_int_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vmullbq_int_x_s8 (__a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_int_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vmullbq_int_x_s16 (__a, __b, __p);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_int_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vmullbq_int_x_s32 (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_int_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_vmullbq_int_x_u8 (__a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_int_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __arm_vmullbq_int_x_u16 (__a, __b, __p);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_int_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __arm_vmullbq_int_x_u32 (__a, __b, __p);
}
30960 | ||
/* Overload wrappers: predicated (_x) vmullt polynomial forms.  Results
   widen: p8 inputs yield uint16x8_t, p16 inputs yield uint32x4_t.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_poly_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_vmulltq_poly_x_p8 (__a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_poly_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __arm_vmulltq_poly_x_p16 (__a, __b, __p);
}
30974 | ||
/* Overload wrappers: predicated (_x) vmullt integer forms.  Results
   widen to double-width elements (e.g. s8 -> int16x8_t, u32 ->
   uint64x2_t).  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vmulltq_int_x_s8 (__a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vmulltq_int_x_s16 (__a, __b, __p);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vmulltq_int_x_s32 (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_vmulltq_int_x_u8 (__a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __arm_vmulltq_int_x_u16 (__a, __b, __p);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __arm_vmulltq_int_x_u32 (__a, __b, __p);
}
31016 | ||
/* Overload wrappers: predicated (_x) vmul.  Vector+vector forms forward
   to the plain suffixed intrinsics; vector+scalar forms (scalar second
   argument) forward to the _n variants.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vmulq_x_s8 (__a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vmulq_x_s16 (__a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vmulq_x_s32 (__a, __b, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_x (int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __arm_vmulq_x_n_s8 (__a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_x (int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __arm_vmulq_x_n_s16 (__a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_x (int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vmulq_x_n_s32 (__a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_vmulq_x_u8 (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __arm_vmulq_x_u16 (__a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __arm_vmulq_x_u32 (__a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_x (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
{
  return __arm_vmulq_x_n_u8 (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_x (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
{
  return __arm_vmulq_x_n_u16 (__a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_x (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
{
  return __arm_vmulq_x_n_u32 (__a, __b, __p);
}
31100 | ||
/* Overload wrappers: predicated (_x) vsub.  Vector-vector forms forward
   to the plain suffixed intrinsics; vector-scalar forms (scalar second
   argument) forward to the _n variants.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vsubq_x_s8 (__a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vsubq_x_s16 (__a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vsubq_x_s32 (__a, __b, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_x (int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __arm_vsubq_x_n_s8 (__a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_x (int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __arm_vsubq_x_n_s16 (__a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_x (int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vsubq_x_n_s32 (__a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_vsubq_x_u8 (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __arm_vsubq_x_u16 (__a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __arm_vsubq_x_u32 (__a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_x (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
{
  return __arm_vsubq_x_n_u8 (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_x (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
{
  return __arm_vsubq_x_n_u16 (__a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_x (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
{
  return __arm_vsubq_x_n_u32 (__a, __b, __p);
}
31184 | ||
/* Overload wrappers: predicated (_x) vcadd with 90-degree rotation, for
   all six integer element types.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vcaddq_rot90_x_s8 (__a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vcaddq_rot90_x_s16 (__a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vcaddq_rot90_x_s32 (__a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_vcaddq_rot90_x_u8 (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __arm_vcaddq_rot90_x_u16 (__a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __arm_vcaddq_rot90_x_u32 (__a, __b, __p);
}
31226 | ||
31227 | __extension__ extern __inline int8x16_t | |
31228 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31229 | __arm_vcaddq_rot270_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
31230 | { | |
31231 | return __arm_vcaddq_rot270_x_s8 (__a, __b, __p); | |
31232 | } | |
31233 | ||
31234 | __extension__ extern __inline int16x8_t | |
31235 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31236 | __arm_vcaddq_rot270_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
31237 | { | |
31238 | return __arm_vcaddq_rot270_x_s16 (__a, __b, __p); | |
31239 | } | |
31240 | ||
31241 | __extension__ extern __inline int32x4_t | |
31242 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31243 | __arm_vcaddq_rot270_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
31244 | { | |
31245 | return __arm_vcaddq_rot270_x_s32 (__a, __b, __p); | |
31246 | } | |
31247 | ||
31248 | __extension__ extern __inline uint8x16_t | |
31249 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31250 | __arm_vcaddq_rot270_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
31251 | { | |
31252 | return __arm_vcaddq_rot270_x_u8 (__a, __b, __p); | |
31253 | } | |
31254 | ||
31255 | __extension__ extern __inline uint16x8_t | |
31256 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31257 | __arm_vcaddq_rot270_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
31258 | { | |
31259 | return __arm_vcaddq_rot270_x_u16 (__a, __b, __p); | |
31260 | } | |
31261 | ||
31262 | __extension__ extern __inline uint32x4_t | |
31263 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31264 | __arm_vcaddq_rot270_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
31265 | { | |
31266 | return __arm_vcaddq_rot270_x_u32 (__a, __b, __p); | |
31267 | } | |
31268 | ||
31269 | __extension__ extern __inline int8x16_t | |
31270 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31271 | __arm_vhaddq_x (int8x16_t __a, int8_t __b, mve_pred16_t __p) | |
31272 | { | |
31273 | return __arm_vhaddq_x_n_s8 (__a, __b, __p); | |
31274 | } | |
31275 | ||
31276 | __extension__ extern __inline int16x8_t | |
31277 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31278 | __arm_vhaddq_x (int16x8_t __a, int16_t __b, mve_pred16_t __p) | |
31279 | { | |
31280 | return __arm_vhaddq_x_n_s16 (__a, __b, __p); | |
31281 | } | |
31282 | ||
31283 | __extension__ extern __inline int32x4_t | |
31284 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31285 | __arm_vhaddq_x (int32x4_t __a, int32_t __b, mve_pred16_t __p) | |
31286 | { | |
31287 | return __arm_vhaddq_x_n_s32 (__a, __b, __p); | |
31288 | } | |
31289 | ||
31290 | __extension__ extern __inline uint8x16_t | |
31291 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31292 | __arm_vhaddq_x (uint8x16_t __a, uint8_t __b, mve_pred16_t __p) | |
31293 | { | |
31294 | return __arm_vhaddq_x_n_u8 (__a, __b, __p); | |
31295 | } | |
31296 | ||
31297 | __extension__ extern __inline uint16x8_t | |
31298 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31299 | __arm_vhaddq_x (uint16x8_t __a, uint16_t __b, mve_pred16_t __p) | |
31300 | { | |
31301 | return __arm_vhaddq_x_n_u16 (__a, __b, __p); | |
31302 | } | |
31303 | ||
31304 | __extension__ extern __inline uint32x4_t | |
31305 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31306 | __arm_vhaddq_x (uint32x4_t __a, uint32_t __b, mve_pred16_t __p) | |
31307 | { | |
31308 | return __arm_vhaddq_x_n_u32 (__a, __b, __p); | |
31309 | } | |
31310 | ||
31311 | __extension__ extern __inline int8x16_t | |
31312 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31313 | __arm_vhaddq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
31314 | { | |
31315 | return __arm_vhaddq_x_s8 (__a, __b, __p); | |
31316 | } | |
31317 | ||
31318 | __extension__ extern __inline int16x8_t | |
31319 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31320 | __arm_vhaddq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
31321 | { | |
31322 | return __arm_vhaddq_x_s16 (__a, __b, __p); | |
31323 | } | |
31324 | ||
31325 | __extension__ extern __inline int32x4_t | |
31326 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31327 | __arm_vhaddq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
31328 | { | |
31329 | return __arm_vhaddq_x_s32 (__a, __b, __p); | |
31330 | } | |
31331 | ||
31332 | __extension__ extern __inline uint8x16_t | |
31333 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31334 | __arm_vhaddq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
31335 | { | |
31336 | return __arm_vhaddq_x_u8 (__a, __b, __p); | |
31337 | } | |
31338 | ||
31339 | __extension__ extern __inline uint16x8_t | |
31340 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31341 | __arm_vhaddq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
31342 | { | |
31343 | return __arm_vhaddq_x_u16 (__a, __b, __p); | |
31344 | } | |
31345 | ||
31346 | __extension__ extern __inline uint32x4_t | |
31347 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31348 | __arm_vhaddq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
31349 | { | |
31350 | return __arm_vhaddq_x_u32 (__a, __b, __p); | |
31351 | } | |
31352 | ||
31353 | __extension__ extern __inline int8x16_t | |
31354 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31355 | __arm_vhcaddq_rot90_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
31356 | { | |
31357 | return __arm_vhcaddq_rot90_x_s8 (__a, __b, __p); | |
31358 | } | |
31359 | ||
31360 | __extension__ extern __inline int16x8_t | |
31361 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31362 | __arm_vhcaddq_rot90_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
31363 | { | |
31364 | return __arm_vhcaddq_rot90_x_s16 (__a, __b, __p); | |
31365 | } | |
31366 | ||
31367 | __extension__ extern __inline int32x4_t | |
31368 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31369 | __arm_vhcaddq_rot90_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
31370 | { | |
31371 | return __arm_vhcaddq_rot90_x_s32 (__a, __b, __p); | |
31372 | } | |
31373 | ||
31374 | __extension__ extern __inline int8x16_t | |
31375 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31376 | __arm_vhcaddq_rot270_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
31377 | { | |
31378 | return __arm_vhcaddq_rot270_x_s8 (__a, __b, __p); | |
31379 | } | |
31380 | ||
31381 | __extension__ extern __inline int16x8_t | |
31382 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31383 | __arm_vhcaddq_rot270_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
31384 | { | |
31385 | return __arm_vhcaddq_rot270_x_s16 (__a, __b, __p); | |
31386 | } | |
31387 | ||
31388 | __extension__ extern __inline int32x4_t | |
31389 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31390 | __arm_vhcaddq_rot270_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
31391 | { | |
31392 | return __arm_vhcaddq_rot270_x_s32 (__a, __b, __p); | |
31393 | } | |
31394 | ||
31395 | __extension__ extern __inline int8x16_t | |
31396 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31397 | __arm_vhsubq_x (int8x16_t __a, int8_t __b, mve_pred16_t __p) | |
31398 | { | |
31399 | return __arm_vhsubq_x_n_s8 (__a, __b, __p); | |
31400 | } | |
31401 | ||
31402 | __extension__ extern __inline int16x8_t | |
31403 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31404 | __arm_vhsubq_x (int16x8_t __a, int16_t __b, mve_pred16_t __p) | |
31405 | { | |
31406 | return __arm_vhsubq_x_n_s16 (__a, __b, __p); | |
31407 | } | |
31408 | ||
31409 | __extension__ extern __inline int32x4_t | |
31410 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31411 | __arm_vhsubq_x (int32x4_t __a, int32_t __b, mve_pred16_t __p) | |
31412 | { | |
31413 | return __arm_vhsubq_x_n_s32 (__a, __b, __p); | |
31414 | } | |
31415 | ||
31416 | __extension__ extern __inline uint8x16_t | |
31417 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31418 | __arm_vhsubq_x (uint8x16_t __a, uint8_t __b, mve_pred16_t __p) | |
31419 | { | |
31420 | return __arm_vhsubq_x_n_u8 (__a, __b, __p); | |
31421 | } | |
31422 | ||
31423 | __extension__ extern __inline uint16x8_t | |
31424 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31425 | __arm_vhsubq_x (uint16x8_t __a, uint16_t __b, mve_pred16_t __p) | |
31426 | { | |
31427 | return __arm_vhsubq_x_n_u16 (__a, __b, __p); | |
31428 | } | |
31429 | ||
31430 | __extension__ extern __inline uint32x4_t | |
31431 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31432 | __arm_vhsubq_x (uint32x4_t __a, uint32_t __b, mve_pred16_t __p) | |
31433 | { | |
31434 | return __arm_vhsubq_x_n_u32 (__a, __b, __p); | |
31435 | } | |
31436 | ||
31437 | __extension__ extern __inline int8x16_t | |
31438 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31439 | __arm_vhsubq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
31440 | { | |
31441 | return __arm_vhsubq_x_s8 (__a, __b, __p); | |
31442 | } | |
31443 | ||
31444 | __extension__ extern __inline int16x8_t | |
31445 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31446 | __arm_vhsubq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
31447 | { | |
31448 | return __arm_vhsubq_x_s16 (__a, __b, __p); | |
31449 | } | |
31450 | ||
31451 | __extension__ extern __inline int32x4_t | |
31452 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31453 | __arm_vhsubq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
31454 | { | |
31455 | return __arm_vhsubq_x_s32 (__a, __b, __p); | |
31456 | } | |
31457 | ||
31458 | __extension__ extern __inline uint8x16_t | |
31459 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31460 | __arm_vhsubq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
31461 | { | |
31462 | return __arm_vhsubq_x_u8 (__a, __b, __p); | |
31463 | } | |
31464 | ||
31465 | __extension__ extern __inline uint16x8_t | |
31466 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31467 | __arm_vhsubq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
31468 | { | |
31469 | return __arm_vhsubq_x_u16 (__a, __b, __p); | |
31470 | } | |
31471 | ||
31472 | __extension__ extern __inline uint32x4_t | |
31473 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31474 | __arm_vhsubq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
31475 | { | |
31476 | return __arm_vhsubq_x_u32 (__a, __b, __p); | |
31477 | } | |
31478 | ||
31479 | __extension__ extern __inline int8x16_t | |
31480 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31481 | __arm_vrhaddq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
31482 | { | |
31483 | return __arm_vrhaddq_x_s8 (__a, __b, __p); | |
31484 | } | |
31485 | ||
31486 | __extension__ extern __inline int16x8_t | |
31487 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31488 | __arm_vrhaddq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
31489 | { | |
31490 | return __arm_vrhaddq_x_s16 (__a, __b, __p); | |
31491 | } | |
31492 | ||
31493 | __extension__ extern __inline int32x4_t | |
31494 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31495 | __arm_vrhaddq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
31496 | { | |
31497 | return __arm_vrhaddq_x_s32 (__a, __b, __p); | |
31498 | } | |
31499 | ||
31500 | __extension__ extern __inline uint8x16_t | |
31501 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31502 | __arm_vrhaddq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
31503 | { | |
31504 | return __arm_vrhaddq_x_u8 (__a, __b, __p); | |
31505 | } | |
31506 | ||
31507 | __extension__ extern __inline uint16x8_t | |
31508 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31509 | __arm_vrhaddq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
31510 | { | |
31511 | return __arm_vrhaddq_x_u16 (__a, __b, __p); | |
31512 | } | |
31513 | ||
31514 | __extension__ extern __inline uint32x4_t | |
31515 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31516 | __arm_vrhaddq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
31517 | { | |
31518 | return __arm_vrhaddq_x_u32 (__a, __b, __p); | |
31519 | } | |
31520 | ||
31521 | __extension__ extern __inline int8x16_t | |
31522 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31523 | __arm_vrmulhq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
31524 | { | |
31525 | return __arm_vrmulhq_x_s8 (__a, __b, __p); | |
31526 | } | |
31527 | ||
31528 | __extension__ extern __inline int16x8_t | |
31529 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31530 | __arm_vrmulhq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
31531 | { | |
31532 | return __arm_vrmulhq_x_s16 (__a, __b, __p); | |
31533 | } | |
31534 | ||
31535 | __extension__ extern __inline int32x4_t | |
31536 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31537 | __arm_vrmulhq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
31538 | { | |
31539 | return __arm_vrmulhq_x_s32 (__a, __b, __p); | |
31540 | } | |
31541 | ||
31542 | __extension__ extern __inline uint8x16_t | |
31543 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31544 | __arm_vrmulhq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
31545 | { | |
31546 | return __arm_vrmulhq_x_u8 (__a, __b, __p); | |
31547 | } | |
31548 | ||
31549 | __extension__ extern __inline uint16x8_t | |
31550 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31551 | __arm_vrmulhq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
31552 | { | |
31553 | return __arm_vrmulhq_x_u16 (__a, __b, __p); | |
31554 | } | |
31555 | ||
31556 | __extension__ extern __inline uint32x4_t | |
31557 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31558 | __arm_vrmulhq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
31559 | { | |
31560 | return __arm_vrmulhq_x_u32 (__a, __b, __p); | |
31561 | } | |
31562 | ||
31563 | __extension__ extern __inline int8x16_t | |
31564 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31565 | __arm_vandq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
31566 | { | |
31567 | return __arm_vandq_x_s8 (__a, __b, __p); | |
31568 | } | |
31569 | ||
31570 | __extension__ extern __inline int16x8_t | |
31571 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31572 | __arm_vandq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
31573 | { | |
31574 | return __arm_vandq_x_s16 (__a, __b, __p); | |
31575 | } | |
31576 | ||
31577 | __extension__ extern __inline int32x4_t | |
31578 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31579 | __arm_vandq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
31580 | { | |
31581 | return __arm_vandq_x_s32 (__a, __b, __p); | |
31582 | } | |
31583 | ||
31584 | __extension__ extern __inline uint8x16_t | |
31585 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31586 | __arm_vandq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
31587 | { | |
31588 | return __arm_vandq_x_u8 (__a, __b, __p); | |
31589 | } | |
31590 | ||
31591 | __extension__ extern __inline uint16x8_t | |
31592 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31593 | __arm_vandq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
31594 | { | |
31595 | return __arm_vandq_x_u16 (__a, __b, __p); | |
31596 | } | |
31597 | ||
31598 | __extension__ extern __inline uint32x4_t | |
31599 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31600 | __arm_vandq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
31601 | { | |
31602 | return __arm_vandq_x_u32 (__a, __b, __p); | |
31603 | } | |
31604 | ||
31605 | __extension__ extern __inline int8x16_t | |
31606 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31607 | __arm_vbicq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
31608 | { | |
31609 | return __arm_vbicq_x_s8 (__a, __b, __p); | |
31610 | } | |
31611 | ||
31612 | __extension__ extern __inline int16x8_t | |
31613 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31614 | __arm_vbicq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
31615 | { | |
31616 | return __arm_vbicq_x_s16 (__a, __b, __p); | |
31617 | } | |
31618 | ||
31619 | __extension__ extern __inline int32x4_t | |
31620 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31621 | __arm_vbicq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
31622 | { | |
31623 | return __arm_vbicq_x_s32 (__a, __b, __p); | |
31624 | } | |
31625 | ||
31626 | __extension__ extern __inline uint8x16_t | |
31627 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31628 | __arm_vbicq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
31629 | { | |
31630 | return __arm_vbicq_x_u8 (__a, __b, __p); | |
31631 | } | |
31632 | ||
31633 | __extension__ extern __inline uint16x8_t | |
31634 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31635 | __arm_vbicq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
31636 | { | |
31637 | return __arm_vbicq_x_u16 (__a, __b, __p); | |
31638 | } | |
31639 | ||
31640 | __extension__ extern __inline uint32x4_t | |
31641 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31642 | __arm_vbicq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
31643 | { | |
31644 | return __arm_vbicq_x_u32 (__a, __b, __p); | |
31645 | } | |
31646 | ||
31647 | __extension__ extern __inline int8x16_t | |
31648 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31649 | __arm_vbrsrq_x (int8x16_t __a, int32_t __b, mve_pred16_t __p) | |
31650 | { | |
31651 | return __arm_vbrsrq_x_n_s8 (__a, __b, __p); | |
31652 | } | |
31653 | ||
31654 | __extension__ extern __inline int16x8_t | |
31655 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31656 | __arm_vbrsrq_x (int16x8_t __a, int32_t __b, mve_pred16_t __p) | |
31657 | { | |
31658 | return __arm_vbrsrq_x_n_s16 (__a, __b, __p); | |
31659 | } | |
31660 | ||
31661 | __extension__ extern __inline int32x4_t | |
31662 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31663 | __arm_vbrsrq_x (int32x4_t __a, int32_t __b, mve_pred16_t __p) | |
31664 | { | |
31665 | return __arm_vbrsrq_x_n_s32 (__a, __b, __p); | |
31666 | } | |
31667 | ||
31668 | __extension__ extern __inline uint8x16_t | |
31669 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31670 | __arm_vbrsrq_x (uint8x16_t __a, int32_t __b, mve_pred16_t __p) | |
31671 | { | |
31672 | return __arm_vbrsrq_x_n_u8 (__a, __b, __p); | |
31673 | } | |
31674 | ||
31675 | __extension__ extern __inline uint16x8_t | |
31676 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31677 | __arm_vbrsrq_x (uint16x8_t __a, int32_t __b, mve_pred16_t __p) | |
31678 | { | |
31679 | return __arm_vbrsrq_x_n_u16 (__a, __b, __p); | |
31680 | } | |
31681 | ||
31682 | __extension__ extern __inline uint32x4_t | |
31683 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31684 | __arm_vbrsrq_x (uint32x4_t __a, int32_t __b, mve_pred16_t __p) | |
31685 | { | |
31686 | return __arm_vbrsrq_x_n_u32 (__a, __b, __p); | |
31687 | } | |
31688 | ||
31689 | __extension__ extern __inline int8x16_t | |
31690 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31691 | __arm_veorq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
31692 | { | |
31693 | return __arm_veorq_x_s8 (__a, __b, __p); | |
31694 | } | |
31695 | ||
31696 | __extension__ extern __inline int16x8_t | |
31697 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31698 | __arm_veorq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
31699 | { | |
31700 | return __arm_veorq_x_s16 (__a, __b, __p); | |
31701 | } | |
31702 | ||
31703 | __extension__ extern __inline int32x4_t | |
31704 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31705 | __arm_veorq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
31706 | { | |
31707 | return __arm_veorq_x_s32 (__a, __b, __p); | |
31708 | } | |
31709 | ||
31710 | __extension__ extern __inline uint8x16_t | |
31711 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31712 | __arm_veorq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
31713 | { | |
31714 | return __arm_veorq_x_u8 (__a, __b, __p); | |
31715 | } | |
31716 | ||
31717 | __extension__ extern __inline uint16x8_t | |
31718 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31719 | __arm_veorq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
31720 | { | |
31721 | return __arm_veorq_x_u16 (__a, __b, __p); | |
31722 | } | |
31723 | ||
31724 | __extension__ extern __inline uint32x4_t | |
31725 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31726 | __arm_veorq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
31727 | { | |
31728 | return __arm_veorq_x_u32 (__a, __b, __p); | |
31729 | } | |
31730 | ||
31731 | __extension__ extern __inline int16x8_t | |
31732 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31733 | __arm_vmovlbq_x (int8x16_t __a, mve_pred16_t __p) | |
31734 | { | |
31735 | return __arm_vmovlbq_x_s8 (__a, __p); | |
31736 | } | |
31737 | ||
31738 | __extension__ extern __inline int32x4_t | |
31739 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31740 | __arm_vmovlbq_x (int16x8_t __a, mve_pred16_t __p) | |
31741 | { | |
31742 | return __arm_vmovlbq_x_s16 (__a, __p); | |
31743 | } | |
31744 | ||
31745 | __extension__ extern __inline uint16x8_t | |
31746 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31747 | __arm_vmovlbq_x (uint8x16_t __a, mve_pred16_t __p) | |
31748 | { | |
31749 | return __arm_vmovlbq_x_u8 (__a, __p); | |
31750 | } | |
31751 | ||
31752 | __extension__ extern __inline uint32x4_t | |
31753 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31754 | __arm_vmovlbq_x (uint16x8_t __a, mve_pred16_t __p) | |
31755 | { | |
31756 | return __arm_vmovlbq_x_u16 (__a, __p); | |
31757 | } | |
31758 | ||
31759 | __extension__ extern __inline int16x8_t | |
31760 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31761 | __arm_vmovltq_x (int8x16_t __a, mve_pred16_t __p) | |
31762 | { | |
31763 | return __arm_vmovltq_x_s8 (__a, __p); | |
31764 | } | |
31765 | ||
31766 | __extension__ extern __inline int32x4_t | |
31767 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31768 | __arm_vmovltq_x (int16x8_t __a, mve_pred16_t __p) | |
31769 | { | |
31770 | return __arm_vmovltq_x_s16 (__a, __p); | |
31771 | } | |
31772 | ||
31773 | __extension__ extern __inline uint16x8_t | |
31774 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31775 | __arm_vmovltq_x (uint8x16_t __a, mve_pred16_t __p) | |
31776 | { | |
31777 | return __arm_vmovltq_x_u8 (__a, __p); | |
31778 | } | |
31779 | ||
31780 | __extension__ extern __inline uint32x4_t | |
31781 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31782 | __arm_vmovltq_x (uint16x8_t __a, mve_pred16_t __p) | |
31783 | { | |
31784 | return __arm_vmovltq_x_u16 (__a, __p); | |
31785 | } | |
31786 | ||
31787 | __extension__ extern __inline int8x16_t | |
31788 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31789 | __arm_vmvnq_x (int8x16_t __a, mve_pred16_t __p) | |
31790 | { | |
31791 | return __arm_vmvnq_x_s8 (__a, __p); | |
31792 | } | |
31793 | ||
31794 | __extension__ extern __inline int16x8_t | |
31795 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31796 | __arm_vmvnq_x (int16x8_t __a, mve_pred16_t __p) | |
31797 | { | |
31798 | return __arm_vmvnq_x_s16 (__a, __p); | |
31799 | } | |
31800 | ||
31801 | __extension__ extern __inline int32x4_t | |
31802 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31803 | __arm_vmvnq_x (int32x4_t __a, mve_pred16_t __p) | |
31804 | { | |
31805 | return __arm_vmvnq_x_s32 (__a, __p); | |
31806 | } | |
31807 | ||
31808 | __extension__ extern __inline uint8x16_t | |
31809 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31810 | __arm_vmvnq_x (uint8x16_t __a, mve_pred16_t __p) | |
31811 | { | |
31812 | return __arm_vmvnq_x_u8 (__a, __p); | |
31813 | } | |
31814 | ||
31815 | __extension__ extern __inline uint16x8_t | |
31816 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31817 | __arm_vmvnq_x (uint16x8_t __a, mve_pred16_t __p) | |
31818 | { | |
31819 | return __arm_vmvnq_x_u16 (__a, __p); | |
31820 | } | |
31821 | ||
31822 | __extension__ extern __inline uint32x4_t | |
31823 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31824 | __arm_vmvnq_x (uint32x4_t __a, mve_pred16_t __p) | |
31825 | { | |
31826 | return __arm_vmvnq_x_u32 (__a, __p); | |
31827 | } | |
31828 | ||
31829 | __extension__ extern __inline int8x16_t | |
31830 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31831 | __arm_vornq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
31832 | { | |
31833 | return __arm_vornq_x_s8 (__a, __b, __p); | |
31834 | } | |
31835 | ||
31836 | __extension__ extern __inline int16x8_t | |
31837 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31838 | __arm_vornq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
31839 | { | |
31840 | return __arm_vornq_x_s16 (__a, __b, __p); | |
31841 | } | |
31842 | ||
31843 | __extension__ extern __inline int32x4_t | |
31844 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31845 | __arm_vornq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
31846 | { | |
31847 | return __arm_vornq_x_s32 (__a, __b, __p); | |
31848 | } | |
31849 | ||
31850 | __extension__ extern __inline uint8x16_t | |
31851 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31852 | __arm_vornq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
31853 | { | |
31854 | return __arm_vornq_x_u8 (__a, __b, __p); | |
31855 | } | |
31856 | ||
31857 | __extension__ extern __inline uint16x8_t | |
31858 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31859 | __arm_vornq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
31860 | { | |
31861 | return __arm_vornq_x_u16 (__a, __b, __p); | |
31862 | } | |
31863 | ||
31864 | __extension__ extern __inline uint32x4_t | |
31865 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31866 | __arm_vornq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
31867 | { | |
31868 | return __arm_vornq_x_u32 (__a, __b, __p); | |
31869 | } | |
31870 | ||
31871 | __extension__ extern __inline int8x16_t | |
31872 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31873 | __arm_vorrq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
31874 | { | |
31875 | return __arm_vorrq_x_s8 (__a, __b, __p); | |
31876 | } | |
31877 | ||
31878 | __extension__ extern __inline int16x8_t | |
31879 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31880 | __arm_vorrq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
31881 | { | |
31882 | return __arm_vorrq_x_s16 (__a, __b, __p); | |
31883 | } | |
31884 | ||
31885 | __extension__ extern __inline int32x4_t | |
31886 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31887 | __arm_vorrq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
31888 | { | |
31889 | return __arm_vorrq_x_s32 (__a, __b, __p); | |
31890 | } | |
31891 | ||
31892 | __extension__ extern __inline uint8x16_t | |
31893 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31894 | __arm_vorrq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) | |
31895 | { | |
31896 | return __arm_vorrq_x_u8 (__a, __b, __p); | |
31897 | } | |
31898 | ||
31899 | __extension__ extern __inline uint16x8_t | |
31900 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31901 | __arm_vorrq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) | |
31902 | { | |
31903 | return __arm_vorrq_x_u16 (__a, __b, __p); | |
31904 | } | |
31905 | ||
31906 | __extension__ extern __inline uint32x4_t | |
31907 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31908 | __arm_vorrq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) | |
31909 | { | |
31910 | return __arm_vorrq_x_u32 (__a, __b, __p); | |
31911 | } | |
31912 | ||
31913 | __extension__ extern __inline int8x16_t | |
31914 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31915 | __arm_vrev16q_x (int8x16_t __a, mve_pred16_t __p) | |
31916 | { | |
31917 | return __arm_vrev16q_x_s8 (__a, __p); | |
31918 | } | |
31919 | ||
31920 | __extension__ extern __inline uint8x16_t | |
31921 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31922 | __arm_vrev16q_x (uint8x16_t __a, mve_pred16_t __p) | |
31923 | { | |
31924 | return __arm_vrev16q_x_u8 (__a, __p); | |
31925 | } | |
31926 | ||
31927 | __extension__ extern __inline int8x16_t | |
31928 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31929 | __arm_vrev32q_x (int8x16_t __a, mve_pred16_t __p) | |
31930 | { | |
31931 | return __arm_vrev32q_x_s8 (__a, __p); | |
31932 | } | |
31933 | ||
31934 | __extension__ extern __inline int16x8_t | |
31935 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31936 | __arm_vrev32q_x (int16x8_t __a, mve_pred16_t __p) | |
31937 | { | |
31938 | return __arm_vrev32q_x_s16 (__a, __p); | |
31939 | } | |
31940 | ||
31941 | __extension__ extern __inline uint8x16_t | |
31942 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31943 | __arm_vrev32q_x (uint8x16_t __a, mve_pred16_t __p) | |
31944 | { | |
31945 | return __arm_vrev32q_x_u8 (__a, __p); | |
31946 | } | |
31947 | ||
31948 | __extension__ extern __inline uint16x8_t | |
31949 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31950 | __arm_vrev32q_x (uint16x8_t __a, mve_pred16_t __p) | |
31951 | { | |
31952 | return __arm_vrev32q_x_u16 (__a, __p); | |
31953 | } | |
31954 | ||
31955 | __extension__ extern __inline int8x16_t | |
31956 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31957 | __arm_vrev64q_x (int8x16_t __a, mve_pred16_t __p) | |
31958 | { | |
31959 | return __arm_vrev64q_x_s8 (__a, __p); | |
31960 | } | |
31961 | ||
31962 | __extension__ extern __inline int16x8_t | |
31963 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31964 | __arm_vrev64q_x (int16x8_t __a, mve_pred16_t __p) | |
31965 | { | |
31966 | return __arm_vrev64q_x_s16 (__a, __p); | |
31967 | } | |
31968 | ||
31969 | __extension__ extern __inline int32x4_t | |
31970 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31971 | __arm_vrev64q_x (int32x4_t __a, mve_pred16_t __p) | |
31972 | { | |
31973 | return __arm_vrev64q_x_s32 (__a, __p); | |
31974 | } | |
31975 | ||
31976 | __extension__ extern __inline uint8x16_t | |
31977 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31978 | __arm_vrev64q_x (uint8x16_t __a, mve_pred16_t __p) | |
31979 | { | |
31980 | return __arm_vrev64q_x_u8 (__a, __p); | |
31981 | } | |
31982 | ||
31983 | __extension__ extern __inline uint16x8_t | |
31984 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31985 | __arm_vrev64q_x (uint16x8_t __a, mve_pred16_t __p) | |
31986 | { | |
31987 | return __arm_vrev64q_x_u16 (__a, __p); | |
31988 | } | |
31989 | ||
31990 | __extension__ extern __inline uint32x4_t | |
31991 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31992 | __arm_vrev64q_x (uint32x4_t __a, mve_pred16_t __p) | |
31993 | { | |
31994 | return __arm_vrev64q_x_u32 (__a, __p); | |
31995 | } | |
31996 | ||
31997 | __extension__ extern __inline int8x16_t | |
31998 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
31999 | __arm_vrshlq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
32000 | { | |
32001 | return __arm_vrshlq_x_s8 (__a, __b, __p); | |
32002 | } | |
32003 | ||
32004 | __extension__ extern __inline int16x8_t | |
32005 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32006 | __arm_vrshlq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
32007 | { | |
32008 | return __arm_vrshlq_x_s16 (__a, __b, __p); | |
32009 | } | |
32010 | ||
32011 | __extension__ extern __inline int32x4_t | |
32012 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32013 | __arm_vrshlq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
32014 | { | |
32015 | return __arm_vrshlq_x_s32 (__a, __b, __p); | |
32016 | } | |
32017 | ||
32018 | __extension__ extern __inline uint8x16_t | |
32019 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32020 | __arm_vrshlq_x (uint8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
32021 | { | |
32022 | return __arm_vrshlq_x_u8 (__a, __b, __p); | |
32023 | } | |
32024 | ||
32025 | __extension__ extern __inline uint16x8_t | |
32026 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32027 | __arm_vrshlq_x (uint16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
32028 | { | |
32029 | return __arm_vrshlq_x_u16 (__a, __b, __p); | |
32030 | } | |
32031 | ||
32032 | __extension__ extern __inline uint32x4_t | |
32033 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32034 | __arm_vrshlq_x (uint32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
32035 | { | |
32036 | return __arm_vrshlq_x_u32 (__a, __b, __p); | |
32037 | } | |
32038 | ||
32039 | __extension__ extern __inline int16x8_t | |
32040 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32041 | __arm_vshllbq_x (int8x16_t __a, const int __imm, mve_pred16_t __p) | |
32042 | { | |
32043 | return __arm_vshllbq_x_n_s8 (__a, __imm, __p); | |
32044 | } | |
32045 | ||
32046 | __extension__ extern __inline int32x4_t | |
32047 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32048 | __arm_vshllbq_x (int16x8_t __a, const int __imm, mve_pred16_t __p) | |
32049 | { | |
32050 | return __arm_vshllbq_x_n_s16 (__a, __imm, __p); | |
32051 | } | |
32052 | ||
32053 | __extension__ extern __inline uint16x8_t | |
32054 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32055 | __arm_vshllbq_x (uint8x16_t __a, const int __imm, mve_pred16_t __p) | |
32056 | { | |
32057 | return __arm_vshllbq_x_n_u8 (__a, __imm, __p); | |
32058 | } | |
32059 | ||
32060 | __extension__ extern __inline uint32x4_t | |
32061 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32062 | __arm_vshllbq_x (uint16x8_t __a, const int __imm, mve_pred16_t __p) | |
32063 | { | |
32064 | return __arm_vshllbq_x_n_u16 (__a, __imm, __p); | |
32065 | } | |
32066 | ||
32067 | __extension__ extern __inline int16x8_t | |
32068 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32069 | __arm_vshlltq_x (int8x16_t __a, const int __imm, mve_pred16_t __p) | |
32070 | { | |
32071 | return __arm_vshlltq_x_n_s8 (__a, __imm, __p); | |
32072 | } | |
32073 | ||
32074 | __extension__ extern __inline int32x4_t | |
32075 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32076 | __arm_vshlltq_x (int16x8_t __a, const int __imm, mve_pred16_t __p) | |
32077 | { | |
32078 | return __arm_vshlltq_x_n_s16 (__a, __imm, __p); | |
32079 | } | |
32080 | ||
32081 | __extension__ extern __inline uint16x8_t | |
32082 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32083 | __arm_vshlltq_x (uint8x16_t __a, const int __imm, mve_pred16_t __p) | |
32084 | { | |
32085 | return __arm_vshlltq_x_n_u8 (__a, __imm, __p); | |
32086 | } | |
32087 | ||
32088 | __extension__ extern __inline uint32x4_t | |
32089 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32090 | __arm_vshlltq_x (uint16x8_t __a, const int __imm, mve_pred16_t __p) | |
32091 | { | |
32092 | return __arm_vshlltq_x_n_u16 (__a, __imm, __p); | |
32093 | } | |
32094 | ||
32095 | __extension__ extern __inline int8x16_t | |
32096 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32097 | __arm_vshlq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
32098 | { | |
32099 | return __arm_vshlq_x_s8 (__a, __b, __p); | |
32100 | } | |
32101 | ||
32102 | __extension__ extern __inline int16x8_t | |
32103 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32104 | __arm_vshlq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
32105 | { | |
32106 | return __arm_vshlq_x_s16 (__a, __b, __p); | |
32107 | } | |
32108 | ||
32109 | __extension__ extern __inline int32x4_t | |
32110 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32111 | __arm_vshlq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
32112 | { | |
32113 | return __arm_vshlq_x_s32 (__a, __b, __p); | |
32114 | } | |
32115 | ||
32116 | __extension__ extern __inline uint8x16_t | |
32117 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32118 | __arm_vshlq_x (uint8x16_t __a, int8x16_t __b, mve_pred16_t __p) | |
32119 | { | |
32120 | return __arm_vshlq_x_u8 (__a, __b, __p); | |
32121 | } | |
32122 | ||
32123 | __extension__ extern __inline uint16x8_t | |
32124 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32125 | __arm_vshlq_x (uint16x8_t __a, int16x8_t __b, mve_pred16_t __p) | |
32126 | { | |
32127 | return __arm_vshlq_x_u16 (__a, __b, __p); | |
32128 | } | |
32129 | ||
32130 | __extension__ extern __inline uint32x4_t | |
32131 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32132 | __arm_vshlq_x (uint32x4_t __a, int32x4_t __b, mve_pred16_t __p) | |
32133 | { | |
32134 | return __arm_vshlq_x_u32 (__a, __b, __p); | |
32135 | } | |
32136 | ||
32137 | __extension__ extern __inline int8x16_t | |
32138 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32139 | __arm_vshlq_x_n (int8x16_t __a, const int __imm, mve_pred16_t __p) | |
32140 | { | |
32141 | return __arm_vshlq_x_n_s8 (__a, __imm, __p); | |
32142 | } | |
32143 | ||
32144 | __extension__ extern __inline int16x8_t | |
32145 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32146 | __arm_vshlq_x_n (int16x8_t __a, const int __imm, mve_pred16_t __p) | |
32147 | { | |
32148 | return __arm_vshlq_x_n_s16 (__a, __imm, __p); | |
32149 | } | |
32150 | ||
32151 | __extension__ extern __inline int32x4_t | |
32152 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32153 | __arm_vshlq_x_n (int32x4_t __a, const int __imm, mve_pred16_t __p) | |
32154 | { | |
32155 | return __arm_vshlq_x_n_s32 (__a, __imm, __p); | |
32156 | } | |
32157 | ||
32158 | __extension__ extern __inline uint8x16_t | |
32159 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32160 | __arm_vshlq_x_n (uint8x16_t __a, const int __imm, mve_pred16_t __p) | |
32161 | { | |
32162 | return __arm_vshlq_x_n_u8 (__a, __imm, __p); | |
32163 | } | |
32164 | ||
32165 | __extension__ extern __inline uint16x8_t | |
32166 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32167 | __arm_vshlq_x_n (uint16x8_t __a, const int __imm, mve_pred16_t __p) | |
32168 | { | |
32169 | return __arm_vshlq_x_n_u16 (__a, __imm, __p); | |
32170 | } | |
32171 | ||
32172 | __extension__ extern __inline uint32x4_t | |
32173 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32174 | __arm_vshlq_x_n (uint32x4_t __a, const int __imm, mve_pred16_t __p) | |
32175 | { | |
32176 | return __arm_vshlq_x_n_u32 (__a, __imm, __p); | |
32177 | } | |
32178 | ||
32179 | __extension__ extern __inline int8x16_t | |
32180 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32181 | __arm_vrshrq_x (int8x16_t __a, const int __imm, mve_pred16_t __p) | |
32182 | { | |
32183 | return __arm_vrshrq_x_n_s8 (__a, __imm, __p); | |
32184 | } | |
32185 | ||
32186 | __extension__ extern __inline int16x8_t | |
32187 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32188 | __arm_vrshrq_x (int16x8_t __a, const int __imm, mve_pred16_t __p) | |
32189 | { | |
32190 | return __arm_vrshrq_x_n_s16 (__a, __imm, __p); | |
32191 | } | |
32192 | ||
32193 | __extension__ extern __inline int32x4_t | |
32194 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32195 | __arm_vrshrq_x (int32x4_t __a, const int __imm, mve_pred16_t __p) | |
32196 | { | |
32197 | return __arm_vrshrq_x_n_s32 (__a, __imm, __p); | |
32198 | } | |
32199 | ||
32200 | __extension__ extern __inline uint8x16_t | |
32201 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32202 | __arm_vrshrq_x (uint8x16_t __a, const int __imm, mve_pred16_t __p) | |
32203 | { | |
32204 | return __arm_vrshrq_x_n_u8 (__a, __imm, __p); | |
32205 | } | |
32206 | ||
32207 | __extension__ extern __inline uint16x8_t | |
32208 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32209 | __arm_vrshrq_x (uint16x8_t __a, const int __imm, mve_pred16_t __p) | |
32210 | { | |
32211 | return __arm_vrshrq_x_n_u16 (__a, __imm, __p); | |
32212 | } | |
32213 | ||
32214 | __extension__ extern __inline uint32x4_t | |
32215 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32216 | __arm_vrshrq_x (uint32x4_t __a, const int __imm, mve_pred16_t __p) | |
32217 | { | |
32218 | return __arm_vrshrq_x_n_u32 (__a, __imm, __p); | |
32219 | } | |
32220 | ||
32221 | __extension__ extern __inline int8x16_t | |
32222 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32223 | __arm_vshrq_x (int8x16_t __a, const int __imm, mve_pred16_t __p) | |
32224 | { | |
32225 | return __arm_vshrq_x_n_s8 (__a, __imm, __p); | |
32226 | } | |
32227 | ||
32228 | __extension__ extern __inline int16x8_t | |
32229 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32230 | __arm_vshrq_x (int16x8_t __a, const int __imm, mve_pred16_t __p) | |
32231 | { | |
32232 | return __arm_vshrq_x_n_s16 (__a, __imm, __p); | |
32233 | } | |
32234 | ||
32235 | __extension__ extern __inline int32x4_t | |
32236 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32237 | __arm_vshrq_x (int32x4_t __a, const int __imm, mve_pred16_t __p) | |
32238 | { | |
32239 | return __arm_vshrq_x_n_s32 (__a, __imm, __p); | |
32240 | } | |
32241 | ||
32242 | __extension__ extern __inline uint8x16_t | |
32243 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32244 | __arm_vshrq_x (uint8x16_t __a, const int __imm, mve_pred16_t __p) | |
32245 | { | |
32246 | return __arm_vshrq_x_n_u8 (__a, __imm, __p); | |
32247 | } | |
32248 | ||
32249 | __extension__ extern __inline uint16x8_t | |
32250 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32251 | __arm_vshrq_x (uint16x8_t __a, const int __imm, mve_pred16_t __p) | |
32252 | { | |
32253 | return __arm_vshrq_x_n_u16 (__a, __imm, __p); | |
32254 | } | |
32255 | ||
32256 | __extension__ extern __inline uint32x4_t | |
32257 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32258 | __arm_vshrq_x (uint32x4_t __a, const int __imm, mve_pred16_t __p) | |
32259 | { | |
32260 | return __arm_vshrq_x_n_u32 (__a, __imm, __p); | |
32261 | } | |
32262 | ||
32263 | __extension__ extern __inline int32x4_t | |
32264 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32265 | __arm_vadciq (int32x4_t __a, int32x4_t __b, unsigned * __carry_out) | |
32266 | { | |
32267 | return __arm_vadciq_s32 (__a, __b, __carry_out); | |
32268 | } | |
32269 | ||
32270 | __extension__ extern __inline uint32x4_t | |
32271 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32272 | __arm_vadciq (uint32x4_t __a, uint32x4_t __b, unsigned * __carry_out) | |
32273 | { | |
32274 | return __arm_vadciq_u32 (__a, __b, __carry_out); | |
32275 | } | |
32276 | ||
32277 | __extension__ extern __inline int32x4_t | |
32278 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32279 | __arm_vadciq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, unsigned * __carry_out, mve_pred16_t __p) | |
32280 | { | |
32281 | return __arm_vadciq_m_s32 (__inactive, __a, __b, __carry_out, __p); | |
32282 | } | |
32283 | ||
32284 | __extension__ extern __inline uint32x4_t | |
32285 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32286 | __arm_vadciq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, unsigned * __carry_out, mve_pred16_t __p) | |
32287 | { | |
32288 | return __arm_vadciq_m_u32 (__inactive, __a, __b, __carry_out, __p); | |
32289 | } | |
32290 | ||
32291 | __extension__ extern __inline int32x4_t | |
32292 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32293 | __arm_vadcq (int32x4_t __a, int32x4_t __b, unsigned * __carry) | |
32294 | { | |
32295 | return __arm_vadcq_s32 (__a, __b, __carry); | |
32296 | } | |
32297 | ||
32298 | __extension__ extern __inline uint32x4_t | |
32299 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32300 | __arm_vadcq (uint32x4_t __a, uint32x4_t __b, unsigned * __carry) | |
32301 | { | |
32302 | return __arm_vadcq_u32 (__a, __b, __carry); | |
32303 | } | |
32304 | ||
32305 | __extension__ extern __inline int32x4_t | |
32306 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32307 | __arm_vadcq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, unsigned * __carry, mve_pred16_t __p) | |
32308 | { | |
32309 | return __arm_vadcq_m_s32 (__inactive, __a, __b, __carry, __p); | |
32310 | } | |
32311 | ||
32312 | __extension__ extern __inline uint32x4_t | |
32313 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32314 | __arm_vadcq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, unsigned * __carry, mve_pred16_t __p) | |
32315 | { | |
32316 | return __arm_vadcq_m_u32 (__inactive, __a, __b, __carry, __p); | |
32317 | } | |
32318 | ||
32319 | __extension__ extern __inline int32x4_t | |
32320 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32321 | __arm_vsbciq (int32x4_t __a, int32x4_t __b, unsigned * __carry_out) | |
32322 | { | |
32323 | return __arm_vsbciq_s32 (__a, __b, __carry_out); | |
32324 | } | |
32325 | ||
32326 | __extension__ extern __inline uint32x4_t | |
32327 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32328 | __arm_vsbciq (uint32x4_t __a, uint32x4_t __b, unsigned * __carry_out) | |
32329 | { | |
32330 | return __arm_vsbciq_u32 (__a, __b, __carry_out); | |
32331 | } | |
32332 | ||
32333 | __extension__ extern __inline int32x4_t | |
32334 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32335 | __arm_vsbciq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, unsigned * __carry_out, mve_pred16_t __p) | |
32336 | { | |
32337 | return __arm_vsbciq_m_s32 (__inactive, __a, __b, __carry_out, __p); | |
32338 | } | |
32339 | ||
32340 | __extension__ extern __inline uint32x4_t | |
32341 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32342 | __arm_vsbciq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, unsigned * __carry_out, mve_pred16_t __p) | |
32343 | { | |
32344 | return __arm_vsbciq_m_u32 (__inactive, __a, __b, __carry_out, __p); | |
32345 | } | |
32346 | ||
32347 | __extension__ extern __inline int32x4_t | |
32348 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32349 | __arm_vsbcq (int32x4_t __a, int32x4_t __b, unsigned * __carry) | |
32350 | { | |
32351 | return __arm_vsbcq_s32 (__a, __b, __carry); | |
32352 | } | |
32353 | ||
32354 | __extension__ extern __inline uint32x4_t | |
32355 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32356 | __arm_vsbcq (uint32x4_t __a, uint32x4_t __b, unsigned * __carry) | |
32357 | { | |
32358 | return __arm_vsbcq_u32 (__a, __b, __carry); | |
32359 | } | |
32360 | ||
32361 | __extension__ extern __inline int32x4_t | |
32362 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32363 | __arm_vsbcq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, unsigned * __carry, mve_pred16_t __p) | |
32364 | { | |
32365 | return __arm_vsbcq_m_s32 (__inactive, __a, __b, __carry, __p); | |
32366 | } | |
32367 | ||
32368 | __extension__ extern __inline uint32x4_t | |
32369 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32370 | __arm_vsbcq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, unsigned * __carry, mve_pred16_t __p) | |
32371 | { | |
32372 | return __arm_vsbcq_m_u32 (__inactive, __a, __b, __carry, __p); | |
32373 | } | |
32374 | ||
32375 | __extension__ extern __inline void | |
32376 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32377 | __arm_vst1q_p (uint8_t * __addr, uint8x16_t __value, mve_pred16_t __p) | |
32378 | { | |
32379 | __arm_vst1q_p_u8 (__addr, __value, __p); | |
32380 | } | |
32381 | ||
32382 | __extension__ extern __inline void | |
32383 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32384 | __arm_vst1q_p (int8_t * __addr, int8x16_t __value, mve_pred16_t __p) | |
32385 | { | |
32386 | __arm_vst1q_p_s8 (__addr, __value, __p); | |
32387 | } | |
32388 | ||
32389 | __extension__ extern __inline void | |
32390 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32391 | __arm_vst2q (int8_t * __addr, int8x16x2_t __value) | |
32392 | { | |
32393 | __arm_vst2q_s8 (__addr, __value); | |
32394 | } | |
32395 | ||
32396 | __extension__ extern __inline void | |
32397 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32398 | __arm_vst2q (uint8_t * __addr, uint8x16x2_t __value) | |
32399 | { | |
32400 | __arm_vst2q_u8 (__addr, __value); | |
32401 | } | |
32402 | ||
32403 | __extension__ extern __inline uint8x16_t | |
32404 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32405 | __arm_vld1q_z (uint8_t const *__base, mve_pred16_t __p) | |
32406 | { | |
32407 | return __arm_vld1q_z_u8 (__base, __p); | |
32408 | } | |
32409 | ||
32410 | __extension__ extern __inline int8x16_t | |
32411 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32412 | __arm_vld1q_z (int8_t const *__base, mve_pred16_t __p) | |
32413 | { | |
32414 | return __arm_vld1q_z_s8 (__base, __p); | |
32415 | } | |
32416 | ||
32417 | __extension__ extern __inline int8x16x2_t | |
32418 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32419 | __arm_vld2q (int8_t const * __addr) | |
32420 | { | |
32421 | return __arm_vld2q_s8 (__addr); | |
32422 | } | |
32423 | ||
32424 | __extension__ extern __inline uint8x16x2_t | |
32425 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32426 | __arm_vld2q (uint8_t const * __addr) | |
32427 | { | |
32428 | return __arm_vld2q_u8 (__addr); | |
32429 | } | |
32430 | ||
32431 | __extension__ extern __inline int8x16x4_t | |
32432 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32433 | __arm_vld4q (int8_t const * __addr) | |
32434 | { | |
32435 | return __arm_vld4q_s8 (__addr); | |
32436 | } | |
32437 | ||
32438 | __extension__ extern __inline uint8x16x4_t | |
32439 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32440 | __arm_vld4q (uint8_t const * __addr) | |
32441 | { | |
32442 | return __arm_vld4q_u8 (__addr); | |
32443 | } | |
32444 | ||
32445 | __extension__ extern __inline void | |
32446 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32447 | __arm_vst1q_p (uint16_t * __addr, uint16x8_t __value, mve_pred16_t __p) | |
32448 | { | |
32449 | __arm_vst1q_p_u16 (__addr, __value, __p); | |
32450 | } | |
32451 | ||
32452 | __extension__ extern __inline void | |
32453 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32454 | __arm_vst1q_p (int16_t * __addr, int16x8_t __value, mve_pred16_t __p) | |
32455 | { | |
32456 | __arm_vst1q_p_s16 (__addr, __value, __p); | |
32457 | } | |
32458 | ||
32459 | __extension__ extern __inline void | |
32460 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32461 | __arm_vst2q (int16_t * __addr, int16x8x2_t __value) | |
32462 | { | |
32463 | __arm_vst2q_s16 (__addr, __value); | |
32464 | } | |
32465 | ||
32466 | __extension__ extern __inline void | |
32467 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32468 | __arm_vst2q (uint16_t * __addr, uint16x8x2_t __value) | |
32469 | { | |
32470 | __arm_vst2q_u16 (__addr, __value); | |
32471 | } | |
32472 | ||
32473 | __extension__ extern __inline uint16x8_t | |
32474 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32475 | __arm_vld1q_z (uint16_t const *__base, mve_pred16_t __p) | |
32476 | { | |
32477 | return __arm_vld1q_z_u16 (__base, __p); | |
32478 | } | |
32479 | ||
32480 | __extension__ extern __inline int16x8_t | |
32481 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32482 | __arm_vld1q_z (int16_t const *__base, mve_pred16_t __p) | |
32483 | { | |
32484 | return __arm_vld1q_z_s16 (__base, __p); | |
32485 | } | |
32486 | ||
32487 | __extension__ extern __inline int16x8x2_t | |
32488 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32489 | __arm_vld2q (int16_t const * __addr) | |
32490 | { | |
32491 | return __arm_vld2q_s16 (__addr); | |
32492 | } | |
32493 | ||
32494 | __extension__ extern __inline uint16x8x2_t | |
32495 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32496 | __arm_vld2q (uint16_t const * __addr) | |
32497 | { | |
32498 | return __arm_vld2q_u16 (__addr); | |
32499 | } | |
32500 | ||
32501 | __extension__ extern __inline int16x8x4_t | |
32502 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32503 | __arm_vld4q (int16_t const * __addr) | |
32504 | { | |
32505 | return __arm_vld4q_s16 (__addr); | |
32506 | } | |
32507 | ||
32508 | __extension__ extern __inline uint16x8x4_t | |
32509 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32510 | __arm_vld4q (uint16_t const * __addr) | |
32511 | { | |
32512 | return __arm_vld4q_u16 (__addr); | |
32513 | } | |
32514 | ||
32515 | __extension__ extern __inline void | |
32516 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32517 | __arm_vst1q_p (uint32_t * __addr, uint32x4_t __value, mve_pred16_t __p) | |
32518 | { | |
32519 | __arm_vst1q_p_u32 (__addr, __value, __p); | |
32520 | } | |
32521 | ||
32522 | __extension__ extern __inline void | |
32523 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32524 | __arm_vst1q_p (int32_t * __addr, int32x4_t __value, mve_pred16_t __p) | |
32525 | { | |
32526 | __arm_vst1q_p_s32 (__addr, __value, __p); | |
32527 | } | |
32528 | ||
32529 | __extension__ extern __inline void | |
32530 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32531 | __arm_vst2q (int32_t * __addr, int32x4x2_t __value) | |
32532 | { | |
32533 | __arm_vst2q_s32 (__addr, __value); | |
32534 | } | |
32535 | ||
32536 | __extension__ extern __inline void | |
32537 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32538 | __arm_vst2q (uint32_t * __addr, uint32x4x2_t __value) | |
32539 | { | |
32540 | __arm_vst2q_u32 (__addr, __value); | |
32541 | } | |
32542 | ||
32543 | __extension__ extern __inline uint32x4_t | |
32544 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32545 | __arm_vld1q_z (uint32_t const *__base, mve_pred16_t __p) | |
32546 | { | |
32547 | return __arm_vld1q_z_u32 (__base, __p); | |
32548 | } | |
32549 | ||
32550 | __extension__ extern __inline int32x4_t | |
32551 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32552 | __arm_vld1q_z (int32_t const *__base, mve_pred16_t __p) | |
32553 | { | |
32554 | return __arm_vld1q_z_s32 (__base, __p); | |
32555 | } | |
32556 | ||
32557 | __extension__ extern __inline int32x4x2_t | |
32558 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32559 | __arm_vld2q (int32_t const * __addr) | |
32560 | { | |
32561 | return __arm_vld2q_s32 (__addr); | |
32562 | } | |
32563 | ||
32564 | __extension__ extern __inline uint32x4x2_t | |
32565 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32566 | __arm_vld2q (uint32_t const * __addr) | |
32567 | { | |
32568 | return __arm_vld2q_u32 (__addr); | |
32569 | } | |
32570 | ||
32571 | __extension__ extern __inline int32x4x4_t | |
32572 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32573 | __arm_vld4q (int32_t const * __addr) | |
32574 | { | |
32575 | return __arm_vld4q_s32 (__addr); | |
32576 | } | |
32577 | ||
32578 | __extension__ extern __inline uint32x4x4_t | |
32579 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32580 | __arm_vld4q (uint32_t const * __addr) | |
32581 | { | |
32582 | return __arm_vld4q_u32 (__addr); | |
32583 | } | |
32584 | ||
32585 | __extension__ extern __inline int16x8_t | |
32586 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32587 | __arm_vsetq_lane (int16_t __a, int16x8_t __b, const int __idx) | |
32588 | { | |
32589 | return __arm_vsetq_lane_s16 (__a, __b, __idx); | |
32590 | } | |
32591 | ||
32592 | __extension__ extern __inline int32x4_t | |
32593 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32594 | __arm_vsetq_lane (int32_t __a, int32x4_t __b, const int __idx) | |
32595 | { | |
32596 | return __arm_vsetq_lane_s32 (__a, __b, __idx); | |
32597 | } | |
32598 | ||
32599 | __extension__ extern __inline int8x16_t | |
32600 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32601 | __arm_vsetq_lane (int8_t __a, int8x16_t __b, const int __idx) | |
32602 | { | |
32603 | return __arm_vsetq_lane_s8 (__a, __b, __idx); | |
32604 | } | |
32605 | ||
32606 | __extension__ extern __inline int64x2_t | |
32607 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32608 | __arm_vsetq_lane (int64_t __a, int64x2_t __b, const int __idx) | |
32609 | { | |
32610 | return __arm_vsetq_lane_s64 (__a, __b, __idx); | |
32611 | } | |
32612 | ||
32613 | __extension__ extern __inline uint8x16_t | |
32614 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32615 | __arm_vsetq_lane (uint8_t __a, uint8x16_t __b, const int __idx) | |
32616 | { | |
32617 | return __arm_vsetq_lane_u8 (__a, __b, __idx); | |
32618 | } | |
32619 | ||
32620 | __extension__ extern __inline uint16x8_t | |
32621 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32622 | __arm_vsetq_lane (uint16_t __a, uint16x8_t __b, const int __idx) | |
32623 | { | |
32624 | return __arm_vsetq_lane_u16 (__a, __b, __idx); | |
32625 | } | |
32626 | ||
32627 | __extension__ extern __inline uint32x4_t | |
32628 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32629 | __arm_vsetq_lane (uint32_t __a, uint32x4_t __b, const int __idx) | |
32630 | { | |
32631 | return __arm_vsetq_lane_u32 (__a, __b, __idx); | |
32632 | } | |
32633 | ||
32634 | __extension__ extern __inline uint64x2_t | |
32635 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32636 | __arm_vsetq_lane (uint64_t __a, uint64x2_t __b, const int __idx) | |
32637 | { | |
32638 | return __arm_vsetq_lane_u64 (__a, __b, __idx); | |
32639 | } | |
32640 | ||
32641 | __extension__ extern __inline int16_t | |
32642 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32643 | __arm_vgetq_lane (int16x8_t __a, const int __idx) | |
32644 | { | |
32645 | return __arm_vgetq_lane_s16 (__a, __idx); | |
32646 | } | |
32647 | ||
32648 | __extension__ extern __inline int32_t | |
32649 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32650 | __arm_vgetq_lane (int32x4_t __a, const int __idx) | |
32651 | { | |
32652 | return __arm_vgetq_lane_s32 (__a, __idx); | |
32653 | } | |
32654 | ||
32655 | __extension__ extern __inline int8_t | |
32656 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32657 | __arm_vgetq_lane (int8x16_t __a, const int __idx) | |
32658 | { | |
32659 | return __arm_vgetq_lane_s8 (__a, __idx); | |
32660 | } | |
32661 | ||
32662 | __extension__ extern __inline int64_t | |
32663 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32664 | __arm_vgetq_lane (int64x2_t __a, const int __idx) | |
32665 | { | |
32666 | return __arm_vgetq_lane_s64 (__a, __idx); | |
32667 | } | |
32668 | ||
32669 | __extension__ extern __inline uint8_t | |
32670 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32671 | __arm_vgetq_lane (uint8x16_t __a, const int __idx) | |
32672 | { | |
32673 | return __arm_vgetq_lane_u8 (__a, __idx); | |
32674 | } | |
32675 | ||
32676 | __extension__ extern __inline uint16_t | |
32677 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32678 | __arm_vgetq_lane (uint16x8_t __a, const int __idx) | |
32679 | { | |
32680 | return __arm_vgetq_lane_u16 (__a, __idx); | |
32681 | } | |
32682 | ||
32683 | __extension__ extern __inline uint32_t | |
32684 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32685 | __arm_vgetq_lane (uint32x4_t __a, const int __idx) | |
32686 | { | |
32687 | return __arm_vgetq_lane_u32 (__a, __idx); | |
32688 | } | |
32689 | ||
32690 | __extension__ extern __inline uint64_t | |
32691 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
32692 | __arm_vgetq_lane (uint64x2_t __a, const int __idx) | |
32693 | { | |
32694 | return __arm_vgetq_lane_u64 (__a, __idx); | |
32695 | } | |
32696 | ||
/* Polymorphic, predicated __arm_vshlcq_m: dispatch on the vector element
   type to the suffixed intrinsic.  __b points to the 32-bit carry word the
   underlying intrinsic reads/updates, __imm is the shift amount and __p the
   lane predicate.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlcq_m (int8x16_t __a, uint32_t * __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vshlcq_m_s8 (__a, __b, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlcq_m (uint8x16_t __a, uint32_t * __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vshlcq_m_u8 (__a, __b, __imm, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlcq_m (int16x8_t __a, uint32_t * __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vshlcq_m_s16 (__a, __b, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlcq_m (uint16x8_t __a, uint32_t * __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vshlcq_m_u16 (__a, __b, __imm, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlcq_m (int32x4_t __a, uint32_t * __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vshlcq_m_s32 (__a, __b, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlcq_m (uint32x4_t __a, uint32_t * __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vshlcq_m_u32 (__a, __b, __imm, __p);
}
32738 | ||
32739 | #if (__ARM_FEATURE_MVE & 2) /* MVE Floating point. */ | |
32740 | ||
/* Polymorphic __arm_vst4q for floating-point element types: forwards the
   4-vector structure store to the suffixed intrinsic chosen by the pointer
   element type.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst4q (float16_t * __addr, float16x8x4_t __value)
{
  __arm_vst4q_f16 (__addr, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst4q (float32_t * __addr, float32x4x4_t __value)
{
  __arm_vst4q_f32 (__addr, __value);
}
32754 | ||
/* Polymorphic float rounding family (vrndxq, vrndq, vrndpq, vrndnq, vrndmq,
   vrndaq): each overload forwards to the _f16 or _f32 suffixed intrinsic
   selected by the argument's vector type.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndxq (float16x8_t __a)
{
  return __arm_vrndxq_f16 (__a);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndxq (float32x4_t __a)
{
  return __arm_vrndxq_f32 (__a);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndq (float16x8_t __a)
{
  return __arm_vrndq_f16 (__a);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndq (float32x4_t __a)
{
  return __arm_vrndq_f32 (__a);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndpq (float16x8_t __a)
{
  return __arm_vrndpq_f16 (__a);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndpq (float32x4_t __a)
{
  return __arm_vrndpq_f32 (__a);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndnq (float16x8_t __a)
{
  return __arm_vrndnq_f16 (__a);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndnq (float32x4_t __a)
{
  return __arm_vrndnq_f32 (__a);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndmq (float16x8_t __a)
{
  return __arm_vrndmq_f16 (__a);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndmq (float32x4_t __a)
{
  return __arm_vrndmq_f32 (__a);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndaq (float16x8_t __a)
{
  return __arm_vrndaq_f16 (__a);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndaq (float32x4_t __a)
{
  return __arm_vrndaq_f32 (__a);
}
32838 | ||
/* Polymorphic unary float wrappers (vrev64q, vnegq, vdupq_n, vabsq): each
   overload forwards to the _f16 or _f32 intrinsic matching its argument.
   vdupq_n dispatches on the scalar type, producing the full-width vector.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q (float16x8_t __a)
{
  return __arm_vrev64q_f16 (__a);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q (float32x4_t __a)
{
  return __arm_vrev64q_f32 (__a);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vnegq (float16x8_t __a)
{
  return __arm_vnegq_f16 (__a);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vnegq (float32x4_t __a)
{
  return __arm_vnegq_f32 (__a);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdupq_n (float16_t __a)
{
  return __arm_vdupq_n_f16 (__a);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdupq_n (float32_t __a)
{
  return __arm_vdupq_n_f32 (__a);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabsq (float16x8_t __a)
{
  return __arm_vabsq_f16 (__a);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabsq (float32x4_t __a)
{
  return __arm_vabsq_f32 (__a);
}
32894 | ||
/* vrev32q (f16 only) plus the float conversion wrappers.  vcvttq_f32 /
   vcvtbq_f32 widen the top/bottom f16 half-lanes to f32 and so keep the
   source type in their names (no overload ambiguity to resolve).  vcvtq
   overloads convert integer vectors to the float vector of equal width.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev32q (float16x8_t __a)
{
  return __arm_vrev32q_f16 (__a);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvttq_f32 (float16x8_t __a)
{
  return __arm_vcvttq_f32_f16 (__a);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtbq_f32 (float16x8_t __a)
{
  return __arm_vcvtbq_f32_f16 (__a);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq (int16x8_t __a)
{
  return __arm_vcvtq_f16_s16 (__a);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq (int32x4_t __a)
{
  return __arm_vcvtq_f32_s32 (__a);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq (uint16x8_t __a)
{
  return __arm_vcvtq_f16_u16 (__a);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq (uint32x4_t __a)
{
  return __arm_vcvtq_f32_u32 (__a);
}
32943 | ||
/* Vector-scalar overloads: a scalar second operand selects the _n_ variant
   of the intrinsic (vsubq with a float scalar, vbrsrq with an int32_t bit
   count).  vcvtq_n converts fixed-point integers to float with __imm6
   fractional bits — the immediate is passed through unchecked here; range
   checking is done by the underlying builtin.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq (float16x8_t __a, float16_t __b)
{
  return __arm_vsubq_n_f16 (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq (float32x4_t __a, float32_t __b)
{
  return __arm_vsubq_n_f32 (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq (float16x8_t __a, int32_t __b)
{
  return __arm_vbrsrq_n_f16 (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq (float32x4_t __a, int32_t __b)
{
  return __arm_vbrsrq_n_f32 (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_n (int16x8_t __a, const int __imm6)
{
  return __arm_vcvtq_n_f16_s16 (__a, __imm6);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_n (int32x4_t __a, const int __imm6)
{
  return __arm_vcvtq_n_f32_s32 (__a, __imm6);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_n (uint16x8_t __a, const int __imm6)
{
  return __arm_vcvtq_n_f16_u16 (__a, __imm6);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_n (uint32x4_t __a, const int __imm6)
{
  return __arm_vcvtq_n_f32_u32 (__a, __imm6);
}
32999 | ||
/* f16 comparison family (vcmpneq, vcmpltq, vcmpleq, vcmpgtq, vcmpgeq,
   vcmpeqq), each returning a lane predicate.  Every comparison has two
   overloads: vector-vs-scalar (forwards to the _n_ variant) and
   vector-vs-vector.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq (float16x8_t __a, float16_t __b)
{
  return __arm_vcmpneq_n_f16 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq (float16x8_t __a, float16x8_t __b)
{
  return __arm_vcmpneq_f16 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq (float16x8_t __a, float16_t __b)
{
  return __arm_vcmpltq_n_f16 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq (float16x8_t __a, float16x8_t __b)
{
  return __arm_vcmpltq_f16 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq (float16x8_t __a, float16_t __b)
{
  return __arm_vcmpleq_n_f16 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq (float16x8_t __a, float16x8_t __b)
{
  return __arm_vcmpleq_f16 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq (float16x8_t __a, float16_t __b)
{
  return __arm_vcmpgtq_n_f16 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq (float16x8_t __a, float16x8_t __b)
{
  return __arm_vcmpgtq_f16 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq (float16x8_t __a, float16_t __b)
{
  return __arm_vcmpgeq_n_f16 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq (float16x8_t __a, float16x8_t __b)
{
  return __arm_vcmpgeq_f16 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq (float16x8_t __a, float16_t __b)
{
  return __arm_vcmpeqq_n_f16 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq (float16x8_t __a, float16x8_t __b)
{
  return __arm_vcmpeqq_f16 (__a, __b);
}
33083 | ||
/* f16 binary arithmetic/logical family.  All overloads dispatch on the
   float16x8_t operand type to the _f16 intrinsic; a scalar float16_t second
   operand (vmulq, vaddq) selects the _n_ variant.  The vminnmvq/vmaxnmvq
   and vminnmavq/vmaxnmavq forms take a scalar accumulator and return a
   scalar.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq (float16x8_t __a, float16x8_t __b)
{
  return __arm_vsubq_f16 (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq (float16x8_t __a, float16x8_t __b)
{
  return __arm_vorrq_f16 (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq (float16x8_t __a, float16x8_t __b)
{
  return __arm_vornq_f16 (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq (float16x8_t __a, float16_t __b)
{
  return __arm_vmulq_n_f16 (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq (float16x8_t __a, float16x8_t __b)
{
  return __arm_vmulq_f16 (__a, __b);
}

__extension__ extern __inline float16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmvq (float16_t __a, float16x8_t __b)
{
  return __arm_vminnmvq_f16 (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmq (float16x8_t __a, float16x8_t __b)
{
  return __arm_vminnmq_f16 (__a, __b);
}

__extension__ extern __inline float16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmavq (float16_t __a, float16x8_t __b)
{
  return __arm_vminnmavq_f16 (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmaq (float16x8_t __a, float16x8_t __b)
{
  return __arm_vminnmaq_f16 (__a, __b);
}

__extension__ extern __inline float16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmvq (float16_t __a, float16x8_t __b)
{
  return __arm_vmaxnmvq_f16 (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmq (float16x8_t __a, float16x8_t __b)
{
  return __arm_vmaxnmq_f16 (__a, __b);
}

__extension__ extern __inline float16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmavq (float16_t __a, float16x8_t __b)
{
  return __arm_vmaxnmavq_f16 (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmaq (float16x8_t __a, float16x8_t __b)
{
  return __arm_vmaxnmaq_f16 (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq (float16x8_t __a, float16x8_t __b)
{
  return __arm_veorq_f16 (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_rot90 (float16x8_t __a, float16x8_t __b)
{
  return __arm_vcmulq_rot90_f16 (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_rot270 (float16x8_t __a, float16x8_t __b)
{
  return __arm_vcmulq_rot270_f16 (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_rot180 (float16x8_t __a, float16x8_t __b)
{
  return __arm_vcmulq_rot180_f16 (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq (float16x8_t __a, float16x8_t __b)
{
  return __arm_vcmulq_f16 (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90 (float16x8_t __a, float16x8_t __b)
{
  return __arm_vcaddq_rot90_f16 (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270 (float16x8_t __a, float16x8_t __b)
{
  return __arm_vcaddq_rot270_f16 (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq (float16x8_t __a, float16x8_t __b)
{
  return __arm_vbicq_f16 (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq (float16x8_t __a, float16x8_t __b)
{
  return __arm_vandq_f16 (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq (float16x8_t __a, float16_t __b)
{
  return __arm_vaddq_n_f16 (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq (float16x8_t __a, float16x8_t __b)
{
  return __arm_vabdq_f16 (__a, __b);
}
33251 | ||
/* f32 comparison family — mirrors the f16 comparisons above: each
   comparison has a vector-vs-scalar overload (forwards to the _n_ variant)
   and a vector-vs-vector overload, returning a lane predicate.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq (float32x4_t __a, float32_t __b)
{
  return __arm_vcmpneq_n_f32 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq (float32x4_t __a, float32x4_t __b)
{
  return __arm_vcmpneq_f32 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq (float32x4_t __a, float32_t __b)
{
  return __arm_vcmpltq_n_f32 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq (float32x4_t __a, float32x4_t __b)
{
  return __arm_vcmpltq_f32 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq (float32x4_t __a, float32_t __b)
{
  return __arm_vcmpleq_n_f32 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq (float32x4_t __a, float32x4_t __b)
{
  return __arm_vcmpleq_f32 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq (float32x4_t __a, float32_t __b)
{
  return __arm_vcmpgtq_n_f32 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq (float32x4_t __a, float32x4_t __b)
{
  return __arm_vcmpgtq_f32 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq (float32x4_t __a, float32_t __b)
{
  return __arm_vcmpgeq_n_f32 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq (float32x4_t __a, float32x4_t __b)
{
  return __arm_vcmpgeq_f32 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq (float32x4_t __a, float32_t __b)
{
  return __arm_vcmpeqq_n_f32 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq (float32x4_t __a, float32x4_t __b)
{
  return __arm_vcmpeqq_f32 (__a, __b);
}
33335 | ||
/* f32 binary arithmetic/logical family — mirrors the f16 set: dispatch on
   the float32x4_t operand to the _f32 intrinsic; a scalar float32_t second
   operand (vmulq, vaddq) selects the _n_ variant; the vminnmvq/vmaxnmvq and
   vminnmavq/vmaxnmavq forms take and return a scalar.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq (float32x4_t __a, float32x4_t __b)
{
  return __arm_vsubq_f32 (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq (float32x4_t __a, float32x4_t __b)
{
  return __arm_vorrq_f32 (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq (float32x4_t __a, float32x4_t __b)
{
  return __arm_vornq_f32 (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq (float32x4_t __a, float32_t __b)
{
  return __arm_vmulq_n_f32 (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq (float32x4_t __a, float32x4_t __b)
{
  return __arm_vmulq_f32 (__a, __b);
}

__extension__ extern __inline float32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmvq (float32_t __a, float32x4_t __b)
{
  return __arm_vminnmvq_f32 (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmq (float32x4_t __a, float32x4_t __b)
{
  return __arm_vminnmq_f32 (__a, __b);
}

__extension__ extern __inline float32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmavq (float32_t __a, float32x4_t __b)
{
  return __arm_vminnmavq_f32 (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmaq (float32x4_t __a, float32x4_t __b)
{
  return __arm_vminnmaq_f32 (__a, __b);
}

__extension__ extern __inline float32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmvq (float32_t __a, float32x4_t __b)
{
  return __arm_vmaxnmvq_f32 (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmq (float32x4_t __a, float32x4_t __b)
{
  return __arm_vmaxnmq_f32 (__a, __b);
}

__extension__ extern __inline float32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmavq (float32_t __a, float32x4_t __b)
{
  return __arm_vmaxnmavq_f32 (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmaq (float32x4_t __a, float32x4_t __b)
{
  return __arm_vmaxnmaq_f32 (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq (float32x4_t __a, float32x4_t __b)
{
  return __arm_veorq_f32 (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_rot90 (float32x4_t __a, float32x4_t __b)
{
  return __arm_vcmulq_rot90_f32 (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_rot270 (float32x4_t __a, float32x4_t __b)
{
  return __arm_vcmulq_rot270_f32 (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_rot180 (float32x4_t __a, float32x4_t __b)
{
  return __arm_vcmulq_rot180_f32 (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq (float32x4_t __a, float32x4_t __b)
{
  return __arm_vcmulq_f32 (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90 (float32x4_t __a, float32x4_t __b)
{
  return __arm_vcaddq_rot90_f32 (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270 (float32x4_t __a, float32x4_t __b)
{
  return __arm_vcaddq_rot270_f32 (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq (float32x4_t __a, float32x4_t __b)
{
  return __arm_vbicq_f32 (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq (float32x4_t __a, float32x4_t __b)
{
  return __arm_vandq_f32 (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq (float32x4_t __a, float32_t __b)
{
  return __arm_vaddq_n_f32 (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq (float32x4_t __a, float32x4_t __b)
{
  return __arm_vabdq_f32 (__a, __b);
}
33503 | ||
/* Predicated (_m) overloads: vcmpeqq_m compares under predicate __p;
   vcvtaq_m and vcvtq_m convert float lanes under __p, with inactive lanes
   taken from __inactive.  Dispatch is on the argument vector types to the
   fully suffixed intrinsic.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_m (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __arm_vcmpeqq_m_f16 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_m (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __arm_vcmpeqq_m_f32 (__a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtaq_m (int16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __arm_vcvtaq_m_s16_f16 (__inactive, __a, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtaq_m (uint16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __arm_vcvtaq_m_u16_f16 (__inactive, __a, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtaq_m (int32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __arm_vcvtaq_m_s32_f32 (__inactive, __a, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtaq_m (uint32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __arm_vcvtaq_m_u32_f32 (__inactive, __a, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_m (float16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
{
  return __arm_vcvtq_m_f16_s16 (__inactive, __a, __p);
}
33552 | ||
33553 | __extension__ extern __inline float16x8_t | |
33554 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
33555 | __arm_vcvtq_m (float16x8_t __inactive, uint16x8_t __a, mve_pred16_t __p) | |
33556 | { | |
33557 | return __arm_vcvtq_m_f16_u16 (__inactive, __a, __p); | |
33558 | } | |
33559 | ||
33560 | __extension__ extern __inline float32x4_t | |
33561 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
33562 | __arm_vcvtq_m (float32x4_t __inactive, int32x4_t __a, mve_pred16_t __p) | |
33563 | { | |
33564 | return __arm_vcvtq_m_f32_s32 (__inactive, __a, __p); | |
33565 | } | |
33566 | ||
33567 | __extension__ extern __inline float32x4_t | |
33568 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
33569 | __arm_vcvtq_m (float32x4_t __inactive, uint32x4_t __a, mve_pred16_t __p) | |
33570 | { | |
33571 | return __arm_vcvtq_m_f32_u32 (__inactive, __a, __p); | |
33572 | } | |
33573 | ||
33574 | __extension__ extern __inline float16x8_t | |
33575 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
33576 | __arm_vcvtbq_m (float16x8_t __a, float32x4_t __b, mve_pred16_t __p) | |
33577 | { | |
33578 | return __arm_vcvtbq_m_f16_f32 (__a, __b, __p); | |
33579 | } | |
33580 | ||
33581 | __extension__ extern __inline float32x4_t | |
33582 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
33583 | __arm_vcvtbq_m (float32x4_t __inactive, float16x8_t __a, mve_pred16_t __p) | |
33584 | { | |
33585 | return __arm_vcvtbq_m_f32_f16 (__inactive, __a, __p); | |
33586 | } | |
33587 | ||
33588 | __extension__ extern __inline float16x8_t | |
33589 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
33590 | __arm_vcvttq_m (float16x8_t __a, float32x4_t __b, mve_pred16_t __p) | |
33591 | { | |
33592 | return __arm_vcvttq_m_f16_f32 (__a, __b, __p); | |
33593 | } | |
33594 | ||
33595 | __extension__ extern __inline float32x4_t | |
33596 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
33597 | __arm_vcvttq_m (float32x4_t __inactive, float16x8_t __a, mve_pred16_t __p) | |
33598 | { | |
33599 | return __arm_vcvttq_m_f32_f16 (__inactive, __a, __p); | |
33600 | } | |
33601 | ||
33602 | __extension__ extern __inline float16x8_t | |
33603 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
33604 | __arm_vrev32q_m (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p) | |
33605 | { | |
33606 | return __arm_vrev32q_m_f16 (__inactive, __a, __p); | |
33607 | } | |
33608 | ||
33609 | __extension__ extern __inline float16x8_t | |
33610 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
33611 | __arm_vcmlaq (float16x8_t __a, float16x8_t __b, float16x8_t __c) | |
33612 | { | |
33613 | return __arm_vcmlaq_f16 (__a, __b, __c); | |
33614 | } | |
33615 | ||
33616 | __extension__ extern __inline float16x8_t | |
33617 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
33618 | __arm_vcmlaq_rot180 (float16x8_t __a, float16x8_t __b, float16x8_t __c) | |
33619 | { | |
33620 | return __arm_vcmlaq_rot180_f16 (__a, __b, __c); | |
33621 | } | |
33622 | ||
33623 | __extension__ extern __inline float16x8_t | |
33624 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
33625 | __arm_vcmlaq_rot270 (float16x8_t __a, float16x8_t __b, float16x8_t __c) | |
33626 | { | |
33627 | return __arm_vcmlaq_rot270_f16 (__a, __b, __c); | |
33628 | } | |
33629 | ||
33630 | __extension__ extern __inline float16x8_t | |
33631 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
33632 | __arm_vcmlaq_rot90 (float16x8_t __a, float16x8_t __b, float16x8_t __c) | |
33633 | { | |
33634 | return __arm_vcmlaq_rot90_f16 (__a, __b, __c); | |
33635 | } | |
33636 | ||
33637 | __extension__ extern __inline float16x8_t | |
33638 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
33639 | __arm_vfmaq (float16x8_t __a, float16x8_t __b, float16x8_t __c) | |
33640 | { | |
33641 | return __arm_vfmaq_f16 (__a, __b, __c); | |
33642 | } | |
33643 | ||
33644 | __extension__ extern __inline float16x8_t | |
33645 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
33646 | __arm_vfmaq (float16x8_t __a, float16x8_t __b, float16_t __c) | |
33647 | { | |
33648 | return __arm_vfmaq_n_f16 (__a, __b, __c); | |
33649 | } | |
33650 | ||
33651 | __extension__ extern __inline float16x8_t | |
33652 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
33653 | __arm_vfmasq (float16x8_t __a, float16x8_t __b, float16_t __c) | |
33654 | { | |
33655 | return __arm_vfmasq_n_f16 (__a, __b, __c); | |
33656 | } | |
33657 | ||
33658 | __extension__ extern __inline float16x8_t | |
33659 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
33660 | __arm_vfmsq (float16x8_t __a, float16x8_t __b, float16x8_t __c) | |
33661 | { | |
33662 | return __arm_vfmsq_f16 (__a, __b, __c); | |
33663 | } | |
33664 | ||
33665 | __extension__ extern __inline float16x8_t | |
33666 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
33667 | __arm_vabsq_m (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p) | |
33668 | { | |
33669 | return __arm_vabsq_m_f16 (__inactive, __a, __p); | |
33670 | } | |
33671 | ||
33672 | __extension__ extern __inline int16x8_t | |
33673 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
33674 | __arm_vcvtmq_m (int16x8_t __inactive, float16x8_t __a, mve_pred16_t __p) | |
33675 | { | |
33676 | return __arm_vcvtmq_m_s16_f16 (__inactive, __a, __p); | |
33677 | } | |
33678 | ||
33679 | __extension__ extern __inline int16x8_t | |
33680 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
33681 | __arm_vcvtnq_m (int16x8_t __inactive, float16x8_t __a, mve_pred16_t __p) | |
33682 | { | |
33683 | return __arm_vcvtnq_m_s16_f16 (__inactive, __a, __p); | |
33684 | } | |
33685 | ||
33686 | __extension__ extern __inline int16x8_t | |
33687 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
33688 | __arm_vcvtpq_m (int16x8_t __inactive, float16x8_t __a, mve_pred16_t __p) | |
33689 | { | |
33690 | return __arm_vcvtpq_m_s16_f16 (__inactive, __a, __p); | |
33691 | } | |
33692 | ||
33693 | __extension__ extern __inline int16x8_t | |
33694 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
33695 | __arm_vcvtq_m (int16x8_t __inactive, float16x8_t __a, mve_pred16_t __p) | |
33696 | { | |
33697 | return __arm_vcvtq_m_s16_f16 (__inactive, __a, __p); | |
33698 | } | |
33699 | ||
33700 | __extension__ extern __inline float16x8_t | |
33701 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
33702 | __arm_vdupq_m (float16x8_t __inactive, float16_t __a, mve_pred16_t __p) | |
33703 | { | |
33704 | return __arm_vdupq_m_n_f16 (__inactive, __a, __p); | |
33705 | } | |
33706 | ||
33707 | __extension__ extern __inline float16x8_t | |
33708 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
33709 | __arm_vmaxnmaq_m (float16x8_t __a, float16x8_t __b, mve_pred16_t __p) | |
33710 | { | |
33711 | return __arm_vmaxnmaq_m_f16 (__a, __b, __p); | |
33712 | } | |
33713 | ||
33714 | __extension__ extern __inline float16_t | |
33715 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
33716 | __arm_vmaxnmavq_p (float16_t __a, float16x8_t __b, mve_pred16_t __p) | |
33717 | { | |
33718 | return __arm_vmaxnmavq_p_f16 (__a, __b, __p); | |
33719 | } | |
33720 | ||
33721 | __extension__ extern __inline float16_t | |
33722 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
33723 | __arm_vmaxnmvq_p (float16_t __a, float16x8_t __b, mve_pred16_t __p) | |
33724 | { | |
33725 | return __arm_vmaxnmvq_p_f16 (__a, __b, __p); | |
33726 | } | |
33727 | ||
33728 | __extension__ extern __inline float16x8_t | |
33729 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
33730 | __arm_vminnmaq_m (float16x8_t __a, float16x8_t __b, mve_pred16_t __p) | |
33731 | { | |
33732 | return __arm_vminnmaq_m_f16 (__a, __b, __p); | |
33733 | } | |
33734 | ||
33735 | __extension__ extern __inline float16_t | |
33736 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
33737 | __arm_vminnmavq_p (float16_t __a, float16x8_t __b, mve_pred16_t __p) | |
33738 | { | |
33739 | return __arm_vminnmavq_p_f16 (__a, __b, __p); | |
33740 | } | |
33741 | ||
33742 | __extension__ extern __inline float16_t | |
33743 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
33744 | __arm_vminnmvq_p (float16_t __a, float16x8_t __b, mve_pred16_t __p) | |
33745 | { | |
33746 | return __arm_vminnmvq_p_f16 (__a, __b, __p); | |
33747 | } | |
33748 | ||
33749 | __extension__ extern __inline float16x8_t | |
33750 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
33751 | __arm_vnegq_m (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p) | |
33752 | { | |
33753 | return __arm_vnegq_m_f16 (__inactive, __a, __p); | |
33754 | } | |
33755 | ||
33756 | __extension__ extern __inline float16x8_t | |
33757 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
33758 | __arm_vpselq (float16x8_t __a, float16x8_t __b, mve_pred16_t __p) | |
33759 | { | |
33760 | return __arm_vpselq_f16 (__a, __b, __p); | |
33761 | } | |
33762 | ||
33763 | __extension__ extern __inline float16x8_t | |
33764 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
33765 | __arm_vrev64q_m (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p) | |
33766 | { | |
33767 | return __arm_vrev64q_m_f16 (__inactive, __a, __p); | |
33768 | } | |
33769 | ||
33770 | __extension__ extern __inline float16x8_t | |
33771 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
33772 | __arm_vrndaq_m (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p) | |
33773 | { | |
33774 | return __arm_vrndaq_m_f16 (__inactive, __a, __p); | |
33775 | } | |
33776 | ||
33777 | __extension__ extern __inline float16x8_t | |
33778 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
33779 | __arm_vrndmq_m (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p) | |
33780 | { | |
33781 | return __arm_vrndmq_m_f16 (__inactive, __a, __p); | |
33782 | } | |
33783 | ||
33784 | __extension__ extern __inline float16x8_t | |
33785 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
33786 | __arm_vrndnq_m (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p) | |
33787 | { | |
33788 | return __arm_vrndnq_m_f16 (__inactive, __a, __p); | |
33789 | } | |
33790 | ||
33791 | __extension__ extern __inline float16x8_t | |
33792 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
33793 | __arm_vrndpq_m (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p) | |
33794 | { | |
33795 | return __arm_vrndpq_m_f16 (__inactive, __a, __p); | |
33796 | } | |
33797 | ||
33798 | __extension__ extern __inline float16x8_t | |
33799 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
33800 | __arm_vrndq_m (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p) | |
33801 | { | |
33802 | return __arm_vrndq_m_f16 (__inactive, __a, __p); | |
33803 | } | |
33804 | ||
33805 | __extension__ extern __inline float16x8_t | |
33806 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
33807 | __arm_vrndxq_m (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p) | |
33808 | { | |
33809 | return __arm_vrndxq_m_f16 (__inactive, __a, __p); | |
33810 | } | |
33811 | ||
33812 | __extension__ extern __inline mve_pred16_t | |
33813 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
33814 | __arm_vcmpeqq_m (float16x8_t __a, float16_t __b, mve_pred16_t __p) | |
33815 | { | |
33816 | return __arm_vcmpeqq_m_n_f16 (__a, __b, __p); | |
33817 | } | |
33818 | ||
33819 | __extension__ extern __inline mve_pred16_t | |
33820 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
33821 | __arm_vcmpgeq_m (float16x8_t __a, float16x8_t __b, mve_pred16_t __p) | |
33822 | { | |
33823 | return __arm_vcmpgeq_m_f16 (__a, __b, __p); | |
33824 | } | |
33825 | ||
33826 | __extension__ extern __inline mve_pred16_t | |
33827 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
33828 | __arm_vcmpgeq_m (float16x8_t __a, float16_t __b, mve_pred16_t __p) | |
33829 | { | |
33830 | return __arm_vcmpgeq_m_n_f16 (__a, __b, __p); | |
33831 | } | |
33832 | ||
33833 | __extension__ extern __inline mve_pred16_t | |
33834 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
33835 | __arm_vcmpgtq_m (float16x8_t __a, float16x8_t __b, mve_pred16_t __p) | |
33836 | { | |
33837 | return __arm_vcmpgtq_m_f16 (__a, __b, __p); | |
33838 | } | |
33839 | ||
33840 | __extension__ extern __inline mve_pred16_t | |
33841 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
33842 | __arm_vcmpgtq_m (float16x8_t __a, float16_t __b, mve_pred16_t __p) | |
33843 | { | |
33844 | return __arm_vcmpgtq_m_n_f16 (__a, __b, __p); | |
33845 | } | |
33846 | ||
33847 | __extension__ extern __inline mve_pred16_t | |
33848 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
33849 | __arm_vcmpleq_m (float16x8_t __a, float16x8_t __b, mve_pred16_t __p) | |
33850 | { | |
33851 | return __arm_vcmpleq_m_f16 (__a, __b, __p); | |
33852 | } | |
33853 | ||
33854 | __extension__ extern __inline mve_pred16_t | |
33855 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
33856 | __arm_vcmpleq_m (float16x8_t __a, float16_t __b, mve_pred16_t __p) | |
33857 | { | |
33858 | return __arm_vcmpleq_m_n_f16 (__a, __b, __p); | |
33859 | } | |
33860 | ||
33861 | __extension__ extern __inline mve_pred16_t | |
33862 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
33863 | __arm_vcmpltq_m (float16x8_t __a, float16x8_t __b, mve_pred16_t __p) | |
33864 | { | |
33865 | return __arm_vcmpltq_m_f16 (__a, __b, __p); | |
33866 | } | |
33867 | ||
33868 | __extension__ extern __inline mve_pred16_t | |
33869 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
33870 | __arm_vcmpltq_m (float16x8_t __a, float16_t __b, mve_pred16_t __p) | |
33871 | { | |
33872 | return __arm_vcmpltq_m_n_f16 (__a, __b, __p); | |
33873 | } | |
33874 | ||
33875 | __extension__ extern __inline mve_pred16_t | |
33876 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
33877 | __arm_vcmpneq_m (float16x8_t __a, float16x8_t __b, mve_pred16_t __p) | |
33878 | { | |
33879 | return __arm_vcmpneq_m_f16 (__a, __b, __p); | |
33880 | } | |
33881 | ||
33882 | __extension__ extern __inline mve_pred16_t | |
33883 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
33884 | __arm_vcmpneq_m (float16x8_t __a, float16_t __b, mve_pred16_t __p) | |
33885 | { | |
33886 | return __arm_vcmpneq_m_n_f16 (__a, __b, __p); | |
33887 | } | |
33888 | ||
33889 | __extension__ extern __inline uint16x8_t | |
33890 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
33891 | __arm_vcvtmq_m (uint16x8_t __inactive, float16x8_t __a, mve_pred16_t __p) | |
33892 | { | |
33893 | return __arm_vcvtmq_m_u16_f16 (__inactive, __a, __p); | |
33894 | } | |
33895 | ||
33896 | __extension__ extern __inline uint16x8_t | |
33897 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
33898 | __arm_vcvtnq_m (uint16x8_t __inactive, float16x8_t __a, mve_pred16_t __p) | |
33899 | { | |
33900 | return __arm_vcvtnq_m_u16_f16 (__inactive, __a, __p); | |
33901 | } | |
33902 | ||
33903 | __extension__ extern __inline uint16x8_t | |
33904 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
33905 | __arm_vcvtpq_m (uint16x8_t __inactive, float16x8_t __a, mve_pred16_t __p) | |
33906 | { | |
33907 | return __arm_vcvtpq_m_u16_f16 (__inactive, __a, __p); | |
33908 | } | |
33909 | ||
33910 | __extension__ extern __inline uint16x8_t | |
33911 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
33912 | __arm_vcvtq_m (uint16x8_t __inactive, float16x8_t __a, mve_pred16_t __p) | |
33913 | { | |
33914 | return __arm_vcvtq_m_u16_f16 (__inactive, __a, __p); | |
33915 | } | |
33916 | ||
33917 | __extension__ extern __inline float32x4_t | |
33918 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
33919 | __arm_vcmlaq (float32x4_t __a, float32x4_t __b, float32x4_t __c) | |
33920 | { | |
33921 | return __arm_vcmlaq_f32 (__a, __b, __c); | |
33922 | } | |
33923 | ||
33924 | __extension__ extern __inline float32x4_t | |
33925 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
33926 | __arm_vcmlaq_rot180 (float32x4_t __a, float32x4_t __b, float32x4_t __c) | |
33927 | { | |
33928 | return __arm_vcmlaq_rot180_f32 (__a, __b, __c); | |
33929 | } | |
33930 | ||
33931 | __extension__ extern __inline float32x4_t | |
33932 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
33933 | __arm_vcmlaq_rot270 (float32x4_t __a, float32x4_t __b, float32x4_t __c) | |
33934 | { | |
33935 | return __arm_vcmlaq_rot270_f32 (__a, __b, __c); | |
33936 | } | |
33937 | ||
33938 | __extension__ extern __inline float32x4_t | |
33939 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
33940 | __arm_vcmlaq_rot90 (float32x4_t __a, float32x4_t __b, float32x4_t __c) | |
33941 | { | |
33942 | return __arm_vcmlaq_rot90_f32 (__a, __b, __c); | |
33943 | } | |
33944 | ||
33945 | __extension__ extern __inline float32x4_t | |
33946 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
33947 | __arm_vfmaq (float32x4_t __a, float32x4_t __b, float32x4_t __c) | |
33948 | { | |
33949 | return __arm_vfmaq_f32 (__a, __b, __c); | |
33950 | } | |
33951 | ||
33952 | __extension__ extern __inline float32x4_t | |
33953 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
33954 | __arm_vfmaq (float32x4_t __a, float32x4_t __b, float32_t __c) | |
33955 | { | |
33956 | return __arm_vfmaq_n_f32 (__a, __b, __c); | |
33957 | } | |
33958 | ||
33959 | __extension__ extern __inline float32x4_t | |
33960 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
33961 | __arm_vfmasq (float32x4_t __a, float32x4_t __b, float32_t __c) | |
33962 | { | |
33963 | return __arm_vfmasq_n_f32 (__a, __b, __c); | |
33964 | } | |
33965 | ||
33966 | __extension__ extern __inline float32x4_t | |
33967 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
33968 | __arm_vfmsq (float32x4_t __a, float32x4_t __b, float32x4_t __c) | |
33969 | { | |
33970 | return __arm_vfmsq_f32 (__a, __b, __c); | |
33971 | } | |
33972 | ||
33973 | __extension__ extern __inline float32x4_t | |
33974 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
33975 | __arm_vabsq_m (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p) | |
33976 | { | |
33977 | return __arm_vabsq_m_f32 (__inactive, __a, __p); | |
33978 | } | |
33979 | ||
33980 | __extension__ extern __inline int32x4_t | |
33981 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
33982 | __arm_vcvtmq_m (int32x4_t __inactive, float32x4_t __a, mve_pred16_t __p) | |
33983 | { | |
33984 | return __arm_vcvtmq_m_s32_f32 (__inactive, __a, __p); | |
33985 | } | |
33986 | ||
33987 | __extension__ extern __inline int32x4_t | |
33988 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
33989 | __arm_vcvtnq_m (int32x4_t __inactive, float32x4_t __a, mve_pred16_t __p) | |
33990 | { | |
33991 | return __arm_vcvtnq_m_s32_f32 (__inactive, __a, __p); | |
33992 | } | |
33993 | ||
33994 | __extension__ extern __inline int32x4_t | |
33995 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
33996 | __arm_vcvtpq_m (int32x4_t __inactive, float32x4_t __a, mve_pred16_t __p) | |
33997 | { | |
33998 | return __arm_vcvtpq_m_s32_f32 (__inactive, __a, __p); | |
33999 | } | |
34000 | ||
34001 | __extension__ extern __inline int32x4_t | |
34002 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
34003 | __arm_vcvtq_m (int32x4_t __inactive, float32x4_t __a, mve_pred16_t __p) | |
34004 | { | |
34005 | return __arm_vcvtq_m_s32_f32 (__inactive, __a, __p); | |
34006 | } | |
34007 | ||
34008 | __extension__ extern __inline float32x4_t | |
34009 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
34010 | __arm_vdupq_m (float32x4_t __inactive, float32_t __a, mve_pred16_t __p) | |
34011 | { | |
34012 | return __arm_vdupq_m_n_f32 (__inactive, __a, __p); | |
34013 | } | |
34014 | ||
34015 | __extension__ extern __inline float32x4_t | |
34016 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
34017 | __arm_vmaxnmaq_m (float32x4_t __a, float32x4_t __b, mve_pred16_t __p) | |
34018 | { | |
34019 | return __arm_vmaxnmaq_m_f32 (__a, __b, __p); | |
34020 | } | |
34021 | ||
34022 | __extension__ extern __inline float32_t | |
34023 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
34024 | __arm_vmaxnmavq_p (float32_t __a, float32x4_t __b, mve_pred16_t __p) | |
34025 | { | |
34026 | return __arm_vmaxnmavq_p_f32 (__a, __b, __p); | |
34027 | } | |
34028 | ||
34029 | __extension__ extern __inline float32_t | |
34030 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
34031 | __arm_vmaxnmvq_p (float32_t __a, float32x4_t __b, mve_pred16_t __p) | |
34032 | { | |
34033 | return __arm_vmaxnmvq_p_f32 (__a, __b, __p); | |
34034 | } | |
34035 | ||
34036 | __extension__ extern __inline float32x4_t | |
34037 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
34038 | __arm_vminnmaq_m (float32x4_t __a, float32x4_t __b, mve_pred16_t __p) | |
34039 | { | |
34040 | return __arm_vminnmaq_m_f32 (__a, __b, __p); | |
34041 | } | |
34042 | ||
34043 | __extension__ extern __inline float32_t | |
34044 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
34045 | __arm_vminnmavq_p (float32_t __a, float32x4_t __b, mve_pred16_t __p) | |
34046 | { | |
34047 | return __arm_vminnmavq_p_f32 (__a, __b, __p); | |
34048 | } | |
34049 | ||
34050 | __extension__ extern __inline float32_t | |
34051 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
34052 | __arm_vminnmvq_p (float32_t __a, float32x4_t __b, mve_pred16_t __p) | |
34053 | { | |
34054 | return __arm_vminnmvq_p_f32 (__a, __b, __p); | |
34055 | } | |
34056 | ||
34057 | __extension__ extern __inline float32x4_t | |
34058 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
34059 | __arm_vnegq_m (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p) | |
34060 | { | |
34061 | return __arm_vnegq_m_f32 (__inactive, __a, __p); | |
34062 | } | |
34063 | ||
34064 | __extension__ extern __inline float32x4_t | |
34065 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
34066 | __arm_vpselq (float32x4_t __a, float32x4_t __b, mve_pred16_t __p) | |
34067 | { | |
34068 | return __arm_vpselq_f32 (__a, __b, __p); | |
34069 | } | |
34070 | ||
34071 | __extension__ extern __inline float32x4_t | |
34072 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
34073 | __arm_vrev64q_m (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p) | |
34074 | { | |
34075 | return __arm_vrev64q_m_f32 (__inactive, __a, __p); | |
34076 | } | |
34077 | ||
34078 | __extension__ extern __inline float32x4_t | |
34079 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
34080 | __arm_vrndaq_m (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p) | |
34081 | { | |
34082 | return __arm_vrndaq_m_f32 (__inactive, __a, __p); | |
34083 | } | |
34084 | ||
34085 | __extension__ extern __inline float32x4_t | |
34086 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
34087 | __arm_vrndmq_m (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p) | |
34088 | { | |
34089 | return __arm_vrndmq_m_f32 (__inactive, __a, __p); | |
34090 | } | |
34091 | ||
34092 | __extension__ extern __inline float32x4_t | |
34093 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
34094 | __arm_vrndnq_m (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p) | |
34095 | { | |
34096 | return __arm_vrndnq_m_f32 (__inactive, __a, __p); | |
34097 | } | |
34098 | ||
34099 | __extension__ extern __inline float32x4_t | |
34100 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
34101 | __arm_vrndpq_m (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p) | |
34102 | { | |
34103 | return __arm_vrndpq_m_f32 (__inactive, __a, __p); | |
34104 | } | |
34105 | ||
34106 | __extension__ extern __inline float32x4_t | |
34107 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
34108 | __arm_vrndq_m (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p) | |
34109 | { | |
34110 | return __arm_vrndq_m_f32 (__inactive, __a, __p); | |
34111 | } | |
34112 | ||
34113 | __extension__ extern __inline float32x4_t | |
34114 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
34115 | __arm_vrndxq_m (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p) | |
34116 | { | |
34117 | return __arm_vrndxq_m_f32 (__inactive, __a, __p); | |
34118 | } | |
34119 | ||
34120 | __extension__ extern __inline mve_pred16_t | |
34121 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
34122 | __arm_vcmpeqq_m (float32x4_t __a, float32_t __b, mve_pred16_t __p) | |
34123 | { | |
34124 | return __arm_vcmpeqq_m_n_f32 (__a, __b, __p); | |
34125 | } | |
34126 | ||
34127 | __extension__ extern __inline mve_pred16_t | |
34128 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
34129 | __arm_vcmpgeq_m (float32x4_t __a, float32x4_t __b, mve_pred16_t __p) | |
34130 | { | |
34131 | return __arm_vcmpgeq_m_f32 (__a, __b, __p); | |
34132 | } | |
34133 | ||
34134 | __extension__ extern __inline mve_pred16_t | |
34135 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
34136 | __arm_vcmpgeq_m (float32x4_t __a, float32_t __b, mve_pred16_t __p) | |
34137 | { | |
34138 | return __arm_vcmpgeq_m_n_f32 (__a, __b, __p); | |
34139 | } | |
34140 | ||
34141 | __extension__ extern __inline mve_pred16_t | |
34142 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
34143 | __arm_vcmpgtq_m (float32x4_t __a, float32x4_t __b, mve_pred16_t __p) | |
34144 | { | |
34145 | return __arm_vcmpgtq_m_f32 (__a, __b, __p); | |
34146 | } | |
34147 | ||
34148 | __extension__ extern __inline mve_pred16_t | |
34149 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
34150 | __arm_vcmpgtq_m (float32x4_t __a, float32_t __b, mve_pred16_t __p) | |
34151 | { | |
34152 | return __arm_vcmpgtq_m_n_f32 (__a, __b, __p); | |
34153 | } | |
34154 | ||
34155 | __extension__ extern __inline mve_pred16_t | |
34156 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
34157 | __arm_vcmpleq_m (float32x4_t __a, float32x4_t __b, mve_pred16_t __p) | |
34158 | { | |
34159 | return __arm_vcmpleq_m_f32 (__a, __b, __p); | |
34160 | } | |
34161 | ||
34162 | __extension__ extern __inline mve_pred16_t | |
34163 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
34164 | __arm_vcmpleq_m (float32x4_t __a, float32_t __b, mve_pred16_t __p) | |
34165 | { | |
34166 | return __arm_vcmpleq_m_n_f32 (__a, __b, __p); | |
34167 | } | |
34168 | ||
34169 | __extension__ extern __inline mve_pred16_t | |
34170 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
34171 | __arm_vcmpltq_m (float32x4_t __a, float32x4_t __b, mve_pred16_t __p) | |
34172 | { | |
34173 | return __arm_vcmpltq_m_f32 (__a, __b, __p); | |
34174 | } | |
34175 | ||
34176 | __extension__ extern __inline mve_pred16_t | |
34177 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
34178 | __arm_vcmpltq_m (float32x4_t __a, float32_t __b, mve_pred16_t __p) | |
34179 | { | |
34180 | return __arm_vcmpltq_m_n_f32 (__a, __b, __p); | |
34181 | } | |
34182 | ||
34183 | __extension__ extern __inline mve_pred16_t | |
34184 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
34185 | __arm_vcmpneq_m (float32x4_t __a, float32x4_t __b, mve_pred16_t __p) | |
34186 | { | |
34187 | return __arm_vcmpneq_m_f32 (__a, __b, __p); | |
34188 | } | |
34189 | ||
34190 | __extension__ extern __inline mve_pred16_t | |
34191 | __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) | |
34192 | __arm_vcmpneq_m (float32x4_t __a, float32_t __b, mve_pred16_t __p) | |
34193 | { | |
34194 | return __arm_vcmpneq_m_n_f32 (__a, __b, __p); | |
34195 | } | |
34196 | ||
/* Predicated float32 -> uint32 conversion, rounding toward minus infinity.
   Predicated-off lanes take their value from __inactive.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtmq_m (uint32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __arm_vcvtmq_m_u32_f32 (__inactive, __a, __p);
}

/* Predicated float32 -> uint32 conversion, rounding to nearest (ties even).  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtnq_m (uint32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __arm_vcvtnq_m_u32_f32 (__inactive, __a, __p);
}

/* Predicated float32 -> uint32 conversion, rounding toward plus infinity.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtpq_m (uint32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __arm_vcvtpq_m_u32_f32 (__inactive, __a, __p);
}

/* Predicated float32 -> uint32 conversion with the default rounding mode.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_m (uint32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __arm_vcvtq_m_u32_f32 (__inactive, __a, __p);
}
34224 | ||
/* Predicated fixed-point -> float conversions with __imm6 fractional bits.
   Four overloads: f16 from u16/s16 and f32 from u32/s32.  Predicated-off
   lanes take their value from __inactive.  */

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_m_n (float16x8_t __inactive, uint16x8_t __a, const int __imm6, mve_pred16_t __p)
{
  return __arm_vcvtq_m_n_f16_u16 (__inactive, __a, __imm6, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_m_n (float16x8_t __inactive, int16x8_t __a, const int __imm6, mve_pred16_t __p)
{
  return __arm_vcvtq_m_n_f16_s16 (__inactive, __a, __imm6, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_m_n (float32x4_t __inactive, uint32x4_t __a, const int __imm6, mve_pred16_t __p)
{
  return __arm_vcvtq_m_n_f32_u32 (__inactive, __a, __imm6, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_m_n (float32x4_t __inactive, int32x4_t __a, const int __imm6, mve_pred16_t __p)
{
  return __arm_vcvtq_m_n_f32_s32 (__inactive, __a, __imm6, __p);
}
34252 | ||
/* Predicated absolute difference, f32 and f16.  Predicated-off lanes take
   their value from __inactive.  */

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __arm_vabdq_m_f32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __arm_vabdq_m_f16 (__inactive, __a, __b, __p);
}

/* Predicated addition: vector/vector and vector/scalar (_n) forms,
   f32 and f16.  */

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __arm_vaddq_m_f32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __arm_vaddq_m_f16 (__inactive, __a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_m (float32x4_t __inactive, float32x4_t __a, float32_t __b, mve_pred16_t __p)
{
  return __arm_vaddq_m_n_f32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_m (float16x8_t __inactive, float16x8_t __a, float16_t __b, mve_pred16_t __p)
{
  return __arm_vaddq_m_n_f16 (__inactive, __a, __b, __p);
}
34294 | ||
/* Predicated bitwise AND on float vectors (operates on the raw bit
   patterns), f32 and f16.  */

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __arm_vandq_m_f32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __arm_vandq_m_f16 (__inactive, __a, __b, __p);
}

/* Predicated bitwise AND-NOT (a & ~b) on float vectors, f32 and f16.  */

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __arm_vbicq_m_f32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __arm_vbicq_m_f16 (__inactive, __a, __b, __p);
}
34322 | ||
/* Predicated bit-reverse-shift-right (VBRSR) with scalar operand __b,
   f32 and f16 overloads.  */

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq_m (float32x4_t __inactive, float32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vbrsrq_m_n_f32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq_m (float16x8_t __inactive, float16x8_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vbrsrq_m_n_f16 (__inactive, __a, __b, __p);
}
34336 | ||
/* Predicated complex addition with 270-degree rotation of __b,
   f32 and f16 overloads.  */

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __arm_vcaddq_rot270_m_f32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __arm_vcaddq_rot270_m_f16 (__inactive, __a, __b, __p);
}

/* Predicated complex addition with 90-degree rotation of __b.  */

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __arm_vcaddq_rot90_m_f32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __arm_vcaddq_rot90_m_f16 (__inactive, __a, __b, __p);
}
34364 | ||
/* Predicated complex multiply-accumulate (VCMLA): __a is the accumulator,
   __b * rotated(__c) is added to it.  Rotations 0/180/270/90 degrees,
   each with f32 and f16 overloads.  */

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmlaq_m (float32x4_t __a, float32x4_t __b, float32x4_t __c, mve_pred16_t __p)
{
  return __arm_vcmlaq_m_f32 (__a, __b, __c, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmlaq_m (float16x8_t __a, float16x8_t __b, float16x8_t __c, mve_pred16_t __p)
{
  return __arm_vcmlaq_m_f16 (__a, __b, __c, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmlaq_rot180_m (float32x4_t __a, float32x4_t __b, float32x4_t __c, mve_pred16_t __p)
{
  return __arm_vcmlaq_rot180_m_f32 (__a, __b, __c, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmlaq_rot180_m (float16x8_t __a, float16x8_t __b, float16x8_t __c, mve_pred16_t __p)
{
  return __arm_vcmlaq_rot180_m_f16 (__a, __b, __c, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmlaq_rot270_m (float32x4_t __a, float32x4_t __b, float32x4_t __c, mve_pred16_t __p)
{
  return __arm_vcmlaq_rot270_m_f32 (__a, __b, __c, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmlaq_rot270_m (float16x8_t __a, float16x8_t __b, float16x8_t __c, mve_pred16_t __p)
{
  return __arm_vcmlaq_rot270_m_f16 (__a, __b, __c, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmlaq_rot90_m (float32x4_t __a, float32x4_t __b, float32x4_t __c, mve_pred16_t __p)
{
  return __arm_vcmlaq_rot90_m_f32 (__a, __b, __c, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmlaq_rot90_m (float16x8_t __a, float16x8_t __b, float16x8_t __c, mve_pred16_t __p)
{
  return __arm_vcmlaq_rot90_m_f16 (__a, __b, __c, __p);
}
34420 | ||
/* Predicated complex multiply (VCMUL) of __a by rotated __b; predicated-off
   lanes come from __inactive.  Rotations 0/180/270/90 degrees, each with
   f32 and f16 overloads.  */

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __arm_vcmulq_m_f32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __arm_vcmulq_m_f16 (__inactive, __a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_rot180_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __arm_vcmulq_rot180_m_f32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_rot180_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __arm_vcmulq_rot180_m_f16 (__inactive, __a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_rot270_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __arm_vcmulq_rot270_m_f32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_rot270_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __arm_vcmulq_rot270_m_f16 (__inactive, __a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_rot90_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __arm_vcmulq_rot90_m_f32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_rot90_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __arm_vcmulq_rot90_m_f16 (__inactive, __a, __b, __p);
}
34476 | ||
/* Predicated float -> fixed-point conversions with __imm6 fractional bits.
   Four overloads: s32/u32 from f32 and s16/u16 from f16.  Predicated-off
   lanes take their value from __inactive.  */

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_m_n (int32x4_t __inactive, float32x4_t __a, const int __imm6, mve_pred16_t __p)
{
  return __arm_vcvtq_m_n_s32_f32 (__inactive, __a, __imm6, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_m_n (int16x8_t __inactive, float16x8_t __a, const int __imm6, mve_pred16_t __p)
{
  return __arm_vcvtq_m_n_s16_f16 (__inactive, __a, __imm6, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_m_n (uint32x4_t __inactive, float32x4_t __a, const int __imm6, mve_pred16_t __p)
{
  return __arm_vcvtq_m_n_u32_f32 (__inactive, __a, __imm6, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_m_n (uint16x8_t __inactive, float16x8_t __a, const int __imm6, mve_pred16_t __p)
{
  return __arm_vcvtq_m_n_u16_f16 (__inactive, __a, __imm6, __p);
}
34504 | ||
/* Predicated bitwise exclusive-OR on float vectors (operates on the raw
   bit patterns), f32 and f16 overloads.  */

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __arm_veorq_m_f32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __arm_veorq_m_f16 (__inactive, __a, __b, __p);
}
34518 | ||
/* Predicated fused multiply-accumulate: __a += __b * __c.  Vector/vector
   and vector/scalar (_n) forms, f32 and f16.  */

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vfmaq_m (float32x4_t __a, float32x4_t __b, float32x4_t __c, mve_pred16_t __p)
{
  return __arm_vfmaq_m_f32 (__a, __b, __c, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vfmaq_m (float16x8_t __a, float16x8_t __b, float16x8_t __c, mve_pred16_t __p)
{
  return __arm_vfmaq_m_f16 (__a, __b, __c, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vfmaq_m (float32x4_t __a, float32x4_t __b, float32_t __c, mve_pred16_t __p)
{
  return __arm_vfmaq_m_n_f32 (__a, __b, __c, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vfmaq_m (float16x8_t __a, float16x8_t __b, float16_t __c, mve_pred16_t __p)
{
  return __arm_vfmaq_m_n_f16 (__a, __b, __c, __p);
}

/* Predicated fused multiply-add-scalar (VFMAS): result is __a * __b + __c
   with scalar addend __c; f32 and f16 overloads.  */

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vfmasq_m (float32x4_t __a, float32x4_t __b, float32_t __c, mve_pred16_t __p)
{
  return __arm_vfmasq_m_n_f32 (__a, __b, __c, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vfmasq_m (float16x8_t __a, float16x8_t __b, float16_t __c, mve_pred16_t __p)
{
  return __arm_vfmasq_m_n_f16 (__a, __b, __c, __p);
}
34560 | ||
/* Predicated fused multiply-subtract: __a -= __b * __c; f32 and f16
   overloads.  */

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vfmsq_m (float32x4_t __a, float32x4_t __b, float32x4_t __c, mve_pred16_t __p)
{
  return __arm_vfmsq_m_f32 (__a, __b, __c, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vfmsq_m (float16x8_t __a, float16x8_t __b, float16x8_t __c, mve_pred16_t __p)
{
  return __arm_vfmsq_m_f16 (__a, __b, __c, __p);
}
34574 | ||
/* Predicated IEEE maxNum (VMAXNM), f32 and f16.  Predicated-off lanes come
   from __inactive.  */

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmq_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __arm_vmaxnmq_m_f32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmq_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __arm_vmaxnmq_m_f16 (__inactive, __a, __b, __p);
}

/* Predicated IEEE minNum (VMINNM), f32 and f16.  */

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmq_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __arm_vminnmq_m_f32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmq_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __arm_vminnmq_m_f16 (__inactive, __a, __b, __p);
}
34602 | ||
/* Predicated multiplication: vector/vector and vector/scalar (_n) forms,
   f32 and f16.  Predicated-off lanes come from __inactive.  */

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __arm_vmulq_m_f32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __arm_vmulq_m_f16 (__inactive, __a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_m (float32x4_t __inactive, float32x4_t __a, float32_t __b, mve_pred16_t __p)
{
  return __arm_vmulq_m_n_f32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_m (float16x8_t __inactive, float16x8_t __a, float16_t __b, mve_pred16_t __p)
{
  return __arm_vmulq_m_n_f16 (__inactive, __a, __b, __p);
}
34630 | ||
/* Predicated bitwise OR-NOT (a | ~b) on float vectors, f32 and f16.  */

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __arm_vornq_m_f32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __arm_vornq_m_f16 (__inactive, __a, __b, __p);
}

/* Predicated bitwise OR on float vectors, f32 and f16.  */

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __arm_vorrq_m_f32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __arm_vorrq_m_f16 (__inactive, __a, __b, __p);
}
34658 | ||
/* Predicated subtraction: vector/vector and vector/scalar (_n) forms,
   f32 and f16.  Predicated-off lanes come from __inactive.  */

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __arm_vsubq_m_f32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __arm_vsubq_m_f16 (__inactive, __a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_m (float32x4_t __inactive, float32x4_t __a, float32_t __b, mve_pred16_t __p)
{
  return __arm_vsubq_m_n_f32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_m (float16x8_t __inactive, float16x8_t __a, float16_t __b, mve_pred16_t __p)
{
  return __arm_vsubq_m_n_f16 (__inactive, __a, __b, __p);
}
34686 | ||
/* Contiguous vector load from __base, f32 and f16 overloads.  */

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vld1q (float32_t const * __base)
{
  return __arm_vld1q_f32 (__base);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vld1q (float16_t const * __base)
{
  return __arm_vld1q_f16 (__base);
}
34700 | ||
/* Halfword gather loads of f16 elements: byte-offset and shifted-offset
   (offset scaled by element size) forms, each with a _z zero-predicated
   variant where predicated-off lanes load zero.  */

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_gather_offset (float16_t const * __base, uint16x8_t __offset)
{
  return __arm_vldrhq_gather_offset_f16 (__base, __offset);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_gather_offset_z (float16_t const * __base, uint16x8_t __offset, mve_pred16_t __p)
{
  return __arm_vldrhq_gather_offset_z_f16 (__base, __offset, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_gather_shifted_offset (float16_t const * __base, uint16x8_t __offset)
{
  return __arm_vldrhq_gather_shifted_offset_f16 (__base, __offset);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_gather_shifted_offset_z (float16_t const * __base, uint16x8_t __offset, mve_pred16_t __p)
{
  return __arm_vldrhq_gather_shifted_offset_z_f16 (__base, __offset, __p);
}
34728 | ||
/* Word gather loads of f32 elements: byte-offset and shifted-offset forms,
   each with a _z zero-predicated variant.  */

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrwq_gather_offset (float32_t const * __base, uint32x4_t __offset)
{
  return __arm_vldrwq_gather_offset_f32 (__base, __offset);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrwq_gather_offset_z (float32_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
{
  return __arm_vldrwq_gather_offset_z_f32 (__base, __offset, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrwq_gather_shifted_offset (float32_t const * __base, uint32x4_t __offset)
{
  return __arm_vldrwq_gather_shifted_offset_f32 (__base, __offset);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrwq_gather_shifted_offset_z (float32_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
{
  return __arm_vldrwq_gather_shifted_offset_z_f32 (__base, __offset, __p);
}
34756 | ||
/* Contiguous word stores of an f32 vector: predicated (_p) and
   unpredicated forms.  */

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_p (float32_t * __addr, float32x4_t __value, mve_pred16_t __p)
{
  __arm_vstrwq_p_f32 (__addr, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq (float32_t * __addr, float32x4_t __value)
{
  __arm_vstrwq_f32 (__addr, __value);
}
34770 | ||
/* Contiguous vector store to __addr, f32 and f16 overloads.  */

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst1q (float32_t * __addr, float32x4_t __value)
{
  __arm_vst1q_f32 (__addr, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst1q (float16_t * __addr, float16x8_t __value)
{
  __arm_vst1q_f16 (__addr, __value);
}
34784 | ||
/* Contiguous halfword stores of an f16 vector: unpredicated and
   predicated (_p) forms.  */

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq (float16_t * __addr, float16x8_t __value)
{
  __arm_vstrhq_f16 (__addr, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_p (float16_t * __addr, float16x8_t __value, mve_pred16_t __p)
{
  __arm_vstrhq_p_f16 (__addr, __value, __p);
}
34798 | ||
/* Halfword scatter stores of f16 elements: byte-offset and shifted-offset
   forms, each with a _p predicated variant.  */

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_scatter_offset (float16_t * __base, uint16x8_t __offset, float16x8_t __value)
{
  __arm_vstrhq_scatter_offset_f16 (__base, __offset, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_scatter_offset_p (float16_t * __base, uint16x8_t __offset, float16x8_t __value, mve_pred16_t __p)
{
  __arm_vstrhq_scatter_offset_p_f16 (__base, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_scatter_shifted_offset (float16_t * __base, uint16x8_t __offset, float16x8_t __value)
{
  __arm_vstrhq_scatter_shifted_offset_f16 (__base, __offset, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_scatter_shifted_offset_p (float16_t * __base, uint16x8_t __offset, float16x8_t __value, mve_pred16_t __p)
{
  __arm_vstrhq_scatter_shifted_offset_p_f16 (__base, __offset, __value, __p);
}
34826 | ||
/* Word scatter stores against a vector of base addresses plus an immediate
   __offset; unpredicated and predicated (_p) forms.  */

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_base (uint32x4_t __addr, const int __offset, float32x4_t __value)
{
  __arm_vstrwq_scatter_base_f32 (__addr, __offset, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_base_p (uint32x4_t __addr, const int __offset, float32x4_t __value, mve_pred16_t __p)
{
  __arm_vstrwq_scatter_base_p_f32 (__addr, __offset, __value, __p);
}
34840 | ||
/* Word scatter stores of f32 elements: byte-offset and shifted-offset
   forms, each with a _p predicated variant.  */

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_offset (float32_t * __base, uint32x4_t __offset, float32x4_t __value)
{
  __arm_vstrwq_scatter_offset_f32 (__base, __offset, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_offset_p (float32_t * __base, uint32x4_t __offset, float32x4_t __value, mve_pred16_t __p)
{
  __arm_vstrwq_scatter_offset_p_f32 (__base, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_shifted_offset (float32_t * __base, uint32x4_t __offset, float32x4_t __value)
{
  __arm_vstrwq_scatter_shifted_offset_f32 (__base, __offset, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_shifted_offset_p (float32_t * __base, uint32x4_t __offset, float32x4_t __value, mve_pred16_t __p)
{
  __arm_vstrwq_scatter_shifted_offset_p_f32 (__base, __offset, __value, __p);
}
34868 | ||
/* Unpredicated vector addition, f16 and f32 overloads.  */

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq (float16x8_t __a, float16x8_t __b)
{
  return __arm_vaddq_f16 (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq (float32x4_t __a, float32x4_t __b)
{
  return __arm_vaddq_f32 (__a, __b);
}
34882 | ||
6a90680b ASDV |
/* Write-back scatter stores: store lanes of __value at addresses formed
   from the base-address vector *__addr plus the immediate __offset, and
   update *__addr afterwards (write-back).  The _p variant is predicated
   on __p.  */

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_base_wb (uint32x4_t * __addr, const int __offset, float32x4_t __value)
{
  __arm_vstrwq_scatter_base_wb_f32 (__addr, __offset, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_base_wb_p (uint32x4_t * __addr, const int __offset, float32x4_t __value, mve_pred16_t __p)
{
  __arm_vstrwq_scatter_base_wb_p_f32 (__addr, __offset, __value, __p);
}
34896 | ||
/* Predicated "_x" (don't-care false lanes) overloads: min/max (NaN
   propagating per vminnm/vmaxnm semantics), absolute difference, and
   absolute value, for f16 and f32 vectors.  Each forwards to the
   type-suffixed intrinsic with predicate __p.  */

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmq_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __arm_vminnmq_x_f16 (__a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmq_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __arm_vminnmq_x_f32 (__a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmq_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __arm_vmaxnmq_x_f16 (__a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmq_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __arm_vmaxnmq_x_f32 (__a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __arm_vabdq_x_f16 (__a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __arm_vabdq_x_f32 (__a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabsq_x (float16x8_t __a, mve_pred16_t __p)
{
  return __arm_vabsq_x_f16 (__a, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabsq_x (float32x4_t __a, mve_pred16_t __p)
{
  return __arm_vabsq_x_f32 (__a, __p);
}
34952 | ||
/* Predicated "_x" arithmetic overloads: add, negate, multiply and
   subtract for f16/f32 vectors.  The scalar-second-operand overloads
   (float16_t/float32_t __b) dispatch to the "_n_" broadcast variants,
   which splat __b across all lanes.  */

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __arm_vaddq_x_f16 (__a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __arm_vaddq_x_f32 (__a, __b, __p);
}

/* Vector + scalar: broadcast __b to all lanes, then add.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_x (float16x8_t __a, float16_t __b, mve_pred16_t __p)
{
  return __arm_vaddq_x_n_f16 (__a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_x (float32x4_t __a, float32_t __b, mve_pred16_t __p)
{
  return __arm_vaddq_x_n_f32 (__a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vnegq_x (float16x8_t __a, mve_pred16_t __p)
{
  return __arm_vnegq_x_f16 (__a, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vnegq_x (float32x4_t __a, mve_pred16_t __p)
{
  return __arm_vnegq_x_f32 (__a, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __arm_vmulq_x_f16 (__a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __arm_vmulq_x_f32 (__a, __b, __p);
}

/* Vector * scalar: broadcast __b, then multiply.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_x (float16x8_t __a, float16_t __b, mve_pred16_t __p)
{
  return __arm_vmulq_x_n_f16 (__a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_x (float32x4_t __a, float32_t __b, mve_pred16_t __p)
{
  return __arm_vmulq_x_n_f32 (__a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __arm_vsubq_x_f16 (__a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __arm_vsubq_x_f32 (__a, __b, __p);
}

/* Vector - scalar: broadcast __b, then subtract.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_x (float16x8_t __a, float16_t __b, mve_pred16_t __p)
{
  return __arm_vsubq_x_n_f16 (__a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_x (float32x4_t __a, float32_t __b, mve_pred16_t __p)
{
  return __arm_vsubq_x_n_f32 (__a, __b, __p);
}
35050 | ||
/* Predicated "_x" complex arithmetic overloads: complex add with
   rotation (vcaddq_rot90/rot270) and complex multiply with rotation
   (vcmulq, rot90/180/270), for f16 and f32 vectors.  The rotation is
   applied to the second operand, per the ACLE vcadd/vcmul definitions.  */

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __arm_vcaddq_rot90_x_f16 (__a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __arm_vcaddq_rot90_x_f32 (__a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __arm_vcaddq_rot270_x_f16 (__a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __arm_vcaddq_rot270_x_f32 (__a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __arm_vcmulq_x_f16 (__a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __arm_vcmulq_x_f32 (__a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_rot90_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __arm_vcmulq_rot90_x_f16 (__a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_rot90_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __arm_vcmulq_rot90_x_f32 (__a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_rot180_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __arm_vcmulq_rot180_x_f16 (__a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_rot180_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __arm_vcmulq_rot180_x_f32 (__a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_rot270_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __arm_vcmulq_rot270_x_f16 (__a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_rot270_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __arm_vcmulq_rot270_x_f32 (__a, __b, __p);
}
35134 | ||
/* Predicated "_x" integer-to-float conversions.  The overload set maps
   each integer vector type to the float vector of matching lane width:
   s16/u16 -> f16, s32/u32 -> f32.  The _n variants divide by 2^__imm6
   (fixed-point to floating-point conversion with __imm6 fractional
   bits).  */

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_x (uint16x8_t __a, mve_pred16_t __p)
{
  return __arm_vcvtq_x_f16_u16 (__a, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_x (int16x8_t __a, mve_pred16_t __p)
{
  return __arm_vcvtq_x_f16_s16 (__a, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_x (int32x4_t __a, mve_pred16_t __p)
{
  return __arm_vcvtq_x_f32_s32 (__a, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_x (uint32x4_t __a, mve_pred16_t __p)
{
  return __arm_vcvtq_x_f32_u32 (__a, __p);
}

/* Fixed-point conversions: __imm6 is a compile-time immediate giving
   the number of fractional bits.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_x_n (int16x8_t __a, const int __imm6, mve_pred16_t __p)
{
  return __arm_vcvtq_x_n_f16_s16 (__a, __imm6, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_x_n (uint16x8_t __a, const int __imm6, mve_pred16_t __p)
{
  return __arm_vcvtq_x_n_f16_u16 (__a, __imm6, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_x_n (int32x4_t __a, const int __imm6, mve_pred16_t __p)
{
  return __arm_vcvtq_x_n_f32_s32 (__a, __imm6, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_x_n (uint32x4_t __a, const int __imm6, mve_pred16_t __p)
{
  return __arm_vcvtq_x_n_f32_u32 (__a, __imm6, __p);
}
35190 | ||
/* Predicated "_x" round-to-integral overloads for f16/f32 vectors:
   vrndq (toward zero), vrndnq (to nearest, ties to even), vrndmq
   (toward minus infinity), vrndpq (toward plus infinity), vrndaq
   (to nearest, ties away from zero), vrndxq (using the current
   rounding mode, raising Inexact).  */

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndq_x (float16x8_t __a, mve_pred16_t __p)
{
  return __arm_vrndq_x_f16 (__a, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndq_x (float32x4_t __a, mve_pred16_t __p)
{
  return __arm_vrndq_x_f32 (__a, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndnq_x (float16x8_t __a, mve_pred16_t __p)
{
  return __arm_vrndnq_x_f16 (__a, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndnq_x (float32x4_t __a, mve_pred16_t __p)
{
  return __arm_vrndnq_x_f32 (__a, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndmq_x (float16x8_t __a, mve_pred16_t __p)
{
  return __arm_vrndmq_x_f16 (__a, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndmq_x (float32x4_t __a, mve_pred16_t __p)
{
  return __arm_vrndmq_x_f32 (__a, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndpq_x (float16x8_t __a, mve_pred16_t __p)
{
  return __arm_vrndpq_x_f16 (__a, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndpq_x (float32x4_t __a, mve_pred16_t __p)
{
  return __arm_vrndpq_x_f32 (__a, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndaq_x (float16x8_t __a, mve_pred16_t __p)
{
  return __arm_vrndaq_x_f16 (__a, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndaq_x (float32x4_t __a, mve_pred16_t __p)
{
  return __arm_vrndaq_x_f32 (__a, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndxq_x (float16x8_t __a, mve_pred16_t __p)
{
  return __arm_vrndxq_x_f16 (__a, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndxq_x (float32x4_t __a, mve_pred16_t __p)
{
  return __arm_vrndxq_x_f32 (__a, __p);
}
35274 | ||
/* Predicated "_x" bitwise overloads on float vectors (the operation is
   applied to the raw bit patterns): AND, BIC (a AND NOT b), EOR, ORN
   (a OR NOT b), ORR, plus vbrsrq (bit-reverse __a's elements and shift
   right by the scalar __b).  */

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __arm_vandq_x_f16 (__a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __arm_vandq_x_f32 (__a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __arm_vbicq_x_f16 (__a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __arm_vbicq_x_f32 (__a, __b, __p);
}

/* __b is a general-purpose scalar here, hence the "_n_" target.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq_x (float16x8_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vbrsrq_x_n_f16 (__a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq_x (float32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vbrsrq_x_n_f32 (__a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __arm_veorq_x_f16 (__a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __arm_veorq_x_f32 (__a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __arm_vornq_x_f16 (__a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __arm_vornq_x_f32 (__a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __arm_vorrq_x_f16 (__a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __arm_vorrq_x_f32 (__a, __b, __p);
}
35358 | ||
/* Predicated "_x" element-reversal overloads: vrev32q reverses f16
   elements within each 32-bit container (no f32 form exists, as f32
   elements fill the container); vrev64q reverses elements within each
   64-bit container for both f16 and f32.  */

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev32q_x (float16x8_t __a, mve_pred16_t __p)
{
  return __arm_vrev32q_x_f16 (__a, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_x (float16x8_t __a, mve_pred16_t __p)
{
  return __arm_vrev64q_x_f16 (__a, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_x (float32x4_t __a, mve_pred16_t __p)
{
  return __arm_vrev64q_x_f32 (__a, __p);
}
35379 | ||
/* float16 load/store overloads: de-interleaving 4-way and 2-way loads
   (vld4q/vld2q), predicated contiguous load (vld1q_z, inactive lanes
   zeroed), interleaving 2-way store (vst2q), and predicated contiguous
   store (vst1q_p).  */

__extension__ extern __inline float16x8x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vld4q (float16_t const * __addr)
{
  return __arm_vld4q_f16 (__addr);
}

__extension__ extern __inline float16x8x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vld2q (float16_t const * __addr)
{
  return __arm_vld2q_f16 (__addr);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vld1q_z (float16_t const *__base, mve_pred16_t __p)
{
  return __arm_vld1q_z_f16 (__base, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst2q (float16_t * __addr, float16x8x2_t __value)
{
  __arm_vst2q_f16 (__addr, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst1q_p (float16_t * __addr, float16x8_t __value, mve_pred16_t __p)
{
  __arm_vst1q_p_f16 (__addr, __value, __p);
}
35414 | ||
/* float32 load/store overloads, mirroring the float16 set above them
   in this overload group: vld4q, vld2q, vld1q_z, vst2q and vst1q_p.  */

__extension__ extern __inline float32x4x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vld4q (float32_t const * __addr)
{
  return __arm_vld4q_f32 (__addr);
}

__extension__ extern __inline float32x4x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vld2q (float32_t const * __addr)
{
  return __arm_vld2q_f32 (__addr);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vld1q_z (float32_t const *__base, mve_pred16_t __p)
{
  return __arm_vld1q_z_f32 (__base, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst2q (float32_t * __addr, float32x4x2_t __value)
{
  __arm_vst2q_f32 (__addr, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst1q_p (float32_t * __addr, float32x4_t __value, mve_pred16_t __p)
{
  __arm_vst1q_p_f32 (__addr, __value, __p);
}
35449 | ||
/* Lane access overloads: vsetq_lane returns __b with lane __idx
   replaced by the scalar __a; vgetq_lane extracts lane __idx.  __idx
   must be a compile-time constant within the vector's lane count.  */

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsetq_lane (float16_t __a, float16x8_t __b, const int __idx)
{
  return __arm_vsetq_lane_f16 (__a, __b, __idx);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsetq_lane (float32_t __a, float32x4_t __b, const int __idx)
{
  return __arm_vsetq_lane_f32 (__a, __b, __idx);
}

__extension__ extern __inline float16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vgetq_lane (float16x8_t __a, const int __idx)
{
  return __arm_vgetq_lane_f16 (__a, __idx);
}

__extension__ extern __inline float32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vgetq_lane (float32x4_t __a, const int __idx)
{
  return __arm_vgetq_lane_f32 (__a, __idx);
}
35477 | #endif /* MVE Floating point. */ | |
35478 | ||
35479 | #else | |
/* Type identifiers used by the __ARM_mve_typeid classifier below to
   drive C polymorphic dispatch (via _Generic) for the user-facing
   intrinsic names.  The first enumerator starts at 1 so that no valid
   type id is ever 0; the closing __ARM_mve_unsupported_type is the
   _Generic fallback for types with no MVE mapping.  Do not reorder:
   the dispatch tables elsewhere in this header rely on these values.  */
enum {
  __ARM_mve_type_fp_n = 1,
  __ARM_mve_type_int_n,
  __ARM_mve_type_float16_t_ptr,
  __ARM_mve_type_float16x8_t,
  __ARM_mve_type_float16x8x2_t,
  __ARM_mve_type_float16x8x4_t,
  __ARM_mve_type_float32_t_ptr,
  __ARM_mve_type_float32x4_t,
  __ARM_mve_type_float32x4x2_t,
  __ARM_mve_type_float32x4x4_t,
  __ARM_mve_type_int16_t_ptr,
  __ARM_mve_type_int16x8_t,
  __ARM_mve_type_int16x8x2_t,
  __ARM_mve_type_int16x8x4_t,
  __ARM_mve_type_int32_t_ptr,
  __ARM_mve_type_int32x4_t,
  __ARM_mve_type_int32x4x2_t,
  __ARM_mve_type_int32x4x4_t,
  __ARM_mve_type_int64_t_ptr,
  __ARM_mve_type_int64x2_t,
  __ARM_mve_type_int8_t_ptr,
  __ARM_mve_type_int8x16_t,
  __ARM_mve_type_int8x16x2_t,
  __ARM_mve_type_int8x16x4_t,
  __ARM_mve_type_uint16_t_ptr,
  __ARM_mve_type_uint16x8_t,
  __ARM_mve_type_uint16x8x2_t,
  __ARM_mve_type_uint16x8x4_t,
  __ARM_mve_type_uint32_t_ptr,
  __ARM_mve_type_uint32x4_t,
  __ARM_mve_type_uint32x4x2_t,
  __ARM_mve_type_uint32x4x4_t,
  __ARM_mve_type_uint64_t_ptr,
  __ARM_mve_type_uint64x2_t,
  __ARM_mve_type_uint8_t_ptr,
  __ARM_mve_type_uint8x16_t,
  __ARM_mve_type_uint8x16x2_t,
  __ARM_mve_type_uint8x16x4_t,
  __ARM_mve_unsupported_type
};
35521 | ||
#if (__ARM_FEATURE_MVE & 2) /* MVE Floating point.  */
/* Classify the static type of X as one of the __ARM_mve_type_*
   enumerators, using C11 _Generic selection.  Scalar floating types
   map to __ARM_mve_type_fp_n and scalar integer types to
   __ARM_mve_type_int_n (the "_n" broadcast-operand forms); pointers
   (const and non-const alike) map to the corresponding _ptr id; vector
   and vector-tuple types map to their own ids.  The nested default
   _Generic catches the plain C scalar types, which may or may not be
   distinct types from the fixed-width typedefs above depending on the
   target ABI — listing e.g. both int32_t and int would be a constraint
   violation when they are the same type, hence the two-level scheme.
   Anything else yields __ARM_mve_unsupported_type.  */
#define __ARM_mve_typeid(x) _Generic(x, \
	float16_t: __ARM_mve_type_fp_n, \
	float16_t *: __ARM_mve_type_float16_t_ptr, \
	float16_t const *: __ARM_mve_type_float16_t_ptr, \
	float16x8_t: __ARM_mve_type_float16x8_t, \
	float16x8x2_t: __ARM_mve_type_float16x8x2_t, \
	float16x8x4_t: __ARM_mve_type_float16x8x4_t, \
	float32_t: __ARM_mve_type_fp_n, \
	float32_t *: __ARM_mve_type_float32_t_ptr, \
	float32_t const *: __ARM_mve_type_float32_t_ptr, \
	float32x4_t: __ARM_mve_type_float32x4_t, \
	float32x4x2_t: __ARM_mve_type_float32x4x2_t, \
	float32x4x4_t: __ARM_mve_type_float32x4x4_t, \
	int16_t: __ARM_mve_type_int_n, \
	int16_t *: __ARM_mve_type_int16_t_ptr, \
	int16_t const *: __ARM_mve_type_int16_t_ptr, \
	int16x8_t: __ARM_mve_type_int16x8_t, \
	int16x8x2_t: __ARM_mve_type_int16x8x2_t, \
	int16x8x4_t: __ARM_mve_type_int16x8x4_t, \
	int32_t: __ARM_mve_type_int_n, \
	int32_t *: __ARM_mve_type_int32_t_ptr, \
	int32_t const *: __ARM_mve_type_int32_t_ptr, \
	int32x4_t: __ARM_mve_type_int32x4_t, \
	int32x4x2_t: __ARM_mve_type_int32x4x2_t, \
	int32x4x4_t: __ARM_mve_type_int32x4x4_t, \
	int64_t: __ARM_mve_type_int_n, \
	int64_t *: __ARM_mve_type_int64_t_ptr, \
	int64_t const *: __ARM_mve_type_int64_t_ptr, \
	int64x2_t: __ARM_mve_type_int64x2_t, \
	int8_t: __ARM_mve_type_int_n, \
	int8_t *: __ARM_mve_type_int8_t_ptr, \
	int8_t const *: __ARM_mve_type_int8_t_ptr, \
	int8x16_t: __ARM_mve_type_int8x16_t, \
	int8x16x2_t: __ARM_mve_type_int8x16x2_t, \
	int8x16x4_t: __ARM_mve_type_int8x16x4_t, \
	uint16_t: __ARM_mve_type_int_n, \
	uint16_t *: __ARM_mve_type_uint16_t_ptr, \
	uint16_t const *: __ARM_mve_type_uint16_t_ptr, \
	uint16x8_t: __ARM_mve_type_uint16x8_t, \
	uint16x8x2_t: __ARM_mve_type_uint16x8x2_t, \
	uint16x8x4_t: __ARM_mve_type_uint16x8x4_t, \
	uint32_t: __ARM_mve_type_int_n, \
	uint32_t *: __ARM_mve_type_uint32_t_ptr, \
	uint32_t const *: __ARM_mve_type_uint32_t_ptr, \
	uint32x4_t: __ARM_mve_type_uint32x4_t, \
	uint32x4x2_t: __ARM_mve_type_uint32x4x2_t, \
	uint32x4x4_t: __ARM_mve_type_uint32x4x4_t, \
	uint64_t: __ARM_mve_type_int_n, \
	uint64_t *: __ARM_mve_type_uint64_t_ptr, \
	uint64_t const *: __ARM_mve_type_uint64_t_ptr, \
	uint64x2_t: __ARM_mve_type_uint64x2_t, \
	uint8_t: __ARM_mve_type_int_n, \
	uint8_t *: __ARM_mve_type_uint8_t_ptr, \
	uint8_t const *: __ARM_mve_type_uint8_t_ptr, \
	uint8x16_t: __ARM_mve_type_uint8x16_t, \
	uint8x16x2_t: __ARM_mve_type_uint8x16x2_t, \
	uint8x16x4_t: __ARM_mve_type_uint8x16x4_t, \
	default: _Generic(x, \
		signed char: __ARM_mve_type_int_n, \
		short: __ARM_mve_type_int_n, \
		int: __ARM_mve_type_int_n, \
		long: __ARM_mve_type_int_n, \
		double: __ARM_mve_type_fp_n, \
		long long: __ARM_mve_type_int_n, \
		unsigned char: __ARM_mve_type_int_n, \
		unsigned short: __ARM_mve_type_int_n, \
		unsigned int: __ARM_mve_type_int_n, \
		unsigned long: __ARM_mve_type_int_n, \
		unsigned long long: __ARM_mve_type_int_n, \
		default: __ARM_mve_unsupported_type))
35593 | #else | |
35594 | #define __ARM_mve_typeid(x) _Generic(x, \ | |
35595 | int16_t: __ARM_mve_type_int_n, \ | |
35596 | int16_t *: __ARM_mve_type_int16_t_ptr, \ | |
35597 | int16_t const *: __ARM_mve_type_int16_t_ptr, \ | |
35598 | int16x8_t: __ARM_mve_type_int16x8_t, \ | |
35599 | int16x8x2_t: __ARM_mve_type_int16x8x2_t, \ | |
35600 | int16x8x4_t: __ARM_mve_type_int16x8x4_t, \ | |
35601 | int32_t: __ARM_mve_type_int_n, \ | |
35602 | int32_t *: __ARM_mve_type_int32_t_ptr, \ | |
35603 | int32_t const *: __ARM_mve_type_int32_t_ptr, \ | |
35604 | int32x4_t: __ARM_mve_type_int32x4_t, \ | |
35605 | int32x4x2_t: __ARM_mve_type_int32x4x2_t, \ | |
35606 | int32x4x4_t: __ARM_mve_type_int32x4x4_t, \ | |
35607 | int64_t: __ARM_mve_type_int_n, \ | |
35608 | int64_t *: __ARM_mve_type_int64_t_ptr, \ | |
35609 | int64_t const *: __ARM_mve_type_int64_t_ptr, \ | |
35610 | int64x2_t: __ARM_mve_type_int64x2_t, \ | |
35611 | int8_t: __ARM_mve_type_int_n, \ | |
35612 | int8_t *: __ARM_mve_type_int8_t_ptr, \ | |
35613 | int8_t const *: __ARM_mve_type_int8_t_ptr, \ | |
35614 | int8x16_t: __ARM_mve_type_int8x16_t, \ | |
35615 | int8x16x2_t: __ARM_mve_type_int8x16x2_t, \ | |
35616 | int8x16x4_t: __ARM_mve_type_int8x16x4_t, \ | |
35617 | uint16_t: __ARM_mve_type_int_n, \ | |
35618 | uint16_t *: __ARM_mve_type_uint16_t_ptr, \ | |
35619 | uint16_t const *: __ARM_mve_type_uint16_t_ptr, \ | |
35620 | uint16x8_t: __ARM_mve_type_uint16x8_t, \ | |
35621 | uint16x8x2_t: __ARM_mve_type_uint16x8x2_t, \ | |
35622 | uint16x8x4_t: __ARM_mve_type_uint16x8x4_t, \ | |
35623 | uint32_t: __ARM_mve_type_int_n, \ | |
35624 | uint32_t *: __ARM_mve_type_uint32_t_ptr, \ | |
35625 | uint32_t const *: __ARM_mve_type_uint32_t_ptr, \ | |
35626 | uint32x4_t: __ARM_mve_type_uint32x4_t, \ | |
35627 | uint32x4x2_t: __ARM_mve_type_uint32x4x2_t, \ | |
35628 | uint32x4x4_t: __ARM_mve_type_uint32x4x4_t, \ | |
35629 | uint64_t: __ARM_mve_type_int_n, \ | |
35630 | uint64_t *: __ARM_mve_type_uint64_t_ptr, \ | |
35631 | uint64_t const *: __ARM_mve_type_uint64_t_ptr, \ | |
35632 | uint64x2_t: __ARM_mve_type_uint64x2_t, \ | |
35633 | uint8_t: __ARM_mve_type_int_n, \ | |
35634 | uint8_t *: __ARM_mve_type_uint8_t_ptr, \ | |
35635 | uint8_t const *: __ARM_mve_type_uint8_t_ptr, \ | |
35636 | uint8x16_t: __ARM_mve_type_uint8x16_t, \ | |
35637 | uint8x16x2_t: __ARM_mve_type_uint8x16x2_t, \ | |
35638 | uint8x16x4_t: __ARM_mve_type_uint8x16x4_t, \ | |
35639 | default: _Generic(x, \ | |
35640 | signed char: __ARM_mve_type_int_n, \ | |
35641 | short: __ARM_mve_type_int_n, \ | |
35642 | int: __ARM_mve_type_int_n, \ | |
35643 | long: __ARM_mve_type_int_n, \ | |
35644 | long long: __ARM_mve_type_int_n, \ | |
35645 | unsigned char: __ARM_mve_type_int_n, \ | |
35646 | unsigned short: __ARM_mve_type_int_n, \ | |
35647 | unsigned int: __ARM_mve_type_int_n, \ | |
35648 | unsigned long: __ARM_mve_type_int_n, \ | |
35649 | unsigned long long: __ARM_mve_type_int_n, \ | |
35650 | default: __ARM_mve_unsupported_type)) | |
35651 | #endif /* MVE Floating point. */ | |
35652 | ||
/* Sentinel used by the coercion macros below: dereferencing it through a cast
   yields an expression of the requested type for the non-matching _Generic
   arms, so every arm type-checks; only the selected arm is ever evaluated.  */
extern void *__ARM_undef;
/* Coerce PARAM to TYPE when its static type matches exactly; otherwise
   produce a (never-evaluated) expression of TYPE via __ARM_undef.  */
#define __ARM_mve_coerce(param, type) \
  _Generic(param, type: param, default: *(type *)__ARM_undef)
/* As __ARM_mve_coerce, but also accept a const-qualified TYPE.  */
#define __ARM_mve_coerce1(param, type) \
  _Generic(param, type: param, const type: param, default: *(type *)__ARM_undef)
/* Accept TYPE or either floating scalar type (float16_t/float32_t); used for
   the _n forms of float intrinsics so fp scalar immediates are accepted.  */
#define __ARM_mve_coerce2(param, type) \
  _Generic(param, type: param, float16_t: param, float32_t: param, default: *(type *)__ARM_undef)
/* Accept TYPE or any fixed-width integer scalar type; used for the _n forms
   of integer intrinsics so any integer scalar immediate is accepted.  */
#define __ARM_mve_coerce3(param, type) \
  _Generic(param, type: param, int8_t: param, int16_t: param, int32_t: param, int64_t: param, uint8_t: param, uint16_t: param, uint32_t: param, uint64_t: param, default: *(type *)__ARM_undef)
6a90680b ASDV |
35662 | |
#if (__ARM_FEATURE_MVE & 2) /* MVE Floating point.  */

/* Polymorphic vst4q: dispatch on the (pointer, vector-tuple) argument pair to
   the matching type-suffixed store intrinsic.  The _Generic controlling
   expression is a pointer-to-2D-array type whose dimensions encode the two
   operand classifiers from __ARM_mve_typeid.  */
#define __arm_vst4q(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16x4_t]: __arm_vst4q_s8 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, int8x16x4_t)), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8x4_t]: __arm_vst4q_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, int16x8x4_t)), \
  int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4x4_t]: __arm_vst4q_s32 (__ARM_mve_coerce(__p0, int32_t *), __ARM_mve_coerce(__p1, int32x4x4_t)), \
  int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16x4_t]: __arm_vst4q_u8 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16x4_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8x4_t]: __arm_vst4q_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8x4_t)), \
  int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4x4_t]: __arm_vst4q_u32 (__ARM_mve_coerce(__p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4x4_t)), \
  int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_float16x8x4_t]: __arm_vst4q_f16 (__ARM_mve_coerce(__p0, float16_t *), __ARM_mve_coerce(__p1, float16x8x4_t)), \
  int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4x4_t]: __arm_vst4q_f32 (__ARM_mve_coerce(__p0, float32_t *), __ARM_mve_coerce(__p1, float32x4x4_t)));})
35676 | ||
/* Polymorphic float rounding intrinsics: each dispatches its single vector
   operand (f16 or f32) to the matching type-suffixed variant.  */
#define __arm_vrndxq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndxq_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndxq_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})

#define __arm_vrndq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndq_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndq_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})

#define __arm_vrndpq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndpq_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndpq_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})

#define __arm_vrndnq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndnq_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndnq_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})

#define __arm_vrndmq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndmq_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndmq_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})

#define __arm_vrndaq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndaq_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndaq_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
35706 | ||
/* Polymorphic vrev64q: element reversal within 64-bit halves, for all
   integer and float vector element types.  */
#define __arm_vrev64q(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev64q_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vrev64q_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vrev64q_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev64q_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrev64q_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrev64q_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vrev64q_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vrev64q_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})

/* Polymorphic vnegq: negation for signed-integer and float vectors only
   (unsigned vectors deliberately have no arm — a mismatch is a type error).  */
#define __arm_vnegq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vnegq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vnegq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vnegq_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vnegq_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vnegq_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})

/* NOTE(review): this overload dispatches on float VECTOR types, while ACLE
   defines vdupq_n as taking a scalar — looks suspicious; confirm against the
   ACLE spec and the __arm_vdupq_n_f16/_f32 prototypes before relying on it.  */
#define __arm_vdupq_n(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vdupq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vdupq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})

/* Polymorphic vabsq: absolute value for signed-integer and float vectors.  */
#define __arm_vabsq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vabsq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vabsq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vabsq_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vabsq_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vabsq_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
35738 | ||
/* Polymorphic vrev32q: element reversal within 32-bit words; only 8- and
   16-bit element types (and f16) are meaningful, so only those have arms.  */
#define __arm_vrev32q(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev32q_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vrev32q_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev32q_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrev32q_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vrev32q_f16 (__ARM_mve_coerce(__p0, float16x8_t)));})

/* vcvtbq_f32 / vcvttq_f32: widen the bottom/top f16 half-elements to f32;
   f16 input is the only valid operand type.  */
#define __arm_vcvtbq_f32(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vcvtbq_f32_f16 (__ARM_mve_coerce(__p0, float16x8_t)));})

#define __arm_vcvttq_f32(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vcvttq_f32_f16 (__ARM_mve_coerce(__p0, float16x8_t)));})

/* Polymorphic vrev16q: byte reversal within 16-bit halfwords; 8-bit
   element vectors only.  */
#define __arm_vrev16q(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev16q_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev16q_u8 (__ARM_mve_coerce(__p0, uint8x16_t)));})

/* Polymorphic vqabsq / vqnegq: saturating abs/negate, signed vectors only.  */
#define __arm_vqabsq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vqabsq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vqabsq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vqabsq_s32 (__ARM_mve_coerce(__p0, int32x4_t)));})

#define __arm_vqnegq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vqnegq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vqnegq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vqnegq_s32 (__ARM_mve_coerce(__p0, int32x4_t)));})
35771 | ||
/* Polymorphic vmvnq: bitwise NOT for all integer vector types.  */
#define __arm_vmvnq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vmvnq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vmvnq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vmvnq_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmvnq_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmvnq_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vmvnq_u32 (__ARM_mve_coerce(__p0, uint32x4_t)));})

/* Polymorphic vmovlbq / vmovltq: widen the bottom/top half-elements of an
   8- or 16-bit element vector to the next wider element type.  */
#define __arm_vmovlbq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vmovlbq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vmovlbq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmovlbq_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmovlbq_u16 (__ARM_mve_coerce(__p0, uint16x8_t)));})

#define __arm_vmovltq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vmovltq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vmovltq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmovltq_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmovltq_u16 (__ARM_mve_coerce(__p0, uint16x8_t)));})

/* Polymorphic vclzq: count leading zeros, per element.  */
#define __arm_vclzq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vclzq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vclzq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vclzq_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vclzq_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vclzq_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vclzq_u32 (__ARM_mve_coerce(__p0, uint32x4_t)));})

/* Polymorphic vclsq: count leading sign bits, signed vectors only.  */
#define __arm_vclsq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vclsq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vclsq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vclsq_s32 (__ARM_mve_coerce(__p0, int32x4_t)));})

/* Polymorphic vcvtq: integer-to-float conversion; the result element type
   (f16 vs f32) is implied by the input element width.  */
#define __arm_vcvtq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vcvtq_f16_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vcvtq_f32_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vcvtq_f16_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vcvtq_f32_u32 (__ARM_mve_coerce(__p0, uint32x4_t)));})
35816 | ||
/* Polymorphic vshlq: per-element shift; the shift-count vector is always a
   signed vector of the same element width, even for unsigned data.  */
#define __arm_vshlq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vshlq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vshlq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vshlq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vshlq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vshlq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vshlq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})

/* Polymorphic vshrq: shift right by immediate.  p1 is passed through
   uncoerced so it remains an integer constant expression, as required by
   the _n intrinsics.  */
#define __arm_vshrq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vshrq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vshrq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vshrq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshrq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshrq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshrq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})

/* Polymorphic vcvtq_n: integer-to-float conversion with a fixed-point
   fraction-bits immediate p1 (kept uncoerced for the same reason).  */
#define __arm_vcvtq_n(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vcvtq_n_f16_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vcvtq_n_f32_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vcvtq_n_f16_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vcvtq_n_f32_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
35842 | ||
/* Polymorphic vorrq: bitwise OR for all integer and float vector types.  */
#define __arm_vorrq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vorrq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vorrq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vorrq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vorrq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vorrq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vorrq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vorrq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vorrq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})

/* Polymorphic vabdq: absolute difference for all integer and float types.  */
#define __arm_vabdq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vabdq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vabdq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vabdq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vabdq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vabdq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vabdq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vabdq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vabdq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})

/* Polymorphic vaddq: vector+vector arms first, then vector+scalar (_n)
   arms.  The scalar p1 is passed uncoerced through __ARM_mve_coerce3/
   __ARM_mve_coerce2 so constant expressions keep their original form.  */
#define __arm_vaddq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vaddq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vaddq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vaddq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vaddq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vaddq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vaddq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vaddq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vaddq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vaddq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce2(p1, double)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vaddq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce2(p1, double)));})
e3678b44 | 35886 | |
/* Polymorphic vandq: bitwise AND for all integer and float vector types.  */
#define __arm_vandq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vandq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vandq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vandq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vandq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vandq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vandq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vandq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vandq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})

/* Polymorphic vbicq: bit clear.  The immediate (_n) arms come first and use
   __ARM_mve_coerce1 so a const-qualified scalar also matches; 16/32-bit
   element types only for the immediate form.  */
#define __arm_vbicq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vbicq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce1 (__p1, int)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vbicq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce1 (__p1, int)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vbicq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce1 (__p1, int)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vbicq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce1 (__p1, int)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vbicq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vbicq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vbicq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vbicq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vbicq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vbicq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vbicq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vbicq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})

/* Polymorphic vornq: OR with complement of the second operand.  */
#define __arm_vornq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vornq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vornq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vornq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vornq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vornq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vornq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vornq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vornq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
35926 | ||
/* Polymorphic vmulq: vector*scalar (_n) arms first, then vector*vector.
   The scalar p1 goes through __ARM_mve_coerce3 (any integer scalar) or
   __ARM_mve_coerce2 (fp scalar), uncoerced via __p1 so constant expressions
   are preserved.  */
#define __arm_vmulq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vmulq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce2(p1, double)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vmulq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce2(p1, double)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmulq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmulq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})

/* Polymorphic vcaddq_rot270: complex add with 270-degree rotation.  */
#define __arm_vcaddq_rot270(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot270_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot270_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot270_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot270_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot270_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot270_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcaddq_rot270_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcaddq_rot270_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
35958 | ||
e3678b44 SP |
35959 | #define __arm_vcmpeqq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ |
35960 | __typeof(p1) __p1 = (p1); \ | |
35961 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
31df339a SMW |
35962 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce3(p1, int)), \ |
35963 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce3(p1, int)), \ | |
35964 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce3(p1, int)), \ | |
35965 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce3(p1, int)), \ | |
35966 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce3(p1, int)), \ | |
35967 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce3(p1, int)), \ | |
35968 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpeqq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce2(p1, double)), \ | |
35969 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpeqq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce2(p1, double)), \ | |
e3678b44 SP |
35970 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpeqq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \ |
35971 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpeqq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ | |
35972 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpeqq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \ | |
35973 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpeqq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \ | |
35974 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpeqq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ | |
35975 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpeqq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \ | |
35976 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpeqq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \ | |
35977 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpeqq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));}) | |
35978 | ||
e3678b44 SP |
35979 | #define __arm_vcaddq_rot90(p0,p1) ({ __typeof(p0) __p0 = (p0); \ |
35980 | __typeof(p1) __p1 = (p1); \ | |
35981 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
35982 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot90_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \ | |
35983 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot90_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ | |
35984 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot90_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \ | |
35985 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot90_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \ | |
35986 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot90_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ | |
35987 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot90_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \ | |
35988 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcaddq_rot90_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \ | |
35989 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcaddq_rot90_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));}) | |
f9355dee | 35990 | |
e3678b44 SP |
35991 | #define __arm_vcmpeqq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ |
35992 | __typeof(p1) __p1 = (p1); \ | |
35993 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
35994 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpeqq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \ | |
35995 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpeqq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
35996 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpeqq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ | |
35997 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpeqq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \ | |
35998 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpeqq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ | |
35999 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpeqq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \ | |
31df339a SMW |
36000 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce3(p1, int), p2), \ |
36001 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce3(p1, int), p2), \ | |
36002 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce3(p1, int), p2), \ | |
36003 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce3(p1, int), p2), \ | |
36004 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce3(p1, int), p2), \ | |
36005 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce3(p1, int), p2), \ | |
e3678b44 SP |
36006 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpeqq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \ |
36007 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpeqq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2), \ | |
31df339a SMW |
36008 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpeqq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce2(p1, double), p2), \ |
36009 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpeqq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce2(p1, double), p2));}) | |
f9355dee | 36010 | |
e3678b44 | 36011 | #define __arm_vcmpgtq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ |
f9355dee SP |
36012 | __typeof(p1) __p1 = (p1); \ |
36013 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
e3678b44 SP |
36014 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpgtq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \ |
36015 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpgtq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ | |
36016 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpgtq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \ | |
31df339a SMW |
36017 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpgtq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce3(p1, int)), \ |
36018 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpgtq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce3(p1, int)), \ | |
36019 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpgtq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce3(p1, int)), \ | |
e3678b44 | 36020 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpgtq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \ |
0f3cc1b3 | 36021 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpgtq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)), \ |
31df339a SMW |
36022 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpgtq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce2(p1, double)), \ |
36023 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpgtq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce2(p1, double)));}) | |
f9355dee | 36024 | |
e3678b44 SP |
36025 | #define __arm_vcmpleq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ |
36026 | __typeof(p1) __p1 = (p1); \ | |
36027 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
36028 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpleq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \ | |
36029 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpleq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ | |
36030 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpleq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \ | |
36031 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpleq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \ | |
36032 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpleq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)), \ | |
31df339a SMW |
36033 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpleq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce3(p1, int)), \ |
36034 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpleq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce3(p1, int)), \ | |
36035 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpleq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce3(p1, int)), \ | |
36036 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpleq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce2(p1, double)), \ | |
36037 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpleq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce2(p1, double)));}) | |
e3678b44 | 36038 | |
e3678b44 SP |
36039 | #define __arm_vcmpltq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ |
36040 | __typeof(p1) __p1 = (p1); \ | |
36041 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
36042 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpltq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \ | |
36043 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpltq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ | |
36044 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpltq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \ | |
31df339a SMW |
36045 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpltq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce3(p1, int)), \ |
36046 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpltq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce3(p1, int)), \ | |
36047 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpltq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce3(p1, int)), \ | |
e3678b44 SP |
36048 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpltq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \ |
36049 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpltq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)), \ | |
31df339a SMW |
36050 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpltq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce2(p1, double)), \ |
36051 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpltq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce2(p1, double)));}) | |
e3678b44 | 36052 | |
e3678b44 SP |
36053 | #define __arm_vcmpneq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ |
36054 | __typeof(p1) __p1 = (p1); \ | |
36055 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
31df339a SMW |
36056 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce3(p1, int)), \ |
36057 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce3(p1, int)), \ | |
36058 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce3(p1, int)), \ | |
36059 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce3(p1, int)), \ | |
36060 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce3(p1, int)), \ | |
36061 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce3(p1, int)), \ | |
36062 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpneq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce2(p1, double)), \ | |
36063 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpneq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce2(p1, double)), \ | |
e3678b44 SP |
36064 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpneq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \ |
36065 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpneq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ | |
36066 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpneq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \ | |
36067 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpneq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \ | |
36068 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpneq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ | |
36069 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpneq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \ | |
36070 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpneq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \ | |
36071 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpneq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));}) | |
36072 | ||
e3678b44 SP |
36073 | #define __arm_vcmulq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ |
36074 | __typeof(p1) __p1 = (p1); \ | |
36075 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
36076 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \ | |
36077 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));}) | |
36078 | ||
e3678b44 SP |
36079 | #define __arm_vcmulq_rot180(p0,p1) ({ __typeof(p0) __p0 = (p0); \ |
36080 | __typeof(p1) __p1 = (p1); \ | |
36081 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
36082 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_rot180_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \ | |
36083 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_rot180_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));}) | |
f9355dee | 36084 | |
e3678b44 SP |
36085 | #define __arm_vcmulq_rot270(p0,p1) ({ __typeof(p0) __p0 = (p0); \ |
36086 | __typeof(p1) __p1 = (p1); \ | |
36087 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
36088 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_rot270_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \ | |
36089 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_rot270_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));}) | |
f9355dee | 36090 | |
e3678b44 SP |
36091 | #define __arm_vcmulq_rot90(p0,p1) ({ __typeof(p0) __p0 = (p0); \ |
36092 | __typeof(p1) __p1 = (p1); \ | |
36093 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
36094 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_rot90_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \ | |
36095 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_rot90_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));}) | |
f9355dee | 36096 | |
e3678b44 SP |
36097 | #define __arm_veorq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ |
36098 | __typeof(p1) __p1 = (p1); \ | |
36099 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
36100 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_veorq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \ | |
36101 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_veorq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ | |
36102 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_veorq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \ | |
36103 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_veorq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \ | |
36104 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_veorq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ | |
36105 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_veorq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \ | |
36106 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_veorq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \ | |
36107 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_veorq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));}) | |
f9355dee | 36108 | |
e3678b44 SP |
36109 | #define __arm_vmaxnmaq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ |
36110 | __typeof(p1) __p1 = (p1); \ | |
36111 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
36112 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmaxnmaq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \ | |
36113 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmaxnmaq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));}) | |
f9355dee | 36114 | |
e3678b44 SP |
36115 | #define __arm_vmaxnmavq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ |
36116 | __typeof(p1) __p1 = (p1); \ | |
36117 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
31df339a SMW |
36118 | int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float16x8_t]: __arm_vmaxnmavq_f16 (__ARM_mve_coerce2(p0, double), __ARM_mve_coerce(__p1, float16x8_t)), \ |
36119 | int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float32x4_t]: __arm_vmaxnmavq_f32 (__ARM_mve_coerce2(p0, double), __ARM_mve_coerce(__p1, float32x4_t)));}) | |
f9355dee | 36120 | |
e3678b44 SP |
36121 | #define __arm_vmaxnmq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ |
36122 | __typeof(p1) __p1 = (p1); \ | |
36123 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
36124 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmaxnmq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \ | |
36125 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmaxnmq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));}) | |
f9355dee | 36126 | |
e3678b44 SP |
36127 | #define __arm_vmaxnmvq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ |
36128 | __typeof(p1) __p1 = (p1); \ | |
36129 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
31df339a SMW |
36130 | int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float16x8_t]: __arm_vmaxnmvq_f16 (__ARM_mve_coerce2(p0, double), __ARM_mve_coerce(__p1, float16x8_t)), \ |
36131 | int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float32x4_t]: __arm_vmaxnmvq_f32 (__ARM_mve_coerce2(p0, double), __ARM_mve_coerce(__p1, float32x4_t)));}) | |
f9355dee | 36132 | |
e3678b44 SP |
36133 | #define __arm_vmaxnmvq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ |
36134 | __typeof(p1) __p1 = (p1); \ | |
36135 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
31df339a SMW |
36136 | int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float16x8_t]: __arm_vmaxnmvq_f16 (__ARM_mve_coerce2(p0, double), __ARM_mve_coerce(__p1, float16x8_t)), \ |
36137 | int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float32x4_t]: __arm_vmaxnmvq_f32 (__ARM_mve_coerce2(p0, double), __ARM_mve_coerce(__p1, float32x4_t)));}) | |
f9355dee | 36138 | |
e3678b44 SP |
36139 | #define __arm_vminnmaq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ |
36140 | __typeof(p1) __p1 = (p1); \ | |
36141 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
36142 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vminnmaq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \ | |
36143 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vminnmaq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));}) | |
f9355dee | 36144 | |
e3678b44 SP |
36145 | #define __arm_vminnmavq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ |
36146 | __typeof(p1) __p1 = (p1); \ | |
36147 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
31df339a SMW |
36148 | int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float16x8_t]: __arm_vminnmavq_f16 (__ARM_mve_coerce2(p0, double), __ARM_mve_coerce(__p1, float16x8_t)), \ |
36149 | int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float32x4_t]: __arm_vminnmavq_f32 (__ARM_mve_coerce2(p0, double), __ARM_mve_coerce(__p1, float32x4_t)));}) | |
f9355dee | 36150 | |
e3678b44 | 36151 | #define __arm_vbrsrq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ |
f9355dee | 36152 | _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ |
e3678b44 SP |
36153 | int (*)[__ARM_mve_type_int8x16_t]: __arm_vbrsrq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \ |
36154 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vbrsrq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \ | |
36155 | int (*)[__ARM_mve_type_int32x4_t]: __arm_vbrsrq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \ | |
36156 | int (*)[__ARM_mve_type_uint8x16_t]: __arm_vbrsrq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \ | |
36157 | int (*)[__ARM_mve_type_uint16x8_t]: __arm_vbrsrq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \ | |
36158 | int (*)[__ARM_mve_type_uint32x4_t]: __arm_vbrsrq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1), \ | |
36159 | int (*)[__ARM_mve_type_float16x8_t]: __arm_vbrsrq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), p1), \ | |
36160 | int (*)[__ARM_mve_type_float32x4_t]: __arm_vbrsrq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), p1));}) | |
f9355dee | 36161 | |
e3678b44 SP |
36162 | #define __arm_vminnmq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ |
36163 | __typeof(p1) __p1 = (p1); \ | |
36164 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
36165 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vminnmq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \ | |
36166 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vminnmq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));}) | |
f9355dee | 36167 | |
e3678b44 SP |
36168 | #define __arm_vsubq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ |
36169 | __typeof(p1) __p1 = (p1); \ | |
36170 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
31df339a SMW |
36171 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vsubq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce2(p1, double)), \ |
36172 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vsubq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce2(p1, double)), \ | |
36173 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vsubq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce3(p1, int)), \ | |
36174 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vsubq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce3(p1, int)), \ | |
36175 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vsubq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce3(p1, int)), \ | |
36176 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vsubq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce3(p1, int)), \ | |
36177 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vsubq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce3(p1, int)), \ | |
36178 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vsubq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce3(p1, int)), \ | |
e3678b44 SP |
36179 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsubq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \ |
36180 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsubq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ | |
36181 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsubq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \ | |
36182 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsubq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \ | |
36183 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsubq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ | |
36184 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsubq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \ | |
36185 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vsubq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \ | |
36186 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vsubq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));}) | |
f9355dee | 36187 | |
e3678b44 SP |
36188 | #define __arm_vminnmvq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ |
36189 | __typeof(p1) __p1 = (p1); \ | |
36190 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
31df339a SMW |
36191 | int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float16x8_t]: __arm_vminnmvq_f16 (__ARM_mve_coerce2(p0, double), __ARM_mve_coerce(__p1, float16x8_t)), \ |
36192 | int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float32x4_t]: __arm_vminnmvq_f32 (__ARM_mve_coerce2(p0, double), __ARM_mve_coerce(__p1, float32x4_t)));}) | |
f9355dee | 36193 | |
e3678b44 | 36194 | #define __arm_vshlq_r(p0,p1) ({ __typeof(p0) __p0 = (p0); \ |
f9355dee | 36195 | _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ |
e3678b44 SP |
36196 | int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlq_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \ |
36197 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlq_r_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \ | |
36198 | int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlq_r_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \ | |
36199 | int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlq_r_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \ | |
36200 | int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlq_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \ | |
36201 | int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlq_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));}) | |
f9355dee | 36202 | |
/* Type-generic vshlq_n: shift left by immediate P1.
   Dispatches on the MVE vector type of P0.  */
#define __arm_vshlq_n(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
f9355dee | 36211 | |
/* Type-generic vshlltq: long shift left (top halves) by immediate P1.
   Only 8- and 16-bit element vectors are valid since the result widens.  */
#define __arm_vshlltq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlltq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlltq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlltq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlltq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1));})
f9355dee | 36218 | |
/* Type-generic vshllbq: long shift left (bottom halves) by immediate P1.
   Only 8- and 16-bit element vectors are valid since the result widens.  */
#define __arm_vshllbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vshllbq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vshllbq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshllbq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshllbq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1));})
f9355dee | 36225 | |
/* Type-generic vrshrq: rounding shift right by immediate P1.
   Dispatches on the MVE vector type of P0.
   Note: the source carried two byte-identical definitions of this macro
   (a benign redefinition under C11 6.10.3); the duplicate is removed.  */
#define __arm_vrshrq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vrshrq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vrshrq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vrshrq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrshrq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrshrq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrshrq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
f9355dee | 36243 | |
/* Type-generic vrshlq: rounding shift left.  Vector-by-scalar forms (P1 an
   integer) map to the _n variants via __ARM_mve_coerce3; vector-by-vector
   forms always take a *signed* shift-count vector, including for unsigned
   data vectors.  */
#define __arm_vrshlq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshlq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshlq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
f9355dee | 36259 | |
/* Type-generic vrmulhq: rounding multiply returning high half.
   Dispatches on the matching vector types of P0 and P1.  */
#define __arm_vrmulhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrmulhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrmulhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrmulhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrmulhq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrmulhq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrmulhq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
f9355dee | 36269 | |
e3678b44 SP |
/* Type-generic vrhaddq: rounding halving add.
   Dispatches on the matching vector types of P0 and P1.  */
#define __arm_vrhaddq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrhaddq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrhaddq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrhaddq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrhaddq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrhaddq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrhaddq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
36279 | ||
e3678b44 SP |
/* Type-generic vqsubq: saturating subtract.  Supports vector-scalar
   (_n variants, scalar routed through __ARM_mve_coerce3) and
   vector-vector forms for all signed and unsigned element widths.  */
#define __arm_vqsubq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqsubq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqsubq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqsubq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vqsubq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vqsubq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vqsubq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqsubq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqsubq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqsubq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vqsubq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vqsubq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vqsubq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
36295 | ||
/* Type-generic vqshluq: saturating shift left by immediate with unsigned
   result.  Only signed input vectors are valid.  */
#define __arm_vqshluq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vqshluq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vqshluq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vqshluq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1));})
f9355dee | 36301 | |
e3678b44 SP |
/* Type-generic vqshlq: saturating shift left by a vector of shift counts.
   The count vector P1 is always signed, including for unsigned data.  */
#define __arm_vqshlq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqshlq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqshlq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqshlq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqshlq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqshlq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqshlq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
36311 | ||
/* Type-generic vqshlq_r: saturating shift left by a register-held scalar P1.
   Dispatches on the MVE vector type of P0.  */
#define __arm_vqshlq_r(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vqshlq_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vqshlq_r_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vqshlq_r_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vqshlq_r_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vqshlq_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vqshlq_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
f9355dee | 36320 | |
e3678b44 SP |
/* Type-generic vqshlq_n: saturating shift left by immediate P1.
   Dispatches on the MVE vector type of P0.  */
#define __arm_vqshlq_n(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vqshlq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vqshlq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vqshlq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vqshlq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vqshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vqshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
36329 | ||
/* Type-generic vqrshlq: saturating rounding shift left.  Vector-vector
   forms take a signed count vector; scalar counts map to the _n variants
   via __ARM_mve_coerce3.  */
#define __arm_vqrshlq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrshlq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrshlq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrshlq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrshlq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrshlq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrshlq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce3(p1, int)));})
f9355dee | 36345 | |
/* Type-generic vqrdmulhq: saturating rounding doubling multiply returning
   high half.  Signed element types only; P1 may be a vector or a scalar.  */
#define __arm_vqrdmulhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmulhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmulhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmulhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqrdmulhq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrdmulhq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrdmulhq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce3(p1, int)));})
f9355dee | 36355 | |
/* Type-generic vmlaldavxq: multiply-accumulate long across vector,
   exchanged pairs.  Signed 16/32-bit element vectors only.  */
#define __arm_vmlaldavxq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlaldavxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlaldavxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
f9355dee | 36361 | |
/* Type-generic vqmovuntq: saturating narrow of signed P1 to unsigned,
   writing the top halves of P0.  */
#define __arm_vqmovuntq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovuntq_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovuntq_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t)));})
36367 | ||
e3678b44 SP |
/* Type-generic vqmovntq: saturating narrow of P1, writing the top halves
   of P0.  Signed and unsigned variants.  */
#define __arm_vqmovntq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovntq_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovntq_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqmovntq_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqmovntq_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
36375 | ||
e3678b44 SP |
/* Type-generic vqmovnbq: saturating narrow of P1, writing the bottom
   halves of P0.  Signed and unsigned variants.  */
#define __arm_vqmovnbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovnbq_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovnbq_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqmovnbq_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqmovnbq_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
f9355dee | 36383 | |
/* Type-generic vqdmulltq: saturating doubling multiply long (top halves).
   Signed 16/32-bit elements; P1 may be a scalar (_n) or a vector.  */
#define __arm_vqdmulltq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmulltq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmulltq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmulltq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmulltq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
f9355dee | 36391 | |
/* Type-generic vqmovunbq: saturating narrow of signed P1 to unsigned,
   writing the bottom halves of P0.  */
#define __arm_vqmovunbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovunbq_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovunbq_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t)));})
f9355dee | 36397 | |
/* Type-generic vqdmullbq: saturating doubling multiply long (bottom halves).
   Signed 16/32-bit elements; P1 may be a scalar (_n) or a vector.  */
#define __arm_vqdmullbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmullbq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmullbq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmullbq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmullbq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
f9355dee | 36405 | |
/* Type-generic vqdmulhq: saturating doubling multiply returning high half.
   Signed element types only; P1 may be a scalar (_n) or a vector.  */
#define __arm_vqdmulhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqdmulhq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmulhq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmulhq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmulhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmulhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmulhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
f9355dee | 36415 | |
/* Type-generic vqaddq: saturating add.  Supports vector-scalar (_n
   variants, scalar routed through __ARM_mve_coerce3) and vector-vector
   forms for all signed and unsigned element widths.  */
#define __arm_vqaddq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqaddq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqaddq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqaddq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vqaddq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vqaddq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vqaddq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqaddq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqaddq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqaddq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vqaddq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vqaddq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vqaddq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
f9355dee | 36431 | |
/* Type-generic vmulltq_poly: polynomial multiply long (top halves).
   Polynomial types are carried in unsigned vectors (p8/p16).  */
#define __arm_vmulltq_poly(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulltq_poly_p8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulltq_poly_p16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)));})
f9355dee | 36437 | |
/* Type-generic vmullbq_poly: polynomial multiply long (bottom halves).
   Polynomial types are carried in unsigned vectors (p8/p16).  */
#define __arm_vmullbq_poly(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmullbq_poly_p8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmullbq_poly_p16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)));})
f9355dee | 36443 | |
/* Type-generic vmulltq_int: integer multiply long (top halves).
   Dispatches on the matching vector types of P0 and P1.  */
#define __arm_vmulltq_int(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulltq_int_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulltq_int_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulltq_int_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulltq_int_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulltq_int_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulltq_int_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
f9355dee | 36453 | |
/* Type-generic vhaddq: halving add.  Supports vector-scalar (_n variants,
   scalar routed through __ARM_mve_coerce3) and vector-vector forms for all
   signed and unsigned element widths.  */
#define __arm_vhaddq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vhaddq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vhaddq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vhaddq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vhaddq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vhaddq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vhaddq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhaddq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhaddq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhaddq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vhaddq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vhaddq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vhaddq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
f9355dee | 36469 | |
/* Type-generic vhcaddq_rot270: halving complex add with 270-degree
   rotation.  Signed element types only.  */
#define __arm_vhcaddq_rot270(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhcaddq_rot270_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhcaddq_rot270_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhcaddq_rot270_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
f9355dee | 36476 | |
/* Type-generic vhcaddq_rot90: halving complex add with 90-degree
   rotation.  Signed element types only.  */
#define __arm_vhcaddq_rot90(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhcaddq_rot90_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhcaddq_rot90_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhcaddq_rot90_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
f9355dee | 36483 | |
/* vhsubq: halving subtract.  Dispatches both the vector-scalar form
   (second operand is a plain integer, matched as __ARM_mve_type_int_n
   and coerced via __ARM_mve_coerce3 on the RAW argument, as required
   for the scalar-type match) and the vector-vector form.  */
#define __arm_vhsubq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vhsubq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vhsubq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vhsubq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vhsubq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vhsubq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vhsubq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhsubq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhsubq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhsubq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vhsubq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vhsubq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vhsubq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
f9355dee | 36499 | |
/* vminq: element-wise minimum; vector-vector, all six element types.  */
#define __arm_vminq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vminq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vminq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vminq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vminq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vminq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vminq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})

/* vminaq: min of absolute value against an unsigned accumulator;
   first operand unsigned, second operand signed.  */
#define __arm_vminaq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vminaq_s8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vminaq_s16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vminaq_s32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})

/* vmaxq: element-wise maximum; vector-vector, all six element types.  */
#define __arm_vmaxq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmaxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmaxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmaxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmaxq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmaxq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmaxq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})

/* vmaxaq: max of absolute value against an unsigned accumulator;
   first operand unsigned, second operand signed.  */
#define __arm_vmaxaq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmaxaq_s8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmaxaq_s16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmaxaq_s32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
f9355dee | 36533 | |
/* vmovntq: narrow each wide element and write it into the TOP (odd)
   half-lanes of the destination; p0 is the narrow destination.  */
#define __arm_vmovntq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vmovntq_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vmovntq_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vmovntq_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vmovntq_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t)));})

/* vmovnbq: narrow each wide element and write it into the BOTTOM
   (even) half-lanes of the destination; p0 is the narrow destination.  */
#define __arm_vmovnbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vmovnbq_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vmovnbq_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vmovnbq_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vmovnbq_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
f9355dee | 36549 | |
/* vmulhq: multiply returning the high half of each product.  */
#define __arm_vmulhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulhq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulhq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulhq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})

/* vmullbq_int: widening multiply of the BOTTOM (even) half-lanes.  */
#define __arm_vmullbq_int(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmullbq_int_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmullbq_int_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmullbq_int_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmullbq_int_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmullbq_int_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmullbq_int_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
f9355dee | 36569 | |
/* vbicq_m_n: predicated bitwise clear with immediate.  p1 (immediate)
   and p2 (predicate) are forwarded unchanged; only 16/32-bit element
   types exist for this form.  */
#define __arm_vbicq_m_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vbicq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vbicq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vbicq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vbicq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})

/* vqrshrnbq: saturating rounding shift right and narrow into the
   BOTTOM half-lanes; p2 is the immediate shift count.  */
#define __arm_vqrshrnbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshrnbq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshrnbq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqrshrnbq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqrshrnbq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})

/* vqrshrunbq: saturating rounding shift right, signed input to an
   UNSIGNED narrow result, bottom half-lanes; p2 is the immediate.  */
#define __arm_vqrshrunbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshrunbq_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshrunbq_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
f9355dee | 36590 | |
/* vshlcq: shift left with carry through a GPR; p1 (carry pointer) and
   p2 (immediate) are forwarded unchanged.  */
#define __arm_vshlcq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlcq_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlcq_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlcq_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlcq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlcq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlcq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})
36599 | ||
/* vclsq_m: predicated count leading sign bits (signed only);
   p0 is the inactive-lane value, p2 the predicate.  */
#define __arm_vclsq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vclsq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vclsq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vclsq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})

/* vclzq_m: predicated count leading zeros, all six element types.  */
#define __arm_vclzq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vclzq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vclzq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vclzq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vclzq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vclzq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vclzq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
f9355dee | 36616 | |
/* vmaxaq_m: predicated max of absolute value; unsigned accumulator
   (p0), signed input (p1), predicate (p2).  */
#define __arm_vmaxaq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmaxaq_m_s8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmaxaq_m_s16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmaxaq_m_s32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})

/* vminaq_m: predicated min of absolute value; same operand scheme as
   vmaxaq_m.  */
#define __arm_vminaq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vminaq_m_s8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vminaq_m_s16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vminaq_m_s32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
f9355dee | 36630 | |
/* vmlaq: multiply-accumulate, vector * scalar added to vector; the
   scalar (p2) is matched as int_n and coerced via __ARM_mve_coerce3 on
   the raw argument.  */
#define __arm_vmlaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce3(p2, int)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce3(p2, int)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce3(p2, int)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce3(p2, int)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce3(p2, int)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce3(p2, int)));})
f9355dee | 36641 | |
/* vsriq: shift right and insert; p2 is the immediate shift count.  */
#define __arm_vsriq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsriq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsriq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsriq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsriq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsriq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsriq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})

/* vsliq: shift left and insert; p2 is the immediate shift count.  */
#define __arm_vsliq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsliq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsliq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsliq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsliq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsliq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsliq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
f9355dee | 36661 | |
/* vshlq_m_r: predicated shift by a scalar in a GPR; p1 (shift) and
   p2 (predicate) are forwarded unchanged.  */
#define __arm_vshlq_m_r(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlq_m_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlq_m_r_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlq_m_r_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlq_m_r_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlq_m_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlq_m_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})
14782c81 | 36670 | |
/* vrshlq_m_n: predicated rounding shift by a scalar in a GPR.
   Fix: the macro saves p1 into __p1 but only the u32 branch used the
   saved copy while the other five branches referenced the raw argument;
   all branches now consistently use __p1 (single evaluation of p1).  */
#define __arm_vrshlq_m_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vrshlq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __p1, p2), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vrshlq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __p1, p2), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vrshlq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __p1, p2), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrshlq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __p1, p2), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrshlq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __p1, p2), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrshlq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __p1, p2));})
14782c81 | 36680 | |
/* vqshlq_m_r: predicated saturating shift by a scalar in a GPR.  */
#define __arm_vqshlq_m_r(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vqshlq_m_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vqshlq_m_r_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vqshlq_m_r_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vqshlq_m_r_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vqshlq_m_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vqshlq_m_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})

/* vqrshlq_m_n: predicated saturating rounding shift by a scalar.  */
#define __arm_vqrshlq_m_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vqrshlq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vqrshlq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vqrshlq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vqrshlq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vqrshlq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vqrshlq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})
f9355dee | 36698 | |
/* vqrdmlsdhxq: saturating rounding doubling multiply-subtract-dual,
   exchanged variant; three signed vector operands.  */
#define __arm_vqrdmlsdhxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmlsdhxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmlsdhxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmlsdhxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})

/* vqrdmlsdhq: saturating rounding doubling multiply-subtract-dual;
   three signed vector operands.  */
#define __arm_vqrdmlsdhq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmlsdhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmlsdhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmlsdhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
f9355dee | 36714 | |
/* vqrdmlashq: saturating rounding doubling multiply-accumulate with a
   scalar addend; scalar p2 matched as int_n via __ARM_mve_coerce3.  */
#define __arm_vqrdmlashq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqrdmlashq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce3(p2, int)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrdmlashq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce3(p2, int)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrdmlashq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce3(p2, int)));})

/* vqdmlashq: saturating doubling multiply-accumulate with a scalar
   addend (non-rounding variant of vqrdmlashq).  */
#define __arm_vqdmlashq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqdmlashq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce3(p2, int)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmlashq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce3(p2, int)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmlashq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce3(p2, int)));})

/* vqrdmlahq: saturating rounding doubling multiply-accumulate,
   vector * scalar (p2) added to vector.  */
#define __arm_vqrdmlahq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqrdmlahq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce3(p2, int)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrdmlahq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce3(p2, int)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrdmlahq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce3(p2, int)));})
f9355dee | 36738 | |
/* vmlasq: multiply-accumulate with scalar addend, (p0 * p1) + p2;
   scalar p2 matched as int_n, all six element types.  */
#define __arm_vmlasq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce3(p2, int)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce3(p2, int)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce3(p2, int)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce3(p2, int)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce3(p2, int)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce3(p2, int)));})
a50f6abf | 36749 | |
/* vqdmlahq: saturating doubling multiply-accumulate, vector * scalar
   (p2) added to vector; signed element types only.  */
#define __arm_vqdmlahq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqdmlahq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce3(p2, int)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmlahq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce3(p2, int)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmlahq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce3(p2, int)));})
f9355dee | 36757 | |
/* Overloaded form of vqrdmladhxq: dispatches on the three vector
   argument types to the matching signed-element intrinsic.  */
#define __arm_vqrdmladhxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmladhxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmladhxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmladhxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
f9355dee | 36765 | |
/* Overloaded form of vqrdmladhq: dispatches on the three vector
   argument types to the matching signed-element intrinsic.  */
#define __arm_vqrdmladhq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmladhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmladhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmladhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
a50f6abf | 36773 | |
/* Overloaded predicated form of vqnegq: p2 is the predicate, passed
   through unchanged; dispatch is on the two vector arguments.  */
#define __arm_vqnegq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqnegq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqnegq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqnegq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
36780 | ||
/* Overloaded form of vqdmlsdhxq: dispatches on the three vector
   argument types to the matching signed-element intrinsic.  */
#define __arm_vqdmlsdhxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmlsdhxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmlsdhxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmlsdhxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
36788 | ||
/* Overloaded form of vqdmlsdhq: dispatches on the three vector
   argument types to the matching signed-element intrinsic.  */
#define __arm_vqdmlsdhq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmlsdhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmlsdhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmlsdhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
36796 | ||
/* Overloaded form of vqdmladhxq: dispatches on the three vector
   argument types to the matching signed-element intrinsic.  */
#define __arm_vqdmladhxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmladhxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmladhxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmladhxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
36804 | ||
/* Overloaded form of vqdmladhq: dispatches on the three vector
   argument types to the matching signed-element intrinsic.  */
#define __arm_vqdmladhq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmladhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmladhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmladhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
a50f6abf | 36812 | |
/* Overloaded predicated form of vmovlbq: note the result vector (p0)
   has elements twice the width of the source (p1); p2 is the
   predicate.  */
#define __arm_vmovlbq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int8x16_t]: __arm_vmovlbq_m_s8 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t]: __arm_vmovlbq_m_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint8x16_t]: __arm_vmovlbq_m_u8 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t]: __arm_vmovlbq_m_u16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), p2));})
a50f6abf | 36820 | |
/* Overloaded predicated form of vmovnbq: the result vector (p0) has
   elements half the width of the source (p1); p2 is the predicate.  */
#define __arm_vmovnbq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vmovnbq_m_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vmovnbq_m_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vmovnbq_m_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vmovnbq_m_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
a50f6abf | 36828 | |
/* Overloaded predicated form of vmovntq: narrowing top-half move;
   dispatch on the result/source vector type pair, p2 is the
   predicate.  */
#define __arm_vmovntq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vmovntq_m_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vmovntq_m_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vmovntq_m_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vmovntq_m_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
a50f6abf | 36836 | |
/* Overloaded predicated form of vmovltq: widening top-half move;
   dispatch on the result/source vector type pair, p2 is the
   predicate.  */
#define __arm_vmovltq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int8x16_t]: __arm_vmovltq_m_s8 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t]: __arm_vmovltq_m_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint8x16_t]: __arm_vmovltq_m_u8 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t]: __arm_vmovltq_m_u16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), p2));})
a50f6abf | 36844 | |
/* Overloaded form of vshrnbq: narrowing right shift by immediate p2;
   dispatch on the result/source vector type pair.  */
#define __arm_vshrnbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vshrnbq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vshrnbq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vshrnbq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vshrnbq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
a50f6abf | 36852 | |
/* Overloaded predicated form of vcvtaq: float-to-integer conversion;
   dispatch on the integer-result/float-source type pair, p2 is the
   predicate.  */
#define __arm_vcvtaq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtaq_m_s16_f16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtaq_m_s32_f32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtaq_m_u16_f16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtaq_m_u32_f32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
a50f6abf | 36860 | |
/* Overloaded predicated form of vcvtq: covers both directions,
   integer-to-float and float-to-integer, selected by the
   result/source type pair; p2 is the predicate.  */
#define __arm_vcvtq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcvtq_m_f16_s16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcvtq_m_f32_s32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcvtq_m_f16_u16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcvtq_m_f32_u32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtq_m_s16_f16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtq_m_s32_f32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtq_m_u16_f16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtq_m_u32_f32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
a50f6abf | 36872 | |
/* Overloaded predicated fixed-point form of vcvtq: p2 is the number
   of fractional bits, p3 the predicate; both directions are
   covered.  */
#define __arm_vcvtq_m_n(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtq_m_n_s16_f16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtq_m_n_s32_f32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtq_m_n_u16_f16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtq_m_n_u32_f32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcvtq_m_n_f16_s16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcvtq_m_n_f32_s32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcvtq_m_n_f16_u16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcvtq_m_n_f32_u32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
36884 | ||
/* Overloaded predicated form of vabsq: signed integer and float
   vector variants; p2 is the predicate.  */
#define __arm_vabsq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vabsq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vabsq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vabsq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vabsq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vabsq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
6df4618c | 36893 | |
/* Overloaded form of vcmlaq: dispatches on the three float vector
   argument types.  */
#define __arm_vcmlaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmlaq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmlaq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t)));})
36900 | ||
/* Overloaded form of vcmlaq_rot180: dispatches on the three float
   vector argument types.  */
#define __arm_vcmlaq_rot180(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmlaq_rot180_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmlaq_rot180_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t)));})
36907 | ||
/* Overloaded form of vcmlaq_rot270: dispatches on the three float
   vector argument types.  */
#define __arm_vcmlaq_rot270(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmlaq_rot270_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmlaq_rot270_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t)));})
36914 | ||
/* Overloaded form of vcmlaq_rot90: dispatches on the three float
   vector argument types.  */
#define __arm_vcmlaq_rot90(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmlaq_rot90_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmlaq_rot90_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t)));})
36921 | ||
/* Overloaded predicated form of vrndxq: float vector variants only;
   p2 is the predicate.  */
#define __arm_vrndxq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vrndxq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vrndxq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
6df4618c | 36927 | |
/* Overloaded predicated form of vrndq: float vector variants only;
   p2 is the predicate.  */
#define __arm_vrndq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vrndq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vrndq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
6df4618c | 36933 | |
/* Overloaded predicated form of vrndpq: float vector variants only;
   p2 is the predicate.  */
#define __arm_vrndpq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vrndpq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vrndpq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
6df4618c | 36939 | |
/* Overloaded predicated form of vcmpgtq: vector/vector and
   vector/scalar variants for signed integer and float element types;
   scalar operands are passed unexpanded to __ARM_mve_coerce3 /
   __ARM_mve_coerce2 so literal constants coerce correctly.  p2 is the
   predicate.  */
#define __arm_vcmpgtq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpgtq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpgtq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpgtq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpgtq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce3(p1, int), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpgtq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce3(p1, int), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpgtq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce3(p1, int), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpgtq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce2(p1, double), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpgtq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce2(p1, double), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpgtq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpgtq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
6df4618c | 36953 | |
/* Overloaded predicated form of vcmpleq: vector/vector and
   vector/scalar variants for signed integer and float element types;
   p2 is the predicate.  */
#define __arm_vcmpleq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpleq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpleq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpleq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpleq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpleq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpleq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce3(p1, int), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpleq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce3(p1, int), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpleq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce3(p1, int), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpleq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce2(p1, double), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpleq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce2(p1, double), p2));})
6df4618c | 36967 | |
/* Overloaded predicated form of vcmpltq: vector/vector and
   vector/scalar variants for signed integer and float element types;
   p2 is the predicate.  */
#define __arm_vcmpltq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpltq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpltq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpltq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpltq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpltq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpltq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce3(p1, int), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpltq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce3(p1, int), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpltq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce3(p1, int), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpltq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce2(p1, double), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpltq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce2(p1, double), p2));})
6df4618c | 36981 | |
/* Overloaded predicated form of vcmpneq: vector/vector and
   vector/scalar variants for signed, unsigned and float element
   types; p2 is the predicate.  */
#define __arm_vcmpneq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpneq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpneq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpneq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpneq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpneq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpneq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpneq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpneq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce3(p1, int), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce3(p1, int), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce3(p1, int), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce3(p1, int), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce3(p1, int), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce3(p1, int), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpneq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce2(p1, double), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpneq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce2(p1, double), p2));})
a50f6abf | 37001 | |
/* Overloaded predicated form of vcvtbq: f16<->f32 conversion (bottom
   half), direction selected by the result/source type pair; p2 is the
   predicate.  */
#define __arm_vcvtbq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float16x8_t]: __arm_vcvtbq_m_f32_f16 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float32x4_t]: __arm_vcvtbq_m_f16_f32 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
4be8cf77 | 37007 | |
/* Overloaded predicated form of vcvttq: f16<->f32 conversion (top
   half), direction selected by the result/source type pair; p2 is the
   predicate.  */
#define __arm_vcvttq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float16x8_t]: __arm_vcvttq_m_f32_f16 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float32x4_t]: __arm_vcvttq_m_f16_f32 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
4be8cf77 | 37013 | |
e3678b44 | 37014 | #define __arm_vcvtmq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ |
d71dba7b SP |
37015 | __typeof(p1) __p1 = (p1); \ |
37016 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
e3678b44 SP |
37017 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtmq_m_s16_f16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \ |
37018 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtmq_m_s32_f32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2), \ | |
37019 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtmq_m_u16_f16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \ | |
37020 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtmq_m_u32_f32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));}) | |
d71dba7b | 37021 | |
e3678b44 | 37022 | #define __arm_vcvtnq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ |
f9355dee SP |
37023 | __typeof(p1) __p1 = (p1); \ |
37024 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
e3678b44 SP |
37025 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtnq_m_s16_f16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \ |
37026 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtnq_m_s32_f32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2), \ | |
37027 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtnq_m_u16_f16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \ | |
37028 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtnq_m_u32_f32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));}) | |
37029 | ||
e3678b44 SP |
37030 | #define __arm_vcvtpq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ |
37031 | __typeof(p1) __p1 = (p1); \ | |
37032 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
37033 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtpq_m_s16_f16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \ | |
37034 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtpq_m_s32_f32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2), \ | |
37035 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtpq_m_u16_f16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \ | |
37036 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtpq_m_u32_f32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));}) | |
37037 | ||
e3678b44 SP |
/* Predicated vdupq: duplicates scalar __p1 across the vector, dispatching on
   the inactive-value vector type of __p0; the scalar is cast to the matching
   element type.  Reconstructed from a blame-annotated listing.  */
#define __arm_vdupq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vdupq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), (int8_t) __p1, p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vdupq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), (int16_t) __p1, p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vdupq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), (int32_t) __p1, p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vdupq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), (uint8_t) __p1, p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vdupq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), (uint16_t) __p1, p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vdupq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), (uint32_t) __p1, p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vdupq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), (float16_t) __p1, p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vdupq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), (float32_t) __p1, p2));})
e3678b44 | 37049 | |
e3678b44 SP |
/* Polymorphic fused multiply-accumulate family.  Dispatch is on the typeids
   of all three operands; scalar last operands (type fp_n) route to the _n_
   variants via __ARM_mve_coerce2.  Reconstructed from a blame-annotated
   listing.  */

/* vfmaq: vector*vector+vector, or vector*scalar+vector (_n_ forms).  */
#define __arm_vfmaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vfmaq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce2(p2, double)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vfmaq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce2(p2, double)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vfmaq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vfmaq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t)));})

/* vfmsq: vector operands only (no scalar _n_ variants here).  */
#define __arm_vfmsq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vfmsq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vfmsq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t)));})

/* vfmasq: scalar addend forms only (_n_ variants).  */
#define __arm_vfmasq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vfmasq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce2(p2, double)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vfmasq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce2(p2, double)));})
261014a1 | 37072 | |
/* Polymorphic predicated max/min (IEEE maxnm/minnm) family.  The _m forms
   take two vectors; the _p (across-vector) forms take a scalar first operand
   of type fp_n, coerced via __ARM_mve_coerce2.  Reconstructed from a
   blame-annotated listing.  */

/* vmaxnmaq_m: predicated absolute-maximum, f16/f32.  */
#define __arm_vmaxnmaq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmaxnmaq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmaxnmaq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})

/* vmaxnmavq_m: two-vector form, f16/f32.  */
#define __arm_vmaxnmavq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmaxnmavq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmaxnmavq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})

/* vmaxnmvq_m: two-vector form, f16/f32.  */
#define __arm_vmaxnmvq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmaxnmvq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmaxnmvq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})

/* vmaxnmavq_p: scalar-accumulator form; p0 is a floating scalar (fp_n).  */
#define __arm_vmaxnmavq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float16x8_t]: __arm_vmaxnmavq_p_f16 (__ARM_mve_coerce2(p0, double), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float32x4_t]: __arm_vmaxnmavq_p_f32 (__ARM_mve_coerce2(p0, double), __ARM_mve_coerce(__p1, float32x4_t), p2));})

/* vmaxnmvq_p: scalar-accumulator form; p0 is a floating scalar (fp_n).  */
#define __arm_vmaxnmvq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float16x8_t]: __arm_vmaxnmvq_p_f16 (__ARM_mve_coerce2(p0, double), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float32x4_t]: __arm_vmaxnmvq_p_f32 (__ARM_mve_coerce2(p0, double), __ARM_mve_coerce(__p1, float32x4_t), p2));})

/* vminnmaq_m: predicated absolute-minimum, f16/f32.  */
#define __arm_vminnmaq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vminnmaq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vminnmaq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})

/* vminnmavq_p: scalar-accumulator form; p0 is a floating scalar (fp_n).  */
#define __arm_vminnmavq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float16x8_t]: __arm_vminnmavq_p_f16 (__ARM_mve_coerce2(p0, double), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float32x4_t]: __arm_vminnmavq_p_f32 (__ARM_mve_coerce2(p0, double), __ARM_mve_coerce(__p1, float32x4_t), p2));})

/* vminnmvq_p: scalar-accumulator form; p0 is a floating scalar (fp_n).  */
#define __arm_vminnmvq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float16x8_t]: __arm_vminnmvq_p_f16 (__ARM_mve_coerce2(p0, double), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float32x4_t]: __arm_vminnmvq_p_f32 (__ARM_mve_coerce2(p0, double), __ARM_mve_coerce(__p1, float32x4_t), p2));})
8165795c | 37120 | |
/* Polymorphic predicated rounding macros, f16/f32.  Reconstructed from a
   blame-annotated listing.  Consistency fix: the original __arm_vrndnq_m
   declared an unused __p2 copy and passed p2 in one branch but __p2 in the
   other; normalized to pass p2 directly in both branches, matching
   __arm_vrndaq_m / __arm_vrndmq_m below (the predicate is still evaluated
   exactly once per expansion).  */

/* vrndnq_m: predicated round-to-nearest-even.  */
#define __arm_vrndnq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vrndnq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vrndnq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})

/* vrndaq_m: predicated round-away-from-zero.  */
#define __arm_vrndaq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vrndaq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vrndaq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})

/* vrndmq_m: predicated round-toward-minus-infinity.  */
#define __arm_vrndmq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vrndmq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vrndmq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
8165795c | 37139 | |
/* Polymorphic predicated element-reverse macros.  Reconstructed from a
   blame-annotated listing.  */

/* vrev64q_m: all 8/16/32-bit integer types plus f16/f32.  */
#define __arm_vrev64q_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrev64q_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrev64q_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrev64q_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrev64q_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrev64q_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrev64q_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vrev64q_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vrev64q_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})

/* vrev32q_m: 8/16-bit element types only (no 32-bit or f32 variants).  */
#define __arm_vrev32q_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrev32q_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrev32q_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrev32q_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrev32q_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vrev32q_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2));})
8165795c | 37160 | |
/* vpselq: predicated select between two vectors; covers every lane width
   including 64-bit, plus f16/f32.  Reconstructed from a blame-annotated
   listing.  */
#define __arm_vpselq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vpselq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vpselq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vpselq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_int64x2_t][__ARM_mve_type_int64x2_t]: __arm_vpselq_s64 (__ARM_mve_coerce(__p0, int64x2_t), __ARM_mve_coerce(__p1, int64x2_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vpselq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vpselq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vpselq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
  int (*)[__ARM_mve_type_uint64x2_t][__ARM_mve_type_uint64x2_t]: __arm_vpselq_u64 (__ARM_mve_coerce(__p0, uint64x2_t), __ARM_mve_coerce(__p1, uint64x2_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vpselq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vpselq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
8165795c | 37174 | |
/* vcmpgeq: compare-greater-or-equal, vector-vector and vector-scalar (_n_)
   forms for signed integer and float types.  Integer scalars go through
   __ARM_mve_coerce3, float scalars through __ARM_mve_coerce2.  Reconstructed
   from a blame-annotated listing.  */
#define __arm_vcmpgeq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpgeq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpgeq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpgeq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpgeq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpgeq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpgeq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce2(p1, double)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpgeq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce2(p1, double)));})
8165795c | 37188 | |
/* vrshrnbq: rounding shift-right-narrow (bottom); result vector is half the
   element width of the source, shift amount p2 forwarded verbatim.
   Reconstructed from a blame-annotated listing.  */
#define __arm_vrshrnbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vrshrnbq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vrshrnbq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vrshrnbq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vrshrnbq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})

/* vrev16q_m: predicated byte-reverse; 8-bit element types only.  */
#define __arm_vrev16q_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrev16q_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrev16q_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2));})
37202 | ||
532e9e24 SP |
/* Polymorphic saturating shift-right-narrow family; shift amount p2 is
   forwarded verbatim to the _n_ intrinsics.  Reconstructed from a
   blame-annotated listing.  */

/* vqshruntq: unsigned-saturating narrow (top) of a signed source.  */
#define __arm_vqshruntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshruntq_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshruntq_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})

/* vqshrnbq: saturating narrow (bottom), signed and unsigned variants.  */
#define __arm_vqshrnbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshrnbq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshrnbq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqshrnbq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqshrnbq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})

/* vqshrntq: saturating narrow (top), signed and unsigned variants.  */
#define __arm_vqshrntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshrntq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshrntq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqshrntq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqshrntq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})

/* vqrshruntq: rounding unsigned-saturating narrow (top) of a signed source.  */
#define __arm_vqrshruntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshruntq_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshruntq_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
37230 | ||
532e9e24 SP |
/* Polymorphic predicated saturating-move-narrow family.  Reconstructed from
   a blame-annotated listing.  */

/* vqmovnbq_m: narrow (bottom), signed and unsigned.  */
#define __arm_vqmovnbq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovnbq_m_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovnbq_m_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqmovnbq_m_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqmovnbq_m_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})

/* vqmovntq_m: narrow (top), signed and unsigned.  */
#define __arm_vqmovntq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovntq_m_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovntq_m_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqmovntq_m_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqmovntq_m_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})

/* vqmovunbq_m: unsigned-saturating narrow (bottom) of a signed source.  */
#define __arm_vqmovunbq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovunbq_m_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovunbq_m_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})

/* vqmovuntq_m: unsigned-saturating narrow (top) of a signed source.  */
#define __arm_vqmovuntq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovuntq_m_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovuntq_m_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
37258 | ||
532e9e24 SP |
/* vqrshrntq: rounding saturating shift-right-narrow (top), signed and
   unsigned variants.  Reconstructed from a blame-annotated listing.  */
#define __arm_vqrshrntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshrntq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshrntq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqrshrntq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqrshrntq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})

/* NOTE(review): token-identical duplicate of the __arm_vqrshruntq definition
   that appears earlier in this file.  An identical redefinition is benign in
   C, but one of the two copies should eventually be removed.  */
#define __arm_vqrshruntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshruntq_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshruntq_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
37272 | ||
532e9e24 SP |
/* vnegq_m: predicated negate; signed integer and float variants only.
   Reconstructed from a blame-annotated listing.  */
#define __arm_vnegq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vnegq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vnegq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vnegq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vnegq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vnegq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
37281 | ||
532e9e24 SP |
/* vcmpgeq_m: predicated compare-greater-or-equal, vector-vector and
   vector-scalar (_n_) forms; integer scalars use __ARM_mve_coerce3, float
   scalars __ARM_mve_coerce2.  Reconstructed from a blame-annotated listing.  */
#define __arm_vcmpgeq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpgeq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpgeq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpgeq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce3(p1, int), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce3(p1, int), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce3(p1, int), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpgeq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce2(p1, double), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpgeq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce2(p1, double), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpgeq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpgeq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
37295 | ||
/* Predicated vabdq: _Generic dispatch over (inactive, a, b) vector types
   to the matching type-suffixed implementation; covers s8/s16/s32,
   u8/u16/u32 and f16/f32.  p3 is the predicate passed through.  */
#define __arm_vabdq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vabdq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vabdq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vabdq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vabdq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vabdq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vabdq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vabdq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vabdq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
37308 | ||
/* Predicated vaddq: _Generic dispatch over (inactive, a, b).  Supports
   vector+vector for s/u 8/16/32 and f16/f32, plus vector+scalar (_n)
   forms where the scalar is coerced to int (integer lanes) or double
   (float lanes).  p3 is the predicate passed through.  */
#define __arm_vaddq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vaddq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vaddq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vaddq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vaddq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vaddq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vaddq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vaddq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vaddq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vaddq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce2(p2, double), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vaddq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce2(p2, double), p3));})
532e9e24 | 37329 | |
/* Predicated vandq (bitwise AND): _Generic dispatch over (inactive, a, b)
   vector types; covers s8/s16/s32, u8/u16/u32 and f16/f32.  */
#define __arm_vandq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vandq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vandq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vandq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vandq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vandq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vandq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vandq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vandq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
37342 | ||
/* Predicated vbicq (bitwise AND-NOT): _Generic dispatch over
   (inactive, a, b) vector types; covers s8/s16/s32, u8/u16/u32 and
   f16/f32.  */
#define __arm_vbicq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vbicq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vbicq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vbicq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vbicq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vbicq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vbicq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vbicq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vbicq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
37355 | ||
/* Predicated vbrsrq (bit-reverse shift right): _Generic dispatch on the
   two vector operand types; the scalar shift p2 and the predicate p3 are
   passed through unchanged.  Covers s8/s16/s32, u8/u16/u32 and f16/f32.  */
#define __arm_vbrsrq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vbrsrq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vbrsrq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vbrsrq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vbrsrq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vbrsrq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vbrsrq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vbrsrq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vbrsrq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2, p3));})
37367 | ||
/* Predicated vcaddq_rot270 (complex add, rotate 270): _Generic dispatch
   over (inactive, a, b) vector types; covers s8/s16/s32, u8/u16/u32 and
   f16/f32.  */
#define __arm_vcaddq_rot270_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot270_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot270_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot270_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot270_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot270_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot270_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcaddq_rot270_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcaddq_rot270_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
37380 | ||
/* Predicated vcaddq_rot90 (complex add, rotate 90): _Generic dispatch
   over (inactive, a, b) vector types; covers s8/s16/s32, u8/u16/u32 and
   f16/f32.  */
#define __arm_vcaddq_rot90_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot90_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot90_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot90_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot90_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot90_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot90_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcaddq_rot90_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcaddq_rot90_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
37393 | ||
/* Predicated vcmlaq (complex multiply-accumulate): _Generic dispatch over
   (add, a, b) vector types; float-only (f16/f32).  */
#define __arm_vcmlaq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmlaq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmlaq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
37400 | ||
/* Predicated vcmlaq_rot180 (complex multiply-accumulate, rotate 180):
   _Generic dispatch over (add, a, b) vector types; float-only.  */
#define __arm_vcmlaq_rot180_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmlaq_rot180_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmlaq_rot180_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
37407 | ||
/* Predicated vcmlaq_rot270 (complex multiply-accumulate, rotate 270):
   _Generic dispatch over (add, a, b) vector types; float-only.  */
#define __arm_vcmlaq_rot270_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmlaq_rot270_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmlaq_rot270_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
37414 | ||
/* Predicated vcmlaq_rot90 (complex multiply-accumulate, rotate 90):
   _Generic dispatch over (add, a, b) vector types; float-only.  */
#define __arm_vcmlaq_rot90_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmlaq_rot90_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmlaq_rot90_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
8165795c | 37421 | |
/* Predicated vcmulq (complex multiply): _Generic dispatch over
   (inactive, a, b) vector types; float-only (f16/f32).  */
#define __arm_vcmulq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
8165795c | 37428 | |
/* Predicated vcmulq_rot180 (complex multiply, rotate 180): _Generic
   dispatch over (inactive, a, b) vector types; float-only.  */
#define __arm_vcmulq_rot180_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_rot180_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_rot180_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
8165795c | 37435 | |
/* Predicated vcmulq_rot270 (complex multiply, rotate 270): _Generic
   dispatch over (inactive, a, b) vector types; float-only.  */
#define __arm_vcmulq_rot270_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_rot270_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_rot270_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
8165795c | 37442 | |
/* Predicated vcmulq_rot90 (complex multiply, rotate 90): _Generic
   dispatch over (inactive, a, b) vector types; float-only.  Whitespace
   normalized to match the sibling vcmulq macros (stray space in the
   selector cast and missing spaces before the call parens).  */
#define __arm_vcmulq_rot90_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_rot90_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_rot90_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
33203b4c | 37449 | |
/* Predicated veorq (bitwise XOR): _Generic dispatch over (inactive, a, b)
   vector types; covers s8/s16/s32, u8/u16/u32 and f16/f32.  */
#define __arm_veorq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_veorq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_veorq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_veorq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_veorq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_veorq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_veorq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_veorq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_veorq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
33203b4c | 37462 | |
/* Predicated vfmaq (fused multiply-accumulate): _Generic dispatch over
   (add, m1, m2).  Float-only; vector/vector forms plus _n forms where
   the scalar multiplier is coerced to double.  */
#define __arm_vfmaq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vfmaq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vfmaq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vfmaq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce2(p2, double), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vfmaq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce2(p2, double), p3));})
0dad5b33 | 37471 | |
/* Predicated vfmasq (fused multiply-accumulate with scalar addend):
   _Generic dispatch; float-only, scalar-addend (_n) forms only, with the
   scalar coerced to double.  */
#define __arm_vfmasq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vfmasq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce2(p2, double), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vfmasq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce2(p2, double), p3));})
8165795c | 37478 | |
/* Predicated vfmsq (fused multiply-subtract): _Generic dispatch over
   (add, m1, m2) vector types; float-only (f16/f32).  */
#define __arm_vfmsq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vfmsq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vfmsq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
8165795c | 37485 | |
/* Predicated vmaxnmq (floating-point maximum number): _Generic dispatch
   over (inactive, a, b) vector types; float-only (f16/f32).  */
#define __arm_vmaxnmq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmaxnmq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmaxnmq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
0dad5b33 | 37492 | |
/* Predicated vminnmq (floating-point minimum number): _Generic dispatch
   over (inactive, a, b) vector types; float-only (f16/f32).  */
#define __arm_vminnmq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vminnmq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vminnmq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
0dad5b33 | 37499 | |
/* Polymorphic dispatcher for vmulq_m.  Selects the type-suffixed
   __arm_vmulq_m_<t> overload via _Generic on the MVE type ids of p0
   (inactive value), p1 and p2.  Both vector/vector forms and
   vector/scalar (_n) forms are handled: a scalar integer p2 maps to the
   _n_[su]* overloads via __ARM_mve_coerce3, a scalar floating-point p2
   to the _n_f* overloads via __ARM_mve_coerce2.  p3 is forwarded
   unchanged to the selected overload.  */
#define __arm_vmulq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmulq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmulq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vmulq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce2(p2, double), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vmulq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce2(p2, double), p3));})
0dad5b33 | 37520 | |
/* Polymorphic dispatcher for vornq_m.  Selects the type-suffixed
   __arm_vornq_m_<t> overload via _Generic on the MVE type ids of p0,
   p1 and p2 (vector/vector forms only; no scalar _n variants here).
   p3 is forwarded unchanged to the selected overload.  */
#define __arm_vornq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vornq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vornq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vornq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vornq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vornq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vornq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vornq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vornq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
0dad5b33 | 37533 | |
/* Polymorphic dispatcher for vsubq_m.  Same dispatch scheme as
   __arm_vmulq_m above: _Generic on the MVE type ids of p0, p1 and p2
   selects either a vector/vector overload or, for a scalar p2, a _n
   overload (integer scalars via __ARM_mve_coerce3, floating-point
   scalars via __ARM_mve_coerce2).  p3 is forwarded unchanged.  */
#define __arm_vsubq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsubq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsubq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsubq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsubq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsubq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsubq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vsubq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vsubq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vsubq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce2(p2, double), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vsubq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce2(p2, double), p3));})
e3678b44 | 37554 | |
/* Polymorphic dispatcher for vorrq_m.  Same vector/vector-only dispatch
   scheme as __arm_vornq_m: _Generic on the MVE type ids of p0, p1 and
   p2 selects the type-suffixed overload; p3 is forwarded unchanged.  */
#define __arm_vorrq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vorrq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vorrq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vorrq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vorrq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vorrq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vorrq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vorrq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vorrq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
0dad5b33 | 37567 | |
/* Polymorphic dispatcher for vld1q.  _Generic on the MVE type id of the
   pointer p0 picks the element-typed overload; __ARM_mve_coerce1
   converts p0 to the concrete element-pointer type expected by the
   selected overload.  */
#define __arm_vld1q(p0) (\
  _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
  int (*)[__ARM_mve_type_int8_t_ptr]: __arm_vld1q_s8 (__ARM_mve_coerce1(p0, int8_t *)), \
  int (*)[__ARM_mve_type_int16_t_ptr]: __arm_vld1q_s16 (__ARM_mve_coerce1(p0, int16_t *)), \
  int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vld1q_s32 (__ARM_mve_coerce1(p0, int32_t *)), \
  int (*)[__ARM_mve_type_uint8_t_ptr]: __arm_vld1q_u8 (__ARM_mve_coerce1(p0, uint8_t *)), \
  int (*)[__ARM_mve_type_uint16_t_ptr]: __arm_vld1q_u16 (__ARM_mve_coerce1(p0, uint16_t *)), \
  int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vld1q_u32 (__ARM_mve_coerce1(p0, uint32_t *)), \
  int (*)[__ARM_mve_type_float16_t_ptr]: __arm_vld1q_f16 (__ARM_mve_coerce1(p0, float16_t *)), \
  int (*)[__ARM_mve_type_float32_t_ptr]: __arm_vld1q_f32 (__ARM_mve_coerce1(p0, float32_t *))))
bf1e3d5a | 37578 | |
/* Polymorphic dispatcher for vld1q_z (predicated form of vld1q).
   Dispatches on the MVE type id of pointer p0, coercing it with
   __ARM_mve_coerce1; p1 is forwarded unchanged to the selected
   overload.  */
#define __arm_vld1q_z(p0,p1) ( \
  _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
  int (*)[__ARM_mve_type_int8_t_ptr]: __arm_vld1q_z_s8 (__ARM_mve_coerce1(p0, int8_t *), p1), \
  int (*)[__ARM_mve_type_int16_t_ptr]: __arm_vld1q_z_s16 (__ARM_mve_coerce1(p0, int16_t *), p1), \
  int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vld1q_z_s32 (__ARM_mve_coerce1(p0, int32_t *), p1), \
  int (*)[__ARM_mve_type_uint8_t_ptr]: __arm_vld1q_z_u8 (__ARM_mve_coerce1(p0, uint8_t *), p1), \
  int (*)[__ARM_mve_type_uint16_t_ptr]: __arm_vld1q_z_u16 (__ARM_mve_coerce1(p0, uint16_t *), p1), \
  int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vld1q_z_u32 (__ARM_mve_coerce1(p0, uint32_t *), p1), \
  int (*)[__ARM_mve_type_float16_t_ptr]: __arm_vld1q_z_f16 (__ARM_mve_coerce1(p0, float16_t *), p1), \
  int (*)[__ARM_mve_type_float32_t_ptr]: __arm_vld1q_z_f32 (__ARM_mve_coerce1(p0, float32_t *), p1)))
1dfcc3b5 | 37589 | |
/* Polymorphic dispatcher for vld2q.  Dispatches on the MVE type id of
   pointer p0 (coerced with __ARM_mve_coerce1) to the element-typed
   overload.  */
#define __arm_vld2q(p0) ( \
  _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
  int (*)[__ARM_mve_type_int8_t_ptr]: __arm_vld2q_s8 (__ARM_mve_coerce1(p0, int8_t *)), \
  int (*)[__ARM_mve_type_int16_t_ptr]: __arm_vld2q_s16 (__ARM_mve_coerce1(p0, int16_t *)), \
  int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vld2q_s32 (__ARM_mve_coerce1(p0, int32_t *)), \
  int (*)[__ARM_mve_type_uint8_t_ptr]: __arm_vld2q_u8 (__ARM_mve_coerce1(p0, uint8_t *)), \
  int (*)[__ARM_mve_type_uint16_t_ptr]: __arm_vld2q_u16 (__ARM_mve_coerce1(p0, uint16_t *)), \
  int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vld2q_u32 (__ARM_mve_coerce1(p0, uint32_t *)), \
  int (*)[__ARM_mve_type_float16_t_ptr]: __arm_vld2q_f16 (__ARM_mve_coerce1(p0, float16_t *)), \
  int (*)[__ARM_mve_type_float32_t_ptr]: __arm_vld2q_f32 (__ARM_mve_coerce1(p0, float32_t *))))
1dfcc3b5 | 37600 | |
/* Polymorphic dispatcher for vld4q.  Same pointer-type dispatch scheme
   as __arm_vld2q, selecting the __arm_vld4q_<t> overload.  */
#define __arm_vld4q(p0) ( \
  _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
  int (*)[__ARM_mve_type_int8_t_ptr]: __arm_vld4q_s8 (__ARM_mve_coerce1(p0, int8_t *)), \
  int (*)[__ARM_mve_type_int16_t_ptr]: __arm_vld4q_s16 (__ARM_mve_coerce1(p0, int16_t *)), \
  int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vld4q_s32 (__ARM_mve_coerce1(p0, int32_t *)), \
  int (*)[__ARM_mve_type_uint8_t_ptr]: __arm_vld4q_u8 (__ARM_mve_coerce1(p0, uint8_t *)), \
  int (*)[__ARM_mve_type_uint16_t_ptr]: __arm_vld4q_u16 (__ARM_mve_coerce1(p0, uint16_t *)), \
  int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vld4q_u32 (__ARM_mve_coerce1(p0, uint32_t *)), \
  int (*)[__ARM_mve_type_float16_t_ptr]: __arm_vld4q_f16 (__ARM_mve_coerce1(p0, float16_t *)), \
  int (*)[__ARM_mve_type_float32_t_ptr]: __arm_vld4q_f32 (__ARM_mve_coerce1(p0, float32_t *))))
1dfcc3b5 | 37611 | |
/* Polymorphic dispatcher for vldrhq_gather_offset.  Dispatches on the
   MVE type ids of both the base pointer p0 and the offset vector p1;
   a 16-bit element pointer pairs with either a uint16x8_t or a
   uint32x4_t offset vector, selecting the 16- or 32-bit result
   overload respectively.  */
#define __arm_vldrhq_gather_offset(p0,p1) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_s16 (__ARM_mve_coerce1(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_offset_s32 (__ARM_mve_coerce1(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_u16 (__ARM_mve_coerce1(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_offset_u32 (__ARM_mve_coerce1(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_f16 (__ARM_mve_coerce1(p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t)));})
4cc23303 | 37619 | |
/* Polymorphic dispatcher for vldrhq_gather_offset_z (predicated form).
   Same pointer/offset-vector dispatch as __arm_vldrhq_gather_offset;
   p2 is forwarded unchanged to the selected overload.  */
#define __arm_vldrhq_gather_offset_z(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_z_s16 (__ARM_mve_coerce1(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_offset_z_s32 (__ARM_mve_coerce1(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_z_u16 (__ARM_mve_coerce1(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_offset_z_u32 (__ARM_mve_coerce1(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
  int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_z_f16 (__ARM_mve_coerce1(p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2));})
4cc23303 | 37627 | |
/* Polymorphic dispatcher for vldrhq_gather_shifted_offset.  Same
   pointer/offset-vector dispatch scheme as __arm_vldrhq_gather_offset,
   selecting the _shifted_offset overloads instead.  */
#define __arm_vldrhq_gather_shifted_offset(p0,p1) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_s16 (__ARM_mve_coerce1(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_s32 (__ARM_mve_coerce1(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_u16 (__ARM_mve_coerce1(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_u32 (__ARM_mve_coerce1(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_f16 (__ARM_mve_coerce1(p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t)));})
4cc23303 | 37635 | |
/* Polymorphic dispatcher for vldrhq_gather_shifted_offset_z (predicated
   form).  Same dispatch as __arm_vldrhq_gather_shifted_offset; p2 is
   forwarded unchanged to the selected overload.  */
#define __arm_vldrhq_gather_shifted_offset_z(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_z_s16 (__ARM_mve_coerce1(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_z_s32 (__ARM_mve_coerce1(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_z_u16 (__ARM_mve_coerce1(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_z_u32 (__ARM_mve_coerce1(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
  int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_z_f16 (__ARM_mve_coerce1(p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2));})
4cc23303 | 37643 | |
/* Polymorphic dispatcher for vldrwq_gather_offset.  32-bit elements
   only, so dispatch is on the base pointer p0 alone; the offset vector
   p1 is forwarded unchanged.  */
#define __arm_vldrwq_gather_offset(p0,p1) ( \
  _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
  int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vldrwq_gather_offset_s32 (__ARM_mve_coerce1(p0, int32_t *), p1), \
  int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vldrwq_gather_offset_u32 (__ARM_mve_coerce1(p0, uint32_t *), p1), \
  int (*)[__ARM_mve_type_float32_t_ptr]: __arm_vldrwq_gather_offset_f32 (__ARM_mve_coerce1(p0, float32_t *), p1)))
4cc23303 | 37649 | |
/* Polymorphic dispatcher for vldrwq_gather_offset_z (predicated form).
   Dispatch on p0 only; p1 and p2 are forwarded unchanged.  */
#define __arm_vldrwq_gather_offset_z(p0,p1,p2) ( \
  _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
  int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vldrwq_gather_offset_z_s32 (__ARM_mve_coerce1(p0, int32_t *), p1, p2), \
  int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vldrwq_gather_offset_z_u32 (__ARM_mve_coerce1(p0, uint32_t *), p1, p2), \
  int (*)[__ARM_mve_type_float32_t_ptr]: __arm_vldrwq_gather_offset_z_f32 (__ARM_mve_coerce1(p0, float32_t *), p1, p2)))
4cc23303 | 37655 | |
/* Polymorphic dispatcher for vldrwq_gather_shifted_offset.  Dispatch on
   p0 only; p1 is forwarded unchanged.  */
#define __arm_vldrwq_gather_shifted_offset(p0,p1) ( \
  _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
  int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vldrwq_gather_shifted_offset_s32 (__ARM_mve_coerce1(p0, int32_t *), p1), \
  int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vldrwq_gather_shifted_offset_u32 (__ARM_mve_coerce1(p0, uint32_t *), p1), \
  int (*)[__ARM_mve_type_float32_t_ptr]: __arm_vldrwq_gather_shifted_offset_f32 (__ARM_mve_coerce1(p0, float32_t *), p1)))
4cc23303 | 37661 | |
/* Polymorphic dispatcher for vldrwq_gather_shifted_offset_z (predicated
   form).  Dispatch on p0 only; p1 and p2 are forwarded unchanged.  */
#define __arm_vldrwq_gather_shifted_offset_z(p0,p1,p2) ( \
  _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
  int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vldrwq_gather_shifted_offset_z_s32 (__ARM_mve_coerce1(p0, int32_t *), p1, p2), \
  int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vldrwq_gather_shifted_offset_z_u32 (__ARM_mve_coerce1(p0, uint32_t *), p1, p2), \
  int (*)[__ARM_mve_type_float32_t_ptr]: __arm_vldrwq_gather_shifted_offset_z_f32 (__ARM_mve_coerce1(p0, float32_t *), p1, p2)))
4cc23303 | 37667 | |
/* Polymorphic dispatcher for vst1q_p (predicated store).  Dispatches on
   the MVE type ids of the destination pointer p0 and the value vector
   p1; p2 is forwarded unchanged.
   NOTE(review): the pointer argument goes through __ARM_mve_coerce
   here, whereas the vld1q/vldr* load family uses __ARM_mve_coerce1 for
   its pointer argument — confirm the asymmetry is intentional.  */
#define __arm_vst1q_p(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16_t]: __arm_vst1q_p_s8 (__ARM_mve_coerce(p0, int8_t *), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vst1q_p_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vst1q_p_s32 (__ARM_mve_coerce(p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vst1q_p_u8 (__ARM_mve_coerce(p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vst1q_p_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vst1q_p_u32 (__ARM_mve_coerce(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
  int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_float16x8_t]: __arm_vst1q_p_f16 (__ARM_mve_coerce(p0, float16_t *), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vst1q_p_f32 (__ARM_mve_coerce(p0, float32_t *), __ARM_mve_coerce(__p1, float32x4_t), p2));})
1dfcc3b5 | 37678 | |
/* Polymorphic dispatcher for vst2q.  Dispatches on the MVE type ids of
   the destination pointer p0 and the two-register tuple p1
   (<t>x<n>x2_t types).  */
#define __arm_vst2q(p0,p1) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16x2_t]: __arm_vst2q_s8 (__ARM_mve_coerce(p0, int8_t *), __ARM_mve_coerce(__p1, int8x16x2_t)), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8x2_t]: __arm_vst2q_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, int16x8x2_t)), \
  int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4x2_t]: __arm_vst2q_s32 (__ARM_mve_coerce(p0, int32_t *), __ARM_mve_coerce(__p1, int32x4x2_t)), \
  int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16x2_t]: __arm_vst2q_u8 (__ARM_mve_coerce(p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16x2_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8x2_t]: __arm_vst2q_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8x2_t)), \
  int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4x2_t]: __arm_vst2q_u32 (__ARM_mve_coerce(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4x2_t)), \
  int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_float16x8x2_t]: __arm_vst2q_f16 (__ARM_mve_coerce(p0, float16_t *), __ARM_mve_coerce(__p1, float16x8x2_t)), \
  int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4x2_t]: __arm_vst2q_f32 (__ARM_mve_coerce(p0, float32_t *), __ARM_mve_coerce(__p1, float32x4x2_t)));})
1dfcc3b5 | 37689 | |
/* Polymorphic dispatcher for vst1q.  Dispatches on the MVE type ids of
   the destination pointer p0 and the value vector p1.
   NOTE(review): p0 uses __ARM_mve_coerce while the vld1q counterpart
   uses __ARM_mve_coerce1 — confirm the asymmetry is intentional.  */
#define __arm_vst1q(p0,p1) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16_t]: __arm_vst1q_s8 (__ARM_mve_coerce(p0, int8_t *), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vst1q_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vst1q_s32 (__ARM_mve_coerce(p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vst1q_u8 (__ARM_mve_coerce(p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vst1q_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vst1q_u32 (__ARM_mve_coerce(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_float16x8_t]: __arm_vst1q_f16 (__ARM_mve_coerce(p0, float16_t *), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vst1q_f32 (__ARM_mve_coerce(p0, float32_t *), __ARM_mve_coerce(__p1, float32x4_t)));})
5cad47e0 | 37700 | |
/* Polymorphic dispatcher for vstrhq (halfword store).  Dispatches on
   the MVE type ids of the destination pointer p0 and the value vector
   p1; a 16-bit element pointer pairs with either a 16-bit-lane or a
   32-bit-lane value vector.  */
#define __arm_vstrhq(p0,p1) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vstrhq_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrhq_s32 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_float16x8_t]: __arm_vstrhq_f16 (__ARM_mve_coerce(p0, float16_t *), __ARM_mve_coerce(__p1, float16x8_t)));})
5cad47e0 | 37708 | |
/* Polymorphic dispatcher for vstrhq_p (predicated halfword store).
   Same pointer/value dispatch as __arm_vstrhq; p2 is forwarded
   unchanged to the selected overload.  */
#define __arm_vstrhq_p(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vstrhq_p_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrhq_p_s32 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_p_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_p_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
  int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_float16x8_t]: __arm_vstrhq_p_f16 (__ARM_mve_coerce(p0, float16_t *), __ARM_mve_coerce(__p1, float16x8_t), p2));})
5cad47e0 | 37716 | |
/* Polymorphic dispatcher for vstrhq_scatter_offset_p (predicated
   halfword scatter store).  Dispatches on the MVE type ids of the base
   pointer p0, the offset vector p1 and the value vector p2; p3 is
   forwarded unchanged to the selected overload.  */
#define __arm_vstrhq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_offset_p_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_offset_p_s32 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_offset_p_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_offset_p_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vstrhq_scatter_offset_p_f16 (__ARM_mve_coerce(p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3));})
5cad47e0 | 37725 | |
/* Polymorphic dispatcher for vstrhq_scatter_offset (halfword scatter
   store).  Dispatches on the MVE type ids of the base pointer p0, the
   offset vector p1 and the value vector p2.  */
#define __arm_vstrhq_scatter_offset(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_offset_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_offset_s32 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_offset_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_offset_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vstrhq_scatter_offset_f16 (__ARM_mve_coerce(p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, float16x8_t)));})
5cad47e0 | 37734 | |
/* Polymorphic dispatcher for vstrhq_scatter_shifted_offset_p
   (predicated).  Same base/offset/value dispatch as
   __arm_vstrhq_scatter_offset_p, selecting the _shifted_offset
   overloads; p3 is forwarded unchanged.  */
#define __arm_vstrhq_scatter_shifted_offset_p(p0,p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_shifted_offset_p_s32 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_shifted_offset_p_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_f16 (__ARM_mve_coerce(p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3));})
5cad47e0 | 37743 | |
3ce755a8 | 37744 | #define __arm_vstrhq_scatter_shifted_offset(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \ |
5cad47e0 | 37745 | __typeof(p2) __p2 = (p2); \ |
3ce755a8 ASDV |
37746 | _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ |
37747 | int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_shifted_offset_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \ | |
37748 | int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_shifted_offset_s32 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \ | |
37749 | int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_shifted_offset_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \ | |
37750 | int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_shifted_offset_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)), \ | |
37751 | int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vstrhq_scatter_shifted_offset_f16 (__ARM_mve_coerce(p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, float16x8_t)));}) | |
5cad47e0 | 37752 | |
/* Predicated contiguous store of four 32-bit elements; dispatches on the
   pointer/vector types via _Generic.  p2 is the predicate.  */
#define __arm_vstrwq_p(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_p_s32 (__ARM_mve_coerce(p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_p_u32 (__ARM_mve_coerce(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
  int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_p_f32 (__ARM_mve_coerce(p0, float32_t *), __ARM_mve_coerce(__p1, float32x4_t), p2));})

/* Unpredicated contiguous store of four 32-bit elements.  */
#define __arm_vstrwq(p0,p1) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_s32 (__ARM_mve_coerce(p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_u32 (__ARM_mve_coerce(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_f32 (__ARM_mve_coerce(p0, float32_t *), __ARM_mve_coerce(__p1, float32x4_t)));})
5cad47e0 | 37764 | |
/* NOTE(review): this region contained token-identical second definitions of
   __arm_vstrhq_scatter_offset, __arm_vstrhq_scatter_shifted_offset and
   __arm_vstrhq_scatter_shifted_offset_p.  Identical macro redefinition is
   permitted by C (C11 6.10.3p2) but redundant; the duplicates have been
   removed — the first copies appear earlier in this file.  */

/* Predicated scatter store of halfwords with (unshifted) byte offsets.
   Dispatches via _Generic on the base-pointer and vector argument types;
   p3 is the predicate.  */
#define __arm_vstrhq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_offset_p_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_offset_p_s32 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_offset_p_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_offset_p_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vstrhq_scatter_offset_p_f16 (__ARM_mve_coerce(p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3));})
7a5fffa5 | 37800 | |
/* Unpredicated scatter store of words to a vector of base addresses (p0)
   plus an immediate offset (p1); dispatches on the value-vector type.  */
#define __arm_vstrwq_scatter_base(p0,p1,p2) ({ __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_base_s32 (p0, p1, __ARM_mve_coerce(__p2, int32x4_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_base_u32 (p0, p1, __ARM_mve_coerce(__p2, uint32x4_t)), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_base_f32 (p0, p1, __ARM_mve_coerce(__p2, float32x4_t)));})

/* Predicated variant of the above; p3 is the predicate.  */
#define __arm_vstrwq_scatter_base_p(p0,p1,p2,p3) ({ __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_base_p_s32(p0, p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_base_p_u32(p0, p1, __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_base_p_f32(p0, p1, __ARM_mve_coerce(__p2, float32x4_t), p3));})
37812 | ||
/* Unpredicated scatter store of words with byte offsets (p1); dispatches on
   the base-pointer and value-vector types.  */
#define __arm_vstrwq_scatter_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_offset_s32 (__ARM_mve_coerce(__p0, int32_t *), p1, __ARM_mve_coerce(__p2, int32x4_t)), \
  int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_offset_u32 (__ARM_mve_coerce(__p0, uint32_t *), p1, __ARM_mve_coerce(__p2, uint32x4_t)), \
  int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_offset_f32 (__ARM_mve_coerce(__p0, float32_t *), p1, __ARM_mve_coerce(__p2, float32x4_t)));})

/* Predicated variant of the above; p3 is the predicate.  */
#define __arm_vstrwq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_offset_p_s32 (__ARM_mve_coerce(__p0, int32_t *), p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_offset_p_u32 (__ARM_mve_coerce(__p0, uint32_t *), p1, __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_offset_p_f32 (__ARM_mve_coerce(__p0, float32_t *), p1, __ARM_mve_coerce(__p2, float32x4_t), p3));})
7a5fffa5 | 37826 | |
/* NOTE(review): __arm_vstrwq_scatter_shifted_offset and its _p variant were
   each defined twice here with byte-identical bodies; the redundant second
   copies have been removed (identical redefinition is legal C but dead
   weight).  */

/* Unpredicated scatter store of words with left-shifted offsets; dispatches
   on the base-pointer and value-vector types.  */
#define __arm_vstrwq_scatter_shifted_offset(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_shifted_offset_s32 (__ARM_mve_coerce(p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t)), \
  int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_shifted_offset_u32 (__ARM_mve_coerce(p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t)), \
  int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_shifted_offset_f32 (__ARM_mve_coerce(p0, float32_t *), __p1, __ARM_mve_coerce(__p2, float32x4_t)));})

/* Predicated variant of the above; p3 is the predicate.  */
#define __arm_vstrwq_scatter_shifted_offset_p(p0,p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_shifted_offset_p_s32 (__ARM_mve_coerce(p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_shifted_offset_p_u32 (__ARM_mve_coerce(p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_shifted_offset_p_f32 (__ARM_mve_coerce(p0, float32_t *), __p1, __ARM_mve_coerce(__p2, float32x4_t), p3));})
7a5fffa5 | 37854 | |
/* Return an uninitialized vector of the same type as p0; p0 is evaluated
   only for its type (the _Generic controlling expression).  */
#define __arm_vuninitializedq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vuninitializedq_s8 (), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vuninitializedq_s16 (), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vuninitializedq_s32 (), \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vuninitializedq_s64 (), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vuninitializedq_u8 (), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vuninitializedq_u16 (), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vuninitializedq_u32 (), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vuninitializedq_u64 (), \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vuninitializedq_f16 (), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vuninitializedq_f32 ());})
37867 | ||
/* Type-generic vreinterpret family: bit-cast the 128-bit vector p0 to the
   element type named by the macro suffix, dispatching on p0's type.  The
   order of the _Generic associations varies between macros (as in the
   original source); it has no semantic effect.  */

#define __arm_vreinterpretq_f16(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_f16_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_f16_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_f16_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_f16_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_f16_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_f16_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_f16_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_f16_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_f16_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})

#define __arm_vreinterpretq_f32(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_f32_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_f32_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_f32_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_f32_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_f32_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_f32_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_f32_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_f32_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_f32_f16 (__ARM_mve_coerce(__p0, float16x8_t)));})

#define __arm_vreinterpretq_s16(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_s16_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_s16_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_s16_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_s16_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_s16_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_s16_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_s16_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_s16_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_s16_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})

#define __arm_vreinterpretq_s32(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_s32_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_s32_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_s32_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_s32_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_s32_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_s32_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_s32_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_s32_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_s32_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})

#define __arm_vreinterpretq_s64(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_s64_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_s64_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_s64_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_s64_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_s64_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_s64_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_s64_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_s64_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_s64_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})

#define __arm_vreinterpretq_s8(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_s8_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_s8_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_s8_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_s8_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_s8_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_s8_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_s8_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_s8_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_s8_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})

#define __arm_vreinterpretq_u16(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_u16_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_u16_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_u16_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_u16_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_u16_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_u16_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_u16_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_u16_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_u16_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})

#define __arm_vreinterpretq_u32(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_u32_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_u32_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_u32_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_u32_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_u32_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_u32_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_u32_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_u32_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_u32_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})

#define __arm_vreinterpretq_u64(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_u64_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_u64_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_u64_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_u64_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_u64_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_u64_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_u64_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_u64_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_u64_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})

#define __arm_vreinterpretq_u8(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_u8_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_u8_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_u8_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_u8_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_u8_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_u8_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_u8_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_u8_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_u8_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
37987 | ||
/* Unpredicated scatter store of words with base write-back: p0 is the
   (updated) base-address vector, p1 the immediate offset; dispatches on the
   value-vector type.  */
#define __arm_vstrwq_scatter_base_wb(p0,p1,p2) ({ __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_base_wb_s32 (p0, p1, __ARM_mve_coerce(__p2, int32x4_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_base_wb_u32 (p0, p1, __ARM_mve_coerce(__p2, uint32x4_t)), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_base_wb_f32 (p0, p1, __ARM_mve_coerce(__p2, float32x4_t)));})

/* Predicated variant of the above; p3 is the predicate.  */
#define __arm_vstrwq_scatter_base_wb_p(p0,p1,p2,p3) ({ __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_base_wb_p_s32 (p0, p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_base_wb_p_u32 (p0, p1, __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_base_wb_p_f32 (p0, p1, __ARM_mve_coerce(__p2, float32x4_t), p3));})
37999 | ||
/* Predicated-with-undefined-inactive ("_x") absolute difference; dispatches
   on the two vector operand types.  p3 is the predicate.  */
#define __arm_vabdq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vabdq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vabdq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vabdq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vabdq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vabdq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vabdq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vabdq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vabdq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})

/* Predicated "_x" absolute value; signed and float element types only.
   p2 is the predicate.  */
#define __arm_vabsq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vabsq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vabsq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vabsq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vabsq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vabsq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));})
38019 | ||
261014a1 SP |
38020 | #define __arm_vaddq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ |
38021 | __typeof(p2) __p2 = (p2); \ | |
38022 | _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
38023 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vaddq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \ | |
38024 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vaddq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ | |
38025 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vaddq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ | |
31df339a SMW |
38026 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce3(p2, int), p3), \ |
38027 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce3(p2, int), p3), \ | |
38028 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce3(p2, int), p3), \ | |
261014a1 SP |
38029 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vaddq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ |
38030 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vaddq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ | |
38031 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vaddq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \ | |
31df339a SMW |
38032 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce3(p2, int), p3), \ |
38033 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce3(p2, int), p3), \ | |
38034 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce3(p2, int), p3), \ | |
261014a1 SP |
38035 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vaddq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \ |
38036 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vaddq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3), \ | |
31df339a SMW |
38037 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vaddq_x_n_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce2(p2, double), p3), \ |
38038 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vaddq_x_n_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce2(p2, double), p3));}) | |
261014a1 | 38039 | |
261014a1 SP |
38040 | #define __arm_vandq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ |
38041 | __typeof(p2) __p2 = (p2); \ | |
38042 | _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
38043 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vandq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \ | |
38044 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vandq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ | |
38045 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vandq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ | |
38046 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vandq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ | |
38047 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vandq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ | |
38048 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vandq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \ | |
38049 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vandq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \ | |
38050 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vandq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));}) | |
38051 | ||
261014a1 SP |
38052 | #define __arm_vbicq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ |
38053 | __typeof(p2) __p2 = (p2); \ | |
38054 | _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
38055 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vbicq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \ | |
38056 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vbicq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ | |
38057 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vbicq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ | |
38058 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vbicq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ | |
38059 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vbicq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ | |
38060 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vbicq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \ | |
38061 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vbicq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \ | |
38062 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vbicq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));}) | |
38063 | ||
261014a1 SP |
38064 | #define __arm_vbrsrq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ |
38065 | _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ | |
38066 | int (*)[__ARM_mve_type_int8x16_t]: __arm_vbrsrq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2, p3), \ | |
38067 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vbrsrq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2, p3), \ | |
38068 | int (*)[__ARM_mve_type_int32x4_t]: __arm_vbrsrq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2, p3), \ | |
38069 | int (*)[__ARM_mve_type_uint8x16_t]: __arm_vbrsrq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \ | |
38070 | int (*)[__ARM_mve_type_uint16x8_t]: __arm_vbrsrq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \ | |
38071 | int (*)[__ARM_mve_type_uint32x4_t]: __arm_vbrsrq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2, p3), \ | |
38072 | int (*)[__ARM_mve_type_float16x8_t]: __arm_vbrsrq_x_n_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2, p3), \ | |
38073 | int (*)[__ARM_mve_type_float32x4_t]: __arm_vbrsrq_x_n_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2, p3));}) | |
38074 | ||
261014a1 SP |
38075 | #define __arm_vcaddq_rot270_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ |
38076 | __typeof(p2) __p2 = (p2); \ | |
38077 | _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
38078 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot270_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \ | |
38079 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot270_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ | |
38080 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot270_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ | |
38081 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot270_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ | |
38082 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot270_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ | |
38083 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot270_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \ | |
38084 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcaddq_rot270_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \ | |
38085 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcaddq_rot270_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));}) | |
38086 | ||
261014a1 SP |
38087 | #define __arm_vcaddq_rot90_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ |
38088 | __typeof(p2) __p2 = (p2); \ | |
38089 | _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
38090 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot90_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \ | |
38091 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot90_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ | |
38092 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot90_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ | |
38093 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot90_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ | |
38094 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot90_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ | |
38095 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot90_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \ | |
38096 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcaddq_rot90_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \ | |
38097 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcaddq_rot90_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));}) | |
38098 | ||
261014a1 SP |
38099 | #define __arm_vcmulq_rot180_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ |
38100 | __typeof(p2) __p2 = (p2); \ | |
38101 | _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
38102 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_rot180_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \ | |
38103 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_rot180_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));}) | |
38104 | ||
261014a1 SP |
38105 | #define __arm_vcmulq_rot270_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ |
38106 | __typeof(p2) __p2 = (p2); \ | |
38107 | _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
38108 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_rot270_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \ | |
38109 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_rot270_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));}) | |
38110 | ||
261014a1 SP |
38111 | #define __arm_vcmulq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ |
38112 | __typeof(p2) __p2 = (p2); \ | |
38113 | _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
38114 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \ | |
38115 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));}) | |
38116 | ||
261014a1 SP |
38117 | #define __arm_vcvtq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \ |
38118 | _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ | |
38119 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vcvtq_x_f16_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
38120 | int (*)[__ARM_mve_type_int32x4_t]: __arm_vcvtq_x_f32_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2), \ | |
38121 | int (*)[__ARM_mve_type_uint16x8_t]: __arm_vcvtq_x_f16_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2), \ | |
38122 | int (*)[__ARM_mve_type_uint32x4_t]: __arm_vcvtq_x_f32_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2));}) | |
38123 | ||
261014a1 SP |
38124 | #define __arm_vcvtq_x_n(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ |
38125 | _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ | |
38126 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vcvtq_x_n_f16_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2, p3), \ | |
38127 | int (*)[__ARM_mve_type_int32x4_t]: __arm_vcvtq_x_n_f32_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2, p3), \ | |
38128 | int (*)[__ARM_mve_type_uint16x8_t]: __arm_vcvtq_x_n_f16_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \ | |
38129 | int (*)[__ARM_mve_type_uint32x4_t]: __arm_vcvtq_x_n_f32_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2, p3));}) | |
38130 | ||
261014a1 SP |
38131 | #define __arm_veorq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ |
38132 | __typeof(p2) __p2 = (p2); \ | |
38133 | _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
38134 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_veorq_x_s8(__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \ | |
38135 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_veorq_x_s16(__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ | |
38136 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_veorq_x_s32(__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ | |
38137 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_veorq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ | |
38138 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_veorq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ | |
38139 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_veorq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \ | |
38140 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_veorq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \ | |
38141 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_veorq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));}) | |
38142 | ||
261014a1 SP |
38143 | #define __arm_vmaxnmq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ |
38144 | __typeof(p2) __p2 = (p2); \ | |
38145 | _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
38146 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmaxnmq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \ | |
38147 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmaxnmq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));}) | |
38148 | ||
261014a1 SP |
38149 | #define __arm_vminnmq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ |
38150 | __typeof(p2) __p2 = (p2); \ | |
38151 | _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
38152 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vminnmq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \ | |
38153 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vminnmq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));}) | |
38154 | ||
261014a1 SP |
38155 | #define __arm_vmulq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ |
38156 | __typeof(p2) __p2 = (p2); \ | |
38157 | _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
38158 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \ | |
38159 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ | |
38160 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ | |
31df339a SMW |
38161 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce3(p2, int), p3), \ |
38162 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce3(p2, int), p3), \ | |
38163 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce3(p2, int), p3), \ | |
261014a1 SP |
38164 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ |
38165 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ | |
38166 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \ | |
31df339a SMW |
38167 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce3(p2, int), p3), \ |
38168 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce3(p2, int), p3), \ | |
38169 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce3(p2, int), p3), \ | |
261014a1 SP |
38170 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmulq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \ |
38171 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmulq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3), \ | |
31df339a SMW |
38172 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vmulq_x_n_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce2(p2, double), p3), \ |
38173 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vmulq_x_n_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce2(p2, double), p3));}) | |
261014a1 | 38174 | |
261014a1 SP |
38175 | #define __arm_vnegq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \ |
38176 | _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ | |
38177 | int (*)[__ARM_mve_type_int8x16_t]: __arm_vnegq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \ | |
38178 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vnegq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
38179 | int (*)[__ARM_mve_type_int32x4_t]: __arm_vnegq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2), \ | |
38180 | int (*)[__ARM_mve_type_float16x8_t]: __arm_vnegq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \ | |
38181 | int (*)[__ARM_mve_type_float32x4_t]: __arm_vnegq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));}) | |
38182 | ||
261014a1 SP |
38183 | #define __arm_vornq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ |
38184 | __typeof(p2) __p2 = (p2); \ | |
38185 | _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
38186 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vornq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \ | |
38187 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vornq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ | |
38188 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vornq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ | |
38189 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vornq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ | |
38190 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vornq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ | |
38191 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vornq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \ | |
38192 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vornq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \ | |
38193 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vornq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));}) | |
38194 | ||
261014a1 SP |
38195 | #define __arm_vorrq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ |
38196 | __typeof(p2) __p2 = (p2); \ | |
38197 | _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
38198 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vorrq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \ | |
38199 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vorrq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ | |
38200 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vorrq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ | |
38201 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vorrq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ | |
38202 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vorrq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ | |
38203 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vorrq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \ | |
38204 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vorrq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \ | |
38205 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vorrq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));}) | |
38206 | ||
261014a1 SP |
38207 | #define __arm_vrev32q_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \ |
38208 | _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ | |
38209 | int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev32q_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \ | |
38210 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vrev32q_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
38211 | int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev32q_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2), \ | |
38212 | int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrev32q_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2), \ | |
38213 | int (*)[__ARM_mve_type_float16x8_t]: __arm_vrev32q_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2));}) | |
38214 | ||
261014a1 SP |
38215 | #define __arm_vrev64q_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \ |
38216 | _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ | |
38217 | int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev64q_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \ | |
38218 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vrev64q_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
38219 | int (*)[__ARM_mve_type_int32x4_t]: __arm_vrev64q_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2), \ | |
38220 | int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev64q_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2), \ | |
38221 | int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrev64q_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2), \ | |
38222 | int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrev64q_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2), \ | |
38223 | int (*)[__ARM_mve_type_float16x8_t]: __arm_vrev64q_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \ | |
38224 | int (*)[__ARM_mve_type_float32x4_t]: __arm_vrev64q_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));}) | |
38225 | ||
261014a1 SP |
38226 | #define __arm_vrndaq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \ |
38227 | _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ | |
38228 | int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndaq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \ | |
38229 | int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndaq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));}) | |
38230 | ||
261014a1 SP |
38231 | #define __arm_vrndmq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \ |
38232 | _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ | |
38233 | int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndmq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \ | |
38234 | int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndmq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));}) | |
38235 | ||
261014a1 SP |
38236 | #define __arm_vrndnq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \ |
38237 | _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ | |
38238 | int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndnq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \ | |
38239 | int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndnq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));}) | |
38240 | ||
261014a1 SP |
38241 | #define __arm_vrndpq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \ |
38242 | _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ | |
38243 | int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndpq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \ | |
38244 | int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndpq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));}) | |
38245 | ||
261014a1 SP |
38246 | #define __arm_vrndq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \ |
38247 | _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ | |
38248 | int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \ | |
38249 | int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));}) | |
38250 | ||
261014a1 SP |
38251 | #define __arm_vrndxq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \ |
38252 | _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ | |
38253 | int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndxq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \ | |
38254 | int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndxq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));}) | |
38255 | ||
261014a1 SP |
38256 | #define __arm_vsubq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ |
38257 | __typeof(p2) __p2 = (p2); \ | |
38258 | _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
38259 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vsubq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \ | |
38260 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vsubq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3), \ | |
31df339a SMW |
38261 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vsubq_x_n_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce2(p2, double), p3), \ |
38262 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vsubq_x_n_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce2(p2, double), p3));}) | |
261014a1 | 38263 | |
261014a1 SP |
38264 | #define __arm_vcmulq_rot90_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ |
38265 | __typeof(p2) __p2 = (p2); \ | |
38266 | _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
38267 | int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_rot90_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \ | |
38268 | int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_rot90_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));}) | |
38269 | ||
1a5c27b1 SP |
38270 | #define __arm_vgetq_lane(p0,p1) ({ __typeof(p0) __p0 = (p0); \ |
38271 | _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ | |
38272 | int (*)[__ARM_mve_type_int8x16_t]: __arm_vgetq_lane_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \ | |
38273 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vgetq_lane_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \ | |
38274 | int (*)[__ARM_mve_type_int32x4_t]: __arm_vgetq_lane_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \ | |
38275 | int (*)[__ARM_mve_type_int64x2_t]: __arm_vgetq_lane_s64 (__ARM_mve_coerce(__p0, int64x2_t), p1), \ | |
38276 | int (*)[__ARM_mve_type_uint8x16_t]: __arm_vgetq_lane_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \ | |
38277 | int (*)[__ARM_mve_type_uint16x8_t]: __arm_vgetq_lane_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \ | |
38278 | int (*)[__ARM_mve_type_uint32x4_t]: __arm_vgetq_lane_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1), \ | |
38279 | int (*)[__ARM_mve_type_uint64x2_t]: __arm_vgetq_lane_u64 (__ARM_mve_coerce(__p0, uint64x2_t), p1), \ | |
38280 | int (*)[__ARM_mve_type_float16x8_t]: __arm_vgetq_lane_f16 (__ARM_mve_coerce(__p0, float16x8_t), p1), \ | |
38281 | int (*)[__ARM_mve_type_float32x4_t]: __arm_vgetq_lane_f32 (__ARM_mve_coerce(__p0, float32x4_t), p1));}) | |
38282 | ||
1a5c27b1 SP |
38283 | #define __arm_vsetq_lane(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ |
38284 | __typeof(p1) __p1 = (p1); \ | |
38285 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
31df339a SMW |
38286 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t]: __arm_vsetq_lane_s8 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, int8x16_t), p2), \ |
38287 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t]: __arm_vsetq_lane_s16 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
38288 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vsetq_lane_s32 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, int32x4_t), p2), \ | |
38289 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int64x2_t]: __arm_vsetq_lane_s64 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, int64x2_t), p2), \ | |
38290 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint8x16_t]: __arm_vsetq_lane_u8 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, uint8x16_t), p2), \ | |
38291 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t]: __arm_vsetq_lane_u16 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ | |
38292 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t]: __arm_vsetq_lane_u32 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, uint32x4_t), p2), \ | |
38293 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint64x2_t]: __arm_vsetq_lane_u64 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, uint64x2_t), p2), \ | |
38294 | int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float16x8_t]: __arm_vsetq_lane_f16 (__ARM_mve_coerce2(p0, double), __ARM_mve_coerce(__p1, float16x8_t), p2), \ | |
38295 | int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float32x4_t]: __arm_vsetq_lane_f32 (__ARM_mve_coerce2(p0, double), __ARM_mve_coerce(__p1, float32x4_t), p2));}) | |
1a5c27b1 | 38296 | |
e3678b44 | 38297 | #else /* MVE Integer. */ |
14782c81 | 38298 | |
41e1a7ff SP |
/* Polymorphic scatter-store with write-back base: dispatch on the element
   type of the value vector p2 (p0 = base vector pointer, p1 = offset).  */
#define __arm_vstrwq_scatter_base_wb(p0,p1,p2) ({ __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_base_wb_s32 (p0, p1, __ARM_mve_coerce(__p2, int32x4_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_base_wb_u32 (p0, p1, __ARM_mve_coerce(__p2, uint32x4_t)));})
38303 | ||
41e1a7ff SP |
/* Predicated variant of vstrwq_scatter_base_wb: p3 is the predicate mask,
   dispatch again on the element type of the value vector p2.  */
#define __arm_vstrwq_scatter_base_wb_p(p0,p1,p2,p3) ({ __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_base_wb_p_s32 (p0, p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_base_wb_p_u32 (p0, p1, __ARM_mve_coerce(__p2, uint32x4_t), p3));})
38308 | ||
3ce755a8 ASDV |
/* Polymorphic 4-vector interleaving store: dispatch on the (pointer type,
   x4 tuple type) pair.  Note p0 is used unevaluated (typeid/coerce on p0
   itself, no __p0 copy) so the pointer expression is not duplicated.  */
#define __arm_vst4q(p0,p1) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16x4_t]: __arm_vst4q_s8 (__ARM_mve_coerce(p0, int8_t *), __ARM_mve_coerce(__p1, int8x16x4_t)), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8x4_t]: __arm_vst4q_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, int16x8x4_t)), \
  int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4x4_t]: __arm_vst4q_s32 (__ARM_mve_coerce(p0, int32_t *), __ARM_mve_coerce(__p1, int32x4x4_t)), \
  int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16x4_t]: __arm_vst4q_u8 (__ARM_mve_coerce(p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16x4_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8x4_t]: __arm_vst4q_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8x4_t)), \
  int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4x4_t]: __arm_vst4q_u32 (__ARM_mve_coerce(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4x4_t)));})
14782c81 | 38317 | |
6df4618c SP |
/* Polymorphic vabsq (MVE integer-only section): dispatch on the signed
   element type of the single vector argument.  */
#define __arm_vabsq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vabsq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vabsq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vabsq_s32 (__ARM_mve_coerce(__p0, int32x4_t)));})
38323 | ||
6df4618c SP |
/* Polymorphic vclsq (count leading sign bits): signed element types only.  */
#define __arm_vclsq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vclsq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vclsq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vclsq_s32 (__ARM_mve_coerce(__p0, int32x4_t)));})
38329 | ||
6df4618c SP |
/* Polymorphic vclzq (count leading zeros): signed and unsigned element
   types.  */
#define __arm_vclzq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vclzq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vclzq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vclzq_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vclzq_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vclzq_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vclzq_u32 (__ARM_mve_coerce(__p0, uint32x4_t)));})
38338 | ||
6df4618c SP |
/* Polymorphic vnegq (MVE integer-only section): signed element types.  */
#define __arm_vnegq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vnegq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vnegq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vnegq_s32 (__ARM_mve_coerce(__p0, int32x4_t)));})
38344 | ||
6df4618c SP |
/* Polymorphic vmovlbq (widen bottom half): only 8- and 16-bit element
   inputs, since the result doubles the element width.  */
#define __arm_vmovlbq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vmovlbq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vmovlbq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmovlbq_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmovlbq_u16 (__ARM_mve_coerce(__p0, uint16x8_t)));})
38351 | ||
6df4618c SP |
/* Polymorphic vmovltq (widen top half): 8- and 16-bit element inputs.  */
#define __arm_vmovltq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vmovltq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vmovltq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmovltq_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmovltq_u16 (__ARM_mve_coerce(__p0, uint16x8_t)));})
38358 | ||
6df4618c SP |
/* Polymorphic vmvnq (bitwise NOT): all integer element types.  */
#define __arm_vmvnq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vmvnq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vmvnq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vmvnq_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmvnq_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmvnq_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vmvnq_u32 (__ARM_mve_coerce(__p0, uint32x4_t)));})
38367 | ||
6df4618c SP |
/* Polymorphic vrev16q: only 8-bit element vectors are meaningful for a
   16-bit-halfword reversal.  */
#define __arm_vrev16q(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev16q_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev16q_u8 (__ARM_mve_coerce(__p0, uint8x16_t)));})
38372 | ||
6df4618c SP |
/* Polymorphic vrev32q: 8- and 16-bit element vectors (elements reversed
   within each 32-bit word).  */
#define __arm_vrev32q(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev32q_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vrev32q_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev32q_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrev32q_u16 (__ARM_mve_coerce(__p0, uint16x8_t)));})
38379 | ||
5db0eb95 SP |
/* Polymorphic vrev64q: all integer element types (elements reversed within
   each 64-bit doubleword).  */
#define __arm_vrev64q(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev64q_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vrev64q_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vrev64q_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev64q_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrev64q_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrev64q_u32 (__ARM_mve_coerce(__p0, uint32x4_t)));})
38388 | ||
6df4618c SP |
/* Polymorphic vqabsq (saturating absolute value): signed element types.  */
#define __arm_vqabsq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vqabsq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vqabsq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vqabsq_s32 (__ARM_mve_coerce(__p0, int32x4_t)));})
38394 | ||
6df4618c SP |
/* Polymorphic vqnegq (saturating negate): signed element types.  */
#define __arm_vqnegq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vqnegq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vqnegq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vqnegq_s32 (__ARM_mve_coerce(__p0, int32x4_t)));})
38400 | ||
f166a8cd SP |
/* Polymorphic vshrq: immediate right shift, dispatching to the _n_ form;
   p1 is passed through unevaluated so it stays a constant expression.  */
#define __arm_vshrq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vshrq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vshrq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vshrq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshrq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshrq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshrq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
38409 | ||
d71dba7b SP |
/* Polymorphic vcmpneq: compare-not-equal against either a scalar (the _n_
   forms, scalar coerced via __ARM_mve_coerce3) or a second vector of the
   same element type.  */
#define __arm_vcmpneq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpneq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpneq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpneq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpneq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpneq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpneq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
38425 | ||
d71dba7b SP |
/* Polymorphic vshlq: vector-by-vector shift.  The shift-count vector is
   always a signed vector of the same element width, even for unsigned
   data vectors.  */
#define __arm_vshlq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vshlq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vshlq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vshlq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vshlq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vshlq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vshlq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
38435 | ||
33203b4c SP |
/* Polymorphic vsubq: vector - vector, or vector - scalar (the _n_ forms,
   scalar coerced via __ARM_mve_coerce3).  */
#define __arm_vsubq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsubq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsubq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsubq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsubq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsubq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsubq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vsubq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vsubq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vsubq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vsubq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vsubq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vsubq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce3(p1, int)));})
33203b4c | 38451 | |
33203b4c SP |
/* Polymorphic vshlq_r: shift every element by a scalar held in a register
   (p1 passed through unchanged).  */
#define __arm_vshlq_r(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlq_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlq_r_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlq_r_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlq_r_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlq_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlq_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
38460 | ||
/* Polymorphic vrshlq (rounding shift left): scalar shift (_n_ forms) or
   per-element shift by a signed vector of matching element width.  */
#define __arm_vrshlq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshlq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshlq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
38476 | ||
33203b4c SP |
/* Polymorphic vrmulhq (rounding multiply returning high half): vector x
   vector, all integer element types.  */
#define __arm_vrmulhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrmulhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrmulhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrmulhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrmulhq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrmulhq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrmulhq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
38486 | ||
33203b4c SP |
/* Polymorphic vrhaddq (rounding halving add): vector x vector, all integer
   element types.  */
#define __arm_vrhaddq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrhaddq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrhaddq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrhaddq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrhaddq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrhaddq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrhaddq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
38496 | ||
/* Polymorphic vqsubq (saturating subtract): vector - scalar (_n_ forms)
   or vector - vector.  */
#define __arm_vqsubq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqsubq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqsubq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqsubq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vqsubq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vqsubq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vqsubq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqsubq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqsubq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqsubq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vqsubq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vqsubq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vqsubq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
38512 | ||
33203b4c SP |
/* Polymorphic vqshlq (saturating shift left): per-element shift by a
   signed vector of matching element width.  */
#define __arm_vqshlq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqshlq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqshlq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqshlq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqshlq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqshlq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqshlq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
38522 | ||
33203b4c SP |
/* Polymorphic vqshlq_r: saturating shift of every element by a scalar in
   a register (p1 passed through unchanged).  */
#define __arm_vqshlq_r(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vqshlq_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vqshlq_r_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vqshlq_r_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vqshlq_r_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vqshlq_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vqshlq_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
38531 | ||
33203b4c SP |
/* Polymorphic vqshluq: saturating shift left with immediate, signed input
   producing an unsigned result; only _n_ (immediate) forms exist.  */
#define __arm_vqshluq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vqshluq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vqshluq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vqshluq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1));})
38537 | ||
/* Polymorphic vrshrq: rounding right shift by immediate (_n_ forms).  */
#define __arm_vrshrq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vrshrq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vrshrq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vrshrq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrshrq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrshrq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrshrq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
38546 | ||
33203b4c SP |
/* Polymorphic vshlq_n: shift left by immediate.  */
#define __arm_vshlq_n(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
38555 | ||
33203b4c SP |
/* Polymorphic vqshlq_n: saturating shift left by immediate.  */
#define __arm_vqshlq_n(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vqshlq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vqshlq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vqshlq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vqshlq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vqshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vqshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
38564 | ||
33203b4c SP |
/* Polymorphic vqrshlq (saturating rounding shift left): per-element shift
   by a signed vector, or scalar shift (_n_ forms).  */
#define __arm_vqrshlq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrshlq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrshlq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrshlq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrshlq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrshlq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrshlq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce3(p1, int)));})
33203b4c | 38580 | |
33203b4c SP |
/* Polymorphic vqrdmulhq (saturating rounding doubling multiply high):
   vector x vector or vector x scalar, signed element types only.  */
#define __arm_vqrdmulhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmulhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmulhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmulhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqrdmulhq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrdmulhq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrdmulhq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce3(p1, int)));})
33203b4c | 38590 | |
33203b4c SP |
/* Polymorphic vqdmulhq (saturating doubling multiply high): vector x
   scalar (_n_ forms) or vector x vector, signed element types only.  */
#define __arm_vqdmulhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqdmulhq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmulhq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmulhq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmulhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmulhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmulhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
38600 | ||
/* Overloaded vqaddq: saturating add.  Dispatches to the vector/scalar
   (_n_{s,u}N) or vector/vector (_{s,u}N) variant based on operand types.  */
#define __arm_vqaddq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqaddq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqaddq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqaddq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vqaddq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vqaddq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vqaddq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqaddq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqaddq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqaddq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vqaddq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vqaddq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vqaddq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
38616 | ||
33203b4c SP |
/* Overloaded vorrq: bitwise OR of two vectors (vector/vector forms only
   in this dispatcher).  */
#define __arm_vorrq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vorrq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vorrq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vorrq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vorrq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vorrq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vorrq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
38626 | ||
33203b4c SP |
/* Overloaded vornq: bitwise OR of p0 with the complement of p1.  */
#define __arm_vornq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vornq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vornq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vornq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vornq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vornq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vornq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
38636 | ||
33203b4c SP |
/* Overloaded vmulq: element-wise multiply; vector-by-scalar (_n) and
   vector-by-vector variants for all six integer element types.  */
#define __arm_vmulq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
38652 | ||
33203b4c SP |
/* Overloaded vmulltq_int: widening multiply of the top (odd-indexed)
   elements; result elements are twice the input width.  */
#define __arm_vmulltq_int(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulltq_int_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulltq_int_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulltq_int_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulltq_int_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulltq_int_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulltq_int_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
38662 | ||
33203b4c SP |
/* Overloaded vmullbq_int: widening multiply of the bottom (even-indexed)
   elements; result elements are twice the input width.  */
#define __arm_vmullbq_int(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmullbq_int_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmullbq_int_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmullbq_int_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmullbq_int_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmullbq_int_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmullbq_int_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
38672 | ||
33203b4c SP |
/* Overloaded vmulhq: multiply returning the high half of each product.  */
#define __arm_vmulhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulhq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulhq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulhq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
38682 | ||
33203b4c SP |
/* Overloaded vminq: element-wise minimum of two vectors.  */
#define __arm_vminq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vminq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vminq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vminq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vminq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vminq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vminq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
38692 | ||
33203b4c SP |
/* Overloaded vminaq: minimum of an unsigned accumulator and the absolute
   value of a signed vector — note the mixed uintNxM/intNxM operand types.  */
#define __arm_vminaq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vminaq_s8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vminaq_s16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vminaq_s32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
38699 | ||
33203b4c SP |
/* Overloaded vmaxq: element-wise maximum of two vectors.  */
#define __arm_vmaxq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmaxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmaxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmaxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmaxq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmaxq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmaxq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
38709 | ||
33203b4c SP |
/* Overloaded vmaxaq: maximum of an unsigned accumulator and the absolute
   value of a signed vector — mixed uintNxM/intNxM operand types.  */
#define __arm_vmaxaq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmaxaq_s8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmaxaq_s16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmaxaq_s32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
38716 | ||
/* Overloaded vhsubq: halving subtract; vector/scalar (_n) and
   vector/vector variants for all six integer element types.  */
#define __arm_vhsubq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vhsubq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vhsubq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vhsubq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vhsubq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vhsubq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vhsubq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhsubq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhsubq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhsubq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vhsubq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vhsubq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vhsubq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
38732 | ||
33203b4c SP |
/* Overloaded vhcaddq_rot90: halving complex add with 90-degree rotation
   (signed element types only).  */
#define __arm_vhcaddq_rot90(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhcaddq_rot90_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhcaddq_rot90_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhcaddq_rot90_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
38739 | ||
33203b4c SP |
/* Overloaded vhcaddq_rot270: halving complex add with 270-degree rotation
   (signed element types only).  */
#define __arm_vhcaddq_rot270(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhcaddq_rot270_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhcaddq_rot270_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhcaddq_rot270_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
38746 | ||
/* Overloaded vhaddq: halving add; vector/scalar (_n) and vector/vector
   variants for all six integer element types.  */
#define __arm_vhaddq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vhaddq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vhaddq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vhaddq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vhaddq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vhaddq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vhaddq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhaddq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhaddq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhaddq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vhaddq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vhaddq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vhaddq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
38762 | ||
33203b4c SP |
/* Overloaded veorq: bitwise exclusive-OR of two vectors.  */
#define __arm_veorq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_veorq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_veorq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_veorq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_veorq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_veorq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_veorq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
38772 | ||
33203b4c SP |
/* Overloaded vcaddq_rot90: complex add with 90-degree rotation.  */
#define __arm_vcaddq_rot90(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot90_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot90_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot90_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot90_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot90_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot90_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
38782 | ||
33203b4c SP |
/* Overloaded vcaddq_rot270: complex add with 270-degree rotation.  */
#define __arm_vcaddq_rot270(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot270_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot270_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot270_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot270_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot270_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot270_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
38792 | ||
33203b4c SP |
/* Overloaded vbrsrq: bit-reverse each element by the bottom bits of the
   scalar p1.  Only p0 participates in dispatch; p1 is passed through
   unmodified (it is a plain int32_t operand in every variant).  */
#define __arm_vbrsrq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vbrsrq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vbrsrq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vbrsrq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vbrsrq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vbrsrq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vbrsrq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
38801 | ||
33203b4c SP |
/* Overloaded vbicq: bitwise clear (p0 AND NOT p1).  The immediate (_n)
   forms exist only for 16/32-bit elements and use __ARM_mve_coerce1,
   which additionally validates the modified-immediate encoding.  */
#define __arm_vbicq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vbicq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce1 (__p1, int)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vbicq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce1 (__p1, int)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vbicq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce1 (__p1, int)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vbicq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce1 (__p1, int)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vbicq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vbicq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vbicq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vbicq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vbicq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vbicq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
38815 | ||
33203b4c SP |
/* Overloaded vaddq: element-wise add; vector/vector cases listed first,
   then vector/scalar (_n) cases (selection order is irrelevant).  */
#define __arm_vaddq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vaddq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vaddq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vaddq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vaddq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vaddq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vaddq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce3(p1, int)));})
33203b4c | 38831 | |
33203b4c SP |
/* Overloaded vandq: bitwise AND of two vectors.  */
#define __arm_vandq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vandq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vandq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vandq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vandq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vandq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vandq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
38841 | ||
33203b4c SP |
/* Overloaded vabdq: element-wise absolute difference of two vectors.  */
#define __arm_vabdq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vabdq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vabdq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vabdq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vabdq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vabdq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vabdq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
38851 | ||
33203b4c SP |
/* Overloaded vcmpeqq: element-wise equality compare producing a
   predicate; vector/vector and vector/scalar (_n) variants.  */
#define __arm_vcmpeqq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpeqq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpeqq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpeqq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpeqq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpeqq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpeqq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce3(p1, int)));})
33203b4c | 38867 | |
/* Type-generic vqmovntq: narrowing variants keyed on the (narrow
   accumulator, wide source) vector-type pair.  */
#define __arm_vqmovntq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovntq_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovntq_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqmovntq_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqmovntq_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
38875 | ||
/* Type-generic vqmovnbq: same (narrow, wide) dispatch shape as
   __arm_vqmovntq, selecting the _s16/_s32/_u16/_u32 variants.  */
#define __arm_vqmovnbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovnbq_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovnbq_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqmovnbq_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqmovnbq_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
38883 | ||
/* Type-generic vmulltq_poly: polynomial variants are keyed on the
   unsigned vector types (p8 carried as uint8x16_t, p16 as uint16x8_t).  */
#define __arm_vmulltq_poly(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulltq_poly_p8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulltq_poly_p16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)));})
38889 | ||
/* Type-generic vmullbq_poly: same dispatch shape as
   __arm_vmulltq_poly, selecting the _p8/_p16 variants.  */
#define __arm_vmullbq_poly(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmullbq_poly_p8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmullbq_poly_p16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)));})
38895 | ||
/* Type-generic vmovntq: narrowing dispatch on the (narrow accumulator,
   wide source) vector-type pair.  */
#define __arm_vmovntq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vmovntq_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vmovntq_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vmovntq_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vmovntq_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
38903 | ||
/* Type-generic vmovnbq: same (narrow, wide) dispatch shape as
   __arm_vmovntq, selecting the _s16/_s32/_u16/_u32 variants.  */
#define __arm_vmovnbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vmovnbq_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vmovnbq_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vmovnbq_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vmovnbq_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
38911 | ||
/* Type-generic vmlaldavxq: signed-only dispatch to the _s16/_s32
   variants based on both operand vector types.  */
#define __arm_vmlaldavxq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlaldavxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlaldavxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
38917 | ||
/* Type-generic vqmovuntq: dispatch on (unsigned narrow accumulator,
   signed wide source) to the _s16/_s32 variants.  */
#define __arm_vqmovuntq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovuntq_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovuntq_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t)));})
38923 | ||
/* Type-generic vshlltq: dispatch on the first operand's vector type
   only; the immediate shift count p1 is passed through unchanged.  */
#define __arm_vshlltq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlltq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlltq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlltq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlltq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1));})
38930 | ||
/* Type-generic vshllbq: same single-operand dispatch shape as
   __arm_vshlltq, selecting the _n_{s8,s16,u8,u16} variants.  */
#define __arm_vshllbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vshllbq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vshllbq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshllbq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshllbq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1));})
38937 | ||
/* Type-generic vqmovunbq: dispatch on (unsigned narrow accumulator,
   signed wide source) to the _s16/_s32 variants.  */
#define __arm_vqmovunbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovunbq_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovunbq_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t)));})
38943 | ||
/* Type-generic vqdmulltq: scalar second operand selects the _n
   variants (p1 passed via __ARM_mve_coerce3), vector second operand
   selects the vector variants.  */
#define __arm_vqdmulltq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmulltq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmulltq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmulltq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmulltq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
38951 | ||
/* Type-generic vqdmullbq: same scalar/vector dispatch shape as
   __arm_vqdmulltq, selecting the bottom-half variants.  */
#define __arm_vqdmullbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmullbq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmullbq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmullbq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmullbq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
38959 | ||
/* Type-generic vcmpgeq: signed-only compare, dispatching on
   (vector, vector) or (vector, scalar int) argument pairs.  */
#define __arm_vcmpgeq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpgeq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpgeq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpgeq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce3(p1, int)));})
33203b4c | 38969 | |
/* Type-generic vcmpgtq: same signed-only dispatch shape as
   __arm_vcmpgeq, selecting the greater-than variants.  */
#define __arm_vcmpgtq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpgtq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpgtq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpgtq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpgtq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpgtq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpgtq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce3(p1, int)));})
33203b4c | 38979 | |
/* Type-generic vcmpleq: same signed-only dispatch shape as
   __arm_vcmpgeq, selecting the less-than-or-equal variants.  */
#define __arm_vcmpleq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpleq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpleq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpleq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpleq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpleq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpleq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce3(p1, int)));})
33203b4c | 38989 | |
/* Type-generic vcmpltq: same signed-only dispatch shape as
   __arm_vcmpgeq, selecting the less-than variants.  */
#define __arm_vcmpltq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpltq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpltq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpltq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpltq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpltq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce3(p1, int)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpltq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce3(p1, int)));})
33203b4c | 38999 | |
/* Type-generic predicated vcmpneq_m: p2 is the predicate, forwarded
   unchanged.  Dispatch covers (vector, vector) and (vector, scalar)
   pairs for all six element types.  */
#define __arm_vcmpneq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpneq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce3(p1, int), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce3(p1, int), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce3(p1, int), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce3(p1, int), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce3(p1, int), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce3(p1, int), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpneq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpneq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpneq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpneq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpneq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
39015 | ||
/* Type-generic vshlcq: dispatch on the vector operand only;
   p1 and p2 are forwarded unchanged.  */
#define __arm_vshlcq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlcq_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlcq_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlcq_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlcq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlcq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlcq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})
39024 | ||
/* Type-generic predicated vcmpeqq_m: (vector, vector) cases first,
   then (vector, scalar int) _n cases; p2 is the predicate.  */
#define __arm_vcmpeqq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpeqq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpeqq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpeqq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpeqq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpeqq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpeqq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce3(p1, int), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce3(p1, int), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce3(p1, int), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce3(p1, int), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce3(p1, int), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce3(p1, int), p2));})
0dad5b33 | 39040 | |
/* Type-generic vbicq_m_n: 16/32-bit element types only; p1 (immediate)
   and p2 (predicate) are forwarded unchanged.  */
#define __arm_vbicq_m_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vbicq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vbicq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vbicq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vbicq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})
39047 | ||
/* Type-generic vqrshrnbq: narrowing dispatch on the (narrow
   accumulator, wide source) pair; p2 (shift immediate) is forwarded.  */
#define __arm_vqrshrnbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshrnbq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshrnbq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqrshrnbq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqrshrnbq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
39055 | ||
/* Type-generic vqrshrunbq: (unsigned narrow accumulator, signed wide
   source) dispatch; p2 (shift immediate) is forwarded.  */
#define __arm_vqrshrunbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshrunbq_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshrunbq_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
39061 | ||
/* Type-generic vqrdmlsdhq: three-operand, signed-only dispatch
   requiring all operands to share the same vector type.  */
#define __arm_vqrdmlsdhq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmlsdhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmlsdhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmlsdhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
39069 | ||
/* Type-generic vqrdmlsdhxq: same three-operand signed-only dispatch
   shape as __arm_vqrdmlsdhq, selecting the exchange variants.  */
#define __arm_vqrdmlsdhxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmlsdhxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmlsdhxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmlsdhxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
39077 | ||
/* Type-generic vqrshlq_m_n: dispatch on the vector operand only;
   p1 (scalar shift) and p2 (predicate) are forwarded unchanged.  */
#define __arm_vqrshlq_m_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vqrshlq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vqrshlq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vqrshlq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vqrshlq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vqrshlq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vqrshlq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})
39086 | ||
/* Type-generic vqshlq_m_r: same single-vector dispatch shape as
   __arm_vqrshlq_m_n, selecting the _m_r variants.  */
#define __arm_vqshlq_m_r(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vqshlq_m_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vqshlq_m_r_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vqshlq_m_r_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vqshlq_m_r_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vqshlq_m_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vqshlq_m_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})
39095 | ||
/* Type-generic predicated vrev64q_m: both vector operands must share
   the same type; p2 is the predicate.  */
#define __arm_vrev64q_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrev64q_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrev64q_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrev64q_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrev64q_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrev64q_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrev64q_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
39105 | ||
/* Type-generic vrshlq_m_n: dispatch on the vector operand only;
   p1 (scalar shift) and p2 (predicate) are forwarded unchanged.

   Fix: the previous version declared `__typeof(p1) __p1 = (p1);' but
   then passed `p1' in five branches and `__p1' only in the u32 branch.
   That evaluated p1 twice for the non-u32 types (a side-effect hazard
   for arguments like `x++'), left __p1 unused in those branches, and
   diverged from the sibling scalar-shift macros (__arm_vqrshlq_m_n,
   __arm_vqshlq_m_r, __arm_vshlq_m_r), which take no copy of p1.
   p1 is now passed directly and uniformly, so it is evaluated exactly
   once whichever branch _Generic selects.  */
#define __arm_vrshlq_m_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vrshlq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vrshlq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vrshlq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrshlq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrshlq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrshlq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})
39115 | ||
/* Type-generic vshlq_m_r: same single-vector dispatch shape as
   __arm_vqshlq_m_r, selecting the non-saturating variants.  */
#define __arm_vshlq_m_r(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlq_m_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlq_m_r_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlq_m_r_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlq_m_r_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlq_m_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlq_m_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})
39124 | ||
/* Type-generic vsliq: both vector operands must share the same type;
   p2 (immediate) is forwarded unchanged.  */
#define __arm_vsliq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsliq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsliq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsliq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsliq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsliq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsliq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
39134 | ||
/* Type-generic vsriq: same dispatch shape as __arm_vsliq, selecting
   the shift-right-insert _n variants.  */
#define __arm_vsriq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsriq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsriq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsriq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsriq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsriq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsriq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
39144 | ||
/* Type-generic vqrdmlashq (signed only): p2 must have MVE typeid
   __ARM_mve_type_int_n; the raw p2 expression is handed to
   __ARM_mve_coerce3, as upstream does for scalar-immediate operands.  */
#define __arm_vqrdmlashq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqrdmlashq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce3(p2, int)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrdmlashq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce3(p2, int)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrdmlashq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce3(p2, int)));})
8165795c | 39152 | |
/* Type-generic vqdmlashq (signed only): same dispatch shape as
   __arm_vqrdmlashq, selecting __arm_vqdmlashq_n_{s8,s16,s32}.  */
#define __arm_vqdmlashq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqdmlashq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce3(p2, int)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmlashq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce3(p2, int)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmlashq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce3(p2, int)));})
afb198ee | 39160 | |
/* Type-generic vqrdmlahq (signed only): vector x vector x scalar-int form,
   dispatching to __arm_vqrdmlahq_n_{s8,s16,s32}.  */
#define __arm_vqrdmlahq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqrdmlahq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce3(p2, int)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrdmlahq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce3(p2, int)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrdmlahq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce3(p2, int)));})
8165795c | 39168 | |
/* Type-generic vqrdmladhxq (signed only): all three operands are vectors of
   the same element type; dispatch to __arm_vqrdmladhxq_{s8,s16,s32}.  */
#define __arm_vqrdmladhxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmladhxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmladhxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmladhxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
39176 | ||
/* Type-generic vqrdmladhq (signed only): three same-typed vector operands,
   dispatching to __arm_vqrdmladhq_{s8,s16,s32}.  */
#define __arm_vqrdmladhq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmladhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmladhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmladhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
39184 | ||
/* Type-generic predicated vqnegq_m (signed only): p0 is the inactive-lane
   value, p1 the operand, p2 the predicate (passed through unchanged).  */
#define __arm_vqnegq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqnegq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqnegq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqnegq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
39191 | ||
/* Type-generic vqdmlsdhxq (signed only): three same-typed vector operands,
   dispatching to __arm_vqdmlsdhxq_{s8,s16,s32}.  */
#define __arm_vqdmlsdhxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmlsdhxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmlsdhxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmlsdhxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
39199 | ||
/* Type-generic predicated vabsq_m (signed only): dispatch on (p0, p1) to
   __arm_vabsq_m_{s8,s16,s32}; p2 is the predicate, passed through.  */
#define __arm_vabsq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vabsq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vabsq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vabsq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
39206 | ||
/* Type-generic predicated vclsq_m (signed only): dispatch on (p0, p1) to
   __arm_vclsq_m_{s8,s16,s32}; p2 is the predicate, passed through.  */
#define __arm_vclsq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vclsq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vclsq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vclsq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
39213 | ||
/* Type-generic predicated vclzq_m: signed and unsigned element widths are
   supported; dispatch on (p0, p1), with the predicate p2 passed through.  */
#define __arm_vclzq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vclzq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vclzq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vclzq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vclzq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vclzq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vclzq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
39223 | ||
/* Type-generic predicated vcmpgeq_m (signed only): vector/vector forms first,
   then vector/scalar (_n) forms when p1's typeid is __ARM_mve_type_int_n.
   The raw p1 expression is given to __ARM_mve_coerce3 in the _n branches.  */
#define __arm_vcmpgeq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpgeq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpgeq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpgeq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce3(p1, int), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce3(p1, int), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce3(p1, int), p2));})
8165795c | 39233 | |
/* Type-generic predicated vcmpgtq_m (signed only): same dispatch shape as
   __arm_vcmpgeq_m, selecting the vcmpgtq_m / vcmpgtq_m_n variants.  */
#define __arm_vcmpgtq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpgtq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpgtq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpgtq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpgtq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce3(p1, int), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpgtq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce3(p1, int), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpgtq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce3(p1, int), p2));})
/* Type-generic predicated vcmpleq_m (signed only): vector/vector and
   vector/scalar (_n) dispatch, mirroring __arm_vcmpgeq_m.  */
#define __arm_vcmpleq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpleq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpleq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpleq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpleq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce3(p1, int), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpleq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce3(p1, int), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpleq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce3(p1, int), p2));})
/* Type-generic predicated vcmpltq_m (signed only): vector/vector and
   vector/scalar (_n) dispatch, mirroring __arm_vcmpgeq_m.  */
#define __arm_vcmpltq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpltq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpltq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpltq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpltq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce3(p1, int), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpltq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce3(p1, int), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpltq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce3(p1, int), p2));})
/* Type-generic predicated vcmpneq_m: signed and unsigned vector/vector forms,
   followed by vector/scalar (_n) forms for both signednesses.  */
#define __arm_vcmpneq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpneq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpneq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpneq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpneq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpneq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpneq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce3(p1, int), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce3(p1, int), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce3(p1, int), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce3(p1, int), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce3(p1, int), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce3(p1, int), p2));})
1fa5a447 | 39280 | |
/* Type-generic predicated vdupq_m: p1 is a scalar (typeid __ARM_mve_type_int_n)
   cast to the element type of p0; the predicate p2 is passed through.  */
#define __arm_vdupq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vdupq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), (int8_t) __p1, p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vdupq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), (int16_t) __p1, p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vdupq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), (int32_t) __p1, p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vdupq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), (uint8_t) __p1, p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vdupq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), (uint16_t) __p1, p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vdupq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), (uint32_t) __p1, p2));})
8165795c | 39290 | |
/* Type-generic predicated vmaxaq_m: note the mixed signedness — p0 is an
   unsigned accumulator vector, p1 a signed vector of the same width.  */
#define __arm_vmaxaq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmaxaq_m_s8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmaxaq_m_s16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmaxaq_m_s32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
39297 | ||
/* Type-generic vmlaq: two same-typed vectors plus a scalar p2 (typeid
   __ARM_mve_type_int_n), covering both signed and unsigned widths.  */
#define __arm_vmlaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce3(p2, int)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce3(p2, int)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce3(p2, int)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce3(p2, int)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce3(p2, int)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce3(p2, int)));})
8165795c | 39308 | |
/* Type-generic vmlasq: same dispatch shape as __arm_vmlaq, selecting the
   __arm_vmlasq_n_* variants for both signednesses.  */
#define __arm_vmlasq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce3(p2, int)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce3(p2, int)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce3(p2, int)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce3(p2, int)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce3(p2, int)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce3(p2, int)));})
8165795c | 39319 | |
/* Type-generic predicated vnegq_m (signed only): dispatch on (p0, p1) to
   __arm_vnegq_m_{s8,s16,s32}; p2 is the predicate, passed through.  */
#define __arm_vnegq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vnegq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vnegq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vnegq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
39326 | ||
/* Type-generic vpselq: predicate-based lane select.  Unlike most overloads in
   this file it also covers the 64-bit element types (s64/u64).  */
#define __arm_vpselq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vpselq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vpselq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vpselq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_int64x2_t][__ARM_mve_type_int64x2_t]: __arm_vpselq_s64 (__ARM_mve_coerce(__p0, int64x2_t), __ARM_mve_coerce(__p1, int64x2_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vpselq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vpselq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vpselq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
  int (*)[__ARM_mve_type_uint64x2_t][__ARM_mve_type_uint64x2_t]: __arm_vpselq_u64 (__ARM_mve_coerce(__p0, uint64x2_t), __ARM_mve_coerce(__p1, uint64x2_t), p2));})
39338 | ||
/* Type-generic vqdmlahq (signed only): vector x vector x scalar-int form,
   dispatching to __arm_vqdmlahq_n_{s8,s16,s32}.  */
#define __arm_vqdmlahq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqdmlahq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce3(p2, int)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmlahq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce3(p2, int)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmlahq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce3(p2, int)));})
8165795c | 39346 | |
/* Type-generic vqdmlsdhq (signed only): three same-typed vector operands,
   dispatching to __arm_vqdmlsdhq_{s8,s16,s32}.  */
#define __arm_vqdmlsdhq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmlsdhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmlsdhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmlsdhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
39354 | ||
/* Type-generic vqdmladhxq (signed only): three same-typed vector operands,
   dispatching to __arm_vqdmladhxq_{s8,s16,s32}.  */
#define __arm_vqdmladhxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmladhxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmladhxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmladhxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
39362 | ||
/* Type-generic vqdmladhq (signed only): three same-typed vector operands,
   dispatching to __arm_vqdmladhq_{s8,s16,s32}.  */
#define __arm_vqdmladhq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmladhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmladhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmladhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
39370 | ||
/* Type-generic predicated vminaq_m: mixed signedness like __arm_vmaxaq_m —
   unsigned p0 with a signed p1 of matching element width.  */
#define __arm_vminaq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vminaq_m_s8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vminaq_m_s16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vminaq_m_s32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
39377 | ||
/* Type-generic predicated vmovlbq_m: widening form — p0 has twice the element
   width of p1 (e.g. int16x8_t result from an int8x16_t source).  */
#define __arm_vmovlbq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int8x16_t]: __arm_vmovlbq_m_s8 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t]: __arm_vmovlbq_m_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint8x16_t]: __arm_vmovlbq_m_u8 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t]: __arm_vmovlbq_m_u16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), p2));})
39385 | ||
/* Type-generic predicated vmovnbq_m: narrowing form — p0 has half the element
   width of p1; suffix follows the (wider) source element type.  */
#define __arm_vmovnbq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vmovnbq_m_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vmovnbq_m_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vmovnbq_m_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vmovnbq_m_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
39393 | ||
/* Type-generic predicated vmovntq_m: narrowing form, same dispatch shape as
   __arm_vmovnbq_m but selecting the vmovntq_m variants.  */
#define __arm_vmovntq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vmovntq_m_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vmovntq_m_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vmovntq_m_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vmovntq_m_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
39401 | ||
e3678b44 SP |
/* Type-generic overload: dispatches vshrnbq (shift right narrow, bottom)
   to the _n_s16/_n_s32/_n_u16/_n_u32 variant; p2 is the immediate shift.  */
#define __arm_vshrnbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vshrnbq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vshrnbq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vshrnbq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vshrnbq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
39409 | ||
e3678b44 SP |
/* Type-generic overload: dispatches vrshrnbq (rounding shift right narrow,
   bottom) to the _n_ variant matching the operand vector types.  */
#define __arm_vrshrnbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vrshrnbq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vrshrnbq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vrshrnbq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vrshrnbq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
39417 | ||
e3678b44 SP |
/* Type-generic overload: dispatches vrev32q_m (predicated reverse within
   32-bit halves) to the _s8/_s16/_u8/_u16 variant.  */
#define __arm_vrev32q_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrev32q_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrev32q_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrev32q_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrev32q_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2));})
39425 | ||
e3678b44 SP |
/* Type-generic overload: dispatches vqshruntq (saturating shift right
   unsigned narrow, top) — signed sources only (_n_s16/_n_s32).  */
#define __arm_vqshruntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshruntq_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshruntq_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
39431 | ||
e3678b44 SP |
/* Type-generic overload: dispatches vrev16q_m (predicated reverse within
   16-bit halves) — 8-bit element types only (_s8/_u8).  */
#define __arm_vrev16q_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrev16q_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrev16q_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2));})
39437 | ||
e3678b44 SP |
/* Type-generic overload: dispatches vqshrntq (saturating shift right
   narrow, top) to the _n_ variant matching the operand vector types.  */
#define __arm_vqshrntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshrntq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshrntq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqshrntq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqshrntq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
39445 | ||
e3678b44 SP |
/* Type-generic overload: dispatches vqrshruntq (saturating rounding shift
   right unsigned narrow, top) — signed sources only (_n_s16/_n_s32).  */
#define __arm_vqrshruntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshruntq_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshruntq_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
39451 | ||
e3678b44 SP |
/* Type-generic overload: dispatches vqrshrntq (saturating rounding shift
   right narrow, top) to the _n_ variant matching the operand vector types.  */
#define __arm_vqrshrntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshrntq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshrntq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqrshrntq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqrshrntq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
39459 | ||
e3678b44 SP |
/* Type-generic overload: dispatches vqshrnbq (saturating shift right
   narrow, bottom) to the _n_ variant matching the operand vector types.  */
#define __arm_vqshrnbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshrnbq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshrnbq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqshrnbq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqshrnbq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
39467 | ||
e3678b44 SP |
/* Type-generic overload: dispatches vqmovuntq_m (predicated saturating
   move unsigned narrow, top) — signed sources only (_s16/_s32).  */
#define __arm_vqmovuntq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovuntq_m_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovuntq_m_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
39473 | ||
e3678b44 SP |
/* Type-generic overload: dispatches vqmovntq_m (predicated saturating move
   narrow, top) to the _s16/_s32/_u16/_u32 variant.  */
#define __arm_vqmovntq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovntq_m_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovntq_m_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqmovntq_m_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqmovntq_m_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
39481 | ||
e3678b44 SP |
/* Type-generic overload: dispatches vqmovnbq_m (predicated saturating move
   narrow, bottom) to the _s16/_s32/_u16/_u32 variant.  */
#define __arm_vqmovnbq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovnbq_m_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovnbq_m_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqmovnbq_m_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqmovnbq_m_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
39489 | ||
e3678b44 SP |
/* Type-generic overload: dispatches vmovltq_m (predicated widen, top) to
   the _s8/_s16/_u8/_u16 variant; result is the double-width vector p0.  */
#define __arm_vmovltq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int8x16_t]: __arm_vmovltq_m_s8 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t]: __arm_vmovltq_m_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint8x16_t]: __arm_vmovltq_m_u8 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t]: __arm_vmovltq_m_u16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), p2));})
39497 | ||
e3678b44 SP |
/* Type-generic overload: dispatches vqmovunbq_m (predicated saturating
   move unsigned narrow, bottom) — signed sources only (_s16/_s32).  */
#define __arm_vqmovunbq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovunbq_m_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovunbq_m_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
39503 | ||
/* Type-generic overload: dispatches vsubq_m (predicated subtract).  Scalar
   second operands (int_n) select the _n_ variants via __ARM_mve_coerce3;
   vector operands select the element-wise variants.  */
#define __arm_vsubq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsubq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsubq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsubq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsubq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsubq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsubq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
e3678b44 | 39520 | |
6a90680b ASDV |
/* Type-generic overload: dispatches vabavq_p (predicated absolute
   difference and accumulate).  Selection uses only the vector operands
   p1/p2; the scalar accumulator p0 is passed through unchanged.  */
#define __arm_vabavq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vabavq_p_s8(__p0, __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vabavq_p_s16(__p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vabavq_p_s32(__p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vabavq_p_u8(__p0, __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vabavq_p_u16(__p0, __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vabavq_p_u32(__p0, __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
39531 | ||
/* Type-generic overload: dispatches vabdq_m (predicated absolute
   difference) to the variant matching the three vector operand types.  */
#define __arm_vabdq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vabdq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vabdq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vabdq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vabdq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vabdq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vabdq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
e3678b44 | 39542 | |
/* Type-generic overload: dispatches vandq_m (predicated bitwise AND) to
   the variant matching the three vector operand types.  */
#define __arm_vandq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vandq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vandq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vandq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vandq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vandq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vandq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
e3678b44 | 39553 | |
/* Type-generic overload: dispatches vbicq_m (predicated bit clear,
   AND-NOT) to the variant matching the three vector operand types.  */
#define __arm_vbicq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vbicq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vbicq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vbicq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vbicq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vbicq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vbicq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
e3678b44 | 39564 | |
/* Type-generic overload: dispatches vbrsrq_m (predicated bit-reverse shift
   right).  Selection uses p0/p1 only; the scalar shift __p2 is passed
   through to the _n_ variant.  */
#define __arm_vbrsrq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vbrsrq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __p2, p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vbrsrq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __p2, p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vbrsrq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __p2, p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vbrsrq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vbrsrq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __p2, p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vbrsrq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __p2, p3));})
e3678b44 | 39575 | |
/* Type-generic overload: dispatches vcaddq_rot270_m (predicated complex
   add with 270-degree rotate) to the variant matching the operand types.  */
#define __arm_vcaddq_rot270_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot270_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot270_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot270_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot270_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot270_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot270_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
8eb3b6b9 | 39586 | |
/* Type-generic overload: dispatches vcaddq_rot90_m (predicated complex
   add with 90-degree rotate) to the variant matching the operand types.  */
#define __arm_vcaddq_rot90_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot90_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot90_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot90_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot90_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot90_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot90_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
8eb3b6b9 | 39597 | |
/* Type-generic overload: dispatches veorq_m (predicated bitwise XOR) to
   the variant matching the three vector operand types.  */
#define __arm_veorq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_veorq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_veorq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_veorq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_veorq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_veorq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_veorq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
8eb3b6b9 | 39608 | |
6a90680b ASDV |
39609 | #define __arm_vmladavaq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ |
39610 | __typeof(p1) __p1 = (p1); \ | |
39611 | __typeof(p2) __p2 = (p2); \ | |
39612 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
31df339a SMW |
39613 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmladavaq_p_s8 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \ |
39614 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmladavaq_p_s16 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ | |
39615 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmladavaq_p_s32 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ | |
39616 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmladavaq_p_u8 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ | |
39617 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmladavaq_p_u16 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ | |
39618 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmladavaq_p_u32 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) | |
6a90680b | 39619 | |
8eb3b6b9 SP |
39620 | #define __arm_vornq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ |
39621 | __typeof(p1) __p1 = (p1); \ | |
39622 | __typeof(p2) __p2 = (p2); \ | |
39623 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
39624 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vornq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \ | |
39625 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vornq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ | |
39626 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vornq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ | |
39627 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vornq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ | |
39628 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vornq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ | |
39629 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vornq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) | |
39630 | ||
8eb3b6b9 SP |
39631 | #define __arm_vorrq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ |
39632 | __typeof(p1) __p1 = (p1); \ | |
39633 | __typeof(p2) __p2 = (p2); \ | |
39634 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
39635 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vorrq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \ | |
39636 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vorrq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ | |
39637 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vorrq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ | |
39638 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vorrq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ | |
39639 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vorrq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ | |
39640 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vorrq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) | |
39641 | ||
532e9e24 | 39642 | #define __arm_vaddq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ |
8eb3b6b9 SP |
39643 | __typeof(p1) __p1 = (p1); \ |
39644 | __typeof(p2) __p2 = (p2); \ | |
39645 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
e0dd75fe SMW |
39646 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce3(p2, int), p3), \ |
39647 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce3(p2, int), p3), \ | |
39648 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce3(p2, int), p3), \ | |
39649 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce3(p2, int), p3), \ | |
39650 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce3(p2, int), p3), \ | |
39651 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce3(p2, int), p3), \ | |
532e9e24 SP |
39652 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vaddq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \ |
39653 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vaddq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ | |
39654 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vaddq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ | |
39655 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vaddq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ | |
39656 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vaddq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ | |
39657 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vaddq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) | |
39658 | ||
532e9e24 SP |
39659 | #define __arm_vmulq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ |
39660 | __typeof(p1) __p1 = (p1); \ | |
39661 | __typeof(p2) __p2 = (p2); \ | |
39662 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
31df339a SMW |
39663 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce3(p2, int), p3), \ |
39664 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce3(p2, int), p3), \ | |
39665 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce3(p2, int), p3), \ | |
39666 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce3(p2, int), p3), \ | |
39667 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce3(p2, int), p3), \ | |
39668 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce3(p2, int), p3), \ | |
532e9e24 SP |
39669 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \ |
39670 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ | |
39671 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ | |
39672 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ | |
39673 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ | |
39674 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) | |
39675 | ||
4ff68575 SP |
39676 | #define __arm_vstrwq_scatter_base(p0,p1,p2) ({ __typeof(p2) __p2 = (p2); \ |
39677 | _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \ | |
39678 | int (*)[__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_base_s32(p0, p1, __ARM_mve_coerce(__p2, int32x4_t)), \ | |
39679 | int (*)[__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_base_u32(p0, p1, __ARM_mve_coerce(__p2, uint32x4_t)));}) | |
39680 | ||
3ce755a8 ASDV |
39681 | #define __arm_vldrbq_gather_offset(p0,p1) ({ __typeof(p1) __p1 = (p1); \ |
39682 | _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \ | |
b13f297f SP |
39683 | int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vldrbq_gather_offset_s8 (__ARM_mve_coerce1(p0, int8_t *), __ARM_mve_coerce(__p1, uint8x16_t)), \ |
39684 | int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrbq_gather_offset_s16 (__ARM_mve_coerce1(p0, int8_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \ | |
39685 | int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrbq_gather_offset_s32 (__ARM_mve_coerce1(p0, int8_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \ | |
39686 | int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vldrbq_gather_offset_u8 (__ARM_mve_coerce1(p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t)), \ | |
39687 | int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrbq_gather_offset_u16 (__ARM_mve_coerce1(p0, uint8_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \ | |
39688 | int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrbq_gather_offset_u32 (__ARM_mve_coerce1(p0, uint8_t *), __ARM_mve_coerce(__p1, uint32x4_t)));}) | |
535a8645 | 39689 | |
535a8645 SP |
39690 | #define __arm_vstrwq_scatter_base_p(p0,p1,p2,p3) ({ __typeof(p2) __p2 = (p2); \ |
39691 | _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \ | |
39692 | int (*)[__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_base_p_s32 (p0, p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \ | |
39693 | int (*)[__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_base_p_u32 (p0, p1, __ARM_mve_coerce(__p2, uint32x4_t), p3));}) | |
39694 | ||
b13f297f SP |
39695 | #define __arm_vld1q(p0) (\ |
39696 | _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \ | |
39697 | int (*)[__ARM_mve_type_int8_t_ptr]: __arm_vld1q_s8 (__ARM_mve_coerce1(p0, int8_t *)), \ | |
39698 | int (*)[__ARM_mve_type_int16_t_ptr]: __arm_vld1q_s16 (__ARM_mve_coerce1(p0, int16_t *)), \ | |
39699 | int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vld1q_s32 (__ARM_mve_coerce1(p0, int32_t *)), \ | |
39700 | int (*)[__ARM_mve_type_uint8_t_ptr]: __arm_vld1q_u8 (__ARM_mve_coerce1(p0, uint8_t *)), \ | |
39701 | int (*)[__ARM_mve_type_uint16_t_ptr]: __arm_vld1q_u16 (__ARM_mve_coerce1(p0, uint16_t *)), \ | |
39702 | int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vld1q_u32 (__ARM_mve_coerce1(p0, uint32_t *)))) | |
bf1e3d5a | 39703 | |
3ce755a8 ASDV |
39704 | #define __arm_vldrhq_gather_offset(p0,p1) ({ __typeof(p1) __p1 = (p1); \ |
39705 | _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \ | |
c6ffc89f SP |
39706 | int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_s16 (__ARM_mve_coerce1(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \ |
39707 | int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_offset_s32 (__ARM_mve_coerce1(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \ | |
39708 | int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_u16 (__ARM_mve_coerce1(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \ | |
39709 | int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_offset_u32 (__ARM_mve_coerce1(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t)));}) | |
bf1e3d5a | 39710 | |
3ce755a8 ASDV |
39711 | #define __arm_vldrhq_gather_offset_z(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \ |
39712 | _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \ | |
c6ffc89f SP |
39713 | int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_z_s16 (__ARM_mve_coerce1(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ |
39714 | int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_offset_z_s32 (__ARM_mve_coerce1(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \ | |
39715 | int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_z_u16 (__ARM_mve_coerce1(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ | |
39716 | int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_offset_z_u32 (__ARM_mve_coerce1(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) | |
bf1e3d5a | 39717 | |
3ce755a8 ASDV |
39718 | #define __arm_vldrhq_gather_shifted_offset(p0,p1) ({ __typeof(p1) __p1 = (p1); \ |
39719 | _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \ | |
c6ffc89f SP |
39720 | int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_s16 (__ARM_mve_coerce1(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \ |
39721 | int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_s32 (__ARM_mve_coerce1(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \ | |
39722 | int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_u16 (__ARM_mve_coerce1(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \ | |
39723 | int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_u32 (__ARM_mve_coerce1(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t)));}) | |
bf1e3d5a | 39724 | |
3ce755a8 ASDV |
39725 | #define __arm_vldrhq_gather_shifted_offset_z(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \ |
39726 | _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \ | |
c6ffc89f SP |
39727 | int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_z_s16 (__ARM_mve_coerce1(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ |
39728 | int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_z_s32 (__ARM_mve_coerce1(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \ | |
39729 | int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_z_u16 (__ARM_mve_coerce1(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ | |
39730 | int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_z_u32 (__ARM_mve_coerce1(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) | |
bf1e3d5a | 39731 | |
4cc23303 SP |
39732 | #define __arm_vldrwq_gather_offset(p0,p1) ({ __typeof(p0) __p0 = (p0); \ |
39733 | _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ | |
c6ffc89f SP |
39734 | int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vldrwq_gather_offset_s32 (__ARM_mve_coerce1(__p0, int32_t *), p1), \ |
39735 | int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vldrwq_gather_offset_u32 (__ARM_mve_coerce1(__p0, uint32_t *), p1));}) | |
4cc23303 | 39736 | |
4cc23303 SP |
39737 | #define __arm_vldrwq_gather_offset_z(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ |
39738 | _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ | |
c6ffc89f SP |
39739 | int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vldrwq_gather_offset_z_s32 (__ARM_mve_coerce1(__p0, int32_t *), p1, p2), \ |
39740 | int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vldrwq_gather_offset_z_u32 (__ARM_mve_coerce1(__p0, uint32_t *), p1, p2));}) | |
4cc23303 | 39741 | |
4cc23303 SP |
39742 | #define __arm_vldrwq_gather_shifted_offset(p0,p1) ({ __typeof(p0) __p0 = (p0); \ |
39743 | _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ | |
c6ffc89f SP |
39744 | int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vldrwq_gather_shifted_offset_s32 (__ARM_mve_coerce1(__p0, int32_t *), p1), \ |
39745 | int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vldrwq_gather_shifted_offset_u32 (__ARM_mve_coerce1(__p0, uint32_t *), p1));}) | |
4cc23303 | 39746 | |
4cc23303 SP |
39747 | #define __arm_vldrwq_gather_shifted_offset_z(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ |
39748 | _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ | |
c6ffc89f SP |
39749 | int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vldrwq_gather_shifted_offset_z_s32 (__ARM_mve_coerce1(__p0, int32_t *), p1, p2), \ |
39750 | int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vldrwq_gather_shifted_offset_z_u32 (__ARM_mve_coerce1(__p0, uint32_t *), p1, p2));}) | |
4cc23303 | 39751 | |
3ce755a8 ASDV |
39752 | #define __arm_vst1q(p0,p1) ({ __typeof(p1) __p1 = (p1); \ |
39753 | _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \ | |
39754 | int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16_t]: __arm_vst1q_s8 (__ARM_mve_coerce(p0, int8_t *), __ARM_mve_coerce(__p1, int8x16_t)), \ | |
39755 | int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vst1q_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, int16x8_t)), \ | |
39756 | int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vst1q_s32 (__ARM_mve_coerce(p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t)), \ | |
39757 | int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vst1q_u8 (__ARM_mve_coerce(p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t)), \ | |
39758 | int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vst1q_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \ | |
39759 | int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vst1q_u32 (__ARM_mve_coerce(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t)));}) | |
5cad47e0 | 39760 | |
3ce755a8 ASDV |
39761 | #define __arm_vst1q_p(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \ |
39762 | _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \ | |
39763 | int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16_t]: __arm_vst1q_p_s8 (__ARM_mve_coerce(p0, int8_t *), __ARM_mve_coerce(__p1, int8x16_t), p2), \ | |
39764 | int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vst1q_p_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
39765 | int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vst1q_p_s32 (__ARM_mve_coerce(p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t), p2), \ | |
39766 | int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vst1q_p_u8 (__ARM_mve_coerce(p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t), p2), \ | |
39767 | int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vst1q_p_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ | |
39768 | int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vst1q_p_u32 (__ARM_mve_coerce(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) | |
1dfcc3b5 | 39769 | |
3ce755a8 ASDV |
39770 | #define __arm_vst2q(p0,p1) ({ __typeof(p1) __p1 = (p1); \ |
39771 | _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \ | |
39772 | int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16x2_t]: __arm_vst2q_s8 (__ARM_mve_coerce(p0, int8_t *), __ARM_mve_coerce(__p1, int8x16x2_t)), \ | |
39773 | int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8x2_t]: __arm_vst2q_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, int16x8x2_t)), \ | |
39774 | int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4x2_t]: __arm_vst2q_s32 (__ARM_mve_coerce(p0, int32_t *), __ARM_mve_coerce(__p1, int32x4x2_t)), \ | |
39775 | int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16x2_t]: __arm_vst2q_u8 (__ARM_mve_coerce(p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16x2_t)), \ | |
39776 | int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8x2_t]: __arm_vst2q_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8x2_t)), \ | |
39777 | int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4x2_t]: __arm_vst2q_u32 (__ARM_mve_coerce(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4x2_t)));}) | |
1dfcc3b5 | 39778 | |
3ce755a8 ASDV |
39779 | #define __arm_vstrhq(p0,p1) ({ __typeof(p1) __p1 = (p1); \ |
39780 | _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \ | |
39781 | int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vstrhq_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, int16x8_t)), \ | |
39782 | int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrhq_s32 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, int32x4_t)), \ | |
39783 | int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \ | |
39784 | int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t)));}) | |
5cad47e0 | 39785 | |
3ce755a8 ASDV |
39786 | #define __arm_vstrhq_p(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \ |
39787 | _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \ | |
39788 | int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vstrhq_p_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
39789 | int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrhq_p_s32 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, int32x4_t), p2), \ | |
39790 | int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_p_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ | |
39791 | int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_p_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) | |
5cad47e0 | 39792 | |
3ce755a8 | 39793 | #define __arm_vstrhq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ |
5cad47e0 | 39794 | __typeof(p2) __p2 = (p2); \ |
3ce755a8 ASDV |
39795 | _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ |
39796 | int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_offset_p_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ | |
39797 | int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_offset_p_s32 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ | |
39798 | int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_offset_p_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ | |
39799 | int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_offset_p_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) | |
5cad47e0 | 39800 | |
3ce755a8 | 39801 | #define __arm_vstrhq_scatter_offset(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \ |
5cad47e0 | 39802 | __typeof(p2) __p2 = (p2); \ |
3ce755a8 ASDV |
39803 | _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ |
39804 | int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_offset_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \ | |
39805 | int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_offset_s32 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \ | |
39806 | int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_offset_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \ | |
39807 | int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_offset_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));}) | |
5cad47e0 | 39808 | |
3ce755a8 | 39809 | #define __arm_vstrhq_scatter_shifted_offset_p(p0,p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ |
5cad47e0 | 39810 | __typeof(p2) __p2 = (p2); \ |
3ce755a8 ASDV |
39811 | _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ |
39812 | int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ | |
39813 | int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_shifted_offset_p_s32 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ | |
39814 | int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ | |
39815 | int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_shifted_offset_p_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) | |
5cad47e0 | 39816 | |
3ce755a8 | 39817 | #define __arm_vstrhq_scatter_shifted_offset(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \ |
5cad47e0 | 39818 | __typeof(p2) __p2 = (p2); \ |
3ce755a8 ASDV |
39819 | _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ |
39820 | int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_shifted_offset_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \ | |
39821 | int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_shifted_offset_s32 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \ | |
39822 | int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_shifted_offset_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \ | |
39823 | int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_shifted_offset_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));}) | |
5cad47e0 SP |
39824 | |
39825 | ||
3ce755a8 ASDV |
39826 | #define __arm_vstrwq(p0,p1) ({ __typeof(p1) __p1 = (p1); \ |
39827 | _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \ | |
39828 | int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_s32 (__ARM_mve_coerce(p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t)), \ | |
39829 | int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_u32 (__ARM_mve_coerce(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t)));}) | |
5cad47e0 | 39830 | |
3ce755a8 ASDV |
39831 | #define __arm_vstrwq_p(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \ |
39832 | _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \ | |
39833 | int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_p_s32 (__ARM_mve_coerce(p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t), p2), \ | |
39834 | int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_p_u32 (__ARM_mve_coerce(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) | |
5cad47e0 | 39835 | |
6a90680b ASDV |
39836 | #define __arm_vstrdq_scatter_base_p(p0,p1,p2,p3) ({ __typeof(p2) __p2 = (p2); \ |
39837 | _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \ | |
39838 | int (*)[__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_base_p_s64 (p0, p1, __ARM_mve_coerce(__p2, int64x2_t), p3), \ | |
39839 | int (*)[__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_base_p_u64 (p0, p1, __ARM_mve_coerce(__p2, uint64x2_t), p3));}) | |
39840 | ||
39841 | #define __arm_vstrdq_scatter_base(p0,p1,p2) ({ __typeof(p2) __p2 = (p2); \ | |
39842 | _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \ | |
39843 | int (*)[__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_base_s64 (p0, p1, __ARM_mve_coerce(__p2, int64x2_t)), \ | |
39844 | int (*)[__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_base_u64 (p0, p1, __ARM_mve_coerce(__p2, uint64x2_t)));}) | |
39845 | ||
3ce755a8 | 39846 | #define __arm_vstrhq_scatter_offset(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \ |
7a5fffa5 | 39847 | __typeof(p2) __p2 = (p2); \ |
3ce755a8 ASDV |
39848 | _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ |
39849 | int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_offset_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \ | |
39850 | int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_offset_s32 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \ | |
39851 | int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_offset_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \ | |
39852 | int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_offset_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));}) | |
7a5fffa5 | 39853 | |
3ce755a8 | 39854 | #define __arm_vstrhq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ |
7a5fffa5 | 39855 | __typeof(p2) __p2 = (p2); \ |
3ce755a8 ASDV |
39856 | _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ |
39857 | int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_offset_p_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ | |
39858 | int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_offset_p_s32 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ | |
39859 | int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_offset_p_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ | |
39860 | int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_offset_p_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) | |
7a5fffa5 | 39861 | |
3ce755a8 | 39862 | #define __arm_vstrhq_scatter_shifted_offset(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \ |
7a5fffa5 | 39863 | __typeof(p2) __p2 = (p2); \ |
3ce755a8 ASDV |
39864 | _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ |
39865 | int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_shifted_offset_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \ | |
39866 | int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_shifted_offset_s32 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \ | |
39867 | int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_shifted_offset_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \ | |
39868 | int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_shifted_offset_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));}) | |
7a5fffa5 | 39869 | |
3ce755a8 | 39870 | #define __arm_vstrhq_scatter_shifted_offset_p(p0,p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ |
7a5fffa5 | 39871 | __typeof(p2) __p2 = (p2); \ |
3ce755a8 ASDV |
39872 | _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ |
39873 | int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ | |
39874 | int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_shifted_offset_p_s32 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ | |
39875 | int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ | |
39876 | int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_shifted_offset_p_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) | |
7a5fffa5 | 39877 | |
9b905ba9 | 39878 | #define __arm_vstrwq_scatter_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ |
7a5fffa5 | 39879 | __typeof(p2) __p2 = (p2); \ |
9b905ba9 SP |
39880 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \ |
39881 | int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_offset_s32 (__ARM_mve_coerce(__p0, int32_t *), p1, __ARM_mve_coerce(__p2, int32x4_t)), \ | |
39882 | int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_offset_u32 (__ARM_mve_coerce(__p0, uint32_t *), p1, __ARM_mve_coerce(__p2, uint32x4_t)));}) | |
7a5fffa5 | 39883 | |
9b905ba9 | 39884 | #define __arm_vstrwq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ |
7a5fffa5 | 39885 | __typeof(p2) __p2 = (p2); \ |
9b905ba9 SP |
39886 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \ |
39887 | int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_offset_p_s32 (__ARM_mve_coerce(__p0, int32_t *), p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \ | |
39888 | int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_offset_p_u32 (__ARM_mve_coerce(__p0, uint32_t *), p1, __ARM_mve_coerce(__p2, uint32x4_t), p3));}) | |
7a5fffa5 | 39889 | |
3ce755a8 | 39890 | #define __arm_vstrwq_scatter_shifted_offset(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \ |
7a5fffa5 | 39891 | __typeof(p2) __p2 = (p2); \ |
3ce755a8 ASDV |
39892 | _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p2)])0, \ |
39893 | int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_shifted_offset_s32 (__ARM_mve_coerce(p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t)), \ | |
39894 | int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_shifted_offset_u32 (__ARM_mve_coerce(p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t)));}) | |
7a5fffa5 | 39895 | |
3ce755a8 | 39896 | #define __arm_vstrwq_scatter_shifted_offset_p(p0,p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ |
7a5fffa5 | 39897 | __typeof(p2) __p2 = (p2); \ |
3ce755a8 ASDV |
39898 | _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p2)])0, \ |
39899 | int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_shifted_offset_p_s32 (__ARM_mve_coerce(p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \ | |
39900 | int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_shifted_offset_p_u32 (__ARM_mve_coerce(p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t), p3));}) | |
7a5fffa5 | 39901 | |
85a94e87 SP |
39902 | #define __arm_vuninitializedq(p0) ({ __typeof(p0) __p0 = (p0); \ |
39903 | _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ | |
39904 | int (*)[__ARM_mve_type_int8x16_t]: __arm_vuninitializedq_s8 (), \ | |
39905 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vuninitializedq_s16 (), \ | |
39906 | int (*)[__ARM_mve_type_int32x4_t]: __arm_vuninitializedq_s32 (), \ | |
39907 | int (*)[__ARM_mve_type_int64x2_t]: __arm_vuninitializedq_s64 (), \ | |
39908 | int (*)[__ARM_mve_type_uint8x16_t]: __arm_vuninitializedq_u8 (), \ | |
39909 | int (*)[__ARM_mve_type_uint16x8_t]: __arm_vuninitializedq_u16 (), \ | |
39910 | int (*)[__ARM_mve_type_uint32x4_t]: __arm_vuninitializedq_u32 (), \ | |
39911 | int (*)[__ARM_mve_type_uint64x2_t]: __arm_vuninitializedq_u64 ());}) | |
39912 | ||
85a94e87 SP |
39913 | #define __arm_vreinterpretq_s16(p0) ({ __typeof(p0) __p0 = (p0); \ |
39914 | _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ | |
39915 | int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_s16_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \ | |
39916 | int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_s16_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \ | |
39917 | int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_s16_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \ | |
39918 | int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_s16_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \ | |
39919 | int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_s16_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \ | |
39920 | int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_s16_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \ | |
39921 | int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_s16_u64 (__ARM_mve_coerce(__p0, uint64x2_t)));}) | |
39922 | ||
85a94e87 SP |
39923 | #define __arm_vreinterpretq_s32(p0) ({ __typeof(p0) __p0 = (p0); \ |
39924 | _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ | |
39925 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_s32_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \ | |
39926 | int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_s32_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \ | |
39927 | int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_s32_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \ | |
39928 | int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_s32_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \ | |
39929 | int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_s32_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \ | |
39930 | int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_s32_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \ | |
39931 | int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_s32_u64 (__ARM_mve_coerce(__p0, uint64x2_t)));}) | |
39932 | ||
85a94e87 SP |
39933 | #define __arm_vreinterpretq_s64(p0) ({ __typeof(p0) __p0 = (p0); \ |
39934 | _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ | |
39935 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_s64_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \ | |
39936 | int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_s64_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \ | |
39937 | int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_s64_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \ | |
39938 | int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_s64_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \ | |
39939 | int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_s64_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \ | |
39940 | int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_s64_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \ | |
39941 | int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_s64_u64 (__ARM_mve_coerce(__p0, uint64x2_t)));}) | |
39942 | ||
85a94e87 SP |
39943 | #define __arm_vreinterpretq_s8(p0) ({ __typeof(p0) __p0 = (p0); \ |
39944 | _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ | |
39945 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_s8_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \ | |
39946 | int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_s8_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \ | |
39947 | int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_s8_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \ | |
39948 | int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_s8_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \ | |
39949 | int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_s8_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \ | |
39950 | int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_s8_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \ | |
39951 | int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_s8_u64 (__ARM_mve_coerce(__p0, uint64x2_t)));}) | |
39952 | ||
85a94e87 SP |
39953 | #define __arm_vreinterpretq_u16(p0) ({ __typeof(p0) __p0 = (p0); \ |
39954 | _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ | |
39955 | int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_u16_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \ | |
39956 | int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_u16_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \ | |
39957 | int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_u16_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \ | |
39958 | int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_u16_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \ | |
39959 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_u16_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \ | |
39960 | int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_u16_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \ | |
39961 | int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_u16_u64 (__ARM_mve_coerce(__p0, uint64x2_t)));}) | |
39962 | ||
85a94e87 SP |
39963 | #define __arm_vreinterpretq_u32(p0) ({ __typeof(p0) __p0 = (p0); \ |
39964 | _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ | |
39965 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_u32_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \ | |
39966 | int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_u32_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \ | |
39967 | int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_u32_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \ | |
39968 | int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_u32_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \ | |
39969 | int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_u32_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \ | |
39970 | int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_u32_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \ | |
39971 | int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_u32_u64 (__ARM_mve_coerce(__p0, uint64x2_t)));}) | |
39972 | ||
85a94e87 SP |
39973 | #define __arm_vreinterpretq_u64(p0) ({ __typeof(p0) __p0 = (p0); \ |
39974 | _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ | |
39975 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_u64_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \ | |
39976 | int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_u64_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \ | |
39977 | int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_u64_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \ | |
39978 | int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_u64_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \ | |
39979 | int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_u64_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \ | |
39980 | int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_u64_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \ | |
39981 | int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_u64_s64 (__ARM_mve_coerce(__p0, int64x2_t)));}) | |
39982 | ||
85a94e87 SP |
39983 | #define __arm_vreinterpretq_u8(p0) ({ __typeof(p0) __p0 = (p0); \ |
39984 | _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ | |
39985 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_u8_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \ | |
39986 | int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_u8_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \ | |
39987 | int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_u8_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \ | |
39988 | int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_u8_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \ | |
39989 | int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_u8_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \ | |
39990 | int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_u8_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \ | |
39991 | int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_u8_u64 (__ARM_mve_coerce(__p0, uint64x2_t)));}) | |
39992 | ||
261014a1 SP |
39993 | #define __arm_vabsq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \ |
39994 | _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ | |
39995 | int (*)[__ARM_mve_type_int8x16_t]: __arm_vabsq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \ | |
39996 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vabsq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
39997 | int (*)[__ARM_mve_type_int32x4_t]: __arm_vabsq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2));}) | |
39998 | ||
261014a1 SP |
39999 | #define __arm_vaddq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ |
40000 | __typeof(p2) __p2 = (p2); \ | |
40001 | _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
40002 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vaddq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \ | |
40003 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vaddq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ | |
40004 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vaddq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ | |
31df339a SMW |
40005 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce3(p2, int), p3), \ |
40006 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce3(p2, int), p3), \ | |
40007 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce3(p2, int), p3), \ | |
261014a1 SP |
40008 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vaddq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ |
40009 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vaddq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ | |
40010 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vaddq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \ | |
31df339a SMW |
40011 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce3(p2, int), p3), \ |
40012 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce3(p2, int), p3), \ | |
40013 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce3(p2, int), p3));}) | |
261014a1 | 40014 | |
261014a1 SP |
40015 | #define __arm_vcaddq_rot270_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ |
40016 | __typeof(p2) __p2 = (p2); \ | |
40017 | _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
40018 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot270_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \ | |
40019 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot270_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ | |
40020 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot270_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ | |
40021 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot270_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ | |
40022 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot270_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ | |
40023 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot270_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) | |
40024 | ||
261014a1 SP |
40025 | #define __arm_vcaddq_rot90_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ |
40026 | __typeof(p2) __p2 = (p2); \ | |
40027 | _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
40028 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot90_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \ | |
40029 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot90_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ | |
40030 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot90_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ | |
40031 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot90_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ | |
40032 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot90_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ | |
40033 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot90_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) | |
40034 | ||
261014a1 SP |
40035 | #define __arm_veorq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ |
40036 | __typeof(p2) __p2 = (p2); \ | |
40037 | _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
40038 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_veorq_x_s8(__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \ | |
40039 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_veorq_x_s16(__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ | |
40040 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_veorq_x_s32(__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ | |
40041 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_veorq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ | |
40042 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_veorq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ | |
40043 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_veorq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) | |
40044 | ||
6a90680b ASDV |
40045 | #define __arm_vmovlbq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \ |
40046 | _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ | |
40047 | int (*)[__ARM_mve_type_int8x16_t]: __arm_vmovlbq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \ | |
40048 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vmovlbq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
40049 | int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmovlbq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2), \ | |
40050 | int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmovlbq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2));}) | |
40051 | ||
40052 | #define __arm_vmovltq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \ | |
40053 | _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ | |
40054 | int (*)[__ARM_mve_type_int8x16_t]: __arm_vmovltq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \ | |
40055 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vmovltq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
40056 | int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmovltq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2), \ | |
40057 | int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmovltq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2));}) | |
40058 | ||
40059 | #define __arm_vmulhq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ | |
40060 | __typeof(p2) __p2 = (p2); \ | |
40061 | _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
40062 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulhq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \ | |
40063 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulhq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ | |
40064 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulhq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ | |
40065 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulhq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ | |
40066 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulhq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ | |
40067 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulhq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) | |
40068 | ||
40069 | #define __arm_vmullbq_int_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ | |
40070 | __typeof(p2) __p2 = (p2); \ | |
40071 | _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
40072 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmullbq_int_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \ | |
40073 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmullbq_int_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ | |
40074 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmullbq_int_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ | |
40075 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmullbq_int_x_u8( __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ | |
40076 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmullbq_int_x_u16( __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ | |
40077 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmullbq_int_x_u32( __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) | |
40078 | ||
40079 | #define __arm_vmullbq_poly_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ | |
40080 | __typeof(p2) __p2 = (p2); \ | |
40081 | _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
40082 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmullbq_poly_x_p8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ | |
40083 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmullbq_poly_x_p16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3));}) | |
40084 | ||
40085 | #define __arm_vmulltq_int_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ | |
40086 | __typeof(p2) __p2 = (p2); \ | |
40087 | _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
40088 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulltq_int_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \ | |
40089 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulltq_int_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ | |
40090 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulltq_int_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ | |
40091 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulltq_int_x_u8( __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ | |
40092 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulltq_int_x_u16( __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ | |
40093 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulltq_int_x_u32( __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) | |
40094 | ||
40095 | #define __arm_vmulltq_poly_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ | |
40096 | __typeof(p2) __p2 = (p2); \ | |
40097 | _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
40098 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulltq_poly_x_p8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ | |
40099 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulltq_poly_x_p16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3));}) | |
40100 | ||
e81d0d9e | 40101 | #define __arm_vmulq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ |
261014a1 SP |
40102 | __typeof(p2) __p2 = (p2); \ |
40103 | _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
e81d0d9e SP |
40104 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \ |
40105 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ | |
40106 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ | |
31df339a SMW |
40107 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce3(p2, int), p3), \ |
40108 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce3(p2, int), p3), \ | |
40109 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce3(p2, int), p3), \ | |
e81d0d9e SP |
40110 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ |
40111 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ | |
40112 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \ | |
31df339a SMW |
40113 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce3(p2, int), p3), \ |
40114 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce3(p2, int), p3), \ | |
40115 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce3(p2, int), p3));}) | |
261014a1 | 40116 | |
e81d0d9e SP |
40117 | #define __arm_vnegq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \ |
40118 | _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ | |
40119 | int (*)[__ARM_mve_type_int8x16_t]: __arm_vnegq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \ | |
40120 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vnegq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
40121 | int (*)[__ARM_mve_type_int32x4_t]: __arm_vnegq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2));}) | |
40122 | ||
e81d0d9e | 40123 | #define __arm_vornq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ |
261014a1 SP |
40124 | __typeof(p2) __p2 = (p2); \ |
40125 | _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
40126 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vornq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \ | |
40127 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vornq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ | |
40128 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vornq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ | |
40129 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vornq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ | |
40130 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vornq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ | |
40131 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vornq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) | |
40132 | ||
261014a1 SP |
40133 | #define __arm_vorrq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ |
40134 | __typeof(p2) __p2 = (p2); \ | |
40135 | _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
40136 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vorrq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \ | |
40137 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vorrq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ | |
40138 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vorrq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ | |
40139 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vorrq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ | |
40140 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vorrq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ | |
40141 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vorrq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) | |
40142 | ||
261014a1 SP |
40143 | #define __arm_vrev32q_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \ |
40144 | _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ | |
40145 | int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev32q_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \ | |
40146 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vrev32q_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
40147 | int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev32q_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2), \ | |
40148 | int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrev32q_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2));}) | |
40149 | ||
261014a1 SP |
40150 | #define __arm_vrev64q_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \ |
40151 | _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ | |
40152 | int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev64q_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \ | |
40153 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vrev64q_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
40154 | int (*)[__ARM_mve_type_int32x4_t]: __arm_vrev64q_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2), \ | |
40155 | int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev64q_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2), \ | |
40156 | int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrev64q_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2), \ | |
40157 | int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrev64q_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2));}) | |
40158 | ||
261014a1 SP |
40159 | #define __arm_vabdq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ |
40160 | __typeof(p2) __p2 = (p2); \ | |
40161 | _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
40162 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vabdq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \ | |
40163 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vabdq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ | |
40164 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vabdq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ | |
40165 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vabdq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ | |
40166 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vabdq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ | |
40167 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vabdq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) | |
40168 | ||
261014a1 SP |
40169 | #define __arm_vandq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ |
40170 | __typeof(p2) __p2 = (p2); \ | |
40171 | _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
40172 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vandq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \ | |
40173 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vandq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ | |
40174 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vandq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ | |
40175 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vandq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ | |
40176 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vandq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ | |
40177 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vandq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) | |
40178 | ||
261014a1 SP |
40179 | #define __arm_vbicq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ |
40180 | __typeof(p2) __p2 = (p2); \ | |
40181 | _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
40182 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vbicq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \ | |
40183 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vbicq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ | |
40184 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vbicq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ | |
40185 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vbicq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ | |
40186 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vbicq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ | |
40187 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vbicq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) | |
40188 | ||
261014a1 SP |
40189 | #define __arm_vbrsrq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ |
40190 | _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ | |
40191 | int (*)[__ARM_mve_type_int8x16_t]: __arm_vbrsrq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2, p3), \ | |
40192 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vbrsrq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2, p3), \ | |
40193 | int (*)[__ARM_mve_type_int32x4_t]: __arm_vbrsrq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2, p3), \ | |
40194 | int (*)[__ARM_mve_type_uint8x16_t]: __arm_vbrsrq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \ | |
40195 | int (*)[__ARM_mve_type_uint16x8_t]: __arm_vbrsrq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \ | |
40196 | int (*)[__ARM_mve_type_uint32x4_t]: __arm_vbrsrq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2, p3));}) | |
40197 | ||
3ce755a8 | 40198 | #define __arm_vld1q_z(p0,p1) ( _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \ |
b13f297f SP |
40199 | int (*)[__ARM_mve_type_int8_t_ptr]: __arm_vld1q_z_s8 (__ARM_mve_coerce1(p0, int8_t *), p1), \ |
40200 | int (*)[__ARM_mve_type_int16_t_ptr]: __arm_vld1q_z_s16 (__ARM_mve_coerce1(p0, int16_t *), p1), \ | |
40201 | int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vld1q_z_s32 (__ARM_mve_coerce1(p0, int32_t *), p1), \ | |
40202 | int (*)[__ARM_mve_type_uint8_t_ptr]: __arm_vld1q_z_u8 (__ARM_mve_coerce1(p0, uint8_t *), p1), \ | |
40203 | int (*)[__ARM_mve_type_uint16_t_ptr]: __arm_vld1q_z_u16 (__ARM_mve_coerce1(p0, uint16_t *), p1), \ | |
40204 | int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vld1q_z_u32 (__ARM_mve_coerce1(p0, uint32_t *), p1))) | |
1dfcc3b5 | 40205 | |
3ce755a8 | 40206 | #define __arm_vld2q(p0) ( _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \ |
b13f297f SP |
40207 | int (*)[__ARM_mve_type_int8_t_ptr]: __arm_vld2q_s8 (__ARM_mve_coerce1(p0, int8_t *)), \ |
40208 | int (*)[__ARM_mve_type_int16_t_ptr]: __arm_vld2q_s16 (__ARM_mve_coerce1(p0, int16_t *)), \ | |
40209 | int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vld2q_s32 (__ARM_mve_coerce1(p0, int32_t *)), \ | |
40210 | int (*)[__ARM_mve_type_uint8_t_ptr]: __arm_vld2q_u8 (__ARM_mve_coerce1(p0, uint8_t *)), \ | |
40211 | int (*)[__ARM_mve_type_uint16_t_ptr]: __arm_vld2q_u16 (__ARM_mve_coerce1(p0, uint16_t *)), \ | |
40212 | int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vld2q_u32 (__ARM_mve_coerce1(p0, uint32_t *)))) | |
3ce755a8 | 40213 | |
1dfcc3b5 | 40214 | |
3ce755a8 | 40215 | #define __arm_vld4q(p0) ( _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \ |
b13f297f SP |
40216 | int (*)[__ARM_mve_type_int8_t_ptr]: __arm_vld4q_s8 (__ARM_mve_coerce1(p0, int8_t *)), \ |
40217 | int (*)[__ARM_mve_type_int16_t_ptr]: __arm_vld4q_s16 (__ARM_mve_coerce1(p0, int16_t *)), \ | |
40218 | int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vld4q_s32 (__ARM_mve_coerce1(p0, int32_t *)), \ | |
40219 | int (*)[__ARM_mve_type_uint8_t_ptr]: __arm_vld4q_u8 (__ARM_mve_coerce1(p0, uint8_t *)), \ | |
40220 | int (*)[__ARM_mve_type_uint16_t_ptr]: __arm_vld4q_u16 (__ARM_mve_coerce1(p0, uint16_t *)), \ | |
40221 | int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vld4q_u32 (__ARM_mve_coerce1(p0, uint32_t *)))) | |
1dfcc3b5 | 40222 | |
1a5c27b1 SP |
40223 | #define __arm_vgetq_lane(p0,p1) ({ __typeof(p0) __p0 = (p0); \ |
40224 | _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ | |
40225 | int (*)[__ARM_mve_type_int8x16_t]: __arm_vgetq_lane_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \ | |
40226 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vgetq_lane_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \ | |
40227 | int (*)[__ARM_mve_type_int32x4_t]: __arm_vgetq_lane_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \ | |
40228 | int (*)[__ARM_mve_type_int64x2_t]: __arm_vgetq_lane_s64 (__ARM_mve_coerce(__p0, int64x2_t), p1), \ | |
40229 | int (*)[__ARM_mve_type_uint8x16_t]: __arm_vgetq_lane_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \ | |
40230 | int (*)[__ARM_mve_type_uint16x8_t]: __arm_vgetq_lane_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \ | |
40231 | int (*)[__ARM_mve_type_uint32x4_t]: __arm_vgetq_lane_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1), \ | |
40232 | int (*)[__ARM_mve_type_uint64x2_t]: __arm_vgetq_lane_u64 (__ARM_mve_coerce(__p0, uint64x2_t), p1));}) | |
40233 | ||
1a5c27b1 SP |
40234 | #define __arm_vsetq_lane(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ |
40235 | __typeof(p1) __p1 = (p1); \ | |
40236 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
31df339a SMW |
40237 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t]: __arm_vsetq_lane_s8 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, int8x16_t), p2), \ |
40238 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t]: __arm_vsetq_lane_s16 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
40239 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vsetq_lane_s32 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, int32x4_t), p2), \ | |
40240 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int64x2_t]: __arm_vsetq_lane_s64 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, int64x2_t), p2), \ | |
40241 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint8x16_t]: __arm_vsetq_lane_u8 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, uint8x16_t), p2), \ | |
40242 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t]: __arm_vsetq_lane_u16 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ | |
40243 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t]: __arm_vsetq_lane_u32 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, uint32x4_t), p2), \ | |
40244 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint64x2_t]: __arm_vsetq_lane_u64 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, uint64x2_t), p2));}) | |
1a5c27b1 | 40245 | |
429d607b SP |
40246 | #endif /* MVE Integer. */ |
40247 | ||
6a90680b ASDV |
40248 | #define __arm_vshrntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ |
40249 | __typeof(p1) __p1 = (p1); \ | |
40250 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
40251 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vshrntq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
40252 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vshrntq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ | |
40253 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vshrntq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ | |
40254 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vshrntq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) | |
40255 | ||
40256 | ||
40257 | #define __arm_vrshrntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
40258 | __typeof(p1) __p1 = (p1); \ | |
40259 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
40260 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vrshrntq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
40261 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vrshrntq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ | |
40262 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vrshrntq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ | |
40263 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vrshrntq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) | |
40264 | ||
40265 | ||
261014a1 SP |
40266 | #define __arm_vmvnq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \ |
40267 | _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ | |
40268 | int (*)[__ARM_mve_type_int8x16_t]: __arm_vmvnq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \ | |
40269 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vmvnq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
40270 | int (*)[__ARM_mve_type_int32x4_t]: __arm_vmvnq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2), \ | |
40271 | int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmvnq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2), \ | |
40272 | int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmvnq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2), \ | |
40273 | int (*)[__ARM_mve_type_uint32x4_t]: __arm_vmvnq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2));}) | |
40274 | ||
261014a1 SP |
40275 | #define __arm_vrev16q_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \ |
40276 | _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ | |
40277 | int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev16q_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \ | |
40278 | int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev16q_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2));}) | |
40279 | ||
261014a1 SP |
40280 | #define __arm_vrhaddq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ |
40281 | __typeof(p2) __p2 = (p2); \ | |
40282 | _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
40283 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrhaddq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \ | |
40284 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrhaddq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ | |
40285 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrhaddq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ | |
40286 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrhaddq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ | |
40287 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrhaddq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ | |
40288 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrhaddq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) | |
40289 | ||
261014a1 SP |
40290 | #define __arm_vshlq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ |
40291 | __typeof(p2) __p2 = (p2); \ | |
40292 | _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
40293 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vshlq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \ | |
40294 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vshlq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ | |
40295 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vshlq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ | |
40296 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vshlq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \ | |
40297 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vshlq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ | |
40298 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vshlq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));}) | |
40299 | ||
261014a1 SP |
40300 | #define __arm_vrmulhq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ |
40301 | __typeof(p2) __p2 = (p2); \ | |
40302 | _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
40303 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrmulhq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \ | |
40304 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrmulhq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ | |
40305 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrmulhq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ | |
40306 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrmulhq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ | |
40307 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrmulhq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ | |
40308 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrmulhq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) | |
40309 | ||
261014a1 SP |
40310 | #define __arm_vrshlq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ |
40311 | __typeof(p2) __p2 = (p2); \ | |
40312 | _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
40313 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshlq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \ | |
40314 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ | |
40315 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ | |
40316 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshlq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \ | |
40317 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ | |
40318 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));}) | |
40319 | ||
261014a1 SP |
40320 | #define __arm_vrshrq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ |
40321 | _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ | |
40322 | int (*)[__ARM_mve_type_int8x16_t]: __arm_vrshrq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2, p3), \ | |
40323 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vrshrq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2, p3), \ | |
40324 | int (*)[__ARM_mve_type_int32x4_t]: __arm_vrshrq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2, p3), \ | |
40325 | int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrshrq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \ | |
40326 | int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrshrq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \ | |
40327 | int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrshrq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2, p3));}) | |
40328 | ||
261014a1 SP |
40329 | #define __arm_vshllbq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ |
40330 | _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ | |
40331 | int (*)[__ARM_mve_type_int8x16_t]: __arm_vshllbq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2, p3), \ | |
40332 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vshllbq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2, p3), \ | |
40333 | int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshllbq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \ | |
40334 | int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshllbq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2, p3));}) | |
40335 | ||
261014a1 SP |
40336 | #define __arm_vshlltq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ |
40337 | _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ | |
40338 | int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlltq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2, p3), \ | |
40339 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlltq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2, p3), \ | |
40340 | int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlltq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \ | |
40341 | int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlltq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2, p3));}) | |
40342 | ||
261014a1 SP |
40343 | #define __arm_vshlq_x_n(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ |
40344 | _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ | |
40345 | int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2, p3), \ | |
40346 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2, p3), \ | |
40347 | int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2, p3), \ | |
40348 | int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \ | |
40349 | int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \ | |
40350 | int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2, p3));}) | |
40351 | ||
261014a1 SP |
40352 | #define __arm_vdwdupq_x_u8(p1,p2,p3,p4) ({ __typeof(p1) __p1 = (p1); \ |
40353 | _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ | |
c6ffc89f | 40354 | int (*)[__ARM_mve_type_int_n]: __arm_vdwdupq_x_n_u8 ((uint32_t) __p1, p2, p3, p4), \ |
261014a1 SP |
40355 | int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_x_wb_u8 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4));}) |
40356 | ||
261014a1 SP |
40357 | #define __arm_vdwdupq_x_u16(p1,p2,p3,p4) ({ __typeof(p1) __p1 = (p1); \ |
40358 | _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ | |
c6ffc89f | 40359 | int (*)[__ARM_mve_type_int_n]: __arm_vdwdupq_x_n_u16 ((uint32_t) __p1, p2, p3, p4), \ |
261014a1 SP |
40360 | int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_x_wb_u16 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4));}) |
40361 | ||
261014a1 SP |
40362 | #define __arm_vdwdupq_x_u32(p1,p2,p3,p4) ({ __typeof(p1) __p1 = (p1); \ |
40363 | _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ | |
c6ffc89f | 40364 | int (*)[__ARM_mve_type_int_n]: __arm_vdwdupq_x_n_u32 ((uint32_t) __p1, p2, p3, p4), \ |
261014a1 SP |
40365 | int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_x_wb_u32 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4));}) |
40366 | ||
261014a1 SP |
40367 | #define __arm_viwdupq_x_u8(p1,p2,p3,p4) ({ __typeof(p1) __p1 = (p1); \ |
40368 | _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ | |
c6ffc89f | 40369 | int (*)[__ARM_mve_type_int_n]: __arm_viwdupq_x_n_u8 ((uint32_t) __p1, p2, p3, p4), \ |
261014a1 SP |
40370 | int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_x_wb_u8 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4));}) |
40371 | ||
261014a1 SP |
40372 | #define __arm_viwdupq_x_u16(p1,p2,p3,p4) ({ __typeof(p1) __p1 = (p1); \ |
40373 | _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ | |
c6ffc89f | 40374 | int (*)[__ARM_mve_type_int_n]: __arm_viwdupq_x_n_u16 ((uint32_t) __p1, p2, p3, p4), \ |
261014a1 SP |
40375 | int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_x_wb_u16 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4));}) |
40376 | ||
261014a1 SP |
40377 | #define __arm_viwdupq_x_u32(p1,p2,p3,p4) ({ __typeof(p1) __p1 = (p1); \ |
40378 | _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ | |
c6ffc89f | 40379 | int (*)[__ARM_mve_type_int_n]: __arm_viwdupq_x_n_u32 ((uint32_t) __p1, p2, p3, p4), \ |
261014a1 SP |
40380 | int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_x_wb_u32 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4));}) |
40381 | ||
261014a1 SP |
40382 | #define __arm_vidupq_x_u8(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ |
40383 | _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ | |
c6ffc89f | 40384 | int (*)[__ARM_mve_type_int_n]: __arm_vidupq_x_n_u8 ((uint32_t) __p1, p2, p3), \ |
261014a1 SP |
40385 | int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_x_wb_u8 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3));}) |
40386 | ||
261014a1 SP |
40387 | #define __arm_vddupq_x_u8(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ |
40388 | _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ | |
c6ffc89f | 40389 | int (*)[__ARM_mve_type_int_n]: __arm_vddupq_x_n_u8 ((uint32_t) __p1, p2, p3), \ |
261014a1 SP |
40390 | int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_x_wb_u8 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3));}) |
40391 | ||
261014a1 SP |
40392 | #define __arm_vidupq_x_u16(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ |
40393 | _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ | |
c6ffc89f | 40394 | int (*)[__ARM_mve_type_int_n]: __arm_vidupq_x_n_u16 ((uint32_t) __p1, p2, p3), \ |
261014a1 SP |
40395 | int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_x_wb_u16 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3));}) |
40396 | ||
261014a1 SP |
40397 | #define __arm_vddupq_x_u16(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ |
40398 | _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ | |
c6ffc89f | 40399 | int (*)[__ARM_mve_type_int_n]: __arm_vddupq_x_n_u16 ((uint32_t) __p1, p2, p3), \ |
261014a1 SP |
40400 | int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_x_wb_u16 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3));}) |
40401 | ||
261014a1 SP |
40402 | #define __arm_vidupq_x_u32(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ |
40403 | _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ | |
c6ffc89f | 40404 | int (*)[__ARM_mve_type_int_n]: __arm_vidupq_x_n_u32 ((uint32_t) __p1, p2, p3), \ |
261014a1 SP |
40405 | int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_x_wb_u32 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3));}) |
40406 | ||
261014a1 SP |
40407 | #define __arm_vddupq_x_u32(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ |
40408 | _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ | |
c6ffc89f | 40409 | int (*)[__ARM_mve_type_int_n]: __arm_vddupq_x_n_u32 ((uint32_t) __p1, p2, p3), \ |
261014a1 SP |
40410 | int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_x_wb_u32 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3));}) |
40411 | ||
261014a1 SP |
40412 | #define __arm_vshrq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ |
40413 | _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ | |
40414 | int (*)[__ARM_mve_type_int8x16_t]: __arm_vshrq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2, p3), \ | |
40415 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vshrq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2, p3), \ | |
40416 | int (*)[__ARM_mve_type_int32x4_t]: __arm_vshrq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2, p3), \ | |
40417 | int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshrq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \ | |
40418 | int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshrq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \ | |
40419 | int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshrq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2, p3));}) | |
40420 | ||
40421 | #define __arm_vhaddq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ | |
40422 | __typeof(p2) __p2 = (p2); \ | |
40423 | _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
31df339a SMW |
40424 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vhaddq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce3(p2, int), p3), \ |
40425 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vhaddq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce3(p2, int), p3), \ | |
40426 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vhaddq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce3(p2, int), p3), \ | |
40427 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vhaddq_x_n_u8( __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce3(p2, int), p3), \ | |
40428 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vhaddq_x_n_u16( __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce3(p2, int), p3), \ | |
40429 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vhaddq_x_n_u32( __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce3(p2, int), p3), \ | |
261014a1 SP |
40430 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhaddq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \ |
40431 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhaddq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ | |
40432 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhaddq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ | |
40433 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vhaddq_x_u8( __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ | |
40434 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vhaddq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ | |
40435 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vhaddq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) | |
40436 | ||
261014a1 SP |
40437 | #define __arm_vhcaddq_rot270_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ |
40438 | __typeof(p2) __p2 = (p2); \ | |
40439 | _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
40440 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhcaddq_rot270_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \ | |
40441 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhcaddq_rot270_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ | |
40442 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhcaddq_rot270_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));}) | |
40443 | ||
261014a1 SP |
40444 | #define __arm_vhcaddq_rot90_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ |
40445 | __typeof(p2) __p2 = (p2); \ | |
40446 | _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
40447 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhcaddq_rot90_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \ | |
40448 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhcaddq_rot90_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ | |
40449 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhcaddq_rot90_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));}) | |
40450 | ||
261014a1 SP |
40451 | #define __arm_vhsubq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ |
40452 | __typeof(p2) __p2 = (p2); \ | |
40453 | _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
31df339a SMW |
40454 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vhsubq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce3(p2, int), p3), \ |
40455 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vhsubq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce3(p2, int), p3), \ | |
40456 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vhsubq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce3(p2, int), p3), \ | |
40457 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vhsubq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce3(p2, int), p3), \ | |
40458 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vhsubq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce3(p2, int), p3), \ | |
40459 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vhsubq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce3(p2, int), p3), \ | |
261014a1 SP |
40460 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhsubq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \ |
40461 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhsubq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ | |
40462 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhsubq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ | |
40463 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vhsubq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ | |
40464 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vhsubq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ | |
40465 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vhsubq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) | |
40466 | ||
261014a1 SP |
40467 | #define __arm_vclsq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \ |
40468 | _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ | |
40469 | int (*)[__ARM_mve_type_int8x16_t]: __arm_vclsq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \ | |
40470 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vclsq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
40471 | int (*)[__ARM_mve_type_int32x4_t]: __arm_vclsq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2));}) | |
40472 | ||
261014a1 SP |
/* _Generic dispatch for the polymorphic vclzq_x intrinsic: selects the
   type-suffixed implementation from the MVE typeid of the vector argument
   __p1; p2 is forwarded unchanged.  Covers all signed and unsigned
   element widths.  */
#define __arm_vclzq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vclzq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vclzq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vclzq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vclzq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vclzq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vclzq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2));})
40481 | ||
261014a1 SP |
/* _Generic dispatch for the polymorphic vadciq intrinsic: both vector
   operands must have matching MVE typeids (s32 or u32); p2 is forwarded
   unchanged.  */
#define __arm_vadciq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vadciq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vadciq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
40487 | ||
41e1a7ff SP |
/* _Generic dispatch for the predicated vstrdq_scatter_base_wb intrinsic:
   the selection key is the MVE typeid of the value vector __p2 (s64/u64);
   p0, p1 and p3 are forwarded unchanged.  */
#define __arm_vstrdq_scatter_base_wb_p(p0,p1,p2,p3) ({ __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_base_wb_p_s64 (p0, p1, __ARM_mve_coerce(__p2, int64x2_t), p3), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_base_wb_p_u64 (p0, p1, __ARM_mve_coerce(__p2, uint64x2_t), p3));})
40492 | ||
41e1a7ff SP |
/* _Generic dispatch for the vstrdq_scatter_base_wb intrinsic: the
   selection key is the MVE typeid of the value vector __p2 (s64/u64);
   p0 and p1 are forwarded unchanged.  */
#define __arm_vstrdq_scatter_base_wb(p0,p1,p2) ({ __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_base_wb_s64 (p0, p1, __ARM_mve_coerce(__p2, int64x2_t)), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_base_wb_u64 (p0, p1, __ARM_mve_coerce(__p2, uint64x2_t)));})
40497 | ||
3ce755a8 | 40498 | #define __arm_vldrdq_gather_offset(p0,p1) ( _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \ |
c6ffc89f SP |
40499 | int (*)[__ARM_mve_type_int64_t_ptr]: __arm_vldrdq_gather_offset_s64 (__ARM_mve_coerce1(p0, int64_t *), p1), \ |
40500 | int (*)[__ARM_mve_type_uint64_t_ptr]: __arm_vldrdq_gather_offset_u64 (__ARM_mve_coerce1(p0, uint64_t *), p1))) | |
4cc23303 | 40501 | |
/* _Generic dispatch for the predicated (zeroing) vldrdq_gather_offset
   intrinsic: selects by the MVE typeid of the base pointer p0; p1 and
   p2 are forwarded unchanged.  */
#define __arm_vldrdq_gather_offset_z(p0,p1,p2) ( _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
  int (*)[__ARM_mve_type_int64_t_ptr]: __arm_vldrdq_gather_offset_z_s64 (__ARM_mve_coerce1(p0, int64_t *), p1, p2), \
  int (*)[__ARM_mve_type_uint64_t_ptr]: __arm_vldrdq_gather_offset_z_u64 (__ARM_mve_coerce1(p0, uint64_t *), p1, p2)))
4cc23303 | 40505 | |
/* _Generic dispatch for the vldrdq_gather_shifted_offset intrinsic:
   selects by the MVE typeid of the base pointer p0; p1 is forwarded
   unchanged.  */
#define __arm_vldrdq_gather_shifted_offset(p0,p1) ( _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
  int (*)[__ARM_mve_type_int64_t_ptr]: __arm_vldrdq_gather_shifted_offset_s64 (__ARM_mve_coerce1(p0, int64_t *), p1), \
  int (*)[__ARM_mve_type_uint64_t_ptr]: __arm_vldrdq_gather_shifted_offset_u64 (__ARM_mve_coerce1(p0, uint64_t *), p1)))
3ce755a8 ASDV |
40509 | |
/* _Generic dispatch for the predicated (zeroing) vldrdq_gather_shifted_offset
   intrinsic: selects by the MVE typeid of the base pointer p0; p1 and p2
   are forwarded unchanged.  */
#define __arm_vldrdq_gather_shifted_offset_z(p0,p1,p2) ( _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
  int (*)[__ARM_mve_type_int64_t_ptr]: __arm_vldrdq_gather_shifted_offset_z_s64 (__ARM_mve_coerce1(p0, int64_t *), p1, p2), \
  int (*)[__ARM_mve_type_uint64_t_ptr]: __arm_vldrdq_gather_shifted_offset_z_u64 (__ARM_mve_coerce1(p0, uint64_t *), p1, p2)))
4cc23303 | 40513 | |
c3562f81 SP |
/* _Generic dispatch for the merging-predicated vadciq intrinsic: all
   three vector operands must share the same MVE typeid (s32 or u32);
   p3 and p4 are forwarded unchanged.  */
#define __arm_vadciq_m(p0,p1,p2,p3,p4) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vadciq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3, p4), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vadciq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3, p4));})
40520 | ||
c3562f81 SP |
/* NOTE(review): this is a second, textually identical definition of
   __arm_vadciq (first occurrence earlier in this file).  An identical
   macro redefinition is legal C and harmless, but the duplicate is
   redundant and could be removed upstream.  Kept here to preserve the
   file layout.  */
#define __arm_vadciq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vadciq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vadciq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
40526 | ||
c3562f81 SP |
/* _Generic dispatch for the merging-predicated vadcq intrinsic: all
   three vector operands must share the same MVE typeid (s32 or u32);
   p3 and p4 are forwarded unchanged.  */
#define __arm_vadcq_m(p0,p1,p2,p3,p4) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vadcq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3, p4), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vadcq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3, p4));})
40533 | ||
c3562f81 SP |
/* _Generic dispatch for the vadcq intrinsic: both vector operands must
   share the same MVE typeid (s32 or u32); p2 is forwarded unchanged.  */
#define __arm_vadcq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vadcq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vadcq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
40539 | ||
c3562f81 SP |
/* _Generic dispatch for the merging-predicated vsbciq intrinsic: all
   three vector operands must share the same MVE typeid (s32 or u32);
   p3 and p4 are forwarded unchanged.  */
#define __arm_vsbciq_m(p0,p1,p2,p3,p4) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsbciq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3, p4), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsbciq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3, p4));})
40546 | ||
c3562f81 SP |
/* _Generic dispatch for the vsbciq intrinsic: both vector operands must
   share the same MVE typeid (s32 or u32); p2 is forwarded unchanged.  */
#define __arm_vsbciq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsbciq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsbciq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
40552 | ||
c3562f81 SP |
/* _Generic dispatch for the merging-predicated vsbcq intrinsic: all
   three vector operands must share the same MVE typeid (s32 or u32);
   p3 and p4 are forwarded unchanged.  */
#define __arm_vsbcq_m(p0,p1,p2,p3,p4) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsbcq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3, p4), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsbcq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3, p4));})
40559 | ||
c3562f81 SP |
/* _Generic dispatch for the vsbcq intrinsic: both vector operands must
   share the same MVE typeid (s32 or u32); p2 is forwarded unchanged.  */
#define __arm_vsbcq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsbcq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsbcq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
4cc23303 | 40565 | |
3ce755a8 ASDV |
/* _Generic dispatch for the predicated (zeroing) vldrbq_gather_offset
   intrinsic: selected by the pair (base-pointer typeid of p0, offset
   vector typeid of __p1); p2 is forwarded unchanged.  Note p0 is used
   un-copied so its typeid is taken from the raw argument.  */
#define __arm_vldrbq_gather_offset_z(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vldrbq_gather_offset_z_s8 (__ARM_mve_coerce1(p0, int8_t *), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrbq_gather_offset_z_s16 (__ARM_mve_coerce1(p0, int8_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrbq_gather_offset_z_s32 (__ARM_mve_coerce1(p0, int8_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vldrbq_gather_offset_z_u8 (__ARM_mve_coerce1(p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrbq_gather_offset_z_u16 (__ARM_mve_coerce1(p0, uint8_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrbq_gather_offset_z_u32 (__ARM_mve_coerce1(p0, uint8_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
535a8645 | 40574 | |
8eb3b6b9 SP |
/* _Generic dispatch for the merging-predicated vqrdmlahq intrinsic:
   vector/vector/scalar forms only (signed types); the scalar p2 goes
   through __ARM_mve_coerce3 on the raw argument, p3 is forwarded
   unchanged.  */
#define __arm_vqrdmlahq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqrdmlahq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrdmlahq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrdmlahq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce3(p2, int), p3));})
8eb3b6b9 | 40582 | |
8eb3b6b9 SP |
/* _Generic dispatch for the merging-predicated vqrdmlashq intrinsic:
   vector/vector/scalar forms only (signed types); scalar p2 coerced via
   __ARM_mve_coerce3, p3 forwarded unchanged.  */
#define __arm_vqrdmlashq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqrdmlashq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrdmlashq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrdmlashq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce3(p2, int), p3));})
8eb3b6b9 | 40590 | |
afb198ee CL |
/* _Generic dispatch for the merging-predicated vqdmlashq intrinsic:
   vector/vector/scalar forms only (signed types); scalar p2 coerced via
   __ARM_mve_coerce3, p3 forwarded unchanged.  */
#define __arm_vqdmlashq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqdmlashq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmlashq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmlashq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce3(p2, int), p3));})
afb198ee | 40598 | |
8eb3b6b9 SP |
/* _Generic dispatch for the merging-predicated vqrshlq intrinsic: the
   shift-amount vector __p2 is always a signed vector of matching width,
   even for unsigned __p0/__p1; p3 forwarded unchanged.  */
#define __arm_vqrshlq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrshlq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrshlq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrshlq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrshlq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrshlq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrshlq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
40609 | ||
8eb3b6b9 SP |
/* _Generic dispatch for the merging-predicated vqshlq immediate-shift
   form: selected by the typeids of the two vector operands; the
   immediate p2 and predicate p3 are forwarded unchanged.  */
#define __arm_vqshlq_m_n(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqshlq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqshlq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqshlq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vqshlq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vqshlq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vqshlq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
40619 | ||
8eb3b6b9 SP |
/* _Generic dispatch for the merging-predicated vqshlq intrinsic: the
   shift-amount vector __p2 is always a signed vector of matching width,
   even for unsigned __p0/__p1; p3 forwarded unchanged.  */
#define __arm_vqshlq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqshlq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqshlq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqshlq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqshlq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqshlq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqshlq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
40630 | ||
8eb3b6b9 SP |
/* _Generic dispatch for the merging-predicated vrhaddq intrinsic: all
   three vector operands share the same MVE typeid; p3 forwarded
   unchanged.  */
#define __arm_vrhaddq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrhaddq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrhaddq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrhaddq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrhaddq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrhaddq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrhaddq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
40641 | ||
8eb3b6b9 SP |
/* _Generic dispatch for the merging-predicated vrmulhq intrinsic: all
   three vector operands share the same MVE typeid; p3 forwarded
   unchanged.  */
#define __arm_vrmulhq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrmulhq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrmulhq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrmulhq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrmulhq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrmulhq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrmulhq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
40652 | ||
/* _Generic dispatch for the merging-predicated vrshlq intrinsic: the
   shift-amount vector __p2 is always a signed vector of matching width,
   even for unsigned __p0/__p1; p3 forwarded unchanged.  */
#define __arm_vrshlq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshlq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshlq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
8eb3b6b9 | 40663 | |
/* _Generic dispatch for the merging-predicated vrshrq immediate-shift
   intrinsic: selected by the typeids of the two vector operands and
   mapped onto the _n_ implementations; immediate p2 and predicate p3
   forwarded unchanged.  */
#define __arm_vrshrq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshrq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshrq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshrq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrshrq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrshrq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrshrq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
40673 | ||
532e9e24 SP |
/* _Generic dispatch for the merging-predicated vshrq immediate-shift
   intrinsic: selected by the typeids of the two vector operands and
   mapped onto the _n_ implementations; immediate p2 and predicate p3
   forwarded unchanged.  */
#define __arm_vshrq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vshrq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vshrq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vshrq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vshrq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vshrq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vshrq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
40683 | ||
532e9e24 SP |
/* _Generic dispatch for the merging-predicated vsliq immediate intrinsic:
   selected by the typeids of the two vector operands and mapped onto the
   _n_ implementations; immediate p2 and predicate p3 forwarded
   unchanged.  */
#define __arm_vsliq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsliq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsliq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsliq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsliq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsliq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsliq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
8eb3b6b9 | 40693 | |
8eb3b6b9 SP |
/* _Generic dispatch for the merging-predicated vqsubq intrinsic.  Two
   operand shapes are handled: vector/scalar (p2 has typeid int_n, routed
   to the _n_ implementations via __ARM_mve_coerce3) and vector/vector
   (all operands share a typeid).  p3 is forwarded unchanged.  */
#define __arm_vqsubq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqsubq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqsubq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqsubq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vqsubq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vqsubq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vqsubq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqsubq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqsubq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqsubq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vqsubq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vqsubq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vqsubq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
40710 | ||
8eb3b6b9 SP |
/* _Generic dispatch for the merging-predicated vqrdmulhq intrinsic.
   Handles vector/vector (signed only) and vector/scalar (p2 typeid
   int_n, routed to the _n_ implementations via __ARM_mve_coerce3);
   p3 forwarded unchanged.  */
#define __arm_vqrdmulhq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmulhq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmulhq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmulhq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqrdmulhq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrdmulhq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrdmulhq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce3(p2, int), p3));})
8eb3b6b9 | 40721 | |
8eb3b6b9 SP |
/* _Generic dispatch for the merging-predicated vqrdmlsdhxq intrinsic:
   all three vector operands share the same signed MVE typeid; p3
   forwarded unchanged.  */
#define __arm_vqrdmlsdhxq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmlsdhxq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmlsdhxq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmlsdhxq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
40729 | ||
8eb3b6b9 SP |
/* __arm_vqrdmlsdhq_m: overloaded name resolved via _Generic on the three
   operand vector types to the _s8/_s16/_s32 variant; p3 is the predicate.  */
#define __arm_vqrdmlsdhq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmlsdhq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmlsdhq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmlsdhq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
40737 | ||
532e9e24 SP |
/* __arm_vshllbq_m: dispatch on (widened result, source) vector types to the
   _n_s8/_n_s16/_n_u8/_n_u16 variants; p2 is the shift, p3 the predicate.  */
#define __arm_vshllbq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int8x16_t]: __arm_vshllbq_m_n_s8 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t]: __arm_vshllbq_m_n_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint8x16_t]: __arm_vshllbq_m_n_u8 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t]: __arm_vshllbq_m_n_u16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3));})
40745 | ||
532e9e24 SP |
/* __arm_vshrntq_m: dispatch on (narrow result, wide source) vector types to
   the _n_s16/_n_s32/_n_u16/_n_u32 variants; p2 is the shift, p3 the predicate.  */
#define __arm_vshrntq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vshrntq_m_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vshrntq_m_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vshrntq_m_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vshrntq_m_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
40753 | ||
532e9e24 SP |
/* __arm_vshrnbq_m: dispatch on (narrow result, wide source) vector types to
   the _n_s16/_n_s32/_n_u16/_n_u32 variants; p2 is the shift, p3 the predicate.  */
#define __arm_vshrnbq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vshrnbq_m_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vshrnbq_m_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vshrnbq_m_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vshrnbq_m_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
40761 | ||
532e9e24 SP |
/* __arm_vshlltq_m: dispatch on (widened result, source) vector types to the
   _n_s8/_n_s16/_n_u8/_n_u16 variants; p2 is the shift, p3 the predicate.  */
#define __arm_vshlltq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int8x16_t]: __arm_vshlltq_m_n_s8 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t]: __arm_vshlltq_m_n_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint8x16_t]: __arm_vshlltq_m_n_u8 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t]: __arm_vshlltq_m_n_u16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3));})
40769 | ||
532e9e24 SP |
/* __arm_vrshrntq_m: dispatch on (narrow result, wide source) vector types to
   the _n_s16/_n_s32/_n_u16/_n_u32 variants; p2 is the shift, p3 the predicate.  */
#define __arm_vrshrntq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vrshrntq_m_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vrshrntq_m_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vrshrntq_m_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vrshrntq_m_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
40777 | ||
532e9e24 SP |
/* __arm_vqshruntq_m: signed-to-unsigned narrowing form, so only _n_s16 and
   _n_s32 variants exist; dispatch on (unsigned result, signed source) types.  */
#define __arm_vqshruntq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshruntq_m_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshruntq_m_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3));})
40783 | ||
532e9e24 SP |
/* __arm_vqshrunbq_m: signed-to-unsigned narrowing form, so only _n_s16 and
   _n_s32 variants exist; dispatch on (unsigned result, signed source) types.  */
#define __arm_vqshrunbq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshrunbq_m_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshrunbq_m_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3));})
40789 | ||
532e9e24 SP |
/* __arm_vqrshrnbq_m: dispatch on (narrow result, wide source) vector types to
   the _n_s16/_n_s32/_n_u16/_n_u32 variants; p2 is the shift, p3 the predicate.  */
#define __arm_vqrshrnbq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshrnbq_m_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshrnbq_m_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqrshrnbq_m_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqrshrnbq_m_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
40797 | ||
532e9e24 SP |
/* __arm_vqrshrntq_m: dispatch on (narrow result, wide source) vector types to
   the _n_s16/_n_s32/_n_u16/_n_u32 variants; p2 is the shift, p3 the predicate.  */
#define __arm_vqrshrntq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshrntq_m_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshrntq_m_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqrshrntq_m_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqrshrntq_m_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
40805 | ||
532e9e24 SP |
/* __arm_vqrshrunbq_m: signed-to-unsigned narrowing form, so only _n_s16 and
   _n_s32 variants exist; dispatch on (unsigned result, signed source) types.  */
#define __arm_vqrshrunbq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshrunbq_m_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshrunbq_m_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3));})
40811 | ||
532e9e24 SP |
/* __arm_vqrshruntq_m: signed-to-unsigned narrowing form, so only _n_s16 and
   _n_s32 variants exist; dispatch on (unsigned result, signed source) types.  */
#define __arm_vqrshruntq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshruntq_m_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshruntq_m_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3));})
40817 | ||
532e9e24 SP |
/* __arm_vqshrnbq_m: dispatch on (narrow result, wide source) vector types to
   the _n_s16/_n_s32/_n_u16/_n_u32 variants; p2 is the shift, p3 the predicate.  */
#define __arm_vqshrnbq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshrnbq_m_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshrnbq_m_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqshrnbq_m_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqshrnbq_m_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
40825 | ||
532e9e24 SP |
/* __arm_vqshrntq_m: dispatch on (narrow result, wide source) vector types to
   the _n_s16/_n_s32/_n_u16/_n_u32 variants; p2 is the shift, p3 the predicate.  */
#define __arm_vqshrntq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshrntq_m_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshrntq_m_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqshrntq_m_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqshrntq_m_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
40833 | ||
532e9e24 SP |
/* __arm_vrshrnbq_m: dispatch on (narrow result, wide source) vector types to
   the _n_s16/_n_s32/_n_u16/_n_u32 variants; p2 is the shift, p3 the predicate.  */
#define __arm_vrshrnbq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vrshrnbq_m_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vrshrnbq_m_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vrshrnbq_m_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vrshrnbq_m_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
40841 | ||
/* __arm_vmlaldavaq_p: the scalar accumulator p0 is matched as
   __ARM_mve_type_int_n via __ARM_mve_coerce3; the vector operands select
   the _s16/_s32/_u16/_u32 variant; p3 is the predicate.  */
#define __arm_vmlaldavaq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlaldavaq_p_s16 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlaldavaq_p_s32 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmlaldavaq_p_u16 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmlaldavaq_p_u32 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
8eb3b6b9 | 40850 | |
/* __arm_vmlaldavaxq_p: signed-only (exchange) form; scalar accumulator p0
   matched as int_n via __ARM_mve_coerce3, vectors select _s16/_s32.  */
#define __arm_vmlaldavaxq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlaldavaxq_p_s16 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlaldavaxq_p_s32 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
8eb3b6b9 | 40857 | |
/* __arm_vmlsldavaq_p: signed-only; dispatch on the two vector operand types
   (the accumulator __p0 is passed through unchanged); p3 is the predicate.  */
#define __arm_vmlsldavaq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsldavaq_p_s16(__p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsldavaq_p_s32(__p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
8eb3b6b9 | 40864 | |
/* __arm_vmlsldavaxq_p: signed-only (exchange) form; dispatch on the two vector
   operand types (the accumulator __p0 is passed through unchanged).  */
#define __arm_vmlsldavaxq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsldavaxq_p_s16(__p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsldavaxq_p_s32(__p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
8eb3b6b9 | 40871 | |
532e9e24 SP |
/* These three overloaded names have only an _s32 variant, so they forward
   directly without _Generic dispatch.  */
#define __arm_vrmlaldavhaxq_p(p0,p1,p2,p3) __arm_vrmlaldavhaxq_p_s32(p0,p1,p2,p3)

#define __arm_vrmlsldavhaq_p(p0,p1,p2,p3) __arm_vrmlsldavhaq_p_s32(p0,p1,p2,p3)

#define __arm_vrmlsldavhaxq_p(p0,p1,p2,p3) __arm_vrmlsldavhaxq_p_s32(p0,p1,p2,p3)
40877 | ||
532e9e24 SP |
/* __arm_vqdmladhq_m: overloaded name resolved via _Generic on the three
   operand vector types to the _s8/_s16/_s32 variant; p3 is the predicate.  */
#define __arm_vqdmladhq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmladhq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmladhq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmladhq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
40885 | ||
532e9e24 SP |
/* __arm_vqdmladhxq_m: overloaded name resolved via _Generic on the three
   operand vector types to the _s8/_s16/_s32 variant; p3 is the predicate.  */
#define __arm_vqdmladhxq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmladhxq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmladhxq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmladhxq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
8eb3b6b9 | 40893 | |
/* __arm_vqdmlsdhq_m: overloaded name resolved via _Generic on the three
   operand vector types to the _s8/_s16/_s32 variant; p3 is the predicate.  */
#define __arm_vqdmlsdhq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmlsdhq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmlsdhq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmlsdhq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
f2170a37 | 40901 | |
/* __arm_vqdmlsdhxq_m: overloaded name resolved via _Generic on the three
   operand vector types to the _s8/_s16/_s32 variant; p3 is the predicate.  */
#define __arm_vqdmlsdhxq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmlsdhxq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmlsdhxq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmlsdhxq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
f2170a37 | 40909 | |
/* __arm_vqabsq_m: signed-only; dispatch on (inactive, source) vector types to
   the _s8/_s16/_s32 variant; p2 is the predicate.  */
#define __arm_vqabsq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqabsq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqabsq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqabsq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
f2170a37 | 40916 | |
/* __arm_vmvnq_m: vector/vector pairs select the _s8.._u32 variants; a vector
   paired with a plain integer (matched as int_n via __ARM_mve_coerce1) selects
   the immediate _n_ variants; p2 is the predicate.  */
#define __arm_vmvnq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmvnq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmvnq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmvnq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmvnq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmvnq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmvnq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmvnq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce1(__p1, int), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmvnq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce1(__p1, int), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmvnq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce1(__p1, int), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmvnq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce1(__p1, int), p2));})
f2170a37 | 40930 | |
532e9e24 SP |
/* __arm_vorrq_m_n: dispatch on the destination vector type only; p1 is the
   immediate and p2 the predicate.  */
#define __arm_vorrq_m_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vorrq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vorrq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vorrq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vorrq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})
40937 | ||
/* __arm_vqshrunbq (unpredicated): signed-to-unsigned narrowing, so only
   _n_s16/_n_s32 variants; dispatch on (unsigned result, signed source) types.  */
#define __arm_vqshrunbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshrunbq_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshrunbq_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
f2170a37 | 40943 | |
/* __arm_vqshluq_m: signed source, unsigned result; dispatch on that type pair
   to the _n_s8/_n_s16/_n_s32 variants; p2 is the shift, p3 the predicate.  */
#define __arm_vqshluq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqshluq_m_n_s8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqshluq_m_n_s16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqshluq_m_n_s32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3));})
f2170a37 | 40950 | |
/* __arm_vshlq_m: dispatch on the three operand vector types; note the shift
   vector (p2) is always a signed type, even for the _u8/_u16/_u32 variants.  */
#define __arm_vshlq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vshlq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vshlq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vshlq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vshlq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vshlq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vshlq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
f2170a37 | 40961 | |
/* __arm_vshlq_m_n: immediate-shift form; dispatch on (inactive, source) vector
   types to the _n_s8.._n_u32 variants; p2 is the shift, p3 the predicate.  */
#define __arm_vshlq_m_n(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vshlq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vshlq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vshlq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vshlq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vshlq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vshlq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
f2170a37 | 40971 | |
532e9e24 SP |
/* __arm_vshlq_m_r: register-shift form; dispatch on the single vector operand
   type; p1 is the scalar shift, p2 the predicate.  */
#define __arm_vshlq_m_r(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlq_m_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlq_m_r_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlq_m_r_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlq_m_r_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlq_m_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlq_m_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})
40980 | ||
/* __arm_vsriq_m: dispatch on the two matching vector operand types to the
   _n_s8.._n_u32 variants; p2 is the shift, p3 the predicate.  */
#define __arm_vsriq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsriq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsriq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsriq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsriq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsriq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsriq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
f2170a37 | 40990 | |
/* __arm_vhaddq_m: a plain-integer second operand (matched as int_n via
   __ARM_mve_coerce3) selects the scalar _n_ variants; otherwise the three
   vector types select the vector variants; p3 is the predicate.  */
#define __arm_vhaddq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vhaddq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vhaddq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vhaddq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vhaddq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vhaddq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vhaddq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhaddq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhaddq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhaddq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vhaddq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vhaddq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vhaddq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
f2170a37 | 41007 | |
/* Overloaded predicated halving complex add with 270-degree rotation.
   Signed vector operands only (s8/s16/s32); dispatch via _Generic on the
   operand typeids.  p3 is the predicate mask.  */
#define __arm_vhcaddq_rot270_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhcaddq_rot270_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhcaddq_rot270_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhcaddq_rot270_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
f2170a37 | 41015 | |
/* Overloaded predicated halving complex add with 90-degree rotation.
   Signed vector operands only (s8/s16/s32); dispatch via _Generic on the
   operand typeids.  p3 is the predicate mask.  */
#define __arm_vhcaddq_rot90_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhcaddq_rot90_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhcaddq_rot90_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhcaddq_rot90_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
f2170a37 | 41023 | |
/* Overloaded predicated halving subtract.  Vector cases are listed first,
   then the scalar (_n) cases selected by an __ARM_mve_type_int_n third
   operand; the _n cases pass the original expression p2 (not __p2) to
   __ARM_mve_coerce3.  p3 is the predicate mask.  */
#define __arm_vhsubq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhsubq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhsubq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhsubq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vhsubq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vhsubq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vhsubq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vhsubq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vhsubq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vhsubq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vhsubq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vhsubq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vhsubq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce3(p2, int), p3));})
f2170a37 | 41040 | |
/* Overloaded predicated maximum.  Vector operands only; dispatch via
   _Generic on the operand typeids (s8/s16/s32/u8/u16/u32).  p3 is the
   predicate mask.  */
#define __arm_vmaxq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmaxq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmaxq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmaxq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmaxq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmaxq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmaxq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
f2170a37 | 41051 | |
/* Overloaded predicated minimum.  Vector operands only; dispatch via
   _Generic on the operand typeids (s8/s16/s32/u8/u16/u32).  p3 is the
   predicate mask.  */
#define __arm_vminq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vminq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vminq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vminq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vminq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vminq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vminq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
f2170a37 | 41062 | |
/* Overloaded predicated multiply-accumulate with scalar.  Only _n variants
   exist: the third operand must be a scalar (__ARM_mve_type_int_n), passed
   as the original expression p2 (not __p2) to __ARM_mve_coerce3.  p3 is the
   predicate mask.  */
#define __arm_vmlaq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmlaq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmlaq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmlaq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmlaq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmlaq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmlaq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce3(p2, int), p3));})
f2170a37 | 41073 | |
/* Overloaded predicated multiply-accumulate-scalar (vector * vector +
   scalar).  Only _n variants exist; the scalar third operand is passed as
   the original expression p2 to __ARM_mve_coerce3.  p3 is the predicate
   mask.  */
#define __arm_vmlasq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmlasq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmlasq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmlasq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmlasq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmlasq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmlasq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce3(p2, int), p3));})
f2170a37 | 41084 | |
/* Overloaded predicated multiply-returning-high-half.  Vector operands
   only; dispatch via _Generic on the operand typeids.  p3 is the predicate
   mask.  */
#define __arm_vmulhq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulhq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulhq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulhq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulhq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulhq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulhq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
f2170a37 | 41095 | |
/* Overloaded predicated widening multiply (bottom halves).  The inactive-
   value operand p0 is the double-width type of the multiplicands (e.g.
   int16x8_t when p1/p2 are int8x16_t), as the dispatch cases show.  p3 is
   the predicate mask.  */
#define __arm_vmullbq_int_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmullbq_int_m_s8 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmullbq_int_m_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int64x2_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmullbq_int_m_s32 (__ARM_mve_coerce(__p0, int64x2_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmullbq_int_m_u8 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmullbq_int_m_u16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint64x2_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmullbq_int_m_u32 (__ARM_mve_coerce(__p0, uint64x2_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
f2170a37 | 41106 | |
/* Overloaded predicated widening multiply (top halves).  Mirrors
   __arm_vmullbq_int_m: p0 is the double-width result/inactive type of the
   multiplicands.  p3 is the predicate mask.  */
#define __arm_vmulltq_int_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulltq_int_m_s8 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulltq_int_m_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int64x2_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulltq_int_m_s32 (__ARM_mve_coerce(__p0, int64x2_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulltq_int_m_u8 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulltq_int_m_u16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint64x2_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulltq_int_m_u32 (__ARM_mve_coerce(__p0, uint64x2_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
f2170a37 | 41117 | |
/* Overloaded predicated widening polynomial multiply (top halves).
   Polynomial operands are carried in unsigned vectors (p8 in uint8x16_t,
   p16 in uint16x8_t); p0 is the double-width result type.  p3 is the
   predicate mask.  */
#define __arm_vmulltq_poly_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulltq_poly_m_p8 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulltq_poly_m_p16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3));})
e3678b44 | 41124 | |
/* Overloaded predicated saturating add.  Scalar (_n) cases are listed
   first, selected by an __ARM_mve_type_int_n third operand and passing the
   original expression p2 to __ARM_mve_coerce3; vector cases follow.  p3 is
   the predicate mask.  */
#define __arm_vqaddq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqaddq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqaddq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqaddq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vqaddq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vqaddq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vqaddq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqaddq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqaddq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqaddq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vqaddq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vqaddq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vqaddq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
8eb3b6b9 | 41141 | |
/* Overloaded predicated saturating doubling multiply-accumulate with a
   scalar.  Signed _n variants only (s8/s16/s32); the scalar is passed as
   the original expression p2 to __ARM_mve_coerce3.  p3 is the predicate
   mask.  */
#define __arm_vqdmlahq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqdmlahq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmlahq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmlahq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce3(p2, int), p3));})
8eb3b6b9 | 41149 | |
/* Overloaded predicated saturating doubling multiply-high.  Signed only;
   _n cases (scalar second multiplicand, original expression p2 passed to
   __ARM_mve_coerce3) are listed before the vector cases.  p3 is the
   predicate mask.  */
#define __arm_vqdmulhq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqdmulhq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmulhq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmulhq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmulhq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmulhq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmulhq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
8eb3b6b9 | 41160 | |
/* Overloaded predicated saturating doubling widening multiply (bottom
   halves).  Signed only; p0 is the double-width result type (int32x4_t for
   s16 multiplicands, int64x2_t for s32).  Vector cases precede the _n
   cases, which pass the original expression p2 to __ARM_mve_coerce3.  */
#define __arm_vqdmullbq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmullbq_m_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int64x2_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmullbq_m_s32 (__ARM_mve_coerce(__p0, int64x2_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmullbq_m_n_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_int64x2_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmullbq_m_n_s32 (__ARM_mve_coerce(__p0, int64x2_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce3(p2, int), p3));})
8eb3b6b9 | 41169 | |
/* Overloaded predicated saturating doubling widening multiply (top
   halves).  Mirrors __arm_vqdmullbq_m, but here the _n cases (original
   expression p2 passed to __ARM_mve_coerce3) precede the vector cases.  */
#define __arm_vqdmulltq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmulltq_m_n_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_int64x2_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmulltq_m_n_s32 (__ARM_mve_coerce(__p0, int64x2_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce3(p2, int), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmulltq_m_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int64x2_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmulltq_m_s32 (__ARM_mve_coerce(__p0, int64x2_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
e3678b44 | 41178 | |
/* Overloaded predicated saturating rounding doubling multiply-add-dual.
   Signed vector operands only (s8/s16/s32); dispatch via _Generic on the
   operand typeids.  p3 is the predicate mask.  */
#define __arm_vqrdmladhq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmladhq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmladhq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmladhq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
e3678b44 | 41186 | |
/* Overloaded predicated saturating rounding doubling multiply-add-dual,
   exchanged variant.  Signed vector operands only (s8/s16/s32).  p3 is the
   predicate mask.  */
#define __arm_vqrdmladhxq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmladhxq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmladhxq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmladhxq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
14782c81 | 41194 | |
9b905ba9 SP |
/* Overloaded predicated multiply-subtract-dual-accumulate-across,
   exchanged.  The scalar accumulator p0 is passed through unchanged;
   dispatch is on the typeids of the two vector operands p1/p2 (signed
   only).  p3 is the predicate mask.  */
#define __arm_vmlsdavaxq_p(p0,p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmlsdavaxq_p_s8 (p0, __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsdavaxq_p_s16 (p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsdavaxq_p_s32 (p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
41201 | ||
/* Overloaded multiply-subtract-dual-accumulate-across.  The scalar
   accumulator p0 is passed through unchanged; dispatch is on the typeids
   of the two vector operands p1/p2 (signed only).  */
#define __arm_vmlsdavaq(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmlsdavaq_s8(p0, __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsdavaq_s16(p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsdavaq_s32(p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
41208 | ||
/* Overloaded multiply-subtract-dual-accumulate-across, exchanged.  Scalar
   accumulator p0 passed through; dispatch on p1/p2 typeids (signed only).
   NOTE(review): __p2 is initialized before __p1 here, the opposite of the
   sibling macros — harmless unless callers pass side-effecting arguments;
   confirm whether the order is intentional.  */
#define __arm_vmlsdavaxq(p0,p1,p2) ({ __typeof(p2) __p2 = (p2); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmlsdavaxq_s8(p0, __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsdavaxq_s16(p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsdavaxq_s32(p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
41215 | ||
41216 | #define __arm_vmlsdavq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ | |
41217 | __typeof(p1) __p1 = (p1); \ | |
41218 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
41219 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmlsdavq_p_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \ | |
41220 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsdavq_p_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
41221 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsdavq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));}) | |
db5db9d2 | 41222 | |
9b905ba9 | 41223 | #define __arm_vmlsdavxq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ |
db5db9d2 | 41224 | __typeof(p1) __p1 = (p1); \ |
9b905ba9 SP |
41225 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ |
41226 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmlsdavxq_p_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \ | |
41227 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsdavxq_p_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
41228 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsdavxq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));}) | |
41229 | ||
41230 | #define __arm_vmlsdavaq_p(p0,p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ | |
db5db9d2 | 41231 | __typeof(p2) __p2 = (p2); \ |
532e9e24 | 41232 | _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ |
9b905ba9 SP |
41233 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmlsdavaq_p_s8(p0, __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \ |
41234 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsdavaq_p_s16(p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ | |
41235 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsdavaq_p_s32(p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));}) | |
db5db9d2 | 41236 | |
532e9e24 | 41237 | #define __arm_vmladavaxq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ |
8eb3b6b9 | 41238 | __typeof(p1) __p1 = (p1); \ |
532e9e24 SP |
41239 | __typeof(p2) __p2 = (p2); \ |
41240 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
31df339a SMW |
41241 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmladavaxq_p_s8 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \ |
41242 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmladavaxq_p_s16 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ | |
41243 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmladavaxq_p_s32 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));}) | |
8eb3b6b9 | 41244 | |
532e9e24 | 41245 | #define __arm_vmullbq_poly_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ |
db5db9d2 | 41246 | __typeof(p1) __p1 = (p1); \ |
532e9e24 SP |
41247 | __typeof(p2) __p2 = (p2); \ |
41248 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
41249 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmullbq_poly_m_p8 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ | |
41250 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmullbq_poly_m_p16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3));}) | |
db5db9d2 | 41251 | |
3ce755a8 ASDV |
41252 | #define __arm_vldrbq_gather_offset(p0,p1) ({ __typeof(p1) __p1 = (p1); \ |
41253 | _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \ | |
b13f297f SP |
41254 | int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vldrbq_gather_offset_s8(__ARM_mve_coerce1(p0, int8_t *), __ARM_mve_coerce(__p1, uint8x16_t)), \ |
41255 | int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrbq_gather_offset_s16(__ARM_mve_coerce1(p0, int8_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \ | |
41256 | int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrbq_gather_offset_s32(__ARM_mve_coerce1(p0, int8_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \ | |
41257 | int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vldrbq_gather_offset_u8(__ARM_mve_coerce1(p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t)), \ | |
41258 | int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrbq_gather_offset_u16(__ARM_mve_coerce1(p0, uint8_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \ | |
41259 | int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrbq_gather_offset_u32(__ARM_mve_coerce1(p0, uint8_t *), __ARM_mve_coerce(__p1, uint32x4_t)));}) | |
535a8645 | 41260 | |
92f80065 SP |
41261 | #define __arm_vidupq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ |
41262 | __typeof(p1) __p1 = (p1); \ | |
41263 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
c6ffc89f SP |
41264 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vidupq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), (uint32_t) __p1, p2, p3), \ |
41265 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vidupq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), (uint32_t) __p1, p2, p3), \ | |
41266 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vidupq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), (uint32_t) __p1, p2, p3), \ | |
92f80065 SP |
41267 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_m_wb_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3), \ |
41268 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_m_wb_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3), \ | |
41269 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_m_wb_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3));}) | |
41270 | ||
92f80065 SP |
41271 | #define __arm_vddupq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ |
41272 | __typeof(p1) __p1 = (p1); \ | |
41273 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
c6ffc89f SP |
41274 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vddupq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), (uint32_t) __p1, p2, p3), \ |
41275 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vddupq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), (uint32_t) __p1, p2, p3), \ | |
41276 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vddupq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), (uint32_t) __p1, p2, p3), \ | |
92f80065 SP |
41277 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_m_wb_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3), \ |
41278 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_m_wb_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3), \ | |
41279 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_m_wb_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3));}) | |
41280 | ||
92f80065 SP |
41281 | #define __arm_vidupq_u16(p0,p1) ({ __typeof(p0) __p0 = (p0); \ |
41282 | _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ | |
c6ffc89f | 41283 | int (*)[__ARM_mve_type_int_n]: __arm_vidupq_n_u16 ((uint32_t) __p0, p1), \ |
92f80065 SP |
41284 | int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_wb_u16 (__ARM_mve_coerce(__p0, uint32_t *), p1));}) |
41285 | ||
92f80065 SP |
41286 | #define __arm_vidupq_u32(p0,p1) ({ __typeof(p0) __p0 = (p0); \ |
41287 | _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ | |
c6ffc89f | 41288 | int (*)[__ARM_mve_type_int_n]: __arm_vidupq_n_u32 ((uint32_t) __p0, p1), \ |
92f80065 SP |
41289 | int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_wb_u32 (__ARM_mve_coerce(__p0, uint32_t *), p1));}) |
41290 | ||
92f80065 SP |
41291 | #define __arm_vidupq_u8(p0,p1) ({ __typeof(p0) __p0 = (p0); \ |
41292 | _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ | |
c6ffc89f | 41293 | int (*)[__ARM_mve_type_int_n]: __arm_vidupq_n_u8 ((uint32_t) __p0, p1), \ |
92f80065 SP |
41294 | int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_wb_u8 (__ARM_mve_coerce(__p0, uint32_t *), p1));}) |
41295 | ||
92f80065 SP |
41296 | #define __arm_vddupq_u16(p0,p1) ({ __typeof(p0) __p0 = (p0); \ |
41297 | _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ | |
c6ffc89f | 41298 | int (*)[__ARM_mve_type_int_n]: __arm_vddupq_n_u16 ((uint32_t) __p0, p1), \ |
92f80065 SP |
41299 | int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_wb_u16 (__ARM_mve_coerce(__p0, uint32_t *), p1));}) |
41300 | ||
92f80065 SP |
41301 | #define __arm_vddupq_u32(p0,p1) ({ __typeof(p0) __p0 = (p0); \ |
41302 | _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ | |
c6ffc89f | 41303 | int (*)[__ARM_mve_type_int_n]: __arm_vddupq_n_u32 ((uint32_t) __p0, p1), \ |
92f80065 SP |
41304 | int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_wb_u32 (__ARM_mve_coerce(__p0, uint32_t *), p1));}) |
41305 | ||
92f80065 SP |
41306 | #define __arm_vddupq_u8(p0,p1) ({ __typeof(p0) __p0 = (p0); \ |
41307 | _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ | |
c6ffc89f | 41308 | int (*)[__ARM_mve_type_int_n]: __arm_vddupq_n_u8 ((uint32_t) __p0, p1), \ |
92f80065 SP |
41309 | int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_wb_u8 (__ARM_mve_coerce(__p0, uint32_t *), p1));}) |
41310 | ||
92f80065 SP |
41311 | #define __arm_viwdupq_m(p0,p1,p2,p3,p4) ({ __typeof(p0) __p0 = (p0); \ |
41312 | __typeof(p1) __p1 = (p1); \ | |
41313 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
31df339a SMW |
41314 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_viwdupq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce3(p1, int), p2, p3, p4), \ |
41315 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_viwdupq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce3(p1, int), p2, p3, p4), \ | |
41316 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_viwdupq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce3(p1, int), p2, p3, p4), \ | |
92f80065 SP |
41317 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_m_wb_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4), \ |
41318 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_m_wb_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4), \ | |
41319 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_m_wb_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4));}) | |
41320 | ||
92f80065 SP |
41321 | #define __arm_viwdupq_u16(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ |
41322 | _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ | |
31df339a | 41323 | int (*)[__ARM_mve_type_int_n]: __arm_viwdupq_n_u16 (__ARM_mve_coerce3(p0, int), p1, (const int) p2), \ |
9b905ba9 | 41324 | int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_wb_u16 (__ARM_mve_coerce(__p0, uint32_t *), p1, (const int) p2));}) |
92f80065 | 41325 | |
92f80065 SP |
41326 | #define __arm_viwdupq_u32(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ |
41327 | _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ | |
31df339a | 41328 | int (*)[__ARM_mve_type_int_n]: __arm_viwdupq_n_u32 (__ARM_mve_coerce3(p0, int), p1, p2), \ |
92f80065 SP |
41329 | int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_wb_u32 (__ARM_mve_coerce(__p0, uint32_t *), p1, p2));}) |
41330 | ||
92f80065 SP |
41331 | #define __arm_viwdupq_u8(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ |
41332 | _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ | |
31df339a | 41333 | int (*)[__ARM_mve_type_int_n]: __arm_viwdupq_n_u8 (__ARM_mve_coerce3(p0, int), p1, p2), \ |
92f80065 SP |
41334 | int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_wb_u8 (__ARM_mve_coerce(__p0, uint32_t *), p1, p2));}) |
41335 | ||
92f80065 SP |
41336 | #define __arm_vdwdupq_m(p0,p1,p2,p3,p4) ({ __typeof(p0) __p0 = (p0); \ |
41337 | __typeof(p1) __p1 = (p1); \ | |
41338 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
31df339a SMW |
41339 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vdwdupq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce3(p1, int), p2, p3, p4), \ |
41340 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vdwdupq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce3(p1, int), p2, p3, p4), \ | |
41341 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vdwdupq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce3(p1, int), p2, p3, p4), \ | |
92f80065 SP |
41342 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_m_wb_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4), \ |
41343 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_m_wb_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4), \ | |
41344 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_m_wb_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4));}) | |
41345 | ||
92f80065 SP |
41346 | #define __arm_vdwdupq_u16(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ |
41347 | _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ | |
31df339a | 41348 | int (*)[__ARM_mve_type_int_n]: __arm_vdwdupq_n_u16 (__ARM_mve_coerce3(p0, int), p1, p2), \ |
92f80065 SP |
41349 | int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_wb_u16 (__ARM_mve_coerce(__p0, uint32_t *), p1, p2));}) |
41350 | ||
92f80065 SP |
41351 | #define __arm_vdwdupq_u32(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ |
41352 | _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ | |
31df339a | 41353 | int (*)[__ARM_mve_type_int_n]: __arm_vdwdupq_n_u32 (__ARM_mve_coerce3(p0, int), p1, p2), \ |
92f80065 SP |
41354 | int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_wb_u32 (__ARM_mve_coerce(__p0, uint32_t *), p1, p2));}) |
41355 | ||
92f80065 SP |
41356 | #define __arm_vdwdupq_u8(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ |
41357 | _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ | |
31df339a | 41358 | int (*)[__ARM_mve_type_int_n]: __arm_vdwdupq_n_u8 (__ARM_mve_coerce3(p0, int), p1, p2), \ |
92f80065 SP |
41359 | int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_wb_u8 (__ARM_mve_coerce(__p0, uint32_t *), p1, p2));}) |
41360 | ||
88c9a831 SP |
41361 | #define __arm_vshlcq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ |
41362 | _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ | |
41363 | int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlcq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2, p3), \ | |
41364 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlcq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2, p3), \ | |
41365 | int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlcq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2, p3), \ | |
41366 | int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlcq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2, p3), \ | |
41367 | int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlcq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2, p3), \ | |
41368 | int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlcq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2, p3));}) | |
41369 | ||
e81d0d9e SP |
41370 | #define __arm_vabavq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ |
41371 | __typeof(p1) __p1 = (p1); \ | |
41372 | __typeof(p2) __p2 = (p2); \ | |
41373 | _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
41374 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vabavq_s8 (__p0, __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \ | |
41375 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vabavq_s16 (__p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \ | |
41376 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vabavq_s32 (__p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \ | |
41377 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vabavq_u8 (__p0, __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t)), \ | |
41378 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vabavq_u16 (__p0, __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \ | |
41379 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vabavq_u32 (__p0, __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));}) | |
41380 | ||
e81d0d9e SP |
41381 | #define __arm_vabavq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ |
41382 | __typeof(p1) __p1 = (p1); \ | |
41383 | __typeof(p2) __p2 = (p2); \ | |
41384 | _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
41385 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vabavq_p_s8(__p0, __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \ | |
41386 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vabavq_p_s16(__p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ | |
41387 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vabavq_p_s32(__p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ | |
41388 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vabavq_p_u8(__p0, __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ | |
41389 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vabavq_p_u16(__p0, __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ | |
41390 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vabavq_p_u32(__p0, __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) | |
41391 | ||
e81d0d9e SP |
41392 | #define __arm_vaddlvaq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ |
41393 | __typeof(p1) __p1 = (p1); \ | |
41394 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
31df339a SMW |
41395 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vaddlvaq_s32 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, int32x4_t)), \ |
41396 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t]: __arm_vaddlvaq_u32 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, uint32x4_t)));}) | |
e81d0d9e | 41397 | |
e81d0d9e SP |
41398 | #define __arm_vaddlvaq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ |
41399 | __typeof(p1) __p1 = (p1); \ | |
41400 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
31df339a SMW |
41401 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vaddlvaq_p_s32 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, int32x4_t), p2), \ |
41402 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t]: __arm_vaddlvaq_p_u32 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) | |
e81d0d9e | 41403 | |
e81d0d9e SP |
41404 | #define __arm_vaddlvq(p0) ({ __typeof(p0) __p0 = (p0); \ |
41405 | _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ | |
41406 | int (*)[__ARM_mve_type_int32x4_t]: __arm_vaddlvq_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \ | |
41407 | int (*)[__ARM_mve_type_uint32x4_t]: __arm_vaddlvq_u32 (__ARM_mve_coerce(__p0, uint32x4_t)));}) | |
41408 | ||
e81d0d9e SP |
41409 | #define __arm_vaddlvq_p(p0,p1) ({ __typeof(p0) __p0 = (p0); \ |
41410 | _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ | |
41411 | int (*)[__ARM_mve_type_int32x4_t]: __arm_vaddlvq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \ | |
41412 | int (*)[__ARM_mve_type_uint32x4_t]: __arm_vaddlvq_p_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));}) | |
41413 | ||
e81d0d9e SP |
41414 | #define __arm_vaddvaq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ |
41415 | __typeof(p1) __p1 = (p1); \ | |
41416 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
31df339a SMW |
41417 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t]: __arm_vaddvaq_s8 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, int8x16_t)), \ |
41418 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t]: __arm_vaddvaq_s16 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, int16x8_t)), \ | |
41419 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vaddvaq_s32 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, int32x4_t)), \ | |
41420 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint8x16_t]: __arm_vaddvaq_u8 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, uint8x16_t)), \ | |
41421 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t]: __arm_vaddvaq_u16 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, uint16x8_t)), \ | |
41422 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t]: __arm_vaddvaq_u32 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, uint32x4_t)));}) | |
e81d0d9e | 41423 | |
e81d0d9e SP |
41424 | #define __arm_vaddvaq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ |
41425 | __typeof(p1) __p1 = (p1); \ | |
41426 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
31df339a SMW |
41427 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t]: __arm_vaddvaq_p_s8 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, int8x16_t), p2), \ |
41428 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t]: __arm_vaddvaq_p_s16 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
41429 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vaddvaq_p_s32 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, int32x4_t), p2), \ | |
41430 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint8x16_t]: __arm_vaddvaq_p_u8 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, uint8x16_t), p2), \ | |
41431 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t]: __arm_vaddvaq_p_u16 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ | |
41432 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t]: __arm_vaddvaq_p_u32 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) | |
e81d0d9e | 41433 | |
e81d0d9e SP |
41434 | #define __arm_vaddvq(p0) ({ __typeof(p0) __p0 = (p0); \ |
41435 | _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ | |
41436 | int (*)[__ARM_mve_type_int8x16_t]: __arm_vaddvq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \ | |
41437 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vaddvq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \ | |
41438 | int (*)[__ARM_mve_type_int32x4_t]: __arm_vaddvq_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \ | |
41439 | int (*)[__ARM_mve_type_uint8x16_t]: __arm_vaddvq_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \ | |
41440 | int (*)[__ARM_mve_type_uint16x8_t]: __arm_vaddvq_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \ | |
41441 | int (*)[__ARM_mve_type_uint32x4_t]: __arm_vaddvq_u32 (__ARM_mve_coerce(__p0, uint32x4_t)));}) | |
41442 | ||
e81d0d9e SP |
41443 | #define __arm_vaddvq_p(p0,p1) ({ __typeof(p0) __p0 = (p0); \ |
41444 | _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ | |
41445 | int (*)[__ARM_mve_type_int8x16_t]: __arm_vaddvq_p_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \ | |
41446 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vaddvq_p_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \ | |
41447 | int (*)[__ARM_mve_type_int32x4_t]: __arm_vaddvq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \ | |
41448 | int (*)[__ARM_mve_type_uint8x16_t]: __arm_vaddvq_p_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \ | |
41449 | int (*)[__ARM_mve_type_uint16x8_t]: __arm_vaddvq_p_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \ | |
41450 | int (*)[__ARM_mve_type_uint32x4_t]: __arm_vaddvq_p_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));}) | |
41451 | ||
e81d0d9e SP |
41452 | #define __arm_vcmpcsq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ |
41453 | __typeof(p1) __p1 = (p1); \ | |
41454 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
41455 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpcsq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \ | |
41456 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpcsq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ | |
41457 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpcsq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \ | |
31df339a SMW |
41458 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmpcsq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce3(p1, int)), \ |
41459 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmpcsq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce3(p1, int)), \ | |
41460 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmpcsq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce3(p1, int)));}) | |
e81d0d9e | 41461 | |
e81d0d9e SP |
41462 | #define __arm_vcmpcsq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ |
41463 | __typeof(p1) __p1 = (p1); \ | |
41464 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
41465 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpcsq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \ | |
41466 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpcsq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ | |
41467 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpcsq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \ | |
31df339a SMW |
41468 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmpcsq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce3(p1, int), p2), \ |
41469 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmpcsq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce3(p1, int), p2), \ | |
41470 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmpcsq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce3(p1, int), p2));}) | |
e81d0d9e | 41471 | |
e81d0d9e SP |
41472 | #define __arm_vcmphiq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ |
41473 | __typeof(p1) __p1 = (p1); \ | |
41474 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
41475 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmphiq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \ | |
41476 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmphiq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ | |
41477 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmphiq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \ | |
31df339a SMW |
41478 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmphiq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce3(p1, int)), \ |
41479 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmphiq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce3(p1, int)), \ | |
41480 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmphiq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce3(p1, int)));}) | |
e81d0d9e | 41481 | |
e81d0d9e SP |
41482 | #define __arm_vcmphiq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ |
41483 | __typeof(p1) __p1 = (p1); \ | |
41484 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
31df339a SMW |
41485 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmphiq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce3(p1, int), p2), \ |
41486 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmphiq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce3(p1, int), p2), \ | |
41487 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmphiq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce3(p1, int), p2), \ | |
e81d0d9e SP |
41488 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmphiq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \ |
41489 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmphiq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ | |
41490 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmphiq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) | |
41491 | ||
e81d0d9e SP |
41492 | #define __arm_vmaxavq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ |
41493 | __typeof(p1) __p1 = (p1); \ | |
41494 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
251950d8 JR |
41495 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t]: __arm_vmaxavq_s8 (__p0, __ARM_mve_coerce(__p1, int8x16_t)), \ |
41496 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t]: __arm_vmaxavq_s16 (__p0, __ARM_mve_coerce(__p1, int16x8_t)), \ | |
41497 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vmaxavq_s32 (__p0, __ARM_mve_coerce(__p1, int32x4_t)));}) | |
e81d0d9e | 41498 | |
e81d0d9e SP |
41499 | #define __arm_vmaxavq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ |
41500 | __typeof(p1) __p1 = (p1); \ | |
41501 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
251950d8 JR |
41502 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t]: __arm_vmaxavq_p_s8 (__p0, __ARM_mve_coerce(__p1, int8x16_t), p2), \ |
41503 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t]: __arm_vmaxavq_p_s16 (__p0, __ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
41504 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vmaxavq_p_s32 (__p0, __ARM_mve_coerce(__p1, int32x4_t), p2));}) | |
e81d0d9e | 41505 | |
e81d0d9e SP |
41506 | #define __arm_vmaxvq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ |
41507 | __typeof(p1) __p1 = (p1); \ | |
41508 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
251950d8 JR |
41509 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t]: __arm_vmaxvq_s8 (__p0, __ARM_mve_coerce(__p1, int8x16_t)), \ |
41510 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t]: __arm_vmaxvq_s16 (__p0, __ARM_mve_coerce(__p1, int16x8_t)), \ | |
41511 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vmaxvq_s32 (__p0, __ARM_mve_coerce(__p1, int32x4_t)), \ | |
41512 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint8x16_t]: __arm_vmaxvq_u8 (__p0, __ARM_mve_coerce(__p1, uint8x16_t)), \ | |
41513 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t]: __arm_vmaxvq_u16 (__p0, __ARM_mve_coerce(__p1, uint16x8_t)), \ | |
41514 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t]: __arm_vmaxvq_u32 (__p0,__ARM_mve_coerce(__p1, uint32x4_t)));}) | |
e81d0d9e | 41515 | |
e81d0d9e SP |
41516 | #define __arm_vmaxvq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ |
41517 | __typeof(p1) __p1 = (p1); \ | |
41518 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
251950d8 JR |
41519 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t]: __arm_vmaxvq_p_s8 (__p0, __ARM_mve_coerce(__p1, int8x16_t), p2), \ |
41520 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t]: __arm_vmaxvq_p_s16 (__p0, __ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
41521 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vmaxvq_p_s32 (__p0, __ARM_mve_coerce(__p1, int32x4_t), p2), \ | |
41522 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint8x16_t]: __arm_vmaxvq_p_u8 (__p0, __ARM_mve_coerce(__p1, uint8x16_t), p2), \ | |
41523 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t]: __arm_vmaxvq_p_u16 (__p0, __ARM_mve_coerce(__p1, uint16x8_t), p2), \ | |
41524 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t]: __arm_vmaxvq_p_u32 (__p0, __ARM_mve_coerce(__p1, uint32x4_t), p2));}) | |
e81d0d9e | 41525 | |
e81d0d9e SP |
41526 | #define __arm_vminavq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ |
41527 | __typeof(p1) __p1 = (p1); \ | |
41528 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
251950d8 JR |
41529 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t]: __arm_vminavq_s8 (__p0, __ARM_mve_coerce(__p1, int8x16_t)), \ |
41530 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t]: __arm_vminavq_s16 (__p0, __ARM_mve_coerce(__p1, int16x8_t)), \ | |
41531 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vminavq_s32 (__p0, __ARM_mve_coerce(__p1, int32x4_t)));}) | |
e81d0d9e | 41532 | |
e81d0d9e SP |
41533 | #define __arm_vminavq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ |
41534 | __typeof(p1) __p1 = (p1); \ | |
41535 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
251950d8 JR |
41536 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t]: __arm_vminavq_p_s8 (__p0, __ARM_mve_coerce(__p1, int8x16_t), p2), \ |
41537 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t]: __arm_vminavq_p_s16 (__p0, __ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
41538 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vminavq_p_s32 (__p0, __ARM_mve_coerce(__p1, int32x4_t), p2));}) | |
e81d0d9e | 41539 | |
9b905ba9 SP |
41540 | #define __arm_vmaxq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ |
41541 | __typeof(p2) __p2 = (p2); \ | |
41542 | _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
41543 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmaxq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \ | |
41544 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmaxq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ | |
41545 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmaxq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ | |
41546 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmaxq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ | |
41547 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmaxq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ | |
41548 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmaxq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) | |
41549 | ||
e81d0d9e SP |
41550 | #define __arm_vminq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ |
41551 | __typeof(p2) __p2 = (p2); \ | |
41552 | _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
41553 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vminq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \ | |
41554 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vminq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ | |
41555 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vminq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ | |
41556 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vminq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ | |
41557 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vminq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ | |
41558 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vminq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) | |
41559 | ||
e81d0d9e SP |
41560 | #define __arm_vminvq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ |
41561 | __typeof(p1) __p1 = (p1); \ | |
41562 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
251950d8 JR |
41563 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t]: __arm_vminvq_s8 (__p0, __ARM_mve_coerce(__p1, int8x16_t)), \ |
41564 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t]: __arm_vminvq_s16 (__p0, __ARM_mve_coerce(__p1, int16x8_t)), \ | |
41565 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vminvq_s32 (__p0, __ARM_mve_coerce(__p1, int32x4_t)), \ | |
41566 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint8x16_t]: __arm_vminvq_u8 (__p0, __ARM_mve_coerce(__p1, uint8x16_t)), \ | |
41567 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t]: __arm_vminvq_u16 (__p0, __ARM_mve_coerce(__p1, uint16x8_t)), \ | |
41568 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t]: __arm_vminvq_u32 (__p0, __ARM_mve_coerce(__p1, uint32x4_t)));}) | |
e81d0d9e | 41569 | |
e81d0d9e SP |
41570 | #define __arm_vminvq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ |
41571 | __typeof(p1) __p1 = (p1); \ | |
41572 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
251950d8 JR |
41573 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t]: __arm_vminvq_p_s8 (__p0, __ARM_mve_coerce(__p1, int8x16_t), p2), \ |
41574 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t]: __arm_vminvq_p_s16 (__p0, __ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
41575 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vminvq_p_s32 (__p0, __ARM_mve_coerce(__p1, int32x4_t), p2), \ | |
41576 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint8x16_t]: __arm_vminvq_p_u8 (__p0, __ARM_mve_coerce(__p1, uint8x16_t), p2), \ | |
41577 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t]: __arm_vminvq_p_u16 (__p0, __ARM_mve_coerce(__p1, uint16x8_t), p2), \ | |
41578 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t]: __arm_vminvq_p_u32 (__p0, __ARM_mve_coerce(__p1, uint32x4_t), p2));}) | |
e81d0d9e | 41579 | |
e81d0d9e SP |
41580 | #define __arm_vmladavaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ |
41581 | __typeof(p1) __p1 = (p1); \ | |
41582 | __typeof(p2) __p2 = (p2); \ | |
41583 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
31df339a SMW |
41584 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmladavaq_s8 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \ |
41585 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmladavaq_s16 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \ | |
41586 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmladavaq_s32 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \ | |
41587 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmladavaq_u8 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t)), \ | |
41588 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmladavaq_u16 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \ | |
41589 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmladavaq_u32 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));}) | |
e81d0d9e | 41590 | |
e81d0d9e SP |
41591 | #define __arm_vmladavaq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ |
41592 | __typeof(p1) __p1 = (p1); \ | |
41593 | __typeof(p2) __p2 = (p2); \ | |
41594 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
31df339a SMW |
41595 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmladavaq_p_s8 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \ |
41596 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmladavaq_p_s16 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ | |
41597 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmladavaq_p_s32 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ | |
41598 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmladavaq_p_u8 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ | |
41599 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmladavaq_p_u16 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ | |
41600 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmladavaq_p_u32 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) | |
e81d0d9e | 41601 | |
e81d0d9e SP |
41602 | #define __arm_vmladavaxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ |
41603 | __typeof(p1) __p1 = (p1); \ | |
41604 | __typeof(p2) __p2 = (p2); \ | |
41605 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
31df339a SMW |
41606 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmladavaxq_s8 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \ |
41607 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmladavaxq_s16 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \ | |
41608 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmladavaxq_s32 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \ | |
41609 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmladavaxq_u8 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t)), \ | |
41610 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmladavaxq_u16 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \ | |
41611 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmladavaxq_u32 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));}) | |
e81d0d9e | 41612 | |
e81d0d9e SP |
41613 | #define __arm_vmladavq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ |
41614 | __typeof(p1) __p1 = (p1); \ | |
41615 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
41616 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmladavq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \ | |
41617 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmladavq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ | |
41618 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmladavq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \ | |
41619 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmladavq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \ | |
41620 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmladavq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ | |
41621 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmladavq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));}) | |
41622 | ||
e81d0d9e SP |
41623 | #define __arm_vmladavq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ |
41624 | __typeof(p1) __p1 = (p1); \ | |
41625 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
41626 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmladavq_p_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \ | |
41627 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmladavq_p_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
41628 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmladavq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ | |
41629 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmladavq_p_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \ | |
41630 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmladavq_p_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ | |
41631 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmladavq_p_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) | |
41632 | ||
e81d0d9e SP |
41633 | #define __arm_vmladavxq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ |
41634 | __typeof(p1) __p1 = (p1); \ | |
41635 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
41636 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmladavxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \ | |
41637 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmladavxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ | |
41638 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmladavxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \ | |
41639 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmladavxq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \ | |
41640 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmladavxq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ | |
41641 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmladavxq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));}) | |
41642 | ||
e81d0d9e SP |
41643 | #define __arm_vmladavxq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ |
41644 | __typeof(p1) __p1 = (p1); \ | |
41645 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
41646 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmladavxq_p_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \ | |
41647 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmladavxq_p_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
41648 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmladavxq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));}) | |
41649 | ||
e81d0d9e SP |
41650 | #define __arm_vmlaldavaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ |
41651 | __typeof(p1) __p1 = (p1); \ | |
41652 | __typeof(p2) __p2 = (p2); \ | |
41653 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
31df339a SMW |
41654 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlaldavaq_s16 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \ |
41655 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlaldavaq_s32 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \ | |
41656 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmlaldavaq_u16 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \ | |
41657 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmlaldavaq_u32 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));}) | |
e81d0d9e | 41658 | |
e81d0d9e SP |
41659 | #define __arm_vmlaldavaxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ |
41660 | __typeof(p1) __p1 = (p1); \ | |
41661 | __typeof(p2) __p2 = (p2); \ | |
41662 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
31df339a SMW |
41663 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlaldavaxq_s16 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \ |
41664 | int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlaldavaxq_s32 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));}) | |
e81d0d9e | 41665 | |
e81d0d9e SP |
41666 | #define __arm_vmlaldavq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ |
41667 | __typeof(p1) __p1 = (p1); \ | |
41668 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
41669 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlaldavq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ | |
41670 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlaldavq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \ | |
41671 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmlaldavq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ | |
41672 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmlaldavq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));}) | |
41673 | ||
e81d0d9e SP |
41674 | #define __arm_vmlaldavq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ |
41675 | __typeof(p1) __p1 = (p1); \ | |
41676 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
41677 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlaldavq_p_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
41678 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlaldavq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ | |
41679 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmlaldavq_p_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ | |
41680 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmlaldavq_p_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) | |
41681 | ||
e81d0d9e SP |
41682 | #define __arm_vmlaldavxq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ |
41683 | __typeof(p1) __p1 = (p1); \ | |
41684 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
41685 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlaldavxq_p_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
41686 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlaldavxq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));}) | |
41687 | ||
e81d0d9e SP |
41688 | #define __arm_vmlsdavq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ |
41689 | __typeof(p1) __p1 = (p1); \ | |
41690 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
41691 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmlsdavq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \ | |
41692 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsdavq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ | |
41693 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsdavq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));}) | |
41694 | ||
e81d0d9e SP |
41695 | #define __arm_vmlsdavxq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ |
41696 | __typeof(p1) __p1 = (p1); \ | |
41697 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
41698 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmlsdavxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \ | |
41699 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsdavxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ | |
41700 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsdavxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));}) | |
41701 | ||
e81d0d9e SP |
41702 | #define __arm_vmlsldavaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ |
41703 | __typeof(p1) __p1 = (p1); \ | |
41704 | __typeof(p2) __p2 = (p2); \ | |
41705 | _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
41706 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsldavaq_s16(__p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \ | |
41707 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsldavaq_s32(__p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));}) | |
41708 | ||
e81d0d9e SP |
41709 | #define __arm_vmlsldavaxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ |
41710 | __typeof(p1) __p1 = (p1); \ | |
41711 | __typeof(p2) __p2 = (p2); \ | |
41712 | _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
41713 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsldavaxq_s16(__p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \ | |
41714 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsldavaxq_s32(__p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));}) | |
41715 | ||
e81d0d9e SP |
41716 | #define __arm_vmlsldavq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ |
41717 | __typeof(p1) __p1 = (p1); \ | |
41718 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
41719 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsldavq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ | |
41720 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsldavq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));}) | |
41721 | ||
e81d0d9e SP |
41722 | #define __arm_vmlsldavq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ |
41723 | __typeof(p1) __p1 = (p1); \ | |
41724 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
41725 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsldavq_p_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
41726 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsldavq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));}) | |
41727 | ||
e81d0d9e SP |
41728 | #define __arm_vmlsldavxq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ |
41729 | __typeof(p1) __p1 = (p1); \ | |
41730 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
41731 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsldavxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ | |
41732 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsldavxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));}) | |
41733 | ||
e81d0d9e SP |
41734 | #define __arm_vmlsldavxq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ |
41735 | __typeof(p1) __p1 = (p1); \ | |
41736 | _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ | |
41737 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsldavxq_p_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
41738 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsldavxq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));}) | |
41739 | ||
e81d0d9e SP |
41740 | #define __arm_vmovlbq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \ |
41741 | _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ | |
41742 | int (*)[__ARM_mve_type_int8x16_t]: __arm_vmovlbq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \ | |
41743 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vmovlbq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
41744 | int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmovlbq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2), \ | |
41745 | int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmovlbq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2));}) | |
41746 | ||
e81d0d9e SP |
41747 | #define __arm_vmovltq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \ |
41748 | _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ | |
41749 | int (*)[__ARM_mve_type_int8x16_t]: __arm_vmovltq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \ | |
41750 | int (*)[__ARM_mve_type_int16x8_t]: __arm_vmovltq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \ | |
41751 | int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmovltq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2), \ | |
41752 | int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmovltq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2));}) | |
41753 | ||
e81d0d9e SP |
41754 | #define __arm_vmulhq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ |
41755 | __typeof(p2) __p2 = (p2); \ | |
41756 | _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
41757 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulhq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \ | |
41758 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulhq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ | |
41759 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulhq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ | |
41760 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulhq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ | |
41761 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulhq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ | |
41762 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulhq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) | |
41763 | ||
e81d0d9e SP |
41764 | #define __arm_vmullbq_int_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ |
41765 | __typeof(p2) __p2 = (p2); \ | |
41766 | _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
41767 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmullbq_int_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \ | |
41768 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmullbq_int_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ | |
41769 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmullbq_int_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ | |
41770 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmullbq_int_x_u8( __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ | |
41771 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmullbq_int_x_u16( __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ | |
41772 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmullbq_int_x_u32( __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) | |
41773 | ||
e81d0d9e SP |
41774 | #define __arm_vmullbq_poly_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ |
41775 | __typeof(p2) __p2 = (p2); \ | |
41776 | _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
41777 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmullbq_poly_x_p8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ | |
41778 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmullbq_poly_x_p16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3));}) | |
41779 | ||
e81d0d9e SP |
41780 | #define __arm_vmulltq_int_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ |
41781 | __typeof(p2) __p2 = (p2); \ | |
41782 | _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ | |
41783 | int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulltq_int_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \ | |
41784 | int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulltq_int_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ | |
41785 | int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulltq_int_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ | |
41786 | int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulltq_int_x_u8( __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ | |
41787 | int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulltq_int_x_u16( __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ | |
41788 | int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulltq_int_x_u32( __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) | |
41789 | ||
/* Polymorphic vmulltq_poly_x: polynomial variants exist only for the
   unsigned 8- and 16-bit vector types (dispatched to _p8 / _p16).
   p1 and p2 are evaluated exactly once; p3 is forwarded unchanged.  */
#define __arm_vmulltq_poly_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulltq_poly_x_p8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulltq_poly_x_p16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3));})
/* vrmlaldavhaxq has a single (s32) overload, so no _Generic dispatch is
   needed — the generic name forwards directly.  */
#define __arm_vrmlaldavhaxq(p0,p1,p2) __arm_vrmlaldavhaxq_s32(p0,p1,p2)
/* Polymorphic vrmlaldavhq: dispatches on the 32-bit vector element
   signedness of both operands (s32 or u32).  p0 and p1 are evaluated
   exactly once.  */
#define __arm_vrmlaldavhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrmlaldavhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrmlaldavhq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
/* Predicated form of __arm_vrmlaldavhq (same s32/u32 dispatch); the extra
   argument p2 is forwarded unchanged to the selected variant.  */
#define __arm_vrmlaldavhq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrmlaldavhq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrmlaldavhq_p_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
e81d0d9e SP |
41810 | #define __arm_vrmlaldavhxq(p0,p1) __arm_vrmlaldavhxq_s32(p0,p1) |
41811 | ||
e81d0d9e SP |
41812 | #define __arm_vrmlaldavhxq_p(p0,p1,p2) __arm_vrmlaldavhxq_p_s32(p0,p1,p2) |
41813 | ||
e81d0d9e SP |
41814 | #define __arm_vrmlsldavhaq(p0,p1,p2) __arm_vrmlsldavhaq_s32(p0,p1,p2) |
41815 | ||
e81d0d9e SP |
41816 | #define __arm_vrmlsldavhaxq(p0,p1,p2) __arm_vrmlsldavhaxq_s32(p0,p1,p2) |
41817 | ||
e81d0d9e SP |
41818 | #define __arm_vrmlsldavhq(p0,p1) __arm_vrmlsldavhq_s32(p0,p1) |
41819 | ||
e81d0d9e SP |
41820 | #define __arm_vrmlsldavhq_p(p0,p1,p2) __arm_vrmlsldavhq_p_s32(p0,p1,p2) |
41821 | ||
e81d0d9e SP |
41822 | #define __arm_vrmlsldavhxq(p0,p1) __arm_vrmlsldavhxq_s32(p0,p1) |
41823 | ||
e81d0d9e SP |
41824 | #define __arm_vrmlsldavhxq_p(p0,p1,p2) __arm_vrmlsldavhxq_p_s32(p0,p1,p2) |
41825 | ||
/* Polymorphic byte store: dispatches on the (pointer, vector) type pair so
   that int8_t*/uint8_t* bases pair with the matching 8/16/32-bit element
   vectors.  Only p1 is copied to a temporary; p0 appears both in the
   _Generic selector and in the selected branch.  NOTE(review): the
   controlling expression of _Generic is not evaluated (C11 6.5.1.1), so p0
   is still evaluated once at runtime, but unlike __arm_vstrbq_p below it is
   not captured with __typeof — confirm this asymmetry is intentional.  */
#define __arm_vstrbq(p0,p1) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16_t]: __arm_vstrbq_s8 (__ARM_mve_coerce(p0, int8_t *), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vstrbq_s16 (__ARM_mve_coerce(p0, int8_t *), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrbq_s32 (__ARM_mve_coerce(p0, int8_t *), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vstrbq_u8 (__ARM_mve_coerce(p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vstrbq_u16 (__ARM_mve_coerce(p0, uint8_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrbq_u32 (__ARM_mve_coerce(p0, uint8_t *), __ARM_mve_coerce(__p1, uint32x4_t)));})
/* Predicated polymorphic byte store: same (pointer, vector) dispatch as
   __arm_vstrbq; both p0 and p1 are evaluated exactly once and p2 is
   forwarded unchanged to the selected variant.  */
#define __arm_vstrbq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16_t]: __arm_vstrbq_p_s8 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vstrbq_p_s16 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrbq_p_s32 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vstrbq_p_u8 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vstrbq_p_u16 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrbq_p_u32 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
/* Polymorphic 64-bit scatter store with base: dispatch is on the value
   vector p2 only (s64/u64); p0 (base) and p1 (offset) are forwarded
   unchanged.  p2 is evaluated exactly once.  */
#define __arm_vstrdq_scatter_base(p0,p1,p2) ({ __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_base_s64 (p0, p1, __ARM_mve_coerce(__p2, int64x2_t)), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_base_u64 (p0, p1, __ARM_mve_coerce(__p2, uint64x2_t)));})
/* Predicated form of __arm_vstrdq_scatter_base (same s64/u64 dispatch on
   the value vector p2); p3 is forwarded unchanged.  */
#define __arm_vstrdq_scatter_base_p(p0,p1,p2,p3) ({ __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_base_p_s64 (p0, p1, __ARM_mve_coerce(__p2, int64x2_t), p3), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_base_p_u64 (p0, p1, __ARM_mve_coerce(__p2, uint64x2_t), p3));})
/* Polymorphic vrmlaldavhaq: the scalar accumulator p0 matches the
   __ARM_mve_type_int_n category and is passed through __ARM_mve_coerce3;
   the vector operands select the s32/u32 variant.  NOTE(review): __p0 is
   declared and used only in the _Generic selector — the branches coerce the
   original p0 (unevaluated in the selector per C11 6.5.1.1, so single
   evaluation still holds).  */
#define __arm_vrmlaldavhaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrmlaldavhaq_s32 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
  int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrmlaldavhaq_u32 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));})
/* Predicated form of __arm_vrmlaldavhaq (same scalar-accumulator + s32/u32
   vector dispatch); p3 is forwarded unchanged to the selected variant.  */
#define __arm_vrmlaldavhaq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrmlaldavhaq_p_s32 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrmlaldavhaq_p_u32 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
/* Polymorphic byte scatter store: dispatches on the (base pointer, offset
   vector, value vector) type triple.  Offsets are always the unsigned
   vector type matching the value element width.  p0, p1 and p2 are each
   evaluated exactly once.  */
#define __arm_vstrbq_scatter_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vstrbq_scatter_offset_s8 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
  int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrbq_scatter_offset_s16 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrbq_scatter_offset_s32 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vstrbq_scatter_offset_u8 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrbq_scatter_offset_u16 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrbq_scatter_offset_u32 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));})
/* Predicated form of __arm_vstrbq_scatter_offset (same triple dispatch);
   p3 is forwarded unchanged to the selected variant.  */
#define __arm_vstrbq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vstrbq_scatter_offset_p_s8 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrbq_scatter_offset_p_s16 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrbq_scatter_offset_p_s32 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vstrbq_scatter_offset_p_u8 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrbq_scatter_offset_p_u16 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrbq_scatter_offset_p_u32 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
/* Predicated 64-bit scatter store with vector offsets: dispatches on the
   (base pointer, value vector) pair; the offset vector p1 and predicate p3
   are forwarded unchanged.  p0 and p2 are evaluated exactly once.  */
#define __arm_vstrdq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int64_t_ptr][__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_offset_p_s64 (__ARM_mve_coerce(__p0, int64_t *), p1, __ARM_mve_coerce(__p2, int64x2_t), p3), \
  int (*)[__ARM_mve_type_uint64_t_ptr][__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_offset_p_u64 (__ARM_mve_coerce(__p0, uint64_t *), p1, __ARM_mve_coerce(__p2, uint64x2_t), p3));})
/* Unpredicated 64-bit scatter store with vector offsets: same
   (base pointer, value vector) dispatch as the _p form above; p1 is
   forwarded unchanged.  */
#define __arm_vstrdq_scatter_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int64_t_ptr][__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_offset_s64 (__ARM_mve_coerce(__p0, int64_t *), p1, __ARM_mve_coerce(__p2, int64x2_t)), \
  int (*)[__ARM_mve_type_uint64_t_ptr][__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_offset_u64 (__ARM_mve_coerce(__p0, uint64_t *), p1, __ARM_mve_coerce(__p2, uint64x2_t)));})
/* Predicated 64-bit scatter store with shifted vector offsets: dispatch on
   the (base pointer, value vector) pair; p1 (offsets) and p3 are forwarded
   unchanged.  */
#define __arm_vstrdq_scatter_shifted_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int64_t_ptr][__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_shifted_offset_p_s64 (__ARM_mve_coerce(__p0, int64_t *), p1, __ARM_mve_coerce(__p2, int64x2_t), p3), \
  int (*)[__ARM_mve_type_uint64_t_ptr][__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_shifted_offset_p_u64 (__ARM_mve_coerce(__p0, uint64_t *), p1, __ARM_mve_coerce(__p2, uint64x2_t), p3));})
/* Unpredicated 64-bit scatter store with shifted vector offsets: same
   dispatch as the _p form above; p1 is forwarded unchanged.  */
#define __arm_vstrdq_scatter_shifted_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int64_t_ptr][__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_shifted_offset_s64 (__ARM_mve_coerce(__p0, int64_t *), p1, __ARM_mve_coerce(__p2, int64x2_t)), \
  int (*)[__ARM_mve_type_uint64_t_ptr][__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_shifted_offset_u64 (__ARM_mve_coerce(__p0, uint64_t *), p1, __ARM_mve_coerce(__p2, uint64x2_t)));})
6a90680b | 41915 | #endif /* __cplusplus */ |
3b6e79ae | 41916 | #endif /* __ARM_FEATURE_MVE */ |
63c8f7d6 | 41917 | #endif /* _GCC_ARM_MVE_H. */ |