/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */

#include "test_sve_acle.h"

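/* Tests for the SVE2 svsqadd intrinsic on svuint16_t: saturating
   addition of a signed svint16_t addend to an unsigned vector, which
   maps to the USQADD instruction.  The tests cover the _m, _z and _x
   predication variants with vector, scalar (_n) and constant addends,
   checking the expected instruction selection and register allocation
   for tied and untied operands.  */
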
/*
** sqadd_u16_m_tied1:
** usqadd z0\.h, p0/m, z0\.h, z4\.h
** ret
*/
TEST_DUAL_Z (sqadd_u16_m_tied1, svuint16_t, svint16_t,
             z0 = svsqadd_u16_m (p0, z0, z4),
             z0 = svsqadd_m (p0, z0, z4))

/*
** sqadd_u16_m_tied2:
** mov (z[0-9]+)\.d, z0\.d
** movprfx z0, z4
** usqadd z0\.h, p0/m, z0\.h, \1\.h
** ret
*/
TEST_DUAL_Z_REV (sqadd_u16_m_tied2, svuint16_t, svint16_t,
                 z0_res = svsqadd_u16_m (p0, z4, z0),
                 z0_res = svsqadd_m (p0, z4, z0))

/*
** sqadd_u16_m_untied:
** movprfx z0, z1
** usqadd z0\.h, p0/m, z0\.h, z4\.h
** ret
*/
TEST_DUAL_Z (sqadd_u16_m_untied, svuint16_t, svint16_t,
             z0 = svsqadd_u16_m (p0, z1, z4),
             z0 = svsqadd_m (p0, z1, z4))

/*
** sqadd_w0_u16_m_tied1:
** mov (z[0-9]+\.h), w0
** usqadd z0\.h, p0/m, z0\.h, \1
** ret
*/
TEST_UNIFORM_ZX (sqadd_w0_u16_m_tied1, svuint16_t, int16_t,
                 z0 = svsqadd_n_u16_m (p0, z0, x0),
                 z0 = svsqadd_m (p0, z0, x0))

/*
** sqadd_w0_u16_m_untied: { xfail *-*-* }
** mov (z[0-9]+\.h), w0
** movprfx z0, z1
** usqadd z0\.h, p0/m, z0\.h, \1
** ret
*/
TEST_UNIFORM_ZX (sqadd_w0_u16_m_untied, svuint16_t, int16_t,
                 z0 = svsqadd_n_u16_m (p0, z1, x0),
                 z0 = svsqadd_m (p0, z1, x0))

/*
** sqadd_1_u16_m_tied1:
** mov (z[0-9]+\.h), #1
** usqadd z0\.h, p0/m, z0\.h, \1
** ret
*/
TEST_UNIFORM_Z (sqadd_1_u16_m_tied1, svuint16_t,
                z0 = svsqadd_n_u16_m (p0, z0, 1),
                z0 = svsqadd_m (p0, z0, 1))

/*
** sqadd_1_u16_m_untied: { xfail *-*-* }
** mov (z[0-9]+\.h), #1
** movprfx z0, z1
** usqadd z0\.h, p0/m, z0\.h, \1
** ret
*/
TEST_UNIFORM_Z (sqadd_1_u16_m_untied, svuint16_t,
                z0 = svsqadd_n_u16_m (p0, z1, 1),
                z0 = svsqadd_m (p0, z1, 1))

/*
** sqadd_127_u16_m:
** mov (z[0-9]+\.h), #127
** usqadd z0\.h, p0/m, z0\.h, \1
** ret
*/
TEST_UNIFORM_Z (sqadd_127_u16_m, svuint16_t,
                z0 = svsqadd_n_u16_m (p0, z0, 127),
                z0 = svsqadd_m (p0, z0, 127))

/*
** sqadd_128_u16_m:
** mov (z[0-9]+\.h), #128
** usqadd z0\.h, p0/m, z0\.h, \1
** ret
*/
TEST_UNIFORM_Z (sqadd_128_u16_m, svuint16_t,
                z0 = svsqadd_n_u16_m (p0, z0, 128),
                z0 = svsqadd_m (p0, z0, 128))

/*
** sqadd_255_u16_m:
** mov (z[0-9]+\.h), #255
** usqadd z0\.h, p0/m, z0\.h, \1
** ret
*/
TEST_UNIFORM_Z (sqadd_255_u16_m, svuint16_t,
                z0 = svsqadd_n_u16_m (p0, z0, 255),
                z0 = svsqadd_m (p0, z0, 255))

/*
** sqadd_m1_u16_m:
** mov (z[0-9]+)\.b, #-1
** usqadd z0\.h, p0/m, z0\.h, \1\.h
** ret
*/
TEST_UNIFORM_Z (sqadd_m1_u16_m, svuint16_t,
                z0 = svsqadd_n_u16_m (p0, z0, -1),
                z0 = svsqadd_m (p0, z0, -1))

/*
** sqadd_m127_u16_m:
** mov (z[0-9]+\.h), #-127
** usqadd z0\.h, p0/m, z0\.h, \1
** ret
*/
TEST_UNIFORM_Z (sqadd_m127_u16_m, svuint16_t,
                z0 = svsqadd_n_u16_m (p0, z0, -127),
                z0 = svsqadd_m (p0, z0, -127))

/*
** sqadd_m128_u16_m:
** mov (z[0-9]+\.h), #-128
** usqadd z0\.h, p0/m, z0\.h, \1
** ret
*/
TEST_UNIFORM_Z (sqadd_m128_u16_m, svuint16_t,
                z0 = svsqadd_n_u16_m (p0, z0, -128),
                z0 = svsqadd_m (p0, z0, -128))

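/* The _z forms zero the inactive elements, so the compiler must emit a
   zeroing movprfx before the destructive USQADD.  */
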
/*
** sqadd_u16_z_tied1:
** movprfx z0\.h, p0/z, z0\.h
** usqadd z0\.h, p0/m, z0\.h, z4\.h
** ret
*/
TEST_DUAL_Z (sqadd_u16_z_tied1, svuint16_t, svint16_t,
             z0 = svsqadd_u16_z (p0, z0, z4),
             z0 = svsqadd_z (p0, z0, z4))

/*
** sqadd_u16_z_tied2:
** mov (z[0-9]+)\.d, z0\.d
** movprfx z0\.h, p0/z, z4\.h
** usqadd z0\.h, p0/m, z0\.h, \1\.h
** ret
*/
TEST_DUAL_Z_REV (sqadd_u16_z_tied2, svuint16_t, svint16_t,
                 z0_res = svsqadd_u16_z (p0, z4, z0),
                 z0_res = svsqadd_z (p0, z4, z0))

/*
** sqadd_u16_z_untied:
** movprfx z0\.h, p0/z, z1\.h
** usqadd z0\.h, p0/m, z0\.h, z4\.h
** ret
*/
TEST_DUAL_Z (sqadd_u16_z_untied, svuint16_t, svint16_t,
             z0 = svsqadd_u16_z (p0, z1, z4),
             z0 = svsqadd_z (p0, z1, z4))

/*
** sqadd_w0_u16_z_tied1:
** mov (z[0-9]+\.h), w0
** movprfx z0\.h, p0/z, z0\.h
** usqadd z0\.h, p0/m, z0\.h, \1
** ret
*/
TEST_UNIFORM_ZX (sqadd_w0_u16_z_tied1, svuint16_t, int16_t,
                 z0 = svsqadd_n_u16_z (p0, z0, x0),
                 z0 = svsqadd_z (p0, z0, x0))

/*
** sqadd_w0_u16_z_untied: { xfail *-*-* }
** mov (z[0-9]+\.h), w0
** movprfx z0\.h, p0/z, z1\.h
** usqadd z0\.h, p0/m, z0\.h, \1
** ret
*/
TEST_UNIFORM_ZX (sqadd_w0_u16_z_untied, svuint16_t, int16_t,
                 z0 = svsqadd_n_u16_z (p0, z1, x0),
                 z0 = svsqadd_z (p0, z1, x0))

/*
** sqadd_1_u16_z_tied1:
** mov (z[0-9]+\.h), #1
** movprfx z0\.h, p0/z, z0\.h
** usqadd z0\.h, p0/m, z0\.h, \1
** ret
*/
TEST_UNIFORM_Z (sqadd_1_u16_z_tied1, svuint16_t,
                z0 = svsqadd_n_u16_z (p0, z0, 1),
                z0 = svsqadd_z (p0, z0, 1))

/*
** sqadd_1_u16_z_untied: { xfail *-*-* }
** mov (z[0-9]+\.h), #1
** movprfx z0\.h, p0/z, z1\.h
** usqadd z0\.h, p0/m, z0\.h, \1
** ret
*/
TEST_UNIFORM_Z (sqadd_1_u16_z_untied, svuint16_t,
                z0 = svsqadd_n_u16_z (p0, z1, 1),
                z0 = svsqadd_z (p0, z1, 1))

/*
** sqadd_127_u16_z:
** mov (z[0-9]+\.h), #127
** movprfx z0\.h, p0/z, z0\.h
** usqadd z0\.h, p0/m, z0\.h, \1
** ret
*/
TEST_UNIFORM_Z (sqadd_127_u16_z, svuint16_t,
                z0 = svsqadd_n_u16_z (p0, z0, 127),
                z0 = svsqadd_z (p0, z0, 127))

/*
** sqadd_128_u16_z:
** mov (z[0-9]+\.h), #128
** movprfx z0\.h, p0/z, z0\.h
** usqadd z0\.h, p0/m, z0\.h, \1
** ret
*/
TEST_UNIFORM_Z (sqadd_128_u16_z, svuint16_t,
                z0 = svsqadd_n_u16_z (p0, z0, 128),
                z0 = svsqadd_z (p0, z0, 128))

/*
** sqadd_255_u16_z:
** mov (z[0-9]+\.h), #255
** movprfx z0\.h, p0/z, z0\.h
** usqadd z0\.h, p0/m, z0\.h, \1
** ret
*/
TEST_UNIFORM_Z (sqadd_255_u16_z, svuint16_t,
                z0 = svsqadd_n_u16_z (p0, z0, 255),
                z0 = svsqadd_z (p0, z0, 255))

/*
** sqadd_m1_u16_z:
** mov (z[0-9]+)\.b, #-1
** movprfx z0\.h, p0/z, z0\.h
** usqadd z0\.h, p0/m, z0\.h, \1\.h
** ret
*/
TEST_UNIFORM_Z (sqadd_m1_u16_z, svuint16_t,
                z0 = svsqadd_n_u16_z (p0, z0, -1),
                z0 = svsqadd_z (p0, z0, -1))

/*
** sqadd_m127_u16_z:
** mov (z[0-9]+\.h), #-127
** movprfx z0\.h, p0/z, z0\.h
** usqadd z0\.h, p0/m, z0\.h, \1
** ret
*/
TEST_UNIFORM_Z (sqadd_m127_u16_z, svuint16_t,
                z0 = svsqadd_n_u16_z (p0, z0, -127),
                z0 = svsqadd_z (p0, z0, -127))

/*
** sqadd_m128_u16_z:
** mov (z[0-9]+\.h), #-128
** movprfx z0\.h, p0/z, z0\.h
** usqadd z0\.h, p0/m, z0\.h, \1
** ret
*/
TEST_UNIFORM_Z (sqadd_m128_u16_z, svuint16_t,
                z0 = svsqadd_n_u16_z (p0, z0, -128),
                z0 = svsqadd_z (p0, z0, -128))

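/* With _x predication the inactive elements can take any value.  A
   known non-negative addend then behaves like an unsigned saturating
   add, so the compiler can use the unpredicated immediate form of
   UQADD instead of USQADD.  */
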
/*
** sqadd_u16_x_tied1:
** usqadd z0\.h, p0/m, z0\.h, z4\.h
** ret
*/
TEST_DUAL_Z (sqadd_u16_x_tied1, svuint16_t, svint16_t,
             z0 = svsqadd_u16_x (p0, z0, z4),
             z0 = svsqadd_x (p0, z0, z4))

/*
** sqadd_u16_x_tied2:
** mov (z[0-9]+)\.d, z0\.d
** movprfx z0, z4
** usqadd z0\.h, p0/m, z0\.h, \1\.h
** ret
*/
TEST_DUAL_Z_REV (sqadd_u16_x_tied2, svuint16_t, svint16_t,
                 z0_res = svsqadd_u16_x (p0, z4, z0),
                 z0_res = svsqadd_x (p0, z4, z0))

/*
** sqadd_u16_x_untied:
** movprfx z0, z1
** usqadd z0\.h, p0/m, z0\.h, z4\.h
** ret
*/
TEST_DUAL_Z (sqadd_u16_x_untied, svuint16_t, svint16_t,
             z0 = svsqadd_u16_x (p0, z1, z4),
             z0 = svsqadd_x (p0, z1, z4))

/*
** sqadd_w0_u16_x_tied1:
** mov (z[0-9]+\.h), w0
** usqadd z0\.h, p0/m, z0\.h, \1
** ret
*/
TEST_UNIFORM_ZX (sqadd_w0_u16_x_tied1, svuint16_t, int16_t,
                 z0 = svsqadd_n_u16_x (p0, z0, x0),
                 z0 = svsqadd_x (p0, z0, x0))

/*
** sqadd_w0_u16_x_untied: { xfail *-*-* }
** mov (z[0-9]+\.h), w0
** movprfx z0, z1
** usqadd z0\.h, p0/m, z0\.h, \1
** ret
*/
TEST_UNIFORM_ZX (sqadd_w0_u16_x_untied, svuint16_t, int16_t,
                 z0 = svsqadd_n_u16_x (p0, z1, x0),
                 z0 = svsqadd_x (p0, z1, x0))

/*
** sqadd_1_u16_x_tied1:
** uqadd z0\.h, z0\.h, #1
** ret
*/
TEST_UNIFORM_Z (sqadd_1_u16_x_tied1, svuint16_t,
                z0 = svsqadd_n_u16_x (p0, z0, 1),
                z0 = svsqadd_x (p0, z0, 1))

/*
** sqadd_1_u16_x_untied:
** movprfx z0, z1
** uqadd z0\.h, z0\.h, #1
** ret
*/
TEST_UNIFORM_Z (sqadd_1_u16_x_untied, svuint16_t,
                z0 = svsqadd_n_u16_x (p0, z1, 1),
                z0 = svsqadd_x (p0, z1, 1))

/*
** sqadd_127_u16_x:
** uqadd z0\.h, z0\.h, #127
** ret
*/
TEST_UNIFORM_Z (sqadd_127_u16_x, svuint16_t,
                z0 = svsqadd_n_u16_x (p0, z0, 127),
                z0 = svsqadd_x (p0, z0, 127))

/*
** sqadd_128_u16_x:
** uqadd z0\.h, z0\.h, #128
** ret
*/
TEST_UNIFORM_Z (sqadd_128_u16_x, svuint16_t,
                z0 = svsqadd_n_u16_x (p0, z0, 128),
                z0 = svsqadd_x (p0, z0, 128))

/*
** sqadd_255_u16_x:
** uqadd z0\.h, z0\.h, #255
** ret
*/
TEST_UNIFORM_Z (sqadd_255_u16_x, svuint16_t,
                z0 = svsqadd_n_u16_x (p0, z0, 255),
                z0 = svsqadd_x (p0, z0, 255))

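/* A negative addend cannot be expressed as an unsigned saturating add,
   so even with _x predication the compiler falls back to USQADD with
   the constant materialized in a register.  */
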
/*
** sqadd_m1_u16_x:
** mov (z[0-9]+)\.b, #-1
** usqadd z0\.h, p0/m, z0\.h, \1\.h
** ret
*/
TEST_UNIFORM_Z (sqadd_m1_u16_x, svuint16_t,
                z0 = svsqadd_n_u16_x (p0, z0, -1),
                z0 = svsqadd_x (p0, z0, -1))

/*
** sqadd_m127_u16_x:
** mov (z[0-9]+\.h), #-127
** usqadd z0\.h, p0/m, z0\.h, \1
** ret
*/
TEST_UNIFORM_Z (sqadd_m127_u16_x, svuint16_t,
                z0 = svsqadd_n_u16_x (p0, z0, -127),
                z0 = svsqadd_x (p0, z0, -127))

/*
** sqadd_m128_u16_x:
** mov (z[0-9]+\.h), #-128
** usqadd z0\.h, p0/m, z0\.h, \1
** ret
*/
TEST_UNIFORM_Z (sqadd_m128_u16_x, svuint16_t,
                z0 = svsqadd_n_u16_x (p0, z0, -128),
                z0 = svsqadd_x (p0, z0, -128))