/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */

#include "test_sve_acle.h"
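
/* Tests for the svqsub (saturating subtract) intrinsics on svuint32_t:
   first the unpredicated vector and _n (scalar/immediate) forms, then
   the merging (_m), zeroing (_z) and "don't care" (_x) predications.
   Each "**" comment gives the assembly that check-function-bodies
   expects for the function that follows.  */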

/*
** qsub_u32_tied1:
** uqsub z0\.s, z0\.s, z1\.s
** ret
*/
TEST_UNIFORM_Z (qsub_u32_tied1, svuint32_t,
                z0 = svqsub_u32 (z0, z1),
                z0 = svqsub (z0, z1))

/*
** qsub_u32_tied2:
** uqsub z0\.s, z1\.s, z0\.s
** ret
*/
TEST_UNIFORM_Z (qsub_u32_tied2, svuint32_t,
                z0 = svqsub_u32 (z1, z0),
                z0 = svqsub (z1, z0))

/*
** qsub_u32_untied:
** uqsub z0\.s, z1\.s, z2\.s
** ret
*/
TEST_UNIFORM_Z (qsub_u32_untied, svuint32_t,
                z0 = svqsub_u32 (z1, z2),
                z0 = svqsub (z1, z2))

/*
** qsub_w0_u32_tied1:
** mov (z[0-9]+\.s), w0
** uqsub z0\.s, z0\.s, \1
** ret
*/
TEST_UNIFORM_ZX (qsub_w0_u32_tied1, svuint32_t, uint32_t,
                 z0 = svqsub_n_u32 (z0, x0),
                 z0 = svqsub (z0, x0))

/*
** qsub_w0_u32_untied:
** mov (z[0-9]+\.s), w0
** uqsub z0\.s, z1\.s, \1
** ret
*/
TEST_UNIFORM_ZX (qsub_w0_u32_untied, svuint32_t, uint32_t,
                 z0 = svqsub_n_u32 (z1, x0),
                 z0 = svqsub (z1, x0))

/*
** qsub_1_u32_tied1:
** uqsub z0\.s, z0\.s, #1
** ret
*/
TEST_UNIFORM_Z (qsub_1_u32_tied1, svuint32_t,
                z0 = svqsub_n_u32 (z0, 1),
                z0 = svqsub (z0, 1))

/*
** qsub_1_u32_untied:
** movprfx z0, z1
** uqsub z0\.s, z0\.s, #1
** ret
*/
TEST_UNIFORM_Z (qsub_1_u32_untied, svuint32_t,
                z0 = svqsub_n_u32 (z1, 1),
                z0 = svqsub (z1, 1))

/*
** qsub_127_u32:
** uqsub z0\.s, z0\.s, #127
** ret
*/
TEST_UNIFORM_Z (qsub_127_u32, svuint32_t,
                z0 = svqsub_n_u32 (z0, 127),
                z0 = svqsub (z0, 127))

/*
** qsub_128_u32:
** uqsub z0\.s, z0\.s, #128
** ret
*/
TEST_UNIFORM_Z (qsub_128_u32, svuint32_t,
                z0 = svqsub_n_u32 (z0, 128),
                z0 = svqsub (z0, 128))

/*
** qsub_255_u32:
** uqsub z0\.s, z0\.s, #255
** ret
*/
TEST_UNIFORM_Z (qsub_255_u32, svuint32_t,
                z0 = svqsub_n_u32 (z0, 255),
                z0 = svqsub (z0, 255))

/*
** qsub_m1_u32:
** mov (z[0-9]+)\.b, #-1
** uqsub z0\.s, z0\.s, \1\.s
** ret
*/
TEST_UNIFORM_Z (qsub_m1_u32, svuint32_t,
                z0 = svqsub_n_u32 (z0, -1),
                z0 = svqsub (z0, -1))

/*
** qsub_m127_u32:
** mov (z[0-9]+\.s), #-127
** uqsub z0\.s, z0\.s, \1
** ret
*/
TEST_UNIFORM_Z (qsub_m127_u32, svuint32_t,
                z0 = svqsub_n_u32 (z0, -127),
                z0 = svqsub (z0, -127))

/*
** qsub_m128_u32:
** mov (z[0-9]+\.s), #-128
** uqsub z0\.s, z0\.s, \1
** ret
*/
TEST_UNIFORM_Z (qsub_m128_u32, svuint32_t,
                z0 = svqsub_n_u32 (z0, -128),
                z0 = svqsub (z0, -128))

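/* Merging (_m) forms: inactive lanes take the value of the first vector
   argument, so these use the predicated UQSUB, with a MOVPRFX when the
   destination register is not already that argument.  */
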
/*
** qsub_u32_m_tied1:
** uqsub z0\.s, p0/m, z0\.s, z1\.s
** ret
*/
TEST_UNIFORM_Z (qsub_u32_m_tied1, svuint32_t,
                z0 = svqsub_u32_m (p0, z0, z1),
                z0 = svqsub_m (p0, z0, z1))

/*
** qsub_u32_m_tied2:
** mov (z[0-9]+)\.d, z0\.d
** movprfx z0, z1
** uqsub z0\.s, p0/m, z0\.s, \1\.s
** ret
*/
TEST_UNIFORM_Z (qsub_u32_m_tied2, svuint32_t,
                z0 = svqsub_u32_m (p0, z1, z0),
                z0 = svqsub_m (p0, z1, z0))

/*
** qsub_u32_m_untied:
** movprfx z0, z1
** uqsub z0\.s, p0/m, z0\.s, z2\.s
** ret
*/
TEST_UNIFORM_Z (qsub_u32_m_untied, svuint32_t,
                z0 = svqsub_u32_m (p0, z1, z2),
                z0 = svqsub_m (p0, z1, z2))

/*
** qsub_w0_u32_m_tied1:
** mov (z[0-9]+\.s), w0
** uqsub z0\.s, p0/m, z0\.s, \1
** ret
*/
TEST_UNIFORM_ZX (qsub_w0_u32_m_tied1, svuint32_t, uint32_t,
                 z0 = svqsub_n_u32_m (p0, z0, x0),
                 z0 = svqsub_m (p0, z0, x0))

/*
** qsub_w0_u32_m_untied:
** mov (z[0-9]+\.s), w0
** movprfx z0, z1
** uqsub z0\.s, p0/m, z0\.s, \1
** ret
*/
TEST_UNIFORM_ZX (qsub_w0_u32_m_untied, svuint32_t, uint32_t,
                 z0 = svqsub_n_u32_m (p0, z1, x0),
                 z0 = svqsub_m (p0, z1, x0))

/*
** qsub_1_u32_m_tied1:
** mov (z[0-9]+\.s), #1
** uqsub z0\.s, p0/m, z0\.s, \1
** ret
*/
TEST_UNIFORM_Z (qsub_1_u32_m_tied1, svuint32_t,
                z0 = svqsub_n_u32_m (p0, z0, 1),
                z0 = svqsub_m (p0, z0, 1))

/*
** qsub_1_u32_m_untied: { xfail *-*-* }
** mov (z[0-9]+\.s), #1
** movprfx z0, z1
** uqsub z0\.s, p0/m, z0\.s, \1
** ret
*/
TEST_UNIFORM_Z (qsub_1_u32_m_untied, svuint32_t,
                z0 = svqsub_n_u32_m (p0, z1, 1),
                z0 = svqsub_m (p0, z1, 1))

/*
** qsub_127_u32_m:
** mov (z[0-9]+\.s), #127
** uqsub z0\.s, p0/m, z0\.s, \1
** ret
*/
TEST_UNIFORM_Z (qsub_127_u32_m, svuint32_t,
                z0 = svqsub_n_u32_m (p0, z0, 127),
                z0 = svqsub_m (p0, z0, 127))

/*
** qsub_128_u32_m:
** mov (z[0-9]+\.s), #128
** uqsub z0\.s, p0/m, z0\.s, \1
** ret
*/
TEST_UNIFORM_Z (qsub_128_u32_m, svuint32_t,
                z0 = svqsub_n_u32_m (p0, z0, 128),
                z0 = svqsub_m (p0, z0, 128))

/*
** qsub_255_u32_m:
** mov (z[0-9]+\.s), #255
** uqsub z0\.s, p0/m, z0\.s, \1
** ret
*/
TEST_UNIFORM_Z (qsub_255_u32_m, svuint32_t,
                z0 = svqsub_n_u32_m (p0, z0, 255),
                z0 = svqsub_m (p0, z0, 255))

/*
** qsub_m1_u32_m:
** mov (z[0-9]+)\.b, #-1
** uqsub z0\.s, p0/m, z0\.s, \1\.s
** ret
*/
TEST_UNIFORM_Z (qsub_m1_u32_m, svuint32_t,
                z0 = svqsub_n_u32_m (p0, z0, -1),
                z0 = svqsub_m (p0, z0, -1))

/*
** qsub_m127_u32_m:
** mov (z[0-9]+\.s), #-127
** uqsub z0\.s, p0/m, z0\.s, \1
** ret
*/
TEST_UNIFORM_Z (qsub_m127_u32_m, svuint32_t,
                z0 = svqsub_n_u32_m (p0, z0, -127),
                z0 = svqsub_m (p0, z0, -127))

/*
** qsub_m128_u32_m:
** mov (z[0-9]+\.s), #-128
** uqsub z0\.s, p0/m, z0\.s, \1
** ret
*/
TEST_UNIFORM_Z (qsub_m128_u32_m, svuint32_t,
                z0 = svqsub_n_u32_m (p0, z0, -128),
                z0 = svqsub_m (p0, z0, -128))

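/* Zeroing (_z) forms: inactive lanes are set to zero by the initial
   zeroing MOVPRFX; either operand order is acceptable in the untied
   cases because SVE2's UQSUBR performs the reversed subtraction.  */
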
/*
** qsub_u32_z_tied1:
** movprfx z0\.s, p0/z, z0\.s
** uqsub z0\.s, p0/m, z0\.s, z1\.s
** ret
*/
TEST_UNIFORM_Z (qsub_u32_z_tied1, svuint32_t,
                z0 = svqsub_u32_z (p0, z0, z1),
                z0 = svqsub_z (p0, z0, z1))

/*
** qsub_u32_z_tied2:
** movprfx z0\.s, p0/z, z0\.s
** uqsubr z0\.s, p0/m, z0\.s, z1\.s
** ret
*/
TEST_UNIFORM_Z (qsub_u32_z_tied2, svuint32_t,
                z0 = svqsub_u32_z (p0, z1, z0),
                z0 = svqsub_z (p0, z1, z0))

/*
** qsub_u32_z_untied:
** (
** movprfx z0\.s, p0/z, z1\.s
** uqsub z0\.s, p0/m, z0\.s, z2\.s
** |
** movprfx z0\.s, p0/z, z2\.s
** uqsubr z0\.s, p0/m, z0\.s, z1\.s
** )
** ret
*/
TEST_UNIFORM_Z (qsub_u32_z_untied, svuint32_t,
                z0 = svqsub_u32_z (p0, z1, z2),
                z0 = svqsub_z (p0, z1, z2))

/*
** qsub_w0_u32_z_tied1:
** mov (z[0-9]+\.s), w0
** movprfx z0\.s, p0/z, z0\.s
** uqsub z0\.s, p0/m, z0\.s, \1
** ret
*/
TEST_UNIFORM_ZX (qsub_w0_u32_z_tied1, svuint32_t, uint32_t,
                 z0 = svqsub_n_u32_z (p0, z0, x0),
                 z0 = svqsub_z (p0, z0, x0))

/*
** qsub_w0_u32_z_untied:
** mov (z[0-9]+\.s), w0
** (
** movprfx z0\.s, p0/z, z1\.s
** uqsub z0\.s, p0/m, z0\.s, \1
** |
** movprfx z0\.s, p0/z, \1
** uqsubr z0\.s, p0/m, z0\.s, z1\.s
** )
** ret
*/
TEST_UNIFORM_ZX (qsub_w0_u32_z_untied, svuint32_t, uint32_t,
                 z0 = svqsub_n_u32_z (p0, z1, x0),
                 z0 = svqsub_z (p0, z1, x0))

/*
** qsub_1_u32_z_tied1:
** mov (z[0-9]+\.s), #1
** movprfx z0\.s, p0/z, z0\.s
** uqsub z0\.s, p0/m, z0\.s, \1
** ret
*/
TEST_UNIFORM_Z (qsub_1_u32_z_tied1, svuint32_t,
                z0 = svqsub_n_u32_z (p0, z0, 1),
                z0 = svqsub_z (p0, z0, 1))

/*
** qsub_1_u32_z_untied:
** mov (z[0-9]+\.s), #1
** (
** movprfx z0\.s, p0/z, z1\.s
** uqsub z0\.s, p0/m, z0\.s, \1
** |
** movprfx z0\.s, p0/z, \1
** uqsubr z0\.s, p0/m, z0\.s, z1\.s
** )
** ret
*/
TEST_UNIFORM_Z (qsub_1_u32_z_untied, svuint32_t,
                z0 = svqsub_n_u32_z (p0, z1, 1),
                z0 = svqsub_z (p0, z1, 1))

/*
** qsub_127_u32_z:
** mov (z[0-9]+\.s), #127
** movprfx z0\.s, p0/z, z0\.s
** uqsub z0\.s, p0/m, z0\.s, \1
** ret
*/
TEST_UNIFORM_Z (qsub_127_u32_z, svuint32_t,
                z0 = svqsub_n_u32_z (p0, z0, 127),
                z0 = svqsub_z (p0, z0, 127))

/*
** qsub_128_u32_z:
** mov (z[0-9]+\.s), #128
** movprfx z0\.s, p0/z, z0\.s
** uqsub z0\.s, p0/m, z0\.s, \1
** ret
*/
TEST_UNIFORM_Z (qsub_128_u32_z, svuint32_t,
                z0 = svqsub_n_u32_z (p0, z0, 128),
                z0 = svqsub_z (p0, z0, 128))

/*
** qsub_255_u32_z:
** mov (z[0-9]+\.s), #255
** movprfx z0\.s, p0/z, z0\.s
** uqsub z0\.s, p0/m, z0\.s, \1
** ret
*/
TEST_UNIFORM_Z (qsub_255_u32_z, svuint32_t,
                z0 = svqsub_n_u32_z (p0, z0, 255),
                z0 = svqsub_z (p0, z0, 255))

/*
** qsub_m1_u32_z:
** mov (z[0-9]+)\.b, #-1
** movprfx z0\.s, p0/z, z0\.s
** uqsub z0\.s, p0/m, z0\.s, \1\.s
** ret
*/
TEST_UNIFORM_Z (qsub_m1_u32_z, svuint32_t,
                z0 = svqsub_n_u32_z (p0, z0, -1),
                z0 = svqsub_z (p0, z0, -1))

/*
** qsub_m127_u32_z:
** mov (z[0-9]+\.s), #-127
** movprfx z0\.s, p0/z, z0\.s
** uqsub z0\.s, p0/m, z0\.s, \1
** ret
*/
TEST_UNIFORM_Z (qsub_m127_u32_z, svuint32_t,
                z0 = svqsub_n_u32_z (p0, z0, -127),
                z0 = svqsub_z (p0, z0, -127))

/*
** qsub_m128_u32_z:
** mov (z[0-9]+\.s), #-128
** movprfx z0\.s, p0/z, z0\.s
** uqsub z0\.s, p0/m, z0\.s, \1
** ret
*/
TEST_UNIFORM_Z (qsub_m128_u32_z, svuint32_t,
                z0 = svqsub_n_u32_z (p0, z0, -128),
                z0 = svqsub_z (p0, z0, -128))

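/* "Don't care" (_x) forms: inactive lanes may hold any value, so the
   compiler can use the unpredicated UQSUB, including its immediate
   encoding for suitable constants.  */
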
/*
** qsub_u32_x_tied1:
** uqsub z0\.s, z0\.s, z1\.s
** ret
*/
TEST_UNIFORM_Z (qsub_u32_x_tied1, svuint32_t,
                z0 = svqsub_u32_x (p0, z0, z1),
                z0 = svqsub_x (p0, z0, z1))

/*
** qsub_u32_x_tied2:
** uqsub z0\.s, z1\.s, z0\.s
** ret
*/
TEST_UNIFORM_Z (qsub_u32_x_tied2, svuint32_t,
                z0 = svqsub_u32_x (p0, z1, z0),
                z0 = svqsub_x (p0, z1, z0))

/*
** qsub_u32_x_untied:
** uqsub z0\.s, z1\.s, z2\.s
** ret
*/
TEST_UNIFORM_Z (qsub_u32_x_untied, svuint32_t,
                z0 = svqsub_u32_x (p0, z1, z2),
                z0 = svqsub_x (p0, z1, z2))

/*
** qsub_w0_u32_x_tied1:
** mov (z[0-9]+\.s), w0
** uqsub z0\.s, z0\.s, \1
** ret
*/
TEST_UNIFORM_ZX (qsub_w0_u32_x_tied1, svuint32_t, uint32_t,
                 z0 = svqsub_n_u32_x (p0, z0, x0),
                 z0 = svqsub_x (p0, z0, x0))

/*
** qsub_w0_u32_x_untied:
** mov (z[0-9]+\.s), w0
** uqsub z0\.s, z1\.s, \1
** ret
*/
TEST_UNIFORM_ZX (qsub_w0_u32_x_untied, svuint32_t, uint32_t,
                 z0 = svqsub_n_u32_x (p0, z1, x0),
                 z0 = svqsub_x (p0, z1, x0))

/*
** qsub_1_u32_x_tied1:
** uqsub z0\.s, z0\.s, #1
** ret
*/
TEST_UNIFORM_Z (qsub_1_u32_x_tied1, svuint32_t,
                z0 = svqsub_n_u32_x (p0, z0, 1),
                z0 = svqsub_x (p0, z0, 1))

/*
** qsub_1_u32_x_untied:
** movprfx z0, z1
** uqsub z0\.s, z0\.s, #1
** ret
*/
TEST_UNIFORM_Z (qsub_1_u32_x_untied, svuint32_t,
                z0 = svqsub_n_u32_x (p0, z1, 1),
                z0 = svqsub_x (p0, z1, 1))

/*
** qsub_127_u32_x:
** uqsub z0\.s, z0\.s, #127
** ret
*/
TEST_UNIFORM_Z (qsub_127_u32_x, svuint32_t,
                z0 = svqsub_n_u32_x (p0, z0, 127),
                z0 = svqsub_x (p0, z0, 127))

/*
** qsub_128_u32_x:
** uqsub z0\.s, z0\.s, #128
** ret
*/
TEST_UNIFORM_Z (qsub_128_u32_x, svuint32_t,
                z0 = svqsub_n_u32_x (p0, z0, 128),
                z0 = svqsub_x (p0, z0, 128))

/*
** qsub_255_u32_x:
** uqsub z0\.s, z0\.s, #255
** ret
*/
TEST_UNIFORM_Z (qsub_255_u32_x, svuint32_t,
                z0 = svqsub_n_u32_x (p0, z0, 255),
                z0 = svqsub_x (p0, z0, 255))

/*
** qsub_m1_u32_x:
** mov (z[0-9]+)\.b, #-1
** uqsub z0\.s, z0\.s, \1\.s
** ret
*/
TEST_UNIFORM_Z (qsub_m1_u32_x, svuint32_t,
                z0 = svqsub_n_u32_x (p0, z0, -1),
                z0 = svqsub_x (p0, z0, -1))

/*
** qsub_m127_u32_x:
** mov (z[0-9]+\.s), #-127
** uqsub z0\.s, z0\.s, \1
** ret
*/
TEST_UNIFORM_Z (qsub_m127_u32_x, svuint32_t,
                z0 = svqsub_n_u32_x (p0, z0, -127),
                z0 = svqsub_x (p0, z0, -127))

/*
** qsub_m128_u32_x:
** mov (z[0-9]+\.s), #-128
** uqsub z0\.s, z0\.s, \1
** ret
*/
TEST_UNIFORM_Z (qsub_m128_u32_x, svuint32_t,
                z0 = svqsub_n_u32_x (p0, z0, -128),
                z0 = svqsub_x (p0, z0, -128))