/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */

#include "test_sve_acle.h"

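/* Tests for the SVE2 saturating subtraction intrinsic svqsub on
   svuint8_t.  The first group covers the unpredicated forms, which
   should map directly to the unpredicated UQSUB instruction; negative
   immediates wrap modulo 256, so -1 matches #255 and -127 matches #129.  */
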
/*
** qsub_u8_tied1:
**	uqsub	z0\.b, z0\.b, z1\.b
**	ret
*/
TEST_UNIFORM_Z (qsub_u8_tied1, svuint8_t,
		z0 = svqsub_u8 (z0, z1),
		z0 = svqsub (z0, z1))

/*
** qsub_u8_tied2:
**	uqsub	z0\.b, z1\.b, z0\.b
**	ret
*/
TEST_UNIFORM_Z (qsub_u8_tied2, svuint8_t,
		z0 = svqsub_u8 (z1, z0),
		z0 = svqsub (z1, z0))

/*
** qsub_u8_untied:
**	uqsub	z0\.b, z1\.b, z2\.b
**	ret
*/
TEST_UNIFORM_Z (qsub_u8_untied, svuint8_t,
		z0 = svqsub_u8 (z1, z2),
		z0 = svqsub (z1, z2))

/*
** qsub_w0_u8_tied1:
**	mov	(z[0-9]+\.b), w0
**	uqsub	z0\.b, z0\.b, \1
**	ret
*/
TEST_UNIFORM_ZX (qsub_w0_u8_tied1, svuint8_t, uint8_t,
		 z0 = svqsub_n_u8 (z0, x0),
		 z0 = svqsub (z0, x0))

/*
** qsub_w0_u8_untied:
**	mov	(z[0-9]+\.b), w0
**	uqsub	z0\.b, z1\.b, \1
**	ret
*/
TEST_UNIFORM_ZX (qsub_w0_u8_untied, svuint8_t, uint8_t,
		 z0 = svqsub_n_u8 (z1, x0),
		 z0 = svqsub (z1, x0))

/*
** qsub_1_u8_tied1:
**	uqsub	z0\.b, z0\.b, #1
**	ret
*/
TEST_UNIFORM_Z (qsub_1_u8_tied1, svuint8_t,
		z0 = svqsub_n_u8 (z0, 1),
		z0 = svqsub (z0, 1))

/*
** qsub_1_u8_untied:
**	movprfx	z0, z1
**	uqsub	z0\.b, z0\.b, #1
**	ret
*/
TEST_UNIFORM_Z (qsub_1_u8_untied, svuint8_t,
		z0 = svqsub_n_u8 (z1, 1),
		z0 = svqsub (z1, 1))

/*
** qsub_127_u8:
**	uqsub	z0\.b, z0\.b, #127
**	ret
*/
TEST_UNIFORM_Z (qsub_127_u8, svuint8_t,
		z0 = svqsub_n_u8 (z0, 127),
		z0 = svqsub (z0, 127))

/*
** qsub_128_u8:
**	uqsub	z0\.b, z0\.b, #128
**	ret
*/
TEST_UNIFORM_Z (qsub_128_u8, svuint8_t,
		z0 = svqsub_n_u8 (z0, 128),
		z0 = svqsub (z0, 128))

/*
** qsub_255_u8:
**	uqsub	z0\.b, z0\.b, #255
**	ret
*/
TEST_UNIFORM_Z (qsub_255_u8, svuint8_t,
		z0 = svqsub_n_u8 (z0, 255),
		z0 = svqsub (z0, 255))

/*
** qsub_m1_u8:
**	uqsub	z0\.b, z0\.b, #255
**	ret
*/
TEST_UNIFORM_Z (qsub_m1_u8, svuint8_t,
		z0 = svqsub_n_u8 (z0, -1),
		z0 = svqsub (z0, -1))

/*
** qsub_m127_u8:
**	uqsub	z0\.b, z0\.b, #129
**	ret
*/
TEST_UNIFORM_Z (qsub_m127_u8, svuint8_t,
		z0 = svqsub_n_u8 (z0, -127),
		z0 = svqsub (z0, -127))

/*
** qsub_m128_u8:
**	uqsub	z0\.b, z0\.b, #128
**	ret
*/
TEST_UNIFORM_Z (qsub_m128_u8, svuint8_t,
		z0 = svqsub_n_u8 (z0, -128),
		z0 = svqsub (z0, -128))

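/* Merging forms (_m): inactive lanes take their values from the first
   vector argument, so the predicated UQSUB is expected, with immediate
   operands first materialised into a register by MOV.  */
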
/*
** qsub_u8_m_tied1:
**	uqsub	z0\.b, p0/m, z0\.b, z1\.b
**	ret
*/
TEST_UNIFORM_Z (qsub_u8_m_tied1, svuint8_t,
		z0 = svqsub_u8_m (p0, z0, z1),
		z0 = svqsub_m (p0, z0, z1))

/*
** qsub_u8_m_tied2:
**	mov	(z[0-9]+)\.d, z0\.d
**	movprfx	z0, z1
**	uqsub	z0\.b, p0/m, z0\.b, \1\.b
**	ret
*/
TEST_UNIFORM_Z (qsub_u8_m_tied2, svuint8_t,
		z0 = svqsub_u8_m (p0, z1, z0),
		z0 = svqsub_m (p0, z1, z0))

/*
** qsub_u8_m_untied:
**	movprfx	z0, z1
**	uqsub	z0\.b, p0/m, z0\.b, z2\.b
**	ret
*/
TEST_UNIFORM_Z (qsub_u8_m_untied, svuint8_t,
		z0 = svqsub_u8_m (p0, z1, z2),
		z0 = svqsub_m (p0, z1, z2))

/*
** qsub_w0_u8_m_tied1:
**	mov	(z[0-9]+\.b), w0
**	uqsub	z0\.b, p0/m, z0\.b, \1
**	ret
*/
TEST_UNIFORM_ZX (qsub_w0_u8_m_tied1, svuint8_t, uint8_t,
		 z0 = svqsub_n_u8_m (p0, z0, x0),
		 z0 = svqsub_m (p0, z0, x0))

/*
** qsub_w0_u8_m_untied: { xfail *-*-* }
**	mov	(z[0-9]+\.b), w0
**	movprfx	z0, z1
**	uqsub	z0\.b, p0/m, z0\.b, \1
**	ret
*/
TEST_UNIFORM_ZX (qsub_w0_u8_m_untied, svuint8_t, uint8_t,
		 z0 = svqsub_n_u8_m (p0, z1, x0),
		 z0 = svqsub_m (p0, z1, x0))

/*
** qsub_1_u8_m_tied1:
**	mov	(z[0-9]+\.b), #1
**	uqsub	z0\.b, p0/m, z0\.b, \1
**	ret
*/
TEST_UNIFORM_Z (qsub_1_u8_m_tied1, svuint8_t,
		z0 = svqsub_n_u8_m (p0, z0, 1),
		z0 = svqsub_m (p0, z0, 1))

/*
** qsub_1_u8_m_untied: { xfail *-*-* }
**	mov	(z[0-9]+\.b), #1
**	movprfx	z0, z1
**	uqsub	z0\.b, p0/m, z0\.b, \1
**	ret
*/
TEST_UNIFORM_Z (qsub_1_u8_m_untied, svuint8_t,
		z0 = svqsub_n_u8_m (p0, z1, 1),
		z0 = svqsub_m (p0, z1, 1))

/*
** qsub_127_u8_m:
**	mov	(z[0-9]+\.b), #127
**	uqsub	z0\.b, p0/m, z0\.b, \1
**	ret
*/
TEST_UNIFORM_Z (qsub_127_u8_m, svuint8_t,
		z0 = svqsub_n_u8_m (p0, z0, 127),
		z0 = svqsub_m (p0, z0, 127))

/*
** qsub_128_u8_m:
**	mov	(z[0-9]+\.b), #-128
**	uqsub	z0\.b, p0/m, z0\.b, \1
**	ret
*/
TEST_UNIFORM_Z (qsub_128_u8_m, svuint8_t,
		z0 = svqsub_n_u8_m (p0, z0, 128),
		z0 = svqsub_m (p0, z0, 128))

/*
** qsub_255_u8_m:
**	mov	(z[0-9]+\.b), #-1
**	uqsub	z0\.b, p0/m, z0\.b, \1
**	ret
*/
TEST_UNIFORM_Z (qsub_255_u8_m, svuint8_t,
		z0 = svqsub_n_u8_m (p0, z0, 255),
		z0 = svqsub_m (p0, z0, 255))

/*
** qsub_m1_u8_m:
**	mov	(z[0-9]+\.b), #-1
**	uqsub	z0\.b, p0/m, z0\.b, \1
**	ret
*/
TEST_UNIFORM_Z (qsub_m1_u8_m, svuint8_t,
		z0 = svqsub_n_u8_m (p0, z0, -1),
		z0 = svqsub_m (p0, z0, -1))

/*
** qsub_m127_u8_m:
**	mov	(z[0-9]+\.b), #-127
**	uqsub	z0\.b, p0/m, z0\.b, \1
**	ret
*/
TEST_UNIFORM_Z (qsub_m127_u8_m, svuint8_t,
		z0 = svqsub_n_u8_m (p0, z0, -127),
		z0 = svqsub_m (p0, z0, -127))

/*
** qsub_m128_u8_m:
**	mov	(z[0-9]+\.b), #-128
**	uqsub	z0\.b, p0/m, z0\.b, \1
**	ret
*/
TEST_UNIFORM_Z (qsub_m128_u8_m, svuint8_t,
		z0 = svqsub_n_u8_m (p0, z0, -128),
		z0 = svqsub_m (p0, z0, -128))

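/* Zeroing forms (_z): inactive lanes are set to zero, expressed by a
   zeroing MOVPRFX before the predicated UQSUB, or its reversed form
   UQSUBR when swapping the operands saves a move.  */
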
/*
** qsub_u8_z_tied1:
**	movprfx	z0\.b, p0/z, z0\.b
**	uqsub	z0\.b, p0/m, z0\.b, z1\.b
**	ret
*/
TEST_UNIFORM_Z (qsub_u8_z_tied1, svuint8_t,
		z0 = svqsub_u8_z (p0, z0, z1),
		z0 = svqsub_z (p0, z0, z1))

/*
** qsub_u8_z_tied2:
**	movprfx	z0\.b, p0/z, z0\.b
**	uqsubr	z0\.b, p0/m, z0\.b, z1\.b
**	ret
*/
TEST_UNIFORM_Z (qsub_u8_z_tied2, svuint8_t,
		z0 = svqsub_u8_z (p0, z1, z0),
		z0 = svqsub_z (p0, z1, z0))

/*
** qsub_u8_z_untied:
** (
**	movprfx	z0\.b, p0/z, z1\.b
**	uqsub	z0\.b, p0/m, z0\.b, z2\.b
** |
**	movprfx	z0\.b, p0/z, z2\.b
**	uqsubr	z0\.b, p0/m, z0\.b, z1\.b
** )
**	ret
*/
TEST_UNIFORM_Z (qsub_u8_z_untied, svuint8_t,
		z0 = svqsub_u8_z (p0, z1, z2),
		z0 = svqsub_z (p0, z1, z2))

/*
** qsub_w0_u8_z_tied1:
**	mov	(z[0-9]+\.b), w0
**	movprfx	z0\.b, p0/z, z0\.b
**	uqsub	z0\.b, p0/m, z0\.b, \1
**	ret
*/
TEST_UNIFORM_ZX (qsub_w0_u8_z_tied1, svuint8_t, uint8_t,
		 z0 = svqsub_n_u8_z (p0, z0, x0),
		 z0 = svqsub_z (p0, z0, x0))

/*
** qsub_w0_u8_z_untied:
**	mov	(z[0-9]+\.b), w0
** (
**	movprfx	z0\.b, p0/z, z1\.b
**	uqsub	z0\.b, p0/m, z0\.b, \1
** |
**	movprfx	z0\.b, p0/z, \1
**	uqsubr	z0\.b, p0/m, z0\.b, z1\.b
** )
**	ret
*/
TEST_UNIFORM_ZX (qsub_w0_u8_z_untied, svuint8_t, uint8_t,
		 z0 = svqsub_n_u8_z (p0, z1, x0),
		 z0 = svqsub_z (p0, z1, x0))

/*
** qsub_1_u8_z_tied1:
**	mov	(z[0-9]+\.b), #1
**	movprfx	z0\.b, p0/z, z0\.b
**	uqsub	z0\.b, p0/m, z0\.b, \1
**	ret
*/
TEST_UNIFORM_Z (qsub_1_u8_z_tied1, svuint8_t,
		z0 = svqsub_n_u8_z (p0, z0, 1),
		z0 = svqsub_z (p0, z0, 1))

/*
** qsub_1_u8_z_untied:
**	mov	(z[0-9]+\.b), #1
** (
**	movprfx	z0\.b, p0/z, z1\.b
**	uqsub	z0\.b, p0/m, z0\.b, \1
** |
**	movprfx	z0\.b, p0/z, \1
**	uqsubr	z0\.b, p0/m, z0\.b, z1\.b
** )
**	ret
*/
TEST_UNIFORM_Z (qsub_1_u8_z_untied, svuint8_t,
		z0 = svqsub_n_u8_z (p0, z1, 1),
		z0 = svqsub_z (p0, z1, 1))

/*
** qsub_127_u8_z:
**	mov	(z[0-9]+\.b), #127
**	movprfx	z0\.b, p0/z, z0\.b
**	uqsub	z0\.b, p0/m, z0\.b, \1
**	ret
*/
TEST_UNIFORM_Z (qsub_127_u8_z, svuint8_t,
		z0 = svqsub_n_u8_z (p0, z0, 127),
		z0 = svqsub_z (p0, z0, 127))

/*
** qsub_128_u8_z:
**	mov	(z[0-9]+\.b), #-128
**	movprfx	z0\.b, p0/z, z0\.b
**	uqsub	z0\.b, p0/m, z0\.b, \1
**	ret
*/
TEST_UNIFORM_Z (qsub_128_u8_z, svuint8_t,
		z0 = svqsub_n_u8_z (p0, z0, 128),
		z0 = svqsub_z (p0, z0, 128))

/*
** qsub_255_u8_z:
**	mov	(z[0-9]+\.b), #-1
**	movprfx	z0\.b, p0/z, z0\.b
**	uqsub	z0\.b, p0/m, z0\.b, \1
**	ret
*/
TEST_UNIFORM_Z (qsub_255_u8_z, svuint8_t,
		z0 = svqsub_n_u8_z (p0, z0, 255),
		z0 = svqsub_z (p0, z0, 255))

/*
** qsub_m1_u8_z:
**	mov	(z[0-9]+\.b), #-1
**	movprfx	z0\.b, p0/z, z0\.b
**	uqsub	z0\.b, p0/m, z0\.b, \1
**	ret
*/
TEST_UNIFORM_Z (qsub_m1_u8_z, svuint8_t,
		z0 = svqsub_n_u8_z (p0, z0, -1),
		z0 = svqsub_z (p0, z0, -1))

/*
** qsub_m127_u8_z:
**	mov	(z[0-9]+\.b), #-127
**	movprfx	z0\.b, p0/z, z0\.b
**	uqsub	z0\.b, p0/m, z0\.b, \1
**	ret
*/
TEST_UNIFORM_Z (qsub_m127_u8_z, svuint8_t,
		z0 = svqsub_n_u8_z (p0, z0, -127),
		z0 = svqsub_z (p0, z0, -127))

/*
** qsub_m128_u8_z:
**	mov	(z[0-9]+\.b), #-128
**	movprfx	z0\.b, p0/z, z0\.b
**	uqsub	z0\.b, p0/m, z0\.b, \1
**	ret
*/
TEST_UNIFORM_Z (qsub_m128_u8_z, svuint8_t,
		z0 = svqsub_n_u8_z (p0, z0, -128),
		z0 = svqsub_z (p0, z0, -128))

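/* "Don't care" forms (_x): inactive lanes may take any value, so the
   compiler is free to fall back to the unpredicated UQSUB, including
   its immediate form.  */
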
/*
** qsub_u8_x_tied1:
**	uqsub	z0\.b, z0\.b, z1\.b
**	ret
*/
TEST_UNIFORM_Z (qsub_u8_x_tied1, svuint8_t,
		z0 = svqsub_u8_x (p0, z0, z1),
		z0 = svqsub_x (p0, z0, z1))

/*
** qsub_u8_x_tied2:
**	uqsub	z0\.b, z1\.b, z0\.b
**	ret
*/
TEST_UNIFORM_Z (qsub_u8_x_tied2, svuint8_t,
		z0 = svqsub_u8_x (p0, z1, z0),
		z0 = svqsub_x (p0, z1, z0))

/*
** qsub_u8_x_untied:
**	uqsub	z0\.b, z1\.b, z2\.b
**	ret
*/
TEST_UNIFORM_Z (qsub_u8_x_untied, svuint8_t,
		z0 = svqsub_u8_x (p0, z1, z2),
		z0 = svqsub_x (p0, z1, z2))

/*
** qsub_w0_u8_x_tied1:
**	mov	(z[0-9]+\.b), w0
**	uqsub	z0\.b, z0\.b, \1
**	ret
*/
TEST_UNIFORM_ZX (qsub_w0_u8_x_tied1, svuint8_t, uint8_t,
		 z0 = svqsub_n_u8_x (p0, z0, x0),
		 z0 = svqsub_x (p0, z0, x0))

/*
** qsub_w0_u8_x_untied:
**	mov	(z[0-9]+\.b), w0
**	uqsub	z0\.b, z1\.b, \1
**	ret
*/
TEST_UNIFORM_ZX (qsub_w0_u8_x_untied, svuint8_t, uint8_t,
		 z0 = svqsub_n_u8_x (p0, z1, x0),
		 z0 = svqsub_x (p0, z1, x0))

/*
** qsub_1_u8_x_tied1:
**	uqsub	z0\.b, z0\.b, #1
**	ret
*/
TEST_UNIFORM_Z (qsub_1_u8_x_tied1, svuint8_t,
		z0 = svqsub_n_u8_x (p0, z0, 1),
		z0 = svqsub_x (p0, z0, 1))

/*
** qsub_1_u8_x_untied:
**	movprfx	z0, z1
**	uqsub	z0\.b, z0\.b, #1
**	ret
*/
TEST_UNIFORM_Z (qsub_1_u8_x_untied, svuint8_t,
		z0 = svqsub_n_u8_x (p0, z1, 1),
		z0 = svqsub_x (p0, z1, 1))

/*
** qsub_127_u8_x:
**	uqsub	z0\.b, z0\.b, #127
**	ret
*/
TEST_UNIFORM_Z (qsub_127_u8_x, svuint8_t,
		z0 = svqsub_n_u8_x (p0, z0, 127),
		z0 = svqsub_x (p0, z0, 127))

/*
** qsub_128_u8_x:
**	uqsub	z0\.b, z0\.b, #128
**	ret
*/
TEST_UNIFORM_Z (qsub_128_u8_x, svuint8_t,
		z0 = svqsub_n_u8_x (p0, z0, 128),
		z0 = svqsub_x (p0, z0, 128))

/*
** qsub_255_u8_x:
**	uqsub	z0\.b, z0\.b, #255
**	ret
*/
TEST_UNIFORM_Z (qsub_255_u8_x, svuint8_t,
		z0 = svqsub_n_u8_x (p0, z0, 255),
		z0 = svqsub_x (p0, z0, 255))

/*
** qsub_m1_u8_x:
**	uqsub	z0\.b, z0\.b, #255
**	ret
*/
TEST_UNIFORM_Z (qsub_m1_u8_x, svuint8_t,
		z0 = svqsub_n_u8_x (p0, z0, -1),
		z0 = svqsub_x (p0, z0, -1))

/*
** qsub_m127_u8_x:
**	uqsub	z0\.b, z0\.b, #129
**	ret
*/
TEST_UNIFORM_Z (qsub_m127_u8_x, svuint8_t,
		z0 = svqsub_n_u8_x (p0, z0, -127),
		z0 = svqsub_x (p0, z0, -127))

/*
** qsub_m128_u8_x:
**	uqsub	z0\.b, z0\.b, #128
**	ret
*/
TEST_UNIFORM_Z (qsub_m128_u8_x, svuint8_t,
		z0 = svqsub_n_u8_x (p0, z0, -128),
		z0 = svqsub_x (p0, z0, -128))