/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */

#include "test_sve_acle.h"
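
/* Each test's expected assembly is given in the comment above it;
   check-function-bodies matches the compiled function's body against
   that regular-expression template.

   NBSL is the SVE2 "bitwise select, inverted result" operation:
   svnbsl (op1, op2, op3) computes ~((op1 & op3) | (op2 & ~op3)).
   Because the operation is purely bitwise, the instruction always
   uses the .d element qualifier, whatever the intrinsic's element
   type.  */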

/*
** nbsl_s16_tied1:
**	nbsl	z0\.d, z0\.d, z1\.d, z2\.d
**	ret
*/
TEST_UNIFORM_Z (nbsl_s16_tied1, svint16_t,
		z0 = svnbsl_s16 (z0, z1, z2),
		z0 = svnbsl (z0, z1, z2))
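
/* NBSL destructively overwrites its first source register, so when
   the destination is tied to the second or third input, the expected
   code first saves that input in a scratch register and then uses
   MOVPRFX to initialize z0 from the untied operand.  */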

/*
** nbsl_s16_tied2:
**	mov	(z[0-9]+\.d), z0\.d
**	movprfx	z0, z1
**	nbsl	z0\.d, z0\.d, \1, z2\.d
**	ret
*/
TEST_UNIFORM_Z (nbsl_s16_tied2, svint16_t,
		z0 = svnbsl_s16 (z1, z0, z2),
		z0 = svnbsl (z1, z0, z2))

/*
** nbsl_s16_tied3:
**	mov	(z[0-9]+\.d), z0\.d
**	movprfx	z0, z1
**	nbsl	z0\.d, z0\.d, z2\.d, \1
**	ret
*/
TEST_UNIFORM_Z (nbsl_s16_tied3, svint16_t,
		z0 = svnbsl_s16 (z1, z2, z0),
		z0 = svnbsl (z1, z2, z0))

/*
** nbsl_s16_untied:
**	movprfx	z0, z1
**	nbsl	z0\.d, z0\.d, z2\.d, z3\.d
**	ret
*/
TEST_UNIFORM_Z (nbsl_s16_untied, svint16_t,
		z0 = svnbsl_s16 (z1, z2, z3),
		z0 = svnbsl (z1, z2, z3))
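
/* The _n forms take a scalar as the final operand.  The expected
   code broadcasts it to every .h element first (from a W register in
   the tests below, from an immediate in the #11 tests further down)
   and then applies the same NBSL patterns as the all-vector forms.  */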

/*
** nbsl_w0_s16_tied1:
**	mov	(z[0-9]+)\.h, w0
**	nbsl	z0\.d, z0\.d, z1\.d, \1\.d
**	ret
*/
TEST_UNIFORM_ZX (nbsl_w0_s16_tied1, svint16_t, int16_t,
		 z0 = svnbsl_n_s16 (z0, z1, x0),
		 z0 = svnbsl (z0, z1, x0))

/*
** nbsl_w0_s16_tied2:
**	mov	(z[0-9]+)\.h, w0
**	mov	(z[0-9]+\.d), z0\.d
**	movprfx	z0, z1
**	nbsl	z0\.d, z0\.d, \2, \1\.d
**	ret
*/
TEST_UNIFORM_ZX (nbsl_w0_s16_tied2, svint16_t, int16_t,
		 z0 = svnbsl_n_s16 (z1, z0, x0),
		 z0 = svnbsl (z1, z0, x0))

/*
** nbsl_w0_s16_untied:
**	mov	(z[0-9]+)\.h, w0
**	movprfx	z0, z1
**	nbsl	z0\.d, z0\.d, z2\.d, \1\.d
**	ret
*/
TEST_UNIFORM_ZX (nbsl_w0_s16_untied, svint16_t, int16_t,
		 z0 = svnbsl_n_s16 (z1, z2, x0),
		 z0 = svnbsl (z1, z2, x0))

/*
** nbsl_11_s16_tied1:
**	mov	(z[0-9]+)\.h, #11
**	nbsl	z0\.d, z0\.d, z1\.d, \1\.d
**	ret
*/
TEST_UNIFORM_Z (nbsl_11_s16_tied1, svint16_t,
		z0 = svnbsl_n_s16 (z0, z1, 11),
		z0 = svnbsl (z0, z1, 11))

/*
** nbsl_11_s16_tied2:
**	mov	(z[0-9]+)\.h, #11
**	mov	(z[0-9]+\.d), z0\.d
**	movprfx	z0, z1
**	nbsl	z0\.d, z0\.d, \2, \1\.d
**	ret
*/
TEST_UNIFORM_Z (nbsl_11_s16_tied2, svint16_t,
		z0 = svnbsl_n_s16 (z1, z0, 11),
		z0 = svnbsl (z1, z0, 11))

/*
** nbsl_11_s16_untied:
**	mov	(z[0-9]+)\.h, #11
**	movprfx	z0, z1
**	nbsl	z0\.d, z0\.d, z2\.d, \1\.d
**	ret
*/
TEST_UNIFORM_Z (nbsl_11_s16_untied, svint16_t,
		z0 = svnbsl_n_s16 (z1, z2, 11),
		z0 = svnbsl (z1, z2, 11))