/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */

#include "test_sve_acle.h"

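/* Unsigned halving subtract (SVE2 UHSUB/UHSUBR) on 32-bit elements.
   Each test checks that a variant of the svhsub_u32 intrinsic compiles
   to the expected instruction sequence under merging (_m), zeroing (_z)
   and "don't care" (_x) predication, with tied and untied operand
   allocations exercising MOVPRFX.  Per active lane the operation is the
   exact difference shifted right one bit; an illustrative scalar sketch
   of the semantics (not part of the test) is:

     uint32_t hsub_u32 (uint32_t a, uint32_t b)
     {
       return (uint32_t) (((uint64_t) a - (uint64_t) b) >> 1);
     }
*/
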
/*
** hsub_u32_m_tied1:
** uhsub z0\.s, p0/m, z0\.s, z1\.s
** ret
*/
TEST_UNIFORM_Z (hsub_u32_m_tied1, svuint32_t,
                z0 = svhsub_u32_m (p0, z0, z1),
                z0 = svhsub_m (p0, z0, z1))

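/* UHSUB is destructive (the first source operand must also be the
   destination), so when the intrinsic ties the "wrong" operand the
   compiler either copies it to a temporary or prefixes the destination
   with MOVPRFX.  In the patterns below, (z[0-9]+) captures whichever
   temporary register the compiler picks and \1 matches its later uses.  */
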
/*
** hsub_u32_m_tied2:
** mov (z[0-9]+)\.d, z0\.d
** movprfx z0, z1
** uhsub z0\.s, p0/m, z0\.s, \1\.s
** ret
*/
TEST_UNIFORM_Z (hsub_u32_m_tied2, svuint32_t,
                z0 = svhsub_u32_m (p0, z1, z0),
                z0 = svhsub_m (p0, z1, z0))

/*
** hsub_u32_m_untied:
** movprfx z0, z1
** uhsub z0\.s, p0/m, z0\.s, z2\.s
** ret
*/
TEST_UNIFORM_Z (hsub_u32_m_untied, svuint32_t,
                z0 = svhsub_u32_m (p0, z1, z2),
                z0 = svhsub_m (p0, z1, z2))

/*
** hsub_w0_u32_m_tied1:
** mov (z[0-9]+\.s), w0
** uhsub z0\.s, p0/m, z0\.s, \1
** ret
*/
TEST_UNIFORM_ZX (hsub_w0_u32_m_tied1, svuint32_t, uint32_t,
                 z0 = svhsub_n_u32_m (p0, z0, x0),
                 z0 = svhsub_m (p0, z0, x0))

/*
** hsub_w0_u32_m_untied:
** mov (z[0-9]+\.s), w0
** movprfx z0, z1
** uhsub z0\.s, p0/m, z0\.s, \1
** ret
*/
TEST_UNIFORM_ZX (hsub_w0_u32_m_untied, svuint32_t, uint32_t,
                 z0 = svhsub_n_u32_m (p0, z1, x0),
                 z0 = svhsub_m (p0, z1, x0))

/*
** hsub_11_u32_m_tied1:
** mov (z[0-9]+\.s), #11
** uhsub z0\.s, p0/m, z0\.s, \1
** ret
*/
TEST_UNIFORM_Z (hsub_11_u32_m_tied1, svuint32_t,
                z0 = svhsub_n_u32_m (p0, z0, 11),
                z0 = svhsub_m (p0, z0, 11))

/*
** hsub_11_u32_m_untied: { xfail *-*-* }
** mov (z[0-9]+\.s), #11
** movprfx z0, z1
** uhsub z0\.s, p0/m, z0\.s, \1
** ret
*/
TEST_UNIFORM_Z (hsub_11_u32_m_untied, svuint32_t,
                z0 = svhsub_n_u32_m (p0, z1, 11),
                z0 = svhsub_m (p0, z1, 11))

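/* Zeroing (_z) forms: inactive lanes of the result must be zero, so each
   sequence starts with a zeroing MOVPRFX.  Where two orderings are
   acceptable, the (...|...) alternatives let the compiler reverse the
   operands and use UHSUBR instead of UHSUB.  */
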
/*
** hsub_u32_z_tied1:
** movprfx z0\.s, p0/z, z0\.s
** uhsub z0\.s, p0/m, z0\.s, z1\.s
** ret
*/
TEST_UNIFORM_Z (hsub_u32_z_tied1, svuint32_t,
                z0 = svhsub_u32_z (p0, z0, z1),
                z0 = svhsub_z (p0, z0, z1))

/*
** hsub_u32_z_tied2:
** movprfx z0\.s, p0/z, z0\.s
** uhsubr z0\.s, p0/m, z0\.s, z1\.s
** ret
*/
TEST_UNIFORM_Z (hsub_u32_z_tied2, svuint32_t,
                z0 = svhsub_u32_z (p0, z1, z0),
                z0 = svhsub_z (p0, z1, z0))

/*
** hsub_u32_z_untied:
** (
** movprfx z0\.s, p0/z, z1\.s
** uhsub z0\.s, p0/m, z0\.s, z2\.s
** |
** movprfx z0\.s, p0/z, z2\.s
** uhsubr z0\.s, p0/m, z0\.s, z1\.s
** )
** ret
*/
TEST_UNIFORM_Z (hsub_u32_z_untied, svuint32_t,
                z0 = svhsub_u32_z (p0, z1, z2),
                z0 = svhsub_z (p0, z1, z2))

/*
** hsub_w0_u32_z_tied1:
** mov (z[0-9]+\.s), w0
** movprfx z0\.s, p0/z, z0\.s
** uhsub z0\.s, p0/m, z0\.s, \1
** ret
*/
TEST_UNIFORM_ZX (hsub_w0_u32_z_tied1, svuint32_t, uint32_t,
                 z0 = svhsub_n_u32_z (p0, z0, x0),
                 z0 = svhsub_z (p0, z0, x0))

/*
** hsub_w0_u32_z_untied:
** mov (z[0-9]+\.s), w0
** (
** movprfx z0\.s, p0/z, z1\.s
** uhsub z0\.s, p0/m, z0\.s, \1
** |
** movprfx z0\.s, p0/z, \1
** uhsubr z0\.s, p0/m, z0\.s, z1\.s
** )
** ret
*/
TEST_UNIFORM_ZX (hsub_w0_u32_z_untied, svuint32_t, uint32_t,
                 z0 = svhsub_n_u32_z (p0, z1, x0),
                 z0 = svhsub_z (p0, z1, x0))

/*
** hsub_11_u32_z_tied1:
** mov (z[0-9]+\.s), #11
** movprfx z0\.s, p0/z, z0\.s
** uhsub z0\.s, p0/m, z0\.s, \1
** ret
*/
TEST_UNIFORM_Z (hsub_11_u32_z_tied1, svuint32_t,
                z0 = svhsub_n_u32_z (p0, z0, 11),
                z0 = svhsub_z (p0, z0, 11))

/*
** hsub_11_u32_z_untied:
** mov (z[0-9]+\.s), #11
** (
** movprfx z0\.s, p0/z, z1\.s
** uhsub z0\.s, p0/m, z0\.s, \1
** |
** movprfx z0\.s, p0/z, \1
** uhsubr z0\.s, p0/m, z0\.s, z1\.s
** )
** ret
*/
TEST_UNIFORM_Z (hsub_11_u32_z_untied, svuint32_t,
                z0 = svhsub_n_u32_z (p0, z1, 11),
                z0 = svhsub_z (p0, z1, 11))

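/* "Don't care" (_x) forms: inactive lanes may take any value, so no
   MOVPRFX is needed when either input is already in z0; the reversed
   form UHSUBR handles a tied second operand directly.  */
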
/*
** hsub_u32_x_tied1:
** uhsub z0\.s, p0/m, z0\.s, z1\.s
** ret
*/
TEST_UNIFORM_Z (hsub_u32_x_tied1, svuint32_t,
                z0 = svhsub_u32_x (p0, z0, z1),
                z0 = svhsub_x (p0, z0, z1))

/*
** hsub_u32_x_tied2:
** uhsubr z0\.s, p0/m, z0\.s, z1\.s
** ret
*/
TEST_UNIFORM_Z (hsub_u32_x_tied2, svuint32_t,
                z0 = svhsub_u32_x (p0, z1, z0),
                z0 = svhsub_x (p0, z1, z0))

/*
** hsub_u32_x_untied:
** (
** movprfx z0, z1
** uhsub z0\.s, p0/m, z0\.s, z2\.s
** |
** movprfx z0, z2
** uhsubr z0\.s, p0/m, z0\.s, z1\.s
** )
** ret
*/
TEST_UNIFORM_Z (hsub_u32_x_untied, svuint32_t,
                z0 = svhsub_u32_x (p0, z1, z2),
                z0 = svhsub_x (p0, z1, z2))

/*
** hsub_w0_u32_x_tied1:
** mov (z[0-9]+\.s), w0
** uhsub z0\.s, p0/m, z0\.s, \1
** ret
*/
TEST_UNIFORM_ZX (hsub_w0_u32_x_tied1, svuint32_t, uint32_t,
                 z0 = svhsub_n_u32_x (p0, z0, x0),
                 z0 = svhsub_x (p0, z0, x0))

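/* With an untied vector operand built from a scalar or an immediate, the
   _x form can materialize that operand directly in z0 and use UHSUBR,
   avoiding a MOVPRFX.  */
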
/*
** hsub_w0_u32_x_untied:
** mov z0\.s, w0
** uhsubr z0\.s, p0/m, z0\.s, z1\.s
** ret
*/
TEST_UNIFORM_ZX (hsub_w0_u32_x_untied, svuint32_t, uint32_t,
                 z0 = svhsub_n_u32_x (p0, z1, x0),
                 z0 = svhsub_x (p0, z1, x0))

/*
** hsub_11_u32_x_tied1:
** mov (z[0-9]+\.s), #11
** uhsub z0\.s, p0/m, z0\.s, \1
** ret
*/
TEST_UNIFORM_Z (hsub_11_u32_x_tied1, svuint32_t,
                z0 = svhsub_n_u32_x (p0, z0, 11),
                z0 = svhsub_x (p0, z0, 11))

/*
** hsub_11_u32_x_untied:
** mov z0\.s, #11
** uhsubr z0\.s, p0/m, z0\.s, z1\.s
** ret
*/
TEST_UNIFORM_Z (hsub_11_u32_x_untied, svuint32_t,
                z0 = svhsub_n_u32_x (p0, z1, 11),
                z0 = svhsub_x (p0, z1, 11))