/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */

#include "test_sve_acle.h"
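
/* Tests for the SVE2 ACLE svrshl (signed rounding shift left) intrinsics
   on svint32_t.  Each TEST_* invocation exercises both the type-suffixed
   intrinsic (e.g. svrshl_s32_m) and its overloaded alias (svrshl_m), and
   the preceding "**" comment gives the assembly that check-function-bodies
   is expected to match.  The _m, _z and _x suffixes select merging,
   zeroing and "don't care" predication respectively.  */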

/*
** rshl_s32_m_tied1:
**	srshl	z0\.s, p0/m, z0\.s, z4\.s
**	ret
*/
TEST_DUAL_Z (rshl_s32_m_tied1, svint32_t, svint32_t,
	     z0 = svrshl_s32_m (p0, z0, z4),
	     z0 = svrshl_m (p0, z0, z4))

/*
** rshl_s32_m_tied2:
**	mov	(z[0-9]+)\.d, z0\.d
**	movprfx	z0, z4
**	srshl	z0\.s, p0/m, z0\.s, \1\.s
**	ret
*/
TEST_DUAL_Z_REV (rshl_s32_m_tied2, svint32_t, svint32_t,
		 z0_res = svrshl_s32_m (p0, z4, z0),
		 z0_res = svrshl_m (p0, z4, z0))

/*
** rshl_s32_m_untied:
**	movprfx	z0, z1
**	srshl	z0\.s, p0/m, z0\.s, z4\.s
**	ret
*/
TEST_DUAL_Z (rshl_s32_m_untied, svint32_t, svint32_t,
	     z0 = svrshl_s32_m (p0, z1, z4),
	     z0 = svrshl_m (p0, z1, z4))

/*
** rshl_w0_s32_m_tied1:
**	mov	(z[0-9]+\.s), w0
**	srshl	z0\.s, p0/m, z0\.s, \1
**	ret
*/
TEST_UNIFORM_ZX (rshl_w0_s32_m_tied1, svint32_t, int32_t,
		 z0 = svrshl_n_s32_m (p0, z0, x0),
		 z0 = svrshl_m (p0, z0, x0))

/*
** rshl_w0_s32_m_untied:
**	mov	(z[0-9]+\.s), w0
**	movprfx	z0, z1
**	srshl	z0\.s, p0/m, z0\.s, \1
**	ret
*/
TEST_UNIFORM_ZX (rshl_w0_s32_m_untied, svint32_t, int32_t,
		 z0 = svrshl_n_s32_m (p0, z1, x0),
		 z0 = svrshl_m (p0, z1, x0))

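/* A negative count for svrshl selects a rounding arithmetic shift right,
   so svrshl (x, -n) is expected to become SRSHR #n, which rounds by
   adding 1 << (n - 1) before shifting; e.g. a count of -2 computes
   (x + 2) >> 2.  */
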
/*
** rshl_m32_s32_m:
**	srshr	z0\.s, p0/m, z0\.s, #32
**	ret
*/
TEST_UNIFORM_Z (rshl_m32_s32_m, svint32_t,
		z0 = svrshl_n_s32_m (p0, z0, -32),
		z0 = svrshl_m (p0, z0, -32))

/*
** rshl_m2_s32_m:
**	srshr	z0\.s, p0/m, z0\.s, #2
**	ret
*/
TEST_UNIFORM_Z (rshl_m2_s32_m, svint32_t,
		z0 = svrshl_n_s32_m (p0, z0, -2),
		z0 = svrshl_m (p0, z0, -2))

/*
** rshl_m1_s32_m_tied1:
**	srshr	z0\.s, p0/m, z0\.s, #1
**	ret
*/
TEST_UNIFORM_Z (rshl_m1_s32_m_tied1, svint32_t,
		z0 = svrshl_n_s32_m (p0, z0, -1),
		z0 = svrshl_m (p0, z0, -1))

/*
** rshl_m1_s32_m_untied:
**	movprfx	z0, z1
**	srshr	z0\.s, p0/m, z0\.s, #1
**	ret
*/
TEST_UNIFORM_Z (rshl_m1_s32_m_untied, svint32_t,
		z0 = svrshl_n_s32_m (p0, z1, -1),
		z0 = svrshl_m (p0, z1, -1))

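/* A positive constant count is a plain left shift (rounding never changes
   the result of a left shift), so svrshl with a positive immediate is
   expected to map to LSL rather than to a rounding instruction.  */
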
/*
** rshl_1_s32_m_tied1:
**	lsl	z0\.s, p0/m, z0\.s, #1
**	ret
*/
TEST_UNIFORM_Z (rshl_1_s32_m_tied1, svint32_t,
		z0 = svrshl_n_s32_m (p0, z0, 1),
		z0 = svrshl_m (p0, z0, 1))

/*
** rshl_1_s32_m_untied:
**	movprfx	z0, z1
**	lsl	z0\.s, p0/m, z0\.s, #1
**	ret
*/
TEST_UNIFORM_Z (rshl_1_s32_m_untied, svint32_t,
		z0 = svrshl_n_s32_m (p0, z1, 1),
		z0 = svrshl_m (p0, z1, 1))

/*
** rshl_2_s32_m:
**	lsl	z0\.s, p0/m, z0\.s, #2
**	ret
*/
TEST_UNIFORM_Z (rshl_2_s32_m, svint32_t,
		z0 = svrshl_n_s32_m (p0, z0, 2),
		z0 = svrshl_m (p0, z0, 2))

/*
** rshl_31_s32_m:
**	lsl	z0\.s, p0/m, z0\.s, #31
**	ret
*/
TEST_UNIFORM_Z (rshl_31_s32_m, svint32_t,
		z0 = svrshl_n_s32_m (p0, z0, 31),
		z0 = svrshl_m (p0, z0, 31))

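/* Zeroing (_z) forms: inactive lanes are cleared first, so each test
   expects a zeroing MOVPRFX before the shift; when the operands arrive
   reversed, the reversed-operand SRSHLR form keeps the destination tied.  */
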
/*
** rshl_s32_z_tied1:
**	movprfx	z0\.s, p0/z, z0\.s
**	srshl	z0\.s, p0/m, z0\.s, z4\.s
**	ret
*/
TEST_DUAL_Z (rshl_s32_z_tied1, svint32_t, svint32_t,
	     z0 = svrshl_s32_z (p0, z0, z4),
	     z0 = svrshl_z (p0, z0, z4))

/*
** rshl_s32_z_tied2:
**	movprfx	z0\.s, p0/z, z0\.s
**	srshlr	z0\.s, p0/m, z0\.s, z4\.s
**	ret
*/
TEST_DUAL_Z_REV (rshl_s32_z_tied2, svint32_t, svint32_t,
		 z0_res = svrshl_s32_z (p0, z4, z0),
		 z0_res = svrshl_z (p0, z4, z0))

/*
** rshl_s32_z_untied:
** (
**	movprfx	z0\.s, p0/z, z1\.s
**	srshl	z0\.s, p0/m, z0\.s, z4\.s
** |
**	movprfx	z0\.s, p0/z, z4\.s
**	srshlr	z0\.s, p0/m, z0\.s, z1\.s
** )
**	ret
*/
TEST_DUAL_Z (rshl_s32_z_untied, svint32_t, svint32_t,
	     z0 = svrshl_s32_z (p0, z1, z4),
	     z0 = svrshl_z (p0, z1, z4))

/*
** rshl_w0_s32_z_tied1:
**	mov	(z[0-9]+\.s), w0
**	movprfx	z0\.s, p0/z, z0\.s
**	srshl	z0\.s, p0/m, z0\.s, \1
**	ret
*/
TEST_UNIFORM_ZX (rshl_w0_s32_z_tied1, svint32_t, int32_t,
		 z0 = svrshl_n_s32_z (p0, z0, x0),
		 z0 = svrshl_z (p0, z0, x0))

/*
** rshl_w0_s32_z_untied:
**	mov	(z[0-9]+\.s), w0
** (
**	movprfx	z0\.s, p0/z, z1\.s
**	srshl	z0\.s, p0/m, z0\.s, \1
** |
**	movprfx	z0\.s, p0/z, \1
**	srshlr	z0\.s, p0/m, z0\.s, z1\.s
** )
**	ret
*/
TEST_UNIFORM_ZX (rshl_w0_s32_z_untied, svint32_t, int32_t,
		 z0 = svrshl_n_s32_z (p0, z1, x0),
		 z0 = svrshl_z (p0, z1, x0))

/*
** rshl_m32_s32_z:
**	movprfx	z0\.s, p0/z, z0\.s
**	srshr	z0\.s, p0/m, z0\.s, #32
**	ret
*/
TEST_UNIFORM_Z (rshl_m32_s32_z, svint32_t,
		z0 = svrshl_n_s32_z (p0, z0, -32),
		z0 = svrshl_z (p0, z0, -32))

/*
** rshl_m2_s32_z:
**	movprfx	z0\.s, p0/z, z0\.s
**	srshr	z0\.s, p0/m, z0\.s, #2
**	ret
*/
TEST_UNIFORM_Z (rshl_m2_s32_z, svint32_t,
		z0 = svrshl_n_s32_z (p0, z0, -2),
		z0 = svrshl_z (p0, z0, -2))

/*
** rshl_m1_s32_z_tied1:
**	movprfx	z0\.s, p0/z, z0\.s
**	srshr	z0\.s, p0/m, z0\.s, #1
**	ret
*/
TEST_UNIFORM_Z (rshl_m1_s32_z_tied1, svint32_t,
		z0 = svrshl_n_s32_z (p0, z0, -1),
		z0 = svrshl_z (p0, z0, -1))

/*
** rshl_m1_s32_z_untied:
**	movprfx	z0\.s, p0/z, z1\.s
**	srshr	z0\.s, p0/m, z0\.s, #1
**	ret
*/
TEST_UNIFORM_Z (rshl_m1_s32_z_untied, svint32_t,
		z0 = svrshl_n_s32_z (p0, z1, -1),
		z0 = svrshl_z (p0, z1, -1))

/*
** rshl_1_s32_z_tied1:
**	movprfx	z0\.s, p0/z, z0\.s
**	lsl	z0\.s, p0/m, z0\.s, #1
**	ret
*/
TEST_UNIFORM_Z (rshl_1_s32_z_tied1, svint32_t,
		z0 = svrshl_n_s32_z (p0, z0, 1),
		z0 = svrshl_z (p0, z0, 1))

/*
** rshl_1_s32_z_untied:
**	movprfx	z0\.s, p0/z, z1\.s
**	lsl	z0\.s, p0/m, z0\.s, #1
**	ret
*/
TEST_UNIFORM_Z (rshl_1_s32_z_untied, svint32_t,
		z0 = svrshl_n_s32_z (p0, z1, 1),
		z0 = svrshl_z (p0, z1, 1))

/*
** rshl_2_s32_z:
**	movprfx	z0\.s, p0/z, z0\.s
**	lsl	z0\.s, p0/m, z0\.s, #2
**	ret
*/
TEST_UNIFORM_Z (rshl_2_s32_z, svint32_t,
		z0 = svrshl_n_s32_z (p0, z0, 2),
		z0 = svrshl_z (p0, z0, 2))

/*
** rshl_31_s32_z:
**	movprfx	z0\.s, p0/z, z0\.s
**	lsl	z0\.s, p0/m, z0\.s, #31
**	ret
*/
TEST_UNIFORM_Z (rshl_31_s32_z, svint32_t,
		z0 = svrshl_n_s32_z (p0, z0, 31),
		z0 = svrshl_z (p0, z0, 31))

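/* "Don't care" (_x) forms: inactive lanes may take any value, so tied
   operands need no MOVPRFX and the compiler can pick whichever of SRSHL
   and SRSHLR avoids a move; an unpredicated MOVPRFX appears only for the
   untied cases.  */
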
/*
** rshl_s32_x_tied1:
**	srshl	z0\.s, p0/m, z0\.s, z4\.s
**	ret
*/
TEST_DUAL_Z (rshl_s32_x_tied1, svint32_t, svint32_t,
	     z0 = svrshl_s32_x (p0, z0, z4),
	     z0 = svrshl_x (p0, z0, z4))

/*
** rshl_s32_x_tied2:
**	srshlr	z0\.s, p0/m, z0\.s, z4\.s
**	ret
*/
TEST_DUAL_Z_REV (rshl_s32_x_tied2, svint32_t, svint32_t,
		 z0_res = svrshl_s32_x (p0, z4, z0),
		 z0_res = svrshl_x (p0, z4, z0))

/*
** rshl_s32_x_untied:
** (
**	movprfx	z0, z1
**	srshl	z0\.s, p0/m, z0\.s, z4\.s
** |
**	movprfx	z0, z4
**	srshlr	z0\.s, p0/m, z0\.s, z1\.s
** )
**	ret
*/
TEST_DUAL_Z (rshl_s32_x_untied, svint32_t, svint32_t,
	     z0 = svrshl_s32_x (p0, z1, z4),
	     z0 = svrshl_x (p0, z1, z4))

/*
** rshl_w0_s32_x_tied1:
**	mov	(z[0-9]+\.s), w0
**	srshl	z0\.s, p0/m, z0\.s, \1
**	ret
*/
TEST_UNIFORM_ZX (rshl_w0_s32_x_tied1, svint32_t, int32_t,
		 z0 = svrshl_n_s32_x (p0, z0, x0),
		 z0 = svrshl_x (p0, z0, x0))

/*
** rshl_w0_s32_x_untied:
**	mov	z0\.s, w0
**	srshlr	z0\.s, p0/m, z0\.s, z1\.s
**	ret
*/
TEST_UNIFORM_ZX (rshl_w0_s32_x_untied, svint32_t, int32_t,
		 z0 = svrshl_n_s32_x (p0, z1, x0),
		 z0 = svrshl_x (p0, z1, x0))

/*
** rshl_m32_s32_x:
**	srshr	z0\.s, p0/m, z0\.s, #32
**	ret
*/
TEST_UNIFORM_Z (rshl_m32_s32_x, svint32_t,
		z0 = svrshl_n_s32_x (p0, z0, -32),
		z0 = svrshl_x (p0, z0, -32))

/*
** rshl_m2_s32_x:
**	srshr	z0\.s, p0/m, z0\.s, #2
**	ret
*/
TEST_UNIFORM_Z (rshl_m2_s32_x, svint32_t,
		z0 = svrshl_n_s32_x (p0, z0, -2),
		z0 = svrshl_x (p0, z0, -2))

/*
** rshl_m1_s32_x_tied1:
**	srshr	z0\.s, p0/m, z0\.s, #1
**	ret
*/
TEST_UNIFORM_Z (rshl_m1_s32_x_tied1, svint32_t,
		z0 = svrshl_n_s32_x (p0, z0, -1),
		z0 = svrshl_x (p0, z0, -1))

/*
** rshl_m1_s32_x_untied:
**	movprfx	z0, z1
**	srshr	z0\.s, p0/m, z0\.s, #1
**	ret
*/
TEST_UNIFORM_Z (rshl_m1_s32_x_untied, svint32_t,
		z0 = svrshl_n_s32_x (p0, z1, -1),
		z0 = svrshl_x (p0, z1, -1))

/*
** rshl_1_s32_x_tied1:
**	lsl	z0\.s, z0\.s, #1
**	ret
*/
TEST_UNIFORM_Z (rshl_1_s32_x_tied1, svint32_t,
		z0 = svrshl_n_s32_x (p0, z0, 1),
		z0 = svrshl_x (p0, z0, 1))

/*
** rshl_1_s32_x_untied:
**	lsl	z0\.s, z1\.s, #1
**	ret
*/
TEST_UNIFORM_Z (rshl_1_s32_x_untied, svint32_t,
		z0 = svrshl_n_s32_x (p0, z1, 1),
		z0 = svrshl_x (p0, z1, 1))

/*
** rshl_2_s32_x:
**	lsl	z0\.s, z0\.s, #2
**	ret
*/
TEST_UNIFORM_Z (rshl_2_s32_x, svint32_t,
		z0 = svrshl_n_s32_x (p0, z0, 2),
		z0 = svrshl_x (p0, z0, 2))

/*
** rshl_31_s32_x:
**	lsl	z0\.s, z0\.s, #31
**	ret
*/
TEST_UNIFORM_Z (rshl_31_s32_x, svint32_t,
		z0 = svrshl_n_s32_x (p0, z0, 31),
		z0 = svrshl_x (p0, z0, 31))