/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */

#include "test_sve_acle.h"

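/* The tests below cover svqshl on signed 64-bit elements under merging
   (_m), zeroing (_z) and "don't care" (_x) predication, with vector,
   scalar (x0) and immediate shift counts.  */
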
/*
** qshl_s64_m_tied1:
** sqshl z0\.d, p0/m, z0\.d, z4\.d
** ret
*/
TEST_DUAL_Z (qshl_s64_m_tied1, svint64_t, svint64_t,
             z0 = svqshl_s64_m (p0, z0, z4),
             z0 = svqshl_m (p0, z0, z4))

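/* When the result register is tied to the shift operand rather than to the
   first input, the shift amount is expected to be copied aside before
   MOVPRFX moves the first input into place.  */
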
/*
** qshl_s64_m_tied2:
** mov (z[0-9]+\.d), z0\.d
** movprfx z0, z4
** sqshl z0\.d, p0/m, z0\.d, \1
** ret
*/
TEST_DUAL_Z_REV (qshl_s64_m_tied2, svint64_t, svint64_t,
                 z0_res = svqshl_s64_m (p0, z4, z0),
                 z0_res = svqshl_m (p0, z4, z0))

/*
** qshl_s64_m_untied:
** movprfx z0, z1
** sqshl z0\.d, p0/m, z0\.d, z4\.d
** ret
*/
TEST_DUAL_Z (qshl_s64_m_untied, svint64_t, svint64_t,
             z0 = svqshl_s64_m (p0, z1, z4),
             z0 = svqshl_m (p0, z1, z4))

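/* The _n forms that take a scalar shift count are expected to broadcast
   x0 into a vector register before the predicated SQSHL.  */
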
/*
** qshl_x0_s64_m_tied1:
** mov (z[0-9]+\.d), x0
** sqshl z0\.d, p0/m, z0\.d, \1
** ret
*/
TEST_UNIFORM_ZX (qshl_x0_s64_m_tied1, svint64_t, int64_t,
                 z0 = svqshl_n_s64_m (p0, z0, x0),
                 z0 = svqshl_m (p0, z0, x0))

/*
** qshl_x0_s64_m_untied:
** mov (z[0-9]+\.d), x0
** movprfx z0, z1
** sqshl z0\.d, p0/m, z0\.d, \1
** ret
*/
TEST_UNIFORM_ZX (qshl_x0_s64_m_untied, svint64_t, int64_t,
                 z0 = svqshl_n_s64_m (p0, z1, x0),
                 z0 = svqshl_m (p0, z1, x0))

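/* A negative shift count is a right shift, and a right shift cannot
   saturate, so a plain (still predicated) ASR is expected instead of
   SQSHL for negative immediates.  */
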
/*
** qshl_m64_s64_m:
** asr z0\.d, p0/m, z0\.d, #64
** ret
*/
TEST_UNIFORM_Z (qshl_m64_s64_m, svint64_t,
                z0 = svqshl_n_s64_m (p0, z0, -64),
                z0 = svqshl_m (p0, z0, -64))

/*
** qshl_m2_s64_m:
** asr z0\.d, p0/m, z0\.d, #2
** ret
*/
TEST_UNIFORM_Z (qshl_m2_s64_m, svint64_t,
                z0 = svqshl_n_s64_m (p0, z0, -2),
                z0 = svqshl_m (p0, z0, -2))

/*
** qshl_m1_s64_m_tied1:
** asr z0\.d, p0/m, z0\.d, #1
** ret
*/
TEST_UNIFORM_Z (qshl_m1_s64_m_tied1, svint64_t,
                z0 = svqshl_n_s64_m (p0, z0, -1),
                z0 = svqshl_m (p0, z0, -1))

/*
** qshl_m1_s64_m_untied:
** movprfx z0, z1
** asr z0\.d, p0/m, z0\.d, #1
** ret
*/
TEST_UNIFORM_Z (qshl_m1_s64_m_untied, svint64_t,
                z0 = svqshl_n_s64_m (p0, z1, -1),
                z0 = svqshl_m (p0, z1, -1))

/*
** qshl_1_s64_m_tied1:
** sqshl z0\.d, p0/m, z0\.d, #1
** ret
*/
TEST_UNIFORM_Z (qshl_1_s64_m_tied1, svint64_t,
                z0 = svqshl_n_s64_m (p0, z0, 1),
                z0 = svqshl_m (p0, z0, 1))

/*
** qshl_1_s64_m_untied:
** movprfx z0, z1
** sqshl z0\.d, p0/m, z0\.d, #1
** ret
*/
TEST_UNIFORM_Z (qshl_1_s64_m_untied, svint64_t,
                z0 = svqshl_n_s64_m (p0, z1, 1),
                z0 = svqshl_m (p0, z1, 1))

/*
** qshl_2_s64_m:
** sqshl z0\.d, p0/m, z0\.d, #2
** ret
*/
TEST_UNIFORM_Z (qshl_2_s64_m, svint64_t,
                z0 = svqshl_n_s64_m (p0, z0, 2),
                z0 = svqshl_m (p0, z0, 2))

/*
** qshl_63_s64_m:
** sqshl z0\.d, p0/m, z0\.d, #63
** ret
*/
TEST_UNIFORM_Z (qshl_63_s64_m, svint64_t,
                z0 = svqshl_n_s64_m (p0, z0, 63),
                z0 = svqshl_m (p0, z0, 63))

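/* The _z forms zero inactive elements, which is expected to show up as a
   zeroing MOVPRFX (p0/z) before the shift; when the operands are reversed
   the compiler may use SQSHLR instead.  */
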
/*
** qshl_s64_z_tied1:
** movprfx z0\.d, p0/z, z0\.d
** sqshl z0\.d, p0/m, z0\.d, z4\.d
** ret
*/
TEST_DUAL_Z (qshl_s64_z_tied1, svint64_t, svint64_t,
             z0 = svqshl_s64_z (p0, z0, z4),
             z0 = svqshl_z (p0, z0, z4))

/*
** qshl_s64_z_tied2:
** movprfx z0\.d, p0/z, z0\.d
** sqshlr z0\.d, p0/m, z0\.d, z4\.d
** ret
*/
TEST_DUAL_Z_REV (qshl_s64_z_tied2, svint64_t, svint64_t,
                 z0_res = svqshl_s64_z (p0, z4, z0),
                 z0_res = svqshl_z (p0, z4, z0))

/*
** qshl_s64_z_untied:
** (
** movprfx z0\.d, p0/z, z1\.d
** sqshl z0\.d, p0/m, z0\.d, z4\.d
** |
** movprfx z0\.d, p0/z, z4\.d
** sqshlr z0\.d, p0/m, z0\.d, z1\.d
** )
** ret
*/
TEST_DUAL_Z (qshl_s64_z_untied, svint64_t, svint64_t,
             z0 = svqshl_s64_z (p0, z1, z4),
             z0 = svqshl_z (p0, z1, z4))

/*
** qshl_x0_s64_z_tied1:
** mov (z[0-9]+\.d), x0
** movprfx z0\.d, p0/z, z0\.d
** sqshl z0\.d, p0/m, z0\.d, \1
** ret
*/
TEST_UNIFORM_ZX (qshl_x0_s64_z_tied1, svint64_t, int64_t,
                 z0 = svqshl_n_s64_z (p0, z0, x0),
                 z0 = svqshl_z (p0, z0, x0))

/*
** qshl_x0_s64_z_untied:
** mov (z[0-9]+\.d), x0
** (
** movprfx z0\.d, p0/z, z1\.d
** sqshl z0\.d, p0/m, z0\.d, \1
** |
** movprfx z0\.d, p0/z, \1
** sqshlr z0\.d, p0/m, z0\.d, z1\.d
** )
** ret
*/
TEST_UNIFORM_ZX (qshl_x0_s64_z_untied, svint64_t, int64_t,
                 z0 = svqshl_n_s64_z (p0, z1, x0),
                 z0 = svqshl_z (p0, z1, x0))

/*
** qshl_m64_s64_z:
** movprfx z0\.d, p0/z, z0\.d
** asr z0\.d, p0/m, z0\.d, #64
** ret
*/
TEST_UNIFORM_Z (qshl_m64_s64_z, svint64_t,
                z0 = svqshl_n_s64_z (p0, z0, -64),
                z0 = svqshl_z (p0, z0, -64))

/*
** qshl_m2_s64_z:
** movprfx z0\.d, p0/z, z0\.d
** asr z0\.d, p0/m, z0\.d, #2
** ret
*/
TEST_UNIFORM_Z (qshl_m2_s64_z, svint64_t,
                z0 = svqshl_n_s64_z (p0, z0, -2),
                z0 = svqshl_z (p0, z0, -2))

/*
** qshl_m1_s64_z_tied1:
** movprfx z0\.d, p0/z, z0\.d
** asr z0\.d, p0/m, z0\.d, #1
** ret
*/
TEST_UNIFORM_Z (qshl_m1_s64_z_tied1, svint64_t,
                z0 = svqshl_n_s64_z (p0, z0, -1),
                z0 = svqshl_z (p0, z0, -1))

/*
** qshl_m1_s64_z_untied:
** movprfx z0\.d, p0/z, z1\.d
** asr z0\.d, p0/m, z0\.d, #1
** ret
*/
TEST_UNIFORM_Z (qshl_m1_s64_z_untied, svint64_t,
                z0 = svqshl_n_s64_z (p0, z1, -1),
                z0 = svqshl_z (p0, z1, -1))

/*
** qshl_1_s64_z_tied1:
** movprfx z0\.d, p0/z, z0\.d
** sqshl z0\.d, p0/m, z0\.d, #1
** ret
*/
TEST_UNIFORM_Z (qshl_1_s64_z_tied1, svint64_t,
                z0 = svqshl_n_s64_z (p0, z0, 1),
                z0 = svqshl_z (p0, z0, 1))

/*
** qshl_1_s64_z_untied:
** movprfx z0\.d, p0/z, z1\.d
** sqshl z0\.d, p0/m, z0\.d, #1
** ret
*/
TEST_UNIFORM_Z (qshl_1_s64_z_untied, svint64_t,
                z0 = svqshl_n_s64_z (p0, z1, 1),
                z0 = svqshl_z (p0, z1, 1))

/*
** qshl_2_s64_z:
** movprfx z0\.d, p0/z, z0\.d
** sqshl z0\.d, p0/m, z0\.d, #2
** ret
*/
TEST_UNIFORM_Z (qshl_2_s64_z, svint64_t,
                z0 = svqshl_n_s64_z (p0, z0, 2),
                z0 = svqshl_z (p0, z0, 2))

/*
** qshl_63_s64_z:
** movprfx z0\.d, p0/z, z0\.d
** sqshl z0\.d, p0/m, z0\.d, #63
** ret
*/
TEST_UNIFORM_Z (qshl_63_s64_z, svint64_t,
                z0 = svqshl_n_s64_z (p0, z0, 63),
                z0 = svqshl_z (p0, z0, 63))

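/* The _x forms leave inactive elements undefined, so the compiler is free
   to pick either operand order (SQSHL or SQSHLR) and to use an
   unpredicated ASR for negative immediates.  */
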
/*
** qshl_s64_x_tied1:
** sqshl z0\.d, p0/m, z0\.d, z4\.d
** ret
*/
TEST_DUAL_Z (qshl_s64_x_tied1, svint64_t, svint64_t,
             z0 = svqshl_s64_x (p0, z0, z4),
             z0 = svqshl_x (p0, z0, z4))

/*
** qshl_s64_x_tied2:
** sqshlr z0\.d, p0/m, z0\.d, z4\.d
** ret
*/
TEST_DUAL_Z_REV (qshl_s64_x_tied2, svint64_t, svint64_t,
                 z0_res = svqshl_s64_x (p0, z4, z0),
                 z0_res = svqshl_x (p0, z4, z0))

/*
** qshl_s64_x_untied:
** (
** movprfx z0, z1
** sqshl z0\.d, p0/m, z0\.d, z4\.d
** |
** movprfx z0, z4
** sqshlr z0\.d, p0/m, z0\.d, z1\.d
** )
** ret
*/
TEST_DUAL_Z (qshl_s64_x_untied, svint64_t, svint64_t,
             z0 = svqshl_s64_x (p0, z1, z4),
             z0 = svqshl_x (p0, z1, z4))

/*
** qshl_x0_s64_x_tied1:
** mov (z[0-9]+\.d), x0
** sqshl z0\.d, p0/m, z0\.d, \1
** ret
*/
TEST_UNIFORM_ZX (qshl_x0_s64_x_tied1, svint64_t, int64_t,
                 z0 = svqshl_n_s64_x (p0, z0, x0),
                 z0 = svqshl_x (p0, z0, x0))

/*
** qshl_x0_s64_x_untied:
** mov z0\.d, x0
** sqshlr z0\.d, p0/m, z0\.d, z1\.d
** ret
*/
TEST_UNIFORM_ZX (qshl_x0_s64_x_untied, svint64_t, int64_t,
                 z0 = svqshl_n_s64_x (p0, z1, x0),
                 z0 = svqshl_x (p0, z1, x0))

/*
** qshl_m64_s64_x:
** asr z0\.d, z0\.d, #64
** ret
*/
TEST_UNIFORM_Z (qshl_m64_s64_x, svint64_t,
                z0 = svqshl_n_s64_x (p0, z0, -64),
                z0 = svqshl_x (p0, z0, -64))

/*
** qshl_m2_s64_x:
** asr z0\.d, z0\.d, #2
** ret
*/
TEST_UNIFORM_Z (qshl_m2_s64_x, svint64_t,
                z0 = svqshl_n_s64_x (p0, z0, -2),
                z0 = svqshl_x (p0, z0, -2))

/*
** qshl_m1_s64_x_tied1:
** asr z0\.d, z0\.d, #1
** ret
*/
TEST_UNIFORM_Z (qshl_m1_s64_x_tied1, svint64_t,
                z0 = svqshl_n_s64_x (p0, z0, -1),
                z0 = svqshl_x (p0, z0, -1))

/*
** qshl_m1_s64_x_untied:
** asr z0\.d, z1\.d, #1
** ret
*/
TEST_UNIFORM_Z (qshl_m1_s64_x_untied, svint64_t,
                z0 = svqshl_n_s64_x (p0, z1, -1),
                z0 = svqshl_x (p0, z1, -1))

/*
** qshl_1_s64_x_tied1:
** sqshl z0\.d, p0/m, z0\.d, #1
** ret
*/
TEST_UNIFORM_Z (qshl_1_s64_x_tied1, svint64_t,
                z0 = svqshl_n_s64_x (p0, z0, 1),
                z0 = svqshl_x (p0, z0, 1))

/*
** qshl_1_s64_x_untied:
** movprfx z0, z1
** sqshl z0\.d, p0/m, z0\.d, #1
** ret
*/
TEST_UNIFORM_Z (qshl_1_s64_x_untied, svint64_t,
                z0 = svqshl_n_s64_x (p0, z1, 1),
                z0 = svqshl_x (p0, z1, 1))

/*
** qshl_2_s64_x:
** sqshl z0\.d, p0/m, z0\.d, #2
** ret
*/
TEST_UNIFORM_Z (qshl_2_s64_x, svint64_t,
                z0 = svqshl_n_s64_x (p0, z0, 2),
                z0 = svqshl_x (p0, z0, 2))

/*
** qshl_63_s64_x:
** sqshl z0\.d, p0/m, z0\.d, #63
** ret
*/
TEST_UNIFORM_Z (qshl_63_s64_x, svint64_t,
                z0 = svqshl_n_s64_x (p0, z0, 63),
                z0 = svqshl_x (p0, z0, 63))