/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */

#include "test_sve_acle.h"
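
/* Tests for the svqshl (saturating shift left) intrinsics on signed
   64-bit elements, covering the _m (merging), _z (zeroing) and _x
   (don't-care) predication forms.  check-function-bodies compares the
   assembly of each test function against the "**" pattern in the
   comment above it.  A constant negative shift amount is a right
   shift, which cannot saturate for signed operands, so those cases
   are expected to compile to a plain ASR.  */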

/*
** qshl_s64_m_tied1:
**	sqshl	z0\.d, p0/m, z0\.d, z4\.d
**	ret
*/
TEST_DUAL_Z (qshl_s64_m_tied1, svint64_t, svint64_t,
	     z0 = svqshl_s64_m (p0, z0, z4),
	     z0 = svqshl_m (p0, z0, z4))

/*
** qshl_s64_m_tied2:
**	mov	(z[0-9]+\.d), z0\.d
**	movprfx	z0, z4
**	sqshl	z0\.d, p0/m, z0\.d, \1
**	ret
*/
TEST_DUAL_Z_REV (qshl_s64_m_tied2, svint64_t, svint64_t,
		 z0_res = svqshl_s64_m (p0, z4, z0),
		 z0_res = svqshl_m (p0, z4, z0))

/*
** qshl_s64_m_untied:
**	movprfx	z0, z1
**	sqshl	z0\.d, p0/m, z0\.d, z4\.d
**	ret
*/
TEST_DUAL_Z (qshl_s64_m_untied, svint64_t, svint64_t,
	     z0 = svqshl_s64_m (p0, z1, z4),
	     z0 = svqshl_m (p0, z1, z4))

/*
** qshl_x0_s64_m_tied1:
**	mov	(z[0-9]+\.d), x0
**	sqshl	z0\.d, p0/m, z0\.d, \1
**	ret
*/
TEST_UNIFORM_ZX (qshl_x0_s64_m_tied1, svint64_t, int64_t,
		 z0 = svqshl_n_s64_m (p0, z0, x0),
		 z0 = svqshl_m (p0, z0, x0))

/*
** qshl_x0_s64_m_untied:
**	mov	(z[0-9]+\.d), x0
**	movprfx	z0, z1
**	sqshl	z0\.d, p0/m, z0\.d, \1
**	ret
*/
TEST_UNIFORM_ZX (qshl_x0_s64_m_untied, svint64_t, int64_t,
		 z0 = svqshl_n_s64_m (p0, z1, x0),
		 z0 = svqshl_m (p0, z1, x0))

/*
** qshl_m64_s64_m:
**	asr	z0\.d, p0/m, z0\.d, #64
**	ret
*/
TEST_UNIFORM_Z (qshl_m64_s64_m, svint64_t,
		z0 = svqshl_n_s64_m (p0, z0, -64),
		z0 = svqshl_m (p0, z0, -64))

/*
** qshl_m2_s64_m:
**	asr	z0\.d, p0/m, z0\.d, #2
**	ret
*/
TEST_UNIFORM_Z (qshl_m2_s64_m, svint64_t,
		z0 = svqshl_n_s64_m (p0, z0, -2),
		z0 = svqshl_m (p0, z0, -2))

/*
** qshl_m1_s64_m_tied1:
**	asr	z0\.d, p0/m, z0\.d, #1
**	ret
*/
TEST_UNIFORM_Z (qshl_m1_s64_m_tied1, svint64_t,
		z0 = svqshl_n_s64_m (p0, z0, -1),
		z0 = svqshl_m (p0, z0, -1))

/*
** qshl_m1_s64_m_untied:
**	movprfx	z0, z1
**	asr	z0\.d, p0/m, z0\.d, #1
**	ret
*/
TEST_UNIFORM_Z (qshl_m1_s64_m_untied, svint64_t,
		z0 = svqshl_n_s64_m (p0, z1, -1),
		z0 = svqshl_m (p0, z1, -1))

/*
** qshl_1_s64_m_tied1:
**	sqshl	z0\.d, p0/m, z0\.d, #1
**	ret
*/
TEST_UNIFORM_Z (qshl_1_s64_m_tied1, svint64_t,
		z0 = svqshl_n_s64_m (p0, z0, 1),
		z0 = svqshl_m (p0, z0, 1))

/*
** qshl_1_s64_m_untied:
**	movprfx	z0, z1
**	sqshl	z0\.d, p0/m, z0\.d, #1
**	ret
*/
TEST_UNIFORM_Z (qshl_1_s64_m_untied, svint64_t,
		z0 = svqshl_n_s64_m (p0, z1, 1),
		z0 = svqshl_m (p0, z1, 1))

/*
** qshl_2_s64_m:
**	sqshl	z0\.d, p0/m, z0\.d, #2
**	ret
*/
TEST_UNIFORM_Z (qshl_2_s64_m, svint64_t,
		z0 = svqshl_n_s64_m (p0, z0, 2),
		z0 = svqshl_m (p0, z0, 2))

/*
** qshl_63_s64_m:
**	sqshl	z0\.d, p0/m, z0\.d, #63
**	ret
*/
TEST_UNIFORM_Z (qshl_63_s64_m, svint64_t,
		z0 = svqshl_n_s64_m (p0, z0, 63),
		z0 = svqshl_m (p0, z0, 63))
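
/* The _z forms below must zero the inactive lanes, so the expected
   code adds a zeroing "movprfx z0.d, p0/z, ..." before the merging
   sqshl/sqshlr or asr instruction.  */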

/*
** qshl_s64_z_tied1:
**	movprfx	z0\.d, p0/z, z0\.d
**	sqshl	z0\.d, p0/m, z0\.d, z4\.d
**	ret
*/
TEST_DUAL_Z (qshl_s64_z_tied1, svint64_t, svint64_t,
	     z0 = svqshl_s64_z (p0, z0, z4),
	     z0 = svqshl_z (p0, z0, z4))

/*
** qshl_s64_z_tied2:
**	movprfx	z0\.d, p0/z, z0\.d
**	sqshlr	z0\.d, p0/m, z0\.d, z4\.d
**	ret
*/
TEST_DUAL_Z_REV (qshl_s64_z_tied2, svint64_t, svint64_t,
		 z0_res = svqshl_s64_z (p0, z4, z0),
		 z0_res = svqshl_z (p0, z4, z0))

/*
** qshl_s64_z_untied:
** (
**	movprfx	z0\.d, p0/z, z1\.d
**	sqshl	z0\.d, p0/m, z0\.d, z4\.d
** |
**	movprfx	z0\.d, p0/z, z4\.d
**	sqshlr	z0\.d, p0/m, z0\.d, z1\.d
** )
**	ret
*/
TEST_DUAL_Z (qshl_s64_z_untied, svint64_t, svint64_t,
	     z0 = svqshl_s64_z (p0, z1, z4),
	     z0 = svqshl_z (p0, z1, z4))

/*
** qshl_x0_s64_z_tied1:
**	mov	(z[0-9]+\.d), x0
**	movprfx	z0\.d, p0/z, z0\.d
**	sqshl	z0\.d, p0/m, z0\.d, \1
**	ret
*/
TEST_UNIFORM_ZX (qshl_x0_s64_z_tied1, svint64_t, int64_t,
		 z0 = svqshl_n_s64_z (p0, z0, x0),
		 z0 = svqshl_z (p0, z0, x0))

/*
** qshl_x0_s64_z_untied:
**	mov	(z[0-9]+\.d), x0
** (
**	movprfx	z0\.d, p0/z, z1\.d
**	sqshl	z0\.d, p0/m, z0\.d, \1
** |
**	movprfx	z0\.d, p0/z, \1
**	sqshlr	z0\.d, p0/m, z0\.d, z1\.d
** )
**	ret
*/
TEST_UNIFORM_ZX (qshl_x0_s64_z_untied, svint64_t, int64_t,
		 z0 = svqshl_n_s64_z (p0, z1, x0),
		 z0 = svqshl_z (p0, z1, x0))

/*
** qshl_m64_s64_z:
**	movprfx	z0\.d, p0/z, z0\.d
**	asr	z0\.d, p0/m, z0\.d, #64
**	ret
*/
TEST_UNIFORM_Z (qshl_m64_s64_z, svint64_t,
		z0 = svqshl_n_s64_z (p0, z0, -64),
		z0 = svqshl_z (p0, z0, -64))

/*
** qshl_m2_s64_z:
**	movprfx	z0\.d, p0/z, z0\.d
**	asr	z0\.d, p0/m, z0\.d, #2
**	ret
*/
TEST_UNIFORM_Z (qshl_m2_s64_z, svint64_t,
		z0 = svqshl_n_s64_z (p0, z0, -2),
		z0 = svqshl_z (p0, z0, -2))

/*
** qshl_m1_s64_z_tied1:
**	movprfx	z0\.d, p0/z, z0\.d
**	asr	z0\.d, p0/m, z0\.d, #1
**	ret
*/
TEST_UNIFORM_Z (qshl_m1_s64_z_tied1, svint64_t,
		z0 = svqshl_n_s64_z (p0, z0, -1),
		z0 = svqshl_z (p0, z0, -1))

/*
** qshl_m1_s64_z_untied:
**	movprfx	z0\.d, p0/z, z1\.d
**	asr	z0\.d, p0/m, z0\.d, #1
**	ret
*/
TEST_UNIFORM_Z (qshl_m1_s64_z_untied, svint64_t,
		z0 = svqshl_n_s64_z (p0, z1, -1),
		z0 = svqshl_z (p0, z1, -1))

/*
** qshl_1_s64_z_tied1:
**	movprfx	z0\.d, p0/z, z0\.d
**	sqshl	z0\.d, p0/m, z0\.d, #1
**	ret
*/
TEST_UNIFORM_Z (qshl_1_s64_z_tied1, svint64_t,
		z0 = svqshl_n_s64_z (p0, z0, 1),
		z0 = svqshl_z (p0, z0, 1))

/*
** qshl_1_s64_z_untied:
**	movprfx	z0\.d, p0/z, z1\.d
**	sqshl	z0\.d, p0/m, z0\.d, #1
**	ret
*/
TEST_UNIFORM_Z (qshl_1_s64_z_untied, svint64_t,
		z0 = svqshl_n_s64_z (p0, z1, 1),
		z0 = svqshl_z (p0, z1, 1))

/*
** qshl_2_s64_z:
**	movprfx	z0\.d, p0/z, z0\.d
**	sqshl	z0\.d, p0/m, z0\.d, #2
**	ret
*/
TEST_UNIFORM_Z (qshl_2_s64_z, svint64_t,
		z0 = svqshl_n_s64_z (p0, z0, 2),
		z0 = svqshl_z (p0, z0, 2))

/*
** qshl_63_s64_z:
**	movprfx	z0\.d, p0/z, z0\.d
**	sqshl	z0\.d, p0/m, z0\.d, #63
**	ret
*/
TEST_UNIFORM_Z (qshl_63_s64_z, svint64_t,
		z0 = svqshl_n_s64_z (p0, z0, 63),
		z0 = svqshl_z (p0, z0, 63))
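
/* The _x forms leave inactive lanes unspecified, so no zeroing
   movprfx is needed: constant right shifts can use the unpredicated
   ASR form, and untied cases may use either sqshl or the reversed
   sqshlr.  */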

/*
** qshl_s64_x_tied1:
**	sqshl	z0\.d, p0/m, z0\.d, z4\.d
**	ret
*/
TEST_DUAL_Z (qshl_s64_x_tied1, svint64_t, svint64_t,
	     z0 = svqshl_s64_x (p0, z0, z4),
	     z0 = svqshl_x (p0, z0, z4))

/*
** qshl_s64_x_tied2:
**	sqshlr	z0\.d, p0/m, z0\.d, z4\.d
**	ret
*/
TEST_DUAL_Z_REV (qshl_s64_x_tied2, svint64_t, svint64_t,
		 z0_res = svqshl_s64_x (p0, z4, z0),
		 z0_res = svqshl_x (p0, z4, z0))

/*
** qshl_s64_x_untied:
** (
**	movprfx	z0, z1
**	sqshl	z0\.d, p0/m, z0\.d, z4\.d
** |
**	movprfx	z0, z4
**	sqshlr	z0\.d, p0/m, z0\.d, z1\.d
** )
**	ret
*/
TEST_DUAL_Z (qshl_s64_x_untied, svint64_t, svint64_t,
	     z0 = svqshl_s64_x (p0, z1, z4),
	     z0 = svqshl_x (p0, z1, z4))

/*
** qshl_x0_s64_x_tied1:
**	mov	(z[0-9]+\.d), x0
**	sqshl	z0\.d, p0/m, z0\.d, \1
**	ret
*/
TEST_UNIFORM_ZX (qshl_x0_s64_x_tied1, svint64_t, int64_t,
		 z0 = svqshl_n_s64_x (p0, z0, x0),
		 z0 = svqshl_x (p0, z0, x0))

/*
** qshl_x0_s64_x_untied:
**	mov	z0\.d, x0
**	sqshlr	z0\.d, p0/m, z0\.d, z1\.d
**	ret
*/
TEST_UNIFORM_ZX (qshl_x0_s64_x_untied, svint64_t, int64_t,
		 z0 = svqshl_n_s64_x (p0, z1, x0),
		 z0 = svqshl_x (p0, z1, x0))

/*
** qshl_m64_s64_x:
**	asr	z0\.d, z0\.d, #64
**	ret
*/
TEST_UNIFORM_Z (qshl_m64_s64_x, svint64_t,
		z0 = svqshl_n_s64_x (p0, z0, -64),
		z0 = svqshl_x (p0, z0, -64))

/*
** qshl_m2_s64_x:
**	asr	z0\.d, z0\.d, #2
**	ret
*/
TEST_UNIFORM_Z (qshl_m2_s64_x, svint64_t,
		z0 = svqshl_n_s64_x (p0, z0, -2),
		z0 = svqshl_x (p0, z0, -2))

/*
** qshl_m1_s64_x_tied1:
**	asr	z0\.d, z0\.d, #1
**	ret
*/
TEST_UNIFORM_Z (qshl_m1_s64_x_tied1, svint64_t,
		z0 = svqshl_n_s64_x (p0, z0, -1),
		z0 = svqshl_x (p0, z0, -1))

/*
** qshl_m1_s64_x_untied:
**	asr	z0\.d, z1\.d, #1
**	ret
*/
TEST_UNIFORM_Z (qshl_m1_s64_x_untied, svint64_t,
		z0 = svqshl_n_s64_x (p0, z1, -1),
		z0 = svqshl_x (p0, z1, -1))

/*
** qshl_1_s64_x_tied1:
**	sqshl	z0\.d, p0/m, z0\.d, #1
**	ret
*/
TEST_UNIFORM_Z (qshl_1_s64_x_tied1, svint64_t,
		z0 = svqshl_n_s64_x (p0, z0, 1),
		z0 = svqshl_x (p0, z0, 1))

/*
** qshl_1_s64_x_untied:
**	movprfx	z0, z1
**	sqshl	z0\.d, p0/m, z0\.d, #1
**	ret
*/
TEST_UNIFORM_Z (qshl_1_s64_x_untied, svint64_t,
		z0 = svqshl_n_s64_x (p0, z1, 1),
		z0 = svqshl_x (p0, z1, 1))

/*
** qshl_2_s64_x:
**	sqshl	z0\.d, p0/m, z0\.d, #2
**	ret
*/
TEST_UNIFORM_Z (qshl_2_s64_x, svint64_t,
		z0 = svqshl_n_s64_x (p0, z0, 2),
		z0 = svqshl_x (p0, z0, 2))

/*
** qshl_63_s64_x:
**	sqshl	z0\.d, p0/m, z0\.d, #63
**	ret
*/
TEST_UNIFORM_Z (qshl_63_s64_x, svint64_t,
		z0 = svqshl_n_s64_x (p0, z0, 63),
		z0 = svqshl_x (p0, z0, 63))