]> git.ipfire.org Git - thirdparty/gcc.git/blob - gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1_gather_u64.c
[AArch64] Add support for the SVE2 ACLE
[thirdparty/gcc.git] / gcc / testsuite / gcc.target / aarch64 / sve2 / acle / asm / ldnt1_gather_u64.c
1 /* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
2
3 #include "test_sve_acle.h"
4
/* Vector-base forms (ZS): the addresses come entirely from a vector of
   64-bit base pointers.  The expected assembly shows the result register
   reused as the base when tied (z0) and a distinct base (z1) when untied.
   NOTE: the `**` comment blocks below are check-function-bodies regexes —
   they are test assertions, not documentation.  */
5 /*
6 ** ldnt1_gather_u64_tied1:
7 ** ldnt1d z0\.d, p0/z, \[z0\.d\]
8 ** ret
9 */
10 TEST_LOAD_GATHER_ZS (ldnt1_gather_u64_tied1, svuint64_t, svuint64_t,
11 z0_res = svldnt1_gather_u64base_u64 (p0, z0),
12 z0_res = svldnt1_gather_u64 (p0, z0))
13
14 /*
15 ** ldnt1_gather_u64_untied:
16 ** ldnt1d z0\.d, p0/z, \[z1\.d\]
17 ** ret
18 */
19 TEST_LOAD_GATHER_ZS (ldnt1_gather_u64_untied, svuint64_t, svuint64_t,
20 z0_res = svldnt1_gather_u64base_u64 (p0, z0, z1) == 0 ? z0_res : z0_res,
21 z0_res = svldnt1_gather_u64 (p0, z1))
/* Vector-base plus scalar byte-offset forms.  The expected patterns show
   that offset 0 folds into the plain [zN.d] addressing form, while every
   non-zero offset (immediate or variable) is materialised in an X
   register before the load — LDNT1D here takes only a register offset.  */
23 /*
24 ** ldnt1_gather_x0_u64_offset:
25 ** ldnt1d z0\.d, p0/z, \[z0\.d, x0\]
26 ** ret
27 */
28 TEST_LOAD_GATHER_ZS (ldnt1_gather_x0_u64_offset, svuint64_t, svuint64_t,
29 z0_res = svldnt1_gather_u64base_offset_u64 (p0, z0, x0),
30 z0_res = svldnt1_gather_offset_u64 (p0, z0, x0))
31
32 /*
33 ** ldnt1_gather_m8_u64_offset:
34 ** mov (x[0-9]+), #?-8
35 ** ldnt1d z0\.d, p0/z, \[z0\.d, \1\]
36 ** ret
37 */
38 TEST_LOAD_GATHER_ZS (ldnt1_gather_m8_u64_offset, svuint64_t, svuint64_t,
39 z0_res = svldnt1_gather_u64base_offset_u64 (p0, z0, -8),
40 z0_res = svldnt1_gather_offset_u64 (p0, z0, -8))
41
42 /*
43 ** ldnt1_gather_0_u64_offset:
44 ** ldnt1d z0\.d, p0/z, \[z0\.d\]
45 ** ret
46 */
47 TEST_LOAD_GATHER_ZS (ldnt1_gather_0_u64_offset, svuint64_t, svuint64_t,
48 z0_res = svldnt1_gather_u64base_offset_u64 (p0, z0, 0),
49 z0_res = svldnt1_gather_offset_u64 (p0, z0, 0))
50
51 /*
52 ** ldnt1_gather_9_u64_offset:
53 ** mov (x[0-9]+), #?9
54 ** ldnt1d z0\.d, p0/z, \[z0\.d, \1\]
55 ** ret
56 */
57 TEST_LOAD_GATHER_ZS (ldnt1_gather_9_u64_offset, svuint64_t, svuint64_t,
58 z0_res = svldnt1_gather_u64base_offset_u64 (p0, z0, 9),
59 z0_res = svldnt1_gather_offset_u64 (p0, z0, 9))
60
61 /*
62 ** ldnt1_gather_10_u64_offset:
63 ** mov (x[0-9]+), #?10
64 ** ldnt1d z0\.d, p0/z, \[z0\.d, \1\]
65 ** ret
66 */
67 TEST_LOAD_GATHER_ZS (ldnt1_gather_10_u64_offset, svuint64_t, svuint64_t,
68 z0_res = svldnt1_gather_u64base_offset_u64 (p0, z0, 10),
69 z0_res = svldnt1_gather_offset_u64 (p0, z0, 10))
70
71 /*
72 ** ldnt1_gather_11_u64_offset:
73 ** mov (x[0-9]+), #?11
74 ** ldnt1d z0\.d, p0/z, \[z0\.d, \1\]
75 ** ret
76 */
77 TEST_LOAD_GATHER_ZS (ldnt1_gather_11_u64_offset, svuint64_t, svuint64_t,
78 z0_res = svldnt1_gather_u64base_offset_u64 (p0, z0, 11),
79 z0_res = svldnt1_gather_offset_u64 (p0, z0, 11))
80
81 /*
82 ** ldnt1_gather_12_u64_offset:
83 ** mov (x[0-9]+), #?12
84 ** ldnt1d z0\.d, p0/z, \[z0\.d, \1\]
85 ** ret
86 */
87 TEST_LOAD_GATHER_ZS (ldnt1_gather_12_u64_offset, svuint64_t, svuint64_t,
88 z0_res = svldnt1_gather_u64base_offset_u64 (p0, z0, 12),
89 z0_res = svldnt1_gather_offset_u64 (p0, z0, 12))
90
91 /*
92 ** ldnt1_gather_13_u64_offset:
93 ** mov (x[0-9]+), #?13
94 ** ldnt1d z0\.d, p0/z, \[z0\.d, \1\]
95 ** ret
96 */
97 TEST_LOAD_GATHER_ZS (ldnt1_gather_13_u64_offset, svuint64_t, svuint64_t,
98 z0_res = svldnt1_gather_u64base_offset_u64 (p0, z0, 13),
99 z0_res = svldnt1_gather_offset_u64 (p0, z0, 13))
100
101 /*
102 ** ldnt1_gather_14_u64_offset:
103 ** mov (x[0-9]+), #?14
104 ** ldnt1d z0\.d, p0/z, \[z0\.d, \1\]
105 ** ret
106 */
107 TEST_LOAD_GATHER_ZS (ldnt1_gather_14_u64_offset, svuint64_t, svuint64_t,
108 z0_res = svldnt1_gather_u64base_offset_u64 (p0, z0, 14),
109 z0_res = svldnt1_gather_offset_u64 (p0, z0, 14))
110
111 /*
112 ** ldnt1_gather_15_u64_offset:
113 ** mov (x[0-9]+), #?15
114 ** ldnt1d z0\.d, p0/z, \[z0\.d, \1\]
115 ** ret
116 */
117 TEST_LOAD_GATHER_ZS (ldnt1_gather_15_u64_offset, svuint64_t, svuint64_t,
118 z0_res = svldnt1_gather_u64base_offset_u64 (p0, z0, 15),
119 z0_res = svldnt1_gather_offset_u64 (p0, z0, 15))
120
121 /*
122 ** ldnt1_gather_16_u64_offset:
123 ** mov (x[0-9]+), #?16
124 ** ldnt1d z0\.d, p0/z, \[z0\.d, \1\]
125 ** ret
126 */
127 TEST_LOAD_GATHER_ZS (ldnt1_gather_16_u64_offset, svuint64_t, svuint64_t,
128 z0_res = svldnt1_gather_u64base_offset_u64 (p0, z0, 16),
129 z0_res = svldnt1_gather_offset_u64 (p0, z0, 16))
130
131 /*
132 ** ldnt1_gather_248_u64_offset:
133 ** mov (x[0-9]+), #?248
134 ** ldnt1d z0\.d, p0/z, \[z0\.d, \1\]
135 ** ret
136 */
137 TEST_LOAD_GATHER_ZS (ldnt1_gather_248_u64_offset, svuint64_t, svuint64_t,
138 z0_res = svldnt1_gather_u64base_offset_u64 (p0, z0, 248),
139 z0_res = svldnt1_gather_offset_u64 (p0, z0, 248))
140
141 /*
142 ** ldnt1_gather_256_u64_offset:
143 ** mov (x[0-9]+), #?256
144 ** ldnt1d z0\.d, p0/z, \[z0\.d, \1\]
145 ** ret
146 */
147 TEST_LOAD_GATHER_ZS (ldnt1_gather_256_u64_offset, svuint64_t, svuint64_t,
148 z0_res = svldnt1_gather_u64base_offset_u64 (p0, z0, 256),
149 z0_res = svldnt1_gather_offset_u64 (p0, z0, 256))
150
/* Vector-base plus element-index forms.  The index counts 64-bit
   elements, so the expected assembly scales it by 8: index -1 expects
   mov #-8, index 5 expects #40, 31 expects #248, 32 expects #256, and a
   variable index expects `lsl xN, x0, #3`.  Index 0 folds away.  */
151 /*
152 ** ldnt1_gather_x0_u64_index:
153 ** lsl (x[0-9]+), x0, #?3
154 ** ldnt1d z0\.d, p0/z, \[z0\.d, \1\]
155 ** ret
156 */
157 TEST_LOAD_GATHER_ZS (ldnt1_gather_x0_u64_index, svuint64_t, svuint64_t,
158 z0_res = svldnt1_gather_u64base_index_u64 (p0, z0, x0),
159 z0_res = svldnt1_gather_index_u64 (p0, z0, x0))
160
161 /*
162 ** ldnt1_gather_m1_u64_index:
163 ** mov (x[0-9]+), #?-8
164 ** ldnt1d z0\.d, p0/z, \[z0\.d, \1\]
165 ** ret
166 */
167 TEST_LOAD_GATHER_ZS (ldnt1_gather_m1_u64_index, svuint64_t, svuint64_t,
168 z0_res = svldnt1_gather_u64base_index_u64 (p0, z0, -1),
169 z0_res = svldnt1_gather_index_u64 (p0, z0, -1))
170
171 /*
172 ** ldnt1_gather_0_u64_index:
173 ** ldnt1d z0\.d, p0/z, \[z0\.d\]
174 ** ret
175 */
176 TEST_LOAD_GATHER_ZS (ldnt1_gather_0_u64_index, svuint64_t, svuint64_t,
177 z0_res = svldnt1_gather_u64base_index_u64 (p0, z0, 0),
178 z0_res = svldnt1_gather_index_u64 (p0, z0, 0))
179
180 /*
181 ** ldnt1_gather_5_u64_index:
182 ** mov (x[0-9]+), #?40
183 ** ldnt1d z0\.d, p0/z, \[z0\.d, \1\]
184 ** ret
185 */
186 TEST_LOAD_GATHER_ZS (ldnt1_gather_5_u64_index, svuint64_t, svuint64_t,
187 z0_res = svldnt1_gather_u64base_index_u64 (p0, z0, 5),
188 z0_res = svldnt1_gather_index_u64 (p0, z0, 5))
189
190 /*
191 ** ldnt1_gather_31_u64_index:
192 ** mov (x[0-9]+), #?248
193 ** ldnt1d z0\.d, p0/z, \[z0\.d, \1\]
194 ** ret
195 */
196 TEST_LOAD_GATHER_ZS (ldnt1_gather_31_u64_index, svuint64_t, svuint64_t,
197 z0_res = svldnt1_gather_u64base_index_u64 (p0, z0, 31),
198 z0_res = svldnt1_gather_index_u64 (p0, z0, 31))
199
200 /*
201 ** ldnt1_gather_32_u64_index:
202 ** mov (x[0-9]+), #?256
203 ** ldnt1d z0\.d, p0/z, \[z0\.d, \1\]
204 ** ret
205 */
206 TEST_LOAD_GATHER_ZS (ldnt1_gather_32_u64_index, svuint64_t, svuint64_t,
207 z0_res = svldnt1_gather_u64base_index_u64 (p0, z0, 32),
208 z0_res = svldnt1_gather_index_u64 (p0, z0, 32))
209
/* Scalar-base plus vector byte-offset forms (SZ): base pointer in x0,
   per-element byte offsets in a Z register.  Signed (s64) and unsigned
   (u64) offset vectors are expected to generate identical code — note
   the loaded offsets appear in the base-register slot of the pattern,
   [zN.d, x0].  */
210 /*
211 ** ldnt1_gather_x0_u64_s64offset:
212 ** ldnt1d z0\.d, p0/z, \[z0\.d, x0\]
213 ** ret
214 */
215 TEST_LOAD_GATHER_SZ (ldnt1_gather_x0_u64_s64offset, svuint64_t, uint64_t, svint64_t,
216 z0_res = svldnt1_gather_s64offset_u64 (p0, x0, z0),
217 z0_res = svldnt1_gather_offset (p0, x0, z0))
218
219 /*
220 ** ldnt1_gather_tied1_u64_s64offset:
221 ** ldnt1d z0\.d, p0/z, \[z0\.d, x0\]
222 ** ret
223 */
224 TEST_LOAD_GATHER_SZ (ldnt1_gather_tied1_u64_s64offset, svuint64_t, uint64_t, svint64_t,
225 z0_res = svldnt1_gather_s64offset_u64 (p0, x0, z0),
226 z0_res = svldnt1_gather_offset (p0, x0, z0))
227
228 /*
229 ** ldnt1_gather_untied_u64_s64offset:
230 ** ldnt1d z0\.d, p0/z, \[z1\.d, x0\]
231 ** ret
232 */
233 TEST_LOAD_GATHER_SZ (ldnt1_gather_untied_u64_s64offset, svuint64_t, uint64_t, svint64_t,
234 z0_res = svldnt1_gather_s64offset_u64 (p0, x0, z1),
235 z0_res = svldnt1_gather_offset (p0, x0, z1))
236
237 /*
238 ** ldnt1_gather_x0_u64_u64offset:
239 ** ldnt1d z0\.d, p0/z, \[z0\.d, x0\]
240 ** ret
241 */
242 TEST_LOAD_GATHER_SZ (ldnt1_gather_x0_u64_u64offset, svuint64_t, uint64_t, svuint64_t,
243 z0_res = svldnt1_gather_u64offset_u64 (p0, x0, z0),
244 z0_res = svldnt1_gather_offset (p0, x0, z0))
245
246 /*
247 ** ldnt1_gather_tied1_u64_u64offset:
248 ** ldnt1d z0\.d, p0/z, \[z0\.d, x0\]
249 ** ret
250 */
251 TEST_LOAD_GATHER_SZ (ldnt1_gather_tied1_u64_u64offset, svuint64_t, uint64_t, svuint64_t,
252 z0_res = svldnt1_gather_u64offset_u64 (p0, x0, z0),
253 z0_res = svldnt1_gather_offset (p0, x0, z0))
254
255 /*
256 ** ldnt1_gather_untied_u64_u64offset:
257 ** ldnt1d z0\.d, p0/z, \[z1\.d, x0\]
258 ** ret
259 */
260 TEST_LOAD_GATHER_SZ (ldnt1_gather_untied_u64_u64offset, svuint64_t, uint64_t, svuint64_t,
261 z0_res = svldnt1_gather_u64offset_u64 (p0, x0, z1),
262 z0_res = svldnt1_gather_offset (p0, x0, z1))
263
/* Scalar-base plus vector element-index forms (SZ): the index vector is
   expected to be scaled into byte offsets first — `lsl zM.d, zN.d, #3`
   into a scratch Z register — before the gather load.  Signed and
   unsigned index vectors are expected to generate identical code.  */
264 /*
265 ** ldnt1_gather_x0_u64_s64index:
266 ** lsl (z[0-9]+\.d), z0\.d, #3
267 ** ldnt1d z0\.d, p0/z, \[\1, x0\]
268 ** ret
269 */
270 TEST_LOAD_GATHER_SZ (ldnt1_gather_x0_u64_s64index, svuint64_t, uint64_t, svint64_t,
271 z0_res = svldnt1_gather_s64index_u64 (p0, x0, z0),
272 z0_res = svldnt1_gather_index (p0, x0, z0))
273
274 /*
275 ** ldnt1_gather_tied1_u64_s64index:
276 ** lsl (z[0-9]+\.d), z0\.d, #3
277 ** ldnt1d z0\.d, p0/z, \[\1, x0\]
278 ** ret
279 */
280 TEST_LOAD_GATHER_SZ (ldnt1_gather_tied1_u64_s64index, svuint64_t, uint64_t, svint64_t,
281 z0_res = svldnt1_gather_s64index_u64 (p0, x0, z0),
282 z0_res = svldnt1_gather_index (p0, x0, z0))
283
284 /*
285 ** ldnt1_gather_untied_u64_s64index:
286 ** lsl (z[0-9]+\.d), z1\.d, #3
287 ** ldnt1d z0\.d, p0/z, \[\1, x0\]
288 ** ret
289 */
290 TEST_LOAD_GATHER_SZ (ldnt1_gather_untied_u64_s64index, svuint64_t, uint64_t, svint64_t,
291 z0_res = svldnt1_gather_s64index_u64 (p0, x0, z1),
292 z0_res = svldnt1_gather_index (p0, x0, z1))
293
294 /*
295 ** ldnt1_gather_x0_u64_u64index:
296 ** lsl (z[0-9]+\.d), z0\.d, #3
297 ** ldnt1d z0\.d, p0/z, \[\1, x0\]
298 ** ret
299 */
300 TEST_LOAD_GATHER_SZ (ldnt1_gather_x0_u64_u64index, svuint64_t, uint64_t, svuint64_t,
301 z0_res = svldnt1_gather_u64index_u64 (p0, x0, z0),
302 z0_res = svldnt1_gather_index (p0, x0, z0))
303
304 /*
305 ** ldnt1_gather_tied1_u64_u64index:
306 ** lsl (z[0-9]+\.d), z0\.d, #3
307 ** ldnt1d z0\.d, p0/z, \[\1, x0\]
308 ** ret
309 */
310 TEST_LOAD_GATHER_SZ (ldnt1_gather_tied1_u64_u64index, svuint64_t, uint64_t, svuint64_t,
311 z0_res = svldnt1_gather_u64index_u64 (p0, x0, z0),
312 z0_res = svldnt1_gather_index (p0, x0, z0))
313
314 /*
315 ** ldnt1_gather_untied_u64_u64index:
316 ** lsl (z[0-9]+\.d), z1\.d, #3
317 ** ldnt1d z0\.d, p0/z, \[\1, x0\]
318 ** ret
319 */
320 TEST_LOAD_GATHER_SZ (ldnt1_gather_untied_u64_u64index, svuint64_t, uint64_t, svuint64_t,
321 z0_res = svldnt1_gather_u64index_u64 (p0, x0, z1),
322 z0_res = svldnt1_gather_index (p0, x0, z1))