/* gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1sw_gather_u64.c
   From the "[AArch64] Add support for the SVE2 ACLE" patch
   (mirror: git.ipfire.org, thirdparty/gcc.git).  */
1 /* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
2
3 #include "test_sve_acle.h"
4
5 /*
6 ** ldnt1sw_gather_u64_tied1:
7 ** ldnt1sw z0\.d, p0/z, \[z0\.d\]
8 ** ret
9 */
10 TEST_LOAD_GATHER_ZS (ldnt1sw_gather_u64_tied1, svuint64_t, svuint64_t,
11 z0_res = svldnt1sw_gather_u64base_u64 (p0, z0),
12 z0_res = svldnt1sw_gather_u64 (p0, z0))
13
14 /*
15 ** ldnt1sw_gather_u64_untied:
16 ** ldnt1sw z0\.d, p0/z, \[z1\.d\]
17 ** ret
18 */
19 TEST_LOAD_GATHER_ZS (ldnt1sw_gather_u64_untied, svuint64_t, svuint64_t,
20 z0_res = svldnt1sw_gather_u64base_u64 (p0, z1),
21 z0_res = svldnt1sw_gather_u64 (p0, z1))
22
23 /*
24 ** ldnt1sw_gather_x0_u64_offset:
25 ** ldnt1sw z0\.d, p0/z, \[z0\.d, x0\]
26 ** ret
27 */
28 TEST_LOAD_GATHER_ZS (ldnt1sw_gather_x0_u64_offset, svuint64_t, svuint64_t,
29 z0_res = svldnt1sw_gather_u64base_offset_u64 (p0, z0, x0),
30 z0_res = svldnt1sw_gather_offset_u64 (p0, z0, x0))
31
32 /*
33 ** ldnt1sw_gather_m4_u64_offset:
34 ** mov (x[0-9]+), #?-4
35 ** ldnt1sw z0\.d, p0/z, \[z0\.d, \1\]
36 ** ret
37 */
38 TEST_LOAD_GATHER_ZS (ldnt1sw_gather_m4_u64_offset, svuint64_t, svuint64_t,
39 z0_res = svldnt1sw_gather_u64base_offset_u64 (p0, z0, -4),
40 z0_res = svldnt1sw_gather_offset_u64 (p0, z0, -4))
41
42 /*
43 ** ldnt1sw_gather_0_u64_offset:
44 ** ldnt1sw z0\.d, p0/z, \[z0\.d\]
45 ** ret
46 */
47 TEST_LOAD_GATHER_ZS (ldnt1sw_gather_0_u64_offset, svuint64_t, svuint64_t,
48 z0_res = svldnt1sw_gather_u64base_offset_u64 (p0, z0, 0),
49 z0_res = svldnt1sw_gather_offset_u64 (p0, z0, 0))
50
51 /*
52 ** ldnt1sw_gather_5_u64_offset:
53 ** mov (x[0-9]+), #?5
54 ** ldnt1sw z0\.d, p0/z, \[z0\.d, \1\]
55 ** ret
56 */
57 TEST_LOAD_GATHER_ZS (ldnt1sw_gather_5_u64_offset, svuint64_t, svuint64_t,
58 z0_res = svldnt1sw_gather_u64base_offset_u64 (p0, z0, 5),
59 z0_res = svldnt1sw_gather_offset_u64 (p0, z0, 5))
60
61 /*
62 ** ldnt1sw_gather_6_u64_offset:
63 ** mov (x[0-9]+), #?6
64 ** ldnt1sw z0\.d, p0/z, \[z0\.d, \1\]
65 ** ret
66 */
67 TEST_LOAD_GATHER_ZS (ldnt1sw_gather_6_u64_offset, svuint64_t, svuint64_t,
68 z0_res = svldnt1sw_gather_u64base_offset_u64 (p0, z0, 6),
69 z0_res = svldnt1sw_gather_offset_u64 (p0, z0, 6))
70
71 /*
72 ** ldnt1sw_gather_7_u64_offset:
73 ** mov (x[0-9]+), #?7
74 ** ldnt1sw z0\.d, p0/z, \[z0\.d, \1\]
75 ** ret
76 */
77 TEST_LOAD_GATHER_ZS (ldnt1sw_gather_7_u64_offset, svuint64_t, svuint64_t,
78 z0_res = svldnt1sw_gather_u64base_offset_u64 (p0, z0, 7),
79 z0_res = svldnt1sw_gather_offset_u64 (p0, z0, 7))
80
81 /*
82 ** ldnt1sw_gather_8_u64_offset:
83 ** mov (x[0-9]+), #?8
84 ** ldnt1sw z0\.d, p0/z, \[z0\.d, \1\]
85 ** ret
86 */
87 TEST_LOAD_GATHER_ZS (ldnt1sw_gather_8_u64_offset, svuint64_t, svuint64_t,
88 z0_res = svldnt1sw_gather_u64base_offset_u64 (p0, z0, 8),
89 z0_res = svldnt1sw_gather_offset_u64 (p0, z0, 8))
90
91 /*
92 ** ldnt1sw_gather_124_u64_offset:
93 ** mov (x[0-9]+), #?124
94 ** ldnt1sw z0\.d, p0/z, \[z0\.d, \1\]
95 ** ret
96 */
97 TEST_LOAD_GATHER_ZS (ldnt1sw_gather_124_u64_offset, svuint64_t, svuint64_t,
98 z0_res = svldnt1sw_gather_u64base_offset_u64 (p0, z0, 124),
99 z0_res = svldnt1sw_gather_offset_u64 (p0, z0, 124))
100
101 /*
102 ** ldnt1sw_gather_128_u64_offset:
103 ** mov (x[0-9]+), #?128
104 ** ldnt1sw z0\.d, p0/z, \[z0\.d, \1\]
105 ** ret
106 */
107 TEST_LOAD_GATHER_ZS (ldnt1sw_gather_128_u64_offset, svuint64_t, svuint64_t,
108 z0_res = svldnt1sw_gather_u64base_offset_u64 (p0, z0, 128),
109 z0_res = svldnt1sw_gather_offset_u64 (p0, z0, 128))
110
111 /*
112 ** ldnt1sw_gather_x0_u64_index:
113 ** lsl (x[0-9]+), x0, #?2
114 ** ldnt1sw z0\.d, p0/z, \[z0\.d, \1\]
115 ** ret
116 */
117 TEST_LOAD_GATHER_ZS (ldnt1sw_gather_x0_u64_index, svuint64_t, svuint64_t,
118 z0_res = svldnt1sw_gather_u64base_index_u64 (p0, z0, x0),
119 z0_res = svldnt1sw_gather_index_u64 (p0, z0, x0))
120
121 /*
122 ** ldnt1sw_gather_m1_u64_index:
123 ** mov (x[0-9]+), #?-4
124 ** ldnt1sw z0\.d, p0/z, \[z0\.d, \1\]
125 ** ret
126 */
127 TEST_LOAD_GATHER_ZS (ldnt1sw_gather_m1_u64_index, svuint64_t, svuint64_t,
128 z0_res = svldnt1sw_gather_u64base_index_u64 (p0, z0, -1),
129 z0_res = svldnt1sw_gather_index_u64 (p0, z0, -1))
130
131 /*
132 ** ldnt1sw_gather_0_u64_index:
133 ** ldnt1sw z0\.d, p0/z, \[z0\.d\]
134 ** ret
135 */
136 TEST_LOAD_GATHER_ZS (ldnt1sw_gather_0_u64_index, svuint64_t, svuint64_t,
137 z0_res = svldnt1sw_gather_u64base_index_u64 (p0, z0, 0),
138 z0_res = svldnt1sw_gather_index_u64 (p0, z0, 0))
139
140 /*
141 ** ldnt1sw_gather_5_u64_index:
142 ** mov (x[0-9]+), #?20
143 ** ldnt1sw z0\.d, p0/z, \[z0\.d, \1\]
144 ** ret
145 */
146 TEST_LOAD_GATHER_ZS (ldnt1sw_gather_5_u64_index, svuint64_t, svuint64_t,
147 z0_res = svldnt1sw_gather_u64base_index_u64 (p0, z0, 5),
148 z0_res = svldnt1sw_gather_index_u64 (p0, z0, 5))
149
150 /*
151 ** ldnt1sw_gather_31_u64_index:
152 ** mov (x[0-9]+), #?124
153 ** ldnt1sw z0\.d, p0/z, \[z0\.d, \1\]
154 ** ret
155 */
156 TEST_LOAD_GATHER_ZS (ldnt1sw_gather_31_u64_index, svuint64_t, svuint64_t,
157 z0_res = svldnt1sw_gather_u64base_index_u64 (p0, z0, 31),
158 z0_res = svldnt1sw_gather_index_u64 (p0, z0, 31))
159
160 /*
161 ** ldnt1sw_gather_32_u64_index:
162 ** mov (x[0-9]+), #?128
163 ** ldnt1sw z0\.d, p0/z, \[z0\.d, \1\]
164 ** ret
165 */
166 TEST_LOAD_GATHER_ZS (ldnt1sw_gather_32_u64_index, svuint64_t, svuint64_t,
167 z0_res = svldnt1sw_gather_u64base_index_u64 (p0, z0, 32),
168 z0_res = svldnt1sw_gather_index_u64 (p0, z0, 32))
169
170 /*
171 ** ldnt1sw_gather_x0_u64_s64offset:
172 ** ldnt1sw z0\.d, p0/z, \[z0\.d, x0\]
173 ** ret
174 */
175 TEST_LOAD_GATHER_SZ (ldnt1sw_gather_x0_u64_s64offset, svuint64_t, int32_t, svint64_t,
176 z0_res = svldnt1sw_gather_s64offset_u64 (p0, x0, z0),
177 z0_res = svldnt1sw_gather_offset_u64 (p0, x0, z0))
178
179 /*
180 ** ldnt1sw_gather_tied1_u64_s64offset:
181 ** ldnt1sw z0\.d, p0/z, \[z0\.d, x0\]
182 ** ret
183 */
184 TEST_LOAD_GATHER_SZ (ldnt1sw_gather_tied1_u64_s64offset, svuint64_t, int32_t, svint64_t,
185 z0_res = svldnt1sw_gather_s64offset_u64 (p0, x0, z0),
186 z0_res = svldnt1sw_gather_offset_u64 (p0, x0, z0))
187
188 /*
189 ** ldnt1sw_gather_untied_u64_s64offset:
190 ** ldnt1sw z0\.d, p0/z, \[z1\.d, x0\]
191 ** ret
192 */
193 TEST_LOAD_GATHER_SZ (ldnt1sw_gather_untied_u64_s64offset, svuint64_t, int32_t, svint64_t,
194 z0_res = svldnt1sw_gather_s64offset_u64 (p0, x0, z1),
195 z0_res = svldnt1sw_gather_offset_u64 (p0, x0, z1))
196
197 /*
198 ** ldnt1sw_gather_x0_u64_u64offset:
199 ** ldnt1sw z0\.d, p0/z, \[z0\.d, x0\]
200 ** ret
201 */
202 TEST_LOAD_GATHER_SZ (ldnt1sw_gather_x0_u64_u64offset, svuint64_t, int32_t, svuint64_t,
203 z0_res = svldnt1sw_gather_u64offset_u64 (p0, x0, z0),
204 z0_res = svldnt1sw_gather_offset_u64 (p0, x0, z0))
205
206 /*
207 ** ldnt1sw_gather_tied1_u64_u64offset:
208 ** ldnt1sw z0\.d, p0/z, \[z0\.d, x0\]
209 ** ret
210 */
211 TEST_LOAD_GATHER_SZ (ldnt1sw_gather_tied1_u64_u64offset, svuint64_t, int32_t, svuint64_t,
212 z0_res = svldnt1sw_gather_u64offset_u64 (p0, x0, z0),
213 z0_res = svldnt1sw_gather_offset_u64 (p0, x0, z0))
214
215 /*
216 ** ldnt1sw_gather_untied_u64_u64offset:
217 ** ldnt1sw z0\.d, p0/z, \[z1\.d, x0\]
218 ** ret
219 */
220 TEST_LOAD_GATHER_SZ (ldnt1sw_gather_untied_u64_u64offset, svuint64_t, int32_t, svuint64_t,
221 z0_res = svldnt1sw_gather_u64offset_u64 (p0, x0, z1),
222 z0_res = svldnt1sw_gather_offset_u64 (p0, x0, z1))
223
224 /*
225 ** ldnt1sw_gather_x0_u64_s64index:
226 ** lsl (z[0-9]+\.d), z0\.d, #2
227 ** ldnt1sw z0\.d, p0/z, \[\1, x0\]
228 ** ret
229 */
230 TEST_LOAD_GATHER_SZ (ldnt1sw_gather_x0_u64_s64index, svuint64_t, int32_t, svint64_t,
231 z0_res = svldnt1sw_gather_s64index_u64 (p0, x0, z0),
232 z0_res = svldnt1sw_gather_index_u64 (p0, x0, z0))
233
234 /*
235 ** ldnt1sw_gather_tied1_u64_s64index:
236 ** lsl (z[0-9]+\.d), z0\.d, #2
237 ** ldnt1sw z0\.d, p0/z, \[\1, x0\]
238 ** ret
239 */
240 TEST_LOAD_GATHER_SZ (ldnt1sw_gather_tied1_u64_s64index, svuint64_t, int32_t, svint64_t,
241 z0_res = svldnt1sw_gather_s64index_u64 (p0, x0, z0),
242 z0_res = svldnt1sw_gather_index_u64 (p0, x0, z0))
243
244 /*
245 ** ldnt1sw_gather_untied_u64_s64index:
246 ** lsl (z[0-9]+\.d), z1\.d, #2
247 ** ldnt1sw z0\.d, p0/z, \[\1, x0\]
248 ** ret
249 */
250 TEST_LOAD_GATHER_SZ (ldnt1sw_gather_untied_u64_s64index, svuint64_t, int32_t, svint64_t,
251 z0_res = svldnt1sw_gather_s64index_u64 (p0, x0, z1),
252 z0_res = svldnt1sw_gather_index_u64 (p0, x0, z1))
253
254 /*
255 ** ldnt1sw_gather_x0_u64_u64index:
256 ** lsl (z[0-9]+\.d), z0\.d, #2
257 ** ldnt1sw z0\.d, p0/z, \[\1, x0\]
258 ** ret
259 */
260 TEST_LOAD_GATHER_SZ (ldnt1sw_gather_x0_u64_u64index, svuint64_t, int32_t, svuint64_t,
261 z0_res = svldnt1sw_gather_u64index_u64 (p0, x0, z0),
262 z0_res = svldnt1sw_gather_index_u64 (p0, x0, z0))
263
264 /*
265 ** ldnt1sw_gather_tied1_u64_u64index:
266 ** lsl (z[0-9]+\.d), z0\.d, #2
267 ** ldnt1sw z0\.d, p0/z, \[\1, x0\]
268 ** ret
269 */
270 TEST_LOAD_GATHER_SZ (ldnt1sw_gather_tied1_u64_u64index, svuint64_t, int32_t, svuint64_t,
271 z0_res = svldnt1sw_gather_u64index_u64 (p0, x0, z0),
272 z0_res = svldnt1sw_gather_index_u64 (p0, x0, z0))
273
274 /*
275 ** ldnt1sw_gather_untied_u64_u64index:
276 ** lsl (z[0-9]+\.d), z1\.d, #2
277 ** ldnt1sw z0\.d, p0/z, \[\1, x0\]
278 ** ret
279 */
280 TEST_LOAD_GATHER_SZ (ldnt1sw_gather_untied_u64_u64index, svuint64_t, int32_t, svuint64_t,
281 z0_res = svldnt1sw_gather_u64index_u64 (p0, x0, z1),
282 z0_res = svldnt1sw_gather_index_u64 (p0, x0, z1))