/* AArch64 assembler/disassembler support.

   Copyright (C) 2009-2025 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GNU Binutils.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the license, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING3. If not,
   see <http://www.gnu.org/licenses/>. */

#ifndef OPCODE_AARCH64_H
#define OPCODE_AARCH64_H

#include "bfd.h"
#include <stdint.h>
#include <assert.h>
#include <stdlib.h>

#include "dis-asm.h"

#ifdef __cplusplus
extern "C" {
#endif

/* The offset for pc-relative addressing is currently defined to be 0. */
#define AARCH64_PCREL_OFFSET 0

typedef uint32_t aarch64_insn;

/* An enum containing all known CPU features. The values act as bit positions
   into aarch64_feature_set. */
enum aarch64_feature_bit {
  /* All processors. */
  AARCH64_FEATURE_V8,
  /* ARMv8.6 processors. */
  AARCH64_FEATURE_V8_6A,
  /* Bfloat16 insns. */
  AARCH64_FEATURE_BFLOAT16,
  /* Armv8-A processors. */
  AARCH64_FEATURE_V8A,
  /* SVE2 instructions. */
  AARCH64_FEATURE_SVE2,
  /* ARMv8.2 processors. */
  AARCH64_FEATURE_V8_2A,
  /* ARMv8.3 processors. */
  AARCH64_FEATURE_V8_3A,
  AARCH64_FEATURE_SVE2_AES,
  AARCH64_FEATURE_SVE2_BITPERM,
  AARCH64_FEATURE_SVE2_SM4,
  AARCH64_FEATURE_SVE2_SHA3,
  /* ARMv8.4 processors. */
  AARCH64_FEATURE_V8_4A,
  /* Armv8-R processors. */
  AARCH64_FEATURE_V8R,
  /* Armv8.7 processors. */
  AARCH64_FEATURE_V8_7A,
  /* Scalable Matrix Extension. */
  AARCH64_FEATURE_SME,
  /* Atomic 64-byte load/store. */
  AARCH64_FEATURE_LS64,
  /* v8.3 Pointer Authentication. */
  AARCH64_FEATURE_PAUTH,
  /* FP instructions. */
  AARCH64_FEATURE_FP,
  /* SIMD instructions. */
  AARCH64_FEATURE_SIMD,
  /* CRC instructions. */
  AARCH64_FEATURE_CRC,
  /* LSE instructions. */
  AARCH64_FEATURE_LSE,
  /* LSFE instructions. */
  AARCH64_FEATURE_LSFE,
  /* PAN instructions. */
  AARCH64_FEATURE_PAN,
  /* LOR instructions. */
  AARCH64_FEATURE_LOR,
  /* v8.1 SIMD instructions. */
  AARCH64_FEATURE_RDMA,
  /* v8.1 features. */
  AARCH64_FEATURE_V8_1A,
  /* v8.2 FP16 instructions. */
  AARCH64_FEATURE_F16,
  /* RAS Extensions. */
  AARCH64_FEATURE_RAS,
  /* Statistical Profiling. */
  AARCH64_FEATURE_PROFILE,
  /* SVE instructions. */
  AARCH64_FEATURE_SVE,
  /* RCPC instructions. */
  AARCH64_FEATURE_RCPC,
  /* RCPC2 instructions. */
  AARCH64_FEATURE_RCPC2,
  /* Complex # instructions. */
  AARCH64_FEATURE_COMPNUM,
  /* JavaScript conversion instructions. */
  AARCH64_FEATURE_JSCVT,
  /* Dot Product instructions. */
  AARCH64_FEATURE_DOTPROD,
  /* SM3 & SM4 instructions. */
  AARCH64_FEATURE_SM4,
  /* SHA2 instructions. */
  AARCH64_FEATURE_SHA2,
  /* SHA3 instructions. */
  AARCH64_FEATURE_SHA3,
  /* AES instructions. */
  AARCH64_FEATURE_AES,
  /* v8.2 FP16FML ins. */
  AARCH64_FEATURE_F16_FML,
  /* ARMv8.5 processors. */
  AARCH64_FEATURE_V8_5A,
  /* v8.5 Flag Manipulation version 2. */
  AARCH64_FEATURE_FLAGMANIP,
  /* FRINT[32,64][Z,X] insns. */
  AARCH64_FEATURE_FRINTTS,
  /* SB instruction. */
  AARCH64_FEATURE_SB,
  /* Execution and Data Prediction Restriction instructions. */
  AARCH64_FEATURE_PREDRES,
  /* DC CVADP. */
  AARCH64_FEATURE_CVADP,
  /* Random Number instructions. */
  AARCH64_FEATURE_RNG,
  /* SCXTNUM_ELx. */
  AARCH64_FEATURE_SCXTNUM,
  /* ID_PFR2 instructions. */
  AARCH64_FEATURE_ID_PFR2,
  /* SSBS mechanism enabled. */
  AARCH64_FEATURE_SSBS,
  /* Compare and branch instructions. */
  AARCH64_FEATURE_CMPBR,
  /* Memory Tagging Extension. */
  AARCH64_FEATURE_MEMTAG,
  /* Outer Cacheable Cache Maintenance Operation. */
  AARCH64_FEATURE_OCCMO,
  /* Transactional Memory Extension. */
  AARCH64_FEATURE_TME,
  /* XS memory attribute. */
  AARCH64_FEATURE_XS,
  /* WFx instructions with timeout. */
  AARCH64_FEATURE_WFXT,
  /* Standardization of memory operations. */
  AARCH64_FEATURE_MOPS,
  /* Hinted conditional branches. */
  AARCH64_FEATURE_HBC,
  /* Matrix Multiply instructions. */
  AARCH64_FEATURE_I8MM,
  AARCH64_FEATURE_F32MM,
  AARCH64_FEATURE_F64MM,
  /* v8.4 Flag Manipulation. */
  AARCH64_FEATURE_FLAGM,
  /* Armv9.0-A processors. */
  AARCH64_FEATURE_V9A,
  /* SME F64F64. */
  AARCH64_FEATURE_SME_F64F64,
  /* SME I16I64. */
  AARCH64_FEATURE_SME_I16I64,
  /* Armv8.8 processors. */
  AARCH64_FEATURE_V8_8A,
  /* Common Short Sequence Compression instructions. */
  AARCH64_FEATURE_CSSC,
  /* Armv8.9-A processors. */
  AARCH64_FEATURE_V8_9A,
  /* Check Feature Status Extension. */
  AARCH64_FEATURE_CHK,
  /* Guarded Control Stack. */
  AARCH64_FEATURE_GCS,
  /* SPE Call Return branch records. */
  AARCH64_FEATURE_SPE_CRR,
  /* SPE Filter by data source. */
  AARCH64_FEATURE_SPE_FDS,
  /* Additional SPE events. */
  AARCH64_FEATURE_SPEv1p4,
  /* SME2. */
  AARCH64_FEATURE_SME2,
  /* Translation Hardening Extension. */
  AARCH64_FEATURE_THE,
  /* LSE128. */
  AARCH64_FEATURE_LSE128,
  /* ARMv8.9-A RAS Extensions. */
  AARCH64_FEATURE_RASv2,
  /* Delegated SError exceptions for EL3. */
  AARCH64_FEATURE_E3DSE,
  /* System Control Register2. */
  AARCH64_FEATURE_SCTLR2,
  /* Fine Grained Traps. */
  AARCH64_FEATURE_FGT2,
  /* Physical Fault Address. */
  AARCH64_FEATURE_PFAR,
  /* Address Translate Stage 1. */
  AARCH64_FEATURE_ATS1A,
  /* Memory Attribute Index Enhancement. */
  AARCH64_FEATURE_AIE,
  /* Stage 1 Permission Indirection Extension. */
  AARCH64_FEATURE_S1PIE,
  /* Stage 2 Permission Indirection Extension. */
  AARCH64_FEATURE_S2PIE,
  /* Stage 1 Permission Overlay Extension. */
  AARCH64_FEATURE_S1POE,
  /* Stage 2 Permission Overlay Extension. */
  AARCH64_FEATURE_S2POE,
  /* Extension to Translation Control Registers. */
  AARCH64_FEATURE_TCR2,
  /* Speculation Prediction Restriction instructions. */
  AARCH64_FEATURE_PREDRES2,
  /* Instrumentation Extension. */
  AARCH64_FEATURE_ITE,
  /* 128-bit page table descriptor, system registers
     and instructions. */
  AARCH64_FEATURE_D128,
  /* Armv8.9-A/Armv9.4-A architecture Debug extension. */
  AARCH64_FEATURE_DEBUGv8p9,
  /* Performance Monitors Extension. */
  AARCH64_FEATURE_PMUv3p9,
  /* Performance Monitors Snapshots Extension. */
  AARCH64_FEATURE_PMUv3_SS,
  /* Performance Monitors Instruction Counter Extension. */
  AARCH64_FEATURE_PMUv3_ICNTR,
  /* System Performance Monitors Extension */
  AARCH64_FEATURE_SPMU,
  /* System Performance Monitors Extension version 2 */
  AARCH64_FEATURE_SPMU2,
  /* Performance Monitors Synchronous-Exception-Based Event Extension. */
  AARCH64_FEATURE_SEBEP,
  /* SME2.1 instructions. */
  AARCH64_FEATURE_SME2p1,
  /* SVE2.1 instructions. */
  AARCH64_FEATURE_SVE2p1,
  /* SVE_F16F32MM instructions. */
  AARCH64_FEATURE_SVE_F16F32MM,
  /* F8F32MM instructions. */
  AARCH64_FEATURE_F8F32MM,
  /* F8F16MM instructions. */
  AARCH64_FEATURE_F8F16MM,
  /* RCPC3 instructions. */
  AARCH64_FEATURE_RCPC3,
  /* Enhanced Software Step Extension. */
  AARCH64_FEATURE_STEP2,
  /* Checked Pointer Arithmetic instructions. */
  AARCH64_FEATURE_CPA,
  /* FAMINMAX instructions. */
  AARCH64_FEATURE_FAMINMAX,
  /* FP8 instructions. */
  AARCH64_FEATURE_FP8,
  /* LUT instructions. */
  AARCH64_FEATURE_LUT,
  /* Branch Record Buffer Extension */
  AARCH64_FEATURE_BRBE,
  /* SME LUTv2 instructions. */
  AARCH64_FEATURE_SME_LUTv2,
  /* FP8FMA instructions. */
  AARCH64_FEATURE_FP8FMA,
  /* FP8DOT4 instructions. */
  AARCH64_FEATURE_FP8DOT4,
  /* FP8DOT2 instructions. */
  AARCH64_FEATURE_FP8DOT2,
  /* SSVE FP8FMA instructions. */
  AARCH64_FEATURE_SSVE_FP8FMA,
  /* SSVE FP8DOT4 instructions. */
  AARCH64_FEATURE_SSVE_FP8DOT4,
  /* SSVE FP8DOT2 instructions. */
  AARCH64_FEATURE_SSVE_FP8DOT2,
  /* SME F8F32 instructions. */
  AARCH64_FEATURE_SME_F8F32,
  /* SME F8F16 instructions. */
  AARCH64_FEATURE_SME_F8F16,
  /* Non-widening half-precision FP16 to FP16 arithmetic for SME2. */
  AARCH64_FEATURE_SME_F16F16,
  /* FEAT_SVE_BFSCALE. */
  AARCH64_FEATURE_SVE_BFSCALE,
  /* SVE Z-targeting non-widening BFloat16 instructions. */
  AARCH64_FEATURE_SVE_B16B16,
  /* SME non-widening BFloat16 instructions. */
  AARCH64_FEATURE_SME_B16B16,
  /* Armv9.1-A processors. */
  AARCH64_FEATURE_V9_1A,
  /* Armv9.2-A processors. */
  AARCH64_FEATURE_V9_2A,
  /* Armv9.3-A processors. */
  AARCH64_FEATURE_V9_3A,
  /* Armv9.4-A processors. */
  AARCH64_FEATURE_V9_4A,
  /* Armv9.5-A processors. */
  AARCH64_FEATURE_V9_5A,
  /* FPRCVT instructions. */
  AARCH64_FEATURE_FPRCVT,
  /* Point of Physical Storage. */
  AARCH64_FEATURE_PoPS,

  /* Virtual features. These are used to gate instructions that are enabled
     by either of two (or more) sets of command line flags. */
  /* +fp8fma+sve or +ssve-fp8fma */
  AARCH64_FEATURE_FP8FMA_SVE,
  /* +fp8dot4+sve or +ssve-fp8dot4 */
  AARCH64_FEATURE_FP8DOT4_SVE,
  /* +fp8dot2+sve or +ssve-fp8dot2 */
  AARCH64_FEATURE_FP8DOT2_SVE,
  /* +sme-f16f16 or +sme-f8f16 */
  AARCH64_FEATURE_SME_F16F16_F8F16,
  /* +sve2 or +sme2 */
  AARCH64_FEATURE_SVE2_SME2,
  /* +sve2p1 or +sme */
  AARCH64_FEATURE_SVE2p1_SME,
  /* +sve2p1 or +sme2 */
  AARCH64_FEATURE_SVE2p1_SME2,
  /* +sve2p1 or +sme2p1 */
  AARCH64_FEATURE_SVE2p1_SME2p1,
  AARCH64_NUM_FEATURES
};

typedef uint64_t aarch64_feature_word;
#define AARCH64_BITS_PER_FEATURE_WORD 64

#define AA64_REPLICATE(SEP, BODY, ...) \
  BODY (0, __VA_ARGS__) SEP \
  BODY (1, __VA_ARGS__) SEP \
  BODY (2, __VA_ARGS__)

/* Some useful SEP operators for use with replication. */
#define REP_COMMA ,
#define REP_SEMICOLON ;
#define REP_OR_OR ||
#define REP_AND_AND &&
#define REP_PLUS +

/* Not currently needed, but if an empty SEP is required define:
   #define REP_NO_SEP
   Then use REP_NO_SEP in the SEP field. */

/* Used to generate one instance of VAL for each value of ELT (ELT is
   not otherwise used). */
#define AA64_REPVAL(ELT, VAL) VAL
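
/* Illustrative expansion (not used elsewhere in this file): with the three
   repetitions above, AA64_REPLICATE (REP_COMMA, AA64_REPVAL, 0) expands to
   "0, 0, 0", and AA64_REPLICATE (REP_PLUS, AA64_REPVAL, 64) expands to
   "64 + 64 + 64". */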

/* static_assert requires C11 (or C++11) or later. Support older
   versions by disabling this check since compilers without this are
   pretty uncommon these days. */
#if ((defined __STDC_VERSION__ && __STDC_VERSION__ >= 201112L) \
     || (defined __cplusplus && __cplusplus >= 201103L))
static_assert ((AA64_REPLICATE (REP_PLUS, AA64_REPVAL,
                                AARCH64_BITS_PER_FEATURE_WORD))
               >= AARCH64_NUM_FEATURES,
               "Insufficient repetitions in AA64_REPLICATE()");
#endif

/* These macros take an initial argument X that gives the index into
   an aarch64_feature_set. The macros then return the bitmask for
   that array index. */

/* A mask in which feature bit BIT is set and all other bits are clear. */
#define AARCH64_UINT64_BIT(X, BIT) \
  ((X) == (BIT) / AARCH64_BITS_PER_FEATURE_WORD \
   ? 1ULL << (BIT) % AARCH64_BITS_PER_FEATURE_WORD \
   : 0)

/* A mask that includes only AARCH64_FEATURE_<NAME>. */
#define AARCH64_FEATBIT(X, NAME) \
  AARCH64_UINT64_BIT (X, AARCH64_FEATURE_##NAME)
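
/* For illustration only: a feature whose enum value is 70 lives in the
   second feature word, so AARCH64_UINT64_BIT (X, 70) evaluates to
   1ULL << 6 when X is 1 and to 0 for the other array indices.  The
   actual bit assignments follow the enum above. */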

/* A mask of the features that are enabled by each architecture version,
   excluding those that are inherited from other architecture versions. */
#define AARCH64_ARCH_V8A_FEATURES(X) (AARCH64_FEATBIT (X, V8A) \
  | AARCH64_FEATBIT (X, FP) \
  | AARCH64_FEATBIT (X, RAS) \
  | AARCH64_FEATBIT (X, SIMD) \
  | AARCH64_FEATBIT (X, CHK))
#define AARCH64_ARCH_V8_1A_FEATURES(X) (AARCH64_FEATBIT (X, V8_1A) \
  | AARCH64_FEATBIT (X, CRC) \
  | AARCH64_FEATBIT (X, LSE) \
  | AARCH64_FEATBIT (X, PAN) \
  | AARCH64_FEATBIT (X, LOR) \
  | AARCH64_FEATBIT (X, RDMA))
#define AARCH64_ARCH_V8_2A_FEATURES(X) (AARCH64_FEATBIT (X, V8_2A))
#define AARCH64_ARCH_V8_3A_FEATURES(X) (AARCH64_FEATBIT (X, V8_3A) \
  | AARCH64_FEATBIT (X, PAUTH) \
  | AARCH64_FEATBIT (X, RCPC) \
  | AARCH64_FEATBIT (X, COMPNUM) \
  | AARCH64_FEATBIT (X, JSCVT))
#define AARCH64_ARCH_V8_4A_FEATURES(X) (AARCH64_FEATBIT (X, V8_4A) \
  | AARCH64_FEATBIT (X, RCPC2) \
  | AARCH64_FEATBIT (X, DOTPROD) \
  | AARCH64_FEATBIT (X, FLAGM) \
  | AARCH64_FEATBIT (X, F16_FML))
#define AARCH64_ARCH_V8_5A_FEATURES(X) (AARCH64_FEATBIT (X, V8_5A) \
  | AARCH64_FEATBIT (X, FLAGMANIP) \
  | AARCH64_FEATBIT (X, FRINTTS) \
  | AARCH64_FEATBIT (X, SB) \
  | AARCH64_FEATBIT (X, PREDRES) \
  | AARCH64_FEATBIT (X, CVADP) \
  | AARCH64_FEATBIT (X, SCXTNUM) \
  | AARCH64_FEATBIT (X, ID_PFR2) \
  | AARCH64_FEATBIT (X, SSBS))
#define AARCH64_ARCH_V8_6A_FEATURES(X) (AARCH64_FEATBIT (X, V8_6A) \
  | AARCH64_FEATBIT (X, BFLOAT16) \
  | AARCH64_FEATBIT (X, I8MM))
#define AARCH64_ARCH_V8_7A_FEATURES(X) (AARCH64_FEATBIT (X, V8_7A) \
  | AARCH64_FEATBIT (X, XS) \
  | AARCH64_FEATBIT (X, WFXT) \
  | AARCH64_FEATBIT (X, LS64))
#define AARCH64_ARCH_V8_8A_FEATURES(X) (AARCH64_FEATBIT (X, V8_8A) \
  | AARCH64_FEATBIT (X, MOPS) \
  | AARCH64_FEATBIT (X, HBC))
#define AARCH64_ARCH_V8_9A_FEATURES(X) (AARCH64_FEATBIT (X, V8_9A) \
  | AARCH64_FEATBIT (X, CSSC) \
  | AARCH64_FEATBIT (X, SPEv1p4) \
  | AARCH64_FEATBIT (X, SPE_CRR) \
  | AARCH64_FEATBIT (X, SPE_FDS) \
  | AARCH64_FEATBIT (X, RASv2) \
  | AARCH64_FEATBIT (X, SCTLR2) \
  | AARCH64_FEATBIT (X, FGT2) \
  | AARCH64_FEATBIT (X, PFAR) \
  | AARCH64_FEATBIT (X, ATS1A) \
  | AARCH64_FEATBIT (X, AIE) \
  | AARCH64_FEATBIT (X, S1PIE) \
  | AARCH64_FEATBIT (X, S2PIE) \
  | AARCH64_FEATBIT (X, S1POE) \
  | AARCH64_FEATBIT (X, S2POE) \
  | AARCH64_FEATBIT (X, TCR2) \
  | AARCH64_FEATBIT (X, DEBUGv8p9) \
  | AARCH64_FEATBIT (X, PMUv3p9) \
  | AARCH64_FEATBIT (X, PMUv3_SS) \
  | AARCH64_FEATBIT (X, PMUv3_ICNTR) \
  | AARCH64_FEATBIT (X, SPMU) \
  | AARCH64_FEATBIT (X, SEBEP) \
  | AARCH64_FEATBIT (X, PREDRES2) \
  )

#define AARCH64_ARCH_V9A_FEATURES(X) (AARCH64_FEATBIT (X, V9A) \
  | AARCH64_FEATBIT (X, F16) \
  | AARCH64_FEATBIT (X, SVE) \
  | AARCH64_FEATBIT (X, SVE2))
#define AARCH64_ARCH_V9_1A_FEATURES(X) (AARCH64_FEATBIT (X, V9_1A) \
  | AARCH64_ARCH_V8_6A_FEATURES (X))
#define AARCH64_ARCH_V9_2A_FEATURES(X) (AARCH64_FEATBIT (X, V9_2A) \
  | AARCH64_ARCH_V8_7A_FEATURES (X))
#define AARCH64_ARCH_V9_3A_FEATURES(X) (AARCH64_FEATBIT (X, V9_3A) \
  | AARCH64_ARCH_V8_8A_FEATURES (X))
#define AARCH64_ARCH_V9_4A_FEATURES(X) (AARCH64_FEATBIT (X, V9_4A) \
  | AARCH64_ARCH_V8_9A_FEATURES (X) \
  | AARCH64_FEATBIT (X, SVE2p1))
#define AARCH64_ARCH_V9_5A_FEATURES(X) (AARCH64_FEATBIT (X, V9_5A) \
  | AARCH64_FEATBIT (X, CPA) \
  | AARCH64_FEATBIT (X, LUT) \
  | AARCH64_FEATBIT (X, FAMINMAX) \
  | AARCH64_FEATBIT (X, E3DSE) \
  | AARCH64_FEATBIT (X, SPMU2) \
  | AARCH64_FEATBIT (X, STEP2) \
  )

/* Architectures are the sum of the base and extensions. */
#define AARCH64_ARCH_V8A(X) (AARCH64_FEATBIT (X, V8) \
  | AARCH64_ARCH_V8A_FEATURES (X))
#define AARCH64_ARCH_V8_1A(X) (AARCH64_ARCH_V8A (X) \
  | AARCH64_ARCH_V8_1A_FEATURES (X))
#define AARCH64_ARCH_V8_2A(X) (AARCH64_ARCH_V8_1A (X) \
  | AARCH64_ARCH_V8_2A_FEATURES (X))
#define AARCH64_ARCH_V8_3A(X) (AARCH64_ARCH_V8_2A (X) \
  | AARCH64_ARCH_V8_3A_FEATURES (X))
#define AARCH64_ARCH_V8_4A(X) (AARCH64_ARCH_V8_3A (X) \
  | AARCH64_ARCH_V8_4A_FEATURES (X))
#define AARCH64_ARCH_V8_5A(X) (AARCH64_ARCH_V8_4A (X) \
  | AARCH64_ARCH_V8_5A_FEATURES (X))
#define AARCH64_ARCH_V8_6A(X) (AARCH64_ARCH_V8_5A (X) \
  | AARCH64_ARCH_V8_6A_FEATURES (X))
#define AARCH64_ARCH_V8_7A(X) (AARCH64_ARCH_V8_6A (X) \
  | AARCH64_ARCH_V8_7A_FEATURES (X))
#define AARCH64_ARCH_V8_8A(X) (AARCH64_ARCH_V8_7A (X) \
  | AARCH64_ARCH_V8_8A_FEATURES (X))
#define AARCH64_ARCH_V8_9A(X) (AARCH64_ARCH_V8_8A (X) \
  | AARCH64_ARCH_V8_9A_FEATURES (X))
#define AARCH64_ARCH_V8R(X) ((AARCH64_ARCH_V8_4A (X) \
  | AARCH64_FEATBIT (X, V8R)) \
  & ~AARCH64_FEATBIT (X, V8A) \
  & ~AARCH64_FEATBIT (X, LOR))

#define AARCH64_ARCH_V9A(X) (AARCH64_ARCH_V8_5A (X) \
  | AARCH64_ARCH_V9A_FEATURES (X))
#define AARCH64_ARCH_V9_1A(X) (AARCH64_ARCH_V9A (X) \
  | AARCH64_ARCH_V9_1A_FEATURES (X))
#define AARCH64_ARCH_V9_2A(X) (AARCH64_ARCH_V9_1A (X) \
  | AARCH64_ARCH_V9_2A_FEATURES (X))
#define AARCH64_ARCH_V9_3A(X) (AARCH64_ARCH_V9_2A (X) \
  | AARCH64_ARCH_V9_3A_FEATURES (X))
#define AARCH64_ARCH_V9_4A(X) (AARCH64_ARCH_V9_3A (X) \
  | AARCH64_ARCH_V9_4A_FEATURES (X))
#define AARCH64_ARCH_V9_5A(X) (AARCH64_ARCH_V9_4A (X) \
  | AARCH64_ARCH_V9_5A_FEATURES (X))

#define AARCH64_ARCH_NONE(X) 0

/* CPU-specific features. */
typedef struct {
  aarch64_feature_word flags[AA64_REPLICATE (REP_PLUS, AA64_REPVAL, 1)];
} aarch64_feature_set;

#define AARCH64_CPU_HAS_FEATURE_BODY(ELT, CPU, FEAT) \
  ((~(CPU).flags[ELT] & AARCH64_FEATBIT (ELT, FEAT)) == 0)
#define AARCH64_CPU_HAS_FEATURE(CPU, FEAT) \
  (AA64_REPLICATE (REP_AND_AND, AARCH64_CPU_HAS_FEATURE_BODY, CPU, FEAT))

#define AARCH64_CPU_HAS_ALL_FEATURES_BODY(ELT, CPU, FEAT) \
  ((~(CPU).flags[ELT] & (FEAT).flags[ELT]) == 0)
#define AARCH64_CPU_HAS_ALL_FEATURES(CPU, FEAT) \
  (AA64_REPLICATE (REP_AND_AND, AARCH64_CPU_HAS_ALL_FEATURES_BODY, CPU, FEAT))

#define AARCH64_CPU_HAS_ANY_FEATURES_BODY(ELT, CPU, FEAT) \
  (((CPU).flags[ELT] & (FEAT).flags[ELT]) != 0)
#define AARCH64_CPU_HAS_ANY_FEATURES(CPU, FEAT) \
  (AA64_REPLICATE (REP_OR_OR, AARCH64_CPU_HAS_ANY_FEATURES_BODY, CPU, FEAT))

#define AARCH64_SET_FEATURE_BODY(ELT, DEST, FEAT) \
  (DEST).flags[ELT] = FEAT (ELT)
#define AARCH64_SET_FEATURE(DEST, FEAT) \
  (AA64_REPLICATE (REP_COMMA, AARCH64_SET_FEATURE_BODY, DEST, FEAT))
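
/* A minimal usage sketch (illustrative only, not part of the interface):

     aarch64_feature_set cpu;
     AARCH64_SET_FEATURE (cpu, AARCH64_ARCH_V8_4A);
     if (AARCH64_CPU_HAS_FEATURE (cpu, DOTPROD))
       ...

   Armv8.4-A pulls in the DOTPROD feature bit, so the test above holds. */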

#define AARCH64_CLEAR_FEATURE_BODY(ELT, DEST, SRC, FEAT) \
  (DEST).flags[ELT] = ((SRC).flags[ELT] \
                       & ~AARCH64_FEATBIT (ELT, FEAT))
#define AARCH64_CLEAR_FEATURE(DEST, SRC, FEAT) \
  (AA64_REPLICATE (REP_COMMA, AARCH64_CLEAR_FEATURE_BODY, DEST, SRC, FEAT))

#define AARCH64_MERGE_FEATURE_SETS_BODY(ELT, TARG, F1, F2) \
  (TARG).flags[ELT] = (F1).flags[ELT] | (F2).flags[ELT];
#define AARCH64_MERGE_FEATURE_SETS(TARG, F1, F2) \
  do \
    { \
      AA64_REPLICATE (REP_SEMICOLON, \
                      AARCH64_MERGE_FEATURE_SETS_BODY, TARG, F1, F2); \
    } \
  while (0)

#define AARCH64_CLEAR_FEATURES_BODY(ELT, TARG, F1, F2) \
  (TARG).flags[ELT] = (F1).flags[ELT] &~ (F2).flags[ELT];
#define AARCH64_CLEAR_FEATURES(TARG, F1, F2) \
  do \
    { \
      AA64_REPLICATE (REP_SEMICOLON, \
                      AARCH64_CLEAR_FEATURES_BODY, TARG, F1, F2); \
    } \
  while (0)

/* aarch64_feature_set initializers for no features and all features,
   respectively. */
#define AARCH64_NO_FEATURES { { AA64_REPLICATE (REP_COMMA, AA64_REPVAL, 0) } }
#define AARCH64_ALL_FEATURES { { AA64_REPLICATE (REP_COMMA, AA64_REPVAL, -1) } }

/* An aarch64_feature_set initializer for a single feature,
   AARCH64_FEATURE_<FEAT>. */
#define AARCH64_FEATURE_BODY(ELT, FEAT) \
  AARCH64_FEATBIT (ELT, FEAT)
#define AARCH64_FEATURE(FEAT) \
  { { AA64_REPLICATE (REP_COMMA, AARCH64_FEATURE_BODY, FEAT) } }

/* An aarch64_feature_set initializer for a specific architecture version,
   including all the features that are enabled by default for that architecture
   version. */
#define AARCH64_ARCH_FEATURES_BODY(ELT, ARCH) \
  AARCH64_ARCH_##ARCH (ELT)
#define AARCH64_ARCH_FEATURES(ARCH) \
  { { AA64_REPLICATE (REP_COMMA, AARCH64_ARCH_FEATURES_BODY, ARCH) } }

/* Used by AARCH64_CPU_FEATURES. */
#define AARCH64_OR_FEATURES_1(X, ARCH, F1) \
  (AARCH64_FEATBIT (X, F1) | AARCH64_ARCH_##ARCH (X))
#define AARCH64_OR_FEATURES_2(X, ARCH, F1, F2) \
  (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_1 (X, ARCH, F2))
#define AARCH64_OR_FEATURES_3(X, ARCH, F1, ...) \
  (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_2 (X, ARCH, __VA_ARGS__))
#define AARCH64_OR_FEATURES_4(X, ARCH, F1, ...) \
  (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_3 (X, ARCH, __VA_ARGS__))
#define AARCH64_OR_FEATURES_5(X, ARCH, F1, ...) \
  (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_4 (X, ARCH, __VA_ARGS__))
#define AARCH64_OR_FEATURES_6(X, ARCH, F1, ...) \
  (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_5 (X, ARCH, __VA_ARGS__))
#define AARCH64_OR_FEATURES_7(X, ARCH, F1, ...) \
  (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_6 (X, ARCH, __VA_ARGS__))
#define AARCH64_OR_FEATURES_8(X, ARCH, F1, ...) \
  (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_7 (X, ARCH, __VA_ARGS__))
#define AARCH64_OR_FEATURES_9(X, ARCH, F1, ...) \
  (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_8 (X, ARCH, __VA_ARGS__))

/* An aarch64_feature_set initializer for a CPU that implements architecture
   version ARCH, and additionally provides the N features listed in "...". */
#define AARCH64_CPU_FEATURES_BODY(ELT, ARCH, N, ...) \
  AARCH64_OR_FEATURES_##N (ELT, ARCH, __VA_ARGS__)
#define AARCH64_CPU_FEATURES(ARCH, N, ...) \
  { { AA64_REPLICATE (REP_COMMA, AARCH64_CPU_FEATURES_BODY, \
                      ARCH, N, __VA_ARGS__) } }

/* An aarch64_feature_set initializer for the N features listed in "...". */
#define AARCH64_FEATURES(N, ...) \
  AARCH64_CPU_FEATURES (NONE, N, __VA_ARGS__)
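
/* For instance (hypothetical CPU, illustrative values only), a core that
   implements Armv8.2-A plus two optional extensions could be described as

     static const aarch64_feature_set example_cpu
       = AARCH64_CPU_FEATURES (V8_2A, 2, F16, DOTPROD);

   while AARCH64_FEATURES (1, SVE) builds a set containing only the SVE
   feature bit. */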

enum aarch64_operand_class
{
  AARCH64_OPND_CLASS_NIL,
  AARCH64_OPND_CLASS_INT_REG,
  AARCH64_OPND_CLASS_MODIFIED_REG,
  AARCH64_OPND_CLASS_FP_REG,
  AARCH64_OPND_CLASS_SIMD_REG,
  AARCH64_OPND_CLASS_SIMD_ELEMENT,
  AARCH64_OPND_CLASS_SISD_REG,
  AARCH64_OPND_CLASS_SIMD_REGLIST,
  AARCH64_OPND_CLASS_SVE_REG,
  AARCH64_OPND_CLASS_SVE_REGLIST,
  AARCH64_OPND_CLASS_PRED_REG,
  AARCH64_OPND_CLASS_ZA_ACCESS,
  AARCH64_OPND_CLASS_ADDRESS,
  AARCH64_OPND_CLASS_IMMEDIATE,
  AARCH64_OPND_CLASS_SYSTEM,
  AARCH64_OPND_CLASS_COND,
};

/* Operand code that helps both parsing and coding.
   Keep AARCH64_OPERANDS synced. */

enum aarch64_opnd
{
  AARCH64_OPND_NIL, /* no operand---MUST BE FIRST! */

  AARCH64_OPND_Rd, /* Integer register as destination. */
  AARCH64_OPND_Rn, /* Integer register as source. */
  AARCH64_OPND_Rm, /* Integer register as source. */
  AARCH64_OPND_Rt, /* Integer register used in ld/st instructions. */
  AARCH64_OPND_Rt2, /* Integer register used in ld/st pair instructions. */
  AARCH64_OPND_X16, /* Integer register x16 in chkfeat instruction. */
  AARCH64_OPND_Rt_LS64, /* Integer register used in LS64 instructions. */
  AARCH64_OPND_Rt_SP, /* Integer Rt or SP used in STG instructions. */
  AARCH64_OPND_Rs, /* Integer register used in ld/st exclusive. */
  AARCH64_OPND_Ra, /* Integer register used in ddp_3src instructions. */
  AARCH64_OPND_Rt_SYS, /* Integer register used in system instructions. */

  AARCH64_OPND_Rd_SP, /* Integer Rd or SP. */
  AARCH64_OPND_Rn_SP, /* Integer Rn or SP. */
  AARCH64_OPND_Rm_SP, /* Integer Rm or SP. */
  AARCH64_OPND_PAIRREG, /* Paired register operand. */
  AARCH64_OPND_PAIRREG_OR_XZR, /* Paired register operand, optionally xzr. */
  AARCH64_OPND_Rm_EXT, /* Integer Rm extended. */
  AARCH64_OPND_Rm_SFT, /* Integer Rm shifted. */
  AARCH64_OPND_Rm_LSL, /* Integer Rm shifted (LSL-only). */

  AARCH64_OPND_Fd, /* Floating-point Fd. */
  AARCH64_OPND_Fn, /* Floating-point Fn. */
  AARCH64_OPND_Fm, /* Floating-point Fm. */
  AARCH64_OPND_Fa, /* Floating-point Fa. */
  AARCH64_OPND_Ft, /* Floating-point Ft. */
  AARCH64_OPND_Ft2, /* Floating-point Ft2. */

  AARCH64_OPND_Sd, /* AdvSIMD Scalar Sd. */
  AARCH64_OPND_Sn, /* AdvSIMD Scalar Sn. */
  AARCH64_OPND_Sm, /* AdvSIMD Scalar Sm. */

  AARCH64_OPND_Va, /* AdvSIMD Vector Va. */
  AARCH64_OPND_Vd, /* AdvSIMD Vector Vd. */
  AARCH64_OPND_Vn, /* AdvSIMD Vector Vn. */
  AARCH64_OPND_Vm, /* AdvSIMD Vector Vm. */
  AARCH64_OPND_VdD1, /* AdvSIMD <Vd>.D[1]; for FMOV only. */
  AARCH64_OPND_VnD1, /* AdvSIMD <Vn>.D[1]; for FMOV only. */
  AARCH64_OPND_Ed, /* AdvSIMD Vector Element Vd. */
  AARCH64_OPND_En, /* AdvSIMD Vector Element Vn. */
  AARCH64_OPND_Em, /* AdvSIMD Vector Element Vm. */
  AARCH64_OPND_Em16, /* AdvSIMD Vector Element Vm restricted to V0 - V15 when
                        qualifier is S_H or S_2B. */
  AARCH64_OPND_Em8, /* AdvSIMD Vector Element Vm restricted to V0 - V7,
                       used only with qualifier S_B. */
  AARCH64_OPND_Em_INDEX1_14, /* AdvSIMD 1-bit encoded index in Vm at [14] */
  AARCH64_OPND_Em_INDEX2_13, /* AdvSIMD 2-bit encoded index in Vm at [14:13] */
  AARCH64_OPND_Em_INDEX3_12, /* AdvSIMD 3-bit encoded index in Vm at [14:12] */
  AARCH64_OPND_LVn, /* AdvSIMD Vector register list used in e.g. TBL. */
  AARCH64_OPND_LVt, /* AdvSIMD Vector register list used in ld/st. */
  AARCH64_OPND_LVt_AL, /* AdvSIMD Vector register list for loading single
                          structure to all lanes. */
  AARCH64_OPND_LVn_LUT, /* AdvSIMD Vector register list used in lut. */
  AARCH64_OPND_LEt, /* AdvSIMD Vector Element list. */

  AARCH64_OPND_CRn, /* Co-processor register in CRn field. */
  AARCH64_OPND_CRm, /* Co-processor register in CRm field. */

  AARCH64_OPND_IDX, /* AdvSIMD EXT index operand. */
  AARCH64_OPND_MASK, /* AdvSIMD EXT index operand. */
  AARCH64_OPND_IMM_VLSL, /* Immediate for shifting vector registers left. */
  AARCH64_OPND_IMM_VLSR, /* Immediate for shifting vector registers right. */
  AARCH64_OPND_SIMD_IMM, /* AdvSIMD modified immediate without shift. */
  AARCH64_OPND_SIMD_IMM_SFT, /* AdvSIMD modified immediate with shift. */
  AARCH64_OPND_SIMD_FPIMM, /* AdvSIMD 8-bit fp immediate. */
  AARCH64_OPND_SHLL_IMM, /* Immediate shift for AdvSIMD SHLL instruction
                            (no encoding). */
  AARCH64_OPND_IMM0, /* Immediate for #0. */
  AARCH64_OPND_FPIMM0, /* Immediate for #0.0. */
  AARCH64_OPND_FPIMM, /* Floating-point Immediate. */
  AARCH64_OPND_IMMR, /* Immediate #<immr> in e.g. BFM. */
  AARCH64_OPND_IMMS, /* Immediate #<imms> in e.g. BFM. */
  AARCH64_OPND_WIDTH, /* Immediate #<width> in e.g. BFI. */
  AARCH64_OPND_IMM, /* Immediate. */
  AARCH64_OPND_IMM_2, /* Immediate. */
  AARCH64_OPND_IMMP1_2, /* Immediate plus 1. */
  AARCH64_OPND_IMMS1_2, /* Immediate minus 1. */
  AARCH64_OPND_UIMM3_OP1, /* Unsigned 3-bit immediate in the op1 field. */
  AARCH64_OPND_UIMM3_OP2, /* Unsigned 3-bit immediate in the op2 field. */
  AARCH64_OPND_UIMM4, /* Unsigned 4-bit immediate in the CRm field. */
  AARCH64_OPND_UIMM4_ADDG, /* Unsigned 4-bit immediate in addg/subg. */
  AARCH64_OPND_UIMM7, /* Unsigned 7-bit immediate in the CRm:op2 fields. */
  AARCH64_OPND_UIMM10, /* Unsigned 10-bit immediate in addg/subg. */
  AARCH64_OPND_BIT_NUM, /* Immediate. */
  AARCH64_OPND_EXCEPTION, /* imm16 operand in exception instructions. */
  AARCH64_OPND_UNDEFINED, /* imm16 operand in undefined instruction. */
  AARCH64_OPND_CCMP_IMM, /* Immediate in conditional compare instructions. */
  AARCH64_OPND_SIMM5, /* 5-bit signed immediate in the imm5 field. */
  AARCH64_OPND_NZCV, /* Flag bit specifier giving an alternative value for
                        each condition flag. */

  AARCH64_OPND_LIMM, /* Logical Immediate. */
  AARCH64_OPND_AIMM, /* Arithmetic immediate. */
  AARCH64_OPND_HALF, /* #<imm16>{, LSL #<shift>} operand in move wide. */
  AARCH64_OPND_FBITS, /* FP #<fbits> operand in e.g. SCVTF */
  AARCH64_OPND_IMM_MOV, /* Immediate operand for the MOV alias. */
  AARCH64_OPND_IMM_ROT1, /* Immediate rotate operand for FCMLA. */
  AARCH64_OPND_IMM_ROT2, /* Immediate rotate operand for indexed FCMLA. */
  AARCH64_OPND_IMM_ROT3, /* Immediate rotate operand for FCADD. */

  AARCH64_OPND_COND, /* Standard condition as the last operand. */
  AARCH64_OPND_COND1, /* Same as the above, but excluding AL and NV. */

  AARCH64_OPND_ADDR_ADRP, /* Memory address for ADRP */
  AARCH64_OPND_ADDR_PCREL9, /* 9-bit PC-relative address for e.g. CB<cc>. */
  AARCH64_OPND_ADDR_PCREL14, /* 14-bit PC-relative address for e.g. TBZ. */
  AARCH64_OPND_ADDR_PCREL19, /* 19-bit PC-relative address for e.g. LDR. */
  AARCH64_OPND_ADDR_PCREL21, /* 21-bit PC-relative address for e.g. ADR. */
  AARCH64_OPND_ADDR_PCREL26, /* 26-bit PC-relative address for e.g. BL. */

  AARCH64_OPND_ADDR_SIMPLE, /* Address of ld/st exclusive. */
  AARCH64_OPND_ADDR_REGOFF, /* Address of register offset. */
  AARCH64_OPND_ADDR_SIMM7, /* Address of signed 7-bit immediate. */
  AARCH64_OPND_ADDR_SIMM9, /* Address of signed 9-bit immediate. */
  AARCH64_OPND_ADDR_SIMM9_2, /* Same as the above, but the immediate is
                                negative or unaligned and there is
                                no writeback allowed. This operand code
                                is only used to support the programmer-
                                friendly feature of using LDR/STR as the
                                mnemonic name for LDUR/STUR instructions
                                wherever there is no ambiguity. */
  AARCH64_OPND_ADDR_SIMM10, /* Address of signed 10-bit immediate. */
  AARCH64_OPND_ADDR_SIMM11, /* Address with a signed 11-bit (multiple of
                               16) immediate. */
  AARCH64_OPND_ADDR_UIMM12, /* Address of unsigned 12-bit immediate. */
  AARCH64_OPND_ADDR_SIMM13, /* Address with a signed 13-bit (multiple of
                               16) immediate. */
  AARCH64_OPND_SIMD_ADDR_SIMPLE, /* Address of ld/st multiple structures. */
  AARCH64_OPND_ADDR_OFFSET, /* Address with an optional 9-bit immediate. */
  AARCH64_OPND_SIMD_ADDR_POST, /* Address of ld/st multiple post-indexed. */

  AARCH64_OPND_SYSREG, /* System register operand. */
  AARCH64_OPND_SYSREG128, /* 128-bit system register operand. */
  AARCH64_OPND_PSTATEFIELD, /* PSTATE field name operand. */
  AARCH64_OPND_SYSREG_AT, /* System register <at_op> operand. */
  AARCH64_OPND_SYSREG_DC, /* System register <dc_op> operand. */
  AARCH64_OPND_SYSREG_IC, /* System register <ic_op> operand. */
  AARCH64_OPND_SYSREG_TLBI, /* System register <tlbi_op> operand. */
  AARCH64_OPND_SYSREG_TLBIP, /* System register <tlbip_op> operand. */
  AARCH64_OPND_SYSREG_SR, /* System register RCTX operand. */
  AARCH64_OPND_BARRIER, /* Barrier operand. */
  AARCH64_OPND_BARRIER_DSB_NXS, /* Barrier operand for DSB nXS variant. */
  AARCH64_OPND_BARRIER_ISB, /* Barrier operand for ISB. */
  AARCH64_OPND_PRFOP, /* Prefetch operation. */
  AARCH64_OPND_RPRFMOP, /* Range prefetch operation. */
  AARCH64_OPND_BARRIER_PSB, /* Barrier operand for PSB. */
  AARCH64_OPND_BARRIER_GCSB, /* Barrier operand for GCSB. */
  AARCH64_OPND_BTI_TARGET, /* BTI {<target>}. */
  AARCH64_OPND_BRBOP, /* BRB operation IALL or INJ in bit 5. */
  AARCH64_OPND_Rt_IN_SYS_ALIASES, /* Defaulted and omitted Rt used in SYS aliases such as brb. */
  AARCH64_OPND_LSE128_Rt, /* LSE128 <Xt1>. */
  AARCH64_OPND_LSE128_Rt2, /* LSE128 <Xt2>. */
  AARCH64_OPND_SVE_ADDR_RI_S4x16, /* SVE [<Xn|SP>, #<simm4>*16]. */
  AARCH64_OPND_SVE_ADDR_RI_S4x32, /* SVE [<Xn|SP>, #<simm4>*32]. */
  AARCH64_OPND_SVE_ADDR_RI_S4xVL, /* SVE [<Xn|SP>, #<simm4>, MUL VL]. */
  AARCH64_OPND_SVE_ADDR_RI_S4x2xVL, /* SVE [<Xn|SP>, #<simm4>*2, MUL VL]. */
  AARCH64_OPND_SVE_ADDR_RI_S4x3xVL, /* SVE [<Xn|SP>, #<simm4>*3, MUL VL]. */
  AARCH64_OPND_SVE_ADDR_RI_S4x4xVL, /* SVE [<Xn|SP>, #<simm4>*4, MUL VL]. */
  AARCH64_OPND_SVE_ADDR_RI_S6xVL, /* SVE [<Xn|SP>, #<simm6>, MUL VL]. */
  AARCH64_OPND_SVE_ADDR_RI_S9xVL, /* SVE [<Xn|SP>, #<simm9>, MUL VL]. */
  AARCH64_OPND_SVE_ADDR_RI_U6, /* SVE [<Xn|SP>, #<uimm6>]. */
  AARCH64_OPND_SVE_ADDR_RI_U6x2, /* SVE [<Xn|SP>, #<uimm6>*2]. */
  AARCH64_OPND_SVE_ADDR_RI_U6x4, /* SVE [<Xn|SP>, #<uimm6>*4]. */
  AARCH64_OPND_SVE_ADDR_RI_U6x8, /* SVE [<Xn|SP>, #<uimm6>*8]. */
  AARCH64_OPND_SVE_ADDR_RR, /* SVE [<Xn|SP>{, <Xm|XZR>}]. */
  AARCH64_OPND_SVE_ADDR_RR_LSL1, /* SVE [<Xn|SP>{, <Xm|XZR>, LSL #1}]. */
  AARCH64_OPND_SVE_ADDR_RR_LSL2, /* SVE [<Xn|SP>{, <Xm|XZR>, LSL #2}]. */
  AARCH64_OPND_SVE_ADDR_RR_LSL3, /* SVE [<Xn|SP>{, <Xm|XZR>, LSL #3}]. */
  AARCH64_OPND_SVE_ADDR_RR_LSL4, /* SVE [<Xn|SP>{, <Xm|XZR>, LSL #4}]. */
  AARCH64_OPND_SVE_ADDR_RM, /* SVE [<Xn|SP>, <Xm|XZR>]. */
  AARCH64_OPND_SVE_ADDR_RM_LSL1, /* SVE [<Xn|SP>, <Xm|XZR>, LSL #1]. */
  AARCH64_OPND_SVE_ADDR_RM_LSL2, /* SVE [<Xn|SP>, <Xm|XZR>, LSL #2]. */
  AARCH64_OPND_SVE_ADDR_RM_LSL3, /* SVE [<Xn|SP>, <Xm|XZR>, LSL #3]. */
  AARCH64_OPND_SVE_ADDR_RM_LSL4, /* SVE [<Xn|SP>, <Xm|XZR>, LSL #4]. */
  AARCH64_OPND_SVE_ADDR_RX, /* SVE [<Xn|SP>, <Xm>]. */
  AARCH64_OPND_SVE_ADDR_RX_LSL1, /* SVE [<Xn|SP>, <Xm>, LSL #1]. */
  AARCH64_OPND_SVE_ADDR_RX_LSL2, /* SVE [<Xn|SP>, <Xm>, LSL #2]. */
  AARCH64_OPND_SVE_ADDR_RX_LSL3, /* SVE [<Xn|SP>, <Xm>, LSL #3]. */
  AARCH64_OPND_SVE_ADDR_RX_LSL4, /* SVE [<Xn|SP>, <Xm>, LSL #4]. */
  AARCH64_OPND_SVE_ADDR_ZX, /* SVE [Zn.<T>{, <Xm>}]. */
  AARCH64_OPND_SVE_ADDR_RZ, /* SVE [<Xn|SP>, Zm.D]. */
  AARCH64_OPND_SVE_ADDR_RZ_LSL1, /* SVE [<Xn|SP>, Zm.D, LSL #1]. */
  AARCH64_OPND_SVE_ADDR_RZ_LSL2, /* SVE [<Xn|SP>, Zm.D, LSL #2]. */
  AARCH64_OPND_SVE_ADDR_RZ_LSL3, /* SVE [<Xn|SP>, Zm.D, LSL #3]. */
  AARCH64_OPND_SVE_ADDR_RZ_XTW_14, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW].
                                      Bit 14 controls S/U choice. */
  AARCH64_OPND_SVE_ADDR_RZ_XTW_22, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW].
                                      Bit 22 controls S/U choice. */
  AARCH64_OPND_SVE_ADDR_RZ_XTW1_14, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #1].
                                       Bit 14 controls S/U choice. */
  AARCH64_OPND_SVE_ADDR_RZ_XTW1_22, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #1].
                                       Bit 22 controls S/U choice. */
  AARCH64_OPND_SVE_ADDR_RZ_XTW2_14, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #2].
                                       Bit 14 controls S/U choice. */
  AARCH64_OPND_SVE_ADDR_RZ_XTW2_22, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #2].
                                       Bit 22 controls S/U choice. */
  AARCH64_OPND_SVE_ADDR_RZ_XTW3_14, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #3].
                                       Bit 14 controls S/U choice. */
  AARCH64_OPND_SVE_ADDR_RZ_XTW3_22, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #3].
                                       Bit 22 controls S/U choice. */
  AARCH64_OPND_SVE_ADDR_ZI_U5, /* SVE [Zn.<T>, #<uimm5>]. */
  AARCH64_OPND_SVE_ADDR_ZI_U5x2, /* SVE [Zn.<T>, #<uimm5>*2]. */
  AARCH64_OPND_SVE_ADDR_ZI_U5x4, /* SVE [Zn.<T>, #<uimm5>*4]. */
  AARCH64_OPND_SVE_ADDR_ZI_U5x8, /* SVE [Zn.<T>, #<uimm5>*8]. */
  AARCH64_OPND_SVE_ADDR_ZZ_LSL, /* SVE [Zn.<T>, Zm,<T>, LSL #<msz>]. */
  AARCH64_OPND_SVE_ADDR_ZZ_SXTW, /* SVE [Zn.<T>, Zm,<T>, SXTW #<msz>]. */
  AARCH64_OPND_SVE_ADDR_ZZ_UXTW, /* SVE [Zn.<T>, Zm,<T>, UXTW #<msz>]. */
  AARCH64_OPND_SVE_AIMM, /* SVE unsigned arithmetic immediate. */
  AARCH64_OPND_SVE_ASIMM, /* SVE signed arithmetic immediate. */
  AARCH64_OPND_SVE_FPIMM8, /* SVE 8-bit floating-point immediate. */
  AARCH64_OPND_SVE_I1_HALF_ONE, /* SVE choice between 0.5 and 1.0. */
  AARCH64_OPND_SVE_I1_HALF_TWO, /* SVE choice between 0.5 and 2.0. */
  AARCH64_OPND_SVE_I1_ZERO_ONE, /* SVE choice between 0.0 and 1.0. */
  AARCH64_OPND_SVE_IMM_ROT1, /* SVE 1-bit rotate operand (90 or 270). */
  AARCH64_OPND_SVE_IMM_ROT2, /* SVE 2-bit rotate operand (N*90). */
  AARCH64_OPND_SVE_IMM_ROT3, /* SVE cadd 1-bit rotate (90 or 270). */
  AARCH64_OPND_SVE_INV_LIMM, /* SVE inverted logical immediate. */
  AARCH64_OPND_SVE_LIMM, /* SVE logical immediate. */
  AARCH64_OPND_SVE_LIMM_MOV, /* SVE logical immediate for MOV. */
  AARCH64_OPND_SVE_PATTERN, /* SVE vector pattern enumeration. */
  AARCH64_OPND_SVE_PATTERN_SCALED, /* Likewise, with additional MUL factor. */
  AARCH64_OPND_SVE_PRFOP, /* SVE prefetch operation. */
  AARCH64_OPND_SVE_Pd, /* SVE p0-p15 in Pd. */
  AARCH64_OPND_SVE_PNd, /* SVE pn0-pn15 in Pd. */
  AARCH64_OPND_SVE_Pg3, /* SVE p0-p7 in Pg. */
  AARCH64_OPND_SVE_Pg4_5, /* SVE p0-p15 in Pg, bits [8,5]. */
  AARCH64_OPND_SVE_Pg4_10, /* SVE p0-p15 in Pg, bits [13,10]. */
  AARCH64_OPND_SVE_PNg4_10, /* SVE pn0-pn15 in Pg, bits [13,10]. */
  AARCH64_OPND_SVE_Pg4_16, /* SVE p0-p15 in Pg, bits [19,16]. */
  AARCH64_OPND_SVE_Pm, /* SVE p0-p15 in Pm. */
  AARCH64_OPND_SVE_Pn, /* SVE p0-p15 in Pn. */
  AARCH64_OPND_SVE_PNn, /* SVE pn0-pn15 in Pn. */
  AARCH64_OPND_SVE_Pt, /* SVE p0-p15 in Pt. */
  AARCH64_OPND_SVE_PNt, /* SVE pn0-pn15 in Pt. */
  AARCH64_OPND_SVE_Rm, /* Integer Rm or ZR, alt. SVE position. */
  AARCH64_OPND_SVE_Rn_SP, /* Integer Rn or SP, alt. SVE position. */
  AARCH64_OPND_SVE_SHLIMM_PRED, /* SVE shift left amount (predicated). */
  AARCH64_OPND_SVE_SHLIMM_UNPRED, /* SVE shift left amount (unpredicated). */
  AARCH64_OPND_SVE_SHLIMM_UNPRED_22, /* SVE 3 bit shift left unpred. */
  AARCH64_OPND_SVE_SHRIMM_PRED, /* SVE shift right amount (predicated). */
  AARCH64_OPND_SVE_SHRIMM_UNPRED, /* SVE shift right amount (unpredicated). */
  AARCH64_OPND_SVE_SHRIMM_UNPRED_22, /* SVE 3 bit shift right unpred. */
  AARCH64_OPND_SVE_SIMM5, /* SVE signed 5-bit immediate. */
  AARCH64_OPND_SVE_SIMM5B, /* SVE secondary signed 5-bit immediate. */
  AARCH64_OPND_SVE_SIMM6, /* SVE signed 6-bit immediate. */
  AARCH64_OPND_SVE_SIMM8, /* SVE signed 8-bit immediate. */
  AARCH64_OPND_SVE_UIMM3, /* SVE unsigned 3-bit immediate. */
  AARCH64_OPND_SVE_UIMM7, /* SVE unsigned 7-bit immediate. */
  AARCH64_OPND_SVE_UIMM8, /* SVE unsigned 8-bit immediate. */
  AARCH64_OPND_SVE_UIMM8_53, /* SVE split unsigned 8-bit immediate. */
  AARCH64_OPND_SVE_UIMM4, /* SVE unsigned 4-bit immediate. */
  AARCH64_OPND_SVE_VZn, /* Scalar SIMD&FP register in Zn field. */
  AARCH64_OPND_SVE_Vd, /* Scalar SIMD&FP register in Vd. */
  AARCH64_OPND_SVE_Vm, /* Scalar SIMD&FP register in Vm. */
  AARCH64_OPND_SVE_Vn, /* Scalar SIMD&FP register in Vn. */
  AARCH64_OPND_SME_ZA_array_vrsb_1, /* Tile to vector, two registers (B). */
  AARCH64_OPND_SME_ZA_array_vrsh_1, /* Tile to vector, two registers (H). */
  AARCH64_OPND_SME_ZA_array_vrss_1, /* Tile to vector, two registers (S). */
  AARCH64_OPND_SME_ZA_array_vrsd_1, /* Tile to vector, two registers (D). */
  AARCH64_OPND_SME_ZA_array_vrsb_2, /* Tile to vector, four registers (B). */
  AARCH64_OPND_SME_ZA_array_vrsh_2, /* Tile to vector, four registers (H). */
  AARCH64_OPND_SME_ZA_array_vrss_2, /* Tile to vector, four registers (S). */
  AARCH64_OPND_SME_ZA_array_vrsd_2, /* Tile to vector, four registers (D). */
  AARCH64_OPND_SME_ZA_ARRAY4, /* Tile to vector, single (BHSDQ). */
  AARCH64_OPND_SVE_Za_5, /* SVE vector register in Za, bits [9,5]. */
  AARCH64_OPND_SVE_Za_16, /* SVE vector register in Za, bits [20,16]. */
  AARCH64_OPND_SVE_Zd, /* SVE vector register in Zd. */
  AARCH64_OPND_SVE_Zm_5, /* SVE vector register in Zm, bits [9,5]. */
  AARCH64_OPND_SVE_Zm_16, /* SVE vector register in Zm, bits [20,16]. */
  AARCH64_OPND_SVE_Zm1_23_INDEX, /* SVE bit index in Zm, bit 23. */
  AARCH64_OPND_SVE_Zm2_22_INDEX, /* SVE bit index in Zm, bits [23,22]. */
  AARCH64_OPND_SVE_Zm3_INDEX, /* z0-z7[0-3] in Zm, bits [20,16]. */
  AARCH64_OPND_SVE_Zm3_11_INDEX, /* z0-z7[0-7] in Zm3_INDEX plus bit 11. */
  AARCH64_OPND_SVE_Zm3_12_INDEX, /* SVE bit index in Zm, bits 12 plus bit [23,22]. */
  AARCH64_OPND_SVE_Zm3_19_INDEX, /* z0-z7[0-3] in Zm3_INDEX plus bit 19. */
  AARCH64_OPND_SVE_Zm3_22_INDEX, /* z0-z7[0-7] in Zm3_INDEX plus bit 22. */
  AARCH64_OPND_SVE_Zm3_10_INDEX, /* z0-z7[0-15] in Zm3_INDEX plus bit 11:10. */
  AARCH64_OPND_SVE_Zm4_11_INDEX, /* z0-z15[0-3] in Zm plus bit 11. */
  AARCH64_OPND_SVE_Zm4_INDEX, /* z0-z15[0-1] in Zm, bits [20,16]. */
  AARCH64_OPND_SVE_Zn, /* SVE vector register in Zn. */
  AARCH64_OPND_SVE_Zn_INDEX, /* Indexed SVE vector register, for DUP. */
  AARCH64_OPND_SVE_Zn_5_INDEX, /* Indexed SVE vector register, for DUPQ. */
  AARCH64_OPND_SVE_ZnxN, /* SVE vector register list in Zn. */
  AARCH64_OPND_SVE_Zt, /* SVE vector register in Zt. */
  AARCH64_OPND_SVE_ZtxN, /* SVE vector register list in Zt. */
  AARCH64_OPND_SME_Zdnx2, /* SVE vector register list from [4:1]*2. */
  AARCH64_OPND_SME_Zdnx4, /* SVE vector register list from [4:2]*4. */
  AARCH64_OPND_SME_Zm, /* SVE vector register list in 4-bit Zm. */
  AARCH64_OPND_SME_Zm_17, /* SVE vector register list in [20:17]. */
  AARCH64_OPND_SME_Zmx2, /* SVE vector register list from [20:17]*2. */
  AARCH64_OPND_SME_Zmx4, /* SVE vector register list from [20:18]*4. */
  AARCH64_OPND_SME_Znx2, /* SVE vector register list from [9:6]*2. */
  AARCH64_OPND_SME_Znx2_BIT_INDEX, /* SVE vector register list encoding a bit index from [9:6]*2. */
  AARCH64_OPND_SME_Znx4, /* SVE vector register list from [9:7]*4. */
  AARCH64_OPND_SME_Ztx2_STRIDED, /* SVE vector register list in [4:0]&23. */
  AARCH64_OPND_SME_Ztx4_STRIDED, /* SVE vector register list in [4:0]&19. */
  AARCH64_OPND_SME_ZAda_1b, /* SME <ZAda>.H, 1-bits. */
  AARCH64_OPND_SME_ZAda_2b, /* SME <ZAda>.S, 2-bits. */
  AARCH64_OPND_SME_ZAda_3b, /* SME <ZAda>.D, 3-bits. */
  AARCH64_OPND_SME_ZA_HV_idx_src, /* SME source ZA tile vector. */
  AARCH64_OPND_SME_ZA_HV_idx_srcxN, /* SME N source ZA tile vectors. */
  AARCH64_OPND_SME_ZA_HV_idx_dest, /* SME destination ZA tile vector. */
  AARCH64_OPND_SME_ZA_HV_idx_destxN, /* SME N dest ZA tile vectors. */
  AARCH64_OPND_SME_Pdx2, /* Predicate register list in [3:1]. */
  AARCH64_OPND_SME_PdxN, /* Predicate register list in [3:0]. */
  AARCH64_OPND_SME_Pm, /* SME scalable predicate register, bits [15:13]. */
  AARCH64_OPND_SME_PNd3, /* Predicate-as-counter register, bits [3:0]. */
  AARCH64_OPND_SME_PNg3, /* Predicate-as-counter register, bits [12:10]. */
  AARCH64_OPND_SME_PNn, /* Predicate-as-counter register, bits [8:5]. */
  AARCH64_OPND_SME_PNn3_INDEX1, /* Indexed pred-as-counter reg, bits [8:5]. */
  AARCH64_OPND_SME_PNn3_INDEX2, /* Indexed pred-as-counter reg, bits [9:5]. */
  AARCH64_OPND_SME_list_of_64bit_tiles, /* SME list of ZA tiles. */
  AARCH64_OPND_SME_ZA_HV_idx_ldstr, /* SME destination ZA tile vector. */
  AARCH64_OPND_SME_ZA_array_off1x4, /* SME ZA[<Wv>, #<imm1>*4:<imm1>*4+3]. */
  AARCH64_OPND_SME_ZA_array_off2x2, /* SME ZA[<Wv>, #<imm2>*2:<imm2>*2+1]. */
  AARCH64_OPND_SME_ZA_array_off2x4, /* SME ZA[<Wv>, #<imm2>*4:<imm2>*4+3]. */
  AARCH64_OPND_SME_ZA_array_off3_0, /* SME ZA[<Wv>{, #<imm3>}]. */
  AARCH64_OPND_SME_ZA_array_off3_5, /* SME ZA[<Wv>{, #<imm3>}]. */
  AARCH64_OPND_SME_ZA_array_off3x2, /* SME ZA[<Wv>, #<imm3>*2:<imm3>*2+1]. */
  AARCH64_OPND_SME_ZA_array_off4, /* SME ZA[<Wv>{, #<imm>}]. */
  AARCH64_OPND_SME_ADDR_RI_U4xVL, /* SME [<Xn|SP>{, #<imm>, MUL VL}]. */
  AARCH64_OPND_SME_SM_ZA, /* SME {SM | ZA}. */
  AARCH64_OPND_SME_PnT_Wm_imm, /* SME <Pn>.<T>[<Wm>, #<imm>]. */
  AARCH64_OPND_SME_SHRIMM4, /* 4-bit right shift, bits [19:16]. */
  AARCH64_OPND_SME_SHRIMM5, /* size + 5-bit right shift, bits [23:22,20:16]. */
  AARCH64_OPND_SME_Zm_INDEX1, /* Zn.T[index], bits [19:16,10]. */
  AARCH64_OPND_SME_Zm_INDEX2, /* Zn.T[index], bits [19:16,11:10]. */
  AARCH64_OPND_SME_Zm_INDEX2_3, /* Zn.T[index], bits [19:16,10,3]. */
  AARCH64_OPND_SME_Zm_INDEX3_1, /* Zn.T[index], bits [19:16,10,2:1]. */
  AARCH64_OPND_SME_Zm_INDEX3_2, /* Zn.T[index], bits [19:16,11:10,2]. */
  AARCH64_OPND_SME_Zm_INDEX3_3, /* Zn.T[index], bits [19:16,11:10,3]. */
  AARCH64_OPND_SME_Zm_INDEX3_10, /* Zn.T[index], bits [19:16,15,11:10]. */
  AARCH64_OPND_SME_Zm_INDEX4_1, /* Zn.T[index], bits [19:16,11:10,2:1]. */
  AARCH64_OPND_SME_Zm_INDEX4_2, /* Zn.T[index], bits [19:16,11:10,3:2]. */
  AARCH64_OPND_SME_Zm_INDEX4_3, /* Zn.T[index], bits [19:16,15,11,10,3]. */
  AARCH64_OPND_SME_Zm_INDEX4_10, /* Zn.T[index], bits [19:16,15,12:10]. */
  AARCH64_OPND_SME_Zn_INDEX1_16, /* Zn[index], bits [9:5] and [16:16]. */
  AARCH64_OPND_SME_Zn_INDEX2_15, /* Zn[index], bits [9:5] and [16:15]. */
  AARCH64_OPND_SME_Zn_INDEX2_16, /* Zn[index], bits [9:5] and [17:16]. */
  AARCH64_OPND_SME_Zn_INDEX3_14, /* Zn[index], bits [9:5] and [16:14]. */
  AARCH64_OPND_SME_Zn_INDEX3_15, /* Zn[index], bits [9:5] and [17:15]. */
  AARCH64_OPND_SME_Zn_INDEX4_14, /* Zn[index], bits [9:5] and [17:14]. */
  AARCH64_OPND_SVE_Zn0_INDEX, /* Zn[index], bits [9:5]. */
  AARCH64_OPND_SVE_Zn1_17_INDEX, /* Zn[index], bits [9:5,17]. */
  AARCH64_OPND_SVE_Zn2_18_INDEX, /* Zn[index], bits [9:5,18:17]. */
  AARCH64_OPND_SVE_Zn3_22_INDEX, /* Zn[index], bits [9:5,18:17,22]. */
  AARCH64_OPND_SVE_Zd0_INDEX, /* Zn[index], bits [4:0]. */
  AARCH64_OPND_SVE_Zd1_17_INDEX, /* Zn[index], bits [4:0,17]. */
  AARCH64_OPND_SVE_Zd2_18_INDEX, /* Zn[index], bits [4:0,18:17]. */
  AARCH64_OPND_SVE_Zd3_22_INDEX, /* Zn[index], bits [4:0,18:17,22]. */
  AARCH64_OPND_SME_VLxN_10, /* VLx2 or VLx4, in bit 10. */
  AARCH64_OPND_SME_VLxN_13, /* VLx2 or VLx4, in bit 13. */
  AARCH64_OPND_SME_ZT0, /* The fixed token zt0/ZT0 (not encoded). */
  AARCH64_OPND_SME_ZT0_INDEX, /* ZT0[<imm>], bits [14:12]. */
  AARCH64_OPND_SME_ZT0_INDEX_MUL_VL, /* ZT0[<imm>], bits [13:12]. */
  AARCH64_OPND_SME_ZT0_LIST, /* { zt0/ZT0 } (not encoded). */
  AARCH64_OPND_TME_UIMM16, /* TME unsigned 16-bit immediate. */
  AARCH64_OPND_SM3_IMM2, /* SM3 encodes lane in bits [13, 14]. */
  AARCH64_OPND_MOPS_ADDR_Rd, /* [Rd]!, in bits [0, 4]. */
  AARCH64_OPND_MOPS_ADDR_Rs, /* [Rs]!, in bits [16, 20]. */
  AARCH64_OPND_MOPS_WB_Rn, /* Rn!, in bits [5, 9]. */
  AARCH64_OPND_CSSC_SIMM8, /* CSSC signed 8-bit immediate. */
  AARCH64_OPND_CSSC_UIMM8, /* CSSC unsigned 8-bit immediate. */
  AARCH64_OPND_RCPC3_ADDR_OPT_POSTIND, /* [<Xn|SP>]{, #<imm>}. */
  AARCH64_OPND_RCPC3_ADDR_OPT_PREIND_WB, /* [<Xn|SP>] or [<Xn|SP>, #<imm>]!. */
  AARCH64_OPND_RCPC3_ADDR_POSTIND, /* [<Xn|SP>], #<imm>. */
  AARCH64_OPND_RCPC3_ADDR_PREIND_WB, /* [<Xn|SP>, #<imm>]!. */
  AARCH64_OPND_RCPC3_ADDR_OFFSET,
};

/* Qualifier constrains an operand. It either specifies a variant of an
   operand type or limits values available to an operand type.

   N.B. Order is important.
   Keep aarch64_opnd_qualifiers (opcodes/aarch64-opc.c) synced. */

enum aarch64_opnd_qualifier
{
  /* Indicating no further qualification on an operand. */
  AARCH64_OPND_QLF_NIL,

  /* Qualifying an operand which is a general purpose (integer) register;
     indicating the operand data size or a specific register. */
  AARCH64_OPND_QLF_W, /* Wn, WZR or WSP. */
  AARCH64_OPND_QLF_X, /* Xn, XZR or XSP. */
  AARCH64_OPND_QLF_WSP, /* WSP. */
  AARCH64_OPND_QLF_SP, /* SP. */

  /* Qualifying an operand which is a floating-point register, a SIMD
     vector element or a SIMD vector element list; indicating operand data
     size or the size of each SIMD vector element in the case of a SIMD
     vector element list.
     These qualifiers are also used to qualify an address operand to
     indicate the size of data element a load/store instruction is
     accessing.
     They are also used for the immediate shift operand in e.g. SSHR. Such
     a use is only for the ease of operand encoding/decoding and qualifier
     sequence matching; such a use should not be applied widely; use the value
     constraint qualifiers for immediate operands wherever possible. */
  AARCH64_OPND_QLF_S_B,
  AARCH64_OPND_QLF_S_H,
  AARCH64_OPND_QLF_S_S,
  AARCH64_OPND_QLF_S_D,
  AARCH64_OPND_QLF_S_Q,
  /* These type qualifiers have a special meaning in that they select
     2 x 1 byte, 4 x 1 byte or 2 x 2 byte elements in the instruction.
     Other than that they do not differ from AARCH64_OPND_QLF_S_B in
     encoding. They are here purely for syntactical reasons and are an
     exception to the normal AArch64 disassembly scheme. */
  AARCH64_OPND_QLF_S_2B,
  AARCH64_OPND_QLF_S_4B,
  AARCH64_OPND_QLF_S_2H,

  /* Qualifying an operand which is a SIMD vector register or a SIMD vector
     register list; indicating register shape.
     They are also used for the immediate shift operand in e.g. SSHR. Such
     a use is only for the ease of operand encoding/decoding and qualifier
     sequence matching; such a use should not be applied widely; use the value
     constraint qualifiers for immediate operands wherever possible. */
  AARCH64_OPND_QLF_V_4B,
  AARCH64_OPND_QLF_V_8B,
  AARCH64_OPND_QLF_V_16B,
  AARCH64_OPND_QLF_V_2H,
  AARCH64_OPND_QLF_V_4H,
  AARCH64_OPND_QLF_V_8H,
  AARCH64_OPND_QLF_V_2S,
  AARCH64_OPND_QLF_V_4S,
  AARCH64_OPND_QLF_V_1D,
  AARCH64_OPND_QLF_V_2D,
  AARCH64_OPND_QLF_V_1Q,

  AARCH64_OPND_QLF_P_Z,
  AARCH64_OPND_QLF_P_M,

  /* Used in scaled signed immediate that are scaled by a Tag granule
     like in stg, st2g, etc. */
  AARCH64_OPND_QLF_imm_tag,

  /* Constraint on value. */
  AARCH64_OPND_QLF_CR, /* CRn, CRm. */
  AARCH64_OPND_QLF_imm_0_7,
  AARCH64_OPND_QLF_imm_0_15,
  AARCH64_OPND_QLF_imm_0_31,
  AARCH64_OPND_QLF_imm_0_63,
  AARCH64_OPND_QLF_imm_1_32,
  AARCH64_OPND_QLF_imm_1_64,

  /* Indicate whether an AdvSIMD modified immediate operand is shift-zeros
     or shift-ones. */
  AARCH64_OPND_QLF_LSL,
  AARCH64_OPND_QLF_MSL,

  /* Special qualifier helping retrieve qualifier information during the
     decoding time (currently not in use). */
  AARCH64_OPND_QLF_RETRIEVE,

  /* Special qualifier used for indicating error in qualifier retrieval. */
  AARCH64_OPND_QLF_ERR,
} ATTRIBUTE_PACKED;

/* Instruction class. */

enum aarch64_insn_class
{
  aarch64_misc,
  addsub_carry,
  addsub_ext,
  addsub_imm,
  addsub_shift,
  asimdall,
  asimddiff,
  asimdelem,
  asimdext,
  asimdimm,
  asimdins,
  asimdmisc,
  asimdperm,
  asimdsame,
  asimdshf,
  asimdtbl,
  asisddiff,
  asisdelem,
  asisdlse,
  asisdlsep,
  asisdlso,
  asisdlsop,
  asisdmisc,
  asisdone,
  asisdpair,
  asisdsame,
  asisdshf,
  bitfield,
  branch_imm,
  branch_reg,
  compbranch,
  condbranch,
  condcmp_imm,
  condcmp_reg,
  condsel,
  cryptoaes,
  cryptosha2,
  cryptosha3,
  dp_1src,
  dp_2src,
  dp_3src,
  exception,
  extract,
  float2fix,
  float2int,
  floatccmp,
  floatcmp,
  floatdp1,
  floatdp2,
  floatdp3,
  floatimm,
  floatsel,
  fprcvtfloat2int,
  fprcvtint2float,
1148 | ldst_immpost, | |
1149 | ldst_immpre, | |
1150 | ldst_imm9, /* immpost or immpre */ | |
1151 | ldst_imm10, /* LDRAA/LDRAB */ | |
1152 | ldst_pos, | |
1153 | ldst_regoff, | |
1154 | ldst_unpriv, | |
1155 | ldst_unscaled, | |
1156 | ldstexcl, | |
1157 | ldstnapair_offs, | |
1158 | ldstpair_off, | |
1159 | ldstpair_indexed, | |
1160 | loadlit, | |
1161 | log_imm, | |
1162 | log_shift, | |
1163 | lse_atomic, | |
1164 | lse128_atomic, | |
1165 | movewide, | |
1166 | pcreladdr, | |
1167 | ic_system, | |
1168 | sme_fp_sd, | |
1169 | sme_int_sd, | |
1170 | sme_misc, | |
1171 | sme_mov, | |
1172 | sme_ldr, | |
1173 | sme_psel, | |
1174 | sme_shift, | |
1175 | sme_size_12_bh, | |
1176 | sme_size_12_bhs, | |
1177 | sme_size_12_hs, | |
1178 | sme_size_12_b, | |
1179 | sme_size_22, | |
1180 | sme_size_22_hsd, | |
1181 | sme_sz_23, | |
1182 | sme_str, | |
1183 | sme_start, | |
1184 | sme_stop, | |
1185 | sme2_mov, | |
1186 | sme2_movaz, | |
1187 | sve_cpy, | |
1188 | sve_index, | |
1189 | sve_limm, | |
1190 | sve_misc, | |
1191 | sve_movprfx, | |
1192 | sve_pred_zm, | |
1193 | sve_shift_pred, | |
1194 | sve_shift_unpred, | |
1195 | sve_size_bhs, | |
1196 | sve_size_bhsd, | |
1197 | sve_size_hsd, | |
1198 | sve_size_hsd2, | |
1199 | sve_size_sd, | |
1200 | sve_size_bh, | |
1201 | sve_size_sd2, | |
1202 | sve_size_13, | |
1203 | sve_shift_tsz_hsd, | |
1204 | sve_shift_tsz_bhsd, | |
1205 | sve_size_tsz_bhs, | |
1206 | testbranch, | |
1207 | cryptosm3, | |
1208 | cryptosm4, | |
1209 | dotproduct, | |
1210 | bfloat16, | |
1211 | cssc, | |
1212 | gcs, | |
1213 | the, | |
1214 | sve2_urqvs, | |
1215 | sve_index1, | |
1216 | rcpc3, | |
1217 | lut, | |
1218 | last_iclass = lut | |
1219 | }; | |
1220 | ||
1221 | /* Opcode enumerators. */ | |
1222 | ||
1223 | enum aarch64_op | |
1224 | { | |
1225 | OP_NIL, | |
1226 | OP_STRB_POS, | |
1227 | OP_LDRB_POS, | |
1228 | OP_LDRSB_POS, | |
1229 | OP_STRH_POS, | |
1230 | OP_LDRH_POS, | |
1231 | OP_LDRSH_POS, | |
1232 | OP_STR_POS, | |
1233 | OP_LDR_POS, | |
1234 | OP_STRF_POS, | |
1235 | OP_LDRF_POS, | |
1236 | OP_LDRSW_POS, | |
1237 | OP_PRFM_POS, | |
1238 | ||
1239 | OP_STURB, | |
1240 | OP_LDURB, | |
1241 | OP_LDURSB, | |
1242 | OP_STURH, | |
1243 | OP_LDURH, | |
1244 | OP_LDURSH, | |
1245 | OP_STUR, | |
1246 | OP_LDUR, | |
1247 | OP_STURV, | |
1248 | OP_LDURV, | |
1249 | OP_LDURSW, | |
1250 | OP_PRFUM, | |
1251 | ||
1252 | OP_LDR_LIT, | |
1253 | OP_LDRV_LIT, | |
1254 | OP_LDRSW_LIT, | |
1255 | OP_PRFM_LIT, | |
1256 | ||
1257 | OP_ADD, | |
1258 | OP_B, | |
1259 | OP_BL, | |
1260 | ||
1261 | OP_MOVN, | |
1262 | OP_MOVZ, | |
1263 | OP_MOVK, | |
1264 | ||
1265 | OP_MOV_IMM_LOG, /* MOV alias for moving bitmask immediate. */ | |
1266 | OP_MOV_IMM_WIDE, /* MOV alias for moving wide immediate. */ | |
1267 | OP_MOV_IMM_WIDEN, /* MOV alias for moving wide immediate (negated). */ | |
1268 | ||
1269 | OP_MOV_V, /* MOV alias for moving vector register. */ | |
1270 | ||
1271 | OP_ASR_IMM, | |
1272 | OP_LSR_IMM, | |
1273 | OP_LSL_IMM, | |
1274 | ||
1275 | OP_BIC, | |
1276 | ||
1277 | OP_UBFX, | |
1278 | OP_BFXIL, | |
1279 | OP_SBFX, | |
1280 | OP_SBFIZ, | |
1281 | OP_BFI, | |
1282 | OP_BFC, /* ARMv8.2. */ | |
1283 | OP_UBFIZ, | |
1284 | OP_UXTB, | |
1285 | OP_UXTH, | |
1286 | OP_UXTW, | |
1287 | ||
1288 | OP_CINC, | |
1289 | OP_CINV, | |
1290 | OP_CNEG, | |
1291 | OP_CSET, | |
1292 | OP_CSETM, | |
1293 | ||
1294 | OP_FCVT, | |
1295 | OP_FCVTN, | |
1296 | OP_FCVTN2, | |
1297 | OP_FCVTL, | |
1298 | OP_FCVTL2, | |
1299 | OP_FCVTXN_S, /* Scalar version. */ | |
1300 | ||
1301 | OP_ROR_IMM, | |
1302 | ||
1303 | OP_SXTL, | |
1304 | OP_SXTL2, | |
1305 | OP_UXTL, | |
1306 | OP_UXTL2, | |
1307 | ||
1308 | OP_MOV_P_P, | |
1309 | OP_MOV_PN_PN, | |
1310 | OP_MOV_Z_P_Z, | |
1311 | OP_MOV_Z_V, | |
1312 | OP_MOV_Z_Z, | |
1313 | OP_MOV_Z_Zi, | |
1314 | OP_MOVM_P_P_P, | |
1315 | OP_MOVS_P_P, | |
1316 | OP_MOVZS_P_P_P, | |
1317 | OP_MOVZ_P_P_P, | |
1318 | OP_NOTS_P_P_P_Z, | |
1319 | OP_NOT_P_P_P_Z, | |
1320 | ||
1321 | OP_FCMLA_ELEM, /* ARMv8.3, indexed element version. */ | |
1322 | ||
1323 | OP_TOTAL_NUM, /* Pseudo. */ | |
1324 | }; | |
1325 | ||
1326 | /* Error types. */ | |
1327 | enum err_type | |
1328 | { | |
1329 | ERR_OK, | |
1330 | ERR_UND, | |
1331 | ERR_UNP, | |
1332 | ERR_NYI, | |
1333 | ERR_VFI, | |
1334 | ERR_NR_ENTRIES | |
1335 | }; | |
1336 | ||
1337 | /* Maximum number of operands an instruction can have. */ | |
1338 | #define AARCH64_MAX_OPND_NUM 7 | |
1339 | /* Maximum number of qualifier sequences an instruction can have. */ | |
1340 | #define AARCH64_MAX_QLF_SEQ_NUM 10 | |
1341 | /* Operand qualifier typedef */ | |
1342 | typedef enum aarch64_opnd_qualifier aarch64_opnd_qualifier_t; | |
1343 | /* Operand qualifier sequence typedef. */ | |
1344 | typedef aarch64_opnd_qualifier_t \ | |
1345 | aarch64_opnd_qualifier_seq_t [AARCH64_MAX_OPND_NUM]; | |
1346 | ||
1347 | /* FIXME: improve the efficiency. */ | |
1348 | static inline bool | |
1349 | empty_qualifier_sequence_p (const aarch64_opnd_qualifier_t *qualifiers) | |
1350 | { | |
1351 | int i; | |
1352 | for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i) | |
1353 | if (qualifiers[i] != AARCH64_OPND_QLF_NIL) | |
1354 | return false; | |
1355 | return true; | |
1356 | } | |
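/* Illustrative sketch (not part of the interface): an all-NIL sequence is
   treated as empty, and any non-NIL entry makes it non-empty.  The use of
   AARCH64_OPND_QLF_SP below is just an arbitrary non-NIL qualifier.  */
static inline void
example_empty_qualifier_sequence (void)
{
  aarch64_opnd_qualifier_seq_t seq
    = { AARCH64_OPND_QLF_NIL, AARCH64_OPND_QLF_NIL, AARCH64_OPND_QLF_NIL,
	AARCH64_OPND_QLF_NIL, AARCH64_OPND_QLF_NIL, AARCH64_OPND_QLF_NIL,
	AARCH64_OPND_QLF_NIL };
  assert (empty_qualifier_sequence_p (seq));
  seq[0] = AARCH64_OPND_QLF_SP;
  assert (!empty_qualifier_sequence_p (seq));
}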
1357 | ||
1358 | /* Forward declare error reporting type. */ | |
1359 | typedef struct aarch64_operand_error aarch64_operand_error; | |
1360 | /* Forward declare instruction sequence type. */ | |
1361 | typedef struct aarch64_instr_sequence aarch64_instr_sequence; | |
1362 | /* Forward declare instruction definition. */ | |
1363 | typedef struct aarch64_inst aarch64_inst; | |
1364 | ||
1365 | /* This structure holds information for a particular opcode. */ | |
1366 | ||
1367 | struct aarch64_opcode | |
1368 | { | |
1369 | /* The name of the mnemonic. */ | |
1370 | const char *name; | |
1371 | ||
1372 | /* The opcode itself. Those bits which will be filled in with | |
1373 | operands are zeroes. */ | |
1374 | aarch64_insn opcode; | |
1375 | ||
1376 | /* The opcode mask. This is used by the disassembler. This is a | |
1377 | mask containing ones indicating those bits which must match the | |
1378 | opcode field, and zeroes indicating those bits which need not | |
1379 | match (and are presumably filled in by operands). */ | |
1380 | aarch64_insn mask; | |
1381 | ||
1382 | /* Instruction class. */ | |
1383 | enum aarch64_insn_class iclass; | |
1384 | ||
1385 | /* Enumerator identifier. */ | |
1386 | enum aarch64_op op; | |
1387 | ||
1388 | /* Which architecture variant provides this instruction. */ | |
1389 | const aarch64_feature_set *avariant; | |
1390 | ||
1391 | /* An array of operand codes. Each code is an index into the | |
1392 | operand table. They appear in the order in which the operands must | |
1393 | appear in assembly code, and are terminated by a zero. */ | |
1394 | enum aarch64_opnd operands[AARCH64_MAX_OPND_NUM]; | |
1395 | ||
1396 | /* A list of operand qualifier code sequences. Each operand qualifier | |
1397 | code qualifies the corresponding operand code. Each operand | |
1398 | qualifier sequence specifies a valid opcode variant and the related | |
1399 | constraints on its operands. */ | |
1400 | aarch64_opnd_qualifier_seq_t qualifiers_list[AARCH64_MAX_QLF_SEQ_NUM]; | |
1401 | ||
1402 | /* Flags providing information about this instruction. */ | |
1403 | uint64_t flags; | |
1404 | ||
1405 | /* Extra constraints on the instruction that the verifier checks. */ | |
1406 | uint32_t constraints; | |
1407 | ||
1408 | /* If nonzero, this operand and operand 0 are both registers and | |
1409 | are required to have the same register number. */ | |
1410 | unsigned char tied_operand; | |
1411 | ||
1412 | /* If non-NULL, a function to verify that a given instruction is valid. */ | |
1413 | enum err_type (* verifier) (const struct aarch64_inst *, const aarch64_insn, | |
1414 | bfd_vma, bool, aarch64_operand_error *, | |
1415 | struct aarch64_instr_sequence *); | |
1416 | }; | |
1417 | ||
1418 | typedef struct aarch64_opcode aarch64_opcode; | |
1419 | ||
1420 | /* Table describing all the AArch64 opcodes. */ | |
1421 | extern const aarch64_opcode aarch64_opcode_table[]; | |
1422 | ||
1423 | /* Opcode flags. */ | |
1424 | #define F_ALIAS (1 << 0) | |
1425 | #define F_HAS_ALIAS (1 << 1) | |
1426 | /* Disassembly preference priority 1-3 (the larger the value, the higher the | |
1427 | priority). If nothing is specified, the priority defaults to 0, i.e. the lowest. */ | |
1428 | #define F_P1 (1 << 2) | |
1429 | #define F_P2 (2 << 2) | |
1430 | #define F_P3 (3 << 2) | |
1431 | /* Flag an instruction that is truly conditionally executed, e.g. b.cond. */ | |
1432 | #define F_COND (1 << 4) | |
1433 | /* Instruction has the field of 'sf'. */ | |
1434 | #define F_SF (1 << 5) | |
1435 | /* Instruction has the field of 'size:Q'. */ | |
1436 | #define F_SIZEQ (1 << 6) | |
1437 | /* Floating-point instruction has the field of 'type'. */ | |
1438 | #define F_FPTYPE (1 << 7) | |
1439 | /* AdvSIMD scalar instruction has the field of 'size'. */ | |
1440 | #define F_SSIZE (1 << 8) | |
1441 | /* AdvSIMD vector register arrangement specifier encoded in "imm5<3:0>:Q". */ | |
1442 | #define F_T (1 << 9) | |
1443 | /* Size of GPR operand in AdvSIMD instructions encoded in Q. */ | |
1444 | #define F_GPRSIZE_IN_Q (1 << 10) | |
1445 | /* Size of Rt load signed instruction encoded in opc[0], i.e. bit 22. */ | |
1446 | #define F_LDS_SIZE (1 << 11) | |
1447 | /* Optional operand; assume at most one operand can be optional. */ | |
1448 | #define F_OPD0_OPT (1 << 12) | |
1449 | #define F_OPD1_OPT (2 << 12) | |
1450 | #define F_OPD2_OPT (3 << 12) | |
1451 | #define F_OPD3_OPT (4 << 12) | |
1452 | #define F_OPD4_OPT (5 << 12) | |
1453 | /* Default value for the optional operand when omitted from the assembly. */ | |
1454 | #define F_DEFAULT(X) (((X) & 0x1f) << 15) | |
1455 | /* An instruction that is an alias of another instruction needs to be | |
1456 | encoded/decoded by converting it to/from the real form, followed by | |
1457 | encoding/decoding according to the rules of the real opcode, | |
1458 | as opposed to coding it directly using the alias's own information. | |
1459 | N.B. this flag must be used together with F_ALIAS. */ | |
1460 | #define F_CONV (1 << 20) | |
1461 | /* Use together with F_ALIAS to indicate that an alias opcode is a | |
1462 | programmer-friendly pseudo instruction available only in assembly code | |
1463 | (and thus will not show up in disassembly). */ | |
1464 | #define F_PSEUDO (1 << 21) | |
1465 | /* Instruction has miscellaneous encoding/decoding rules. */ | |
1466 | #define F_MISC (1 << 22) | |
1467 | /* Instruction has the field of 'N'; used in conjunction with F_SF. */ | |
1468 | #define F_N (1 << 23) | |
1469 | /* Opcode dependent field. */ | |
1470 | #define F_OD(X) (((X) & 0x7) << 24) | |
1471 | /* Instruction has the field of 'sz'. */ | |
1472 | #define F_LSE_SZ (1 << 27) | |
1473 | /* Require an exact qualifier match, even for NIL qualifiers. */ | |
1474 | #define F_STRICT (1ULL << 28) | |
1475 | /* This system instruction is used to read system registers. */ | |
1476 | #define F_SYS_READ (1ULL << 29) | |
1477 | /* This system instruction is used to write system registers. */ | |
1478 | #define F_SYS_WRITE (1ULL << 30) | |
1479 | /* This instruction has an extra constraint on it that imposes a requirement on | |
1480 | subsequent instructions. */ | |
1481 | #define F_SCAN (1ULL << 31) | |
1482 | /* Instruction takes a pair of optional operands. If we specify the Nth operand | |
1483 | to be optional, then we also implicitly specify that the (N+1)th operand is | |
1484 | optional. */ | |
1485 | #define F_OPD_PAIR_OPT (1ULL << 32) | |
1486 | /* This instruction does not allow the full range of values that the | |
1487 | width of fields in the assembler instruction would theoretically | |
1488 | allow. This impacts the constraints on assembly but yields no | |
1489 | impact on disassembly. */ | |
1490 | #define F_OPD_NARROW (1ULL << 33) | |
1491 | /* For instructions with the size[22:23] field. */ | |
1492 | #define F_OPD_SIZE (1ULL << 34) | |
1493 | /* RCPC3 instruction has the field of 'size'. */ | |
1494 | #define F_RCPC3_SIZE (1ULL << 35) | |
1495 | /* This instruction requires VGx2 or VGx4 in the operand passed to the | |
1496 | assembler. */ | |
1497 | #define F_VG_REQ (1ULL << 36) | |
1498 | ||
1499 | /* 4-bit flag field to indicate the subclass of an instruction. | |
1500 | Note the overlap between the sets of subclass flags in each logical category | |
1501 | (F_LDST_*, F_ARITH_*, F_BRANCH_* etc.); the use of these flags as | |
1502 | iclass-specific enums is intentional. */ | |
1503 | #define F_SUBCLASS (15ULL << 37) | |
1504 | ||
1505 | #define F_LDST_LOAD (1ULL << 37) | |
1506 | #define F_LDST_STORE (2ULL << 37) | |
1507 | /* Subclasses to denote add, sub and mov insns. */ | |
1508 | #define F_ARITH_ADD (1ULL << 37) | |
1509 | #define F_ARITH_SUB (2ULL << 37) | |
1510 | #define F_ARITH_MOV (3ULL << 37) | |
1511 | /* Subclasses to denote call and ret insns. */ | |
1512 | #define F_BRANCH_CALL (1ULL << 37) | |
1513 | #define F_BRANCH_RET (2ULL << 37) | |
1514 | /* Subclass to denote that only tag update is involved. */ | |
1515 | #define F_DP_TAG_ONLY (1ULL << 37) | |
1516 | ||
1517 | #define F_SUBCLASS_OTHER (F_SUBCLASS) | |
1518 | ||
1519 | /* For LSFE instructions with the size[30:31] field. */ | |
1520 | #define F_LSFE_SZ (1ULL << 41) | |
1521 | /* Next bit is 42. */ | |
1522 | ||
1523 | /* Instruction constraints. */ | |
1524 | /* This instruction has a predication constraint on the instruction at PC+4. */ | |
1525 | #define C_SCAN_MOVPRFX (1U << 0) | |
1526 | /* This instruction's operation width is determined by the operand with the | |
1527 | largest element size. */ | |
1528 | #define C_MAX_ELEM (1U << 1) | |
1529 | #define C_SCAN_MOPS_P (1U << 2) | |
1530 | #define C_SCAN_MOPS_M (2U << 2) | |
1531 | #define C_SCAN_MOPS_E (3U << 2) | |
1532 | #define C_SCAN_MOPS_PME (3U << 2) | |
1533 | /* Next bit is 4. */ | |
1534 | ||
1535 | static inline bool | |
1536 | alias_opcode_p (const aarch64_opcode *opcode) | |
1537 | { | |
1538 | return (opcode->flags & F_ALIAS) != 0; | |
1539 | } | |
1540 | ||
1541 | static inline bool | |
1542 | opcode_has_alias (const aarch64_opcode *opcode) | |
1543 | { | |
1544 | return (opcode->flags & F_HAS_ALIAS) != 0; | |
1545 | } | |
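/* Illustrative sketch: walking the opcode table with the helpers above.
   This assumes the table ends with a sentinel entry whose name is NULL,
   which is how the generated table is conventionally terminated; treat
   that as an assumption of the sketch rather than a guarantee of this
   header.  */
static inline size_t
example_count_real_opcodes (void)
{
  size_t n = 0;
  const aarch64_opcode *ent;
  for (ent = aarch64_opcode_table; ent->name != NULL; ent++)
    if (!alias_opcode_p (ent))
      n++;	/* Count only non-alias (real) encodings.  */
  return n;
}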
1546 | ||
1547 | /* Priority for disassembling preference. */ | |
1548 | static inline int | |
1549 | opcode_priority (const aarch64_opcode *opcode) | |
1550 | { | |
1551 | return (opcode->flags >> 2) & 0x3; | |
1552 | } | |
1553 | ||
1554 | static inline bool | |
1555 | pseudo_opcode_p (const aarch64_opcode *opcode) | |
1556 | { | |
1557 | return (opcode->flags & F_PSEUDO) != 0lu; | |
1558 | } | |
1559 | ||
1560 | /* Whether the opcode has the specific subclass flag. | |
1561 | N.B. The overlap between F_LDST_*, F_ARITH_*, and F_BRANCH_* etc. subclass | |
1562 | flags means that the callers of this function have the responsibility of | |
1563 | checking for the flags appropriate for the specific iclass. */ | |
1564 | static inline bool | |
1565 | aarch64_opcode_subclass_p (const aarch64_opcode *opcode, uint64_t flag) | |
1566 | { | |
1567 | return ((opcode->flags & F_SUBCLASS) == flag); | |
1568 | } | |
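/* Illustrative sketch of the caveat above: the subclass bits only have a
   well-defined meaning for a particular iclass, so check the iclass before
   interpreting them.  The iclasses listed here are just a sample of the
   load/store classes, not an exhaustive list.  */
static inline bool
example_is_load (const aarch64_opcode *opcode)
{
  switch (opcode->iclass)
    {
    case ldst_pos:
    case ldst_imm9:
    case ldst_regoff:
    case ldst_unscaled:
      return aarch64_opcode_subclass_p (opcode, F_LDST_LOAD);
    default:
      return false;
    }
}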
1569 | ||
1570 | /* Deal with two possible scenarios: if F_OPD_PAIR_OPT is not set, as is the | |
1571 | case by default, F_OPDn_OPT must equal IDX + 1; otherwise F_OPDn_OPT must be | |
1572 | in the range [IDX, IDX + 1]. */ | |
1573 | static inline bool | |
1574 | optional_operand_p (const aarch64_opcode *opcode, unsigned int idx) | |
1575 | { | |
1576 | if (opcode->flags & F_OPD_PAIR_OPT) | |
1577 | return (((opcode->flags >> 12) & 0x7) == idx | |
1578 | || ((opcode->flags >> 12) & 0x7) == idx + 1); | |
1579 | return ((opcode->flags >> 12) & 0x7) == idx + 1; | |
1580 | } | |
1581 | ||
1582 | static inline aarch64_insn | |
1583 | get_optional_operand_default_value (const aarch64_opcode *opcode) | |
1584 | { | |
1585 | return (opcode->flags >> 15) & 0x1f; | |
1586 | } | |
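/* Illustrative sketch: how an optional operand and its default value are
   packed into the flags word and recovered by the two helpers above.  The
   flags value is hypothetical and not taken from any real opcode entry.  */
static inline void
example_optional_operand_flags (void)
{
  aarch64_opcode tmp = { 0 };
  /* Say operand 2 is optional and defaults to the value 31.  */
  tmp.flags = F_OPD2_OPT | F_DEFAULT (31);
  assert (optional_operand_p (&tmp, 2));
  assert (!optional_operand_p (&tmp, 1));
  assert (get_optional_operand_default_value (&tmp) == 31);
}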
1587 | ||
1588 | static inline unsigned int | |
1589 | get_opcode_dependent_value (const aarch64_opcode *opcode) | |
1590 | { | |
1591 | return (opcode->flags >> 24) & 0x7; | |
1592 | } | |
1593 | ||
1594 | static inline bool | |
1595 | get_opcode_dependent_vg_status (const aarch64_opcode *opcode) | |
1596 | { | |
1597 | return (opcode->flags >> 36) & 0x1; | |
1598 | } | |
1599 | ||
1600 | static inline bool | |
1601 | opcode_has_special_coder (const aarch64_opcode *opcode) | |
1602 | { | |
1603 | return (opcode->flags & (F_SF | F_LSE_SZ | F_SIZEQ | F_FPTYPE | F_SSIZE | F_T | |
1604 | | F_GPRSIZE_IN_Q | F_LDS_SIZE | F_MISC | F_N | F_COND | |
1605 | | F_OPD_SIZE | F_RCPC3_SIZE | F_LSFE_SZ )) != 0; | |
1606 | } | |
1607 | \f | |
1608 | struct aarch64_name_value_pair | |
1609 | { | |
1610 | const char * name; | |
1611 | aarch64_insn value; | |
1612 | }; | |
1613 | ||
1614 | extern const struct aarch64_name_value_pair aarch64_operand_modifiers []; | |
1615 | extern const struct aarch64_name_value_pair aarch64_barrier_options [16]; | |
1616 | extern const struct aarch64_name_value_pair aarch64_barrier_dsb_nxs_options [4]; | |
1617 | extern const struct aarch64_name_value_pair aarch64_prfops [32]; | |
1618 | extern const struct aarch64_name_value_pair aarch64_hint_options []; | |
1619 | ||
1620 | #define AARCH64_MAX_SYSREG_NAME_LEN 32 | |
1621 | ||
1622 | typedef struct | |
1623 | { | |
1624 | const char * name; | |
1625 | aarch64_insn value; | |
1626 | uint32_t flags; | |
1627 | ||
1628 | /* A set of features, all of which are required for this system register to be | |
1629 | available. */ | |
1630 | aarch64_feature_set features; | |
1631 | } aarch64_sys_reg; | |
1632 | ||
1633 | extern const aarch64_sys_reg aarch64_sys_regs []; | |
1634 | extern const aarch64_sys_reg aarch64_pstatefields []; | |
1635 | extern bool aarch64_sys_reg_deprecated_p (const uint32_t); | |
1636 | extern bool aarch64_sys_reg_128bit_p (const uint32_t); | |
1637 | extern bool aarch64_sys_reg_alias_p (const uint32_t); | |
1638 | extern bool aarch64_pstatefield_supported_p (const aarch64_feature_set, | |
1639 | const aarch64_sys_reg *); | |
1640 | ||
1641 | typedef struct | |
1642 | { | |
1643 | const char *name; | |
1644 | uint32_t value; | |
1645 | uint32_t flags; | |
1646 | ||
1647 | /* A set of features, all of which are required for this system instruction to be | |
1648 | available. */ | |
1649 | aarch64_feature_set features; | |
1650 | } aarch64_sys_ins_reg; | |
1651 | ||
1652 | extern bool aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *); | |
1653 | extern bool | |
1654 | aarch64_sys_ins_reg_supported_p (const aarch64_feature_set, | |
1655 | const char *reg_name, | |
1656 | uint32_t, const aarch64_feature_set *); | |
1657 | ||
1658 | extern const aarch64_sys_ins_reg aarch64_sys_regs_ic []; | |
1659 | extern const aarch64_sys_ins_reg aarch64_sys_regs_dc []; | |
1660 | extern const aarch64_sys_ins_reg aarch64_sys_regs_at []; | |
1661 | extern const aarch64_sys_ins_reg aarch64_sys_regs_tlbi []; | |
1662 | extern const aarch64_sys_ins_reg aarch64_sys_regs_sr []; | |
1663 | ||
1664 | /* Shift/extending operator kinds. | |
1665 | N.B. order is important; keep aarch64_operand_modifiers synced. */ | |
1666 | enum aarch64_modifier_kind | |
1667 | { | |
1668 | AARCH64_MOD_NONE, | |
1669 | AARCH64_MOD_MSL, | |
1670 | AARCH64_MOD_ROR, | |
1671 | AARCH64_MOD_ASR, | |
1672 | AARCH64_MOD_LSR, | |
1673 | AARCH64_MOD_LSL, | |
1674 | AARCH64_MOD_UXTB, | |
1675 | AARCH64_MOD_UXTH, | |
1676 | AARCH64_MOD_UXTW, | |
1677 | AARCH64_MOD_UXTX, | |
1678 | AARCH64_MOD_SXTB, | |
1679 | AARCH64_MOD_SXTH, | |
1680 | AARCH64_MOD_SXTW, | |
1681 | AARCH64_MOD_SXTX, | |
1682 | AARCH64_MOD_MUL, | |
1683 | AARCH64_MOD_MUL_VL, | |
1684 | }; | |
1685 | ||
1686 | bool | |
1687 | aarch64_extend_operator_p (enum aarch64_modifier_kind); | |
1688 | ||
1689 | enum aarch64_modifier_kind | |
1690 | aarch64_get_operand_modifier (const struct aarch64_name_value_pair *); | |
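/* Illustrative sketch: aarch64_extend_operator_p is expected to accept the
   UXTx/SXTx extend operators and reject plain shifts such as ROR; treat the
   exact boundary as an assumption of this sketch rather than a documented
   guarantee of the header.  */
static inline void
example_modifier_kinds (void)
{
  assert (aarch64_extend_operator_p (AARCH64_MOD_UXTW));
  assert (!aarch64_extend_operator_p (AARCH64_MOD_ROR));
}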
1691 | /* Condition. */ | |
1692 | ||
1693 | typedef struct | |
1694 | { | |
1695 | /* A list of names with the first one as the disassembly preference; | |
1696 | terminated by NULL if fewer than 4. */ | |
1697 | const char *names[4]; | |
1698 | aarch64_insn value; | |
1699 | } aarch64_cond; | |
1700 | ||
1701 | extern const aarch64_cond aarch64_conds[16]; | |
1702 | ||
1703 | const aarch64_cond* get_cond_from_value (aarch64_insn value); | |
1704 | const aarch64_cond* get_inverted_cond (const aarch64_cond *cond); | |
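/* Illustrative sketch: look a condition up by its 4-bit encoding and fetch
   its inverse.  The value 0 is assumed to be EQ (with NE as its inverse),
   following the architectural condition-code numbering.  */
static inline void
example_condition_lookup (void)
{
  const aarch64_cond *eq = get_cond_from_value (0);
  const aarch64_cond *ne = get_inverted_cond (eq);
  /* ne->names[0] is expected to be the preferred spelling, "ne".  */
  assert (ne != NULL);
}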
1705 | \f | |
1706 | /* Information about a reference to part of ZA. */ | |
1707 | struct aarch64_indexed_za | |
1708 | { | |
1709 | /* Which tile is being accessed. Unused (and 0) for an index into ZA. */ | |
1710 | int regno; | |
1711 | ||
1712 | struct | |
1713 | { | |
1714 | /* The 32-bit index register. */ | |
1715 | int regno; | |
1716 | ||
1717 | /* The first (or only) immediate offset. */ | |
1718 | int64_t imm; | |
1719 | ||
1720 | /* The last immediate offset minus the first immediate offset. | |
1721 | Unlike the range size, this is guaranteed not to overflow | |
1722 | when the end offset > the start offset. */ | |
1723 | uint64_t countm1; | |
1724 | } index; | |
1725 | ||
1726 | /* The vector group size, or 0 if none. */ | |
1727 | unsigned group_size : 8; | |
1728 | ||
1729 | /* True if a tile access is vertical, false if it is horizontal. | |
1730 | Unused (and 0) for an index into ZA. */ | |
1731 | unsigned v : 1; | |
1732 | }; | |
1733 | ||
1734 | /* Information about a list of registers. */ | |
1735 | struct aarch64_reglist | |
1736 | { | |
1737 | unsigned first_regno : 8; | |
1738 | unsigned num_regs : 8; | |
1739 | /* The difference between the nth and the (n+1)th register. */ | |
1740 | unsigned stride : 8; | |
1741 | /* 1 if it is a list of register elements. */ | |
1742 | unsigned has_index : 1; | |
1743 | /* Lane index; valid only when has_index is 1. */ | |
1744 | int64_t index; | |
1745 | }; | |
1746 | ||
1747 | /* Structure representing an operand. */ | |
1748 | ||
1749 | struct aarch64_opnd_info | |
1750 | { | |
1751 | enum aarch64_opnd type; | |
1752 | aarch64_opnd_qualifier_t qualifier; | |
1753 | int idx; | |
1754 | ||
1755 | union | |
1756 | { | |
1757 | struct | |
1758 | { | |
1759 | unsigned regno; | |
1760 | } reg; | |
1761 | struct | |
1762 | { | |
1763 | unsigned int regno; | |
1764 | int64_t index; | |
1765 | } reglane; | |
1766 | /* e.g. LVn. */ | |
1767 | struct aarch64_reglist reglist; | |
1768 | /* e.g. immediate or pc relative address offset. */ | |
1769 | struct | |
1770 | { | |
1771 | int64_t value; | |
1772 | unsigned is_fp : 1; | |
1773 | } imm; | |
1774 | /* e.g. address in STR (register offset). */ | |
1775 | struct | |
1776 | { | |
1777 | unsigned base_regno; | |
1778 | struct | |
1779 | { | |
1780 | union | |
1781 | { | |
1782 | int imm; | |
1783 | unsigned regno; | |
1784 | }; | |
1785 | unsigned is_reg; | |
1786 | } offset; | |
1787 | unsigned pcrel : 1; /* PC-relative. */ | |
1788 | unsigned writeback : 1; | |
1789 | unsigned preind : 1; /* Pre-indexed. */ | |
1790 | unsigned postind : 1; /* Post-indexed. */ | |
1791 | } addr; | |
1792 | ||
1793 | struct | |
1794 | { | |
1795 | /* The encoding of the system register. */ | |
1796 | aarch64_insn value; | |
1797 | ||
1798 | /* The system register flags. */ | |
1799 | uint32_t flags; | |
1800 | } sysreg; | |
1801 | ||
1802 | /* ZA tile vector, e.g. <ZAn><HV>.D[<Wv>{, <imm>}] */ | |
1803 | struct aarch64_indexed_za indexed_za; | |
1804 | ||
1805 | const aarch64_cond *cond; | |
1806 | /* The encoding of the PSTATE field. */ | |
1807 | aarch64_insn pstatefield; | |
1808 | const aarch64_sys_ins_reg *sysins_op; | |
1809 | const struct aarch64_name_value_pair *barrier; | |
1810 | const struct aarch64_name_value_pair *hint_option; | |
1811 | const struct aarch64_name_value_pair *prfop; | |
1812 | }; | |
1813 | ||
1814 | /* Operand shifter; in use when the operand is a register offset address, | |
1815 | add/sub extended reg, etc. e.g. <R><m>{, <extend> {#<amount>}}. */ | |
1816 | struct | |
1817 | { | |
1818 | enum aarch64_modifier_kind kind; | |
1819 | unsigned operator_present: 1; /* Only valid during encoding. */ | |
1820 | /* Value of the 'S' field in ld/st reg offset; used only in decoding. */ | |
1821 | unsigned amount_present: 1; | |
1822 | int64_t amount; | |
1823 | } shifter; | |
1824 | ||
1825 | unsigned skip:1; /* Operand is not complete if there is a fixup still | |
1826 | to be done on it. In some (but not all) of these | |
1827 | cases, we need to tell libopcodes to skip the | |
1828 | constraint checking and the encoding for this | |
1829 | operand, so that libopcodes can pick up the | |
1830 | right opcode before the operand is fixed up. This | |
1831 | flag should only be used during | |
1832 | assembling/encoding. */ | |
1833 | unsigned present:1; /* Whether this operand is present in the assembly | |
1834 | line; not used during the disassembly. */ | |
1835 | }; | |
1836 | ||
1837 | typedef struct aarch64_opnd_info aarch64_opnd_info; | |
1838 | ||
1839 | /* Structure representing an instruction. | |
1840 | ||
1841 | It is used during both assembling and disassembling. The assembler | |
1842 | fills an aarch64_inst after a successful parsing and then passes it to the | |
1843 | encoding routine to do the encoding. During the disassembling, the | |
1844 | disassembler calls the decoding routine to decode a binary instruction; on a | |
1845 | successful return, such a structure will be filled with information of the | |
1846 | instruction; then the disassembler uses the information to print out the | |
1847 | instruction. */ | |
1848 | ||
1849 | struct aarch64_inst | |
1850 | { | |
1851 | /* The value of the binary instruction. */ | |
1852 | aarch64_insn value; | |
1853 | ||
1854 | /* Corresponding opcode entry. */ | |
1855 | const aarch64_opcode *opcode; | |
1856 | ||
1857 | /* Condition for a truly conditional-executed instruction, e.g. b.cond. */ | |
1858 | const aarch64_cond *cond; | |
1859 | ||
1860 | /* Operands information. */ | |
1861 | aarch64_opnd_info operands[AARCH64_MAX_OPND_NUM]; | |
1862 | }; | |
1863 | ||
1864 | /* Defining the HINT #imm values for the aarch64_hint_options. */ | |
1865 | #define HINT_OPD_CSYNC 0x11 | |
1866 | #define HINT_OPD_DSYNC 0x13 | |
1867 | #define HINT_OPD_C 0x22 | |
1868 | #define HINT_OPD_J 0x24 | |
1869 | #define HINT_OPD_JC 0x26 | |
1870 | #define HINT_OPD_NULL 0x00 | |
1871 | ||
1872 | \f | |
1873 | /* Diagnosis related declaration and interface. */ | |
1874 | ||
1875 | /* Operand error kind enumerators. | |
1876 | ||
1877 | AARCH64_OPDE_RECOVERABLE | |
1878 | Less severe error found during parsing, very possibly because | |
1879 | GAS has picked up a wrong instruction template for the parsing. | |
1880 | ||
1881 | AARCH64_OPDE_A_SHOULD_FOLLOW_B | |
1882 | The instruction forms (or is expected to form) part of a sequence, | |
1883 | but the preceding instruction in the sequence wasn't the expected one. | |
1884 | The message refers to two strings: the name of the current instruction, | |
1885 | followed by the name of the expected preceding instruction. | |
1886 | ||
1887 | AARCH64_OPDE_EXPECTED_A_AFTER_B | |
1888 | Same as AARCH64_OPDE_A_SHOULD_FOLLOW_B, but shifting the focus | |
1889 | so that the current instruction is assumed to be the incorrect one: | |
1890 | "since the previous instruction was B, the current one should be A". | |
1891 | ||
1892 | AARCH64_OPDE_SYNTAX_ERROR | |
1893 | General syntax error; it can be either a user error, or simply because | |
1894 | GAS is trying a wrong instruction template. | |
1895 | ||
1896 | AARCH64_OPDE_FATAL_SYNTAX_ERROR | |
1897 | Definitely a user syntax error. | |
1898 | ||
1899 | AARCH64_OPDE_INVALID_VARIANT | |
1900 | No syntax error, but the operands are not a valid combination, e.g. | |
1901 | FMOV D0,S0 | |
1902 | ||
1903 | The following errors are only reported against an asm string that is | |
1904 | syntactically valid and that has valid operand qualifiers. | |
1905 | ||
1906 | AARCH64_OPDE_INVALID_VG_SIZE | |
1907 | Error about a "VGx<n>" modifier in a ZA index not having the | |
1908 | correct <n>. This error effectively forms a pair with | |
1909 | AARCH64_OPDE_REG_LIST_LENGTH, since both errors relate to the number | |
1910 | of vectors that an instruction operates on. However, the "VGx<n>" | |
1911 | modifier is optional, whereas a register list always has a known | |
1912 | and explicit length. It therefore seems better to place more | |
1913 | importance on the register list length when selecting an opcode table | |
1914 | entry. This in turn means that having an incorrect register length | |
1915 | should be more severe than having an incorrect "VGx<n>". | |
1916 | ||
1917 | AARCH64_OPDE_REG_LIST_LENGTH | |
1918 | Error about a register list operand having an unexpected number of | |
1919 | registers. This error is low severity because there might be another | |
1920 | opcode entry that supports the given number of registers. | |
1921 | ||
1922 | AARCH64_OPDE_REG_LIST_STRIDE | |
1923 | Error about a register list operand having the correct number | |
1924 | (and type) of registers, but an unexpected stride. This error is | |
1925 | more severe than AARCH64_OPDE_REG_LIST_LENGTH because it implies | |
1926 | that the length is known to be correct. However, it is lower than | |
1927 | many other errors, since some instructions have forms that share | |
1928 | the same number of registers but have different strides. | |
1929 | ||
1930 | AARCH64_OPDE_UNTIED_IMMS | |
1931 | The asm failed to use the same immediate for a destination operand | |
1932 | and a tied source operand. | |
1933 | ||
1934 | AARCH64_OPDE_UNTIED_OPERAND | |
1935 | The asm failed to use the same register for a destination operand | |
1936 | and a tied source operand. | |
1937 | ||
1938 | AARCH64_OPDE_OUT_OF_RANGE | |
1939 | Error about some immediate value out of a valid range. | |
1940 | ||
1941 | AARCH64_OPDE_UNALIGNED | |
1942 | Error about some immediate value not properly aligned (i.e. not being a | |
1943 | multiple of a certain value). | |
1944 | ||
1945 | AARCH64_OPDE_OTHER_ERROR | |
1946 | Error of the highest severity and used for any severe issue that does not | |
1947 | fall into any of the above categories. | |
1948 | ||
1949 | AARCH64_OPDE_INVALID_REGNO | |
1950 | A register was syntactically valid and had the right type, but it was | |
1951 | outside the range supported by the associated operand field. This is | |
1952 | a high severity error because there are currently no instructions that | |
1953 | would accept the operands that precede the erroneous one (if any) and | |
1954 | yet still accept a wider range of registers. | |
1955 | ||
1956 | AARCH64_OPDE_RECOVERABLE, AARCH64_OPDE_SYNTAX_ERROR and | |
1957 | AARCH64_OPDE_FATAL_SYNTAX_ERROR are only detected by GAS while the | |
1958 | AARCH64_OPDE_INVALID_VARIANT error can only be spotted by libopcodes as | |
1959 | only libopcodes has the information about the valid variants of each | |
1960 | instruction. | |
1961 | ||
1962 | The enumerators have an increasing severity. This is helpful when there are | |
1963 | multiple instruction templates available for a given mnemonic name (e.g. | |
1964 | FMOV); this mechanism will help choose the most suitable template from which | |
1965 | the generated diagnostics can most closely describe the issues, if any. | |
1966 | ||
1967 | This enum needs to be kept up-to-date with operand_mismatch_kind_names | |
1968 | in tc-aarch64.c. */ | |
1969 | ||
1970 | enum aarch64_operand_error_kind | |
1971 | { | |
1972 | AARCH64_OPDE_NIL, | |
1973 | AARCH64_OPDE_RECOVERABLE, | |
1974 | AARCH64_OPDE_A_SHOULD_FOLLOW_B, | |
1975 | AARCH64_OPDE_EXPECTED_A_AFTER_B, | |
1976 | AARCH64_OPDE_SYNTAX_ERROR, | |
1977 | AARCH64_OPDE_FATAL_SYNTAX_ERROR, | |
1978 | AARCH64_OPDE_INVALID_VARIANT, | |
1979 | AARCH64_OPDE_INVALID_VG_SIZE, | |
1980 | AARCH64_OPDE_REG_LIST_LENGTH, | |
1981 | AARCH64_OPDE_REG_LIST_STRIDE, | |
1982 | AARCH64_OPDE_UNTIED_IMMS, | |
1983 | AARCH64_OPDE_UNTIED_OPERAND, | |
1984 | AARCH64_OPDE_OUT_OF_RANGE, | |
1985 | AARCH64_OPDE_UNALIGNED, | |
1986 | AARCH64_OPDE_OTHER_ERROR, | |
1987 | AARCH64_OPDE_INVALID_REGNO | |
1988 | }; | |
1989 | ||
1990 | /* N.B. GAS assumes that this structure works well with shallow copy. */ | |
1991 | struct aarch64_operand_error | |
1992 | { | |
1993 | enum aarch64_operand_error_kind kind; | |
1994 | int index; | |
1995 | const char *error; | |
1996 | /* Some data for extra information. */ | |
1997 | union { | |
1998 | int i; | |
1999 | const char *s; | |
2000 | } data[3]; | |
2001 | bool non_fatal; | |
2002 | }; | |
2003 | ||
2004 | /* AArch64 sequence structure used to track instructions with F_SCAN | |
2005 | dependencies for both assembler and disassembler. */ | |
2006 | struct aarch64_instr_sequence | |
2007 | { | |
2008 | /* The instructions in the sequence, starting with the one that | |
2009 | caused it to be opened. */ | |
2010 | aarch64_inst *instr; | |
2011 | /* The number of instructions already in the sequence. */ | |
2012 | int num_added_insns; | |
2013 | /* The number of instructions allocated to the sequence. */ | |
2014 | int num_allocated_insns; | |
2015 | }; | |
2016 | ||
2017 | /* Encoding entrypoint. */ | |
2018 | ||
2019 | extern bool | |
2020 | aarch64_opcode_encode (const aarch64_opcode *, const aarch64_inst *, | |
2021 | aarch64_insn *, aarch64_opnd_qualifier_t *, | |
2022 | aarch64_operand_error *, aarch64_instr_sequence *); | |
2023 | ||
2024 | extern const aarch64_opcode * | |
2025 | aarch64_replace_opcode (struct aarch64_inst *, | |
2026 | const aarch64_opcode *); | |
2027 | ||
2028 | /* Given the opcode enumerator OP, return the pointer to the corresponding | |
2029 | opcode entry. */ | |
2030 | ||
2031 | extern const aarch64_opcode * | |
2032 | aarch64_get_opcode (enum aarch64_op); | |
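/* Illustrative sketch: map an opcode enumerator back to its table entry.
   The expectation that UBFX is flagged as an alias (of UBFM) reflects the
   current opcode table and is an assumption of this example.  */
static inline void
example_lookup_by_enum (void)
{
  const aarch64_opcode *opc = aarch64_get_opcode (OP_UBFX);
  if (opc != NULL && alias_opcode_p (opc))
    {
      /* The real, architecturally defined encoding is UBFM; see the
	 F_ALIAS and F_CONV flags above.  */
    }
}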
2033 | ||
2034 | /* An instance of this structure is passed to aarch64_print_operand, and | |
2035 | the callback within this structure is used to apply styling to the | |
2036 | disassembler output. This structure encapsulates the callback and a | |
2037 | state pointer. */ | |
2038 | ||
2039 | struct aarch64_styler | |
2040 | { | |
2041 | /* The callback used to apply styling. Returns a string created from FMT | |
2042 | and ARGS with STYLE applied to the string. STYLER is a pointer back | |
2043 | to this object so that the callback can access the state member. | |
2044 | ||
2045 | The string returned from this callback must remain valid until the | |
2046 | call to aarch64_print_operand has completed. */ | |
2047 | const char *(*apply_style) (struct aarch64_styler *styler, | |
2048 | enum disassembler_style style, | |
2049 | const char *fmt, | |
2050 | va_list args); | |
2051 | ||
2052 | /* A pointer to a state object which can be used by the apply_style | |
2053 | callback function. */ | |
2054 | void *state; | |
2055 | }; | |
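/* Illustrative sketch (hypothetical callback, not part of libopcodes): a
   styler whose callback ignores the style and formats into a static
   buffer.  The buffer outlives the aarch64_print_operand call, which
   satisfies the lifetime requirement above, but it is not reentrant.
   vsnprintf is assumed to be available via the headers already included.
   A caller would typically set it up as
   struct aarch64_styler st = { example_plain_style, NULL }.  */
static inline const char *
example_plain_style (struct aarch64_styler *styler,
		     enum disassembler_style style,
		     const char *fmt, va_list args)
{
  static char buf[128];
  (void) styler;
  (void) style;
  vsnprintf (buf, sizeof buf, fmt, args);
  return buf;
}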
2056 | ||
2057 | /* Generate the string representation of an operand. */ | |
2058 | extern void | |
2059 | aarch64_print_operand (char *, size_t, bfd_vma, const aarch64_opcode *, | |
2060 | const aarch64_opnd_info *, int, int *, bfd_vma *, | |
2061 | char **, char *, size_t, | |
2062 | aarch64_feature_set features, | |
2063 | struct aarch64_styler *styler); | |
2064 | ||
2065 | /* Miscellaneous interface. */ | |
2066 | ||
2067 | extern int | |
2068 | aarch64_operand_index (const enum aarch64_opnd *, enum aarch64_opnd); | |
2069 | ||
2070 | extern aarch64_opnd_qualifier_t | |
2071 | aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *, int, | |
2072 | const aarch64_opnd_qualifier_t, int); | |
2073 | ||
2074 | extern bool | |
2075 | aarch64_is_destructive_by_operands (const aarch64_opcode *); | |
2076 | ||
2077 | extern int | |
2078 | aarch64_num_of_operands (const aarch64_opcode *); | |
2079 | ||
2080 | extern bool | |
2081 | aarch64_stack_pointer_p (const aarch64_opnd_info *); | |
2082 | ||
2083 | extern int | |
2084 | aarch64_zero_register_p (const aarch64_opnd_info *); | |
2085 | ||
2086 | extern enum err_type | |
2087 | aarch64_decode_insn (aarch64_insn, aarch64_inst *, bool, | |
2088 | aarch64_operand_error *); | |
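/* Illustrative sketch: decode one instruction word.  The constant below is
   believed to encode "add x1, x1, #1"; treat it as an example value.  The
   bool argument is assumed to be a "no aliases" flag, so false here keeps
   the alias (preferred) forms.  */
static inline void
example_decode_one (void)
{
  aarch64_inst inst = { 0 };
  aarch64_operand_error err;
  if (aarch64_decode_insn (0x91000421, &inst, false, &err) == ERR_OK)
    {
      /* inst.opcode points at the matched table entry and
	 inst.operands[] holds the decoded operands.  */
    }
}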
2089 | ||
2090 | extern void | |
2091 | init_insn_sequence (const struct aarch64_inst *, aarch64_instr_sequence *); | |
2092 | ||
2093 | /* Given an operand qualifier, return the expected data element size | |
2094 | of a qualified operand. */ | |
2095 | extern unsigned char | |
2096 | aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t); | |
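/* Illustrative sketch: the element size is returned in bytes, so a .D
   scalar/element qualifier is expected to give 8 and a 4S vector shape 4.
   These specific values are assumptions of the example, not statements
   made by this header.  */
static inline void
example_qualifier_esize (void)
{
  assert (aarch64_get_qualifier_esize (AARCH64_OPND_QLF_S_D) == 8);
  assert (aarch64_get_qualifier_esize (AARCH64_OPND_QLF_V_4S) == 4);
}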
2097 | ||
2098 | extern enum aarch64_operand_class | |
2099 | aarch64_get_operand_class (enum aarch64_opnd); | |
2100 | ||
2101 | extern const char * | |
2102 | aarch64_get_operand_name (enum aarch64_opnd); | |
2103 | ||
2104 | extern const char * | |
2105 | aarch64_get_operand_desc (enum aarch64_opnd); | |
2106 | ||
2107 | extern bool | |
2108 | aarch64_sve_dupm_mov_immediate_p (uint64_t, int); | |
2109 | ||
2110 | extern bool | |
2111 | aarch64_cpu_supports_inst_p (aarch64_feature_set, aarch64_inst *); | |
2112 | ||
2113 | extern int | |
2114 | calc_ldst_datasize (const aarch64_opnd_info *opnds); | |
2115 | ||
2116 | #ifdef DEBUG_AARCH64 | |
2117 | extern int debug_dump; | |
2118 | ||
2119 | extern void | |
2120 | aarch64_verbose (const char *, ...) __attribute__ ((format (printf, 1, 2))); | |
2121 | ||
2122 | #define DEBUG_TRACE(M, ...) \ | |
2123 | { \ | |
2124 | if (debug_dump) \ | |
2125 | aarch64_verbose ("%s: " M ".", __func__, ##__VA_ARGS__); \ | |
2126 | } | |
2127 | ||
2128 | #define DEBUG_TRACE_IF(C, M, ...) \ | |
2129 | { \ | |
2130 | if (debug_dump && (C)) \ | |
2131 | aarch64_verbose ("%s: " M ".", __func__, ##__VA_ARGS__); \ | |
2132 | } | |
2133 | #else /* !DEBUG_AARCH64 */ | |
2134 | #define DEBUG_TRACE(M, ...) ; | |
2135 | #define DEBUG_TRACE_IF(C, M, ...) ; | |
2136 | #endif /* DEBUG_AARCH64 */ | |
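/* Illustrative sketch: the tracing macros above compile away to a bare
   statement unless DEBUG_AARCH64 is defined, and print only when the
   run-time debug_dump flag is set.  */
static inline void
example_trace (aarch64_insn code)
{
  (void) code;
  DEBUG_TRACE ("examining insn %08x", (unsigned) code);
  DEBUG_TRACE_IF ((code & 3) == 0, "low bits are clear");
}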
2137 | ||
2138 | extern const char *const aarch64_sve_pattern_array[32]; | |
2139 | extern const char *const aarch64_sve_prfop_array[16]; | |
2140 | extern const char *const aarch64_rprfmop_array[64]; | |
2141 | extern const char *const aarch64_sme_vlxn_array[2]; | |
2142 | extern const char *const aarch64_brbop_array[2]; | |
2143 | ||
2144 | #ifdef __cplusplus | |
2145 | } | |
2146 | #endif | |
2147 | ||
2148 | #endif /* OPCODE_AARCH64_H */ |