/* git.ipfire.org Git - thirdparty/binutils-gdb.git
   Update year range in copyright notice of binutils files
   [thirdparty/binutils-gdb.git] / include / opcode / aarch64.h  */
1 /* AArch64 assembler/disassembler support.
2
3 Copyright (C) 2009-2024 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GNU Binutils.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #ifndef OPCODE_AARCH64_H
23 #define OPCODE_AARCH64_H
24
25 #include "bfd.h"
26 #include <stdint.h>
27 #include <assert.h>
28 #include <stdlib.h>
29
30 #include "dis-asm.h"
31
32 #ifdef __cplusplus
33 extern "C" {
34 #endif
35
/* The offset for pc-relative addressing is currently defined to be 0.  */
#define AARCH64_PCREL_OFFSET		0

/* A single instruction encoding.  AArch64 instructions are a fixed
   32 bits wide, so one uint32_t holds a complete encoding.  */
typedef uint32_t aarch64_insn;
/* An enum containing all known CPU features.  The values act as bit positions
   into aarch64_feature_set.  New entries may be appended, but reordering
   existing ones would change the meaning of stored feature sets.  */
enum aarch64_feature_bit {
  /* All processors.  */
  AARCH64_FEATURE_V8,
  /* ARMv8.6 processors.  */
  AARCH64_FEATURE_V8_6A,
  /* Bfloat16 insns.  */
  AARCH64_FEATURE_BFLOAT16,
  /* Armv8-A processors.  */
  AARCH64_FEATURE_V8A,
  /* SVE2 instructions.  */
  AARCH64_FEATURE_SVE2,
  /* ARMv8.2 processors.  */
  AARCH64_FEATURE_V8_2A,
  /* ARMv8.3 processors.  */
  AARCH64_FEATURE_V8_3A,
  /* SVE2 AES instructions.  */
  AARCH64_FEATURE_SVE2_AES,
  /* SVE2 bit-permute instructions.  */
  AARCH64_FEATURE_SVE2_BITPERM,
  /* SVE2 SM4 instructions.  */
  AARCH64_FEATURE_SVE2_SM4,
  /* SVE2 SHA-3 instructions.  */
  AARCH64_FEATURE_SVE2_SHA3,
  /* ARMv8.4 processors.  */
  AARCH64_FEATURE_V8_4A,
  /* Armv8-R processors.  */
  AARCH64_FEATURE_V8R,
  /* Armv8.7 processors.  */
  AARCH64_FEATURE_V8_7A,
  /* Scalable Matrix Extension.  */
  AARCH64_FEATURE_SME,
  /* Atomic 64-byte load/store.  */
  AARCH64_FEATURE_LS64,
  /* v8.3 Pointer Authentication.  */
  AARCH64_FEATURE_PAC,
  /* FP instructions.  */
  AARCH64_FEATURE_FP,
  /* SIMD instructions.  */
  AARCH64_FEATURE_SIMD,
  /* CRC instructions.  */
  AARCH64_FEATURE_CRC,
  /* LSE instructions.  */
  AARCH64_FEATURE_LSE,
  /* PAN instructions.  */
  AARCH64_FEATURE_PAN,
  /* LOR instructions.  */
  AARCH64_FEATURE_LOR,
  /* v8.1 SIMD instructions.  */
  AARCH64_FEATURE_RDMA,
  /* v8.1 features.  */
  AARCH64_FEATURE_V8_1A,
  /* v8.2 FP16 instructions.  */
  AARCH64_FEATURE_F16,
  /* RAS Extensions.  */
  AARCH64_FEATURE_RAS,
  /* Statistical Profiling.  */
  AARCH64_FEATURE_PROFILE,
  /* SVE instructions.  */
  AARCH64_FEATURE_SVE,
  /* RCPC instructions.  */
  AARCH64_FEATURE_RCPC,
  /* Complex # instructions.  */
  AARCH64_FEATURE_COMPNUM,
  /* Dot Product instructions.  */
  AARCH64_FEATURE_DOTPROD,
  /* SM3 & SM4 instructions.  */
  AARCH64_FEATURE_SM4,
  /* SHA2 instructions.  */
  AARCH64_FEATURE_SHA2,
  /* SHA3 instructions.  */
  AARCH64_FEATURE_SHA3,
  /* AES instructions.  */
  AARCH64_FEATURE_AES,
  /* v8.2 FP16FML ins.  */
  AARCH64_FEATURE_F16_FML,
  /* ARMv8.5 processors.  */
  AARCH64_FEATURE_V8_5A,
  /* v8.5 Flag Manipulation version 2.  */
  AARCH64_FEATURE_FLAGMANIP,
  /* FRINT[32,64][Z,X] insns.  */
  AARCH64_FEATURE_FRINTTS,
  /* SB instruction.  */
  AARCH64_FEATURE_SB,
  /* Execution and Data Prediction Restriction instructions.  */
  AARCH64_FEATURE_PREDRES,
  /* DC CVADP.  */
  AARCH64_FEATURE_CVADP,
  /* Random Number instructions.  */
  AARCH64_FEATURE_RNG,
  /* BTI instructions.  */
  AARCH64_FEATURE_BTI,
  /* SCXTNUM_ELx.  */
  AARCH64_FEATURE_SCXTNUM,
  /* ID_PFR2 instructions.  */
  AARCH64_FEATURE_ID_PFR2,
  /* SSBS mechanism enabled.  */
  AARCH64_FEATURE_SSBS,
  /* Memory Tagging Extension.  */
  AARCH64_FEATURE_MEMTAG,
  /* Transactional Memory Extension.  */
  AARCH64_FEATURE_TME,
  /* Standardization of memory operations.  */
  AARCH64_FEATURE_MOPS,
  /* Hinted conditional branches.  */
  AARCH64_FEATURE_HBC,
  /* Matrix Multiply instructions.  */
  AARCH64_FEATURE_I8MM,
  /* F32 Matrix Multiply instructions.  */
  AARCH64_FEATURE_F32MM,
  /* F64 Matrix Multiply instructions.  */
  AARCH64_FEATURE_F64MM,
  /* v8.4 Flag Manipulation.  */
  AARCH64_FEATURE_FLAGM,
  /* Armv9.0-A processors.  */
  AARCH64_FEATURE_V9A,
  /* SME F64F64.  */
  AARCH64_FEATURE_SME_F64F64,
  /* SME I16I64.  */
  AARCH64_FEATURE_SME_I16I64,
  /* Armv8.8 processors.  */
  AARCH64_FEATURE_V8_8A,
  /* Common Short Sequence Compression instructions.  */
  AARCH64_FEATURE_CSSC,
  /* Armv8.9-A processors.  */
  AARCH64_FEATURE_V8_9A,
  /* Check Feature Status Extension.  */
  AARCH64_FEATURE_CHK,
  /* Guarded Control Stack.  */
  AARCH64_FEATURE_GCS,
  /* SPE Call Return branch records.  */
  AARCH64_FEATURE_SPE_CRR,
  /* SPE Filter by data source.  */
  AARCH64_FEATURE_SPE_FDS,
  /* Additional SPE events.  */
  AARCH64_FEATURE_SPEv1p4,
  /* SME2.  */
  AARCH64_FEATURE_SME2,
  /* Translation Hardening Extension.  */
  AARCH64_FEATURE_THE,
  /* LSE128.  */
  AARCH64_FEATURE_LSE128,
  /* ARMv8.9-A RAS Extensions.  */
  AARCH64_FEATURE_RASv2,
  /* System Control Register2.  */
  AARCH64_FEATURE_SCTLR2,
  /* Fine Grained Traps.  */
  AARCH64_FEATURE_FGT2,
  /* Physical Fault Address.  */
  AARCH64_FEATURE_PFAR,
  /* Address Translate Stage 1.  */
  AARCH64_FEATURE_ATS1A,
  /* Memory Attribute Index Enhancement.  */
  AARCH64_FEATURE_AIE,
  /* Stage 1 Permission Indirection Extension.  */
  AARCH64_FEATURE_S1PIE,
  /* Stage 2 Permission Indirection Extension.  */
  AARCH64_FEATURE_S2PIE,
  /* Stage 1 Permission Overlay Extension.  */
  AARCH64_FEATURE_S1POE,
  /* Stage 2 Permission Overlay Extension.  */
  AARCH64_FEATURE_S2POE,
  /* Extension to Translation Control Registers.  */
  AARCH64_FEATURE_TCR2,
  /* Speculation Prediction Restriction instructions.  */
  AARCH64_FEATURE_PREDRES2,
  /* Instrumentation Extension.  */
  AARCH64_FEATURE_ITE,
  /* Keep last: the number of feature bits, used to size
     aarch64_feature_set::flags.  */
  AARCH64_NUM_FEATURES
};
206
/* These macros take an initial argument X that gives the index into
   an aarch64_feature_set.  The macros then return the bitmask for
   that array index.  */

/* A mask in which feature bit BIT is set and all other bits are clear.
   Yields zero when BIT does not belong to 64-bit word X, so word
   initializers can simply OR every feature's mask together.  */
#define AARCH64_UINT64_BIT(X, BIT) \
  (((BIT) / 64 == (X)) ? (1ULL << ((BIT) % 64)) : 0ULL)

/* A mask that includes only AARCH64_FEATURE_<NAME>.  */
#define AARCH64_FEATBIT(X, NAME) \
  AARCH64_UINT64_BIT (X, AARCH64_FEATURE_##NAME)
218
/* A mask of the features that are enabled by each architecture version,
   excluding those that are inherited from other architecture versions.
   Each macro takes the word index X, as for AARCH64_FEATBIT.  */
#define AARCH64_ARCH_V8A_FEATURES(X)	(AARCH64_FEATBIT (X, V8A)	\
					 | AARCH64_FEATBIT (X, FP)	\
					 | AARCH64_FEATBIT (X, RAS)	\
					 | AARCH64_FEATBIT (X, SIMD)	\
					 | AARCH64_FEATBIT (X, CHK))
#define AARCH64_ARCH_V8_1A_FEATURES(X)	(AARCH64_FEATBIT (X, V8_1A)	\
					 | AARCH64_FEATBIT (X, CRC)	\
					 | AARCH64_FEATBIT (X, LSE)	\
					 | AARCH64_FEATBIT (X, PAN)	\
					 | AARCH64_FEATBIT (X, LOR)	\
					 | AARCH64_FEATBIT (X, RDMA))
#define AARCH64_ARCH_V8_2A_FEATURES(X)	(AARCH64_FEATBIT (X, V8_2A))
#define AARCH64_ARCH_V8_3A_FEATURES(X)	(AARCH64_FEATBIT (X, V8_3A)	\
					 | AARCH64_FEATBIT (X, PAC)	\
					 | AARCH64_FEATBIT (X, RCPC)	\
					 | AARCH64_FEATBIT (X, COMPNUM))
#define AARCH64_ARCH_V8_4A_FEATURES(X)	(AARCH64_FEATBIT (X, V8_4A)	\
					 | AARCH64_FEATBIT (X, DOTPROD)	\
					 | AARCH64_FEATBIT (X, FLAGM)	\
					 | AARCH64_FEATBIT (X, F16_FML))
#define AARCH64_ARCH_V8_5A_FEATURES(X)	(AARCH64_FEATBIT (X, V8_5A)	\
					 | AARCH64_FEATBIT (X, FLAGMANIP) \
					 | AARCH64_FEATBIT (X, FRINTTS)	\
					 | AARCH64_FEATBIT (X, SB)	\
					 | AARCH64_FEATBIT (X, PREDRES)	\
					 | AARCH64_FEATBIT (X, CVADP)	\
					 | AARCH64_FEATBIT (X, BTI)	\
					 | AARCH64_FEATBIT (X, SCXTNUM)	\
					 | AARCH64_FEATBIT (X, ID_PFR2)	\
					 | AARCH64_FEATBIT (X, SSBS))
#define AARCH64_ARCH_V8_6A_FEATURES(X)	(AARCH64_FEATBIT (X, V8_6A)	\
					 | AARCH64_FEATBIT (X, BFLOAT16) \
					 | AARCH64_FEATBIT (X, I8MM))
#define AARCH64_ARCH_V8_7A_FEATURES(X)	(AARCH64_FEATBIT (X, V8_7A)	\
					 | AARCH64_FEATBIT (X, LS64))
#define AARCH64_ARCH_V8_8A_FEATURES(X)	(AARCH64_FEATBIT (X, V8_8A)	\
					 | AARCH64_FEATBIT (X, MOPS)	\
					 | AARCH64_FEATBIT (X, HBC))
#define AARCH64_ARCH_V8_9A_FEATURES(X)	(AARCH64_FEATBIT (X, V8_9A)	\
					 | AARCH64_FEATBIT (X, SPEv1p4)	\
					 | AARCH64_FEATBIT (X, SPE_CRR)	\
					 | AARCH64_FEATBIT (X, SPE_FDS)	\
					 | AARCH64_FEATBIT (X, RASv2)	\
					 | AARCH64_FEATBIT (X, SCTLR2)	\
					 | AARCH64_FEATBIT (X, FGT2)	\
					 | AARCH64_FEATBIT (X, PFAR)	\
					 | AARCH64_FEATBIT (X, ATS1A)	\
					 | AARCH64_FEATBIT (X, AIE)	\
					 | AARCH64_FEATBIT (X, S1PIE)	\
					 | AARCH64_FEATBIT (X, S2PIE)	\
					 | AARCH64_FEATBIT (X, S1POE)	\
					 | AARCH64_FEATBIT (X, S2POE)	\
					 | AARCH64_FEATBIT (X, TCR2)	\
					 )

/* The v9.x _FEATURES sets contain only what v9.x adds beyond the v8.(x+5)
   architecture it is based on; the full sets below pull in the rest.  */
#define AARCH64_ARCH_V9A_FEATURES(X)	(AARCH64_FEATBIT (X, V9A)	\
					 | AARCH64_FEATBIT (X, F16)	\
					 | AARCH64_FEATBIT (X, SVE)	\
					 | AARCH64_FEATBIT (X, SVE2))
#define AARCH64_ARCH_V9_1A_FEATURES(X)	AARCH64_ARCH_V8_6A_FEATURES (X)
#define AARCH64_ARCH_V9_2A_FEATURES(X)	AARCH64_ARCH_V8_7A_FEATURES (X)
#define AARCH64_ARCH_V9_3A_FEATURES(X)	AARCH64_ARCH_V8_8A_FEATURES (X)
#define AARCH64_ARCH_V9_4A_FEATURES(X)	(AARCH64_ARCH_V8_9A_FEATURES (X) \
					 | AARCH64_FEATBIT (X, PREDRES2))

/* Architectures are the sum of the base and extensions.  Each version
   includes everything from the previous one plus its own _FEATURES set.  */
#define AARCH64_ARCH_V8A(X)	(AARCH64_FEATBIT (X, V8)	\
				 | AARCH64_ARCH_V8A_FEATURES (X))
#define AARCH64_ARCH_V8_1A(X)	(AARCH64_ARCH_V8A (X)		\
				 | AARCH64_ARCH_V8_1A_FEATURES (X))
#define AARCH64_ARCH_V8_2A(X)	(AARCH64_ARCH_V8_1A (X)		\
				 | AARCH64_ARCH_V8_2A_FEATURES (X))
#define AARCH64_ARCH_V8_3A(X)	(AARCH64_ARCH_V8_2A (X)		\
				 | AARCH64_ARCH_V8_3A_FEATURES (X))
#define AARCH64_ARCH_V8_4A(X)	(AARCH64_ARCH_V8_3A (X)		\
				 | AARCH64_ARCH_V8_4A_FEATURES (X))
#define AARCH64_ARCH_V8_5A(X)	(AARCH64_ARCH_V8_4A (X)		\
				 | AARCH64_ARCH_V8_5A_FEATURES (X))
#define AARCH64_ARCH_V8_6A(X)	(AARCH64_ARCH_V8_5A (X)		\
				 | AARCH64_ARCH_V8_6A_FEATURES (X))
#define AARCH64_ARCH_V8_7A(X)	(AARCH64_ARCH_V8_6A (X)		\
				 | AARCH64_ARCH_V8_7A_FEATURES (X))
#define AARCH64_ARCH_V8_8A(X)	(AARCH64_ARCH_V8_7A (X)		\
				 | AARCH64_ARCH_V8_8A_FEATURES (X))
#define AARCH64_ARCH_V8_9A(X)	(AARCH64_ARCH_V8_8A (X)		\
				 | AARCH64_ARCH_V8_9A_FEATURES (X))
/* Armv8-R: based on Armv8.4, but with the A-profile-only V8A and LOR
   bits explicitly removed.  */
#define AARCH64_ARCH_V8R(X)	((AARCH64_ARCH_V8_4A (X)	\
				  | AARCH64_FEATBIT (X, V8R))	\
				 & ~AARCH64_FEATBIT (X, V8A)	\
				 & ~AARCH64_FEATBIT (X, LOR))

/* Armv9.0-A builds on Armv8.5-A, adding FP16 and SVE/SVE2.  */
#define AARCH64_ARCH_V9A(X)	(AARCH64_ARCH_V8_5A (X)		\
				 | AARCH64_ARCH_V9A_FEATURES (X))
#define AARCH64_ARCH_V9_1A(X)	(AARCH64_ARCH_V9A (X)		\
				 | AARCH64_ARCH_V9_1A_FEATURES (X))
#define AARCH64_ARCH_V9_2A(X)	(AARCH64_ARCH_V9_1A (X)		\
				 | AARCH64_ARCH_V9_2A_FEATURES (X))
#define AARCH64_ARCH_V9_3A(X)	(AARCH64_ARCH_V9_2A (X)		\
				 | AARCH64_ARCH_V9_3A_FEATURES (X))
#define AARCH64_ARCH_V9_4A(X)	(AARCH64_ARCH_V9_3A (X)		\
				 | AARCH64_ARCH_V9_4A_FEATURES (X))

/* The empty architecture: no feature bits at all.  */
#define AARCH64_ARCH_NONE(X)	0

/* CPU-specific features.  */
typedef struct {
  /* One bit per aarch64_feature_bit value: bit B%64 of word B/64 is set
     when feature B is present.  */
  uint64_t flags[(AARCH64_NUM_FEATURES + 63) / 64];
} aarch64_feature_set;

/* Nonzero if CPU implements feature AARCH64_FEATURE_<FEAT>.
   NOTE: this and the macros below hard-code a two-word flags array
   (indices 0 and 1); they must be extended if AARCH64_NUM_FEATURES
   ever exceeds 128.  */
#define AARCH64_CPU_HAS_FEATURE(CPU,FEAT)			\
  ((~(CPU).flags[0] & AARCH64_FEATBIT (0, FEAT)) == 0		\
   && (~(CPU).flags[1] & AARCH64_FEATBIT (1, FEAT)) == 0)

/* Nonzero if CPU implements every feature in the set FEAT.  */
#define AARCH64_CPU_HAS_ALL_FEATURES(CPU,FEAT)			\
  ((~(CPU).flags[0] & (FEAT).flags[0]) == 0			\
   && (~(CPU).flags[1] & (FEAT).flags[1]) == 0)

/* Nonzero if CPU implements at least one feature in the set FEAT.  */
#define AARCH64_CPU_HAS_ANY_FEATURES(CPU,FEAT)			\
  (((CPU).flags[0] & (FEAT).flags[0]) != 0			\
   || ((CPU).flags[1] & (FEAT).flags[1]) != 0)

/* Set DEST to exactly the features named by the function-like macro FEAT,
   e.g. AARCH64_SET_FEATURE (set, AARCH64_ARCH_V8A).  */
#define AARCH64_SET_FEATURE(DEST, FEAT) \
  ((DEST).flags[0] = FEAT (0),		\
   (DEST).flags[1] = FEAT (1))

/* Copy SRC to DEST, clearing the single feature AARCH64_FEATURE_<FEAT>.  */
#define AARCH64_CLEAR_FEATURE(DEST, SRC, FEAT)				\
  ((DEST).flags[0] = (SRC).flags[0] & ~AARCH64_FEATBIT (0, FEAT),	\
   (DEST).flags[1] = (SRC).flags[1] & ~AARCH64_FEATBIT (1, FEAT))

/* TARG = F1 | F2 (set union).  */
#define AARCH64_MERGE_FEATURE_SETS(TARG,F1,F2)		\
  do							\
    {							\
      (TARG).flags[0] = (F1).flags[0] | (F2).flags[0];	\
      (TARG).flags[1] = (F1).flags[1] | (F2).flags[1];	\
    }							\
  while (0)

/* TARG = F1 & ~F2 (set difference).  */
#define AARCH64_CLEAR_FEATURES(TARG,F1,F2)		\
  do							\
    {							\
      (TARG).flags[0] = (F1).flags[0] &~ (F2).flags[0];	\
      (TARG).flags[1] = (F1).flags[1] &~ (F2).flags[1];	\
    }							\
  while (0)

/* aarch64_feature_set initializers for no features and all features,
   respectively.  */
#define AARCH64_NO_FEATURES { { 0, 0 } }
#define AARCH64_ALL_FEATURES { { -1, -1 } }
370
/* An aarch64_feature_set initializer for a single feature,
   AARCH64_FEATURE_<FEAT>.  */
#define AARCH64_FEATURE(FEAT) \
  { { AARCH64_FEATBIT (0, FEAT), AARCH64_FEATBIT (1, FEAT) } }

/* An aarch64_feature_set initializer for a specific architecture version,
   including all the features that are enabled by default for that architecture
   version.  ARCH is a suffix of one of the AARCH64_ARCH_* macros above,
   e.g. AARCH64_ARCH_FEATURES (V8_2A).  */
#define AARCH64_ARCH_FEATURES(ARCH) \
  { { AARCH64_ARCH_##ARCH (0), AARCH64_ARCH_##ARCH (1) } }

/* Used by AARCH64_CPU_FEATURES.  AARCH64_OR_FEATURES_<N> ORs the masks of
   N named features into the word-X mask of architecture ARCH, peeling one
   feature name off the argument list per step and delegating the rest to
   the _<N-1> form.  */
#define AARCH64_OR_FEATURES_1(X, ARCH, F1) \
  (AARCH64_FEATBIT (X, F1) | AARCH64_ARCH_##ARCH (X))
#define AARCH64_OR_FEATURES_2(X, ARCH, F1, F2) \
  (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_1 (X, ARCH, F2))
#define AARCH64_OR_FEATURES_3(X, ARCH, F1, ...) \
  (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_2 (X, ARCH, __VA_ARGS__))
#define AARCH64_OR_FEATURES_4(X, ARCH, F1, ...) \
  (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_3 (X, ARCH, __VA_ARGS__))
#define AARCH64_OR_FEATURES_5(X, ARCH, F1, ...) \
  (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_4 (X, ARCH, __VA_ARGS__))
#define AARCH64_OR_FEATURES_6(X, ARCH, F1, ...) \
  (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_5 (X, ARCH, __VA_ARGS__))
#define AARCH64_OR_FEATURES_7(X, ARCH, F1, ...) \
  (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_6 (X, ARCH, __VA_ARGS__))
#define AARCH64_OR_FEATURES_8(X, ARCH, F1, ...) \
  (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_7 (X, ARCH, __VA_ARGS__))
#define AARCH64_OR_FEATURES_9(X, ARCH, F1, ...) \
  (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_8 (X, ARCH, __VA_ARGS__))

/* An aarch64_feature_set initializer for a CPU that implements architecture
   version ARCH, and additionally provides the N features listed in "...".
   N must equal the number of variadic arguments (at most 9 here).  */
#define AARCH64_CPU_FEATURES(ARCH, N, ...)		\
  { { AARCH64_OR_FEATURES_##N (0, ARCH, __VA_ARGS__),	\
      AARCH64_OR_FEATURES_##N (1, ARCH, __VA_ARGS__) } }

/* An aarch64_feature_set initializer for the N features listed in "...",
   with no base architecture.  */
#define AARCH64_FEATURES(N, ...) \
  AARCH64_CPU_FEATURES (NONE, N, __VA_ARGS__)
411
/* Broad classes of operand; each AARCH64_OPND_* code below belongs to
   exactly one class.  */
enum aarch64_operand_class
{
  AARCH64_OPND_CLASS_NIL,		/* No operand.  */
  AARCH64_OPND_CLASS_INT_REG,		/* Integer register.  */
  AARCH64_OPND_CLASS_MODIFIED_REG,	/* Extended/shifted register.  */
  AARCH64_OPND_CLASS_FP_REG,		/* Floating-point register.  */
  AARCH64_OPND_CLASS_SIMD_REG,		/* AdvSIMD vector register.  */
  AARCH64_OPND_CLASS_SIMD_ELEMENT,	/* AdvSIMD vector element.  */
  AARCH64_OPND_CLASS_SISD_REG,		/* AdvSIMD scalar register.  */
  AARCH64_OPND_CLASS_SIMD_REGLIST,	/* AdvSIMD vector register list.  */
  AARCH64_OPND_CLASS_SVE_REG,		/* SVE vector register.  */
  AARCH64_OPND_CLASS_SVE_REGLIST,	/* SVE vector register list.  */
  AARCH64_OPND_CLASS_PRED_REG,		/* SVE predicate register.  */
  AARCH64_OPND_CLASS_ZA_ACCESS,		/* SME ZA tile/array access.  */
  AARCH64_OPND_CLASS_ADDRESS,		/* Memory address.  */
  AARCH64_OPND_CLASS_IMMEDIATE,		/* Immediate value.  */
  AARCH64_OPND_CLASS_SYSTEM,		/* System register or operation.  */
  AARCH64_OPND_CLASS_COND,		/* Condition code.  */
};
431
432 /* Operand code that helps both parsing and coding.
433 Keep AARCH64_OPERANDS synced. */
434
435 enum aarch64_opnd
436 {
437 AARCH64_OPND_NIL, /* no operand---MUST BE FIRST!*/
438
439 AARCH64_OPND_Rd, /* Integer register as destination. */
440 AARCH64_OPND_Rn, /* Integer register as source. */
441 AARCH64_OPND_Rm, /* Integer register as source. */
442 AARCH64_OPND_Rt, /* Integer register used in ld/st instructions. */
443 AARCH64_OPND_Rt2, /* Integer register used in ld/st pair instructions. */
444 AARCH64_OPND_X16, /* Integer register x16 in chkfeat instruction. */
445 AARCH64_OPND_Rt_LS64, /* Integer register used in LS64 instructions. */
446 AARCH64_OPND_Rt_SP, /* Integer Rt or SP used in STG instructions. */
447 AARCH64_OPND_Rs, /* Integer register used in ld/st exclusive. */
448 AARCH64_OPND_Ra, /* Integer register used in ddp_3src instructions. */
449 AARCH64_OPND_Rt_SYS, /* Integer register used in system instructions. */
450
451 AARCH64_OPND_Rd_SP, /* Integer Rd or SP. */
452 AARCH64_OPND_Rn_SP, /* Integer Rn or SP. */
453 AARCH64_OPND_Rm_SP, /* Integer Rm or SP. */
454 AARCH64_OPND_PAIRREG, /* Paired register operand. */
455 AARCH64_OPND_Rm_EXT, /* Integer Rm extended. */
456 AARCH64_OPND_Rm_SFT, /* Integer Rm shifted. */
457
458 AARCH64_OPND_Fd, /* Floating-point Fd. */
459 AARCH64_OPND_Fn, /* Floating-point Fn. */
460 AARCH64_OPND_Fm, /* Floating-point Fm. */
461 AARCH64_OPND_Fa, /* Floating-point Fa. */
462 AARCH64_OPND_Ft, /* Floating-point Ft. */
463 AARCH64_OPND_Ft2, /* Floating-point Ft2. */
464
465 AARCH64_OPND_Sd, /* AdvSIMD Scalar Sd. */
466 AARCH64_OPND_Sn, /* AdvSIMD Scalar Sn. */
467 AARCH64_OPND_Sm, /* AdvSIMD Scalar Sm. */
468
469 AARCH64_OPND_Va, /* AdvSIMD Vector Va. */
470 AARCH64_OPND_Vd, /* AdvSIMD Vector Vd. */
471 AARCH64_OPND_Vn, /* AdvSIMD Vector Vn. */
472 AARCH64_OPND_Vm, /* AdvSIMD Vector Vm. */
473 AARCH64_OPND_VdD1, /* AdvSIMD <Vd>.D[1]; for FMOV only. */
474 AARCH64_OPND_VnD1, /* AdvSIMD <Vn>.D[1]; for FMOV only. */
475 AARCH64_OPND_Ed, /* AdvSIMD Vector Element Vd. */
476 AARCH64_OPND_En, /* AdvSIMD Vector Element Vn. */
477 AARCH64_OPND_Em, /* AdvSIMD Vector Element Vm. */
478 AARCH64_OPND_Em16, /* AdvSIMD Vector Element Vm restricted to V0 - V15 when
479 qualifier is S_H. */
480 AARCH64_OPND_LVn, /* AdvSIMD Vector register list used in e.g. TBL. */
481 AARCH64_OPND_LVt, /* AdvSIMD Vector register list used in ld/st. */
482 AARCH64_OPND_LVt_AL, /* AdvSIMD Vector register list for loading single
483 structure to all lanes. */
484 AARCH64_OPND_LEt, /* AdvSIMD Vector Element list. */
485
486 AARCH64_OPND_CRn, /* Co-processor register in CRn field. */
487 AARCH64_OPND_CRm, /* Co-processor register in CRm field. */
488
489 AARCH64_OPND_IDX, /* AdvSIMD EXT index operand. */
490 AARCH64_OPND_MASK, /* AdvSIMD EXT index operand. */
491 AARCH64_OPND_IMM_VLSL,/* Immediate for shifting vector registers left. */
492 AARCH64_OPND_IMM_VLSR,/* Immediate for shifting vector registers right. */
493 AARCH64_OPND_SIMD_IMM,/* AdvSIMD modified immediate without shift. */
494 AARCH64_OPND_SIMD_IMM_SFT, /* AdvSIMD modified immediate with shift. */
495 AARCH64_OPND_SIMD_FPIMM,/* AdvSIMD 8-bit fp immediate. */
496 AARCH64_OPND_SHLL_IMM,/* Immediate shift for AdvSIMD SHLL instruction
497 (no encoding). */
498 AARCH64_OPND_IMM0, /* Immediate for #0. */
499 AARCH64_OPND_FPIMM0, /* Immediate for #0.0. */
500 AARCH64_OPND_FPIMM, /* Floating-point Immediate. */
501 AARCH64_OPND_IMMR, /* Immediate #<immr> in e.g. BFM. */
502 AARCH64_OPND_IMMS, /* Immediate #<imms> in e.g. BFM. */
503 AARCH64_OPND_WIDTH, /* Immediate #<width> in e.g. BFI. */
504 AARCH64_OPND_IMM, /* Immediate. */
505 AARCH64_OPND_IMM_2, /* Immediate. */
506 AARCH64_OPND_UIMM3_OP1,/* Unsigned 3-bit immediate in the op1 field. */
507 AARCH64_OPND_UIMM3_OP2,/* Unsigned 3-bit immediate in the op2 field. */
508 AARCH64_OPND_UIMM4, /* Unsigned 4-bit immediate in the CRm field. */
509 AARCH64_OPND_UIMM4_ADDG,/* Unsigned 4-bit immediate in addg/subg. */
510 AARCH64_OPND_UIMM7, /* Unsigned 7-bit immediate in the CRm:op2 fields. */
511 AARCH64_OPND_UIMM10, /* Unsigned 10-bit immediate in addg/subg. */
512 AARCH64_OPND_BIT_NUM, /* Immediate. */
513 AARCH64_OPND_EXCEPTION,/* imm16 operand in exception instructions. */
514 AARCH64_OPND_UNDEFINED,/* imm16 operand in undefined instruction. */
515 AARCH64_OPND_CCMP_IMM,/* Immediate in conditional compare instructions. */
516 AARCH64_OPND_SIMM5, /* 5-bit signed immediate in the imm5 field. */
517 AARCH64_OPND_NZCV, /* Flag bit specifier giving an alternative value for
518 each condition flag. */
519
520 AARCH64_OPND_LIMM, /* Logical Immediate. */
521 AARCH64_OPND_AIMM, /* Arithmetic immediate. */
522 AARCH64_OPND_HALF, /* #<imm16>{, LSL #<shift>} operand in move wide. */
523 AARCH64_OPND_FBITS, /* FP #<fbits> operand in e.g. SCVTF */
524 AARCH64_OPND_IMM_MOV, /* Immediate operand for the MOV alias. */
525 AARCH64_OPND_IMM_ROT1, /* Immediate rotate operand for FCMLA. */
526 AARCH64_OPND_IMM_ROT2, /* Immediate rotate operand for indexed FCMLA. */
527 AARCH64_OPND_IMM_ROT3, /* Immediate rotate operand for FCADD. */
528
529 AARCH64_OPND_COND, /* Standard condition as the last operand. */
530 AARCH64_OPND_COND1, /* Same as the above, but excluding AL and NV. */
531
532 AARCH64_OPND_ADDR_ADRP, /* Memory address for ADRP */
533 AARCH64_OPND_ADDR_PCREL14, /* 14-bit PC-relative address for e.g. TBZ. */
534 AARCH64_OPND_ADDR_PCREL19, /* 19-bit PC-relative address for e.g. LDR. */
535 AARCH64_OPND_ADDR_PCREL21, /* 21-bit PC-relative address for e.g. ADR. */
536 AARCH64_OPND_ADDR_PCREL26, /* 26-bit PC-relative address for e.g. BL. */
537
538 AARCH64_OPND_ADDR_SIMPLE, /* Address of ld/st exclusive. */
539 AARCH64_OPND_ADDR_REGOFF, /* Address of register offset. */
540 AARCH64_OPND_ADDR_SIMM7, /* Address of signed 7-bit immediate. */
541 AARCH64_OPND_ADDR_SIMM9, /* Address of signed 9-bit immediate. */
542 AARCH64_OPND_ADDR_SIMM9_2, /* Same as the above, but the immediate is
543 negative or unaligned and there is
544 no writeback allowed. This operand code
545 is only used to support the programmer-
546 friendly feature of using LDR/STR as the
547 the mnemonic name for LDUR/STUR instructions
548 wherever there is no ambiguity. */
549 AARCH64_OPND_ADDR_SIMM10, /* Address of signed 10-bit immediate. */
550 AARCH64_OPND_ADDR_SIMM11, /* Address with a signed 11-bit (multiple of
551 16) immediate. */
552 AARCH64_OPND_ADDR_UIMM12, /* Address of unsigned 12-bit immediate. */
553 AARCH64_OPND_ADDR_SIMM13, /* Address with a signed 13-bit (multiple of
554 16) immediate. */
555 AARCH64_OPND_SIMD_ADDR_SIMPLE,/* Address of ld/st multiple structures. */
556 AARCH64_OPND_ADDR_OFFSET, /* Address with an optional 9-bit immediate. */
557 AARCH64_OPND_SIMD_ADDR_POST, /* Address of ld/st multiple post-indexed. */
558
559 AARCH64_OPND_SYSREG, /* System register operand. */
560 AARCH64_OPND_PSTATEFIELD, /* PSTATE field name operand. */
561 AARCH64_OPND_SYSREG_AT, /* System register <at_op> operand. */
562 AARCH64_OPND_SYSREG_DC, /* System register <dc_op> operand. */
563 AARCH64_OPND_SYSREG_IC, /* System register <ic_op> operand. */
564 AARCH64_OPND_SYSREG_TLBI, /* System register <tlbi_op> operand. */
565 AARCH64_OPND_SYSREG_SR, /* System register RCTX operand. */
566 AARCH64_OPND_BARRIER, /* Barrier operand. */
567 AARCH64_OPND_BARRIER_DSB_NXS, /* Barrier operand for DSB nXS variant. */
568 AARCH64_OPND_BARRIER_ISB, /* Barrier operand for ISB. */
569 AARCH64_OPND_PRFOP, /* Prefetch operation. */
570 AARCH64_OPND_RPRFMOP, /* Range prefetch operation. */
571 AARCH64_OPND_BARRIER_PSB, /* Barrier operand for PSB. */
572 AARCH64_OPND_BARRIER_GCSB, /* Barrier operand for GCSB. */
573 AARCH64_OPND_BTI_TARGET, /* BTI {<target>}. */
574 AARCH64_OPND_LSE128_Rt, /* LSE128 <Xt1>. */
575 AARCH64_OPND_LSE128_Rt2, /* LSE128 <Xt2>. */
576 AARCH64_OPND_SVE_ADDR_RI_S4x16, /* SVE [<Xn|SP>, #<simm4>*16]. */
577 AARCH64_OPND_SVE_ADDR_RI_S4x32, /* SVE [<Xn|SP>, #<simm4>*32]. */
578 AARCH64_OPND_SVE_ADDR_RI_S4xVL, /* SVE [<Xn|SP>, #<simm4>, MUL VL]. */
579 AARCH64_OPND_SVE_ADDR_RI_S4x2xVL, /* SVE [<Xn|SP>, #<simm4>*2, MUL VL]. */
580 AARCH64_OPND_SVE_ADDR_RI_S4x3xVL, /* SVE [<Xn|SP>, #<simm4>*3, MUL VL]. */
581 AARCH64_OPND_SVE_ADDR_RI_S4x4xVL, /* SVE [<Xn|SP>, #<simm4>*4, MUL VL]. */
582 AARCH64_OPND_SVE_ADDR_RI_S6xVL, /* SVE [<Xn|SP>, #<simm6>, MUL VL]. */
583 AARCH64_OPND_SVE_ADDR_RI_S9xVL, /* SVE [<Xn|SP>, #<simm9>, MUL VL]. */
584 AARCH64_OPND_SVE_ADDR_RI_U6, /* SVE [<Xn|SP>, #<uimm6>]. */
585 AARCH64_OPND_SVE_ADDR_RI_U6x2, /* SVE [<Xn|SP>, #<uimm6>*2]. */
586 AARCH64_OPND_SVE_ADDR_RI_U6x4, /* SVE [<Xn|SP>, #<uimm6>*4]. */
587 AARCH64_OPND_SVE_ADDR_RI_U6x8, /* SVE [<Xn|SP>, #<uimm6>*8]. */
588 AARCH64_OPND_SVE_ADDR_R, /* SVE [<Xn|SP>]. */
589 AARCH64_OPND_SVE_ADDR_RR, /* SVE [<Xn|SP>, <Xm|XZR>]. */
590 AARCH64_OPND_SVE_ADDR_RR_LSL1, /* SVE [<Xn|SP>, <Xm|XZR>, LSL #1]. */
591 AARCH64_OPND_SVE_ADDR_RR_LSL2, /* SVE [<Xn|SP>, <Xm|XZR>, LSL #2]. */
592 AARCH64_OPND_SVE_ADDR_RR_LSL3, /* SVE [<Xn|SP>, <Xm|XZR>, LSL #3]. */
593 AARCH64_OPND_SVE_ADDR_RR_LSL4, /* SVE [<Xn|SP>, <Xm|XZR>, LSL #4]. */
594 AARCH64_OPND_SVE_ADDR_RX, /* SVE [<Xn|SP>, <Xm>]. */
595 AARCH64_OPND_SVE_ADDR_RX_LSL1, /* SVE [<Xn|SP>, <Xm>, LSL #1]. */
596 AARCH64_OPND_SVE_ADDR_RX_LSL2, /* SVE [<Xn|SP>, <Xm>, LSL #2]. */
597 AARCH64_OPND_SVE_ADDR_RX_LSL3, /* SVE [<Xn|SP>, <Xm>, LSL #3]. */
598 AARCH64_OPND_SVE_ADDR_ZX, /* SVE [Zn.<T>{, <Xm>}]. */
599 AARCH64_OPND_SVE_ADDR_RZ, /* SVE [<Xn|SP>, Zm.D]. */
600 AARCH64_OPND_SVE_ADDR_RZ_LSL1, /* SVE [<Xn|SP>, Zm.D, LSL #1]. */
601 AARCH64_OPND_SVE_ADDR_RZ_LSL2, /* SVE [<Xn|SP>, Zm.D, LSL #2]. */
602 AARCH64_OPND_SVE_ADDR_RZ_LSL3, /* SVE [<Xn|SP>, Zm.D, LSL #3]. */
603 AARCH64_OPND_SVE_ADDR_RZ_XTW_14, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW].
604 Bit 14 controls S/U choice. */
605 AARCH64_OPND_SVE_ADDR_RZ_XTW_22, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW].
606 Bit 22 controls S/U choice. */
607 AARCH64_OPND_SVE_ADDR_RZ_XTW1_14, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #1].
608 Bit 14 controls S/U choice. */
609 AARCH64_OPND_SVE_ADDR_RZ_XTW1_22, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #1].
610 Bit 22 controls S/U choice. */
611 AARCH64_OPND_SVE_ADDR_RZ_XTW2_14, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #2].
612 Bit 14 controls S/U choice. */
613 AARCH64_OPND_SVE_ADDR_RZ_XTW2_22, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #2].
614 Bit 22 controls S/U choice. */
615 AARCH64_OPND_SVE_ADDR_RZ_XTW3_14, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #3].
616 Bit 14 controls S/U choice. */
617 AARCH64_OPND_SVE_ADDR_RZ_XTW3_22, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #3].
618 Bit 22 controls S/U choice. */
619 AARCH64_OPND_SVE_ADDR_ZI_U5, /* SVE [Zn.<T>, #<uimm5>]. */
620 AARCH64_OPND_SVE_ADDR_ZI_U5x2, /* SVE [Zn.<T>, #<uimm5>*2]. */
621 AARCH64_OPND_SVE_ADDR_ZI_U5x4, /* SVE [Zn.<T>, #<uimm5>*4]. */
622 AARCH64_OPND_SVE_ADDR_ZI_U5x8, /* SVE [Zn.<T>, #<uimm5>*8]. */
623 AARCH64_OPND_SVE_ADDR_ZZ_LSL, /* SVE [Zn.<T>, Zm,<T>, LSL #<msz>]. */
624 AARCH64_OPND_SVE_ADDR_ZZ_SXTW, /* SVE [Zn.<T>, Zm,<T>, SXTW #<msz>]. */
625 AARCH64_OPND_SVE_ADDR_ZZ_UXTW, /* SVE [Zn.<T>, Zm,<T>, UXTW #<msz>]. */
626 AARCH64_OPND_SVE_AIMM, /* SVE unsigned arithmetic immediate. */
627 AARCH64_OPND_SVE_ASIMM, /* SVE signed arithmetic immediate. */
628 AARCH64_OPND_SVE_FPIMM8, /* SVE 8-bit floating-point immediate. */
629 AARCH64_OPND_SVE_I1_HALF_ONE, /* SVE choice between 0.5 and 1.0. */
630 AARCH64_OPND_SVE_I1_HALF_TWO, /* SVE choice between 0.5 and 2.0. */
631 AARCH64_OPND_SVE_I1_ZERO_ONE, /* SVE choice between 0.0 and 1.0. */
632 AARCH64_OPND_SVE_IMM_ROT1, /* SVE 1-bit rotate operand (90 or 270). */
633 AARCH64_OPND_SVE_IMM_ROT2, /* SVE 2-bit rotate operand (N*90). */
634 AARCH64_OPND_SVE_IMM_ROT3, /* SVE cadd 1-bit rotate (90 or 270). */
635 AARCH64_OPND_SVE_INV_LIMM, /* SVE inverted logical immediate. */
636 AARCH64_OPND_SVE_LIMM, /* SVE logical immediate. */
637 AARCH64_OPND_SVE_LIMM_MOV, /* SVE logical immediate for MOV. */
638 AARCH64_OPND_SVE_PATTERN, /* SVE vector pattern enumeration. */
639 AARCH64_OPND_SVE_PATTERN_SCALED, /* Likewise, with additional MUL factor. */
640 AARCH64_OPND_SVE_PRFOP, /* SVE prefetch operation. */
641 AARCH64_OPND_SVE_Pd, /* SVE p0-p15 in Pd. */
642 AARCH64_OPND_SVE_PNd, /* SVE pn0-pn15 in Pd. */
643 AARCH64_OPND_SVE_Pg3, /* SVE p0-p7 in Pg. */
644 AARCH64_OPND_SVE_Pg4_5, /* SVE p0-p15 in Pg, bits [8,5]. */
645 AARCH64_OPND_SVE_Pg4_10, /* SVE p0-p15 in Pg, bits [13,10]. */
646 AARCH64_OPND_SVE_PNg4_10, /* SVE pn0-pn15 in Pg, bits [13,10]. */
647 AARCH64_OPND_SVE_Pg4_16, /* SVE p0-p15 in Pg, bits [19,16]. */
648 AARCH64_OPND_SVE_Pm, /* SVE p0-p15 in Pm. */
649 AARCH64_OPND_SVE_Pn, /* SVE p0-p15 in Pn. */
650 AARCH64_OPND_SVE_PNn, /* SVE pn0-pn15 in Pn. */
651 AARCH64_OPND_SVE_Pt, /* SVE p0-p15 in Pt. */
652 AARCH64_OPND_SVE_PNt, /* SVE pn0-pn15 in Pt. */
653 AARCH64_OPND_SVE_Rm, /* Integer Rm or ZR, alt. SVE position. */
654 AARCH64_OPND_SVE_Rn_SP, /* Integer Rn or SP, alt. SVE position. */
655 AARCH64_OPND_SVE_SHLIMM_PRED, /* SVE shift left amount (predicated). */
656 AARCH64_OPND_SVE_SHLIMM_UNPRED, /* SVE shift left amount (unpredicated). */
657 AARCH64_OPND_SVE_SHLIMM_UNPRED_22, /* SVE 3 bit shift left unpred. */
658 AARCH64_OPND_SVE_SHRIMM_PRED, /* SVE shift right amount (predicated). */
659 AARCH64_OPND_SVE_SHRIMM_UNPRED, /* SVE shift right amount (unpredicated). */
660 AARCH64_OPND_SVE_SHRIMM_UNPRED_22, /* SVE 3 bit shift right unpred. */
661 AARCH64_OPND_SVE_SIMM5, /* SVE signed 5-bit immediate. */
662 AARCH64_OPND_SVE_SIMM5B, /* SVE secondary signed 5-bit immediate. */
663 AARCH64_OPND_SVE_SIMM6, /* SVE signed 6-bit immediate. */
664 AARCH64_OPND_SVE_SIMM8, /* SVE signed 8-bit immediate. */
665 AARCH64_OPND_SVE_UIMM3, /* SVE unsigned 3-bit immediate. */
666 AARCH64_OPND_SVE_UIMM7, /* SVE unsigned 7-bit immediate. */
667 AARCH64_OPND_SVE_UIMM8, /* SVE unsigned 8-bit immediate. */
668 AARCH64_OPND_SVE_UIMM8_53, /* SVE split unsigned 8-bit immediate. */
669 AARCH64_OPND_SVE_VZn, /* Scalar SIMD&FP register in Zn field. */
670 AARCH64_OPND_SVE_Vd, /* Scalar SIMD&FP register in Vd. */
671 AARCH64_OPND_SVE_Vm, /* Scalar SIMD&FP register in Vm. */
672 AARCH64_OPND_SVE_Vn, /* Scalar SIMD&FP register in Vn. */
673 AARCH64_OPND_SVE_Za_5, /* SVE vector register in Za, bits [9,5]. */
674 AARCH64_OPND_SVE_Za_16, /* SVE vector register in Za, bits [20,16]. */
675 AARCH64_OPND_SVE_Zd, /* SVE vector register in Zd. */
676 AARCH64_OPND_SVE_Zm_5, /* SVE vector register in Zm, bits [9,5]. */
677 AARCH64_OPND_SVE_Zm_16, /* SVE vector register in Zm, bits [20,16]. */
678 AARCH64_OPND_SVE_Zm3_INDEX, /* z0-z7[0-3] in Zm, bits [20,16]. */
679 AARCH64_OPND_SVE_Zm3_11_INDEX, /* z0-z7[0-7] in Zm3_INDEX plus bit 11. */
680 AARCH64_OPND_SVE_Zm3_19_INDEX, /* z0-z7[0-3] in Zm3_INDEX plus bit 19. */
681 AARCH64_OPND_SVE_Zm3_22_INDEX, /* z0-z7[0-7] in Zm3_INDEX plus bit 22. */
682 AARCH64_OPND_SVE_Zm4_11_INDEX, /* z0-z15[0-3] in Zm plus bit 11. */
683 AARCH64_OPND_SVE_Zm4_INDEX, /* z0-z15[0-1] in Zm, bits [20,16]. */
684 AARCH64_OPND_SVE_Zn, /* SVE vector register in Zn. */
685 AARCH64_OPND_SVE_Zn_INDEX, /* Indexed SVE vector register, for DUP. */
686 AARCH64_OPND_SVE_ZnxN, /* SVE vector register list in Zn. */
687 AARCH64_OPND_SVE_Zt, /* SVE vector register in Zt. */
688 AARCH64_OPND_SVE_ZtxN, /* SVE vector register list in Zt. */
689 AARCH64_OPND_SME_Zdnx2, /* SVE vector register list from [4:1]*2. */
690 AARCH64_OPND_SME_Zdnx4, /* SVE vector register list from [4:2]*4. */
691 AARCH64_OPND_SME_Zm, /* SVE vector register list in 4-bit Zm. */
692 AARCH64_OPND_SME_Zmx2, /* SVE vector register list from [20:17]*2. */
693 AARCH64_OPND_SME_Zmx4, /* SVE vector register list from [20:18]*4. */
694 AARCH64_OPND_SME_Znx2, /* SVE vector register list from [9:6]*2. */
695 AARCH64_OPND_SME_Znx4, /* SVE vector register list from [9:7]*4. */
696 AARCH64_OPND_SME_Ztx2_STRIDED, /* SVE vector register list in [4:0]&23. */
697 AARCH64_OPND_SME_Ztx4_STRIDED, /* SVE vector register list in [4:0]&19. */
698 AARCH64_OPND_SME_ZAda_2b, /* SME <ZAda>.S, 2-bits. */
699 AARCH64_OPND_SME_ZAda_3b, /* SME <ZAda>.D, 3-bits. */
700 AARCH64_OPND_SME_ZA_HV_idx_src, /* SME source ZA tile vector. */
701 AARCH64_OPND_SME_ZA_HV_idx_srcxN, /* SME N source ZA tile vectors. */
702 AARCH64_OPND_SME_ZA_HV_idx_dest, /* SME destination ZA tile vector. */
703 AARCH64_OPND_SME_ZA_HV_idx_destxN, /* SME N dest ZA tile vectors. */
704 AARCH64_OPND_SME_Pdx2, /* Predicate register list in [3:1]. */
705 AARCH64_OPND_SME_PdxN, /* Predicate register list in [3:0]. */
706 AARCH64_OPND_SME_Pm, /* SME scalable predicate register, bits [15:13]. */
707 AARCH64_OPND_SME_PNd3, /* Predicate-as-counter register, bits [3:0]. */
708 AARCH64_OPND_SME_PNg3, /* Predicate-as-counter register, bits [12:10]. */
709 AARCH64_OPND_SME_PNn, /* Predicate-as-counter register, bits [8:5]. */
710 AARCH64_OPND_SME_PNn3_INDEX1, /* Indexed pred-as-counter reg, bits [8:5]. */
711 AARCH64_OPND_SME_PNn3_INDEX2, /* Indexed pred-as-counter reg, bits [9:5]. */
712 AARCH64_OPND_SME_list_of_64bit_tiles, /* SME list of ZA tiles. */
713 AARCH64_OPND_SME_ZA_HV_idx_ldstr, /* SME destination ZA tile vector. */
714 AARCH64_OPND_SME_ZA_array_off1x4, /* SME ZA[<Wv>, #<imm1>*4:<imm1>*4+3]. */
715 AARCH64_OPND_SME_ZA_array_off2x2, /* SME ZA[<Wv>, #<imm2>*2:<imm2>*2+1]. */
716 AARCH64_OPND_SME_ZA_array_off2x4, /* SME ZA[<Wv>, #<imm2>*4:<imm2>*4+3]. */
717 AARCH64_OPND_SME_ZA_array_off3_0, /* SME ZA[<Wv>{, #<imm3>}]. */
718 AARCH64_OPND_SME_ZA_array_off3_5, /* SME ZA[<Wv>{, #<imm3>}]. */
719 AARCH64_OPND_SME_ZA_array_off3x2, /* SME ZA[<Wv>, #<imm3>*2:<imm3>*2+1]. */
720 AARCH64_OPND_SME_ZA_array_off4, /* SME ZA[<Wv>{, #<imm>}]. */
721 AARCH64_OPND_SME_ADDR_RI_U4xVL, /* SME [<Xn|SP>{, #<imm>, MUL VL}]. */
722 AARCH64_OPND_SME_SM_ZA, /* SME {SM | ZA}. */
723 AARCH64_OPND_SME_PnT_Wm_imm, /* SME <Pn>.<T>[<Wm>, #<imm>]. */
724 AARCH64_OPND_SME_SHRIMM4, /* 4-bit right shift, bits [19:16]. */
725 AARCH64_OPND_SME_SHRIMM5, /* size + 5-bit right shift, bits [23:22,20:16]. */
726 AARCH64_OPND_SME_Zm_INDEX1, /* Zn.T[index], bits [19:16,10]. */
727 AARCH64_OPND_SME_Zm_INDEX2, /* Zn.T[index], bits [19:16,11:10]. */
728 AARCH64_OPND_SME_Zm_INDEX3_1, /* Zn.T[index], bits [19:16,10,2:1]. */
729 AARCH64_OPND_SME_Zm_INDEX3_2, /* Zn.T[index], bits [19:16,11:10,2]. */
730 AARCH64_OPND_SME_Zm_INDEX3_10, /* Zn.T[index], bits [19:16,15,11:10]. */
731 AARCH64_OPND_SME_Zm_INDEX4_1, /* Zn.T[index], bits [19:16,11:10,2:1]. */
732 AARCH64_OPND_SME_Zm_INDEX4_10, /* Zn.T[index], bits [19:16,15,12:10]. */
733 AARCH64_OPND_SME_Zn_INDEX1_16, /* Zn[index], bits [9:5] and [16:16]. */
734 AARCH64_OPND_SME_Zn_INDEX2_15, /* Zn[index], bits [9:5] and [16:15]. */
735 AARCH64_OPND_SME_Zn_INDEX2_16, /* Zn[index], bits [9:5] and [17:16]. */
736 AARCH64_OPND_SME_Zn_INDEX3_14, /* Zn[index], bits [9:5] and [16:14]. */
737 AARCH64_OPND_SME_Zn_INDEX3_15, /* Zn[index], bits [9:5] and [17:15]. */
738 AARCH64_OPND_SME_Zn_INDEX4_14, /* Zn[index], bits [9:5] and [17:14]. */
739 AARCH64_OPND_SME_VLxN_10, /* VLx2 or VLx4, in bit 10. */
740 AARCH64_OPND_SME_VLxN_13, /* VLx2 or VLx4, in bit 13. */
741 AARCH64_OPND_SME_ZT0, /* The fixed token zt0/ZT0 (not encoded). */
742 AARCH64_OPND_SME_ZT0_INDEX, /* ZT0[<imm>], bits [14:12]. */
743 AARCH64_OPND_SME_ZT0_LIST, /* { zt0/ZT0 } (not encoded). */
744 AARCH64_OPND_TME_UIMM16, /* TME unsigned 16-bit immediate. */
745 AARCH64_OPND_SM3_IMM2, /* SM3 encodes lane in bits [13, 14]. */
746 AARCH64_OPND_MOPS_ADDR_Rd, /* [Rd]!, in bits [0, 4]. */
747 AARCH64_OPND_MOPS_ADDR_Rs, /* [Rs]!, in bits [16, 20]. */
748 AARCH64_OPND_MOPS_WB_Rn, /* Rn!, in bits [5, 9]. */
749 AARCH64_OPND_CSSC_SIMM8, /* CSSC signed 8-bit immediate. */
750 AARCH64_OPND_CSSC_UIMM8, /* CSSC unsigned 8-bit immediate. */
751 };
752
753 /* Qualifier constrains an operand. It either specifies a variant of an
754 operand type or limits values available to an operand type.
755
756 N.B. Order is important; keep aarch64_opnd_qualifiers synced. */
757
enum aarch64_opnd_qualifier
{
  /* Indicating no further qualification on an operand.  */
  AARCH64_OPND_QLF_NIL,

  /* Qualifying an operand which is a general purpose (integer) register;
     indicating the operand data size or a specific register.  */
  AARCH64_OPND_QLF_W,		/* Wn, WZR or WSP.  */
  AARCH64_OPND_QLF_X,		/* Xn, XZR or XSP.  */
  AARCH64_OPND_QLF_WSP,		/* WSP.  */
  AARCH64_OPND_QLF_SP,		/* SP.  */

  /* Qualifying an operand which is a floating-point register, a SIMD
     vector element or a SIMD vector element list; indicating operand data
     size or the size of each SIMD vector element in the case of a SIMD
     vector element list.
     These qualifiers are also used to qualify an address operand to
     indicate the size of data element a load/store instruction is
     accessing.
     They are also used for the immediate shift operand in e.g. SSHR.  Such
     a use is only for the ease of operand encoding/decoding and qualifier
     sequence matching; such a use should not be applied widely; use the value
     constraint qualifiers for immediate operands wherever possible.  */
  AARCH64_OPND_QLF_S_B,
  AARCH64_OPND_QLF_S_H,
  AARCH64_OPND_QLF_S_S,
  AARCH64_OPND_QLF_S_D,
  AARCH64_OPND_QLF_S_Q,
  /* These type qualifiers have a special meaning in that they mean 4 x 1 byte
     or 2 x 2 byte are selected by the instruction.  Other than that they have
     no difference with AARCH64_OPND_QLF_S_B in encoding.  They are here purely
     for syntactical reasons and are an exception from the normal AArch64
     disassembly scheme.  */
  AARCH64_OPND_QLF_S_4B,
  AARCH64_OPND_QLF_S_2H,

  /* Qualifying an operand which is a SIMD vector register or a SIMD vector
     register list; indicating register shape.
     They are also used for the immediate shift operand in e.g. SSHR.  Such
     a use is only for the ease of operand encoding/decoding and qualifier
     sequence matching; such a use should not be applied widely; use the value
     constraint qualifiers for immediate operands wherever possible.  */
  AARCH64_OPND_QLF_V_4B,
  AARCH64_OPND_QLF_V_8B,
  AARCH64_OPND_QLF_V_16B,
  AARCH64_OPND_QLF_V_2H,
  AARCH64_OPND_QLF_V_4H,
  AARCH64_OPND_QLF_V_8H,
  AARCH64_OPND_QLF_V_2S,
  AARCH64_OPND_QLF_V_4S,
  AARCH64_OPND_QLF_V_1D,
  AARCH64_OPND_QLF_V_2D,
  AARCH64_OPND_QLF_V_1Q,

  /* Predicate register qualifiers: presumably /Z (zeroing) and /M (merging)
     — confirm against the operand printers.  */
  AARCH64_OPND_QLF_P_Z,
  AARCH64_OPND_QLF_P_M,

  /* Used in scaled signed immediate that are scaled by a Tag granule
     like in stg, st2g, etc.  */
  AARCH64_OPND_QLF_imm_tag,

  /* Constraint on value.  */
  AARCH64_OPND_QLF_CR,		/* CRn, CRm.  */
  AARCH64_OPND_QLF_imm_0_7,
  AARCH64_OPND_QLF_imm_0_15,
  AARCH64_OPND_QLF_imm_0_31,
  AARCH64_OPND_QLF_imm_0_63,
  AARCH64_OPND_QLF_imm_1_32,
  AARCH64_OPND_QLF_imm_1_64,

  /* Indicate whether an AdvSIMD modified immediate operand is shift-zeros
     or shift-ones.  */
  AARCH64_OPND_QLF_LSL,
  AARCH64_OPND_QLF_MSL,

  /* Special qualifier helping retrieve qualifier information during the
     decoding time (currently not in use).  */
  AARCH64_OPND_QLF_RETRIEVE,
};
837 \f
838 /* Instruction class. */
839
enum aarch64_insn_class
{
  aarch64_misc,
  /* Integer add/subtract forms.  */
  addsub_carry,
  addsub_ext,
  addsub_imm,
  addsub_shift,
  /* AdvSIMD (vector) forms.  */
  asimdall,
  asimddiff,
  asimdelem,
  asimdext,
  asimdimm,
  asimdins,
  asimdmisc,
  asimdperm,
  asimdsame,
  asimdshf,
  asimdtbl,
  /* AdvSIMD scalar forms.  */
  asisddiff,
  asisdelem,
  asisdlse,
  asisdlsep,
  asisdlso,
  asisdlsop,
  asisdmisc,
  asisdone,
  asisdpair,
  asisdsame,
  asisdshf,
  bitfield,
  /* Branches, compare-and-branch, conditional compare/select.  */
  branch_imm,
  branch_reg,
  compbranch,
  condbranch,
  condcmp_imm,
  condcmp_reg,
  condsel,
  /* Cryptographic extensions.  */
  cryptoaes,
  cryptosha2,
  cryptosha3,
  /* Integer data-processing, 1/2/3 source registers.  */
  dp_1src,
  dp_2src,
  dp_3src,
  exception,
  extract,
  /* Floating-point forms.  */
  float2fix,
  float2int,
  floatccmp,
  floatcmp,
  floatdp1,
  floatdp2,
  floatdp3,
  floatimm,
  floatsel,
  /* Loads and stores.  */
  ldst_immpost,
  ldst_immpre,
  ldst_imm9,	/* immpost or immpre */
  ldst_imm10,	/* LDRAA/LDRAB */
  ldst_pos,
  ldst_regoff,
  ldst_unpriv,
  ldst_unscaled,
  ldstexcl,
  ldstnapair_offs,
  ldstpair_off,
  ldstpair_indexed,
  loadlit,
  log_imm,
  log_shift,
  lse_atomic,
  lse128_atomic,
  movewide,
  pcreladdr,
  ic_system,
  /* SME (Scalable Matrix Extension) forms.  */
  sme_fp_sd,
  sme_int_sd,
  sme_misc,
  sme_mov,
  sme_ldr,
  sme_psel,
  sme_shift,
  sme_size_12_bhs,
  sme_size_12_hs,
  sme_size_22,
  sme_size_22_hsd,
  sme_sz_23,
  sme_str,
  sme_start,
  sme_stop,
  sme2_mov,
  /* SVE (Scalable Vector Extension) forms.  */
  sve_cpy,
  sve_index,
  sve_limm,
  sve_misc,
  sve_movprfx,
  sve_pred_zm,
  sve_shift_pred,
  sve_shift_unpred,
  sve_size_bhs,
  sve_size_bhsd,
  sve_size_hsd,
  sve_size_hsd2,
  sve_size_sd,
  sve_size_bh,
  sve_size_sd2,
  sve_size_13,
  sve_shift_tsz_hsd,
  sve_shift_tsz_bhsd,
  sve_size_tsz_bhs,
  testbranch,
  cryptosm3,
  cryptosm4,
  dotproduct,
  bfloat16,
  cssc,
  gcs,
};
957
958 /* Opcode enumerators. */
959
enum aarch64_op
{
  OP_NIL,
  /* Load/store, unsigned (positive) immediate offset forms.  */
  OP_STRB_POS,
  OP_LDRB_POS,
  OP_LDRSB_POS,
  OP_STRH_POS,
  OP_LDRH_POS,
  OP_LDRSH_POS,
  OP_STR_POS,
  OP_LDR_POS,
  OP_STRF_POS,
  OP_LDRF_POS,
  OP_LDRSW_POS,
  OP_PRFM_POS,

  /* Load/store, unscaled immediate offset (STUR/LDUR family).  */
  OP_STURB,
  OP_LDURB,
  OP_LDURSB,
  OP_STURH,
  OP_LDURH,
  OP_LDURSH,
  OP_STUR,
  OP_LDUR,
  OP_STURV,
  OP_LDURV,
  OP_LDURSW,
  OP_PRFUM,

  /* Load/prefetch (literal).  */
  OP_LDR_LIT,
  OP_LDRV_LIT,
  OP_LDRSW_LIT,
  OP_PRFM_LIT,

  OP_ADD,
  OP_B,
  OP_BL,

  /* Move wide (immediate).  */
  OP_MOVN,
  OP_MOVZ,
  OP_MOVK,

  OP_MOV_IMM_LOG,	/* MOV alias for moving bitmask immediate.  */
  OP_MOV_IMM_WIDE,	/* MOV alias for moving wide immediate.  */
  OP_MOV_IMM_WIDEN,	/* MOV alias for moving wide immediate (negated).  */

  OP_MOV_V,		/* MOV alias for moving vector register.  */

  /* Shift (immediate) aliases.  */
  OP_ASR_IMM,
  OP_LSR_IMM,
  OP_LSL_IMM,

  OP_BIC,

  /* Bitfield insert/extract aliases.  */
  OP_UBFX,
  OP_BFXIL,
  OP_SBFX,
  OP_SBFIZ,
  OP_BFI,
  OP_BFC,		/* ARMv8.2.  */
  OP_UBFIZ,
  OP_UXTB,
  OP_UXTH,
  OP_UXTW,

  /* Conditional-select aliases.  */
  OP_CINC,
  OP_CINV,
  OP_CNEG,
  OP_CSET,
  OP_CSETM,

  /* Floating-point conversions.  */
  OP_FCVT,
  OP_FCVTN,
  OP_FCVTN2,
  OP_FCVTL,
  OP_FCVTL2,
  OP_FCVTXN_S,		/* Scalar version.  */

  OP_ROR_IMM,

  /* Vector lengthen aliases.  */
  OP_SXTL,
  OP_SXTL2,
  OP_UXTL,
  OP_UXTL2,

  /* SVE/SME predicate and vector MOV/NOT aliases.  */
  OP_MOV_P_P,
  OP_MOV_PN_PN,
  OP_MOV_Z_P_Z,
  OP_MOV_Z_V,
  OP_MOV_Z_Z,
  OP_MOV_Z_Zi,
  OP_MOVM_P_P_P,
  OP_MOVS_P_P,
  OP_MOVZS_P_P_P,
  OP_MOVZ_P_P_P,
  OP_NOTS_P_P_P_Z,
  OP_NOT_P_P_P_Z,

  OP_FCMLA_ELEM,	/* ARMv8.3, indexed element version.  */

  OP_TOTAL_NUM,		/* Pseudo.  */
};
1062
1063 /* Error types. */
enum err_type
{
  ERR_OK,		/* No error; the instruction passed verification.  */
  ERR_UND,		/* Presumably "undefined" in the architectural sense
			   — TODO confirm against the verifier callbacks.  */
  ERR_UNP,		/* Presumably "unpredictable" — TODO confirm.  */
  ERR_NYI,		/* Presumably "not yet implemented" — TODO confirm.  */
  ERR_VFI,		/* NOTE(review): meaning not evident from this header;
			   confirm against the verifier implementations.  */
  ERR_NR_ENTRIES	/* Number of enumerators; keep last.  */
};
1073
1074 /* Maximum number of operands an instruction can have. */
1075 #define AARCH64_MAX_OPND_NUM 6
1076 /* Maximum number of qualifier sequences an instruction can have. */
1077 #define AARCH64_MAX_QLF_SEQ_NUM 10
1078 /* Operand qualifier typedef; optimized for the size. */
1079 typedef unsigned char aarch64_opnd_qualifier_t;
1080 /* Operand qualifier sequence typedef. */
1081 typedef aarch64_opnd_qualifier_t \
1082 aarch64_opnd_qualifier_seq_t [AARCH64_MAX_OPND_NUM];
1083
1084 /* FIXME: improve the efficiency. */
1085 static inline bool
1086 empty_qualifier_sequence_p (const aarch64_opnd_qualifier_t *qualifiers)
1087 {
1088 int i;
1089 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
1090 if (qualifiers[i] != AARCH64_OPND_QLF_NIL)
1091 return false;
1092 return true;
1093 }
1094
1095 /* Forward declare error reporting type. */
1096 typedef struct aarch64_operand_error aarch64_operand_error;
1097 /* Forward declare instruction sequence type. */
1098 typedef struct aarch64_instr_sequence aarch64_instr_sequence;
1099 /* Forward declare instruction definition. */
1100 typedef struct aarch64_inst aarch64_inst;
1101
1102 /* This structure holds information for a particular opcode. */
1103
struct aarch64_opcode
{
  /* The name of the mnemonic.  */
  const char *name;

  /* The opcode itself.  Those bits which will be filled in with
     operands are zeroes.  */
  aarch64_insn opcode;

  /* The opcode mask.  This is used by the disassembler.  This is a
     mask containing ones indicating those bits which must match the
     opcode field, and zeroes indicating those bits which need not
     match (and are presumably filled in by operands).  */
  aarch64_insn mask;

  /* Instruction class.  */
  enum aarch64_insn_class iclass;

  /* Enumerator identifier.  */
  enum aarch64_op op;

  /* Which architecture variant provides this instruction.  */
  const aarch64_feature_set *avariant;

  /* An array of operand codes.  Each code is an index into the
     operand table.  They appear in the order which the operands must
     appear in assembly code, and are terminated by a zero.  */
  enum aarch64_opnd operands[AARCH64_MAX_OPND_NUM];

  /* A list of operand qualifier code sequence.  Each operand qualifier
     code qualifies the corresponding operand code.  Each operand
     qualifier sequence specifies a valid opcode variant and related
     constraint on operands.  */
  aarch64_opnd_qualifier_seq_t qualifiers_list[AARCH64_MAX_QLF_SEQ_NUM];

  /* Flags providing information about this instruction (the F_* masks
     defined below this structure).  */
  uint64_t flags;

  /* Extra constraints on the instruction that the verifier checks (the
     C_* masks defined below this structure).  */
  uint32_t constraints;

  /* If nonzero, this operand and operand 0 are both registers and
     are required to have the same register number.
     NOTE(review): presumably the value is the index of the tied operand
     — confirm against the encoders/decoders.  */
  unsigned char tied_operand;

  /* If non-NULL, a function to verify that a given instruction is valid.  */
  enum err_type (* verifier) (const struct aarch64_inst *, const aarch64_insn,
			      bfd_vma, bool, aarch64_operand_error *,
			      struct aarch64_instr_sequence *);
};
1154
1155 typedef struct aarch64_opcode aarch64_opcode;
1156
1157 /* Table describing all the AArch64 opcodes. */
1158 extern const aarch64_opcode aarch64_opcode_table[];
1159
1160 /* Opcode flags. */
1161 #define F_ALIAS (1 << 0)
1162 #define F_HAS_ALIAS (1 << 1)
1163 /* Disassembly preference priority 1-3 (the larger the higher). If nothing
1164 is specified, it is the priority 0 by default, i.e. the lowest priority. */
1165 #define F_P1 (1 << 2)
1166 #define F_P2 (2 << 2)
1167 #define F_P3 (3 << 2)
1168 /* Flag an instruction that is truly conditional executed, e.g. b.cond. */
1169 #define F_COND (1 << 4)
1170 /* Instruction has the field of 'sf'. */
1171 #define F_SF (1 << 5)
1172 /* Instruction has the field of 'size:Q'. */
1173 #define F_SIZEQ (1 << 6)
1174 /* Floating-point instruction has the field of 'type'. */
1175 #define F_FPTYPE (1 << 7)
1176 /* AdvSIMD scalar instruction has the field of 'size'. */
1177 #define F_SSIZE (1 << 8)
1178 /* AdvSIMD vector register arrangement specifier encoded in "imm5<3:0>:Q". */
1179 #define F_T (1 << 9)
1180 /* Size of GPR operand in AdvSIMD instructions encoded in Q. */
1181 #define F_GPRSIZE_IN_Q (1 << 10)
1182 /* Size of Rt load signed instruction encoded in opc[0], i.e. bit 22. */
1183 #define F_LDS_SIZE (1 << 11)
1184 /* Optional operand; assume maximum of 1 operand can be optional. */
1185 #define F_OPD0_OPT (1 << 12)
1186 #define F_OPD1_OPT (2 << 12)
1187 #define F_OPD2_OPT (3 << 12)
1188 #define F_OPD3_OPT (4 << 12)
1189 #define F_OPD4_OPT (5 << 12)
1190 /* Default value for the optional operand when omitted from the assembly. */
1191 #define F_DEFAULT(X) (((X) & 0x1f) << 15)
1192 /* Instruction that is an alias of another instruction needs to be
1193 encoded/decoded by converting it to/from the real form, followed by
1194 the encoding/decoding according to the rules of the real opcode.
1195 This compares to the direct coding using the alias's information.
1196 N.B. this flag requires F_ALIAS to be used together. */
1197 #define F_CONV (1 << 20)
1198 /* Use together with F_ALIAS to indicate an alias opcode is a programmer
1199 friendly pseudo instruction available only in the assembly code (thus will
1200 not show up in the disassembly). */
1201 #define F_PSEUDO (1 << 21)
1202 /* Instruction has miscellaneous encoding/decoding rules. */
1203 #define F_MISC (1 << 22)
1204 /* Instruction has the field of 'N'; used in conjunction with F_SF. */
1205 #define F_N (1 << 23)
1206 /* Opcode dependent field. */
1207 #define F_OD(X) (((X) & 0x7) << 24)
1208 /* Instruction has the field of 'sz'. */
1209 #define F_LSE_SZ (1 << 27)
1210 /* Require an exact qualifier match, even for NIL qualifiers. */
1211 #define F_STRICT (1ULL << 28)
1212 /* This system instruction is used to read system registers. */
1213 #define F_SYS_READ (1ULL << 29)
1214 /* This system instruction is used to write system registers. */
1215 #define F_SYS_WRITE (1ULL << 30)
1216 /* This instruction has an extra constraint on it that imposes a requirement on
1217 subsequent instructions. */
1218 #define F_SCAN (1ULL << 31)
1219 /* Next bit is 32. */
1220
1221 /* Instruction constraints. */
1222 /* This instruction has a predication constraint on the instruction at PC+4. */
1223 #define C_SCAN_MOVPRFX (1U << 0)
1224 /* This instruction's operation width is determined by the operand with the
1225 largest element size. */
1226 #define C_MAX_ELEM (1U << 1)
1227 #define C_SCAN_MOPS_P (1U << 2)
1228 #define C_SCAN_MOPS_M (2U << 2)
1229 #define C_SCAN_MOPS_E (3U << 2)
1230 #define C_SCAN_MOPS_PME (3U << 2)
1231 /* Next bit is 4. */
1232
1233 static inline bool
1234 alias_opcode_p (const aarch64_opcode *opcode)
1235 {
1236 return (opcode->flags & F_ALIAS) != 0;
1237 }
1238
1239 static inline bool
1240 opcode_has_alias (const aarch64_opcode *opcode)
1241 {
1242 return (opcode->flags & F_HAS_ALIAS) != 0;
1243 }
1244
1245 /* Priority for disassembling preference. */
1246 static inline int
1247 opcode_priority (const aarch64_opcode *opcode)
1248 {
1249 return (opcode->flags >> 2) & 0x3;
1250 }
1251
1252 static inline bool
1253 pseudo_opcode_p (const aarch64_opcode *opcode)
1254 {
1255 return (opcode->flags & F_PSEUDO) != 0lu;
1256 }
1257
1258 static inline bool
1259 optional_operand_p (const aarch64_opcode *opcode, unsigned int idx)
1260 {
1261 return ((opcode->flags >> 12) & 0x7) == idx + 1;
1262 }
1263
1264 static inline aarch64_insn
1265 get_optional_operand_default_value (const aarch64_opcode *opcode)
1266 {
1267 return (opcode->flags >> 15) & 0x1f;
1268 }
1269
1270 static inline unsigned int
1271 get_opcode_dependent_value (const aarch64_opcode *opcode)
1272 {
1273 return (opcode->flags >> 24) & 0x7;
1274 }
1275
1276 static inline bool
1277 opcode_has_special_coder (const aarch64_opcode *opcode)
1278 {
1279 return (opcode->flags & (F_SF | F_LSE_SZ | F_SIZEQ | F_FPTYPE | F_SSIZE | F_T
1280 | F_GPRSIZE_IN_Q | F_LDS_SIZE | F_MISC | F_N | F_COND)) != 0;
1281 }
1282 \f
struct aarch64_name_value_pair
{
  /* Assembly-level name of the entity (modifier, barrier option, prfop,
     hint, ...; see the table declarations that follow).  */
  const char * name;
  /* Its encoding value.  */
  aarch64_insn value;
};
1288
1289 extern const struct aarch64_name_value_pair aarch64_operand_modifiers [];
1290 extern const struct aarch64_name_value_pair aarch64_barrier_options [16];
1291 extern const struct aarch64_name_value_pair aarch64_barrier_dsb_nxs_options [4];
1292 extern const struct aarch64_name_value_pair aarch64_prfops [32];
1293 extern const struct aarch64_name_value_pair aarch64_hint_options [];
1294
1295 #define AARCH64_MAX_SYSREG_NAME_LEN 32
1296
typedef struct
{
  /* System register name as written in assembly.  */
  const char * name;
  /* Encoding of the register.  NOTE(review): presumably the packed
     op0:op1:CRn:CRm:op2 fields — confirm against the consumers.  */
  aarch64_insn value;
  /* Property flags for this register.  */
  uint32_t flags;

  /* A set of features, all of which are required for this system register to be
     available.  */
  aarch64_feature_set features;
} aarch64_sys_reg;
1307
1308 extern const aarch64_sys_reg aarch64_sys_regs [];
1309 extern const aarch64_sys_reg aarch64_pstatefields [];
1310 extern bool aarch64_sys_reg_deprecated_p (const uint32_t);
1311 extern bool aarch64_sys_reg_alias_p (const uint32_t);
1312 extern bool aarch64_pstatefield_supported_p (const aarch64_feature_set,
1313 const aarch64_sys_reg *);
1314
/* Description of a register operand used by system instructions; see the
   aarch64_sys_regs_ic/_dc/_at/_tlbi/_sr tables declared below.  */
typedef struct
{
  /* Name as written in assembly.  */
  const char *name;
  /* Encoding value.  */
  uint32_t value;
  /* Property flags.  */
  uint32_t flags;
} aarch64_sys_ins_reg;
1321
1322 extern bool aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *);
1323 extern bool
1324 aarch64_sys_ins_reg_supported_p (const aarch64_feature_set,
1325 const char *reg_name, aarch64_insn,
1326 uint32_t, const aarch64_feature_set *);
1327
1328 extern const aarch64_sys_ins_reg aarch64_sys_regs_ic [];
1329 extern const aarch64_sys_ins_reg aarch64_sys_regs_dc [];
1330 extern const aarch64_sys_ins_reg aarch64_sys_regs_at [];
1331 extern const aarch64_sys_ins_reg aarch64_sys_regs_tlbi [];
1332 extern const aarch64_sys_ins_reg aarch64_sys_regs_sr [];
1333
1334 /* Shift/extending operator kinds.
1335 N.B. order is important; keep aarch64_operand_modifiers synced. */
enum aarch64_modifier_kind
{
  AARCH64_MOD_NONE,	/* No shift/extend modifier.  */
  /* Shift operators.  */
  AARCH64_MOD_MSL,
  AARCH64_MOD_ROR,
  AARCH64_MOD_ASR,
  AARCH64_MOD_LSR,
  AARCH64_MOD_LSL,
  /* Extend operators (see aarch64_extend_operator_p below).  */
  AARCH64_MOD_UXTB,
  AARCH64_MOD_UXTH,
  AARCH64_MOD_UXTW,
  AARCH64_MOD_UXTX,
  AARCH64_MOD_SXTB,
  AARCH64_MOD_SXTH,
  AARCH64_MOD_SXTW,
  AARCH64_MOD_SXTX,
  /* Multiplier operators (SVE/SME addressing, e.g. MUL VL).  */
  AARCH64_MOD_MUL,
  AARCH64_MOD_MUL_VL,
};
1355
1356 bool
1357 aarch64_extend_operator_p (enum aarch64_modifier_kind);
1358
1359 enum aarch64_modifier_kind
1360 aarch64_get_operand_modifier (const struct aarch64_name_value_pair *);
1361 /* Condition. */
1362
typedef struct
{
  /* A list of names with the first one as the disassembly preference;
     terminated by NULL if fewer than 4 (the array holds up to 4 names).  */
  const char *names[4];
  /* Encoding of the condition.  */
  aarch64_insn value;
} aarch64_cond;
1370
1371 extern const aarch64_cond aarch64_conds[16];
1372
1373 const aarch64_cond* get_cond_from_value (aarch64_insn value);
1374 const aarch64_cond* get_inverted_cond (const aarch64_cond *cond);
1375 \f
1376 /* Information about a reference to part of ZA. */
struct aarch64_indexed_za
{
  /* Which tile is being accessed.  Unused (and 0) for an index into ZA.  */
  int regno;

  /* The [<Wv>, #<imm>] (or [<Wv>, #<imm1>:<imm2>]) index expression.  */
  struct
  {
    /* The 32-bit index register.  */
    int regno;

    /* The first (or only) immediate offset.  */
    int64_t imm;

    /* The last immediate offset minus the first immediate offset.
       Unlike the range size, this is guaranteed not to overflow
       when the end offset > the start offset.  */
    uint64_t countm1;
  } index;

  /* The vector group size, or 0 if none.  */
  unsigned group_size : 8;

  /* True if a tile access is vertical, false if it is horizontal.
     Unused (and 0) for an index into ZA.  */
  unsigned v : 1;
};
1403
1404 /* Information about a list of registers. */
struct aarch64_reglist
{
  /* Number of the first register in the list.  */
  unsigned first_regno : 8;
  /* Number of registers in the list.  */
  unsigned num_regs : 8;
  /* The difference between the nth and the n+1th register.  */
  unsigned stride : 8;
  /* 1 if the list is of register elements (each entry carries a lane
     index); see INDEX below.  */
  unsigned has_index : 1;
  /* Lane index; valid only when has_index is 1.  */
  int64_t index;
};
1416
1417 /* Structure representing an operand. */
1418
struct aarch64_opnd_info
{
  /* Operand code identifying the kind of operand.  */
  enum aarch64_opnd type;
  /* Qualifier on the operand; AARCH64_OPND_QLF_NIL if none.  */
  aarch64_opnd_qualifier_t qualifier;
  /* Position of this operand within the instruction.  */
  int idx;

  /* Variant part; which member is active depends on TYPE.  */
  union
    {
      /* Plain register.  */
      struct
	{
	  unsigned regno;
	} reg;
      /* Register with a lane index, e.g. Vn.S[2].  */
      struct
	{
	  unsigned int regno;
	  int64_t index;
	} reglane;
      /* e.g. LVn.  */
      struct aarch64_reglist reglist;
      /* e.g. immediate or pc relative address offset.  */
      struct
	{
	  int64_t value;
	  unsigned is_fp : 1;
	} imm;
      /* e.g. address in STR (register offset).  */
      struct
	{
	  unsigned base_regno;
	  struct
	    {
	      union
		{
		  int imm;
		  unsigned regno;
		};
	      /* Nonzero when the REGNO member of the union is active.  */
	      unsigned is_reg;
	    } offset;
	  unsigned pcrel : 1;		/* PC-relative.  */
	  unsigned writeback : 1;
	  unsigned preind : 1;		/* Pre-indexed.  */
	  unsigned postind : 1;		/* Post-indexed.  */
	} addr;

      /* System register, e.g. for MRS/MSR.  */
      struct
	{
	  /* The encoding of the system register.  */
	  aarch64_insn value;

	  /* The system register flags.  */
	  uint32_t flags;
	} sysreg;

      /* ZA tile vector, e.g. <ZAn><HV>.D[<Wv>{, <imm>}]  */
      struct aarch64_indexed_za indexed_za;

      /* Condition code, e.g. for B.cond.  */
      const aarch64_cond *cond;
      /* The encoding of the PSTATE field.  */
      aarch64_insn pstatefield;
      /* Table entries for system-instruction operands, barrier options,
	 hints and prefetch operations.  */
      const aarch64_sys_ins_reg *sysins_op;
      const struct aarch64_name_value_pair *barrier;
      const struct aarch64_name_value_pair *hint_option;
      const struct aarch64_name_value_pair *prfop;
    };

  /* Operand shifter; in use when the operand is a register offset address,
     add/sub extended reg, etc. e.g. <R><m>{, <extend> {#<amount>}}.  */
  struct
    {
      enum aarch64_modifier_kind kind;
      unsigned operator_present: 1;	/* Only valid during encoding.  */
      /* Value of the 'S' field in ld/st reg offset; used only in decoding.  */
      unsigned amount_present: 1;
      int64_t amount;
    } shifter;

  unsigned skip:1;	/* Operand is not completed if there is a fixup needed
			   to be done on it.  In some (but not all) of these
			   cases, we need to tell libopcodes to skip the
			   constraint checking and the encoding for this
			   operand, so that the libopcodes can pick up the
			   right opcode before the operand is fixed-up.  This
			   flag should only be used during the
			   assembling/encoding.  */
  unsigned present:1;	/* Whether this operand is present in the assembly
			   line; not used during the disassembly.  */
};
1506
1507 typedef struct aarch64_opnd_info aarch64_opnd_info;
1508
1509 /* Structure representing an instruction.
1510
1511 It is used during both the assembling and disassembling. The assembler
1512 fills an aarch64_inst after a successful parsing and then passes it to the
1513 encoding routine to do the encoding. During the disassembling, the
1514 disassembler calls the decoding routine to decode a binary instruction; on a
1515 successful return, such a structure will be filled with information of the
1516 instruction; then the disassembler uses the information to print out the
1517 instruction. */
1518
struct aarch64_inst
{
  /* The value of the binary instruction.  */
  aarch64_insn value;

  /* Corresponding opcode entry.  */
  const aarch64_opcode *opcode;

  /* Condition for a truly conditionally-executed instruction,
     e.g. b.cond.  */
  const aarch64_cond *cond;

  /* Operands information.  */
  aarch64_opnd_info operands[AARCH64_MAX_OPND_NUM];
};
1533
1534 /* Defining the HINT #imm values for the aarch64_hint_options. */
1535 #define HINT_OPD_CSYNC 0x11
1536 #define HINT_OPD_DSYNC 0x13
1537 #define HINT_OPD_C 0x22
1538 #define HINT_OPD_J 0x24
1539 #define HINT_OPD_JC 0x26
1540 #define HINT_OPD_NULL 0x00
1541
1542 \f
1543 /* Diagnosis related declaration and interface. */
1544
1545 /* Operand error kind enumerators.
1546
1547 AARCH64_OPDE_RECOVERABLE
1548 Less severe error found during the parsing, very possibly because that
1549 GAS has picked up a wrong instruction template for the parsing.
1550
1551 AARCH64_OPDE_A_SHOULD_FOLLOW_B
1552 The instruction forms (or is expected to form) part of a sequence,
1553 but the preceding instruction in the sequence wasn't the expected one.
1554 The message refers to two strings: the name of the current instruction,
1555 followed by the name of the expected preceding instruction.
1556
1557 AARCH64_OPDE_EXPECTED_A_AFTER_B
1558 Same as AARCH64_OPDE_A_SHOULD_FOLLOW_B, but shifting the focus
1559 so that the current instruction is assumed to be the incorrect one:
1560 "since the previous instruction was B, the current one should be A".
1561
1562 AARCH64_OPDE_SYNTAX_ERROR
1563 General syntax error; it can be either a user error, or simply because
1564 that GAS is trying a wrong instruction template.
1565
1566 AARCH64_OPDE_FATAL_SYNTAX_ERROR
1567 Definitely a user syntax error.
1568
1569 AARCH64_OPDE_INVALID_VARIANT
1570 No syntax error, but the operands are not a valid combination, e.g.
1571 FMOV D0,S0
1572
1573 The following errors are only reported against an asm string that is
1574 syntactically valid and that has valid operand qualifiers.
1575
1576 AARCH64_OPDE_INVALID_VG_SIZE
1577 Error about a "VGx<n>" modifier in a ZA index not having the
1578 correct <n>. This error effectively forms a pair with
1579 AARCH64_OPDE_REG_LIST_LENGTH, since both errors relate to the number
1580 of vectors that an instruction operates on. However, the "VGx<n>"
1581 modifier is optional, whereas a register list always has a known
1582 and explicit length. It therefore seems better to place more
1583 importance on the register list length when selecting an opcode table
1584 entry. This in turn means that having an incorrect register length
1585 should be more severe than having an incorrect "VGx<n>".
1586
1587 AARCH64_OPDE_REG_LIST_LENGTH
1588 Error about a register list operand having an unexpected number of
1589 registers. This error is low severity because there might be another
1590 opcode entry that supports the given number of registers.
1591
1592 AARCH64_OPDE_REG_LIST_STRIDE
1593 Error about a register list operand having the correct number
1594 (and type) of registers, but an unexpected stride. This error is
1595 more severe than AARCH64_OPDE_REG_LIST_LENGTH because it implies
1596 that the length is known to be correct. However, it is lower than
1597 many other errors, since some instructions have forms that share
1598 the same number of registers but have different strides.
1599
1600 AARCH64_OPDE_UNTIED_IMMS
1601 The asm failed to use the same immediate for a destination operand
1602 and a tied source operand.
1603
1604 AARCH64_OPDE_UNTIED_OPERAND
1605 The asm failed to use the same register for a destination operand
1606 and a tied source operand.
1607
1608 AARCH64_OPDE_OUT_OF_RANGE
1609 Error about some immediate value out of a valid range.
1610
1611 AARCH64_OPDE_UNALIGNED
Error about some immediate value not properly aligned (i.e. not being a
multiple of a certain value).
1614
1615 AARCH64_OPDE_OTHER_ERROR
1616 Error of the highest severity and used for any severe issue that does not
1617 fall into any of the above categories.
1618
1619 AARCH64_OPDE_INVALID_REGNO
1620 A register was syntactically valid and had the right type, but it was
1621 outside the range supported by the associated operand field. This is
1622 a high severity error because there are currently no instructions that
1623 would accept the operands that precede the erroneous one (if any) and
1624 yet still accept a wider range of registers.
1625
1626 AARCH64_OPDE_RECOVERABLE, AARCH64_OPDE_SYNTAX_ERROR and
AARCH64_OPDE_FATAL_SYNTAX_ERROR are only detected by GAS while the
1628 AARCH64_OPDE_INVALID_VARIANT error can only be spotted by libopcodes as
1629 only libopcodes has the information about the valid variants of each
1630 instruction.
1631
1632 The enumerators have an increasing severity. This is helpful when there are
1633 multiple instruction templates available for a given mnemonic name (e.g.
1634 FMOV); this mechanism will help choose the most suitable template from which
1635 the generated diagnostics can most closely describe the issues, if any.
1636
1637 This enum needs to be kept up-to-date with operand_mismatch_kind_names
1638 in tc-aarch64.c. */
1639
enum aarch64_operand_error_kind
{
  /* See the large comment above for the meaning of each enumerator.
     Kept in increasing order of severity; keep in sync with
     operand_mismatch_kind_names in gas/config/tc-aarch64.c.  */
  AARCH64_OPDE_NIL,
  AARCH64_OPDE_RECOVERABLE,
  AARCH64_OPDE_A_SHOULD_FOLLOW_B,
  AARCH64_OPDE_EXPECTED_A_AFTER_B,
  AARCH64_OPDE_SYNTAX_ERROR,
  AARCH64_OPDE_FATAL_SYNTAX_ERROR,
  AARCH64_OPDE_INVALID_VARIANT,
  AARCH64_OPDE_INVALID_VG_SIZE,
  AARCH64_OPDE_REG_LIST_LENGTH,
  AARCH64_OPDE_REG_LIST_STRIDE,
  AARCH64_OPDE_UNTIED_IMMS,
  AARCH64_OPDE_UNTIED_OPERAND,
  AARCH64_OPDE_OUT_OF_RANGE,
  AARCH64_OPDE_UNALIGNED,
  AARCH64_OPDE_OTHER_ERROR,
  AARCH64_OPDE_INVALID_REGNO
};
1659
/* N.B. GAS assumes that this structure works well with shallow copy.  */
struct aarch64_operand_error
{
  /* Kind (and severity) of the error.  */
  enum aarch64_operand_error_kind kind;
  /* Index of the operand the error refers to.
     NOTE(review): presumably a sentinel (e.g. -1) when no single operand
     is at fault — confirm against the consumers in GAS/libopcodes.  */
  int index;
  /* Human-readable message text; may be NULL.
     NOTE(review): confirm whether NULL is allowed here.  */
  const char *error;
  /* Some data for extra information.  */
  union {
    int i;
    const char *s;
  } data[3];
  /* NOTE(review): appears to mark diagnostics that do not abort the
     current assembly attempt — confirm against usage.  */
  bool non_fatal;
};
1673
/* AArch64 sequence structure used to track instructions with F_SCAN
   dependencies for both assembler and disassembler.  */
struct aarch64_instr_sequence
{
  /* The instructions in the sequence, starting with the one that
     caused it to be opened.  Heap-allocated array of
     NUM_ALLOCATED_INSNS entries, of which NUM_ADDED_INSNS are in use;
     see init_insn_sequence below.  */
  aarch64_inst *instr;
  /* The number of instructions already in the sequence.  */
  int num_added_insns;
  /* The number of instructions allocated to the sequence.  */
  int num_allocated_insns;
};
1686
/* Encoding entrypoint.  */

/* Encode the instruction into its binary aarch64_insn representation,
   returning true on success.  On failure, details are reported through
   the aarch64_operand_error argument.
   NOTE(review): the exact roles of the qualifier-array and sequence
   arguments follow the encoder's definition in opcodes/ — confirm
   there.  */
extern bool
aarch64_opcode_encode (const aarch64_opcode *, const aarch64_inst *,
                       aarch64_insn *, aarch64_opnd_qualifier_t *,
                       aarch64_operand_error *, aarch64_instr_sequence *);

/* Switch the instruction over to using the given opcode entry.
   NOTE(review): presumably returns the replacement opcode entry (or
   NULL on failure) — confirm in opcodes/aarch64-opc.c.  */
extern const aarch64_opcode *
aarch64_replace_opcode (struct aarch64_inst *,
                        const aarch64_opcode *);

/* Given the opcode enumerator OP, return the pointer to the corresponding
   opcode entry.  */

extern const aarch64_opcode *
aarch64_get_opcode (enum aarch64_op);
1703
/* An instance of this structure is passed to aarch64_print_operand, and
   the callback within this structure is used to apply styling to the
   disassembler output.  This structure encapsulates the callback and a
   state pointer.  */

struct aarch64_styler
{
  /* The callback used to apply styling.  Returns a string created from
     the printf-style FMT and ARGS with STYLE applied to the string.
     STYLER is a pointer back to this object so that the callback can
     access the state member.

     The string returned from this callback must remain valid until the
     call to aarch64_print_operand has completed.  */
  const char *(*apply_style) (struct aarch64_styler *styler,
                              enum disassembler_style style,
                              const char *fmt,
                              va_list args);

  /* A pointer to a state object which can be used by the apply_style
     callback function.  */
  void *state;
};
1726
/* Generate the string representation of an operand.  */
extern void
aarch64_print_operand (char *, size_t, bfd_vma, const aarch64_opcode *,
                       const aarch64_opnd_info *, int, int *, bfd_vma *,
                       char **, char *, size_t,
                       aarch64_feature_set features,
                       struct aarch64_styler *styler);

/* Miscellaneous interface.  */

/* Search the operand array for the given operand kind.
   NOTE(review): presumably returns its index, or -1 if not found —
   confirm in opcodes/aarch64-opc.c.  */
extern int
aarch64_operand_index (const enum aarch64_opnd *, enum aarch64_opnd);

/* Look up the qualifier expected for one operand given a qualifier
   sequence list.
   NOTE(review): parameter roles inferred from types only — confirm
   against the definition in opcodes/aarch64-opc.c.  */
extern aarch64_opnd_qualifier_t
aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *, int,
                                const aarch64_opnd_qualifier_t, int);

/* Return true if the opcode is destructive, i.e. one of its destination
   operands is also a source (inferred from the name — confirm in
   opcodes/aarch64-opc.c).  */
extern bool
aarch64_is_destructive_by_operands (const aarch64_opcode *);

/* Return the number of operands that the opcode takes.  */
extern int
aarch64_num_of_operands (const aarch64_opcode *);

/* Return non-zero if the operand is the stack pointer.  */
extern int
aarch64_stack_pointer_p (const aarch64_opnd_info *);

/* Return non-zero if the operand is the zero register.  */
extern int
aarch64_zero_register_p (const aarch64_opnd_info *);

/* Decoding entrypoint: decode the instruction word into the
   aarch64_inst, reporting problems through the aarch64_operand_error
   argument.
   NOTE(review): the meaning of the bool argument is defined by the
   decoder in opcodes/aarch64-dis.c — confirm there.  */
extern enum err_type
aarch64_decode_insn (aarch64_insn, aarch64_inst *, bool,
                     aarch64_operand_error *);

/* (Re)initialise an instruction sequence, starting with the given
   instruction.  */
extern void
init_insn_sequence (const struct aarch64_inst *, aarch64_instr_sequence *);

/* Given an operand qualifier, return the expected data element size
   of a qualified operand.  */
extern unsigned char
aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t);

/* Return the operand class of the given operand kind.  */
extern enum aarch64_operand_class
aarch64_get_operand_class (enum aarch64_opnd);

/* Return the name of the given operand kind.  */
extern const char *
aarch64_get_operand_name (enum aarch64_opnd);

/* Return a description string for the given operand kind.  */
extern const char *
aarch64_get_operand_desc (enum aarch64_opnd);

/* NOTE(review): presumably decides whether an SVE DUPM immediate can be
   printed using the MOV alias — confirm in opcodes/aarch64-opc.c.  */
extern bool
aarch64_sve_dupm_mov_immediate_p (uint64_t, int);

/* Return true if a CPU with the given feature set supports the
   instruction.  */
extern bool
aarch64_cpu_supports_inst_p (aarch64_feature_set, aarch64_inst *);
1782
#ifdef DEBUG_AARCH64
/* Non-zero to enable the debug trace output below.  */
extern int debug_dump;

extern void
aarch64_verbose (const char *, ...) __attribute__ ((format (printf, 1, 2)));

/* Print the printf-style message M (with a trailing full stop appended),
   prefixed by the calling function's name, whenever debug_dump is set.

   The expansions are wrapped in do { ... } while (0) so that each macro
   behaves as a single statement: the previous bare-brace (and, when
   DEBUG_AARCH64 is undefined, bare-semicolon) expansions made
   "if (c) DEBUG_TRACE (...); else ..." a syntax error.  */
#define DEBUG_TRACE(M, ...)						\
  do									\
    {									\
      if (debug_dump)							\
	aarch64_verbose ("%s: " M ".", __func__, ##__VA_ARGS__);	\
    }									\
  while (0)

/* As DEBUG_TRACE, but trace only when the condition C also holds.  */
#define DEBUG_TRACE_IF(C, M, ...)					\
  do									\
    {									\
      if (debug_dump && (C))						\
	aarch64_verbose ("%s: " M ".", __func__, ##__VA_ARGS__);	\
    }									\
  while (0)
#else  /* !DEBUG_AARCH64 */
#define DEBUG_TRACE(M, ...) do {} while (0)
#define DEBUG_TRACE_IF(C, M, ...) do {} while (0)
#endif /* DEBUG_AARCH64 */
1804
/* Name tables used when printing operands.
   NOTE(review): the entry counts (32/16/64/2) presumably match the width
   of the corresponding encoded fields, with each table indexed by the
   field value — confirm against the definitions in
   opcodes/aarch64-opc.c.  */
extern const char *const aarch64_sve_pattern_array[32];
extern const char *const aarch64_sve_prfop_array[16];
extern const char *const aarch64_rprfmop_array[64];
extern const char *const aarch64_sme_vlxn_array[2];
1809
1810 #ifdef __cplusplus
1811 }
1812 #endif
1813
1814 #endif /* OPCODE_AARCH64_H */