1 /* AArch64 assembler/disassembler support.
2
3 Copyright (C) 2009-2023 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GNU Binutils.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #ifndef OPCODE_AARCH64_H
23 #define OPCODE_AARCH64_H
24
25 #include "bfd.h"
26 #include <stdint.h>
27 #include <assert.h>
28 #include <stdlib.h>
29
30 #include "dis-asm.h"
31
32 #ifdef __cplusplus
33 extern "C" {
34 #endif
35
36 /* The offset for pc-relative addressing is currently defined to be 0. */
37 #define AARCH64_PCREL_OFFSET 0
38
39 typedef uint32_t aarch64_insn;
40
/* An enum containing all known CPU features.  The values act as bit positions
   into aarch64_feature_set.  N.B. each enumerator's bit position is given by
   its position in this list, so reordering or deleting existing entries
   changes the meaning of every later bit; add new features at the end,
   before AARCH64_NUM_FEATURES.  */
enum aarch64_feature_bit {
  /* All processors.  */
  AARCH64_FEATURE_V8,
  /* ARMv8.6 processors.  */
  AARCH64_FEATURE_V8_6A,
  /* Bfloat16 insns.  */
  AARCH64_FEATURE_BFLOAT16,
  /* Armv8-A processors.  */
  AARCH64_FEATURE_V8A,
  /* SVE2 instructions.  */
  AARCH64_FEATURE_SVE2,
  /* ARMv8.2 processors.  */
  AARCH64_FEATURE_V8_2A,
  /* ARMv8.3 processors.  */
  AARCH64_FEATURE_V8_3A,
  /* SVE2 AES instructions.  */
  AARCH64_FEATURE_SVE2_AES,
  /* SVE2 bit-permute instructions.  */
  AARCH64_FEATURE_SVE2_BITPERM,
  /* SVE2 SM4 instructions.  */
  AARCH64_FEATURE_SVE2_SM4,
  /* SVE2 SHA3 instructions.  */
  AARCH64_FEATURE_SVE2_SHA3,
  /* ARMv8.4 processors.  */
  AARCH64_FEATURE_V8_4A,
  /* Armv8-R processors.  */
  AARCH64_FEATURE_V8R,
  /* Armv8.7 processors.  */
  AARCH64_FEATURE_V8_7A,
  /* Scalable Matrix Extension.  */
  AARCH64_FEATURE_SME,
  /* Atomic 64-byte load/store.  */
  AARCH64_FEATURE_LS64,
  /* v8.3 Pointer Authentication.  */
  AARCH64_FEATURE_PAC,
  /* FP instructions.  */
  AARCH64_FEATURE_FP,
  /* SIMD instructions.  */
  AARCH64_FEATURE_SIMD,
  /* CRC instructions.  */
  AARCH64_FEATURE_CRC,
  /* LSE instructions.  */
  AARCH64_FEATURE_LSE,
  /* PAN instructions.  */
  AARCH64_FEATURE_PAN,
  /* LOR instructions.  */
  AARCH64_FEATURE_LOR,
  /* v8.1 SIMD instructions.  */
  AARCH64_FEATURE_RDMA,
  /* v8.1 features.  */
  AARCH64_FEATURE_V8_1A,
  /* v8.2 FP16 instructions.  */
  AARCH64_FEATURE_F16,
  /* RAS Extensions.  */
  AARCH64_FEATURE_RAS,
  /* Statistical Profiling.  */
  AARCH64_FEATURE_PROFILE,
  /* SVE instructions.  */
  AARCH64_FEATURE_SVE,
  /* RCPC instructions.  */
  AARCH64_FEATURE_RCPC,
  /* Complex # instructions.  */
  AARCH64_FEATURE_COMPNUM,
  /* Dot Product instructions.  */
  AARCH64_FEATURE_DOTPROD,
  /* SM3 & SM4 instructions.  */
  AARCH64_FEATURE_SM4,
  /* SHA2 instructions.  */
  AARCH64_FEATURE_SHA2,
  /* SHA3 instructions.  */
  AARCH64_FEATURE_SHA3,
  /* AES instructions.  */
  AARCH64_FEATURE_AES,
  /* v8.2 FP16FML ins.  */
  AARCH64_FEATURE_F16_FML,
  /* ARMv8.5 processors.  */
  AARCH64_FEATURE_V8_5A,
  /* v8.5 Flag Manipulation version 2.  */
  AARCH64_FEATURE_FLAGMANIP,
  /* FRINT[32,64][Z,X] insns.  */
  AARCH64_FEATURE_FRINTTS,
  /* SB instruction.  */
  AARCH64_FEATURE_SB,
  /* Execution and Data Prediction Restriction instructions.  */
  AARCH64_FEATURE_PREDRES,
  /* DC CVADP.  */
  AARCH64_FEATURE_CVADP,
  /* Random Number instructions.  */
  AARCH64_FEATURE_RNG,
  /* BTI instructions.  */
  AARCH64_FEATURE_BTI,
  /* SCXTNUM_ELx.  */
  AARCH64_FEATURE_SCXTNUM,
  /* ID_PFR2 instructions.  */
  AARCH64_FEATURE_ID_PFR2,
  /* SSBS mechanism enabled.  */
  AARCH64_FEATURE_SSBS,
  /* Memory Tagging Extension.  */
  AARCH64_FEATURE_MEMTAG,
  /* Transactional Memory Extension.  */
  AARCH64_FEATURE_TME,
  /* Standardization of memory operations.  */
  AARCH64_FEATURE_MOPS,
  /* Hinted conditional branches.  */
  AARCH64_FEATURE_HBC,
  /* Matrix Multiply instructions.  */
  AARCH64_FEATURE_I8MM,
  AARCH64_FEATURE_F32MM,
  AARCH64_FEATURE_F64MM,
  /* v8.4 Flag Manipulation.  */
  AARCH64_FEATURE_FLAGM,
  /* Armv9.0-A processors.  */
  AARCH64_FEATURE_V9A,
  /* SME F64F64.  */
  AARCH64_FEATURE_SME_F64F64,
  /* SME I16I64.  */
  AARCH64_FEATURE_SME_I16I64,
  /* Armv8.8 processors.  */
  AARCH64_FEATURE_V8_8A,
  /* Common Short Sequence Compression instructions.  */
  AARCH64_FEATURE_CSSC,
  /* SME2.  */
  AARCH64_FEATURE_SME2,
  /* NOTE(review): placeholder entries with no AARCH64_FEATURE_ prefix;
     presumably reserved padding.  Removing them would shift
     AARCH64_NUM_FEATURES — confirm they are unused before deleting.  */
  DUMMY1,
  DUMMY2,
  DUMMY3,
  /* Total number of feature bits; used to size aarch64_feature_set.
     Must remain last.  */
  AARCH64_NUM_FEATURES
};
167
/* These macros take an initial argument X that gives the index into
   an aarch64_feature_set.  The macros then return the bitmask for
   that array index.  */

/* A mask in which feature bit BIT is set and all other bits are clear.
   The result is zero whenever BIT does not fall inside 64-bit word X,
   so per-word masks can simply be OR-ed together.  */
#define AARCH64_UINT64_BIT(X, BIT) \
  (((X) == (BIT) / 64) ? (1ULL << ((BIT) % 64)) : 0ULL)

/* A mask that includes only AARCH64_FEATURE_<NAME>.  */
#define AARCH64_FEATBIT(X, NAME) \
  AARCH64_UINT64_BIT (X, AARCH64_FEATURE_##NAME)
179
/* A mask of the features that are enabled by each architecture version,
   excluding those that are inherited from other architecture versions.
   Each macro takes the aarch64_feature_set word index X and expands to a
   constant expression, so it can be used in static initializers.  */
#define AARCH64_ARCH_V8A_FEATURES(X)	(AARCH64_FEATBIT (X, V8A)	\
					 | AARCH64_FEATBIT (X, FP)	\
					 | AARCH64_FEATBIT (X, RAS)	\
					 | AARCH64_FEATBIT (X, SIMD))
#define AARCH64_ARCH_V8_1A_FEATURES(X)	(AARCH64_FEATBIT (X, V8_1A)	\
					 | AARCH64_FEATBIT (X, CRC)	\
					 | AARCH64_FEATBIT (X, LSE)	\
					 | AARCH64_FEATBIT (X, PAN)	\
					 | AARCH64_FEATBIT (X, LOR)	\
					 | AARCH64_FEATBIT (X, RDMA))
#define AARCH64_ARCH_V8_2A_FEATURES(X)	(AARCH64_FEATBIT (X, V8_2A))
#define AARCH64_ARCH_V8_3A_FEATURES(X)	(AARCH64_FEATBIT (X, V8_3A)	\
					 | AARCH64_FEATBIT (X, PAC)	\
					 | AARCH64_FEATBIT (X, RCPC)	\
					 | AARCH64_FEATBIT (X, COMPNUM))
#define AARCH64_ARCH_V8_4A_FEATURES(X)	(AARCH64_FEATBIT (X, V8_4A)	\
					 | AARCH64_FEATBIT (X, DOTPROD)	\
					 | AARCH64_FEATBIT (X, FLAGM)	\
					 | AARCH64_FEATBIT (X, F16_FML))
#define AARCH64_ARCH_V8_5A_FEATURES(X)	(AARCH64_FEATBIT (X, V8_5A)	\
					 | AARCH64_FEATBIT (X, FLAGMANIP) \
					 | AARCH64_FEATBIT (X, FRINTTS)	\
					 | AARCH64_FEATBIT (X, SB)	\
					 | AARCH64_FEATBIT (X, PREDRES)	\
					 | AARCH64_FEATBIT (X, CVADP)	\
					 | AARCH64_FEATBIT (X, BTI)	\
					 | AARCH64_FEATBIT (X, SCXTNUM)	\
					 | AARCH64_FEATBIT (X, ID_PFR2)	\
					 | AARCH64_FEATBIT (X, SSBS))
#define AARCH64_ARCH_V8_6A_FEATURES(X)	(AARCH64_FEATBIT (X, V8_6A)	\
					 | AARCH64_FEATBIT (X, BFLOAT16) \
					 | AARCH64_FEATBIT (X, I8MM))
#define AARCH64_ARCH_V8_7A_FEATURES(X)	(AARCH64_FEATBIT (X, V8_7A)	\
					 | AARCH64_FEATBIT (X, LS64))
#define AARCH64_ARCH_V8_8A_FEATURES(X)	(AARCH64_FEATBIT (X, V8_8A)	\
					 | AARCH64_FEATBIT (X, MOPS)	\
					 | AARCH64_FEATBIT (X, HBC))

#define AARCH64_ARCH_V9A_FEATURES(X)	(AARCH64_FEATBIT (X, V9A)	\
					 | AARCH64_FEATBIT (X, F16)	\
					 | AARCH64_FEATBIT (X, SVE)	\
					 | AARCH64_FEATBIT (X, SVE2))
/* Each Armv9.x minor version adds exactly the same new features as the
   corresponding Armv8.(x+5) minor version.  */
#define AARCH64_ARCH_V9_1A_FEATURES(X)	AARCH64_ARCH_V8_6A_FEATURES (X)
#define AARCH64_ARCH_V9_2A_FEATURES(X)	AARCH64_ARCH_V8_7A_FEATURES (X)
#define AARCH64_ARCH_V9_3A_FEATURES(X)	AARCH64_ARCH_V8_8A_FEATURES (X)
227
/* Architectures are the sum of the base and extensions.  Each minor
   version is defined as the previous version plus that version's new
   features, so the sets accumulate.  */
#define AARCH64_ARCH_V8A(X)	(AARCH64_FEATBIT (X, V8)	\
				 | AARCH64_ARCH_V8A_FEATURES (X))
#define AARCH64_ARCH_V8_1A(X)	(AARCH64_ARCH_V8A (X)		\
				 | AARCH64_ARCH_V8_1A_FEATURES (X))
#define AARCH64_ARCH_V8_2A(X)	(AARCH64_ARCH_V8_1A (X)		\
				 | AARCH64_ARCH_V8_2A_FEATURES (X))
#define AARCH64_ARCH_V8_3A(X)	(AARCH64_ARCH_V8_2A (X)		\
				 | AARCH64_ARCH_V8_3A_FEATURES (X))
#define AARCH64_ARCH_V8_4A(X)	(AARCH64_ARCH_V8_3A (X)		\
				 | AARCH64_ARCH_V8_4A_FEATURES (X))
#define AARCH64_ARCH_V8_5A(X)	(AARCH64_ARCH_V8_4A (X)		\
				 | AARCH64_ARCH_V8_5A_FEATURES (X))
#define AARCH64_ARCH_V8_6A(X)	(AARCH64_ARCH_V8_5A (X)		\
				 | AARCH64_ARCH_V8_6A_FEATURES (X))
#define AARCH64_ARCH_V8_7A(X)	(AARCH64_ARCH_V8_6A (X)		\
				 | AARCH64_ARCH_V8_7A_FEATURES (X))
#define AARCH64_ARCH_V8_8A(X)	(AARCH64_ARCH_V8_7A (X)		\
				 | AARCH64_ARCH_V8_8A_FEATURES (X))
/* Armv8-R: built on the Armv8.4 set, but with the V8A and LOR bits
   explicitly masked out.  */
#define AARCH64_ARCH_V8R(X)	((AARCH64_ARCH_V8_4A (X)	\
				  | AARCH64_FEATBIT (X, V8R))	\
				 & ~AARCH64_FEATBIT (X, V8A)	\
				 & ~AARCH64_FEATBIT (X, LOR))

#define AARCH64_ARCH_V9A(X)	(AARCH64_ARCH_V8_5A (X)		\
				 | AARCH64_ARCH_V9A_FEATURES (X))
#define AARCH64_ARCH_V9_1A(X)	(AARCH64_ARCH_V9A (X)		\
				 | AARCH64_ARCH_V9_1A_FEATURES (X))
#define AARCH64_ARCH_V9_2A(X)	(AARCH64_ARCH_V9_1A (X)		\
				 | AARCH64_ARCH_V9_2A_FEATURES (X))
#define AARCH64_ARCH_V9_3A(X)	(AARCH64_ARCH_V9_2A (X)		\
				 | AARCH64_ARCH_V9_3A_FEATURES (X))

/* The empty architecture: no feature bits at all.  */
#define AARCH64_ARCH_NONE(X)	0
262
/* CPU-specific features.  A bitset with one bit per aarch64_feature_bit,
   stored as an array of 64-bit words.  With the current feature count the
   array has exactly two elements; the accessor macros below hardcode
   indices 0 and 1 on that assumption.  */
typedef struct {
  uint64_t flags[(AARCH64_NUM_FEATURES + 63) / 64];
} aarch64_feature_set;

/* Nonzero if CPU has the single feature AARCH64_FEATURE_<FEAT>.  */
#define AARCH64_CPU_HAS_FEATURE(CPU,FEAT)		\
  ((~(CPU).flags[0] & AARCH64_FEATBIT (0, FEAT)) == 0	\
   && (~(CPU).flags[1] & AARCH64_FEATBIT (1, FEAT)) == 0)

/* Nonzero if CPU has every feature in the set FEAT.  */
#define AARCH64_CPU_HAS_ALL_FEATURES(CPU,FEAT)	\
  ((~(CPU).flags[0] & (FEAT).flags[0]) == 0	\
   && (~(CPU).flags[1] & (FEAT).flags[1]) == 0)

/* Nonzero if CPU has at least one feature in the set FEAT.  */
#define AARCH64_CPU_HAS_ANY_FEATURES(CPU,FEAT)	\
  (((CPU).flags[0] & (FEAT).flags[0]) != 0	\
   || ((CPU).flags[1] & (FEAT).flags[1]) != 0)

/* Overwrite DEST with the set produced by the set-valued macro FEAT
   (e.g. one of the AARCH64_ARCH_* macros above).  */
#define AARCH64_SET_FEATURE(DEST, FEAT) \
  ((DEST).flags[0] = FEAT (0),		\
   (DEST).flags[1] = FEAT (1))

/* DEST = SRC with the single feature AARCH64_FEATURE_<FEAT> removed.  */
#define AARCH64_CLEAR_FEATURE(DEST, SRC, FEAT)			\
  ((DEST).flags[0] = (SRC).flags[0] & ~AARCH64_FEATBIT (0, FEAT), \
   (DEST).flags[1] = (SRC).flags[1] & ~AARCH64_FEATBIT (1, FEAT))

/* TARG = F1 | F2 (set union).  */
#define AARCH64_MERGE_FEATURE_SETS(TARG,F1,F2)		\
  do							\
    {							\
      (TARG).flags[0] = (F1).flags[0] | (F2).flags[0];	\
      (TARG).flags[1] = (F1).flags[1] | (F2).flags[1];	\
    }							\
  while (0)

/* TARG = F1 & ~F2 (set difference).  */
#define AARCH64_CLEAR_FEATURES(TARG,F1,F2)		\
  do							\
    {							\
      (TARG).flags[0] = (F1).flags[0] &~ (F2).flags[0];	\
      (TARG).flags[1] = (F1).flags[1] &~ (F2).flags[1];	\
    }							\
  while (0)

/* aarch64_feature_set initializers for no features and all features,
   respectively.  (-1 converts to the all-ones uint64_t value.)  */
#define AARCH64_NO_FEATURES { { 0, 0 } }
#define AARCH64_ALL_FEATURES { { -1, -1 } }
308
/* An aarch64_feature_set initializer for a single feature,
   AARCH64_FEATURE_<FEAT>.  */
#define AARCH64_FEATURE(FEAT) \
  { { AARCH64_FEATBIT (0, FEAT), AARCH64_FEATBIT (1, FEAT) } }

/* An aarch64_feature_set initializer for a specific architecture version,
   including all the features that are enabled by default for that architecture
   version.  */
#define AARCH64_ARCH_FEATURES(ARCH) \
  { { AARCH64_ARCH_##ARCH (0), AARCH64_ARCH_##ARCH (1) } }

/* Used by AARCH64_CPU_FEATURES.  Each _N variant ORs its first feature
   argument into the result of the _(N-1) variant, bottoming out at _1,
   which ORs the final feature with the base architecture set.  */
#define AARCH64_OR_FEATURES_1(X, ARCH, F1) \
  (AARCH64_FEATBIT (X, F1) | AARCH64_ARCH_##ARCH (X))
#define AARCH64_OR_FEATURES_2(X, ARCH, F1, F2) \
  (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_1 (X, ARCH, F2))
#define AARCH64_OR_FEATURES_3(X, ARCH, F1, ...) \
  (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_2 (X, ARCH, __VA_ARGS__))
#define AARCH64_OR_FEATURES_4(X, ARCH, F1, ...) \
  (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_3 (X, ARCH, __VA_ARGS__))
#define AARCH64_OR_FEATURES_5(X, ARCH, F1, ...) \
  (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_4 (X, ARCH, __VA_ARGS__))
#define AARCH64_OR_FEATURES_6(X, ARCH, F1, ...) \
  (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_5 (X, ARCH, __VA_ARGS__))
#define AARCH64_OR_FEATURES_7(X, ARCH, F1, ...) \
  (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_6 (X, ARCH, __VA_ARGS__))
#define AARCH64_OR_FEATURES_8(X, ARCH, F1, ...) \
  (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_7 (X, ARCH, __VA_ARGS__))
#define AARCH64_OR_FEATURES_9(X, ARCH, F1, ...) \
  (AARCH64_FEATBIT (X, F1) | AARCH64_OR_FEATURES_8 (X, ARCH, __VA_ARGS__))

/* An aarch64_feature_set initializer for a CPU that implements architecture
   version ARCH, and additionally provides the N features listed in "...".
   N must match the number of variadic arguments.  */
#define AARCH64_CPU_FEATURES(ARCH, N, ...)		\
  { { AARCH64_OR_FEATURES_##N (0, ARCH, __VA_ARGS__),	\
      AARCH64_OR_FEATURES_##N (1, ARCH, __VA_ARGS__) } }

/* An aarch64_feature_set initializer for the N features listed in "...",
   with no base architecture bits.  */
#define AARCH64_FEATURES(N, ...) \
  AARCH64_CPU_FEATURES (NONE, N, __VA_ARGS__)
349
/* Broad classes of instruction operand; each aarch64_opnd code below
   belongs to one of these classes.  */
enum aarch64_operand_class
{
  AARCH64_OPND_CLASS_NIL,
  AARCH64_OPND_CLASS_INT_REG,
  AARCH64_OPND_CLASS_MODIFIED_REG,
  AARCH64_OPND_CLASS_FP_REG,
  AARCH64_OPND_CLASS_SIMD_REG,
  AARCH64_OPND_CLASS_SIMD_ELEMENT,
  AARCH64_OPND_CLASS_SISD_REG,
  AARCH64_OPND_CLASS_SIMD_REGLIST,
  AARCH64_OPND_CLASS_SVE_REG,
  AARCH64_OPND_CLASS_SVE_REGLIST,
  AARCH64_OPND_CLASS_PRED_REG,
  AARCH64_OPND_CLASS_ZA_ACCESS,
  AARCH64_OPND_CLASS_ADDRESS,
  AARCH64_OPND_CLASS_IMMEDIATE,
  AARCH64_OPND_CLASS_SYSTEM,
  AARCH64_OPND_CLASS_COND,
};
369
/* Operand code that helps both parsing and coding.
   Keep AARCH64_OPERANDS synced.  NOTE(review): entries appear to be
   matched positionally with an external operand table, so do not reorder
   existing enumerators; add new ones at the end.  */

enum aarch64_opnd
{
  AARCH64_OPND_NIL,	/* no operand---MUST BE FIRST!  */

  AARCH64_OPND_Rd,	/* Integer register as destination.  */
  AARCH64_OPND_Rn,	/* Integer register as source.  */
  AARCH64_OPND_Rm,	/* Integer register as source.  */
  AARCH64_OPND_Rt,	/* Integer register used in ld/st instructions.  */
  AARCH64_OPND_Rt2,	/* Integer register used in ld/st pair instructions.  */
  AARCH64_OPND_Rt_LS64,	/* Integer register used in LS64 instructions.  */
  AARCH64_OPND_Rt_SP,	/* Integer Rt or SP used in STG instructions.  */
  AARCH64_OPND_Rs,	/* Integer register used in ld/st exclusive.  */
  AARCH64_OPND_Ra,	/* Integer register used in ddp_3src instructions.  */
  AARCH64_OPND_Rt_SYS,	/* Integer register used in system instructions.  */

  AARCH64_OPND_Rd_SP,	/* Integer Rd or SP.  */
  AARCH64_OPND_Rn_SP,	/* Integer Rn or SP.  */
  AARCH64_OPND_Rm_SP,	/* Integer Rm or SP.  */
  AARCH64_OPND_PAIRREG,	/* Paired register operand.  */
  AARCH64_OPND_Rm_EXT,	/* Integer Rm extended.  */
  AARCH64_OPND_Rm_SFT,	/* Integer Rm shifted.  */

  AARCH64_OPND_Fd,	/* Floating-point Fd.  */
  AARCH64_OPND_Fn,	/* Floating-point Fn.  */
  AARCH64_OPND_Fm,	/* Floating-point Fm.  */
  AARCH64_OPND_Fa,	/* Floating-point Fa.  */
  AARCH64_OPND_Ft,	/* Floating-point Ft.  */
  AARCH64_OPND_Ft2,	/* Floating-point Ft2.  */

  AARCH64_OPND_Sd,	/* AdvSIMD Scalar Sd.  */
  AARCH64_OPND_Sn,	/* AdvSIMD Scalar Sn.  */
  AARCH64_OPND_Sm,	/* AdvSIMD Scalar Sm.  */

  AARCH64_OPND_Va,	/* AdvSIMD Vector Va.  */
  AARCH64_OPND_Vd,	/* AdvSIMD Vector Vd.  */
  AARCH64_OPND_Vn,	/* AdvSIMD Vector Vn.  */
  AARCH64_OPND_Vm,	/* AdvSIMD Vector Vm.  */
  AARCH64_OPND_VdD1,	/* AdvSIMD <Vd>.D[1]; for FMOV only.  */
  AARCH64_OPND_VnD1,	/* AdvSIMD <Vn>.D[1]; for FMOV only.  */
  AARCH64_OPND_Ed,	/* AdvSIMD Vector Element Vd.  */
  AARCH64_OPND_En,	/* AdvSIMD Vector Element Vn.  */
  AARCH64_OPND_Em,	/* AdvSIMD Vector Element Vm.  */
  AARCH64_OPND_Em16,	/* AdvSIMD Vector Element Vm restricted to V0 - V15 when
			   qualifier is S_H.  */
  AARCH64_OPND_LVn,	/* AdvSIMD Vector register list used in e.g. TBL.  */
  AARCH64_OPND_LVt,	/* AdvSIMD Vector register list used in ld/st.  */
  AARCH64_OPND_LVt_AL,	/* AdvSIMD Vector register list for loading single
			   structure to all lanes.  */
  AARCH64_OPND_LEt,	/* AdvSIMD Vector Element list.  */

  AARCH64_OPND_CRn,	/* Co-processor register in CRn field.  */
  AARCH64_OPND_CRm,	/* Co-processor register in CRm field.  */

  AARCH64_OPND_IDX,	/* AdvSIMD EXT index operand.  */
  AARCH64_OPND_MASK,	/* AdvSIMD EXT index operand.  */
  AARCH64_OPND_IMM_VLSL,/* Immediate for shifting vector registers left.  */
  AARCH64_OPND_IMM_VLSR,/* Immediate for shifting vector registers right.  */
  AARCH64_OPND_SIMD_IMM,/* AdvSIMD modified immediate without shift.  */
  AARCH64_OPND_SIMD_IMM_SFT,	/* AdvSIMD modified immediate with shift.  */
  AARCH64_OPND_SIMD_FPIMM,/* AdvSIMD 8-bit fp immediate.  */
  AARCH64_OPND_SHLL_IMM,/* Immediate shift for AdvSIMD SHLL instruction
			   (no encoding).  */
  AARCH64_OPND_IMM0,	/* Immediate for #0.  */
  AARCH64_OPND_FPIMM0,	/* Immediate for #0.0.  */
  AARCH64_OPND_FPIMM,	/* Floating-point Immediate.  */
  AARCH64_OPND_IMMR,	/* Immediate #<immr> in e.g. BFM.  */
  AARCH64_OPND_IMMS,	/* Immediate #<imms> in e.g. BFM.  */
  AARCH64_OPND_WIDTH,	/* Immediate #<width> in e.g. BFI.  */
  AARCH64_OPND_IMM,	/* Immediate.  */
  AARCH64_OPND_IMM_2,	/* Immediate.  */
  AARCH64_OPND_UIMM3_OP1,/* Unsigned 3-bit immediate in the op1 field.  */
  AARCH64_OPND_UIMM3_OP2,/* Unsigned 3-bit immediate in the op2 field.  */
  AARCH64_OPND_UIMM4,	/* Unsigned 4-bit immediate in the CRm field.  */
  AARCH64_OPND_UIMM4_ADDG,/* Unsigned 4-bit immediate in addg/subg.  */
  AARCH64_OPND_UIMM7,	/* Unsigned 7-bit immediate in the CRm:op2 fields.  */
  AARCH64_OPND_UIMM10,	/* Unsigned 10-bit immediate in addg/subg.  */
  AARCH64_OPND_BIT_NUM,	/* Immediate.  */
  AARCH64_OPND_EXCEPTION,/* imm16 operand in exception instructions.  */
  AARCH64_OPND_UNDEFINED,/* imm16 operand in undefined instruction.  */
  AARCH64_OPND_CCMP_IMM,/* Immediate in conditional compare instructions.  */
  AARCH64_OPND_SIMM5,	/* 5-bit signed immediate in the imm5 field.  */
  AARCH64_OPND_NZCV,	/* Flag bit specifier giving an alternative value for
			   each condition flag.  */

  AARCH64_OPND_LIMM,	/* Logical Immediate.  */
  AARCH64_OPND_AIMM,	/* Arithmetic immediate.  */
  AARCH64_OPND_HALF,	/* #<imm16>{, LSL #<shift>} operand in move wide.  */
  AARCH64_OPND_FBITS,	/* FP #<fbits> operand in e.g. SCVTF */
  AARCH64_OPND_IMM_MOV,	/* Immediate operand for the MOV alias.  */
  AARCH64_OPND_IMM_ROT1,	/* Immediate rotate operand for FCMLA.  */
  AARCH64_OPND_IMM_ROT2,	/* Immediate rotate operand for indexed FCMLA.  */
  AARCH64_OPND_IMM_ROT3,	/* Immediate rotate operand for FCADD.  */

  AARCH64_OPND_COND,	/* Standard condition as the last operand.  */
  AARCH64_OPND_COND1,	/* Same as the above, but excluding AL and NV.  */

  AARCH64_OPND_ADDR_ADRP,	/* Memory address for ADRP */
  AARCH64_OPND_ADDR_PCREL14,	/* 14-bit PC-relative address for e.g. TBZ.  */
  AARCH64_OPND_ADDR_PCREL19,	/* 19-bit PC-relative address for e.g. LDR.  */
  AARCH64_OPND_ADDR_PCREL21,	/* 21-bit PC-relative address for e.g. ADR.  */
  AARCH64_OPND_ADDR_PCREL26,	/* 26-bit PC-relative address for e.g. BL.  */

  AARCH64_OPND_ADDR_SIMPLE,	/* Address of ld/st exclusive.  */
  AARCH64_OPND_ADDR_REGOFF,	/* Address of register offset.  */
  AARCH64_OPND_ADDR_SIMM7,	/* Address of signed 7-bit immediate.  */
  AARCH64_OPND_ADDR_SIMM9,	/* Address of signed 9-bit immediate.  */
  AARCH64_OPND_ADDR_SIMM9_2,	/* Same as the above, but the immediate is
				   negative or unaligned and there is
				   no writeback allowed.  This operand code
				   is only used to support the programmer-
				   friendly feature of using LDR/STR as the
				   mnemonic name for LDUR/STUR instructions
				   wherever there is no ambiguity.  */
  AARCH64_OPND_ADDR_SIMM10,	/* Address of signed 10-bit immediate.  */
  AARCH64_OPND_ADDR_SIMM11,	/* Address with a signed 11-bit (multiple of
				   16) immediate.  */
  AARCH64_OPND_ADDR_UIMM12,	/* Address of unsigned 12-bit immediate.  */
  AARCH64_OPND_ADDR_SIMM13,	/* Address with a signed 13-bit (multiple of
				   16) immediate.  */
  AARCH64_OPND_SIMD_ADDR_SIMPLE,/* Address of ld/st multiple structures.  */
  AARCH64_OPND_ADDR_OFFSET,	/* Address with an optional 9-bit immediate.  */
  AARCH64_OPND_SIMD_ADDR_POST,	/* Address of ld/st multiple post-indexed.  */

  AARCH64_OPND_SYSREG,		/* System register operand.  */
  AARCH64_OPND_PSTATEFIELD,	/* PSTATE field name operand.  */
  AARCH64_OPND_SYSREG_AT,	/* System register <at_op> operand.  */
  AARCH64_OPND_SYSREG_DC,	/* System register <dc_op> operand.  */
  AARCH64_OPND_SYSREG_IC,	/* System register <ic_op> operand.  */
  AARCH64_OPND_SYSREG_TLBI,	/* System register <tlbi_op> operand.  */
  AARCH64_OPND_SYSREG_SR,	/* System register RCTX operand.  */
  AARCH64_OPND_BARRIER,		/* Barrier operand.  */
  AARCH64_OPND_BARRIER_DSB_NXS,	/* Barrier operand for DSB nXS variant.  */
  AARCH64_OPND_BARRIER_ISB,	/* Barrier operand for ISB.  */
  AARCH64_OPND_PRFOP,		/* Prefetch operation.  */
  AARCH64_OPND_RPRFMOP,		/* Range prefetch operation.  */
  AARCH64_OPND_BARRIER_PSB,	/* Barrier operand for PSB.  */
  AARCH64_OPND_BTI_TARGET,	/* BTI {<target>}.  */
  AARCH64_OPND_SVE_ADDR_RI_S4x16,   /* SVE [<Xn|SP>, #<simm4>*16].  */
  AARCH64_OPND_SVE_ADDR_RI_S4x32,   /* SVE [<Xn|SP>, #<simm4>*32].  */
  AARCH64_OPND_SVE_ADDR_RI_S4xVL,   /* SVE [<Xn|SP>, #<simm4>, MUL VL].  */
  AARCH64_OPND_SVE_ADDR_RI_S4x2xVL, /* SVE [<Xn|SP>, #<simm4>*2, MUL VL].  */
  AARCH64_OPND_SVE_ADDR_RI_S4x3xVL, /* SVE [<Xn|SP>, #<simm4>*3, MUL VL].  */
  AARCH64_OPND_SVE_ADDR_RI_S4x4xVL, /* SVE [<Xn|SP>, #<simm4>*4, MUL VL].  */
  AARCH64_OPND_SVE_ADDR_RI_S6xVL,   /* SVE [<Xn|SP>, #<simm6>, MUL VL].  */
  AARCH64_OPND_SVE_ADDR_RI_S9xVL,   /* SVE [<Xn|SP>, #<simm9>, MUL VL].  */
  AARCH64_OPND_SVE_ADDR_RI_U6,	    /* SVE [<Xn|SP>, #<uimm6>].  */
  AARCH64_OPND_SVE_ADDR_RI_U6x2,    /* SVE [<Xn|SP>, #<uimm6>*2].  */
  AARCH64_OPND_SVE_ADDR_RI_U6x4,    /* SVE [<Xn|SP>, #<uimm6>*4].  */
  AARCH64_OPND_SVE_ADDR_RI_U6x8,    /* SVE [<Xn|SP>, #<uimm6>*8].  */
  AARCH64_OPND_SVE_ADDR_R,	    /* SVE [<Xn|SP>].  */
  AARCH64_OPND_SVE_ADDR_RR,	    /* SVE [<Xn|SP>, <Xm|XZR>].  */
  AARCH64_OPND_SVE_ADDR_RR_LSL1,    /* SVE [<Xn|SP>, <Xm|XZR>, LSL #1].  */
  AARCH64_OPND_SVE_ADDR_RR_LSL2,    /* SVE [<Xn|SP>, <Xm|XZR>, LSL #2].  */
  AARCH64_OPND_SVE_ADDR_RR_LSL3,    /* SVE [<Xn|SP>, <Xm|XZR>, LSL #3].  */
  AARCH64_OPND_SVE_ADDR_RR_LSL4,    /* SVE [<Xn|SP>, <Xm|XZR>, LSL #4].  */
  AARCH64_OPND_SVE_ADDR_RX,	    /* SVE [<Xn|SP>, <Xm>].  */
  AARCH64_OPND_SVE_ADDR_RX_LSL1,    /* SVE [<Xn|SP>, <Xm>, LSL #1].  */
  AARCH64_OPND_SVE_ADDR_RX_LSL2,    /* SVE [<Xn|SP>, <Xm>, LSL #2].  */
  AARCH64_OPND_SVE_ADDR_RX_LSL3,    /* SVE [<Xn|SP>, <Xm>, LSL #3].  */
  AARCH64_OPND_SVE_ADDR_ZX,	    /* SVE [Zn.<T>{, <Xm>}].  */
  AARCH64_OPND_SVE_ADDR_RZ,	    /* SVE [<Xn|SP>, Zm.D].  */
  AARCH64_OPND_SVE_ADDR_RZ_LSL1,    /* SVE [<Xn|SP>, Zm.D, LSL #1].  */
  AARCH64_OPND_SVE_ADDR_RZ_LSL2,    /* SVE [<Xn|SP>, Zm.D, LSL #2].  */
  AARCH64_OPND_SVE_ADDR_RZ_LSL3,    /* SVE [<Xn|SP>, Zm.D, LSL #3].  */
  AARCH64_OPND_SVE_ADDR_RZ_XTW_14,  /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW].
				       Bit 14 controls S/U choice.  */
  AARCH64_OPND_SVE_ADDR_RZ_XTW_22,  /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW].
				       Bit 22 controls S/U choice.  */
  AARCH64_OPND_SVE_ADDR_RZ_XTW1_14, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #1].
				       Bit 14 controls S/U choice.  */
  AARCH64_OPND_SVE_ADDR_RZ_XTW1_22, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #1].
				       Bit 22 controls S/U choice.  */
  AARCH64_OPND_SVE_ADDR_RZ_XTW2_14, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #2].
				       Bit 14 controls S/U choice.  */
  AARCH64_OPND_SVE_ADDR_RZ_XTW2_22, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #2].
				       Bit 22 controls S/U choice.  */
  AARCH64_OPND_SVE_ADDR_RZ_XTW3_14, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #3].
				       Bit 14 controls S/U choice.  */
  AARCH64_OPND_SVE_ADDR_RZ_XTW3_22, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #3].
				       Bit 22 controls S/U choice.  */
  AARCH64_OPND_SVE_ADDR_ZI_U5,	    /* SVE [Zn.<T>, #<uimm5>].  */
  AARCH64_OPND_SVE_ADDR_ZI_U5x2,    /* SVE [Zn.<T>, #<uimm5>*2].  */
  AARCH64_OPND_SVE_ADDR_ZI_U5x4,    /* SVE [Zn.<T>, #<uimm5>*4].  */
  AARCH64_OPND_SVE_ADDR_ZI_U5x8,    /* SVE [Zn.<T>, #<uimm5>*8].  */
  AARCH64_OPND_SVE_ADDR_ZZ_LSL,	    /* SVE [Zn.<T>, Zm,<T>, LSL #<msz>].  */
  AARCH64_OPND_SVE_ADDR_ZZ_SXTW,    /* SVE [Zn.<T>, Zm,<T>, SXTW #<msz>].  */
  AARCH64_OPND_SVE_ADDR_ZZ_UXTW,    /* SVE [Zn.<T>, Zm,<T>, UXTW #<msz>].  */
  AARCH64_OPND_SVE_AIMM,	/* SVE unsigned arithmetic immediate.  */
  AARCH64_OPND_SVE_ASIMM,	/* SVE signed arithmetic immediate.  */
  AARCH64_OPND_SVE_FPIMM8,	/* SVE 8-bit floating-point immediate.  */
  AARCH64_OPND_SVE_I1_HALF_ONE,	/* SVE choice between 0.5 and 1.0.  */
  AARCH64_OPND_SVE_I1_HALF_TWO,	/* SVE choice between 0.5 and 2.0.  */
  AARCH64_OPND_SVE_I1_ZERO_ONE,	/* SVE choice between 0.0 and 1.0.  */
  AARCH64_OPND_SVE_IMM_ROT1,	/* SVE 1-bit rotate operand (90 or 270).  */
  AARCH64_OPND_SVE_IMM_ROT2,	/* SVE 2-bit rotate operand (N*90).  */
  AARCH64_OPND_SVE_IMM_ROT3,	/* SVE cadd 1-bit rotate (90 or 270).  */
  AARCH64_OPND_SVE_INV_LIMM,	/* SVE inverted logical immediate.  */
  AARCH64_OPND_SVE_LIMM,	/* SVE logical immediate.  */
  AARCH64_OPND_SVE_LIMM_MOV,	/* SVE logical immediate for MOV.  */
  AARCH64_OPND_SVE_PATTERN,	/* SVE vector pattern enumeration.  */
  AARCH64_OPND_SVE_PATTERN_SCALED, /* Likewise, with additional MUL factor.  */
  AARCH64_OPND_SVE_PRFOP,	/* SVE prefetch operation.  */
  AARCH64_OPND_SVE_Pd,		/* SVE p0-p15 in Pd.  */
  AARCH64_OPND_SVE_PNd,		/* SVE pn0-pn15 in Pd.  */
  AARCH64_OPND_SVE_Pg3,		/* SVE p0-p7 in Pg.  */
  AARCH64_OPND_SVE_Pg4_5,	/* SVE p0-p15 in Pg, bits [8,5].  */
  AARCH64_OPND_SVE_Pg4_10,	/* SVE p0-p15 in Pg, bits [13,10].  */
  AARCH64_OPND_SVE_PNg4_10,	/* SVE pn0-pn15 in Pg, bits [13,10].  */
  AARCH64_OPND_SVE_Pg4_16,	/* SVE p0-p15 in Pg, bits [19,16].  */
  AARCH64_OPND_SVE_Pm,		/* SVE p0-p15 in Pm.  */
  AARCH64_OPND_SVE_Pn,		/* SVE p0-p15 in Pn.  */
  AARCH64_OPND_SVE_PNn,		/* SVE pn0-pn15 in Pn.  */
  AARCH64_OPND_SVE_Pt,		/* SVE p0-p15 in Pt.  */
  AARCH64_OPND_SVE_PNt,		/* SVE pn0-pn15 in Pt.  */
  AARCH64_OPND_SVE_Rm,		/* Integer Rm or ZR, alt. SVE position.  */
  AARCH64_OPND_SVE_Rn_SP,	/* Integer Rn or SP, alt. SVE position.  */
  AARCH64_OPND_SVE_SHLIMM_PRED,	  /* SVE shift left amount (predicated).  */
  AARCH64_OPND_SVE_SHLIMM_UNPRED, /* SVE shift left amount (unpredicated).  */
  AARCH64_OPND_SVE_SHLIMM_UNPRED_22, /* SVE 3 bit shift left unpred.  */
  AARCH64_OPND_SVE_SHRIMM_PRED,	  /* SVE shift right amount (predicated).  */
  AARCH64_OPND_SVE_SHRIMM_UNPRED, /* SVE shift right amount (unpredicated).  */
  AARCH64_OPND_SVE_SHRIMM_UNPRED_22, /* SVE 3 bit shift right unpred.  */
  AARCH64_OPND_SVE_SIMM5,	/* SVE signed 5-bit immediate.  */
  AARCH64_OPND_SVE_SIMM5B,	/* SVE secondary signed 5-bit immediate.  */
  AARCH64_OPND_SVE_SIMM6,	/* SVE signed 6-bit immediate.  */
  AARCH64_OPND_SVE_SIMM8,	/* SVE signed 8-bit immediate.  */
  AARCH64_OPND_SVE_UIMM3,	/* SVE unsigned 3-bit immediate.  */
  AARCH64_OPND_SVE_UIMM7,	/* SVE unsigned 7-bit immediate.  */
  AARCH64_OPND_SVE_UIMM8,	/* SVE unsigned 8-bit immediate.  */
  AARCH64_OPND_SVE_UIMM8_53,	/* SVE split unsigned 8-bit immediate.  */
  AARCH64_OPND_SVE_VZn,		/* Scalar SIMD&FP register in Zn field.  */
  AARCH64_OPND_SVE_Vd,		/* Scalar SIMD&FP register in Vd.  */
  AARCH64_OPND_SVE_Vm,		/* Scalar SIMD&FP register in Vm.  */
  AARCH64_OPND_SVE_Vn,		/* Scalar SIMD&FP register in Vn.  */
  AARCH64_OPND_SVE_Za_5,	/* SVE vector register in Za, bits [9,5].  */
  AARCH64_OPND_SVE_Za_16,	/* SVE vector register in Za, bits [20,16].  */
  AARCH64_OPND_SVE_Zd,		/* SVE vector register in Zd.  */
  AARCH64_OPND_SVE_Zm_5,	/* SVE vector register in Zm, bits [9,5].  */
  AARCH64_OPND_SVE_Zm_16,	/* SVE vector register in Zm, bits [20,16].  */
  AARCH64_OPND_SVE_Zm3_INDEX,	/* z0-z7[0-3] in Zm, bits [20,16].  */
  AARCH64_OPND_SVE_Zm3_11_INDEX, /* z0-z7[0-7] in Zm3_INDEX plus bit 11.  */
  AARCH64_OPND_SVE_Zm3_19_INDEX, /* z0-z7[0-3] in Zm3_INDEX plus bit 19.  */
  AARCH64_OPND_SVE_Zm3_22_INDEX, /* z0-z7[0-7] in Zm3_INDEX plus bit 22.  */
  AARCH64_OPND_SVE_Zm4_11_INDEX, /* z0-z15[0-3] in Zm plus bit 11.  */
  AARCH64_OPND_SVE_Zm4_INDEX,	/* z0-z15[0-1] in Zm, bits [20,16].  */
  AARCH64_OPND_SVE_Zn,		/* SVE vector register in Zn.  */
  AARCH64_OPND_SVE_Zn_INDEX,	/* Indexed SVE vector register, for DUP.  */
  AARCH64_OPND_SVE_ZnxN,	/* SVE vector register list in Zn.  */
  AARCH64_OPND_SVE_Zt,		/* SVE vector register in Zt.  */
  AARCH64_OPND_SVE_ZtxN,	/* SVE vector register list in Zt.  */
  AARCH64_OPND_SME_Zdnx2,	/* SVE vector register list from [4:1]*2.  */
  AARCH64_OPND_SME_Zdnx4,	/* SVE vector register list from [4:2]*4.  */
  AARCH64_OPND_SME_Zm,		/* SVE vector register list in 4-bit Zm.  */
  AARCH64_OPND_SME_Zmx2,	/* SVE vector register list from [20:17]*2.  */
  AARCH64_OPND_SME_Zmx4,	/* SVE vector register list from [20:18]*4.  */
  AARCH64_OPND_SME_Znx2,	/* SVE vector register list from [9:6]*2.  */
  AARCH64_OPND_SME_Znx4,	/* SVE vector register list from [9:7]*4.  */
  AARCH64_OPND_SME_Ztx2_STRIDED, /* SVE vector register list in [4:0]&23.  */
  AARCH64_OPND_SME_Ztx4_STRIDED, /* SVE vector register list in [4:0]&19.  */
  AARCH64_OPND_SME_ZAda_2b,	/* SME <ZAda>.S, 2-bits.  */
  AARCH64_OPND_SME_ZAda_3b,	/* SME <ZAda>.D, 3-bits.  */
  AARCH64_OPND_SME_ZA_HV_idx_src,    /* SME source ZA tile vector.  */
  AARCH64_OPND_SME_ZA_HV_idx_srcxN,  /* SME N source ZA tile vectors.  */
  AARCH64_OPND_SME_ZA_HV_idx_dest,   /* SME destination ZA tile vector.  */
  AARCH64_OPND_SME_ZA_HV_idx_destxN, /* SME N dest ZA tile vectors.  */
  AARCH64_OPND_SME_Pdx2,	/* Predicate register list in [3:1].  */
  AARCH64_OPND_SME_PdxN,	/* Predicate register list in [3:0].  */
  AARCH64_OPND_SME_Pm,		/* SME scalable predicate register, bits [15:13].  */
  AARCH64_OPND_SME_PNd3,	/* Predicate-as-counter register, bits [3:0].  */
  AARCH64_OPND_SME_PNg3,	/* Predicate-as-counter register, bits [12:10].  */
  AARCH64_OPND_SME_PNn,		/* Predicate-as-counter register, bits [8:5].  */
  AARCH64_OPND_SME_PNn3_INDEX1,	/* Indexed pred-as-counter reg, bits [8:5].  */
  AARCH64_OPND_SME_PNn3_INDEX2,	/* Indexed pred-as-counter reg, bits [9:5].  */
  AARCH64_OPND_SME_list_of_64bit_tiles, /* SME list of ZA tiles.  */
  AARCH64_OPND_SME_ZA_HV_idx_ldstr, /* SME destination ZA tile vector.  */
  AARCH64_OPND_SME_ZA_array_off1x4, /* SME ZA[<Wv>, #<imm1>*4:<imm1>*4+3].  */
  AARCH64_OPND_SME_ZA_array_off2x2, /* SME ZA[<Wv>, #<imm2>*2:<imm2>*2+1].  */
  AARCH64_OPND_SME_ZA_array_off2x4, /* SME ZA[<Wv>, #<imm2>*4:<imm2>*4+3].  */
  AARCH64_OPND_SME_ZA_array_off3_0, /* SME ZA[<Wv>{, #<imm3>}].  */
  AARCH64_OPND_SME_ZA_array_off3_5, /* SME ZA[<Wv>{, #<imm3>}].  */
  AARCH64_OPND_SME_ZA_array_off3x2, /* SME ZA[<Wv>, #<imm3>*2:<imm3>*2+1].  */
  AARCH64_OPND_SME_ZA_array_off4,   /* SME ZA[<Wv>{, #<imm>}].  */
  AARCH64_OPND_SME_ADDR_RI_U4xVL,   /* SME [<Xn|SP>{, #<imm>, MUL VL}].  */
  AARCH64_OPND_SME_SM_ZA,	/* SME {SM | ZA}.  */
  AARCH64_OPND_SME_PnT_Wm_imm,	/* SME <Pn>.<T>[<Wm>, #<imm>].  */
  AARCH64_OPND_SME_SHRIMM4,	/* 4-bit right shift, bits [19:16].  */
  AARCH64_OPND_SME_SHRIMM5,	/* size + 5-bit right shift, bits [23:22,20:16].  */
  AARCH64_OPND_SME_Zm_INDEX1,	 /* Zn.T[index], bits [19:16,10].  */
  AARCH64_OPND_SME_Zm_INDEX2,	 /* Zn.T[index], bits [19:16,11:10].  */
  AARCH64_OPND_SME_Zm_INDEX3_1,	 /* Zn.T[index], bits [19:16,10,2:1].  */
  AARCH64_OPND_SME_Zm_INDEX3_2,	 /* Zn.T[index], bits [19:16,11:10,2].  */
  AARCH64_OPND_SME_Zm_INDEX3_10, /* Zn.T[index], bits [19:16,15,11:10].  */
  AARCH64_OPND_SME_Zm_INDEX4_1,	 /* Zn.T[index], bits [19:16,11:10,2:1].  */
  AARCH64_OPND_SME_Zm_INDEX4_10, /* Zn.T[index], bits [19:16,15,12:10].  */
  AARCH64_OPND_SME_Zn_INDEX1_16, /* Zn[index], bits [9:5] and [16:16].  */
  AARCH64_OPND_SME_Zn_INDEX2_15, /* Zn[index], bits [9:5] and [16:15].  */
  AARCH64_OPND_SME_Zn_INDEX2_16, /* Zn[index], bits [9:5] and [17:16].  */
  AARCH64_OPND_SME_Zn_INDEX3_14, /* Zn[index], bits [9:5] and [16:14].  */
  AARCH64_OPND_SME_Zn_INDEX3_15, /* Zn[index], bits [9:5] and [17:15].  */
  AARCH64_OPND_SME_Zn_INDEX4_14, /* Zn[index], bits [9:5] and [17:14].  */
  AARCH64_OPND_SME_VLxN_10,	/* VLx2 or VLx4, in bit 10.  */
  AARCH64_OPND_SME_VLxN_13,	/* VLx2 or VLx4, in bit 13.  */
  AARCH64_OPND_SME_ZT0,		/* The fixed token zt0/ZT0 (not encoded).  */
  AARCH64_OPND_SME_ZT0_INDEX,	/* ZT0[<imm>], bits [14:12].  */
  AARCH64_OPND_SME_ZT0_LIST,	/* { zt0/ZT0 } (not encoded).  */
  AARCH64_OPND_TME_UIMM16,	/* TME unsigned 16-bit immediate.  */
  AARCH64_OPND_SM3_IMM2,	/* SM3 encodes lane in bits [13, 14].  */
  AARCH64_OPND_MOPS_ADDR_Rd,	/* [Rd]!, in bits [0, 4].  */
  AARCH64_OPND_MOPS_ADDR_Rs,	/* [Rs]!, in bits [16, 20].  */
  AARCH64_OPND_MOPS_WB_Rn,	/* Rn!, in bits [5, 9].  */
  AARCH64_OPND_CSSC_SIMM8,	/* CSSC signed 8-bit immediate.  */
  AARCH64_OPND_CSSC_UIMM8,	/* CSSC unsigned 8-bit immediate.  */
};
686
/* Qualifier constrains an operand.  It either specifies a variant of an
   operand type or limits values available to an operand type.

   N.B. Order is important; keep aarch64_opnd_qualifiers synced.  */

enum aarch64_opnd_qualifier
{
  /* Indicating no further qualification on an operand.  */
  AARCH64_OPND_QLF_NIL,

  /* Qualifying an operand which is a general purpose (integer) register;
     indicating the operand data size or a specific register.  */
  AARCH64_OPND_QLF_W,	/* Wn, WZR or WSP.  */
  AARCH64_OPND_QLF_X,	/* Xn, XZR or XSP.  */
  AARCH64_OPND_QLF_WSP,	/* WSP.  */
  AARCH64_OPND_QLF_SP,	/* SP.  */

  /* Qualifying an operand which is a floating-point register, a SIMD
     vector element or a SIMD vector element list; indicating operand data
     size or the size of each SIMD vector element in the case of a SIMD
     vector element list.
     These qualifiers are also used to qualify an address operand to
     indicate the size of data element a load/store instruction is
     accessing.
     They are also used for the immediate shift operand in e.g. SSHR.  Such
     a use is only for the ease of operand encoding/decoding and qualifier
     sequence matching; such a use should not be applied widely; use the value
     constraint qualifiers for immediate operands wherever possible.  */
  AARCH64_OPND_QLF_S_B,
  AARCH64_OPND_QLF_S_H,
  AARCH64_OPND_QLF_S_S,
  AARCH64_OPND_QLF_S_D,
  AARCH64_OPND_QLF_S_Q,
  /* These type qualifiers have a special meaning in that they mean 4 x 1 byte
     or 2 x 2 byte are selected by the instruction.  Other than that they have
     no difference with AARCH64_OPND_QLF_S_B in encoding.  They are here purely
     for syntactical reasons and are an exception from the normal AArch64
     disassembly scheme.  */
  AARCH64_OPND_QLF_S_4B,
  AARCH64_OPND_QLF_S_2H,

  /* Qualifying an operand which is a SIMD vector register or a SIMD vector
     register list; indicating register shape.
     They are also used for the immediate shift operand in e.g. SSHR.  Such
     a use is only for the ease of operand encoding/decoding and qualifier
     sequence matching; such a use should not be applied widely; use the value
     constraint qualifiers for immediate operands wherever possible.  */
  AARCH64_OPND_QLF_V_4B,
  AARCH64_OPND_QLF_V_8B,
  AARCH64_OPND_QLF_V_16B,
  AARCH64_OPND_QLF_V_2H,
  AARCH64_OPND_QLF_V_4H,
  AARCH64_OPND_QLF_V_8H,
  AARCH64_OPND_QLF_V_2S,
  AARCH64_OPND_QLF_V_4S,
  AARCH64_OPND_QLF_V_1D,
  AARCH64_OPND_QLF_V_2D,
  AARCH64_OPND_QLF_V_1Q,

  /* SVE/SME predicate qualifiers: zeroing (/Z) and merging (/M).  */
  AARCH64_OPND_QLF_P_Z,
  AARCH64_OPND_QLF_P_M,

  /* Used in scaled signed immediate that are scaled by a Tag granule
     like in stg, st2g, etc.  */
  AARCH64_OPND_QLF_imm_tag,

  /* Constraint on value.  */
  AARCH64_OPND_QLF_CR,		/* CRn, CRm.  */
  AARCH64_OPND_QLF_imm_0_7,
  AARCH64_OPND_QLF_imm_0_15,
  AARCH64_OPND_QLF_imm_0_31,
  AARCH64_OPND_QLF_imm_0_63,
  AARCH64_OPND_QLF_imm_1_32,
  AARCH64_OPND_QLF_imm_1_64,

  /* Indicate whether an AdvSIMD modified immediate operand is shift-zeros
     or shift-ones.  */
  AARCH64_OPND_QLF_LSL,
  AARCH64_OPND_QLF_MSL,

  /* Special qualifier helping retrieve qualifier information during the
     decoding time (currently not in use).  */
  AARCH64_OPND_QLF_RETRIEVE,
};
771 \f
/* Instruction class.

   Selects the encoding/decoding treatment for an opcode-table entry.
   NOTE(review): the names appear to mirror the Arm ARM encoding-group
   mnemonics (e.g. asimd* for AdvSIMD vector, asisd* for AdvSIMD scalar,
   sve_*/sme_* for SVE/SME groups) — verify against the opcode table when
   adding a new class.  */

enum aarch64_insn_class
{
  aarch64_misc,
  addsub_carry,
  addsub_ext,
  addsub_imm,
  addsub_shift,
  asimdall,
  asimddiff,
  asimdelem,
  asimdext,
  asimdimm,
  asimdins,
  asimdmisc,
  asimdperm,
  asimdsame,
  asimdshf,
  asimdtbl,
  asisddiff,
  asisdelem,
  asisdlse,
  asisdlsep,
  asisdlso,
  asisdlsop,
  asisdmisc,
  asisdone,
  asisdpair,
  asisdsame,
  asisdshf,
  bitfield,
  branch_imm,
  branch_reg,
  compbranch,
  condbranch,
  condcmp_imm,
  condcmp_reg,
  condsel,
  cryptoaes,
  cryptosha2,
  cryptosha3,
  dp_1src,
  dp_2src,
  dp_3src,
  exception,
  extract,
  float2fix,
  float2int,
  floatccmp,
  floatcmp,
  floatdp1,
  floatdp2,
  floatdp3,
  floatimm,
  floatsel,
  ldst_immpost,
  ldst_immpre,
  ldst_imm9,	/* immpost or immpre.  */
  ldst_imm10,	/* LDRAA/LDRAB.  */
  ldst_pos,
  ldst_regoff,
  ldst_unpriv,
  ldst_unscaled,
  ldstexcl,
  ldstnapair_offs,
  ldstpair_off,
  ldstpair_indexed,
  loadlit,
  log_imm,
  log_shift,
  lse_atomic,
  movewide,
  pcreladdr,
  ic_system,
  sme_fp_sd,
  sme_int_sd,
  sme_misc,
  sme_mov,
  sme_ldr,
  sme_psel,
  sme_shift,
  sme_size_12_bhs,
  sme_size_12_hs,
  sme_size_22,
  sme_size_22_hsd,
  sme_sz_23,
  sme_str,
  sme_start,
  sme_stop,
  sme2_mov,
  sve_cpy,
  sve_index,
  sve_limm,
  sve_misc,
  sve_movprfx,
  sve_pred_zm,
  sve_shift_pred,
  sve_shift_unpred,
  sve_size_bhs,
  sve_size_bhsd,
  sve_size_hsd,
  sve_size_hsd2,
  sve_size_sd,
  sve_size_bh,
  sve_size_sd2,
  sve_size_13,
  sve_shift_tsz_hsd,
  sve_shift_tsz_bhsd,
  sve_size_tsz_bhs,
  testbranch,
  cryptosm3,
  cryptosm4,
  dotproduct,
  bfloat16,
  cssc,
};
889
/* Opcode enumerators.

   Identifies individual opcode-table entries that need to be found
   programmatically (see aarch64_get_opcode); entries that never need a
   distinct identity presumably use OP_NIL.  */

enum aarch64_op
{
  OP_NIL,
  OP_STRB_POS,
  OP_LDRB_POS,
  OP_LDRSB_POS,
  OP_STRH_POS,
  OP_LDRH_POS,
  OP_LDRSH_POS,
  OP_STR_POS,
  OP_LDR_POS,
  OP_STRF_POS,
  OP_LDRF_POS,
  OP_LDRSW_POS,
  OP_PRFM_POS,

  OP_STURB,
  OP_LDURB,
  OP_LDURSB,
  OP_STURH,
  OP_LDURH,
  OP_LDURSH,
  OP_STUR,
  OP_LDUR,
  OP_STURV,
  OP_LDURV,
  OP_LDURSW,
  OP_PRFUM,

  OP_LDR_LIT,
  OP_LDRV_LIT,
  OP_LDRSW_LIT,
  OP_PRFM_LIT,

  OP_ADD,
  OP_B,
  OP_BL,

  OP_MOVN,
  OP_MOVZ,
  OP_MOVK,

  OP_MOV_IMM_LOG,	/* MOV alias for moving bitmask immediate.  */
  OP_MOV_IMM_WIDE,	/* MOV alias for moving wide immediate.  */
  OP_MOV_IMM_WIDEN,	/* MOV alias for moving wide immediate (negated).  */

  OP_MOV_V,		/* MOV alias for moving vector register.  */

  OP_ASR_IMM,
  OP_LSR_IMM,
  OP_LSL_IMM,

  OP_BIC,

  OP_UBFX,
  OP_BFXIL,
  OP_SBFX,
  OP_SBFIZ,
  OP_BFI,
  OP_BFC,		/* ARMv8.2.  */
  OP_UBFIZ,
  OP_UXTB,
  OP_UXTH,
  OP_UXTW,

  OP_CINC,
  OP_CINV,
  OP_CNEG,
  OP_CSET,
  OP_CSETM,

  OP_FCVT,
  OP_FCVTN,
  OP_FCVTN2,
  OP_FCVTL,
  OP_FCVTL2,
  OP_FCVTXN_S,		/* Scalar version.  */

  OP_ROR_IMM,

  OP_SXTL,
  OP_SXTL2,
  OP_UXTL,
  OP_UXTL2,

  OP_MOV_P_P,
  OP_MOV_PN_PN,
  OP_MOV_Z_P_Z,
  OP_MOV_Z_V,
  OP_MOV_Z_Z,
  OP_MOV_Z_Zi,
  OP_MOVM_P_P_P,
  OP_MOVS_P_P,
  OP_MOVZS_P_P_P,
  OP_MOVZ_P_P_P,
  OP_NOTS_P_P_P_Z,
  OP_NOT_P_P_P_Z,

  OP_FCMLA_ELEM,	/* ARMv8.3, indexed element version.  */

  OP_TOTAL_NUM,		/* Pseudo; must be last — number of enumerators.  */
};
994
/* Error types, returned by the opcode verifier hook (see aarch64_opcode)
   and by aarch64_decode_insn.  */
enum err_type
{
  ERR_OK,	/* Success; no error.  */
  ERR_UND,	/* Undefined instruction — TODO confirm against libopcodes use.  */
  ERR_UNP,	/* Unpredictable instruction — TODO confirm.  */
  ERR_NYI,	/* Not yet implemented — TODO confirm.  */
  ERR_VFI,	/* Verifier failure — TODO confirm.  */
  ERR_NR_ENTRIES	/* Number of enumerators; keep last.  */
};
1005
/* Maximum number of operands an instruction can have.  */
#define AARCH64_MAX_OPND_NUM 6
/* Maximum number of qualifier sequences an instruction can have.  */
#define AARCH64_MAX_QLF_SEQ_NUM 10
/* Operand qualifier typedef; a single byte, optimized for the size of the
   opcode table (values come from enum aarch64_opnd_qualifier).  */
typedef unsigned char aarch64_opnd_qualifier_t;
/* Operand qualifier sequence typedef: one qualifier per operand slot.  */
typedef aarch64_opnd_qualifier_t \
	aarch64_opnd_qualifier_seq_t [AARCH64_MAX_OPND_NUM];
1015
1016 /* FIXME: improve the efficiency. */
1017 static inline bool
1018 empty_qualifier_sequence_p (const aarch64_opnd_qualifier_t *qualifiers)
1019 {
1020 int i;
1021 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
1022 if (qualifiers[i] != AARCH64_OPND_QLF_NIL)
1023 return false;
1024 return true;
1025 }
1026
/* Forward declare error reporting type (defined later in this header).  */
typedef struct aarch64_operand_error aarch64_operand_error;
/* Forward declare instruction sequence type (tracks F_SCAN dependencies).  */
typedef struct aarch64_instr_sequence aarch64_instr_sequence;
/* Forward declare instruction definition.  */
typedef struct aarch64_inst aarch64_inst;
1033
/* This structure holds information for a particular opcode.  */

struct aarch64_opcode
{
  /* The name of the mnemonic.  */
  const char *name;

  /* The opcode itself.  Those bits which will be filled in with
     operands are zeroes.  */
  aarch64_insn opcode;

  /* The opcode mask.  This is used by the disassembler.  This is a
     mask containing ones indicating those bits which must match the
     opcode field, and zeroes indicating those bits which need not
     match (and are presumably filled in by operands).  */
  aarch64_insn mask;

  /* Instruction class.  */
  enum aarch64_insn_class iclass;

  /* Enumerator identifier; OP_NIL when no distinct identity is needed.  */
  enum aarch64_op op;

  /* Which architecture variant provides this instruction.  */
  const aarch64_feature_set *avariant;

  /* An array of operand codes.  Each code is an index into the
     operand table.  They appear in the order which the operands must
     appear in assembly code, and are terminated by a zero.  */
  enum aarch64_opnd operands[AARCH64_MAX_OPND_NUM];

  /* A list of operand qualifier code sequence.  Each operand qualifier
     code qualifies the corresponding operand code.  Each operand
     qualifier sequence specifies a valid opcode variant and related
     constraint on operands.  */
  aarch64_opnd_qualifier_seq_t qualifiers_list[AARCH64_MAX_QLF_SEQ_NUM];

  /* Flags providing information about this instruction (F_* bits below).  */
  uint64_t flags;

  /* Extra constraints on the instruction that the verifier checks
     (C_* bits below).  */
  uint32_t constraints;

  /* If nonzero, this operand and operand 0 are both registers and
     are required to have the same register number.  */
  unsigned char tied_operand;

  /* If non-NULL, a function to verify that a given instruction is valid.  */
  enum err_type (* verifier) (const struct aarch64_inst *, const aarch64_insn,
			      bfd_vma, bool, aarch64_operand_error *,
			      struct aarch64_instr_sequence *);
};

typedef struct aarch64_opcode aarch64_opcode;

/* Table describing all the AArch64 opcodes.  */
extern const aarch64_opcode aarch64_opcode_table[];
1091
/* Opcode flags (stored in aarch64_opcode.flags).  */
#define F_ALIAS (1 << 0)
#define F_HAS_ALIAS (1 << 1)
/* Disassembly preference priority 1-3 (the larger the higher).  If nothing
   is specified, it is the priority 0 by default, i.e. the lowest priority.
   Priorities occupy a 2-bit field at bits [3:2].  */
#define F_P1 (1 << 2)
#define F_P2 (2 << 2)
#define F_P3 (3 << 2)
/* Flag an instruction that is truly conditionally executed, e.g. b.cond.  */
#define F_COND (1 << 4)
/* Instruction has the field of 'sf'.  */
#define F_SF (1 << 5)
/* Instruction has the field of 'size:Q'.  */
#define F_SIZEQ (1 << 6)
/* Floating-point instruction has the field of 'type'.  */
#define F_FPTYPE (1 << 7)
/* AdvSIMD scalar instruction has the field of 'size'.  */
#define F_SSIZE (1 << 8)
/* AdvSIMD vector register arrangement specifier encoded in "imm5<3:0>:Q".  */
#define F_T (1 << 9)
/* Size of GPR operand in AdvSIMD instructions encoded in Q.  */
#define F_GPRSIZE_IN_Q (1 << 10)
/* Size of Rt load signed instruction encoded in opc[0], i.e. bit 22.  */
#define F_LDS_SIZE (1 << 11)
/* Optional operand; assume maximum of 1 operand can be optional.
   A 3-bit field at bits [14:12] holding (operand index + 1), or 0 when no
   operand is optional.  */
#define F_OPD0_OPT (1 << 12)
#define F_OPD1_OPT (2 << 12)
#define F_OPD2_OPT (3 << 12)
#define F_OPD3_OPT (4 << 12)
#define F_OPD4_OPT (5 << 12)
/* Default value for the optional operand when omitted from the assembly;
   a 5-bit field at bits [19:15].  */
#define F_DEFAULT(X) (((X) & 0x1f) << 15)
/* Instruction that is an alias of another instruction needs to be
   encoded/decoded by converting it to/from the real form, followed by
   the encoding/decoding according to the rules of the real opcode.
   This compares to the direct coding using the alias's information.
   N.B. this flag requires F_ALIAS to be used together.  */
#define F_CONV (1 << 20)
/* Use together with F_ALIAS to indicate an alias opcode is a programmer
   friendly pseudo instruction available only in the assembly code (thus will
   not show up in the disassembly).  */
#define F_PSEUDO (1 << 21)
/* Instruction has miscellaneous encoding/decoding rules.  */
#define F_MISC (1 << 22)
/* Instruction has the field of 'N'; used in conjunction with F_SF.  */
#define F_N (1 << 23)
/* Opcode dependent field; a 3-bit field at bits [26:24].  */
#define F_OD(X) (((X) & 0x7) << 24)
/* Instruction has the field of 'sz'.  */
#define F_LSE_SZ (1 << 27)
/* Require an exact qualifier match, even for NIL qualifiers.  */
#define F_STRICT (1ULL << 28)
/* This system instruction is used to read system registers.  */
#define F_SYS_READ (1ULL << 29)
/* This system instruction is used to write system registers.  */
#define F_SYS_WRITE (1ULL << 30)
/* This instruction has an extra constraint on it that imposes a requirement on
   subsequent instructions.  */
#define F_SCAN (1ULL << 31)
/* Next bit is 32.  */

/* Instruction constraints (stored in aarch64_opcode.constraints).  */
/* This instruction has a predication constraint on the instruction at PC+4.  */
#define C_SCAN_MOVPRFX (1U << 0)
/* This instruction's operation width is determined by the operand with the
   largest element size.  */
#define C_MAX_ELEM (1U << 1)
/* MOPS prologue/main/epilogue markers; a 2-bit field at bits [3:2].  */
#define C_SCAN_MOPS_P (1U << 2)
#define C_SCAN_MOPS_M (2U << 2)
#define C_SCAN_MOPS_E (3U << 2)
#define C_SCAN_MOPS_PME (3U << 2)
/* Next bit is 4.  */
1164
1165 static inline bool
1166 alias_opcode_p (const aarch64_opcode *opcode)
1167 {
1168 return (opcode->flags & F_ALIAS) != 0;
1169 }
1170
1171 static inline bool
1172 opcode_has_alias (const aarch64_opcode *opcode)
1173 {
1174 return (opcode->flags & F_HAS_ALIAS) != 0;
1175 }
1176
1177 /* Priority for disassembling preference. */
1178 static inline int
1179 opcode_priority (const aarch64_opcode *opcode)
1180 {
1181 return (opcode->flags >> 2) & 0x3;
1182 }
1183
1184 static inline bool
1185 pseudo_opcode_p (const aarch64_opcode *opcode)
1186 {
1187 return (opcode->flags & F_PSEUDO) != 0lu;
1188 }
1189
1190 static inline bool
1191 optional_operand_p (const aarch64_opcode *opcode, unsigned int idx)
1192 {
1193 return ((opcode->flags >> 12) & 0x7) == idx + 1;
1194 }
1195
1196 static inline aarch64_insn
1197 get_optional_operand_default_value (const aarch64_opcode *opcode)
1198 {
1199 return (opcode->flags >> 15) & 0x1f;
1200 }
1201
1202 static inline unsigned int
1203 get_opcode_dependent_value (const aarch64_opcode *opcode)
1204 {
1205 return (opcode->flags >> 24) & 0x7;
1206 }
1207
/* Return true if OPCODE carries any flag that requires special (non-table-
   driven) handling during encoding/decoding.  */
static inline bool
opcode_has_special_coder (const aarch64_opcode *opcode)
{
  return (opcode->flags & (F_SF | F_LSE_SZ | F_SIZEQ | F_FPTYPE | F_SSIZE | F_T
	  | F_GPRSIZE_IN_Q | F_LDS_SIZE | F_MISC | F_N | F_COND)) != 0;
}
1214 \f
/* Generic name/value pair; used for operand modifiers, barrier options,
   prefetch operations and hint options (see the tables below).  */
struct aarch64_name_value_pair
{
  const char * name;
  aarch64_insn value;
};

extern const struct aarch64_name_value_pair aarch64_operand_modifiers [];
extern const struct aarch64_name_value_pair aarch64_barrier_options [16];
extern const struct aarch64_name_value_pair aarch64_barrier_dsb_nxs_options [4];
extern const struct aarch64_name_value_pair aarch64_prfops [32];
extern const struct aarch64_name_value_pair aarch64_hint_options [];
1226
/* Upper bound on the length of a system register name, including the
   terminating NUL.  */
#define AARCH64_MAX_SYSREG_NAME_LEN 32

/* Description of one system register (or PSTATE field).  */
typedef struct
{
  const char * name;
  aarch64_insn value;
  uint32_t flags;

  /* A set of features, all of which are required for this system register to be
     available.  */
  aarch64_feature_set features;
} aarch64_sys_reg;

extern const aarch64_sys_reg aarch64_sys_regs [];
extern const aarch64_sys_reg aarch64_pstatefields [];
extern bool aarch64_sys_reg_deprecated_p (const uint32_t);
extern bool aarch64_sys_reg_alias_p (const uint32_t);
extern bool aarch64_pstatefield_supported_p (const aarch64_feature_set,
					     const aarch64_sys_reg *);
1246
/* Description of one system-instruction operand, e.g. the argument of an
   IC, DC, AT, TLBI or SR instruction (see the tables below).  */
typedef struct
{
  const char *name;
  uint32_t value;
  uint32_t flags;
} aarch64_sys_ins_reg;

/* Return true if the system instruction operand takes an Xt register.  */
extern bool aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *);
extern bool
aarch64_sys_ins_reg_supported_p (const aarch64_feature_set,
				 const char *reg_name, aarch64_insn,
				 uint32_t, const aarch64_feature_set *);

extern const aarch64_sys_ins_reg aarch64_sys_regs_ic [];
extern const aarch64_sys_ins_reg aarch64_sys_regs_dc [];
extern const aarch64_sys_ins_reg aarch64_sys_regs_at [];
extern const aarch64_sys_ins_reg aarch64_sys_regs_tlbi [];
extern const aarch64_sys_ins_reg aarch64_sys_regs_sr [];
1265
/* Shift/extending operator kinds.
   N.B. order is important; keep aarch64_operand_modifiers synced.  */
enum aarch64_modifier_kind
{
  AARCH64_MOD_NONE,
  AARCH64_MOD_MSL,
  AARCH64_MOD_ROR,
  AARCH64_MOD_ASR,
  AARCH64_MOD_LSR,
  AARCH64_MOD_LSL,
  AARCH64_MOD_UXTB,
  AARCH64_MOD_UXTH,
  AARCH64_MOD_UXTW,
  AARCH64_MOD_UXTX,
  AARCH64_MOD_SXTB,
  AARCH64_MOD_SXTH,
  AARCH64_MOD_SXTW,
  AARCH64_MOD_SXTX,
  AARCH64_MOD_MUL,
  AARCH64_MOD_MUL_VL,
};

/* Return true if KIND is one of the extend operators (UXT*/SXT*).  */
bool
aarch64_extend_operator_p (enum aarch64_modifier_kind);

/* Map an entry of aarch64_operand_modifiers back to its enumerator.  */
enum aarch64_modifier_kind
aarch64_get_operand_modifier (const struct aarch64_name_value_pair *);
/* Condition.  */

typedef struct
{
  /* A list of names with the first one as the disassembly preference;
     terminated by NULL if fewer than 4.  */
  const char *names[4];
  aarch64_insn value;
} aarch64_cond;

/* The 16 condition codes, indexed by their 4-bit encoding.  */
extern const aarch64_cond aarch64_conds[16];

const aarch64_cond* get_cond_from_value (aarch64_insn value);
const aarch64_cond* get_inverted_cond (const aarch64_cond *cond);
1307 \f
/* Information about a reference to part of ZA (the SME array storage).  */
struct aarch64_indexed_za
{
  /* Which tile is being accessed.  Unused (and 0) for an index into ZA.  */
  int regno;

  struct
  {
    /* The 32-bit index register.  */
    int regno;

    /* The first (or only) immediate offset.  */
    int64_t imm;

    /* The last immediate offset minus the first immediate offset.
       Unlike the range size, this is guaranteed not to overflow
       when the end offset > the start offset.  */
    uint64_t countm1;
  } index;

  /* The vector group size, or 0 if none.  */
  unsigned group_size : 8;

  /* True if a tile access is vertical, false if it is horizontal.
     Unused (and 0) for an index into ZA.  */
  unsigned v : 1;
};
1335
/* Information about a list of registers.  */
struct aarch64_reglist
{
  unsigned first_regno : 8;
  unsigned num_regs : 8;
  /* The difference between the nth and the n+1th register.  */
  unsigned stride : 8;
  /* 1 if it is a list of reg element.  */
  unsigned has_index : 1;
  /* Lane index; valid only when has_index is 1.  */
  int64_t index;
};
1348
/* Structure representing an operand.

   TYPE selects which member of the anonymous union below is active;
   QUALIFIER further constrains it; IDX is the operand's position within
   the instruction.  */

struct aarch64_opnd_info
{
  enum aarch64_opnd type;
  aarch64_opnd_qualifier_t qualifier;
  int idx;

  union
    {
      struct
	{
	  unsigned regno;
	} reg;
      struct
	{
	  unsigned int regno;
	  int64_t index;
	} reglane;
      /* e.g. LVn.  */
      struct aarch64_reglist reglist;
      /* e.g. immediate or pc relative address offset.  */
      struct
	{
	  int64_t value;
	  unsigned is_fp : 1;
	} imm;
      /* e.g. address in STR (register offset).  */
      struct
	{
	  unsigned base_regno;
	  struct
	    {
	      union
		{
		  int imm;
		  unsigned regno;
		};
	      unsigned is_reg;
	    } offset;
	  unsigned pcrel : 1;		/* PC-relative.  */
	  unsigned writeback : 1;
	  unsigned preind : 1;		/* Pre-indexed.  */
	  unsigned postind : 1;		/* Post-indexed.  */
	} addr;

      struct
	{
	  /* The encoding of the system register.  */
	  aarch64_insn value;

	  /* The system register flags.  */
	  uint32_t flags;
	} sysreg;

      /* ZA tile vector, e.g. <ZAn><HV>.D[<Wv>{, <imm>}]  */
      struct aarch64_indexed_za indexed_za;

      const aarch64_cond *cond;
      /* The encoding of the PSTATE field.  */
      aarch64_insn pstatefield;
      const aarch64_sys_ins_reg *sysins_op;
      const struct aarch64_name_value_pair *barrier;
      const struct aarch64_name_value_pair *hint_option;
      const struct aarch64_name_value_pair *prfop;
    };

  /* Operand shifter; in use when the operand is a register offset address,
     add/sub extended reg, etc. e.g. <R><m>{, <extend> {#<amount>}}.  */
  struct
    {
      enum aarch64_modifier_kind kind;
      unsigned operator_present: 1;	/* Only valid during encoding.  */
      /* Value of the 'S' field in ld/st reg offset; used only in decoding.  */
      unsigned amount_present: 1;
      int64_t amount;
    } shifter;

  unsigned skip:1;	/* Operand is not completed if there is a fixup needed
			   to be done on it.  In some (but not all) of these
			   cases, we need to tell libopcodes to skip the
			   constraint checking and the encoding for this
			   operand, so that the libopcodes can pick up the
			   right opcode before the operand is fixed-up.  This
			   flag should only be used during the
			   assembling/encoding.  */
  unsigned present:1;	/* Whether this operand is present in the assembly
			   line; not used during the disassembly.  */
};

typedef struct aarch64_opnd_info aarch64_opnd_info;
1440
/* Structure representing an instruction.

   It is used during both the assembling and disassembling.  The assembler
   fills an aarch64_inst after a successful parsing and then passes it to the
   encoding routine to do the encoding.  During the disassembling, the
   disassembler calls the decoding routine to decode a binary instruction; on a
   successful return, such a structure will be filled with information of the
   instruction; then the disassembler uses the information to print out the
   instruction.  */

struct aarch64_inst
{
  /* The value of the binary instruction.  */
  aarch64_insn value;

  /* Corresponding opcode entry.  */
  const aarch64_opcode *opcode;

  /* Condition for a truly conditionally-executed instruction, e.g. b.cond.  */
  const aarch64_cond *cond;

  /* Operands information.  */
  aarch64_opnd_info operands[AARCH64_MAX_OPND_NUM];
};
1465
/* Defining the HINT #imm values for the aarch64_hint_options.  */
#define HINT_OPD_CSYNC	0x11
#define HINT_OPD_C	0x22
#define HINT_OPD_J	0x24
#define HINT_OPD_JC	0x26
#define HINT_OPD_NULL	0x00
1472
1473 \f
/* Diagnosis related declaration and interface.  */

/* Operand error kind enumerators.

   AARCH64_OPDE_RECOVERABLE
     Less severe error found during the parsing, very possibly because that
     GAS has picked up a wrong instruction template for the parsing.

   AARCH64_OPDE_A_SHOULD_FOLLOW_B
     The instruction forms (or is expected to form) part of a sequence,
     but the preceding instruction in the sequence wasn't the expected one.
     The message refers to two strings: the name of the current instruction,
     followed by the name of the expected preceding instruction.

   AARCH64_OPDE_EXPECTED_A_AFTER_B
     Same as AARCH64_OPDE_A_SHOULD_FOLLOW_B, but shifting the focus
     so that the current instruction is assumed to be the incorrect one:
     "since the previous instruction was B, the current one should be A".

   AARCH64_OPDE_SYNTAX_ERROR
     General syntax error; it can be either a user error, or simply because
     that GAS is trying a wrong instruction template.

   AARCH64_OPDE_FATAL_SYNTAX_ERROR
     Definitely a user syntax error.

   AARCH64_OPDE_INVALID_VARIANT
     No syntax error, but the operands are not a valid combination, e.g.
     FMOV D0,S0

   The following errors are only reported against an asm string that is
   syntactically valid and that has valid operand qualifiers.

   AARCH64_OPDE_INVALID_VG_SIZE
     Error about a "VGx<n>" modifier in a ZA index not having the
     correct <n>.  This error effectively forms a pair with
     AARCH64_OPDE_REG_LIST_LENGTH, since both errors relate to the number
     of vectors that an instruction operates on.  However, the "VGx<n>"
     modifier is optional, whereas a register list always has a known
     and explicit length.  It therefore seems better to place more
     importance on the register list length when selecting an opcode table
     entry.  This in turn means that having an incorrect register length
     should be more severe than having an incorrect "VGx<n>".

   AARCH64_OPDE_REG_LIST_LENGTH
     Error about a register list operand having an unexpected number of
     registers.  This error is low severity because there might be another
     opcode entry that supports the given number of registers.

   AARCH64_OPDE_REG_LIST_STRIDE
     Error about a register list operand having the correct number
     (and type) of registers, but an unexpected stride.  This error is
     more severe than AARCH64_OPDE_REG_LIST_LENGTH because it implies
     that the length is known to be correct.  However, it is lower than
     many other errors, since some instructions have forms that share
     the same number of registers but have different strides.

   AARCH64_OPDE_UNTIED_IMMS
     The asm failed to use the same immediate for a destination operand
     and a tied source operand.

   AARCH64_OPDE_UNTIED_OPERAND
     The asm failed to use the same register for a destination operand
     and a tied source operand.

   AARCH64_OPDE_OUT_OF_RANGE
     Error about some immediate value out of a valid range.

   AARCH64_OPDE_UNALIGNED
     Error about some immediate value not properly aligned (i.e. not being a
     multiple times of a certain value).

   AARCH64_OPDE_OTHER_ERROR
     Error of the highest severity and used for any severe issue that does not
     fall into any of the above categories.

   AARCH64_OPDE_INVALID_REGNO
     A register was syntactically valid and had the right type, but it was
     outside the range supported by the associated operand field.  This is
     a high severity error because there are currently no instructions that
     would accept the operands that precede the erroneous one (if any) and
     yet still accept a wider range of registers.

   AARCH64_OPDE_RECOVERABLE, AARCH64_OPDE_SYNTAX_ERROR and
   AARCH64_OPDE_FATAL_SYNTAX_ERROR are only detected by GAS while the
   AARCH64_OPDE_INVALID_VARIANT error can only be spotted by libopcodes as
   only libopcodes has the information about the valid variants of each
   instruction.

   The enumerators have an increasing severity.  This is helpful when there are
   multiple instruction templates available for a given mnemonic name (e.g.
   FMOV); this mechanism will help choose the most suitable template from which
   the generated diagnostics can most closely describe the issues, if any.

   This enum needs to be kept up-to-date with operand_mismatch_kind_names
   in tc-aarch64.c.  */

enum aarch64_operand_error_kind
{
  AARCH64_OPDE_NIL,
  AARCH64_OPDE_RECOVERABLE,
  AARCH64_OPDE_A_SHOULD_FOLLOW_B,
  AARCH64_OPDE_EXPECTED_A_AFTER_B,
  AARCH64_OPDE_SYNTAX_ERROR,
  AARCH64_OPDE_FATAL_SYNTAX_ERROR,
  AARCH64_OPDE_INVALID_VARIANT,
  AARCH64_OPDE_INVALID_VG_SIZE,
  AARCH64_OPDE_REG_LIST_LENGTH,
  AARCH64_OPDE_REG_LIST_STRIDE,
  AARCH64_OPDE_UNTIED_IMMS,
  AARCH64_OPDE_UNTIED_OPERAND,
  AARCH64_OPDE_OUT_OF_RANGE,
  AARCH64_OPDE_UNALIGNED,
  AARCH64_OPDE_OTHER_ERROR,
  AARCH64_OPDE_INVALID_REGNO
};
1590
/* N.B. GAS assumes that this structure work well with shallow copy.  */
struct aarch64_operand_error
{
  enum aarch64_operand_error_kind kind;
  /* Index of the offending operand, where applicable.  */
  int index;
  /* Human-readable message; may contain printf-style placeholders filled
     from DATA — TODO confirm formatting convention against callers.  */
  const char *error;
  /* Some data for extra information.  */
  union {
    int i;
    const char *s;
  } data[3];
  /* True if the error does not prevent choosing this template.  */
  bool non_fatal;
};
1604
/* AArch64 sequence structure used to track instructions with F_SCAN
   dependencies for both assembler and disassembler.  */
struct aarch64_instr_sequence
{
  /* The instructions in the sequence, starting with the one that
     caused it to be opened.  */
  aarch64_inst *instr;
  /* The number of instructions already in the sequence.  */
  int num_added_insns;
  /* The number of instructions allocated to the sequence.  */
  int num_allocated_insns;
};
1617
/* Encoding entrypoint.  */

extern bool
aarch64_opcode_encode (const aarch64_opcode *, const aarch64_inst *,
		       aarch64_insn *, aarch64_opnd_qualifier_t *,
		       aarch64_operand_error *, aarch64_instr_sequence *);

/* Replace an instruction's opcode with another entry (e.g. when resolving
   an alias to its real form).  */
extern const aarch64_opcode *
aarch64_replace_opcode (struct aarch64_inst *,
			const aarch64_opcode *);

/* Given the opcode enumerator OP, return the pointer to the corresponding
   opcode entry.  */

extern const aarch64_opcode *
aarch64_get_opcode (enum aarch64_op);
1634
/* An instance of this structure is passed to aarch64_print_operand, and
   the callback within this structure is used to apply styling to the
   disassembler output.  This structure encapsulates the callback and a
   state pointer.  */

struct aarch64_styler
{
  /* The callback used to apply styling.  Returns a string created from FMT
     and ARGS with STYLE applied to the string.  STYLER is a pointer back
     to this object so that the callback can access the state member.

     The string returned from this callback must remain valid until the
     call to aarch64_print_operand has completed.  */
  const char *(*apply_style) (struct aarch64_styler *styler,
			      enum disassembler_style style,
			      const char *fmt,
			      va_list args);

  /* A pointer to a state object which can be used by the apply_style
     callback function.  */
  void *state;
};
1657
/* Generate the string representation of an operand.  */
extern void
aarch64_print_operand (char *, size_t, bfd_vma, const aarch64_opcode *,
		       const aarch64_opnd_info *, int, int *, bfd_vma *,
		       char **, char *, size_t,
		       aarch64_feature_set features,
		       struct aarch64_styler *styler);

/* Miscellaneous interface.  */

/* Return the position of an operand code within an operand array, or a
   negative value if not found — TODO confirm failure convention.  */
extern int
aarch64_operand_index (const enum aarch64_opnd *, enum aarch64_opnd);

extern aarch64_opnd_qualifier_t
aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *, int,
				const aarch64_opnd_qualifier_t, int);

extern bool
aarch64_is_destructive_by_operands (const aarch64_opcode *);

extern int
aarch64_num_of_operands (const aarch64_opcode *);

extern int
aarch64_stack_pointer_p (const aarch64_opnd_info *);

extern int
aarch64_zero_register_p (const aarch64_opnd_info *);

/* Decode a binary instruction word into an aarch64_inst; returns one of
   the err_type values.  */
extern enum err_type
aarch64_decode_insn (aarch64_insn, aarch64_inst *, bool,
		     aarch64_operand_error *);

extern void
init_insn_sequence (const struct aarch64_inst *, aarch64_instr_sequence *);

/* Given an operand qualifier, return the expected data element size
   of a qualified operand.  */
extern unsigned char
aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t);

extern enum aarch64_operand_class
aarch64_get_operand_class (enum aarch64_opnd);

extern const char *
aarch64_get_operand_name (enum aarch64_opnd);

extern const char *
aarch64_get_operand_desc (enum aarch64_opnd);

extern bool
aarch64_sve_dupm_mov_immediate_p (uint64_t, int);

extern bool
aarch64_cpu_supports_inst_p (aarch64_feature_set, aarch64_inst *);
1713
#ifdef DEBUG_AARCH64
extern int debug_dump;

extern void
aarch64_verbose (const char *, ...) __attribute__ ((format (printf, 1, 2)));

/* Emit a trace line via aarch64_verbose when runtime tracing is enabled
   (debug_dump non-zero).  Wrapped in do { } while (0) so each macro
   expands to exactly one statement and is safe inside unbraced if/else
   bodies; call sites terminate with a semicolon as usual.  */
#define DEBUG_TRACE(M, ...)						\
  do									\
    {									\
      if (debug_dump)							\
	aarch64_verbose ("%s: " M ".", __func__, ##__VA_ARGS__);	\
    }									\
  while (0)

/* As DEBUG_TRACE, but also gated on the condition C.  */
#define DEBUG_TRACE_IF(C, M, ...)					\
  do									\
    {									\
      if (debug_dump && (C))						\
	aarch64_verbose ("%s: " M ".", __func__, ##__VA_ARGS__);	\
    }									\
  while (0)
#else  /* !DEBUG_AARCH64 */
/* Expand to a single empty statement so call sites parse identically
   whether or not DEBUG_AARCH64 is defined (a bare `;' here would turn
   `DEBUG_TRACE (...);' into two statements and break unbraced
   if/else).  */
#define DEBUG_TRACE(M, ...) do {} while (0)
#define DEBUG_TRACE_IF(C, M, ...) do {} while (0)
#endif /* DEBUG_AARCH64 */
1735
/* Name tables used when printing operands: 32 SVE predicate-pattern
   names, 16 SVE prefetch-operation names, 64 RPRFM operation names and
   the 2 SME VLx vector-length names.  Definitions (and the exact
   strings) live in the opcodes library.  */
extern const char *const aarch64_sve_pattern_array[32];
extern const char *const aarch64_sve_prfop_array[16];
extern const char *const aarch64_rprfmop_array[64];
extern const char *const aarch64_sme_vlxn_array[2];
1740
1741 #ifdef __cplusplus
1742 }
1743 #endif
1744
1745 #endif /* OPCODE_AARCH64_H */