1 /* aarch64-opc.c -- AArch64 opcode support.
2 Copyright (C) 2009-2020 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include <assert.h>
23 #include <stdlib.h>
24 #include <stdio.h>
25 #include "bfd_stdint.h"
26 #include <stdarg.h>
27 #include <inttypes.h>
28
29 #include "opintl.h"
30 #include "libiberty.h"
31
32 #include "aarch64-opc.h"
33
34 #ifdef DEBUG_AARCH64
35 int debug_dump = FALSE;
36 #endif /* DEBUG_AARCH64 */
37
38 /* The enumeration strings associated with each value of a 5-bit SVE
39 pattern operand. A null entry indicates a reserved meaning. */
40 const char *const aarch64_sve_pattern_array[32] = {
41 /* 0-7. */
42 "pow2",
43 "vl1",
44 "vl2",
45 "vl3",
46 "vl4",
47 "vl5",
48 "vl6",
49 "vl7",
50 /* 8-15. */
51 "vl8",
52 "vl16",
53 "vl32",
54 "vl64",
55 "vl128",
56 "vl256",
57 0,
58 0,
59 /* 16-23. */
60 0,
61 0,
62 0,
63 0,
64 0,
65 0,
66 0,
67 0,
68 /* 24-31. */
69 0,
70 0,
71 0,
72 0,
73 0,
74 "mul4",
75 "mul3",
76 "all"
77 };
78
79 /* The enumeration strings associated with each value of a 4-bit SVE
80 prefetch operand. A null entry indicates a reserved meaning. */
81 const char *const aarch64_sve_prfop_array[16] = {
82 /* 0-7. */
83 "pldl1keep",
84 "pldl1strm",
85 "pldl2keep",
86 "pldl2strm",
87 "pldl3keep",
88 "pldl3strm",
89 0,
90 0,
91 /* 8-15. */
92 "pstl1keep",
93 "pstl1strm",
94 "pstl2keep",
95 "pstl2strm",
96 "pstl3keep",
97 "pstl3strm",
98 0,
99 0
100 };
101
102 /* Helper functions to determine which operand to use to encode/decode
103 the size:Q fields for AdvSIMD instructions. */
104
105 static inline bfd_boolean
106 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
107 {
108 return ((qualifier >= AARCH64_OPND_QLF_V_8B
109 && qualifier <= AARCH64_OPND_QLF_V_1Q) ? TRUE
110 : FALSE);
111 }
112
113 static inline bfd_boolean
114 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
115 {
116 return ((qualifier >= AARCH64_OPND_QLF_S_B
117 && qualifier <= AARCH64_OPND_QLF_S_Q) ? TRUE
118 : FALSE);
119 }
120
121 enum data_pattern
122 {
123 DP_UNKNOWN,
124 DP_VECTOR_3SAME,
125 DP_VECTOR_LONG,
126 DP_VECTOR_WIDE,
127 DP_VECTOR_ACROSS_LANES,
128 };
129
130 static const char significant_operand_index [] =
131 {
132 0, /* DP_UNKNOWN, by default using operand 0. */
133 0, /* DP_VECTOR_3SAME */
134 1, /* DP_VECTOR_LONG */
135 2, /* DP_VECTOR_WIDE */
136 1, /* DP_VECTOR_ACROSS_LANES */
137 };
138
139 /* Given a sequence of qualifiers in QUALIFIERS, determine and return
140 the data pattern.
141 N.B. QUALIFIERS is a possible sequence of qualifiers each of which
142 corresponds to one of a sequence of operands. */
143
144 static enum data_pattern
145 get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers)
146 {
147 if (vector_qualifier_p (qualifiers[0]) == TRUE)
148 {
149 /* e.g. v.4s, v.4s, v.4s
150 or v.4h, v.4h, v.h[3]. */
151 if (qualifiers[0] == qualifiers[1]
152 && vector_qualifier_p (qualifiers[2]) == TRUE
153 && (aarch64_get_qualifier_esize (qualifiers[0])
154 == aarch64_get_qualifier_esize (qualifiers[1]))
155 && (aarch64_get_qualifier_esize (qualifiers[0])
156 == aarch64_get_qualifier_esize (qualifiers[2])))
157 return DP_VECTOR_3SAME;
158 /* e.g. v.8h, v.8b, v.8b.
159 or v.4s, v.4h, v.h[2].
160 or v.8h, v.16b. */
161 if (vector_qualifier_p (qualifiers[1]) == TRUE
162 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
163 && (aarch64_get_qualifier_esize (qualifiers[0])
164 == aarch64_get_qualifier_esize (qualifiers[1]) << 1))
165 return DP_VECTOR_LONG;
166 /* e.g. v.8h, v.8h, v.8b. */
167 if (qualifiers[0] == qualifiers[1]
168 && vector_qualifier_p (qualifiers[2]) == TRUE
169 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
170 && (aarch64_get_qualifier_esize (qualifiers[0])
171 == aarch64_get_qualifier_esize (qualifiers[2]) << 1)
172 && (aarch64_get_qualifier_esize (qualifiers[0])
173 == aarch64_get_qualifier_esize (qualifiers[1])))
174 return DP_VECTOR_WIDE;
175 }
176 else if (fp_qualifier_p (qualifiers[0]) == TRUE)
177 {
178 /* e.g. SADDLV <V><d>, <Vn>.<T>. */
179 if (vector_qualifier_p (qualifiers[1]) == TRUE
180 && qualifiers[2] == AARCH64_OPND_QLF_NIL)
181 return DP_VECTOR_ACROSS_LANES;
182 }
183
184 return DP_UNKNOWN;
185 }
186
187 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
188 the AdvSIMD instructions. */
189 /* N.B. it is possible to do some optimization that doesn't call
190 get_data_pattern each time we need to select an operand. We can
191 either buffer the calculated result or statically generate the data;
192 however, it is not obvious that the optimization will bring significant
193 benefit. */
194
195 int
196 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode)
197 {
198 return
199 significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])];
200 }
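/* For example (illustrative only): with the qualifier sequence (8H, 8B, 8B),
   as in the widening "uaddl v0.8h, v1.8b, v2.8b", get_data_pattern returns
   DP_VECTOR_LONG and the size:Q fields are encoded/decoded from operand 1;
   with (4S, 4S, 4S), as in "add v0.4s, v1.4s, v2.4s", the pattern is
   DP_VECTOR_3SAME and operand 0 is used.  */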
201 \f
202 const aarch64_field fields[] =
203 {
204 { 0, 0 }, /* NIL. */
205 { 0, 4 }, /* cond2: condition in truly conditional-executed inst. */
206 { 0, 4 }, /* nzcv: flag bit specifier, encoded in the "nzcv" field. */
207 { 5, 5 }, /* defgh: d:e:f:g:h bits in AdvSIMD modified immediate. */
208 { 16, 3 }, /* abc: a:b:c bits in AdvSIMD modified immediate. */
209 { 5, 19 }, /* imm19: e.g. in CBZ. */
210 { 5, 19 }, /* immhi: e.g. in ADRP. */
211 { 29, 2 }, /* immlo: e.g. in ADRP. */
212 { 22, 2 }, /* size: in most AdvSIMD and floating-point instructions. */
213 { 10, 2 }, /* vldst_size: size field in the AdvSIMD load/store inst. */
214 { 29, 1 }, /* op: in AdvSIMD modified immediate instructions. */
215 { 30, 1 }, /* Q: in most AdvSIMD instructions. */
216 { 0, 5 }, /* Rt: in load/store instructions. */
217 { 0, 5 }, /* Rd: in many integer instructions. */
218 { 5, 5 }, /* Rn: in many integer instructions. */
219 { 10, 5 }, /* Rt2: in load/store pair instructions. */
220 { 10, 5 }, /* Ra: in fp instructions. */
221 { 5, 3 }, /* op2: in the system instructions. */
222 { 8, 4 }, /* CRm: in the system instructions. */
223 { 12, 4 }, /* CRn: in the system instructions. */
224 { 16, 3 }, /* op1: in the system instructions. */
225 { 19, 2 }, /* op0: in the system instructions. */
226 { 10, 3 }, /* imm3: in add/sub extended reg instructions. */
227 { 12, 4 }, /* cond: condition flags as a source operand. */
228 { 12, 4 }, /* opcode: in advsimd load/store instructions. */
229 { 12, 4 }, /* cmode: in advsimd modified immediate instructions. */
230 { 13, 3 }, /* asisdlso_opcode: opcode in advsimd ld/st single element. */
231 { 13, 2 }, /* len: in advsimd tbl/tbx instructions. */
232 { 16, 5 }, /* Rm: in ld/st reg offset and some integer inst. */
233 { 16, 5 }, /* Rs: in load/store exclusive instructions. */
234 { 13, 3 }, /* option: in ld/st reg offset + add/sub extended reg inst. */
235 { 12, 1 }, /* S: in load/store reg offset instructions. */
236 { 21, 2 }, /* hw: in move wide constant instructions. */
237 { 22, 2 }, /* opc: in load/store reg offset instructions. */
238 { 23, 1 }, /* opc1: in load/store reg offset instructions. */
239 { 22, 2 }, /* shift: in add/sub reg/imm shifted instructions. */
240 { 22, 2 }, /* type: floating point type field in fp data inst. */
241 { 30, 2 }, /* ldst_size: size field in ld/st reg offset inst. */
242 { 10, 6 }, /* imm6: in add/sub reg shifted instructions. */
243 { 15, 6 }, /* imm6_2: in rmif instructions. */
244 { 11, 4 }, /* imm4: in advsimd ext and advsimd ins instructions. */
245 { 0, 4 }, /* imm4_2: in rmif instructions. */
246 { 10, 4 }, /* imm4_3: in adddg/subg instructions. */
247 { 16, 5 }, /* imm5: in conditional compare (immediate) instructions. */
248 { 15, 7 }, /* imm7: in load/store pair pre/post index instructions. */
249 { 13, 8 }, /* imm8: in floating-point scalar move immediate inst. */
250 { 12, 9 }, /* imm9: in load/store pre/post index instructions. */
251 { 10, 12 }, /* imm12: in ld/st unsigned imm or add/sub shifted inst. */
252 { 5, 14 }, /* imm14: in test bit and branch instructions. */
253 { 5, 16 }, /* imm16: in exception instructions. */
254 { 0, 16 }, /* imm16_2: in udf instruction. */
255 { 0, 26 }, /* imm26: in unconditional branch instructions. */
256 { 10, 6 }, /* imms: in bitfield and logical immediate instructions. */
257 { 16, 6 }, /* immr: in bitfield and logical immediate instructions. */
258 { 16, 3 }, /* immb: in advsimd shift by immediate instructions. */
259 { 19, 4 }, /* immh: in advsimd shift by immediate instructions. */
260 { 22, 1 }, /* S: in LDRAA and LDRAB instructions. */
261 { 22, 1 }, /* N: in logical (immediate) instructions. */
262 { 11, 1 }, /* index: in ld/st inst deciding the pre/post-index. */
263 { 24, 1 }, /* index2: in ld/st pair inst deciding the pre/post-index. */
264 { 31, 1 }, /* sf: in integer data processing instructions. */
265 { 30, 1 }, /* lse_size: in LSE extension atomic instructions. */
266 { 11, 1 }, /* H: in advsimd scalar x indexed element instructions. */
267 { 21, 1 }, /* L: in advsimd scalar x indexed element instructions. */
268 { 20, 1 }, /* M: in advsimd scalar x indexed element instructions. */
269 { 31, 1 }, /* b5: in the test bit and branch instructions. */
270 { 19, 5 }, /* b40: in the test bit and branch instructions. */
271 { 10, 6 }, /* scale: in the fixed-point scalar to fp converting inst. */
272 { 4, 1 }, /* SVE_M_4: Merge/zero select, bit 4. */
273 { 14, 1 }, /* SVE_M_14: Merge/zero select, bit 14. */
274 { 16, 1 }, /* SVE_M_16: Merge/zero select, bit 16. */
275 { 17, 1 }, /* SVE_N: SVE equivalent of N. */
276 { 0, 4 }, /* SVE_Pd: p0-p15, bits [3,0]. */
277 { 10, 3 }, /* SVE_Pg3: p0-p7, bits [12,10]. */
278 { 5, 4 }, /* SVE_Pg4_5: p0-p15, bits [8,5]. */
279 { 10, 4 }, /* SVE_Pg4_10: p0-p15, bits [13,10]. */
280 { 16, 4 }, /* SVE_Pg4_16: p0-p15, bits [19,16]. */
281 { 16, 4 }, /* SVE_Pm: p0-p15, bits [19,16]. */
282 { 5, 4 }, /* SVE_Pn: p0-p15, bits [8,5]. */
283 { 0, 4 }, /* SVE_Pt: p0-p15, bits [3,0]. */
284 { 5, 5 }, /* SVE_Rm: SVE alternative position for Rm. */
285 { 16, 5 }, /* SVE_Rn: SVE alternative position for Rn. */
286 { 0, 5 }, /* SVE_Vd: Scalar SIMD&FP register, bits [4,0]. */
287 { 5, 5 }, /* SVE_Vm: Scalar SIMD&FP register, bits [9,5]. */
288 { 5, 5 }, /* SVE_Vn: Scalar SIMD&FP register, bits [9,5]. */
289 { 5, 5 }, /* SVE_Za_5: SVE vector register, bits [9,5]. */
290 { 16, 5 }, /* SVE_Za_16: SVE vector register, bits [20,16]. */
291 { 0, 5 }, /* SVE_Zd: SVE vector register. bits [4,0]. */
292 { 5, 5 }, /* SVE_Zm_5: SVE vector register, bits [9,5]. */
293 { 16, 5 }, /* SVE_Zm_16: SVE vector register, bits [20,16]. */
294 { 5, 5 }, /* SVE_Zn: SVE vector register, bits [9,5]. */
295 { 0, 5 }, /* SVE_Zt: SVE vector register, bits [4,0]. */
296 { 5, 1 }, /* SVE_i1: single-bit immediate. */
297 { 22, 1 }, /* SVE_i3h: high bit of 3-bit immediate. */
298 { 11, 1 }, /* SVE_i3l: low bit of 3-bit immediate. */
299 { 19, 2 }, /* SVE_i3h2: two high bits of 3bit immediate, bits [20,19]. */
300 { 20, 1 }, /* SVE_i2h: high bit of 2bit immediate, bits. */
301 { 16, 3 }, /* SVE_imm3: 3-bit immediate field. */
302 { 16, 4 }, /* SVE_imm4: 4-bit immediate field. */
303 { 5, 5 }, /* SVE_imm5: 5-bit immediate field. */
304 { 16, 5 }, /* SVE_imm5b: secondary 5-bit immediate field. */
305 { 16, 6 }, /* SVE_imm6: 6-bit immediate field. */
306 { 14, 7 }, /* SVE_imm7: 7-bit immediate field. */
307 { 5, 8 }, /* SVE_imm8: 8-bit immediate field. */
308 { 5, 9 }, /* SVE_imm9: 9-bit immediate field. */
309 { 11, 6 }, /* SVE_immr: SVE equivalent of immr. */
310 { 5, 6 }, /* SVE_imms: SVE equivalent of imms. */
311 { 10, 2 }, /* SVE_msz: 2-bit shift amount for ADR. */
312 { 5, 5 }, /* SVE_pattern: vector pattern enumeration. */
313 { 0, 4 }, /* SVE_prfop: prefetch operation for SVE PRF[BHWD]. */
314 { 16, 1 }, /* SVE_rot1: 1-bit rotation amount. */
315 { 10, 2 }, /* SVE_rot2: 2-bit rotation amount. */
316 { 10, 1 }, /* SVE_rot3: 1-bit rotation amount at bit 10. */
317 { 22, 1 }, /* SVE_sz: 1-bit element size select. */
318 { 17, 2 }, /* SVE_size: 2-bit element size, bits [18,17]. */
319 { 30, 1 }, /* SVE_sz2: 1-bit element size select. */
320 { 16, 4 }, /* SVE_tsz: triangular size select. */
321 { 22, 2 }, /* SVE_tszh: triangular size select high, bits [23,22]. */
322 { 8, 2 }, /* SVE_tszl_8: triangular size select low, bits [9,8]. */
323 { 19, 2 }, /* SVE_tszl_19: triangular size select low, bits [20,19]. */
324 { 14, 1 }, /* SVE_xs_14: UXTW/SXTW select (bit 14). */
325 { 22, 1 }, /* SVE_xs_22: UXTW/SXTW select (bit 22). */
326 { 11, 2 }, /* rotate1: FCMLA immediate rotate. */
327 { 13, 2 }, /* rotate2: Indexed element FCMLA immediate rotate. */
328 { 12, 1 }, /* rotate3: FCADD immediate rotate. */
329 { 12, 2 }, /* SM3: Indexed element SM3 2 bits index immediate. */
330 { 22, 1 }, /* sz: 1-bit element size select. */
331 { 0, 5 }, /* Cad: Capability destination register. */
332 { 5, 5 }, /* Can: Capability source register. */
333 { 16, 5 }, /* Cam: Capability register in load / store and other cap
334 instructions. */
335 { 16, 5 }, /* Cas: Capability register in some memory / load store
336 instructions. */
337 { 0, 5 }, /* Cat: Capability register in load store pair type
338 instructions. */
339 { 10, 5 }, /* Cat2: Capability register in destination for load store pair
340 type instructions. */
341 { 22, 1 }, /* a64c_shift_ai: Shift bit in immediate ADD/SUB. */
342 { 13, 8 }, /* a64c_imm8: BICFLGS imm8. */
343 { 14, 1 }, /* a64c_shift: Shift bit in SCBNDS. */
344 { 13, 3 }, /* perm: permission specifier in clrperm. */
345 { 13, 2 }, /* form: form specifier in seal. */
346 { 13, 7 }, /* capaddr_simm7: Signed immediate for BLR/BR. */
347 { 30, 1 }, /* a64c_index2: in ld/st pair inst deciding the pre/post-index. */
348 { 5, 17 }, /* imm17: in ld/st pair inst deciding the pre/post-index. */
349 { 10, 1 }, /* altbase_sf: in altbase instructions.
350 XXX We should make the SF fields into full fields throughout
351 the code base and even identify capability registers that
352 way. The OP in the altbase instructions allows that. */
353 { 22, 1 }, /* altbase_sf2: Size bit in altbase LDUR. */
354 { 22, 2 }, /* altbase_sf3: Size bits in altbase SIMD LDUR. */
355 { 5, 18 }, /* a64c_immhi: e.g. in ADRDP. */
356 { 19, 1 }, /* a64c_op0: in A64C system instructions. */
357 };
358
359 enum aarch64_operand_class
360 aarch64_get_operand_class (enum aarch64_opnd type)
361 {
362 return aarch64_operands[type].op_class;
363 }
364
365 const char *
366 aarch64_get_operand_name (enum aarch64_opnd type)
367 {
368 return aarch64_operands[type].name;
369 }
370
371 /* Get operand description string.
372 This is usually for the diagnosis purpose. */
373 const char *
374 aarch64_get_operand_desc (enum aarch64_opnd type)
375 {
376 return aarch64_operands[type].desc;
377 }
378
379 /* Table of all conditional affixes. */
380 const aarch64_cond aarch64_conds[16] =
381 {
382 {{"eq", "none"}, 0x0},
383 {{"ne", "any"}, 0x1},
384 {{"cs", "hs", "nlast"}, 0x2},
385 {{"cc", "lo", "ul", "last"}, 0x3},
386 {{"mi", "first"}, 0x4},
387 {{"pl", "nfrst"}, 0x5},
388 {{"vs"}, 0x6},
389 {{"vc"}, 0x7},
390 {{"hi", "pmore"}, 0x8},
391 {{"ls", "plast"}, 0x9},
392 {{"ge", "tcont"}, 0xa},
393 {{"lt", "tstop"}, 0xb},
394 {{"gt"}, 0xc},
395 {{"le"}, 0xd},
396 {{"al"}, 0xe},
397 {{"nv"}, 0xf},
398 };
399
400 const aarch64_cond *
401 get_cond_from_value (aarch64_insn value)
402 {
403 assert (value < 16);
404 return &aarch64_conds[(unsigned int) value];
405 }
406
407 const aarch64_cond *
408 get_inverted_cond (const aarch64_cond *cond)
409 {
410 return &aarch64_conds[cond->value ^ 0x1];
411 }
412
413 /* Return a permission string in OUT. OUT needs to be at least 4 bytes wide. */
414 static void
415 get_perm_str (aarch64_insn perm, char *out)
416 {
417 int i = 0;
418 assert (perm < 8);
419
420 /* XXX 0x0 is a valid permission, i.e. no permissions at all. The
421 reference however deems the value to be RESERVED. */
422 if (perm == 0)
423 {
424 out[i++] = '#';
425 out[i++] = '0';
426 }
427
428 if (perm & 4)
429 out[i++] = 'r';
430 if (perm & 2)
431 out[i++] = 'w';
432 if (perm & 1)
433 out[i++] = 'x';
434
435 out [i] = '\0';
436 }
437
438 aarch64_insn
439 get_perm_bit (char p)
440 {
441 switch (p)
442 {
443 case 'r':
444 return 4;
445 case 'w':
446 return 2;
447 case 'x':
448 return 1;
449 }
450
451 return 8;
452 }
453
454 /* Table of all forms. */
455 const aarch64_form aarch64_forms[] =
456 {
457 {NULL, 0x0}, /* RESERVED */
458 {"rb", 0x1},
459 {"lpb", 0x2},
460 {"lb", 0x3},
461 };
462
463 const aarch64_form *
464 get_form_from_value (aarch64_insn value)
465 {
466 assert (value < sizeof (aarch64_forms) / sizeof (aarch64_form));
467 return &aarch64_forms[(unsigned int) value];
468 }
469
470 const aarch64_form *
471 get_form_from_str (const char *form, size_t len)
472 {
473 unsigned i;
474 for (i = 1; i < sizeof (aarch64_forms) / sizeof (aarch64_form); i++)
475 if (!strncmp (form, aarch64_forms[i].name, len))
476 return &aarch64_forms[i];
477
478 return NULL;
479 }
480
481 /* Table describing the operand extension/shifting operators; indexed by
482 enum aarch64_modifier_kind.
483
484 The value column provides the most common values for encoding modifiers,
485 which enables table-driven encoding/decoding for the modifiers. */
486 const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
487 {
488 {"none", 0x0},
489 {"msl", 0x0},
490 {"ror", 0x3},
491 {"asr", 0x2},
492 {"lsr", 0x1},
493 {"lsl", 0x0},
494 {"uxtb", 0x0},
495 {"uxth", 0x1},
496 {"uxtw", 0x2},
497 {"uxtx", 0x3},
498 {"sxtb", 0x4},
499 {"sxth", 0x5},
500 {"sxtw", 0x6},
501 {"sxtx", 0x7},
502 {"mul", 0x0},
503 {"mul vl", 0x0},
504 {NULL, 0},
505 };
506
507 enum aarch64_modifier_kind
508 aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc)
509 {
510 return desc - aarch64_operand_modifiers;
511 }
512
513 aarch64_insn
514 aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind)
515 {
516 return aarch64_operand_modifiers[kind].value;
517 }
518
519 enum aarch64_modifier_kind
520 aarch64_get_operand_modifier_from_value (aarch64_insn value,
521 bfd_boolean extend_p)
522 {
523 if (extend_p == TRUE)
524 return AARCH64_MOD_UXTB + value;
525 else
526 return AARCH64_MOD_LSL - value;
527 }
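/* As a decoding illustration: a field value of 0x2 maps to AARCH64_MOD_UXTW
   when EXTEND_P is TRUE (AARCH64_MOD_UXTB + 2) and to AARCH64_MOD_ASR when
   EXTEND_P is FALSE (AARCH64_MOD_LSL - 2), matching the value column of
   aarch64_operand_modifiers above.  */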
528
529 bfd_boolean
530 aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
531 {
532 return (kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX)
533 ? TRUE : FALSE;
534 }
535
536 static inline bfd_boolean
537 aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
538 {
539 return (kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL)
540 ? TRUE : FALSE;
541 }
542
543 const struct aarch64_name_value_pair aarch64_barrier_options[16] =
544 {
545 { "#0x00", 0x0 },
546 { "oshld", 0x1 },
547 { "oshst", 0x2 },
548 { "osh", 0x3 },
549 { "#0x04", 0x4 },
550 { "nshld", 0x5 },
551 { "nshst", 0x6 },
552 { "nsh", 0x7 },
553 { "#0x08", 0x8 },
554 { "ishld", 0x9 },
555 { "ishst", 0xa },
556 { "ish", 0xb },
557 { "#0x0c", 0xc },
558 { "ld", 0xd },
559 { "st", 0xe },
560 { "sy", 0xf },
561 };
562
563 /* Table describing the operands supported by the aliases of the HINT
564 instruction.
565
566 The name column is the operand that is accepted for the alias. The value
567 column is the hint number of the alias. The list of operands is terminated
568 by NULL in the name column. */
569
570 const struct aarch64_name_value_pair aarch64_hint_options[] =
571 {
572 /* BTI. This is also the F_DEFAULT entry for AARCH64_OPND_BTI_TARGET. */
573 { " ", HINT_ENCODE (HINT_OPD_F_NOPRINT, 0x20) },
574 { "csync", HINT_OPD_CSYNC }, /* PSB CSYNC. */
575 { "c", HINT_OPD_C }, /* BTI C. */
576 { "j", HINT_OPD_J }, /* BTI J. */
577 { "jc", HINT_OPD_JC }, /* BTI JC. */
578 { NULL, HINT_OPD_NULL },
579 };
580
581 /* op -> op: load = 0 instruction = 1 store = 2
582 l -> level: 1-3
583 t -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1 */
584 #define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
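/* For instance, B(0, 1, 0) == 0x00 ("pldl1keep") and
   B(2, 3, 1) == 0x15 ("pstl3strm").  */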
585 const struct aarch64_name_value_pair aarch64_prfops[32] =
586 {
587 { "pldl1keep", B(0, 1, 0) },
588 { "pldl1strm", B(0, 1, 1) },
589 { "pldl2keep", B(0, 2, 0) },
590 { "pldl2strm", B(0, 2, 1) },
591 { "pldl3keep", B(0, 3, 0) },
592 { "pldl3strm", B(0, 3, 1) },
593 { NULL, 0x06 },
594 { NULL, 0x07 },
595 { "plil1keep", B(1, 1, 0) },
596 { "plil1strm", B(1, 1, 1) },
597 { "plil2keep", B(1, 2, 0) },
598 { "plil2strm", B(1, 2, 1) },
599 { "plil3keep", B(1, 3, 0) },
600 { "plil3strm", B(1, 3, 1) },
601 { NULL, 0x0e },
602 { NULL, 0x0f },
603 { "pstl1keep", B(2, 1, 0) },
604 { "pstl1strm", B(2, 1, 1) },
605 { "pstl2keep", B(2, 2, 0) },
606 { "pstl2strm", B(2, 2, 1) },
607 { "pstl3keep", B(2, 3, 0) },
608 { "pstl3strm", B(2, 3, 1) },
609 { NULL, 0x16 },
610 { NULL, 0x17 },
611 { NULL, 0x18 },
612 { NULL, 0x19 },
613 { NULL, 0x1a },
614 { NULL, 0x1b },
615 { NULL, 0x1c },
616 { NULL, 0x1d },
617 { NULL, 0x1e },
618 { NULL, 0x1f },
619 };
620 #undef B
621 \f
622 /* Utilities on value constraint. */
623
624 static inline int
625 value_in_range_p (int64_t value, int low, int high)
626 {
627 return (value >= low && value <= high) ? 1 : 0;
628 }
629
630 /* Return true if VALUE is a multiple of ALIGN. */
631 static inline int
632 value_aligned_p (int64_t value, int align)
633 {
634 return (value % align) == 0;
635 }
636
637 /* A signed value fits in a field. */
638 static inline int
639 value_fit_signed_field_p (int64_t value, unsigned width)
640 {
641 assert (width < 32);
642 if (width < sizeof (value) * 8)
643 {
644 int64_t lim = (uint64_t) 1 << (width - 1);
645 if (value >= -lim && value < lim)
646 return 1;
647 }
648 return 0;
649 }
650
651 /* An unsigned value fits in a field. */
652 static inline int
653 value_fit_unsigned_field_p (int64_t value, unsigned width)
654 {
655 assert (width < 32);
656 if (width < sizeof (value) * 8)
657 {
658 int64_t lim = (uint64_t) 1 << width;
659 if (value >= 0 && value < lim)
660 return 1;
661 }
662 return 0;
663 }
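/* E.g. for a 7-bit field, value_fit_signed_field_p accepts [-64, 63] and
   value_fit_unsigned_field_p accepts [0, 127].  */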
664
665 /* Return 1 if OPERAND is SP or WSP. */
666 int
667 aarch64_stack_pointer_p (const aarch64_opnd_info *operand)
668 {
669 return ((aarch64_get_operand_class (operand->type)
670 == AARCH64_OPND_CLASS_INT_REG)
671 && operand_maybe_stack_pointer (aarch64_operands + operand->type)
672 && operand->reg.regno == 31);
673 }
674
675 /* Return 1 if OPERAND is XZR or WZR. */
676 int
677 aarch64_zero_register_p (const aarch64_opnd_info *operand)
678 {
679 return ((aarch64_get_operand_class (operand->type)
680 == AARCH64_OPND_CLASS_INT_REG)
681 && !operand_maybe_stack_pointer (aarch64_operands + operand->type)
682 && operand->reg.regno == 31);
683 }
684
685 /* Return true if the operand *OPERAND that has the operand code
686 OPERAND->TYPE and has been qualified by OPERAND->QUALIFIER can also be
687 qualified by the qualifier TARGET. */
688
689 static inline int
690 operand_also_qualified_p (const struct aarch64_opnd_info *operand,
691 aarch64_opnd_qualifier_t target)
692 {
693 switch (operand->qualifier)
694 {
695 case AARCH64_OPND_QLF_W:
696 if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand))
697 return 1;
698 break;
699 case AARCH64_OPND_QLF_X:
700 if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand))
701 return 1;
702 break;
703 case AARCH64_OPND_QLF_WSP:
704 if (target == AARCH64_OPND_QLF_W
705 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
706 return 1;
707 break;
708 case AARCH64_OPND_QLF_SP:
709 if (target == AARCH64_OPND_QLF_X
710 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
711 return 1;
712 break;
713 default:
714 break;
715 }
716
717 return 0;
718 }
719
720 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
721 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
722
723 Return NIL if more than one expected qualifier is found. */
724
725 aarch64_opnd_qualifier_t
726 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list,
727 int idx,
728 const aarch64_opnd_qualifier_t known_qlf,
729 int known_idx)
730 {
731 int i, saved_i;
732
733 /* Special case.
734
735 When the known qualifier is NIL, we have to assume that there is only
736 one qualifier sequence in the *QSEQ_LIST and return the corresponding
737 qualifier directly. One scenario is that for instruction
738 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
739 which has only one possible valid qualifier sequence
740 NIL, S_D
741 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
742 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
743
744 Because the qualifier NIL has dual roles in the qualifier sequence:
745 it can mean no qualifier for the operand, or the qualifier sequence is
746 not in use (when all qualifiers in the sequence are NILs), we have to
747 handle this special case here. */
748 if (known_qlf == AARCH64_OPND_NIL)
749 {
750 assert (qseq_list[0][known_idx] == AARCH64_OPND_NIL);
751 return qseq_list[0][idx];
752 }
753
754 for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
755 {
756 if (qseq_list[i][known_idx] == known_qlf)
757 {
758 if (saved_i != -1)
759 /* More than one sequence is found to have KNOWN_QLF at
760 KNOWN_IDX. */
761 return AARCH64_OPND_NIL;
762 saved_i = i;
763 }
764 }
765
766 return qseq_list[saved_i][idx];
767 }
768
769 enum operand_qualifier_kind
770 {
771 OQK_NIL,
772 OQK_OPD_VARIANT,
773 OQK_VALUE_IN_RANGE,
774 OQK_MISC,
775 };
776
777 /* Operand qualifier description. */
778 struct operand_qualifier_data
779 {
780 /* The usage of the three data fields depends on the qualifier kind. */
781 int data0;
782 int data1;
783 int data2;
784 /* Description. */
785 const char *desc;
786 /* Kind. */
787 enum operand_qualifier_kind kind;
788 };
789
790 /* Indexed by the operand qualifier enumerators. */
791 struct operand_qualifier_data aarch64_opnd_qualifiers[] =
792 {
793 {0, 0, 0, "NIL", OQK_NIL},
794
795 /* Operand variant qualifiers.
796 First 3 fields:
797 element size, number of elements and common value for encoding. */
798
799 {4, 1, 0x0, "w", OQK_OPD_VARIANT},
800 {8, 1, 0x1, "x", OQK_OPD_VARIANT},
801 {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
802 {8, 1, 0x1, "sp", OQK_OPD_VARIANT},
803
804 {1, 1, 0x0, "b", OQK_OPD_VARIANT},
805 {2, 1, 0x1, "h", OQK_OPD_VARIANT},
806 {4, 1, 0x2, "s", OQK_OPD_VARIANT},
807 {8, 1, 0x3, "d", OQK_OPD_VARIANT},
808 {16, 1, 0x4, "q", OQK_OPD_VARIANT},
809 {4, 1, 0x0, "4b", OQK_OPD_VARIANT},
810 {4, 1, 0x0, "2h", OQK_OPD_VARIANT},
811
812 {1, 4, 0x0, "4b", OQK_OPD_VARIANT},
813 {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
814 {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
815 {2, 2, 0x0, "2h", OQK_OPD_VARIANT},
816 {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
817 {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
818 {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
819 {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
820 {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
821 {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
822 {16, 1, 0x8, "1q", OQK_OPD_VARIANT},
823
824 {0, 0, 0, "z", OQK_OPD_VARIANT},
825 {0, 0, 0, "m", OQK_OPD_VARIANT},
826
827 /* Qualifier for scaled immediate for Tag granule (stg,st2g,etc). */
828 {16, 0, 0, "tag", OQK_OPD_VARIANT},
829 {16, 1, 0, "c", OQK_OPD_VARIANT},
830
831 /* Qualifiers constraining the value range.
832 First 3 fields:
833 Lower bound, higher bound, unused. */
834
835 {0, 15, 0, "CR", OQK_VALUE_IN_RANGE},
836 {0, 7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
837 {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
838 {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
839 {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
840 {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
841 {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},
842
843 /* Qualifiers for miscellaneous purpose.
844 First 3 fields:
845 unused, unused and unused. */
846
847 {0, 0, 0, "lsl", 0},
848 {0, 0, 0, "msl", 0},
849
850 {0, 0, 0, "retrieving", 0},
851 };
852
853 static inline bfd_boolean
854 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
855 {
856 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT)
857 ? TRUE : FALSE;
858 }
859
860 static inline bfd_boolean
861 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
862 {
863 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE)
864 ? TRUE : FALSE;
865 }
866
867 const char*
868 aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier)
869 {
870 return aarch64_opnd_qualifiers[qualifier].desc;
871 }
872
873 /* Given an operand qualifier, return the expected data element size
874 of a qualified operand. */
875 unsigned char
876 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier)
877 {
878 assert (operand_variant_qualifier_p (qualifier) == TRUE);
879 return aarch64_opnd_qualifiers[qualifier].data0;
880 }
881
882 unsigned char
883 aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier)
884 {
885 assert (operand_variant_qualifier_p (qualifier) == TRUE);
886 return aarch64_opnd_qualifiers[qualifier].data1;
887 }
888
889 aarch64_insn
890 aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier)
891 {
892 assert (operand_variant_qualifier_p (qualifier) == TRUE);
893 return aarch64_opnd_qualifiers[qualifier].data2;
894 }
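/* For instance, the "4s" qualifier describes a vector of four 4-byte
   elements: esize 4, nelem 4 and standard encoding value 0x5, per the
   aarch64_opnd_qualifiers table above.  */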
895
896 static int
897 get_lower_bound (aarch64_opnd_qualifier_t qualifier)
898 {
899 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
900 return aarch64_opnd_qualifiers[qualifier].data0;
901 }
902
903 static int
904 get_upper_bound (aarch64_opnd_qualifier_t qualifier)
905 {
906 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
907 return aarch64_opnd_qualifiers[qualifier].data1;
908 }
909
910 #ifdef DEBUG_AARCH64
911 void
912 aarch64_verbose (const char *str, ...)
913 {
914 va_list ap;
915 va_start (ap, str);
916 printf ("#### ");
917 vprintf (str, ap);
918 printf ("\n");
919 va_end (ap);
920 }
921
922 static inline void
923 dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
924 {
925 int i;
926 printf ("#### \t");
927 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
928 printf ("%s,", aarch64_get_qualifier_name (*qualifier));
929 printf ("\n");
930 }
931
932 static void
933 dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
934 const aarch64_opnd_qualifier_t *qualifier)
935 {
936 int i;
937 aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];
938
939 aarch64_verbose ("dump_match_qualifiers:");
940 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
941 curr[i] = opnd[i].qualifier;
942 dump_qualifier_sequence (curr);
943 aarch64_verbose ("against");
944 dump_qualifier_sequence (qualifier);
945 }
946 #endif /* DEBUG_AARCH64 */
947
948 /* This function checks if the given instruction INSN is a destructive
949 instruction based on the usage of the registers. It does not recognize
950 unary destructive instructions. */
951 bfd_boolean
952 aarch64_is_destructive_by_operands (const aarch64_opcode *opcode)
953 {
954 int i = 0;
955 const enum aarch64_opnd *opnds = opcode->operands;
956
957 if (opnds[0] == AARCH64_OPND_NIL)
958 return FALSE;
959
960 while (opnds[++i] != AARCH64_OPND_NIL)
961 if (opnds[i] == opnds[0])
962 return TRUE;
963
964 return FALSE;
965 }
966
967 /* TODO: improve this; we could have an extra field at run time to
968 store the number of operands rather than calculating it every time. */
969
970 int
971 aarch64_num_of_operands (const aarch64_opcode *opcode)
972 {
973 int i = 0;
974 const enum aarch64_opnd *opnds = opcode->operands;
975 while (opnds[i++] != AARCH64_OPND_NIL)
976 ;
977 --i;
978 assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM);
979 return i;
980 }
981
982 /* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
983 If succeeds, fill the found sequence in *RET, return 1; otherwise return 0.
984
985 N.B. on entry, it is very likely that only some operands in *INST
986 have had their qualifiers established.
987
988 If STOP_AT is not -1, the function will only try to match
989 the qualifier sequence for operands before and including the operand
990 of index STOP_AT; and on success *RET will only be filled with the first
991 (STOP_AT+1) qualifiers.
992
993 A couple of examples of the matching algorithm:
994
995 X,W,NIL should match
996 X,W,NIL
997
998 NIL,NIL should match
999 X ,NIL
1000
1001 Apart from serving the main encoding routine, this can also be called
1002 during or after the operand decoding. */
1003
1004 int
1005 aarch64_find_best_match (const aarch64_inst *inst,
1006 const aarch64_opnd_qualifier_seq_t *qualifiers_list,
1007 int stop_at, aarch64_opnd_qualifier_t *ret)
1008 {
1009 int found = 0;
1010 int i, num_opnds;
1011 const aarch64_opnd_qualifier_t *qualifiers;
1012
1013 num_opnds = aarch64_num_of_operands (inst->opcode);
1014 if (num_opnds == 0)
1015 {
1016 DEBUG_TRACE ("SUCCEED: no operand");
1017 return 1;
1018 }
1019
1020 if (stop_at < 0 || stop_at >= num_opnds)
1021 stop_at = num_opnds - 1;
1022
1023 /* For each pattern. */
1024 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
1025 {
1026 int j;
1027 qualifiers = *qualifiers_list;
1028
1029 /* Start as positive. */
1030 found = 1;
1031
1032 DEBUG_TRACE ("%d", i);
1033 #ifdef DEBUG_AARCH64
1034 if (debug_dump)
1035 dump_match_qualifiers (inst->operands, qualifiers);
1036 #endif
1037
1038 /* Most opcodes have far fewer patterns in the list.
1039 The first NIL qualifier indicates the end of the list. */
1040 if (empty_qualifier_sequence_p (qualifiers) == TRUE)
1041 {
1042 DEBUG_TRACE_IF (i == 0, "SUCCEED: empty qualifier list");
1043 if (i)
1044 found = 0;
1045 break;
1046 }
1047
1048 for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers)
1049 {
1050 if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL)
1051 {
1052 /* Either the operand does not have qualifier, or the qualifier
1053 for the operand needs to be deduced from the qualifier
1054 sequence.
1055 In the latter case, any constraint checking related with
1056 the obtained qualifier should be done later in
1057 operand_general_constraint_met_p. */
1058 continue;
1059 }
1060 else if (*qualifiers != inst->operands[j].qualifier)
1061 {
1062 /* Unless the target qualifier can also qualify the operand
1063 (which has already had a non-nil qualifier), non-equal
1064 qualifiers are generally un-matched. */
1065 if (operand_also_qualified_p (inst->operands + j, *qualifiers))
1066 continue;
1067 else
1068 {
1069 found = 0;
1070 break;
1071 }
1072 }
1073 else
1074 continue; /* Equal qualifiers are certainly matched. */
1075 }
1076
1077 /* Qualifiers established. */
1078 if (found == 1)
1079 break;
1080 }
1081
1082 if (found == 1)
1083 {
1084 /* Fill the result in *RET. */
1085 int j;
1086 qualifiers = *qualifiers_list;
1087
1088 DEBUG_TRACE ("complete qualifiers using list %d", i);
1089 #ifdef DEBUG_AARCH64
1090 if (debug_dump)
1091 dump_qualifier_sequence (qualifiers);
1092 #endif
1093
1094 for (j = 0; j <= stop_at; ++j, ++qualifiers)
1095 ret[j] = *qualifiers;
1096 for (; j < AARCH64_MAX_OPND_NUM; ++j)
1097 ret[j] = AARCH64_OPND_QLF_NIL;
1098
1099 DEBUG_TRACE ("SUCCESS");
1100 return 1;
1101 }
1102
1103 DEBUG_TRACE ("FAIL");
1104 return 0;
1105 }
1106
1107 /* Operand qualifier matching and resolving.
1108
1109 Return 1 if the operand qualifier(s) in *INST match one of the qualifier
1110 sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
1111
1112 If UPDATE_P == TRUE, update the qualifier(s) in *INST after the matching
1113 succeeds. */
1114
1115 static int
1116 match_operands_qualifier (aarch64_inst *inst, bfd_boolean update_p)
1117 {
1118 int i, nops;
1119 aarch64_opnd_qualifier_seq_t qualifiers;
1120
1121 if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1,
1122 qualifiers))
1123 {
1124 DEBUG_TRACE ("matching FAIL");
1125 return 0;
1126 }
1127
1128 if (inst->opcode->flags & F_STRICT)
1129 {
1130 /* Require an exact qualifier match, even for NIL qualifiers. */
1131 nops = aarch64_num_of_operands (inst->opcode);
1132 for (i = 0; i < nops; ++i)
1133 if (inst->operands[i].qualifier != qualifiers[i])
1134 return FALSE;
1135 }
1136
1137 /* Update the qualifiers. */
1138 if (update_p == TRUE)
1139 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
1140 {
1141 if (inst->opcode->operands[i] == AARCH64_OPND_NIL)
1142 break;
1143 DEBUG_TRACE_IF (inst->operands[i].qualifier != qualifiers[i],
1144 "update %s with %s for operand %d",
1145 aarch64_get_qualifier_name (inst->operands[i].qualifier),
1146 aarch64_get_qualifier_name (qualifiers[i]), i);
1147 inst->operands[i].qualifier = qualifiers[i];
1148 }
1149
1150 DEBUG_TRACE ("matching SUCCESS");
1151 return 1;
1152 }
1153
1154 /* Return TRUE if VALUE is a wide constant that can be moved into a general
1155 register by MOVZ.
1156
1157 IS32 indicates whether VALUE is a 32-bit immediate or not.
1158 If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
1159 amount will be returned in *SHIFT_AMOUNT. */
1160
1161 bfd_boolean
1162 aarch64_wide_constant_p (uint64_t value, int is32, unsigned int *shift_amount)
1163 {
1164 int amount;
1165
1166 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1167
1168 if (is32)
1169 {
1170 /* Allow all zeros or all ones in top 32-bits, so that
1171 32-bit constant expressions like ~0x80000000 are
1172 permitted. */
1173 if (value >> 32 != 0 && value >> 32 != 0xffffffff)
1174 /* Immediate out of range. */
1175 return FALSE;
1176 value &= 0xffffffff;
1177 }
1178
1179 /* first, try movz then movn */
1180 amount = -1;
1181 if ((value & ((uint64_t) 0xffff << 0)) == value)
1182 amount = 0;
1183 else if ((value & ((uint64_t) 0xffff << 16)) == value)
1184 amount = 16;
1185 else if (!is32 && (value & ((uint64_t) 0xffff << 32)) == value)
1186 amount = 32;
1187 else if (!is32 && (value & ((uint64_t) 0xffff << 48)) == value)
1188 amount = 48;
1189
1190 if (amount == -1)
1191 {
1192 DEBUG_TRACE ("exit FALSE with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1193 return FALSE;
1194 }
1195
1196 if (shift_amount != NULL)
1197 *shift_amount = amount;
1198
1199 DEBUG_TRACE ("exit TRUE with amount %d", amount);
1200
1201 return TRUE;
1202 }
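/* For example, 0x12340000 is accepted with a shift amount of 16 (it is
   0x1234 placed in the second 16-bit field), whereas 0x12345678 spans two
   16-bit fields and is rejected.  */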
1203
1204 /* Build the accepted values for immediate logical SIMD instructions.
1205
1206 The standard encodings of the immediate value are:
1207 N imms immr SIMD size R S
1208 1 ssssss rrrrrr 64 UInt(rrrrrr) UInt(ssssss)
1209 0 0sssss 0rrrrr 32 UInt(rrrrr) UInt(sssss)
1210 0 10ssss 00rrrr 16 UInt(rrrr) UInt(ssss)
1211 0 110sss 000rrr 8 UInt(rrr) UInt(sss)
1212 0 1110ss 0000rr 4 UInt(rr) UInt(ss)
1213 0 11110s 00000r 2 UInt(r) UInt(s)
1214 where all-ones value of S is reserved.
1215
1216 Let's call E the SIMD size.
1217
1218 The immediate value is: S+1 bits '1' rotated to the right by R.
1219
1220 The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
1221 (remember S != E - 1). */
1222
1223 #define TOTAL_IMM_NB 5334
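/* That is, for each element size E there are E choices of R and E - 1
   choices of S: 64*63 + 32*31 + 16*15 + 8*7 + 4*3 + 2*1
   = 4032 + 992 + 240 + 56 + 12 + 2 = 5334.  */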
1224
1225 typedef struct
1226 {
1227 uint64_t imm;
1228 aarch64_insn encoding;
1229 } simd_imm_encoding;
1230
1231 static simd_imm_encoding simd_immediates[TOTAL_IMM_NB];
1232
1233 static int
1234 simd_imm_encoding_cmp(const void *i1, const void *i2)
1235 {
1236 const simd_imm_encoding *imm1 = (const simd_imm_encoding *)i1;
1237 const simd_imm_encoding *imm2 = (const simd_imm_encoding *)i2;
1238
1239 if (imm1->imm < imm2->imm)
1240 return -1;
1241 if (imm1->imm > imm2->imm)
1242 return +1;
1243 return 0;
1244 }
1245
1246 /* immediate bitfield standard encoding
1247 imm13<12> imm13<5:0> imm13<11:6> SIMD size R S
1248 1 ssssss rrrrrr 64 rrrrrr ssssss
1249 0 0sssss 0rrrrr 32 rrrrr sssss
1250 0 10ssss 00rrrr 16 rrrr ssss
1251 0 110sss 000rrr 8 rrr sss
1252 0 1110ss 0000rr 4 rr ss
1253 0 11110s 00000r 2 r s */
1254 static inline int
1255 encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
1256 {
1257 return (is64 << 12) | (r << 6) | s;
1258 }
1259
1260 static void
1261 build_immediate_table (void)
1262 {
1263 uint32_t log_e, e, s, r, s_mask;
1264 uint64_t mask, imm;
1265 int nb_imms;
1266 int is64;
1267
1268 nb_imms = 0;
1269 for (log_e = 1; log_e <= 6; log_e++)
1270 {
1271 /* Get element size. */
1272 e = 1u << log_e;
1273 if (log_e == 6)
1274 {
1275 is64 = 1;
1276 mask = 0xffffffffffffffffull;
1277 s_mask = 0;
1278 }
1279 else
1280 {
1281 is64 = 0;
1282 mask = (1ull << e) - 1;
1283 /* log_e s_mask
1284 1 ((1 << 4) - 1) << 2 = 111100
1285 2 ((1 << 3) - 1) << 3 = 111000
1286 3 ((1 << 2) - 1) << 4 = 110000
1287 4 ((1 << 1) - 1) << 5 = 100000
1288 5 ((1 << 0) - 1) << 6 = 000000 */
1289 s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1);
1290 }
1291 for (s = 0; s < e - 1; s++)
1292 for (r = 0; r < e; r++)
1293 {
1294 /* s+1 consecutive bits to 1 (s < 63) */
1295 imm = (1ull << (s + 1)) - 1;
1296 /* rotate right by r */
1297 if (r != 0)
1298 imm = (imm >> r) | ((imm << (e - r)) & mask);
1299 /* replicate the constant depending on SIMD size */
1300 switch (log_e)
1301 {
1302 case 1: imm = (imm << 2) | imm;
1303 /* Fall through. */
1304 case 2: imm = (imm << 4) | imm;
1305 /* Fall through. */
1306 case 3: imm = (imm << 8) | imm;
1307 /* Fall through. */
1308 case 4: imm = (imm << 16) | imm;
1309 /* Fall through. */
1310 case 5: imm = (imm << 32) | imm;
1311 /* Fall through. */
1312 case 6: break;
1313 default: abort ();
1314 }
1315 simd_immediates[nb_imms].imm = imm;
1316 simd_immediates[nb_imms].encoding =
1317 encode_immediate_bitfield(is64, s | s_mask, r);
1318 nb_imms++;
1319 }
1320 }
1321 assert (nb_imms == TOTAL_IMM_NB);
1322 qsort(simd_immediates, nb_imms,
1323 sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1324 }
1325
1326 /* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
1327 be accepted by logical (immediate) instructions
1328 e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1329
1330 ESIZE is the number of bytes in the decoded immediate value.
1331 If ENCODING is not NULL, on the return of TRUE, the standard encoding for
1332 VALUE will be returned in *ENCODING. */
1333
1334 bfd_boolean
1335 aarch64_logical_immediate_p (uint64_t value, int esize, aarch64_insn *encoding)
1336 {
1337 simd_imm_encoding imm_enc;
1338 const simd_imm_encoding *imm_encoding;
1339 static bfd_boolean initialized = FALSE;
1340 uint64_t upper;
1341 int i;
1342
1343 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), esize: %d", value,
1344 value, esize);
1345
1346 if (!initialized)
1347 {
1348 build_immediate_table ();
1349 initialized = TRUE;
1350 }
1351
1352 /* Allow all zeros or all ones in top bits, so that
1353 constant expressions like ~1 are permitted. */
1354 upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
1355 if ((value & ~upper) != value && (value | upper) != value)
1356 return FALSE;
1357
1358 /* Replicate to a full 64-bit value. */
1359 value &= ~upper;
1360 for (i = esize * 8; i < 64; i *= 2)
1361 value |= (value << i);
1362
1363 imm_enc.imm = value;
1364 imm_encoding = (const simd_imm_encoding *)
1365 bsearch(&imm_enc, simd_immediates, TOTAL_IMM_NB,
1366 sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1367 if (imm_encoding == NULL)
1368 {
1369 DEBUG_TRACE ("exit with FALSE");
1370 return FALSE;
1371 }
1372 if (encoding != NULL)
1373 *encoding = imm_encoding->encoding;
1374 DEBUG_TRACE ("exit with TRUE");
1375 return TRUE;
1376 }
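/* A couple of sanity examples: 0x5555555555555555 (alternating bits, i.e.
   one set bit per 2-bit element) is a valid logical immediate for esize 8,
   while 0x1234 is not, since its set bits do not form a rotated run of
   ones for any element size.  */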
1377
1378 /* If 64-bit immediate IMM is in the format of
1379 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
1380 where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
1381 of value "abcdefgh". Otherwise return -1. */
1382 int
1383 aarch64_shrink_expanded_imm8 (uint64_t imm)
1384 {
1385 int i, ret;
1386 uint32_t byte;
1387
1388 ret = 0;
1389 for (i = 0; i < 8; i++)
1390 {
1391 byte = (imm >> (8 * i)) & 0xff;
1392 if (byte == 0xff)
1393 ret |= 1 << i;
1394 else if (byte != 0x00)
1395 return -1;
1396 }
1397 return ret;
1398 }
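/* For example, 0x00ff00ff00ff00ff shrinks to 0x55 (every other byte is
   0xff), while 0x00ff00ff00ff00f0 returns -1 because its lowest byte is
   neither 0x00 nor 0xff.  */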
1399
1400 /* Utility inline functions for operand_general_constraint_met_p. */
1401
1402 static inline void
1403 set_error (aarch64_operand_error *mismatch_detail,
1404 enum aarch64_operand_error_kind kind, int idx,
1405 const char* error)
1406 {
1407 if (mismatch_detail == NULL)
1408 return;
1409 mismatch_detail->kind = kind;
1410 mismatch_detail->index = idx;
1411 mismatch_detail->error = error;
1412 }
1413
1414 static inline void
1415 set_syntax_error (aarch64_operand_error *mismatch_detail, int idx,
1416 const char* error)
1417 {
1418 if (mismatch_detail == NULL)
1419 return;
1420 set_error (mismatch_detail, AARCH64_OPDE_SYNTAX_ERROR, idx, error);
1421 }
1422
1423 static inline void
1424 set_out_of_range_error (aarch64_operand_error *mismatch_detail,
1425 int idx, int lower_bound, int upper_bound,
1426 const char* error)
1427 {
1428 if (mismatch_detail == NULL)
1429 return;
1430 set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error);
1431 mismatch_detail->data[0] = lower_bound;
1432 mismatch_detail->data[1] = upper_bound;
1433 }
1434
1435 static inline void
1436 set_imm_out_of_range_error (aarch64_operand_error *mismatch_detail,
1437 int idx, int lower_bound, int upper_bound)
1438 {
1439 if (mismatch_detail == NULL)
1440 return;
1441 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1442 _("immediate value"));
1443 }
1444
1445 static inline void
1446 set_offset_out_of_range_error (aarch64_operand_error *mismatch_detail,
1447 int idx, int lower_bound, int upper_bound)
1448 {
1449 if (mismatch_detail == NULL)
1450 return;
1451 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1452 _("immediate offset"));
1453 }
1454
1455 static inline void
1456 set_regno_out_of_range_error (aarch64_operand_error *mismatch_detail,
1457 int idx, int lower_bound, int upper_bound)
1458 {
1459 if (mismatch_detail == NULL)
1460 return;
1461 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1462 _("register number"));
1463 }
1464
1465 static inline void
1466 set_elem_idx_out_of_range_error (aarch64_operand_error *mismatch_detail,
1467 int idx, int lower_bound, int upper_bound)
1468 {
1469 if (mismatch_detail == NULL)
1470 return;
1471 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1472 _("register element index"));
1473 }
1474
1475 static inline void
1476 set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail,
1477 int idx, int lower_bound, int upper_bound)
1478 {
1479 if (mismatch_detail == NULL)
1480 return;
1481 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1482 _("shift amount"));
1483 }
1484
1485 /* Report that the MUL modifier in operand IDX should be in the range
1486 [LOWER_BOUND, UPPER_BOUND]. */
1487 static inline void
1488 set_multiplier_out_of_range_error (aarch64_operand_error *mismatch_detail,
1489 int idx, int lower_bound, int upper_bound)
1490 {
1491 if (mismatch_detail == NULL)
1492 return;
1493 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1494 _("multiplier"));
1495 }
1496
1497 static inline void
1498 set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx,
1499 int alignment)
1500 {
1501 if (mismatch_detail == NULL)
1502 return;
1503 set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL);
1504 mismatch_detail->data[0] = alignment;
1505 }
1506
1507 static inline void
1508 set_reg_list_error (aarch64_operand_error *mismatch_detail, int idx,
1509 int expected_num)
1510 {
1511 if (mismatch_detail == NULL)
1512 return;
1513 set_error (mismatch_detail, AARCH64_OPDE_REG_LIST, idx, NULL);
1514 mismatch_detail->data[0] = expected_num;
1515 }
1516
1517 static inline void
1518 set_other_error (aarch64_operand_error *mismatch_detail, int idx,
1519 const char* error)
1520 {
1521 if (mismatch_detail == NULL)
1522 return;
1523 set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error);
1524 }
1525
1526 static bfd_boolean
1527 validate_adr_reg_for_feature (enum aarch64_opnd type,
1528 aarch64_feature_set features,
1529 aarch64_operand_error *mismatch_detail)
1530 {
1531 if (AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_C64)
1532 && type != AARCH64_OPND_Cad)
1533 {
1534 set_syntax_error (mismatch_detail, 0, _("capability register expected"));
1535 return FALSE;
1536 }
1537 return TRUE;
1538 }
1539
1540 /* General constraint checking based on operand code.
1541
1542 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1543 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1544
1545 This function has to be called after the qualifiers for all operands
1546 have been resolved.
1547
1548 A mismatching error message is returned in *MISMATCH_DETAIL upon request,
1549 i.e. when MISMATCH_DETAIL is non-NULL. This avoids generating error
1550 messages during disassembly, where they are not wanted. We avoid the
1551 dynamic construction of strings of error messages
1552 here (i.e. in libopcodes), as it is costly and complicated; instead, we
1553 use a combination of error code, static string and some integer data to
1554 represent an error. */
1555
1556 static int
1557 operand_general_constraint_met_p (aarch64_feature_set features,
1558 const aarch64_opnd_info *opnds, int idx,
1559 enum aarch64_opnd type,
1560 const aarch64_opcode *opcode,
1561 aarch64_operand_error *mismatch_detail)
1562 {
1563 unsigned num, modifiers, shift;
1564 unsigned char size;
1565 int64_t imm, min_value, max_value;
1566 uint64_t uvalue, mask;
1567 const aarch64_opnd_info *opnd = opnds + idx;
1568 aarch64_opnd_qualifier_t qualifier = opnd->qualifier;
1569
1570 assert (opcode->operands[idx] == opnd->type && opnd->type == type);
1571
1572 switch (aarch64_operands[type].op_class)
1573 {
1574 case AARCH64_OPND_CLASS_INT_REG:
1575 case AARCH64_OPND_CLASS_CAP_REG:
1576 /* Check pair reg constraints for cas* instructions. */
1577 if (type == AARCH64_OPND_PAIRREG)
1578 {
1579 assert (idx == 1 || idx == 3);
1580 if (opnds[idx - 1].reg.regno % 2 != 0)
1581 {
1582 set_syntax_error (mismatch_detail, idx - 1,
1583 _("reg pair must start from even reg"));
1584 return 0;
1585 }
1586 if (opnds[idx].reg.regno != opnds[idx - 1].reg.regno + 1)
1587 {
1588 set_syntax_error (mismatch_detail, idx,
1589 _("reg pair must be contiguous"));
1590 return 0;
1591 }
1592 break;
1593 }
1594
1595 /* <Xt> may be optional in some IC and TLBI instructions. */
1596 if (type == AARCH64_OPND_Rt_SYS
1597 || type == AARCH64_OPND_Cat_SYS)
1598 {
1599 assert (idx == 1 && (aarch64_get_operand_class (opnds[0].type)
1600 == AARCH64_OPND_CLASS_SYSTEM));
1601 if (opnds[1].present
1602 && !aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1603 {
1604 set_other_error (mismatch_detail, idx, _("extraneous register"));
1605 return 0;
1606 }
1607 if (!opnds[1].present
1608 && aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1609 {
1610 set_other_error (mismatch_detail, idx, _("missing register"));
1611 return 0;
1612 }
1613 }
1614 switch (qualifier)
1615 {
1616 case AARCH64_OPND_QLF_WSP:
1617 case AARCH64_OPND_QLF_SP:
1618 if (!aarch64_stack_pointer_p (opnd))
1619 {
1620 set_other_error (mismatch_detail, idx,
1621 _("stack pointer register expected"));
1622 return 0;
1623 }
1624 break;
1625 default:
1626 break;
1627 }
1628 /* Reject A64 RET with default operand when in C64 mode. */
1629 if (opcode->iclass == branch_reg
1630 && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_C64)
1631 && !opnd->present)
1632 {
1633 set_other_error (mismatch_detail, idx,
1634 _("capability register expected"));
1635 return 0;
1636 }
1637 break;
1638
1639 case AARCH64_OPND_CLASS_SVE_REG:
1640 switch (type)
1641 {
1642 case AARCH64_OPND_SVE_Zm3_INDEX:
1643 case AARCH64_OPND_SVE_Zm3_22_INDEX:
1644 case AARCH64_OPND_SVE_Zm3_11_INDEX:
1645 case AARCH64_OPND_SVE_Zm4_11_INDEX:
1646 case AARCH64_OPND_SVE_Zm4_INDEX:
1647 size = get_operand_fields_width (get_operand_from_code (type));
1648 shift = get_operand_specific_data (&aarch64_operands[type]);
1649 mask = (1 << shift) - 1;
1650 if (opnd->reg.regno > mask)
1651 {
1652 assert (mask == 7 || mask == 15);
1653 set_other_error (mismatch_detail, idx,
1654 mask == 15
1655 ? _("z0-z15 expected")
1656 : _("z0-z7 expected"));
1657 return 0;
1658 }
1659 mask = (1u << (size - shift)) - 1;
1660 if (!value_in_range_p (opnd->reglane.index, 0, mask))
1661 {
1662 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, mask);
1663 return 0;
1664 }
1665 break;
1666
1667 case AARCH64_OPND_SVE_Zn_INDEX:
1668 size = aarch64_get_qualifier_esize (opnd->qualifier);
1669 if (!value_in_range_p (opnd->reglane.index, 0, 64 / size - 1))
1670 {
1671 set_elem_idx_out_of_range_error (mismatch_detail, idx,
1672 0, 64 / size - 1);
1673 return 0;
1674 }
1675 break;
1676
1677 case AARCH64_OPND_SVE_ZnxN:
1678 case AARCH64_OPND_SVE_ZtxN:
1679 if (opnd->reglist.num_regs != get_opcode_dependent_value (opcode))
1680 {
1681 set_other_error (mismatch_detail, idx,
1682 _("invalid register list"));
1683 return 0;
1684 }
1685 break;
1686
1687 default:
1688 break;
1689 }
1690 break;
1691
1692 case AARCH64_OPND_CLASS_PRED_REG:
1693 if (opnd->reg.regno >= 8
1694 && get_operand_fields_width (get_operand_from_code (type)) == 3)
1695 {
1696 set_other_error (mismatch_detail, idx, _("p0-p7 expected"));
1697 return 0;
1698 }
1699 break;
1700
1701 case AARCH64_OPND_CLASS_COND:
1702 if (type == AARCH64_OPND_COND1
1703 && (opnds[idx].cond->value & 0xe) == 0xe)
1704 {
1705 /* Do not allow AL or NV. */
1706 set_syntax_error (mismatch_detail, idx, NULL);
1707 }
1708 break;
1709
1710 case AARCH64_OPND_CLASS_ADDRESS:
1711 /* Check writeback. */
1712 switch (opcode->iclass)
1713 {
1714 case ldst_pos:
1715 case ldst_unscaled:
1716 case ldstnapair_offs:
1717 case ldstpair_off:
1718 case ldst_unpriv:
1719 case ldst_altbase:
1720 if (opnd->addr.writeback == 1)
1721 {
1722 set_syntax_error (mismatch_detail, idx,
1723 _("unexpected address writeback"));
1724 return 0;
1725 }
1726 break;
1727 case ldst_imm10:
1728 if (opnd->addr.writeback == 1 && opnd->addr.preind != 1)
1729 {
1730 set_syntax_error (mismatch_detail, idx,
1731 _("unexpected address writeback"));
1732 return 0;
1733 }
1734 break;
1735 case ldst_imm9:
1736 case ldstpair_indexed:
1737 case asisdlsep:
1738 case asisdlsop:
1739 if (opnd->addr.writeback == 0)
1740 {
1741 set_syntax_error (mismatch_detail, idx,
1742 _("address writeback expected"));
1743 return 0;
1744 }
1745 break;
1746 default:
1747 assert (opnd->addr.writeback == 0);
1748 break;
1749 }
1750 switch (type)
1751 {
1752 case AARCH64_OPND_ADDR_ADRP:
1753 if (!validate_adr_reg_for_feature (opcode->operands[0], features,
1754 mismatch_detail))
1755 return 0;
1756 break;
1757
1758 case AARCH64_OPND_A64C_ADDR_SIMM7:
1759 case AARCH64_OPND_CAPADDR_SIMM7:
1760 case AARCH64_OPND_ADDR_SIMM7:
1761 /* Scaled signed 7-bit immediate offset. */
1762 /* Get the size of the data element that is accessed, which may be
1763 different from the source register size,
1764 e.g. in strb/ldrb. */
1765 size = aarch64_get_qualifier_esize (opnd->qualifier);
1766 if (!value_in_range_p (opnd->addr.offset.imm, -64 * size, 63 * size))
1767 {
1768 set_offset_out_of_range_error (mismatch_detail, idx,
1769 -64 * size, 63 * size);
1770 return 0;
1771 }
1772 if (!value_aligned_p (opnd->addr.offset.imm, size))
1773 {
1774 set_unaligned_error (mismatch_detail, idx, size);
1775 return 0;
1776 }
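/* E.g. for a pair of 64-bit registers (size == 8) the accepted offsets
   are -512 to 504 in steps of 8; for 32-bit registers (size == 4) they
   are -256 to 252 in steps of 4.  */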
1777 break;
1778 case AARCH64_OPND_ADDR_OFFSET:
1779 case AARCH64_OPND_CAPADDR_SIMM9:
1780 case AARCH64_OPND_ADDR_SIMM9:
1781 /* Unscaled signed 9-bit immediate offset. */
1782 if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
1783 {
1784 set_offset_out_of_range_error (mismatch_detail, idx, -256, 255);
1785 return 0;
1786 }
1787 break;
1788
1789 case AARCH64_OPND_A64C_ADDR_SIMM9:
1790 /* Scaled signed 9-bit immediate offset. This is currently only
1791 used for capability loads/stores. */
1792 size = aarch64_get_qualifier_esize (opnd->qualifier);
1793 if (!value_in_range_p (opnd->addr.offset.imm, -256 * size, 255 * size))
1794 {
1795 set_offset_out_of_range_error (mismatch_detail, idx,
1796 -256 * size, 255 * size);
1797 return 0;
1798 }
1799 if (!value_aligned_p (opnd->addr.offset.imm, size))
1800 {
1801 set_unaligned_error (mismatch_detail, idx, size);
1802 return 0;
1803 }
1804 break;
1805
1806 case AARCH64_OPND_ADDR_SIMM9_2:
1807 /* Unscaled signed 9-bit immediate offset, which has to be negative
1808 or unaligned. */
1809 size = aarch64_get_qualifier_esize (qualifier);
1810 if ((value_in_range_p (opnd->addr.offset.imm, 0, 255)
1811 && !value_aligned_p (opnd->addr.offset.imm, size))
1812 || value_in_range_p (opnd->addr.offset.imm, -256, -1))
1813 return 1;
1814 set_other_error (mismatch_detail, idx,
1815 _("negative or unaligned offset expected"));
1816 return 0;
1817
1818 case AARCH64_OPND_ADDR_SIMM10:
1819 /* Scaled signed 10-bit immediate offset. */
1820 if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4088))
1821 {
1822 set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4088);
1823 return 0;
1824 }
1825 if (!value_aligned_p (opnd->addr.offset.imm, 8))
1826 {
1827 set_unaligned_error (mismatch_detail, idx, 8);
1828 return 0;
1829 }
1830 break;
1831
1832 case AARCH64_OPND_ADDR_SIMM11:
1833 /* Signed 11-bit immediate offset (multiple of 16). */
1834 if (!value_in_range_p (opnd->addr.offset.imm, -1024, 1008))
1835 {
1836 set_offset_out_of_range_error (mismatch_detail, idx, -1024, 1008);
1837 return 0;
1838 }
1839
1840 if (!value_aligned_p (opnd->addr.offset.imm, 16))
1841 {
1842 set_unaligned_error (mismatch_detail, idx, 16);
1843 return 0;
1844 }
1845 break;
1846
1847 case AARCH64_OPND_ADDR_SIMM13:
1848 /* Signed 13-bit immediate offset (multiple of 16). */
1849 if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4080))
1850 {
1851 set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4080);
1852 return 0;
1853 }
1854
1855 if (!value_aligned_p (opnd->addr.offset.imm, 16))
1856 {
1857 set_unaligned_error (mismatch_detail, idx, 16);
1858 return 0;
1859 }
1860 break;
1861
1862 case AARCH64_OPND_SIMD_ADDR_POST:
1863 /* AdvSIMD load/store multiple structures, post-index. */
1864 assert (idx == 1);
1865 if (opnd->addr.offset.is_reg)
1866 {
1867 if (value_in_range_p (opnd->addr.offset.regno, 0, 30))
1868 return 1;
1869 else
1870 {
1871 set_other_error (mismatch_detail, idx,
1872 _("invalid register offset"));
1873 return 0;
1874 }
1875 }
1876 else
1877 {
1878 const aarch64_opnd_info *prev = &opnds[idx-1];
1879 unsigned num_bytes; /* total number of bytes transferred. */
1880 /* The opcode dependent area stores the number of elements in
1881 each structure to be loaded/stored. */
1882 int is_ld1r = get_opcode_dependent_value (opcode) == 1;
1883 if (opcode->operands[0] == AARCH64_OPND_LVt_AL)
1884 /* Special handling of loading a single structure to all lanes. */
1885 num_bytes = (is_ld1r ? 1 : prev->reglist.num_regs)
1886 * aarch64_get_qualifier_esize (prev->qualifier);
1887 else
1888 num_bytes = prev->reglist.num_regs
1889 * aarch64_get_qualifier_esize (prev->qualifier)
1890 * aarch64_get_qualifier_nelem (prev->qualifier);
1891 if ((int) num_bytes != opnd->addr.offset.imm)
1892 {
1893 set_other_error (mismatch_detail, idx,
1894 _("invalid post-increment amount"));
1895 return 0;
1896 }
1897 }
1898 break;
1899
1900 case AARCH64_OPND_CAPADDR_REGOFF:
1901 case AARCH64_OPND_ADDR_REGOFF:
1902 /* Get the size of the data element that is accessed, which may be
1903 different from that of the source register,
1904 e.g. in strb/ldrb. */
1905 size = aarch64_get_qualifier_esize (opnd->qualifier);
1906 /* It is either no shift or shift by the binary logarithm of SIZE. */
1907 if (opnd->shifter.amount != 0
1908 && opnd->shifter.amount != (int)get_logsz (size))
1909 {
1910 set_other_error (mismatch_detail, idx,
1911 _("invalid shift amount"));
1912 return 0;
1913 }
1914 /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
1915 operators. */
1916 switch (opnd->shifter.kind)
1917 {
1918 case AARCH64_MOD_UXTW:
1919 case AARCH64_MOD_LSL:
1920 case AARCH64_MOD_SXTW:
1921 case AARCH64_MOD_SXTX: break;
1922 default:
1923 set_other_error (mismatch_detail, idx,
1924 _("invalid extend/shift operator"));
1925 return 0;
1926 }
1927 break;
1928
1929 case AARCH64_OPND_CAPADDR_UIMM9:
1930 case AARCH64_OPND_ADDR_UIMM12:
1931 imm = opnd->addr.offset.imm;
1932 unsigned range = 4095;
1933 if (opnd->type == AARCH64_OPND_CAPADDR_UIMM9)
1934 range >>= 3;
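/* CAPADDR_UIMM9 has a 9-bit offset field, so the unscaled upper bound
   drops from 4095 to 511 before being scaled by the element size below. */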
1935
1936 /* Get the size of the data element that is accessed, which may be
1937 different from that of the source register,
1938 e.g. in strb/ldrb. */
1939 size = aarch64_get_qualifier_esize (qualifier);
1940 if (!value_in_range_p (opnd->addr.offset.imm, 0, size * range))
1941 {
1942 set_offset_out_of_range_error (mismatch_detail, idx,
1943 0, size * range);
1944 return 0;
1945 }
1946 if (!value_aligned_p (opnd->addr.offset.imm, size))
1947 {
1948 set_unaligned_error (mismatch_detail, idx, size);
1949 return 0;
1950 }
1951 break;
1952
1953 case AARCH64_OPND_ADDR_PCREL21:
1954 if (!validate_adr_reg_for_feature (opcode->operands[0], features,
1955 mismatch_detail))
1956 return 0;
1957 /* Fall through. */
1958 case AARCH64_OPND_ADDR_PCREL14:
1959 case AARCH64_OPND_ADDR_PCREL17:
1960 case AARCH64_OPND_ADDR_PCREL19:
1961 case AARCH64_OPND_ADDR_PCREL26:
1962 {
1963 int shift_amt = 0;
1964 const aarch64_operand *op = get_operand_from_code (type);
1965 if (operand_need_shift_by_two (op))
1966 shift_amt = 2;
1967 else if (operand_need_shift_by_four (op))
1968 shift_amt = 4;
1969
1970 imm = opnd->imm.value;
1971
1972 if (shift_amt > 0)
1973 {
1974 /* The offset value in a PC-relative (or PCC-relative) branch
1975 instruction is always encoded without the lowest alignment
1976 bits, i.e. 2 bits for PC and 4 bits for PCC. */
1977 if (!value_aligned_p (imm, 1 << shift_amt))
1978 {
1979 set_unaligned_error (mismatch_detail, idx, 1 << shift_amt);
1980 return 0;
1981 }
1982 /* Right shift by 2 so that we can carry out the following check
1983 canonically. */
1984 imm >>= shift_amt;
1985 }
1986 size = get_operand_fields_width (get_operand_from_code (type));
1987 if (!value_fit_signed_field_p (imm, size))
1988 {
1989 set_other_error (mismatch_detail, idx,
1990 _("immediate out of range"));
1991 return 0;
1992 }
1993 break;
1994 }
1995
1996 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
1997 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
1998 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
1999 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
2000 min_value = -8;
2001 max_value = 7;
2002 sve_imm_offset_vl:
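/* Common check for the MUL VL addressing forms: the per-operand multiplier
   (1, 2, 3 or 4) scales both the allowed immediate range and the required
   alignment of the offset. */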
2003 assert (!opnd->addr.offset.is_reg);
2004 assert (opnd->addr.preind);
2005 num = 1 + get_operand_specific_data (&aarch64_operands[type]);
2006 min_value *= num;
2007 max_value *= num;
2008 if ((opnd->addr.offset.imm != 0 && !opnd->shifter.operator_present)
2009 || (opnd->shifter.operator_present
2010 && opnd->shifter.kind != AARCH64_MOD_MUL_VL))
2011 {
2012 set_other_error (mismatch_detail, idx,
2013 _("invalid addressing mode"));
2014 return 0;
2015 }
2016 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
2017 {
2018 set_offset_out_of_range_error (mismatch_detail, idx,
2019 min_value, max_value);
2020 return 0;
2021 }
2022 if (!value_aligned_p (opnd->addr.offset.imm, num))
2023 {
2024 set_unaligned_error (mismatch_detail, idx, num);
2025 return 0;
2026 }
2027 break;
2028
2029 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
2030 min_value = -32;
2031 max_value = 31;
2032 goto sve_imm_offset_vl;
2033
2034 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
2035 min_value = -256;
2036 max_value = 255;
2037 goto sve_imm_offset_vl;
2038
2039 case AARCH64_OPND_SVE_ADDR_RI_U6:
2040 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
2041 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
2042 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
2043 min_value = 0;
2044 max_value = 63;
2045 sve_imm_offset:
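/* Common check for the scaled SVE [<base>, #imm] forms: the immediate is
   scaled by 1 << (operand-specific data) and no shift operator may be
   present. */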
2046 assert (!opnd->addr.offset.is_reg);
2047 assert (opnd->addr.preind);
2048 num = 1 << get_operand_specific_data (&aarch64_operands[type]);
2049 min_value *= num;
2050 max_value *= num;
2051 if (opnd->shifter.operator_present
2052 || opnd->shifter.amount_present)
2053 {
2054 set_other_error (mismatch_detail, idx,
2055 _("invalid addressing mode"));
2056 return 0;
2057 }
2058 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
2059 {
2060 set_offset_out_of_range_error (mismatch_detail, idx,
2061 min_value, max_value);
2062 return 0;
2063 }
2064 if (!value_aligned_p (opnd->addr.offset.imm, num))
2065 {
2066 set_unaligned_error (mismatch_detail, idx, num);
2067 return 0;
2068 }
2069 break;
2070
2071 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
2072 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
2073 min_value = -8;
2074 max_value = 7;
2075 goto sve_imm_offset;
2076
2077 case AARCH64_OPND_SVE_ADDR_ZX:
2078 /* Everything is already ensured by parse_operands or
2079 aarch64_ext_sve_addr_rr_lsl (because this is a very specific
2080 argument type). */
2081 assert (opnd->addr.offset.is_reg);
2082 assert (opnd->addr.preind);
2083 assert ((aarch64_operands[type].flags & OPD_F_NO_ZR) == 0);
2084 assert (opnd->shifter.kind == AARCH64_MOD_LSL);
2085 assert (opnd->shifter.operator_present == 0);
2086 break;
2087
2088 case AARCH64_OPND_SVE_ADDR_R:
2089 case AARCH64_OPND_SVE_ADDR_RR:
2090 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
2091 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
2092 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
2093 case AARCH64_OPND_SVE_ADDR_RX:
2094 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
2095 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
2096 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
2097 case AARCH64_OPND_SVE_ADDR_RZ:
2098 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
2099 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
2100 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
2101 modifiers = 1 << AARCH64_MOD_LSL;
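/* MODIFIERS is a bitmask of the extend/shift operators accepted by this
   addressing form; the XTW variants below accept SXTW/UXTW instead of LSL. */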
2102 sve_rr_operand:
2103 assert (opnd->addr.offset.is_reg);
2104 assert (opnd->addr.preind);
2105 if ((aarch64_operands[type].flags & OPD_F_NO_ZR) != 0
2106 && opnd->addr.offset.regno == 31)
2107 {
2108 set_other_error (mismatch_detail, idx,
2109 _("index register xzr is not allowed"));
2110 return 0;
2111 }
2112 if (((1 << opnd->shifter.kind) & modifiers) == 0
2113 || (opnd->shifter.amount
2114 != get_operand_specific_data (&aarch64_operands[type])))
2115 {
2116 set_other_error (mismatch_detail, idx,
2117 _("invalid addressing mode"));
2118 return 0;
2119 }
2120 break;
2121
2122 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
2123 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
2124 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
2125 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
2126 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
2127 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
2128 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
2129 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
2130 modifiers = (1 << AARCH64_MOD_SXTW) | (1 << AARCH64_MOD_UXTW);
2131 goto sve_rr_operand;
2132
2133 case AARCH64_OPND_SVE_ADDR_ZI_U5:
2134 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
2135 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
2136 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
2137 min_value = 0;
2138 max_value = 31;
2139 goto sve_imm_offset;
2140
2141 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
2142 modifiers = 1 << AARCH64_MOD_LSL;
2143 sve_zz_operand:
2144 assert (opnd->addr.offset.is_reg);
2145 assert (opnd->addr.preind);
2146 if (((1 << opnd->shifter.kind) & modifiers) == 0
2147 || opnd->shifter.amount < 0
2148 || opnd->shifter.amount > 3)
2149 {
2150 set_other_error (mismatch_detail, idx,
2151 _("invalid addressing mode"));
2152 return 0;
2153 }
2154 break;
2155
2156 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
2157 modifiers = (1 << AARCH64_MOD_SXTW);
2158 goto sve_zz_operand;
2159
2160 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
2161 modifiers = 1 << AARCH64_MOD_UXTW;
2162 goto sve_zz_operand;
2163
2164 default:
2165 break;
2166 }
2167 break;
2168
2169 case AARCH64_OPND_CLASS_SIMD_REGLIST:
2170 if (type == AARCH64_OPND_LEt)
2171 {
2172 /* Get the upper bound for the element index. */
2173 num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
2174 if (!value_in_range_p (opnd->reglist.index, 0, num))
2175 {
2176 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
2177 return 0;
2178 }
2179 }
2180 /* The opcode dependent area stores the number of elements in
2181 each structure to be loaded/stored. */
2182 num = get_opcode_dependent_value (opcode);
2183 switch (type)
2184 {
2185 case AARCH64_OPND_LVt:
2186 assert (num >= 1 && num <= 4);
2187 /* Unless the instruction is LD1/ST1, the number of registers must
2188 equal the number of structure elements. */
2189 if (num != 1 && opnd->reglist.num_regs != num)
2190 {
2191 set_reg_list_error (mismatch_detail, idx, num);
2192 return 0;
2193 }
2194 break;
2195 case AARCH64_OPND_LVt_AL:
2196 case AARCH64_OPND_LEt:
2197 assert (num >= 1 && num <= 4);
2198 /* The number of registers should be equal to that of the structure
2199 elements. */
2200 if (opnd->reglist.num_regs != num)
2201 {
2202 set_reg_list_error (mismatch_detail, idx, num);
2203 return 0;
2204 }
2205 break;
2206 default:
2207 break;
2208 }
2209 break;
2210
2211 case AARCH64_OPND_CLASS_IMMEDIATE:
2212 /* Constraint check on immediate operand. */
2213 imm = opnd->imm.value;
2214 /* E.g. imm_0_31 constrains value to be 0..31. */
2215 if (qualifier_value_in_range_constraint_p (qualifier)
2216 && !value_in_range_p (imm, get_lower_bound (qualifier),
2217 get_upper_bound (qualifier)))
2218 {
2219 set_imm_out_of_range_error (mismatch_detail, idx,
2220 get_lower_bound (qualifier),
2221 get_upper_bound (qualifier));
2222 return 0;
2223 }
2224
2225 switch (type)
2226 {
2227 case AARCH64_OPND_A64C_IMM6_EXT:
2228 if (opnd->shifter.amount)
2229 {
2230 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2231 {
2232 set_other_error (mismatch_detail, idx,
2233 _("invalid shift operator"));
2234 return 0;
2235 }
2236 if (opnd->shifter.amount != 4)
2237 {
2238 set_other_error (mismatch_detail, idx,
2239 _("shift amount must be 4"));
2240 return 0;
2241 }
2242 }
2243 if (!value_fit_unsigned_field_p (opnd->imm.value, 6))
2244 {
2245 set_other_error (mismatch_detail, idx,
2246 _("immediate out of range"));
2247 return 0;
2248 }
2249 break;
2250
2251 case AARCH64_OPND_AIMM:
2252 case AARCH64_OPND_A64C_AIMM:
2253 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2254 {
2255 set_other_error (mismatch_detail, idx,
2256 _("invalid shift operator"));
2257 return 0;
2258 }
2259 if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12)
2260 {
2261 set_other_error (mismatch_detail, idx,
2262 _("shift amount must be 0 or 12"));
2263 return 0;
2264 }
2265 if (!value_fit_unsigned_field_p (opnd->imm.value, 12))
2266 {
2267 set_other_error (mismatch_detail, idx,
2268 _("immediate out of range"));
2269 return 0;
2270 }
2271 break;
2272
2273 case AARCH64_OPND_HALF:
2274 assert (idx == 1 && opnds[0].type == AARCH64_OPND_Rd);
2275 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2276 {
2277 set_other_error (mismatch_detail, idx,
2278 _("invalid shift operator"));
2279 return 0;
2280 }
2281 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2282 if (!value_aligned_p (opnd->shifter.amount, 16))
2283 {
2284 set_other_error (mismatch_detail, idx,
2285 _("shift amount must be a multiple of 16"));
2286 return 0;
2287 }
2288 if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16))
2289 {
2290 set_sft_amount_out_of_range_error (mismatch_detail, idx,
2291 0, size * 8 - 16);
2292 return 0;
2293 }
2294 if (opnd->imm.value < 0)
2295 {
2296 set_other_error (mismatch_detail, idx,
2297 _("negative immediate value not allowed"));
2298 return 0;
2299 }
2300 if (!value_fit_unsigned_field_p (opnd->imm.value, 16))
2301 {
2302 set_other_error (mismatch_detail, idx,
2303 _("immediate out of range"));
2304 return 0;
2305 }
2306 break;
2307
2308 case AARCH64_OPND_IMM_MOV:
2309 {
2310 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2311 imm = opnd->imm.value;
2312 assert (idx == 1);
2313 switch (opcode->op)
2314 {
2315 case OP_MOV_IMM_WIDEN:
2316 imm = ~imm;
2317 /* Fall through. */
2318 case OP_MOV_IMM_WIDE:
2319 if (!aarch64_wide_constant_p (imm, esize == 4, NULL))
2320 {
2321 set_other_error (mismatch_detail, idx,
2322 _("immediate out of range"));
2323 return 0;
2324 }
2325 break;
2326 case OP_MOV_IMM_LOG:
2327 if (!aarch64_logical_immediate_p (imm, esize, NULL))
2328 {
2329 set_other_error (mismatch_detail, idx,
2330 _("immediate out of range"));
2331 return 0;
2332 }
2333 break;
2334 default:
2335 assert (0);
2336 return 0;
2337 }
2338 }
2339 break;
2340
2341 case AARCH64_OPND_NZCV:
2342 case AARCH64_OPND_CCMP_IMM:
2343 case AARCH64_OPND_EXCEPTION:
2344 case AARCH64_OPND_UNDEFINED:
2345 case AARCH64_OPND_TME_UIMM16:
2346 case AARCH64_OPND_UIMM4:
2347 case AARCH64_OPND_UIMM4_ADDG:
2348 case AARCH64_OPND_UIMM7:
2349 case AARCH64_OPND_UIMM3_OP1:
2350 case AARCH64_OPND_UIMM3_OP2:
2351 case AARCH64_OPND_SVE_UIMM3:
2352 case AARCH64_OPND_SVE_UIMM7:
2353 case AARCH64_OPND_SVE_UIMM8:
2354 case AARCH64_OPND_SVE_UIMM8_53:
2355 size = get_operand_fields_width (get_operand_from_code (type));
2356 assert (size < 32);
2357 if (!value_fit_unsigned_field_p (opnd->imm.value, size))
2358 {
2359 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2360 (1u << size) - 1);
2361 return 0;
2362 }
2363 break;
2364
2365 case AARCH64_OPND_UIMM10:
2366 /* Scaled unsigned 10-bit immediate offset. */
2367 if (!value_in_range_p (opnd->imm.value, 0, 1008))
2368 {
2369 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1008);
2370 return 0;
2371 }
2372
2373 if (!value_aligned_p (opnd->imm.value, 16))
2374 {
2375 set_unaligned_error (mismatch_detail, idx, 16);
2376 return 0;
2377 }
2378 break;
2379
2380 case AARCH64_OPND_SIMM5:
2381 case AARCH64_OPND_SVE_SIMM5:
2382 case AARCH64_OPND_SVE_SIMM5B:
2383 case AARCH64_OPND_SVE_SIMM6:
2384 case AARCH64_OPND_SVE_SIMM8:
2385 size = get_operand_fields_width (get_operand_from_code (type));
2386 assert (size < 32);
2387 if (!value_fit_signed_field_p (opnd->imm.value, size))
2388 {
2389 set_imm_out_of_range_error (mismatch_detail, idx,
2390 -(1 << (size - 1)),
2391 (1 << (size - 1)) - 1);
2392 return 0;
2393 }
2394 break;
2395
2396 case AARCH64_OPND_WIDTH:
2397 assert (idx > 1 && opnds[idx-1].type == AARCH64_OPND_IMM
2398 && opnds[0].type == AARCH64_OPND_Rd);
2399 size = get_upper_bound (qualifier);
2400 if (opnd->imm.value + opnds[idx-1].imm.value > size)
2401 /* lsb+width <= reg.size */
2402 {
2403 set_imm_out_of_range_error (mismatch_detail, idx, 1,
2404 size - opnds[idx-1].imm.value);
2405 return 0;
2406 }
2407 break;
2408
2409 case AARCH64_OPND_LIMM:
2410 case AARCH64_OPND_SVE_LIMM:
2411 {
2412 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2413 uint64_t uimm = opnd->imm.value;
2414 if (opcode->op == OP_BIC)
2415 uimm = ~uimm;
2416 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2417 {
2418 set_other_error (mismatch_detail, idx,
2419 _("immediate out of range"));
2420 return 0;
2421 }
2422 }
2423 break;
2424
2425 case AARCH64_OPND_A64C_IMMV4:
2426 if (opnd->imm.value != 4)
2427 {
2428 set_other_error (mismatch_detail, idx,
2429 _("immediate #4 expected"));
2430 return 0;
2431 }
2432 break;
2433
2434 case AARCH64_OPND_IMM0:
2435 case AARCH64_OPND_FPIMM0:
2436 if (opnd->imm.value != 0)
2437 {
2438 set_other_error (mismatch_detail, idx,
2439 _("immediate zero expected"));
2440 return 0;
2441 }
2442 break;
2443
2444 case AARCH64_OPND_IMM_ROT1:
2445 case AARCH64_OPND_IMM_ROT2:
2446 case AARCH64_OPND_SVE_IMM_ROT2:
2447 if (opnd->imm.value != 0
2448 && opnd->imm.value != 90
2449 && opnd->imm.value != 180
2450 && opnd->imm.value != 270)
2451 {
2452 set_other_error (mismatch_detail, idx,
2453 _("rotate expected to be 0, 90, 180 or 270"));
2454 return 0;
2455 }
2456 break;
2457
2458 case AARCH64_OPND_IMM_ROT3:
2459 case AARCH64_OPND_SVE_IMM_ROT1:
2460 case AARCH64_OPND_SVE_IMM_ROT3:
2461 if (opnd->imm.value != 90 && opnd->imm.value != 270)
2462 {
2463 set_other_error (mismatch_detail, idx,
2464 _("rotate expected to be 90 or 270"));
2465 return 0;
2466 }
2467 break;
2468
2469 case AARCH64_OPND_SHLL_IMM:
2470 assert (idx == 2);
2471 size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2472 if (opnd->imm.value != size)
2473 {
2474 set_other_error (mismatch_detail, idx,
2475 _("invalid shift amount"));
2476 return 0;
2477 }
2478 break;
2479
2480 case AARCH64_OPND_IMM_VLSL:
2481 size = aarch64_get_qualifier_esize (qualifier);
2482 if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1))
2483 {
2484 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2485 size * 8 - 1);
2486 return 0;
2487 }
2488 break;
2489
2490 case AARCH64_OPND_IMM_VLSR:
2491 size = aarch64_get_qualifier_esize (qualifier);
2492 if (!value_in_range_p (opnd->imm.value, 1, size * 8))
2493 {
2494 set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8);
2495 return 0;
2496 }
2497 break;
2498
2499 case AARCH64_OPND_SIMD_IMM:
2500 case AARCH64_OPND_SIMD_IMM_SFT:
2501 /* Qualifier check. */
2502 switch (qualifier)
2503 {
2504 case AARCH64_OPND_QLF_LSL:
2505 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2506 {
2507 set_other_error (mismatch_detail, idx,
2508 _("invalid shift operator"));
2509 return 0;
2510 }
2511 break;
2512 case AARCH64_OPND_QLF_MSL:
2513 if (opnd->shifter.kind != AARCH64_MOD_MSL)
2514 {
2515 set_other_error (mismatch_detail, idx,
2516 _("invalid shift operator"));
2517 return 0;
2518 }
2519 break;
2520 case AARCH64_OPND_QLF_NIL:
2521 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2522 {
2523 set_other_error (mismatch_detail, idx,
2524 _("shift is not permitted"));
2525 return 0;
2526 }
2527 break;
2528 default:
2529 assert (0);
2530 return 0;
2531 }
2532 /* Is the immediate valid? */
2533 assert (idx == 1);
2534 if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8)
2535 {
2536 /* uimm8 or simm8 */
2537 if (!value_in_range_p (opnd->imm.value, -128, 255))
2538 {
2539 set_imm_out_of_range_error (mismatch_detail, idx, -128, 255);
2540 return 0;
2541 }
2542 }
2543 else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0)
2544 {
2545 /* uimm64 is not
2546 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
2547 ffffffffgggggggghhhhhhhh'. */
2548 set_other_error (mismatch_detail, idx,
2549 _("invalid value for immediate"));
2550 return 0;
2551 }
2552 /* Is the shift amount valid? */
2553 switch (opnd->shifter.kind)
2554 {
2555 case AARCH64_MOD_LSL:
2556 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2557 if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8))
2558 {
2559 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0,
2560 (size - 1) * 8);
2561 return 0;
2562 }
2563 if (!value_aligned_p (opnd->shifter.amount, 8))
2564 {
2565 set_unaligned_error (mismatch_detail, idx, 8);
2566 return 0;
2567 }
2568 break;
2569 case AARCH64_MOD_MSL:
2570 /* Only 8 and 16 are valid shift amounts. */
2571 if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16)
2572 {
2573 set_other_error (mismatch_detail, idx,
2574 _("shift amount must be 8 or 16"));
2575 return 0;
2576 }
2577 break;
2578 default:
2579 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2580 {
2581 set_other_error (mismatch_detail, idx,
2582 _("invalid shift operator"));
2583 return 0;
2584 }
2585 break;
2586 }
2587 break;
2588
2589 case AARCH64_OPND_FPIMM:
2590 case AARCH64_OPND_SIMD_FPIMM:
2591 case AARCH64_OPND_SVE_FPIMM8:
2592 if (opnd->imm.is_fp == 0)
2593 {
2594 set_other_error (mismatch_detail, idx,
2595 _("floating-point immediate expected"));
2596 return 0;
2597 }
2598 /* The value is expected to be an 8-bit floating-point constant with
2599 sign, 3-bit exponent and normalized 4 bits of precision, encoded
2600 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
2601 instruction). */
2602 if (!value_in_range_p (opnd->imm.value, 0, 255))
2603 {
2604 set_other_error (mismatch_detail, idx,
2605 _("immediate out of range"));
2606 return 0;
2607 }
2608 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2609 {
2610 set_other_error (mismatch_detail, idx,
2611 _("invalid shift operator"));
2612 return 0;
2613 }
2614 break;
2615
2616 case AARCH64_OPND_SVE_AIMM:
2617 min_value = 0;
2618 sve_aimm:
2619 assert (opnd->shifter.kind == AARCH64_MOD_LSL);
2620 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2621 mask = ~((uint64_t) -1 << (size * 4) << (size * 4));
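/* MASK now covers one element of SIZE bytes; the shift is split in two so
   that SIZE == 8 does not require an (undefined) shift by 64 bits. */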
2622 uvalue = opnd->imm.value;
2623 shift = opnd->shifter.amount;
2624 if (size == 1)
2625 {
2626 if (shift != 0)
2627 {
2628 set_other_error (mismatch_detail, idx,
2629 _("no shift amount allowed for"
2630 " 8-bit constants"));
2631 return 0;
2632 }
2633 }
2634 else
2635 {
2636 if (shift != 0 && shift != 8)
2637 {
2638 set_other_error (mismatch_detail, idx,
2639 _("shift amount must be 0 or 8"));
2640 return 0;
2641 }
2642 if (shift == 0 && (uvalue & 0xff) == 0)
2643 {
2644 shift = 8;
2645 uvalue = (int64_t) uvalue / 256;
2646 }
2647 }
2648 mask >>= shift;
2649 if ((uvalue & mask) != uvalue && (uvalue | ~mask) != uvalue)
2650 {
2651 set_other_error (mismatch_detail, idx,
2652 _("immediate too big for element size"));
2653 return 0;
2654 }
2655 uvalue = (uvalue - min_value) & mask;
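/* Remove the bias (MIN_VALUE is -128 for the signed SVE_ASIMM form, 0
   otherwise) before checking that the result fits the 8-bit immediate
   field. */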
2656 if (uvalue > 0xff)
2657 {
2658 set_other_error (mismatch_detail, idx,
2659 _("invalid arithmetic immediate"));
2660 return 0;
2661 }
2662 break;
2663
2664 case AARCH64_OPND_SVE_ASIMM:
2665 min_value = -128;
2666 goto sve_aimm;
2667
2668 case AARCH64_OPND_SVE_I1_HALF_ONE:
2669 assert (opnd->imm.is_fp);
2670 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x3f800000)
2671 {
2672 set_other_error (mismatch_detail, idx,
2673 _("floating-point value must be 0.5 or 1.0"));
2674 return 0;
2675 }
2676 break;
2677
2678 case AARCH64_OPND_SVE_I1_HALF_TWO:
2679 assert (opnd->imm.is_fp);
2680 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x40000000)
2681 {
2682 set_other_error (mismatch_detail, idx,
2683 _("floating-point value must be 0.5 or 2.0"));
2684 return 0;
2685 }
2686 break;
2687
2688 case AARCH64_OPND_SVE_I1_ZERO_ONE:
2689 assert (opnd->imm.is_fp);
2690 if (opnd->imm.value != 0 && opnd->imm.value != 0x3f800000)
2691 {
2692 set_other_error (mismatch_detail, idx,
2693 _("floating-point value must be 0.0 or 1.0"));
2694 return 0;
2695 }
2696 break;
2697
2698 case AARCH64_OPND_SVE_INV_LIMM:
2699 {
2700 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2701 uint64_t uimm = ~opnd->imm.value;
2702 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2703 {
2704 set_other_error (mismatch_detail, idx,
2705 _("immediate out of range"));
2706 return 0;
2707 }
2708 }
2709 break;
2710
2711 case AARCH64_OPND_SVE_LIMM_MOV:
2712 {
2713 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2714 uint64_t uimm = opnd->imm.value;
2715 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2716 {
2717 set_other_error (mismatch_detail, idx,
2718 _("immediate out of range"));
2719 return 0;
2720 }
2721 if (!aarch64_sve_dupm_mov_immediate_p (uimm, esize))
2722 {
2723 set_other_error (mismatch_detail, idx,
2724 _("invalid replicated MOV immediate"));
2725 return 0;
2726 }
2727 }
2728 break;
2729
2730 case AARCH64_OPND_SVE_PATTERN_SCALED:
2731 assert (opnd->shifter.kind == AARCH64_MOD_MUL);
2732 if (!value_in_range_p (opnd->shifter.amount, 1, 16))
2733 {
2734 set_multiplier_out_of_range_error (mismatch_detail, idx, 1, 16);
2735 return 0;
2736 }
2737 break;
2738
2739 case AARCH64_OPND_SVE_SHLIMM_PRED:
2740 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
2741 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
2742 size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2743 if (!value_in_range_p (opnd->imm.value, 0, 8 * size - 1))
2744 {
2745 set_imm_out_of_range_error (mismatch_detail, idx,
2746 0, 8 * size - 1);
2747 return 0;
2748 }
2749 break;
2750
2751 case AARCH64_OPND_SVE_SHRIMM_PRED:
2752 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
2753 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
2754 num = (type == AARCH64_OPND_SVE_SHRIMM_UNPRED_22) ? 2 : 1;
2755 size = aarch64_get_qualifier_esize (opnds[idx - num].qualifier);
2756 if (!value_in_range_p (opnd->imm.value, 1, 8 * size))
2757 {
2758 set_imm_out_of_range_error (mismatch_detail, idx, 1, 8*size);
2759 return 0;
2760 }
2761 break;
2762
2763 default:
2764 break;
2765 }
2766 break;
2767
2768 case AARCH64_OPND_CLASS_SYSTEM:
2769 switch (type)
2770 {
2771 case AARCH64_OPND_PSTATEFIELD:
2772 assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4);
2773 /* MSR UAO, #uimm4
2774 MSR PAN, #uimm4
2775 MSR SSBS,#uimm4
2776 The immediate must be #0 or #1. */
2777 if ((opnd->pstatefield == 0x03 /* UAO. */
2778 || opnd->pstatefield == 0x04 /* PAN. */
2779 || opnd->pstatefield == 0x19 /* SSBS. */
2780 || opnd->pstatefield == 0x1a) /* DIT. */
2781 && opnds[1].imm.value > 1)
2782 {
2783 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
2784 return 0;
2785 }
2786 /* MSR SPSel, #uimm4
2787 Uses uimm4 as a control value to select the stack pointer: if
2788 bit 0 is set it selects the current exception level's stack
2789 pointer; if bit 0 is clear it selects the shared EL0 stack pointer.
2790 Bits 1 to 3 of uimm4 are reserved and should be zero. */
2791 if (opnd->pstatefield == 0x05 /* spsel */ && opnds[1].imm.value > 1)
2792 {
2793 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
2794 return 0;
2795 }
2796 break;
2797 case AARCH64_OPND_SYSREG:
2798 {
2799 bfd_boolean part_match = FALSE, full_match = FALSE;
2800 unsigned flags = 0;
2801 int regno_idx = idx == 0 ? 1 : 0;
2802 enum aarch64_opnd reg_type = opcode->operands[regno_idx];
2803 size_t i;
2804
2805 if (reg_type == AARCH64_OPND_Cat)
2806 flags = F_CAPREG;
2807
2808 for (i = 0; aarch64_sys_regs[i].name; ++i)
2809 if (aarch64_sys_regs[i].value == opnd->sysreg.value)
2810 {
2811 part_match = TRUE;
2812 if ((aarch64_sys_regs[i].flags & F_CAPREG) == flags)
2813 {
2814 full_match = TRUE;
2815 break;
2816 }
2817 }
2818
2819 /* Matching values but mismatched target register. */
2820 if (part_match && !full_match)
2821 {
2822 if (flags & F_CAPREG)
2823 {
2824 set_other_error (mismatch_detail, regno_idx,
2825 _("sysreg expects capability register"));
2826 return 0;
2827 }
2828 else
2829 {
2830 set_other_error (mismatch_detail, regno_idx,
2831 _("sysreg expects integer register"));
2832 return 0;
2833 }
2834 }
2835 }
2836 break;
2837 case AARCH64_OPND_SYSREG_DC:
2838 case AARCH64_OPND_SYSREG_IC:
2839 {
2840 enum aarch64_operand_class class;
2841
2842 if (!opnds[1].present)
2843 break;
2844
2845 class = aarch64_get_operand_class(opcode->operands[1]);
2846
2847 if (AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_C64)
2848 && class == AARCH64_OPND_CLASS_INT_REG)
2849 {
2850 set_other_error (mismatch_detail, 1,
2851 _("capability register expected"));
2852 return 0;
2853 }
2854
2855 if (!AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_C64)
2856 && class == AARCH64_OPND_CLASS_CAP_REG)
2857 {
2858 set_other_error (mismatch_detail, 1,
2859 _("integer register expected"));
2860 return 0;
2861 }
2862 }
2863 break;
2864 default:
2865 break;
2866 }
2867 break;
2868
2869 case AARCH64_OPND_CLASS_SIMD_ELEMENT:
2870 /* Get the upper bound for the element index. */
2871 if (opcode->op == OP_FCMLA_ELEM)
2872 /* FCMLA index range depends on the vector size of other operands
2873 and is halved because complex numbers take two elements. */
2874 num = aarch64_get_qualifier_nelem (opnds[0].qualifier)
2875 * aarch64_get_qualifier_esize (opnds[0].qualifier) / 2;
2876 else
2877 num = 16;
2878 num = num / aarch64_get_qualifier_esize (qualifier) - 1;
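/* NUM is now the maximum element index for this qualifier, e.g. 15 for .B,
   7 for .H, 3 for .S and 1 for .D in the non-FCMLA case. */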
2879 assert (aarch64_get_qualifier_nelem (qualifier) == 1);
2880
2881 /* Index out-of-range. */
2882 if (!value_in_range_p (opnd->reglane.index, 0, num))
2883 {
2884 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
2885 return 0;
2886 }
2887 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
2888 <Vm> Is the vector register (V0-V31) or (V0-V15), whose
2889 number is encoded in "size:M:Rm":
2890 size <Vm>
2891 00 RESERVED
2892 01 0:Rm
2893 10 M:Rm
2894 11 RESERVED */
2895 if (type == AARCH64_OPND_Em16 && qualifier == AARCH64_OPND_QLF_S_H
2896 && !value_in_range_p (opnd->reglane.regno, 0, 15))
2897 {
2898 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
2899 return 0;
2900 }
2901 break;
2902
2903 case AARCH64_OPND_CLASS_MODIFIED_REG:
2904 assert (idx == 1 || idx == 2);
2905 switch (type)
2906 {
2907 case AARCH64_OPND_A64C_Rm_EXT:
2908 case AARCH64_OPND_Rm_EXT:
2909 if (!aarch64_extend_operator_p (opnd->shifter.kind)
2910 && opnd->shifter.kind != AARCH64_MOD_LSL)
2911 {
2912 set_other_error (mismatch_detail, idx,
2913 _("extend operator expected"));
2914 return 0;
2915 }
2916 /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
2917 (i.e. SP), in which case it defaults to LSL. The LSL alias is
2918 only valid when "Rd" or "Rn" is '11111', and is preferred in that
2919 case. */
2920 if (type == AARCH64_OPND_Rm_EXT
2921 && !aarch64_stack_pointer_p (opnds + 0)
2922 && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1)))
2923 {
2924 if (!opnd->shifter.operator_present)
2925 {
2926 set_other_error (mismatch_detail, idx,
2927 _("missing extend operator"));
2928 return 0;
2929 }
2930 else if (opnd->shifter.kind == AARCH64_MOD_LSL)
2931 {
2932 set_other_error (mismatch_detail, idx,
2933 _("'LSL' operator not allowed"));
2934 return 0;
2935 }
2936 }
2937 assert (opnd->shifter.operator_present /* Default to LSL. */
2938 || opnd->shifter.kind == AARCH64_MOD_LSL);
2939 if (!value_in_range_p (opnd->shifter.amount, 0, 4))
2940 {
2941 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4);
2942 return 0;
2943 }
2944 /* In the 64-bit form, the final register operand is written as Wm
2945 for all but the (possibly omitted) UXTX/LSL and SXTX
2946 operators.
2947 N.B. GAS allows X register to be used with any operator as a
2948 programming convenience. */
2949 if (qualifier == AARCH64_OPND_QLF_X
2950 && opnd->shifter.kind != AARCH64_MOD_LSL
2951 && opnd->shifter.kind != AARCH64_MOD_UXTX
2952 && opnd->shifter.kind != AARCH64_MOD_SXTX)
2953 {
2954 set_other_error (mismatch_detail, idx, _("W register expected"));
2955 return 0;
2956 }
2957 break;
2958
2959 case AARCH64_OPND_Rm_SFT:
2960 /* ROR is not available to the shifted register operand in
2961 arithmetic instructions. */
2962 if (!aarch64_shift_operator_p (opnd->shifter.kind))
2963 {
2964 set_other_error (mismatch_detail, idx,
2965 _("shift operator expected"));
2966 return 0;
2967 }
2968 if (opnd->shifter.kind == AARCH64_MOD_ROR
2969 && opcode->iclass != log_shift)
2970 {
2971 set_other_error (mismatch_detail, idx,
2972 _("'ROR' operator not allowed"));
2973 return 0;
2974 }
2975 num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63;
2976 if (!value_in_range_p (opnd->shifter.amount, 0, num))
2977 {
2978 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num);
2979 return 0;
2980 }
2981 break;
2982
2983 default:
2984 break;
2985 }
2986 break;
2987
2988 default:
2989 break;
2990 }
2991
2992 return 1;
2993 }
2994
2995 /* Main entrypoint for the operand constraint checking.
2996
2997 Return 1 if operands of *INST meet the constraint applied by the operand
2998 codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
2999 not NULL, return the detail of the error in *MISMATCH_DETAIL. N.B. when
3000 adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
3001 with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
3002 error kind when it is notified that an instruction does not pass the check).
3003
3004 Un-determined operand qualifiers may get established during the process. */
3005
3006 int
3007 aarch64_match_operands_constraint (aarch64_feature_set features,
3008 aarch64_inst *inst,
3009 aarch64_operand_error *mismatch_detail)
3010 {
3011 int i;
3012
3013 DEBUG_TRACE ("enter");
3014
3015 /* Check for cases where a source register needs to be the same as the
3016 destination register. Do this before matching qualifiers since if
3017 an instruction has both invalid tying and invalid qualifiers,
3018 the error about qualifiers would suggest several alternative
3019 instructions that also have invalid tying. */
3020 i = inst->opcode->tied_operand;
3021 if (i > 0 && (inst->operands[0].reg.regno != inst->operands[i].reg.regno))
3022 {
3023 if (mismatch_detail)
3024 {
3025 mismatch_detail->kind = AARCH64_OPDE_UNTIED_OPERAND;
3026 mismatch_detail->index = i;
3027 mismatch_detail->error = NULL;
3028 }
3029 return 0;
3030 }
3031
3032 /* Match operands' qualifier.
3033 *INST has already had qualifiers established for some, if not all, of
3034 its operands; we need to find out whether these established
3035 qualifiers match one of the qualifier sequences in
3036 INST->OPCODE->QUALIFIERS_LIST. If yes, we will assign each operand
3037 with the corresponding qualifier in such a sequence.
3038 Only basic operand constraint checking is done here; the more thorough
3039 constraint checking will be carried out by operand_general_constraint_met_p,
3040 which has to be called after this in order to get all of the operands'
3041 qualifiers established. */
3042 if (match_operands_qualifier (inst, TRUE /* update_p */) == 0)
3043 {
3044 DEBUG_TRACE ("FAIL on operand qualifier matching");
3045 if (mismatch_detail)
3046 {
3047 /* Return an error type to indicate that it is a qualifier
3048 matching failure; we don't care about which operand as there
3049 is enough information in the opcode table to reproduce it. */
3050 mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
3051 mismatch_detail->index = -1;
3052 mismatch_detail->error = NULL;
3053 }
3054 return 0;
3055 }
3056
3057 /* Match operands' constraint. */
3058 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
3059 {
3060 enum aarch64_opnd type = inst->opcode->operands[i];
3061 if (type == AARCH64_OPND_NIL)
3062 break;
3063 if (inst->operands[i].skip)
3064 {
3065 DEBUG_TRACE ("skip the incomplete operand %d", i);
3066 continue;
3067 }
3068 if (operand_general_constraint_met_p (features, inst->operands, i, type,
3069 inst->opcode,
3070 mismatch_detail) == 0)
3071 {
3072 DEBUG_TRACE ("FAIL on operand %d", i);
3073 return 0;
3074 }
3075 }
3076
3077 DEBUG_TRACE ("PASS");
3078
3079 return 1;
3080 }
3081
3082 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
3083 Also updates the TYPE of each INST->OPERANDS with the corresponding
3084 value of OPCODE->OPERANDS.
3085
3086 Note that some operand qualifiers may need to be manually cleared by
3087 the caller before it goes on to call aarch64_opcode_encode; doing
3088 so helps the qualifier matching facilities work
3089 properly. */
3090
3091 const aarch64_opcode*
3092 aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode)
3093 {
3094 int i;
3095 const aarch64_opcode *old = inst->opcode;
3096
3097 inst->opcode = opcode;
3098
3099 /* Update the operand types. */
3100 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
3101 {
3102 inst->operands[i].type = opcode->operands[i];
3103 if (opcode->operands[i] == AARCH64_OPND_NIL)
3104 break;
3105 }
3106
3107 DEBUG_TRACE ("replace %s with %s", old->name, opcode->name);
3108
3109 return old;
3110 }
3111
3112 int
3113 aarch64_operand_index (const enum aarch64_opnd *operands, enum aarch64_opnd operand)
3114 {
3115 int i;
3116 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
3117 if (operands[i] == operand)
3118 return i;
3119 else if (operands[i] == AARCH64_OPND_NIL)
3120 break;
3121 return -1;
3122 }
3123 \f
3124 /* R0...R30, followed by FOR31. */
3125 #define BANK(R, FOR31) \
3126 { R (0), R (1), R (2), R (3), R (4), R (5), R (6), R (7), \
3127 R (8), R (9), R (10), R (11), R (12), R (13), R (14), R (15), \
3128 R (16), R (17), R (18), R (19), R (20), R (21), R (22), R (23), \
3129 R (24), R (25), R (26), R (27), R (28), R (29), R (30), FOR31 }
3130 /* [0][0] 32-bit integer regs with sp Wn
3131 [0][1] 64-bit integer regs with sp Xn sf=1
3132 [0][2] 129-bit cap regs with sp Cn
3133 [1][0] 32-bit integer regs with #0 Wn
3134 [1][1] 64-bit integer regs with #0 Xn sf=1
3135 [1][2] 129-bit cap regs with #0 Cn */
3136 static const char *int_reg[2][3][32] = {
3137 #define R32(X) "w" #X
3138 #define R64(X) "x" #X
3139 #define CAP(X) "c" #X
3140 { BANK (R32, "wsp"), BANK (R64, "sp"), BANK (CAP, "csp") },
3141 { BANK (R32, "wzr"), BANK (R64, "xzr"), BANK (CAP, "czr") }
3142 #undef CAP
3143 #undef R64
3144 #undef R32
3145 };
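/* For example, int_reg[0][1][31] is "sp" while int_reg[1][1][31] is "xzr". */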
3146
3147 /* Names of the SVE vector registers, first with .S suffixes,
3148 then with .D suffixes. */
3149
3150 static const char *sve_reg[2][32] = {
3151 #define ZS(X) "z" #X ".s"
3152 #define ZD(X) "z" #X ".d"
3153 BANK (ZS, ZS (31)), BANK (ZD, ZD (31))
3154 #undef ZD
3155 #undef ZS
3156 };
3157 #undef BANK
3158
3159 /* Return the integer register name.
3160 If SP_REG_P is not 0, R31 is an SP reg, otherwise R31 is the zero reg. */
3161
3162 static inline const char *
3163 get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p)
3164 {
3165 const int has_zr = sp_reg_p ? 0 : 1;
3166 const int bank = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1;
3167 return int_reg[has_zr][bank][regno];
3168 }
3169
3170 /* Like get_int_reg_name, but BANK is always 1. */
3171
3172 static inline const char *
3173 get_64bit_int_reg_name (int regno, int sp_reg_p)
3174 {
3175 const int has_zr = sp_reg_p ? 0 : 1;
3176 return int_reg[has_zr][1][regno];
3177 }
3178
3179 /* Like get_int_reg_name, but BANK is always 2. */
3180 static inline const char *
3181 get_cap_reg_name (int regno, int sp_reg_p)
3182 {
3183 const int has_zr = sp_reg_p ? 0 : 1;
3184 return int_reg[has_zr][2][regno];
3185 }
3186
3187 static inline const char *
3188 get_altbase_reg_name (aarch64_feature_set features, int regno, int sp_reg_p,
3189 const aarch64_opcode *opcode)
3190 {
3191 if (AARCH64_CPU_HAS_FEATURE(features, AARCH64_FEATURE_C64)
3192 && opcode->iclass != br_capaddr)
3193 return get_64bit_int_reg_name (regno, sp_reg_p);
3194 else
3195 return get_cap_reg_name (regno, sp_reg_p);
3196 }
3197
3198 static inline const char *
3199 get_base_reg_name (aarch64_feature_set features, int regno, int sp_reg_p)
3200 {
3201 if (AARCH64_CPU_HAS_FEATURE(features, AARCH64_FEATURE_C64))
3202 return get_cap_reg_name (regno, sp_reg_p);
3203 else
3204 return get_64bit_int_reg_name (regno, sp_reg_p);
3205 }
3206
3207 /* Get the name of the integer offset register in OPND, using the shift type
3208 to decide whether it's a word or doubleword. */
3209
3210 static inline const char *
3211 get_offset_int_reg_name (const aarch64_opnd_info *opnd)
3212 {
3213 switch (opnd->shifter.kind)
3214 {
3215 case AARCH64_MOD_UXTW:
3216 case AARCH64_MOD_SXTW:
3217 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_W, 0);
3218
3219 case AARCH64_MOD_LSL:
3220 case AARCH64_MOD_SXTX:
3221 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_X, 0);
3222
3223 default:
3224 abort ();
3225 }
3226 }
3227
3228 /* Get the name of the SVE vector offset register in OPND, using the operand
3229 qualifier to decide whether the suffix should be .S or .D. */
3230
3231 static inline const char *
3232 get_addr_sve_reg_name (int regno, aarch64_opnd_qualifier_t qualifier)
3233 {
3234 assert (qualifier == AARCH64_OPND_QLF_S_S
3235 || qualifier == AARCH64_OPND_QLF_S_D);
3236 return sve_reg[qualifier == AARCH64_OPND_QLF_S_D][regno];
3237 }
3238
3239 /* Types for expanding an encoded 8-bit value to a floating-point value. */
3240
3241 typedef union
3242 {
3243 uint64_t i;
3244 double d;
3245 } double_conv_t;
3246
3247 typedef union
3248 {
3249 uint32_t i;
3250 float f;
3251 } single_conv_t;
3252
3253 typedef union
3254 {
3255 uint32_t i;
3256 float f;
3257 } half_conv_t;
3258
3259 /* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
3260 normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
3261 (depending on the type of the instruction). IMM8 will be expanded to a
3262 single-precision floating-point value (SIZE == 4) or a double-precision
3263 floating-point value (SIZE == 8). A half-precision floating-point value
3264 (SIZE == 2) is expanded to a single-precision floating-point value. The
3265 expanded value is returned. */
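/* For example, IMM8 == 0x70 expands to 0x3f800000 (1.0f) when SIZE == 4. */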
3266
3267 static uint64_t
3268 expand_fp_imm (int size, uint32_t imm8)
3269 {
3270 uint64_t imm = 0;
3271 uint32_t imm8_7, imm8_6_0, imm8_6, imm8_6_repl4;
3272
3273 imm8_7 = (imm8 >> 7) & 0x01; /* imm8<7> */
3274 imm8_6_0 = imm8 & 0x7f; /* imm8<6:0> */
3275 imm8_6 = imm8_6_0 >> 6; /* imm8<6> */
3276 imm8_6_repl4 = (imm8_6 << 3) | (imm8_6 << 2)
3277 | (imm8_6 << 1) | imm8_6; /* Replicate(imm8<6>,4) */
3278 if (size == 8)
3279 {
3280 imm = (imm8_7 << (63-32)) /* imm8<7> */
3281 | ((imm8_6 ^ 1) << (62-32)) /* NOT(imm8<6>) */
3282 | (imm8_6_repl4 << (58-32)) | (imm8_6 << (57-32))
3283 | (imm8_6 << (56-32)) | (imm8_6 << (55-32)) /* Replicate(imm8<6>,7) */
3284 | (imm8_6_0 << (48-32)); /* imm8<6>:imm8<5:0> */
3285 imm <<= 32;
3286 }
3287 else if (size == 4 || size == 2)
3288 {
3289 imm = (imm8_7 << 31) /* imm8<7> */
3290 | ((imm8_6 ^ 1) << 30) /* NOT(imm8<6>) */
3291 | (imm8_6_repl4 << 26) /* Replicate(imm8<6>,4) */
3292 | (imm8_6_0 << 19); /* imm8<6>:imm8<5:0> */
3293 }
3294 else
3295 {
3296 /* An unsupported size. */
3297 assert (0);
3298 }
3299
3300 return imm;
3301 }
3302
3303 /* Produce the string representation of the register list operand *OPND
3304 in the buffer pointed by BUF of size SIZE. PREFIX is the part of
3305 the register name that comes before the register number, such as "v". */
3306 static void
3307 print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd,
3308 const char *prefix)
3309 {
3310 const int num_regs = opnd->reglist.num_regs;
3311 const int first_reg = opnd->reglist.first_regno;
3312 const int last_reg = (first_reg + num_regs - 1) & 0x1f;
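/* Register numbers in a list wrap modulo 32, e.g. {v31.4s, v0.4s}. */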
3313 const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
3314 char tb[8]; /* Temporary buffer. */
3315
3316 assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
3317 assert (num_regs >= 1 && num_regs <= 4);
3318
3319 /* Prepare the index if any. */
3320 if (opnd->reglist.has_index)
3321 /* PR 21096: The %100 is to silence a warning about possible truncation. */
3322 snprintf (tb, 8, "[%" PRIi64 "]", (opnd->reglist.index % 100));
3323 else
3324 tb[0] = '\0';
3325
3326 /* The hyphenated form is preferred for disassembly if there are
3327 more than two registers in the list, and the register numbers
3328 are monotonically increasing in increments of one. */
3329 if (num_regs > 2 && last_reg > first_reg)
3330 snprintf (buf, size, "{%s%d.%s-%s%d.%s}%s", prefix, first_reg, qlf_name,
3331 prefix, last_reg, qlf_name, tb);
3332 else
3333 {
3334 const int reg0 = first_reg;
3335 const int reg1 = (first_reg + 1) & 0x1f;
3336 const int reg2 = (first_reg + 2) & 0x1f;
3337 const int reg3 = (first_reg + 3) & 0x1f;
3338
3339 switch (num_regs)
3340 {
3341 case 1:
3342 snprintf (buf, size, "{%s%d.%s}%s", prefix, reg0, qlf_name, tb);
3343 break;
3344 case 2:
3345 snprintf (buf, size, "{%s%d.%s, %s%d.%s}%s", prefix, reg0, qlf_name,
3346 prefix, reg1, qlf_name, tb);
3347 break;
3348 case 3:
3349 snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s}%s",
3350 prefix, reg0, qlf_name, prefix, reg1, qlf_name,
3351 prefix, reg2, qlf_name, tb);
3352 break;
3353 case 4:
3354 snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s, %s%d.%s}%s",
3355 prefix, reg0, qlf_name, prefix, reg1, qlf_name,
3356 prefix, reg2, qlf_name, prefix, reg3, qlf_name, tb);
3357 break;
3358 }
3359 }
3360 }
3361
3362 /* Print the register+immediate address in OPND to BUF, which has SIZE
3363 characters. BASE is the name of the base register. */
3364
3365 static void
3366 print_immediate_offset_address (char *buf, size_t size,
3367 const aarch64_opnd_info *opnd,
3368 const char *base)
3369 {
3370 if (opnd->addr.writeback)
3371 {
3372 if (opnd->addr.preind)
3373 {
3374 if (opnd->type == AARCH64_OPND_ADDR_SIMM10 && !opnd->addr.offset.imm)
3375 snprintf (buf, size, "[%s]!", base);
3376 else
3377 snprintf (buf, size, "[%s, #%d]!", base, opnd->addr.offset.imm);
3378 }
3379 else
3380 snprintf (buf, size, "[%s], #%d", base, opnd->addr.offset.imm);
3381 }
3382 else
3383 {
3384 if (opnd->shifter.operator_present)
3385 {
3386 assert (opnd->shifter.kind == AARCH64_MOD_MUL_VL);
3387 snprintf (buf, size, "[%s, #%d, mul vl]",
3388 base, opnd->addr.offset.imm);
3389 }
3390 else if (opnd->addr.offset.imm)
3391 snprintf (buf, size, "[%s, #%d]", base, opnd->addr.offset.imm);
3392 else
3393 snprintf (buf, size, "[%s]", base);
3394 }
3395 }
3396
3397 /* Produce the string representation of the register offset address operand
3398 *OPND in the buffer pointed by BUF of size SIZE. BASE and OFFSET are
3399 the names of the base and offset registers. */
3400 static void
3401 print_register_offset_address (char *buf, size_t size,
3402 const aarch64_opnd_info *opnd,
3403 const char *base, const char *offset)
3404 {
3405 char tb[16]; /* Temporary buffer. */
3406 bfd_boolean print_extend_p = TRUE;
3407 bfd_boolean print_amount_p = TRUE;
3408 const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;
3409
3410 if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
3411 || !opnd->shifter.amount_present))
3412 {
3413 /* Do not print the shift/extend amount when the amount is zero and
3414 it is not the special case of an 8-bit load/store instruction. */
3415 print_amount_p = FALSE;
3416 /* Likewise, no need to print the shift operator LSL in such a
3417 situation. */
3418 if (opnd->shifter.kind == AARCH64_MOD_LSL)
3419 print_extend_p = FALSE;
3420 }
3421
3422 /* Prepare for the extend/shift. */
3423 if (print_extend_p)
3424 {
3425 if (print_amount_p)
3426 snprintf (tb, sizeof (tb), ", %s #%" PRIi64, shift_name,
3427 /* PR 21096: The %100 is to silence a warning about possible truncation. */
3428 (opnd->shifter.amount % 100));
3429 else
3430 snprintf (tb, sizeof (tb), ", %s", shift_name);
3431 }
3432 else
3433 tb[0] = '\0';
3434
3435 snprintf (buf, size, "[%s, %s%s]", base, offset, tb);
3436 }
3437
3438 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
3439 in *BUF. The caller should pass in the maximum size of *BUF in SIZE.
3440 PC, PCREL_P and ADDRESS are used to pass in and return information about
3441 the PC-relative address calculation, where the PC value is passed in
3442 PC. If the operand is pc-relative related, *PCREL_P (if PCREL_P non-NULL)
3443 will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
3444 calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
3445
3446 The function serves both the disassembler and the assembler diagnostics
3447 issuer, which is the reason why it lives in this file. */
3448
3449 void
3450 aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
3451 const aarch64_opcode *opcode,
3452 const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
3453 bfd_vma *address, char** notes,
3454 aarch64_feature_set features)
3455 {
3456 unsigned int i, num_conds;
3457 const char *name = NULL;
3458 const aarch64_opnd_info *opnd = opnds + idx;
3459 enum aarch64_modifier_kind kind;
3460 uint64_t addr, enum_value;
3461
3462 buf[0] = '\0';
3463 if (pcrel_p)
3464 *pcrel_p = 0;
3465
3466 switch (opnd->type)
3467 {
3468 /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
3469 the <ic_op>, therefore we use opnd->present to override the
3470 generic optional-ness information. */
3471 case AARCH64_OPND_Cat_SYS:
3472 case AARCH64_OPND_Rt_SYS:
3473 if (opnd->present)
3474 snprintf (buf, size, "%s",
3475 get_base_reg_name (features, opnd->reg.regno, 0));
3476 break;
3477
3478 case AARCH64_OPND_Rsz:
3479 case AARCH64_OPND_Rsz2:
3480 case AARCH64_OPND_Rd:
3481 case AARCH64_OPND_Rn:
3482 case AARCH64_OPND_Rm:
3483 case AARCH64_OPND_Rt:
3484 case AARCH64_OPND_Wt:
3485 case AARCH64_OPND_Rt2:
3486 case AARCH64_OPND_Rs:
3487 case AARCH64_OPND_Ra:
3488 case AARCH64_OPND_PAIRREG:
3489 case AARCH64_OPND_SVE_Rm:
3490 /* Omit the operand, e.g. RET. */
3491 if (optional_operand_p (opcode, idx)
3492 && (opnd->reg.regno == get_optional_operand_default_value (opcode)))
3493 break;
3494 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3495 || opnd->qualifier == AARCH64_OPND_QLF_X);
3496 snprintf (buf, size, "%s",
3497 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3498 break;
3499
3500 case AARCH64_OPND_Rd_SP:
3501 case AARCH64_OPND_Rn_SP:
3502 case AARCH64_OPND_Rt_SP:
3503 case AARCH64_OPND_SVE_Rn_SP:
3504 case AARCH64_OPND_Rm_SP:
3505 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3506 || opnd->qualifier == AARCH64_OPND_QLF_WSP
3507 || opnd->qualifier == AARCH64_OPND_QLF_X
3508 || opnd->qualifier == AARCH64_OPND_QLF_SP);
3509 snprintf (buf, size, "%s",
3510 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 1));
3511 break;
3512
3513 case AARCH64_OPND_A64C_Rm_EXT:
3514 case AARCH64_OPND_Rm_EXT:
3515 kind = opnd->shifter.kind;
3516 assert (idx == 1 || idx == 2);
3517 if ((aarch64_stack_pointer_p (opnds)
3518 || (idx == 2 && aarch64_stack_pointer_p (opnds + 1)))
3519 && ((opnd->qualifier == AARCH64_OPND_QLF_W
3520 && opnds[0].qualifier == AARCH64_OPND_QLF_W
3521 && kind == AARCH64_MOD_UXTW)
3522 || (opnd->qualifier == AARCH64_OPND_QLF_X
3523 && kind == AARCH64_MOD_UXTX)))
3524 {
3525 /* 'LSL' is the preferred form in this case. */
3526 kind = AARCH64_MOD_LSL;
3527 if (opnd->shifter.amount == 0)
3528 {
3529 /* Shifter omitted. */
3530 snprintf (buf, size, "%s",
3531 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3532 break;
3533 }
3534 }
3535 if (opnd->shifter.amount)
3536 snprintf (buf, size, "%s, %s #%" PRIi64,
3537 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3538 aarch64_operand_modifiers[kind].name,
3539 opnd->shifter.amount);
3540 else
3541 snprintf (buf, size, "%s, %s",
3542 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3543 aarch64_operand_modifiers[kind].name);
3544 break;
3545
3546 case AARCH64_OPND_Rm_SFT:
3547 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3548 || opnd->qualifier == AARCH64_OPND_QLF_X);
3549 if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL)
3550 snprintf (buf, size, "%s",
3551 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3552 else
3553 snprintf (buf, size, "%s, %s #%" PRIi64,
3554 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3555 aarch64_operand_modifiers[opnd->shifter.kind].name,
3556 opnd->shifter.amount);
3557 break;
3558
3559 case AARCH64_OPND_Fsz:
3560 case AARCH64_OPND_Fd:
3561 case AARCH64_OPND_Fn:
3562 case AARCH64_OPND_Fm:
3563 case AARCH64_OPND_Fa:
3564 case AARCH64_OPND_Ft:
3565 case AARCH64_OPND_Ft2:
3566 case AARCH64_OPND_Sd:
3567 case AARCH64_OPND_Sn:
3568 case AARCH64_OPND_Sm:
3569 case AARCH64_OPND_St:
3570 case AARCH64_OPND_SVE_VZn:
3571 case AARCH64_OPND_SVE_Vd:
3572 case AARCH64_OPND_SVE_Vm:
3573 case AARCH64_OPND_SVE_Vn:
3574 snprintf (buf, size, "%s%d", aarch64_get_qualifier_name (opnd->qualifier),
3575 opnd->reg.regno);
3576 break;
3577
3578 case AARCH64_OPND_Va:
3579 case AARCH64_OPND_Vd:
3580 case AARCH64_OPND_Vn:
3581 case AARCH64_OPND_Vm:
3582 snprintf (buf, size, "v%d.%s", opnd->reg.regno,
3583 aarch64_get_qualifier_name (opnd->qualifier));
3584 break;
3585
3586 case AARCH64_OPND_Ed:
3587 case AARCH64_OPND_En:
3588 case AARCH64_OPND_Em:
3589 case AARCH64_OPND_Em16:
3590 case AARCH64_OPND_SM3_IMM2:
3591 snprintf (buf, size, "v%d.%s[%" PRIi64 "]", opnd->reglane.regno,
3592 aarch64_get_qualifier_name (opnd->qualifier),
3593 opnd->reglane.index);
3594 break;
3595
3596 case AARCH64_OPND_VdD1:
3597 case AARCH64_OPND_VnD1:
3598 snprintf (buf, size, "v%d.d[1]", opnd->reg.regno);
3599 break;
3600
3601 case AARCH64_OPND_LVn:
3602 case AARCH64_OPND_LVt:
3603 case AARCH64_OPND_LVt_AL:
3604 case AARCH64_OPND_LEt:
3605 print_register_list (buf, size, opnd, "v");
3606 break;
3607
3608 case AARCH64_OPND_SVE_Pd:
3609 case AARCH64_OPND_SVE_Pg3:
3610 case AARCH64_OPND_SVE_Pg4_5:
3611 case AARCH64_OPND_SVE_Pg4_10:
3612 case AARCH64_OPND_SVE_Pg4_16:
3613 case AARCH64_OPND_SVE_Pm:
3614 case AARCH64_OPND_SVE_Pn:
3615 case AARCH64_OPND_SVE_Pt:
3616 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3617 snprintf (buf, size, "p%d", opnd->reg.regno);
3618 else if (opnd->qualifier == AARCH64_OPND_QLF_P_Z
3619 || opnd->qualifier == AARCH64_OPND_QLF_P_M)
3620 snprintf (buf, size, "p%d/%s", opnd->reg.regno,
3621 aarch64_get_qualifier_name (opnd->qualifier));
3622 else
3623 snprintf (buf, size, "p%d.%s", opnd->reg.regno,
3624 aarch64_get_qualifier_name (opnd->qualifier));
3625 break;
3626
3627 case AARCH64_OPND_SVE_Za_5:
3628 case AARCH64_OPND_SVE_Za_16:
3629 case AARCH64_OPND_SVE_Zd:
3630 case AARCH64_OPND_SVE_Zm_5:
3631 case AARCH64_OPND_SVE_Zm_16:
3632 case AARCH64_OPND_SVE_Zn:
3633 case AARCH64_OPND_SVE_Zt:
3634 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3635 snprintf (buf, size, "z%d", opnd->reg.regno);
3636 else
3637 snprintf (buf, size, "z%d.%s", opnd->reg.regno,
3638 aarch64_get_qualifier_name (opnd->qualifier));
3639 break;
3640
3641 case AARCH64_OPND_SVE_ZnxN:
3642 case AARCH64_OPND_SVE_ZtxN:
3643 print_register_list (buf, size, opnd, "z");
3644 break;
3645
3646 case AARCH64_OPND_SVE_Zm3_INDEX:
3647 case AARCH64_OPND_SVE_Zm3_22_INDEX:
3648 case AARCH64_OPND_SVE_Zm3_11_INDEX:
3649 case AARCH64_OPND_SVE_Zm4_11_INDEX:
3650 case AARCH64_OPND_SVE_Zm4_INDEX:
3651 case AARCH64_OPND_SVE_Zn_INDEX:
3652 snprintf (buf, size, "z%d.%s[%" PRIi64 "]", opnd->reglane.regno,
3653 aarch64_get_qualifier_name (opnd->qualifier),
3654 opnd->reglane.index);
3655 break;
3656
3657 case AARCH64_OPND_CRn:
3658 case AARCH64_OPND_CRm:
3659 snprintf (buf, size, "C%" PRIi64, opnd->imm.value);
3660 break;
3661
3662 case AARCH64_OPND_A64C_IMMV4:
3663 case AARCH64_OPND_A64C_IMM8:
3664 case AARCH64_OPND_IDX:
3665 case AARCH64_OPND_MASK:
3666 case AARCH64_OPND_IMM:
3667 case AARCH64_OPND_IMM_2:
3668 case AARCH64_OPND_WIDTH:
3669 case AARCH64_OPND_UIMM3_OP1:
3670 case AARCH64_OPND_UIMM3_OP2:
3671 case AARCH64_OPND_BIT_NUM:
3672 case AARCH64_OPND_IMM_VLSL:
3673 case AARCH64_OPND_IMM_VLSR:
3674 case AARCH64_OPND_SHLL_IMM:
3675 case AARCH64_OPND_IMM0:
3676 case AARCH64_OPND_IMMR:
3677 case AARCH64_OPND_IMMS:
3678 case AARCH64_OPND_UNDEFINED:
3679 case AARCH64_OPND_FBITS:
3680 case AARCH64_OPND_TME_UIMM16:
3681 case AARCH64_OPND_SIMM5:
3682 case AARCH64_OPND_SVE_SHLIMM_PRED:
3683 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
3684 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
3685 case AARCH64_OPND_SVE_SHRIMM_PRED:
3686 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
3687 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
3688 case AARCH64_OPND_SVE_SIMM5:
3689 case AARCH64_OPND_SVE_SIMM5B:
3690 case AARCH64_OPND_SVE_SIMM6:
3691 case AARCH64_OPND_SVE_SIMM8:
3692 case AARCH64_OPND_SVE_UIMM3:
3693 case AARCH64_OPND_SVE_UIMM7:
3694 case AARCH64_OPND_SVE_UIMM8:
3695 case AARCH64_OPND_SVE_UIMM8_53:
3696 case AARCH64_OPND_IMM_ROT1:
3697 case AARCH64_OPND_IMM_ROT2:
3698 case AARCH64_OPND_IMM_ROT3:
3699 case AARCH64_OPND_SVE_IMM_ROT1:
3700 case AARCH64_OPND_SVE_IMM_ROT2:
3701 case AARCH64_OPND_SVE_IMM_ROT3:
3702 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3703 break;
3704
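      /* Note (inference from the code below, not a normative statement): for
	 these SVE floating-point constant operands, imm.value is treated as a
	 single-precision bit pattern; the single_conv_t union just
	 reinterprets those bits as a float so that snprintf can print the
	 constant naturally, e.g. "#0.5" or "#1.0".  */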
3705 case AARCH64_OPND_SVE_I1_HALF_ONE:
3706 case AARCH64_OPND_SVE_I1_HALF_TWO:
3707 case AARCH64_OPND_SVE_I1_ZERO_ONE:
3708 {
3709 single_conv_t c;
3710 c.i = opnd->imm.value;
3711 snprintf (buf, size, "#%.1f", c.f);
3712 break;
3713 }
3714
3715 case AARCH64_OPND_SVE_PATTERN:
3716 if (optional_operand_p (opcode, idx)
3717 && opnd->imm.value == get_optional_operand_default_value (opcode))
3718 break;
3719 enum_value = opnd->imm.value;
3720 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3721 if (aarch64_sve_pattern_array[enum_value])
3722 snprintf (buf, size, "%s", aarch64_sve_pattern_array[enum_value]);
3723 else
3724 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3725 break;
3726
3727 case AARCH64_OPND_SVE_PATTERN_SCALED:
3728 if (optional_operand_p (opcode, idx)
3729 && !opnd->shifter.operator_present
3730 && opnd->imm.value == get_optional_operand_default_value (opcode))
3731 break;
3732 enum_value = opnd->imm.value;
3733 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3734 if (aarch64_sve_pattern_array[opnd->imm.value])
3735 snprintf (buf, size, "%s", aarch64_sve_pattern_array[opnd->imm.value]);
3736 else
3737 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3738 if (opnd->shifter.operator_present)
3739 {
3740 size_t len = strlen (buf);
3741 snprintf (buf + len, size - len, ", %s #%" PRIi64,
3742 aarch64_operand_modifiers[opnd->shifter.kind].name,
3743 opnd->shifter.amount);
3744 }
3745 break;
3746
3747 case AARCH64_OPND_SVE_PRFOP:
3748 enum_value = opnd->imm.value;
3749 assert (enum_value < ARRAY_SIZE (aarch64_sve_prfop_array));
3750 if (aarch64_sve_prfop_array[enum_value])
3751 snprintf (buf, size, "%s", aarch64_sve_prfop_array[enum_value]);
3752 else
3753 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3754 break;
3755
3756 case AARCH64_OPND_IMM_MOV:
3757 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3758 {
3759 case 4: /* e.g. MOV Wd, #<imm32>. */
3760 {
3761 int imm32 = opnd->imm.value;
3762 snprintf (buf, size, "#0x%-20x\t// #%d", imm32, imm32);
3763 }
3764 break;
3765 case 8: /* e.g. MOV Xd, #<imm64>. */
3766 snprintf (buf, size, "#0x%-20" PRIx64 "\t// #%" PRIi64,
3767 opnd->imm.value, opnd->imm.value);
3768 break;
3769 default: assert (0);
3770 }
3771 break;
3772
3773 case AARCH64_OPND_FPIMM0:
3774 snprintf (buf, size, "#0.0");
3775 break;
3776
3777 case AARCH64_OPND_A64C_IMM6_EXT:
3778 case AARCH64_OPND_A64C_AIMM:
3779 case AARCH64_OPND_LIMM:
3780 case AARCH64_OPND_AIMM:
3781 case AARCH64_OPND_HALF:
3782 case AARCH64_OPND_SVE_INV_LIMM:
3783 case AARCH64_OPND_SVE_LIMM:
3784 case AARCH64_OPND_SVE_LIMM_MOV:
3785 if (opnd->shifter.amount)
3786 snprintf (buf, size, "#0x%" PRIx64 ", lsl #%" PRIi64, opnd->imm.value,
3787 opnd->shifter.amount);
3788 else
3789 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
3790 break;
3791
3792 case AARCH64_OPND_SIMD_IMM:
3793 case AARCH64_OPND_SIMD_IMM_SFT:
3794 if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
3795 || opnd->shifter.kind == AARCH64_MOD_NONE)
3796 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
3797 else
3798 snprintf (buf, size, "#0x%" PRIx64 ", %s #%" PRIi64, opnd->imm.value,
3799 aarch64_operand_modifiers[opnd->shifter.kind].name,
3800 opnd->shifter.amount);
3801 break;
3802
3803 case AARCH64_OPND_SVE_AIMM:
3804 case AARCH64_OPND_SVE_ASIMM:
3805 if (opnd->shifter.amount)
3806 snprintf (buf, size, "#%" PRIi64 ", lsl #%" PRIi64, opnd->imm.value,
3807 opnd->shifter.amount);
3808 else
3809 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3810 break;
3811
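      /* A sketch of what the FP immediate cases below rely on (an assumption
	 drawn from the code, not a normative description): the 8-bit
	 FMOV-style immediate (nominally a sign bit, 3 exponent bits and 4
	 fraction bits) is expanded by expand_fp_imm to a full 16-, 32- or
	 64-bit IEEE bit pattern, chosen by the element size of operand 0, and
	 the half/single/double_conv_t unions then reinterpret that pattern so
	 it can be printed with "%.18e".  */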
3812 case AARCH64_OPND_FPIMM:
3813 case AARCH64_OPND_SIMD_FPIMM:
3814 case AARCH64_OPND_SVE_FPIMM8:
3815 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3816 {
3817 case 2: /* e.g. FMOV <Hd>, #<imm>. */
3818 {
3819 half_conv_t c;
3820 c.i = expand_fp_imm (2, opnd->imm.value);
3821 snprintf (buf, size, "#%.18e", c.f);
3822 }
3823 break;
3824 case 4: /* e.g. FMOV <Vd>.4S, #<imm>. */
3825 {
3826 single_conv_t c;
3827 c.i = expand_fp_imm (4, opnd->imm.value);
3828 snprintf (buf, size, "#%.18e", c.f);
3829 }
3830 break;
3831 case 8: /* e.g. FMOV <Dd>, #<imm>. */
3832 {
3833 double_conv_t c;
3834 c.i = expand_fp_imm (8, opnd->imm.value);
3835 snprintf (buf, size, "#%.18e", c.d);
3836 }
3837 break;
3838 default: assert (0);
3839 }
3840 break;
3841
3842 case AARCH64_OPND_CCMP_IMM:
3843 case AARCH64_OPND_NZCV:
3844 case AARCH64_OPND_EXCEPTION:
3845 case AARCH64_OPND_UIMM4:
3846 case AARCH64_OPND_UIMM4_ADDG:
3847 case AARCH64_OPND_UIMM7:
3848 case AARCH64_OPND_UIMM10:
3849 if (optional_operand_p (opcode, idx) == TRUE
3850 && (opnd->imm.value ==
3851 (int64_t) get_optional_operand_default_value (opcode)))
3852 /* Omit the operand, e.g. DCPS1. */
3853 break;
3854 snprintf (buf, size, "#0x%x", (unsigned int)opnd->imm.value);
3855 break;
3856
3857 case AARCH64_OPND_PERM:
3858 {
3859 char perm[4];
3860 get_perm_str (opnd->perm, perm);
3861 snprintf (buf, size, "%s", perm);
3862 }
3863 break;
3864
3865 case AARCH64_OPND_FORM:
3866 snprintf (buf, size, "%s", opnd->form->name);
3867 break;
3868
3869 case AARCH64_OPND_COND:
3870 case AARCH64_OPND_COND1:
3871 snprintf (buf, size, "%s", opnd->cond->names[0]);
3872 num_conds = ARRAY_SIZE (opnd->cond->names);
3873 for (i = 1; i < num_conds && opnd->cond->names[i]; ++i)
3874 {
3875 size_t len = strlen (buf);
3876 if (i == 1)
3877 snprintf (buf + len, size - len, " // %s = %s",
3878 opnd->cond->names[0], opnd->cond->names[i]);
3879 else
3880 snprintf (buf + len, size - len, ", %s",
3881 opnd->cond->names[i]);
3882 }
3883 break;
3884
3885 case AARCH64_OPND_A64C_ADDR_ADRDP:
3886 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
3887 break;
3888
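      /* The PC-relative operands below are printed as absolute addresses.
	 As a rough worked example (assuming AARCH64_PCREL_OFFSET is 0):
	 for ADRP with pc = 0x400abc and imm.value = 0x3000 the printed
	 address is (0x400abc & ~0xfff) + 0x3000 = 0x403000.  For the
	 branch-style PCREL operands the immediate is simply added to pc,
	 and for the Morello PCREL17 form the result is additionally aligned
	 down to a 16-byte (capability) boundary with & ~0xf.  */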
3889 case AARCH64_OPND_ADDR_ADRP:
3890 addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff)
3891 + opnd->imm.value;
3892 if (pcrel_p)
3893 *pcrel_p = 1;
3894 if (address)
3895 *address = addr;
3896 /* This is not necessary during disassembly, as print_address_func
3897 in the disassemble_info will take care of the printing. But some
3898 other callers may still be interested in getting the string in *STR,
3899 so here we do snprintf regardless. */
3900 snprintf (buf, size, "#0x%" PRIx64, addr);
3901 break;
3902
3903 case AARCH64_OPND_ADDR_PCREL14:
3904 case AARCH64_OPND_ADDR_PCREL17:
3905 case AARCH64_OPND_ADDR_PCREL19:
3906 case AARCH64_OPND_ADDR_PCREL21:
3907 case AARCH64_OPND_ADDR_PCREL26:
3908 addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value;
3909
3910 /* For A64C PCREL17, the final address is rounded down to a capability
3911 alignment boundary. */
3912 if (opnd->type == AARCH64_OPND_ADDR_PCREL17)
3913 addr = addr & ~(uint64_t) 0xf;
3914
3915 if (pcrel_p)
3916 *pcrel_p = 1;
3917 if (address)
3918 *address = addr;
3919 /* This is not necessary during disassembly, as print_address_func
3920 in the disassemble_info will take care of the printing. But some
3921 other callers may still be interested in getting the string in *STR,
3922 so here we do snprintf regardless. */
3923 snprintf (buf, size, "#0x%" PRIx64, addr);
3924 break;
3925
3926 case AARCH64_OPND_CAPADDR_SIMPLE:
3927 snprintf (buf, size, "[%s]",
3928 get_altbase_reg_name (features, opnd->addr.base_regno, 1, opcode));
3929 break;
3930
3931 case AARCH64_OPND_ADDR_SIMPLE:
3932 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
3933 case AARCH64_OPND_SIMD_ADDR_POST:
3934 name = get_base_reg_name (features, opnd->addr.base_regno, 1);
3935 if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST)
3936 {
3937 if (opnd->addr.offset.is_reg)
3938 snprintf (buf, size, "[%s], x%d", name, opnd->addr.offset.regno);
3939 else
3940 snprintf (buf, size, "[%s], #%d", name, opnd->addr.offset.imm);
3941 }
3942 else
3943 snprintf (buf, size, "[%s]", name);
3944 break;
3945
3946 case AARCH64_OPND_CAPADDR_REGOFF:
3947 print_register_offset_address
3948 (buf, size, opnd,
3949 get_altbase_reg_name (features, opnd->addr.base_regno, 1, opcode),
3950 get_offset_int_reg_name (opnd));
3951 break;
3952
3953 case AARCH64_OPND_ADDR_REGOFF:
3954 case AARCH64_OPND_SVE_ADDR_R:
3955 case AARCH64_OPND_SVE_ADDR_RR:
3956 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
3957 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
3958 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
3959 case AARCH64_OPND_SVE_ADDR_RX:
3960 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
3961 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
3962 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
3963 print_register_offset_address
3964 (buf, size, opnd,
3965 get_base_reg_name (features, opnd->addr.base_regno, 1),
3966 get_offset_int_reg_name (opnd));
3967 break;
3968
3969 case AARCH64_OPND_SVE_ADDR_ZX:
3970 print_register_offset_address
3971 (buf, size, opnd,
3972 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
3973 get_64bit_int_reg_name (opnd->addr.offset.regno, 0));
3974 break;
3975
3976 case AARCH64_OPND_SVE_ADDR_RZ:
3977 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
3978 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
3979 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
3980 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
3981 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
3982 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
3983 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
3984 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
3985 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
3986 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
3987 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
3988 print_register_offset_address
3989 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3990 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
3991 break;
3992
3993 case AARCH64_OPND_CAPADDR_SIMM9:
3994 case AARCH64_OPND_CAPADDR_SIMM7:
3995 print_immediate_offset_address
3996 (buf, size, opnd,
3997 get_altbase_reg_name (features, opnd->addr.base_regno, 1, opcode));
3998 break;
3999
4000 case AARCH64_OPND_A64C_ADDR_SIMM9:
4001 case AARCH64_OPND_A64C_ADDR_SIMM7:
4002 case AARCH64_OPND_ADDR_SIMM7:
4003 case AARCH64_OPND_ADDR_SIMM9:
4004 case AARCH64_OPND_ADDR_SIMM9_2:
4005 case AARCH64_OPND_ADDR_SIMM10:
4006 case AARCH64_OPND_ADDR_SIMM11:
4007 case AARCH64_OPND_ADDR_SIMM13:
4008 case AARCH64_OPND_ADDR_OFFSET:
4009 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
4010 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
4011 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
4012 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
4013 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
4014 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
4015 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
4016 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
4017 case AARCH64_OPND_SVE_ADDR_RI_U6:
4018 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
4019 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
4020 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
4021 print_immediate_offset_address
4022 (buf, size, opnd,
4023 get_base_reg_name (features, opnd->addr.base_regno, 1));
4024 break;
4025
4026 case AARCH64_OPND_SVE_ADDR_ZI_U5:
4027 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
4028 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
4029 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
4030 print_immediate_offset_address
4031 (buf, size, opnd,
4032 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier));
4033 break;
4034
4035 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
4036 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
4037 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
4038 print_register_offset_address
4039 (buf, size, opnd,
4040 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
4041 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
4042 break;
4043
4044 case AARCH64_OPND_CAPADDR_UIMM9:
4045 name = get_altbase_reg_name (features, opnd->addr.base_regno, 1, opcode);
4046 goto do_print;
4047
4048 case AARCH64_OPND_ADDR_UIMM12:
4049 name = get_base_reg_name (features, opnd->addr.base_regno, 1);
4050 do_print:
4051 if (opnd->addr.offset.imm)
4052 snprintf (buf, size, "[%s, #%d]", name, opnd->addr.offset.imm);
4053 else
4054 snprintf (buf, size, "[%s]", name);
4055 break;
4056
4057 case AARCH64_OPND_SYSREG:
4058 for (i = 0; aarch64_sys_regs[i].name; ++i)
4059 {
4060 const aarch64_sys_reg *sr = aarch64_sys_regs + i;
4061
4062 bfd_boolean exact_match
4063 = (!(sr->flags & (F_REG_READ | F_REG_WRITE))
4064 || (sr->flags & opnd->sysreg.flags) == opnd->sysreg.flags)
4065 && AARCH64_CPU_HAS_FEATURE (features, sr->features)
4066 && ((sr->flags & F_CAPREG)
4067 == (opcode->iclass == a64c ? F_CAPREG : 0));
4068
4069 /* Try to find an exact match, but if that fails, return the first
4070 partial match that was found. */
4071 if (aarch64_sys_regs[i].value == opnd->sysreg.value
4072 && ! aarch64_sys_reg_deprecated_p (aarch64_sys_regs[i].flags)
4073 && (name == NULL || exact_match))
4074 {
4075 name = aarch64_sys_regs[i].name;
4076 if (exact_match)
4077 {
4078 if (notes)
4079 *notes = NULL;
4080 break;
4081 }
4082
4083 /* If we didn't match exactly, the presence of a flag
4084 indicates what we didn't want for this instruction. e.g. if
4085 F_REG_READ is there, that means we were looking for a write
4086 register. See aarch64_ext_sysreg. */
4087 if (aarch64_sys_regs[i].flags & F_REG_WRITE)
4088 *notes = _("reading from a write-only register");
4089 else if (aarch64_sys_regs[i].flags & F_REG_READ)
4090 *notes = _("writing to a read-only register");
4091 }
4092 }
4093
4094 if (name)
4095 snprintf (buf, size, "%s", name);
4096 else
4097 {
4098 /* Implementation defined system register. */
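	  /* The field layout assumed here (op0 in bits [15:14], op1 in
	     [13:11], CRn in [10:7], CRm in [6:3], op2 in [2:0]) matches the
	     packing produced by the CPENC macro further down in this file,
	     so e.g. a value of 0xc000 prints as s3_0_c0_c0_0.  */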
4099 unsigned int value = opnd->sysreg.value;
4100 snprintf (buf, size, "s%u_%u_c%u_c%u_%u", (value >> 14) & 0x3,
4101 (value >> 11) & 0x7, (value >> 7) & 0xf, (value >> 3) & 0xf,
4102 value & 0x7);
4103 }
4104 break;
4105
4106 case AARCH64_OPND_PSTATEFIELD:
4107 for (i = 0; aarch64_pstatefields[i].name; ++i)
4108 if (aarch64_pstatefields[i].value == opnd->pstatefield)
4109 break;
4110 assert (aarch64_pstatefields[i].name);
4111 snprintf (buf, size, "%s", aarch64_pstatefields[i].name);
4112 break;
4113
4114 case AARCH64_OPND_SYSREG_AT:
4115 case AARCH64_OPND_SYSREG_DC:
4116 case AARCH64_OPND_SYSREG_IC:
4117 case AARCH64_OPND_SYSREG_TLBI:
4118 case AARCH64_OPND_SYSREG_SR:
4119 snprintf (buf, size, "%s", opnd->sysins_op->name);
4120 break;
4121
4122 case AARCH64_OPND_BARRIER:
4123 snprintf (buf, size, "%s", opnd->barrier->name);
4124 break;
4125
4126 case AARCH64_OPND_BARRIER_ISB:
4127 /* Operand can be omitted, e.g. in DCPS1. */
4128 if (! optional_operand_p (opcode, idx)
4129 || (opnd->barrier->value
4130 != get_optional_operand_default_value (opcode)))
4131 snprintf (buf, size, "#0x%x", opnd->barrier->value);
4132 break;
4133
4134 case AARCH64_OPND_PRFOP:
4135 if (opnd->prfop->name != NULL)
4136 snprintf (buf, size, "%s", opnd->prfop->name);
4137 else
4138 snprintf (buf, size, "#0x%02x", opnd->prfop->value);
4139 break;
4140
4141 case AARCH64_OPND_BARRIER_PSB:
4142 snprintf (buf, size, "csync");
4143 break;
4144
4145 case AARCH64_OPND_BTI_TARGET:
4146 if ((HINT_FLAG (opnd->hint_option->value) & HINT_OPD_F_NOPRINT) == 0)
4147 snprintf (buf, size, "%s", opnd->hint_option->name);
4148 break;
4149
4150 case AARCH64_OPND_Cad_SP:
4151 case AARCH64_OPND_Can_SP:
4152 case AARCH64_OPND_Cam_SP:
4153 snprintf (buf, size, "%s", get_cap_reg_name (opnd->reg.regno, 1));
4154 break;
4155
4156 case AARCH64_OPND_A64C_CST_REG:
4157 case AARCH64_OPND_Cat:
4158 case AARCH64_OPND_Cat2:
4159 case AARCH64_OPND_Can:
4160 case AARCH64_OPND_Cam:
4161 case AARCH64_OPND_Cad:
4162 case AARCH64_OPND_Cas:
4163 snprintf (buf, size, "%s", get_cap_reg_name (opnd->reg.regno, 0));
4164 break;
4165
4166 default:
4167 assert (0);
4168 }
4169 }
4170 \f
4171 #define CPENC(op0,op1,crn,crm,op2) \
4172 ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
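/* Illustrative only: after the final >> 5, CPENC packs the fields as op0 in
   bits [15:14], op1 in [13:11], CRn in [10:7], CRm in [6:3] and op2 in
   [2:0].  For instance, CPENC (3,0,C0,C0,0) -- the midr_el1 entry below --
   evaluates to 3 << 14 = 0xc000, which the implementation-defined fallback
   printer above would render as s3_0_c0_c0_0.  */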
4173 /* for 3.9.3 Instructions for Accessing Special Purpose Registers */
4174 #define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
4175 /* for 3.9.10 System Instructions */
4176 #define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))
4177
4178 #define C0 0
4179 #define C1 1
4180 #define C2 2
4181 #define C3 3
4182 #define C4 4
4183 #define C5 5
4184 #define C6 6
4185 #define C7 7
4186 #define C8 8
4187 #define C9 9
4188 #define C10 10
4189 #define C11 11
4190 #define C12 12
4191 #define C13 13
4192 #define C14 14
4193 #define C15 15
4194
4195 #define SYSREG(name, encoding, flags, features) \
4196 { name, encoding, flags, features }
4197
4198 #define SR_CORE(n,e,f) SYSREG (n,e,f,0)
4199
4200 #define SR_FEAT(n,e,f,feat) \
4201 SYSREG ((n), (e), (f) | F_ARCHEXT, AARCH64_FEATURE_##feat)
4202
4203 #define SR_FEAT2(n,e,f,fe1,fe2) \
4204 SYSREG ((n), (e), (f) | F_ARCHEXT, \
4205 AARCH64_FEATURE_##fe1 | AARCH64_FEATURE_##fe2)
4206
4207 #define SR_RNG(n,e,f) SR_FEAT2(n,e,f,RNG,V8_5)
4208 #define SR_V8_1_A(n,e,f) SR_FEAT2(n,e,f,V8_A,V8_1)
4209 #define SR_V8_4_A(n,e,f) SR_FEAT2(n,e,f,V8_A,V8_4)
4210
4211 #define SR_V8_A(n,e,f) SR_FEAT (n,e,f,V8_A)
4212 #define SR_V8_R(n,e,f) SR_FEAT (n,e,f,V8_R)
4213 #define SR_V8_1(n,e,f) SR_FEAT (n,e,f,V8_1)
4214 #define SR_V8_2(n,e,f) SR_FEAT (n,e,f,V8_2)
4215 #define SR_V8_3(n,e,f) SR_FEAT (n,e,f,V8_3)
4216 #define SR_V8_4(n,e,f) SR_FEAT (n,e,f,V8_4)
4218 #define SR_PAN(n,e,f) SR_FEAT (n,e,f,PAN)
4219 #define SR_RAS(n,e,f) SR_FEAT (n,e,f,RAS)
4220 #define SR_SSBS(n,e,f) SR_FEAT (n,e,f,SSBS)
4221 #define SR_SVE(n,e,f) SR_FEAT (n,e,f,SVE)
4222 #define SR_ID_PFR2(n,e,f) SR_FEAT (n,e,f,ID_PFR2)
4223 #define SR_PROFILE(n,e,f) SR_FEAT (n,e,f,PROFILE)
4224 #define SR_MEMTAG(n,e,f) SR_FEAT (n,e,f,MEMTAG)
4225 #define SR_SCXTNUM(n,e,f) SR_FEAT (n,e,f,SCXTNUM)
4226 #define SR_MORELLO(n,e,f) SR_FEAT (n,e,f,A64C)
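/* For illustration, a mechanical expansion of one entry used below:
   SR_SVE ("zcr_el1", CPENC (3,0,C1,C2,0), 0) goes through SR_FEAT (..., SVE)
   and SYSREG (...) and ends up as the initialiser
   { "zcr_el1", CPENC (3,0,C1,C2,0), 0 | F_ARCHEXT, AARCH64_FEATURE_SVE }.  */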
4227
4228 #define SR_EXPAND_ELx(f,x) \
4229 f (x, 1), \
4230 f (x, 2), \
4231 f (x, 3), \
4232 f (x, 4), \
4233 f (x, 5), \
4234 f (x, 6), \
4235 f (x, 7), \
4236 f (x, 8), \
4237 f (x, 9), \
4238 f (x, 10), \
4239 f (x, 11), \
4240 f (x, 12), \
4241 f (x, 13), \
4242 f (x, 14), \
4243 f (x, 15),
4244
4245 #define SR_EXPAND_EL12(f) \
4246 SR_EXPAND_ELx (f,1) \
4247 SR_EXPAND_ELx (f,2)
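/* For illustration: SR_EXPAND_EL12 (PRBARn_ELx), used further down, expands
   to PRBARn_ELx (1,1) ... PRBARn_ELx (1,15) followed by PRBARn_ELx (2,1) ...
   PRBARn_ELx (2,15), i.e. the table entries prbar1_el1 .. prbar15_el1 and
   prbar1_el2 .. prbar15_el2.  */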
4248
4249 /* TODO: there is one more issue that needs to be resolved:
4250 1. handle cpu-implementation-defined system registers.
4251
4252 Note that the F_REG_{READ,WRITE} flags mean read-only and write-only
4253 respectively. If neither of these is set then the register is read-write. */
4254 const aarch64_sys_reg aarch64_sys_regs [] =
4255 {
4256 SR_CORE ("spsr_el1", CPEN_ (0,C0,0), 0), /* = spsr_svc. */
4257 SR_V8_1 ("spsr_el12", CPEN_ (5,C0,0), 0),
4258 SR_CORE ("elr_el1", CPEN_ (0,C0,1), 0),
4259 SR_V8_1 ("elr_el12", CPEN_ (5,C0,1), 0),
4260 SR_CORE ("sp_el0", CPEN_ (0,C1,0), 0),
4261 SR_CORE ("spsel", CPEN_ (0,C2,0), 0),
4262 SR_CORE ("daif", CPEN_ (3,C2,1), 0),
4263 SR_CORE ("currentel", CPEN_ (0,C2,2), F_REG_READ),
4264 SR_PAN ("pan", CPEN_ (0,C2,3), 0),
4265 SR_V8_2 ("uao", CPEN_ (0,C2,4), 0),
4266 SR_CORE ("nzcv", CPEN_ (3,C2,0), 0),
4267 SR_SSBS ("ssbs", CPEN_ (3,C2,6), 0),
4268 SR_CORE ("fpcr", CPEN_ (3,C4,0), 0),
4269 SR_CORE ("fpsr", CPEN_ (3,C4,1), 0),
4270 SR_CORE ("dspsr_el0", CPEN_ (3,C5,0), 0),
4271 SR_CORE ("dlr_el0", CPEN_ (3,C5,1), 0),
4272 SR_CORE ("spsr_el2", CPEN_ (4,C0,0), 0), /* = spsr_hyp. */
4273 SR_CORE ("elr_el2", CPEN_ (4,C0,1), 0),
4274 SR_CORE ("sp_el1", CPEN_ (4,C1,0), 0),
4275 SR_CORE ("spsr_irq", CPEN_ (4,C3,0), 0),
4276 SR_CORE ("spsr_abt", CPEN_ (4,C3,1), 0),
4277 SR_CORE ("spsr_und", CPEN_ (4,C3,2), 0),
4278 SR_CORE ("spsr_fiq", CPEN_ (4,C3,3), 0),
4279 SR_CORE ("spsr_el3", CPEN_ (6,C0,0), 0),
4280 SR_CORE ("elr_el3", CPEN_ (6,C0,1), 0),
4281 SR_CORE ("sp_el2", CPEN_ (6,C1,0), 0),
4282 SR_CORE ("spsr_svc", CPEN_ (0,C0,0), F_DEPRECATED), /* = spsr_el1. */
4283 SR_CORE ("spsr_hyp", CPEN_ (4,C0,0), F_DEPRECATED), /* = spsr_el2. */
4284 SR_CORE ("midr_el1", CPENC (3,0,C0,C0,0), F_REG_READ),
4285 SR_CORE ("ctr_el0", CPENC (3,3,C0,C0,1), F_REG_READ),
4286 SR_CORE ("mpidr_el1", CPENC (3,0,C0,C0,5), F_REG_READ),
4287 SR_CORE ("revidr_el1", CPENC (3,0,C0,C0,6), F_REG_READ),
4288 SR_CORE ("aidr_el1", CPENC (3,1,C0,C0,7), F_REG_READ),
4289 SR_CORE ("dczid_el0", CPENC (3,3,C0,C0,7), F_REG_READ),
4290 SR_CORE ("id_dfr0_el1", CPENC (3,0,C0,C1,2), F_REG_READ),
4291 SR_CORE ("id_pfr0_el1", CPENC (3,0,C0,C1,0), F_REG_READ),
4292 SR_CORE ("id_pfr1_el1", CPENC (3,0,C0,C1,1), F_REG_READ),
4293 SR_ID_PFR2 ("id_pfr2_el1", CPENC (3,0,C0,C3,4), F_REG_READ),
4294 SR_CORE ("id_afr0_el1", CPENC (3,0,C0,C1,3), F_REG_READ),
4295 SR_CORE ("id_mmfr0_el1", CPENC (3,0,C0,C1,4), F_REG_READ),
4296 SR_CORE ("id_mmfr1_el1", CPENC (3,0,C0,C1,5), F_REG_READ),
4297 SR_CORE ("id_mmfr2_el1", CPENC (3,0,C0,C1,6), F_REG_READ),
4298 SR_CORE ("id_mmfr3_el1", CPENC (3,0,C0,C1,7), F_REG_READ),
4299 SR_CORE ("id_mmfr4_el1", CPENC (3,0,C0,C2,6), F_REG_READ),
4300 SR_CORE ("id_isar0_el1", CPENC (3,0,C0,C2,0), F_REG_READ),
4301 SR_CORE ("id_isar1_el1", CPENC (3,0,C0,C2,1), F_REG_READ),
4302 SR_CORE ("id_isar2_el1", CPENC (3,0,C0,C2,2), F_REG_READ),
4303 SR_CORE ("id_isar3_el1", CPENC (3,0,C0,C2,3), F_REG_READ),
4304 SR_CORE ("id_isar4_el1", CPENC (3,0,C0,C2,4), F_REG_READ),
4305 SR_CORE ("id_isar5_el1", CPENC (3,0,C0,C2,5), F_REG_READ),
4306 SR_CORE ("mvfr0_el1", CPENC (3,0,C0,C3,0), F_REG_READ),
4307 SR_CORE ("mvfr1_el1", CPENC (3,0,C0,C3,1), F_REG_READ),
4308 SR_CORE ("mvfr2_el1", CPENC (3,0,C0,C3,2), F_REG_READ),
4309 SR_CORE ("ccsidr_el1", CPENC (3,1,C0,C0,0), F_REG_READ),
4310 SR_CORE ("id_aa64pfr0_el1", CPENC (3,0,C0,C4,0), F_REG_READ),
4311 SR_CORE ("id_aa64pfr1_el1", CPENC (3,0,C0,C4,1), F_REG_READ),
4312 SR_CORE ("id_aa64dfr0_el1", CPENC (3,0,C0,C5,0), F_REG_READ),
4313 SR_CORE ("id_aa64dfr1_el1", CPENC (3,0,C0,C5,1), F_REG_READ),
4314 SR_CORE ("id_aa64isar0_el1", CPENC (3,0,C0,C6,0), F_REG_READ),
4315 SR_CORE ("id_aa64isar1_el1", CPENC (3,0,C0,C6,1), F_REG_READ),
4316 SR_CORE ("id_aa64mmfr0_el1", CPENC (3,0,C0,C7,0), F_REG_READ),
4317 SR_CORE ("id_aa64mmfr1_el1", CPENC (3,0,C0,C7,1), F_REG_READ),
4318 SR_V8_2 ("id_aa64mmfr2_el1", CPENC (3,0,C0,C7,2), F_REG_READ),
4319 SR_CORE ("id_aa64afr0_el1", CPENC (3,0,C0,C5,4), F_REG_READ),
4320 SR_CORE ("id_aa64afr1_el1", CPENC (3,0,C0,C5,5), F_REG_READ),
4321 SR_SVE ("id_aa64zfr0_el1", CPENC (3,0,C0,C4,4), F_REG_READ),
4322 SR_CORE ("clidr_el1", CPENC (3,1,C0,C0,1), F_REG_READ),
4323 SR_CORE ("csselr_el1", CPENC (3,2,C0,C0,0), 0),
4324 SR_CORE ("vpidr_el2", CPENC (3,4,C0,C0,0), 0),
4325 SR_CORE ("vmpidr_el2", CPENC (3,4,C0,C0,5), 0),
4326 SR_CORE ("sctlr_el1", CPENC (3,0,C1,C0,0), 0),
4327 SR_CORE ("sctlr_el2", CPENC (3,4,C1,C0,0), 0),
4328 SR_CORE ("sctlr_el3", CPENC (3,6,C1,C0,0), 0),
4329 SR_V8_1 ("sctlr_el12", CPENC (3,5,C1,C0,0), 0),
4330 SR_CORE ("actlr_el1", CPENC (3,0,C1,C0,1), 0),
4331 SR_CORE ("actlr_el2", CPENC (3,4,C1,C0,1), 0),
4332 SR_CORE ("actlr_el3", CPENC (3,6,C1,C0,1), 0),
4333 SR_CORE ("cpacr_el1", CPENC (3,0,C1,C0,2), 0),
4334 SR_V8_1 ("cpacr_el12", CPENC (3,5,C1,C0,2), 0),
4335 SR_CORE ("cptr_el2", CPENC (3,4,C1,C1,2), 0),
4336 SR_CORE ("cptr_el3", CPENC (3,6,C1,C1,2), 0),
4337 SR_CORE ("scr_el3", CPENC (3,6,C1,C1,0), 0),
4338 SR_CORE ("hcr_el2", CPENC (3,4,C1,C1,0), 0),
4339 SR_CORE ("mdcr_el2", CPENC (3,4,C1,C1,1), 0),
4340 SR_CORE ("mdcr_el3", CPENC (3,6,C1,C3,1), 0),
4341 SR_CORE ("hstr_el2", CPENC (3,4,C1,C1,3), 0),
4342 SR_CORE ("hacr_el2", CPENC (3,4,C1,C1,7), 0),
4343 SR_SVE ("zcr_el1", CPENC (3,0,C1,C2,0), 0),
4344 SR_SVE ("zcr_el12", CPENC (3,5,C1,C2,0), 0),
4345 SR_SVE ("zcr_el2", CPENC (3,4,C1,C2,0), 0),
4346 SR_SVE ("zcr_el3", CPENC (3,6,C1,C2,0), 0),
4347 SR_SVE ("zidr_el1", CPENC (3,0,C0,C0,7), 0),
4348 SR_CORE ("ttbr0_el1", CPENC (3,0,C2,C0,0), 0),
4349 SR_CORE ("ttbr1_el1", CPENC (3,0,C2,C0,1), 0),
4350 SR_V8_A ("ttbr0_el2", CPENC (3,4,C2,C0,0), 0),
4351 SR_V8_1_A ("ttbr1_el2", CPENC (3,4,C2,C0,1), 0),
4352 SR_CORE ("ttbr0_el3", CPENC (3,6,C2,C0,0), 0),
4353 SR_V8_1 ("ttbr0_el12", CPENC (3,5,C2,C0,0), 0),
4354 SR_V8_1 ("ttbr1_el12", CPENC (3,5,C2,C0,1), 0),
4355 SR_V8_A ("vttbr_el2", CPENC (3,4,C2,C1,0), 0),
4356 SR_CORE ("tcr_el1", CPENC (3,0,C2,C0,2), 0),
4357 SR_CORE ("tcr_el2", CPENC (3,4,C2,C0,2), 0),
4358 SR_CORE ("tcr_el3", CPENC (3,6,C2,C0,2), 0),
4359 SR_V8_1 ("tcr_el12", CPENC (3,5,C2,C0,2), 0),
4360 SR_CORE ("vtcr_el2", CPENC (3,4,C2,C1,2), 0),
4361 SR_V8_3 ("apiakeylo_el1", CPENC (3,0,C2,C1,0), 0),
4362 SR_V8_3 ("apiakeyhi_el1", CPENC (3,0,C2,C1,1), 0),
4363 SR_V8_3 ("apibkeylo_el1", CPENC (3,0,C2,C1,2), 0),
4364 SR_V8_3 ("apibkeyhi_el1", CPENC (3,0,C2,C1,3), 0),
4365 SR_V8_3 ("apdakeylo_el1", CPENC (3,0,C2,C2,0), 0),
4366 SR_V8_3 ("apdakeyhi_el1", CPENC (3,0,C2,C2,1), 0),
4367 SR_V8_3 ("apdbkeylo_el1", CPENC (3,0,C2,C2,2), 0),
4368 SR_V8_3 ("apdbkeyhi_el1", CPENC (3,0,C2,C2,3), 0),
4369 SR_V8_3 ("apgakeylo_el1", CPENC (3,0,C2,C3,0), 0),
4370 SR_V8_3 ("apgakeyhi_el1", CPENC (3,0,C2,C3,1), 0),
4371 SR_CORE ("afsr0_el1", CPENC (3,0,C5,C1,0), 0),
4372 SR_CORE ("afsr1_el1", CPENC (3,0,C5,C1,1), 0),
4373 SR_CORE ("afsr0_el2", CPENC (3,4,C5,C1,0), 0),
4374 SR_CORE ("afsr1_el2", CPENC (3,4,C5,C1,1), 0),
4375 SR_CORE ("afsr0_el3", CPENC (3,6,C5,C1,0), 0),
4376 SR_V8_1 ("afsr0_el12", CPENC (3,5,C5,C1,0), 0),
4377 SR_CORE ("afsr1_el3", CPENC (3,6,C5,C1,1), 0),
4378 SR_V8_1 ("afsr1_el12", CPENC (3,5,C5,C1,1), 0),
4379 SR_CORE ("esr_el1", CPENC (3,0,C5,C2,0), 0),
4380 SR_CORE ("esr_el2", CPENC (3,4,C5,C2,0), 0),
4381 SR_CORE ("esr_el3", CPENC (3,6,C5,C2,0), 0),
4382 SR_V8_1 ("esr_el12", CPENC (3,5,C5,C2,0), 0),
4383 SR_RAS ("vsesr_el2", CPENC (3,4,C5,C2,3), 0),
4384 SR_CORE ("fpexc32_el2", CPENC (3,4,C5,C3,0), 0),
4385 SR_RAS ("erridr_el1", CPENC (3,0,C5,C3,0), F_REG_READ),
4386 SR_RAS ("errselr_el1", CPENC (3,0,C5,C3,1), 0),
4387 SR_RAS ("erxfr_el1", CPENC (3,0,C5,C4,0), F_REG_READ),
4388 SR_RAS ("erxctlr_el1", CPENC (3,0,C5,C4,1), 0),
4389 SR_RAS ("erxstatus_el1", CPENC (3,0,C5,C4,2), 0),
4390 SR_RAS ("erxaddr_el1", CPENC (3,0,C5,C4,3), 0),
4391 SR_RAS ("erxmisc0_el1", CPENC (3,0,C5,C5,0), 0),
4392 SR_RAS ("erxmisc1_el1", CPENC (3,0,C5,C5,1), 0),
4393 SR_CORE ("far_el1", CPENC (3,0,C6,C0,0), 0),
4394 SR_CORE ("far_el2", CPENC (3,4,C6,C0,0), 0),
4395 SR_CORE ("far_el3", CPENC (3,6,C6,C0,0), 0),
4396 SR_V8_1 ("far_el12", CPENC (3,5,C6,C0,0), 0),
4397 SR_CORE ("hpfar_el2", CPENC (3,4,C6,C0,4), 0),
4398 SR_CORE ("par_el1", CPENC (3,0,C7,C4,0), 0),
4399 SR_CORE ("mair_el1", CPENC (3,0,C10,C2,0), 0),
4400 SR_CORE ("mair_el2", CPENC (3,4,C10,C2,0), 0),
4401 SR_CORE ("mair_el3", CPENC (3,6,C10,C2,0), 0),
4402 SR_V8_1 ("mair_el12", CPENC (3,5,C10,C2,0), 0),
4403 SR_CORE ("amair_el1", CPENC (3,0,C10,C3,0), 0),
4404 SR_CORE ("amair_el2", CPENC (3,4,C10,C3,0), 0),
4405 SR_CORE ("amair_el3", CPENC (3,6,C10,C3,0), 0),
4406 SR_V8_1 ("amair_el12", CPENC (3,5,C10,C3,0), 0),
4407 SR_CORE ("vbar_el1", CPENC (3,0,C12,C0,0), 0),
4408 SR_CORE ("vbar_el2", CPENC (3,4,C12,C0,0), 0),
4409 SR_CORE ("vbar_el3", CPENC (3,6,C12,C0,0), 0),
4410 SR_V8_1 ("vbar_el12", CPENC (3,5,C12,C0,0), 0),
4411 SR_CORE ("rvbar_el1", CPENC (3,0,C12,C0,1), F_REG_READ),
4412 SR_CORE ("rvbar_el2", CPENC (3,4,C12,C0,1), F_REG_READ),
4413 SR_CORE ("rvbar_el3", CPENC (3,6,C12,C0,1), F_REG_READ),
4414 SR_CORE ("rmr_el1", CPENC (3,0,C12,C0,2), 0),
4415 SR_CORE ("rmr_el2", CPENC (3,4,C12,C0,2), 0),
4416 SR_CORE ("rmr_el3", CPENC (3,6,C12,C0,2), 0),
4417 SR_CORE ("isr_el1", CPENC (3,0,C12,C1,0), F_REG_READ),
4418 SR_RAS ("disr_el1", CPENC (3,0,C12,C1,1), 0),
4419 SR_RAS ("vdisr_el2", CPENC (3,4,C12,C1,1), 0),
4420 SR_CORE ("contextidr_el1", CPENC (3,0,C13,C0,1), 0),
4421 SR_V8_1 ("contextidr_el2", CPENC (3,4,C13,C0,1), 0),
4422 SR_V8_1 ("contextidr_el12", CPENC (3,5,C13,C0,1), 0),
4423 SR_RNG ("rndr", CPENC (3,3,C2,C4,0), F_REG_READ),
4424 SR_RNG ("rndrrs", CPENC (3,3,C2,C4,1), F_REG_READ),
4425 SR_MEMTAG ("tco", CPENC (3,3,C4,C2,7), 0),
4426 SR_MEMTAG ("tfsre0_el1", CPENC (3,0,C5,C6,1), 0),
4427 SR_MEMTAG ("tfsr_el1", CPENC (3,0,C5,C6,0), 0),
4428 SR_MEMTAG ("tfsr_el2", CPENC (3,4,C5,C6,0), 0),
4429 SR_MEMTAG ("tfsr_el3", CPENC (3,6,C5,C6,0), 0),
4430 SR_MEMTAG ("tfsr_el12", CPENC (3,5,C5,C6,0), 0),
4431 SR_MEMTAG ("rgsr_el1", CPENC (3,0,C1,C0,5), 0),
4432 SR_MEMTAG ("gcr_el1", CPENC (3,0,C1,C0,6), 0),
4433 SR_MEMTAG ("gmid_el1", CPENC (3,1,C0,C0,4), F_REG_READ),
4434 SR_CORE ("tpidr_el0", CPENC (3,3,C13,C0,2), 0),
4435 SR_CORE ("tpidrro_el0", CPENC (3,3,C13,C0,3), 0),
4436 SR_CORE ("tpidr_el1", CPENC (3,0,C13,C0,4), 0),
4437 SR_CORE ("tpidr_el2", CPENC (3,4,C13,C0,2), 0),
4438 SR_CORE ("tpidr_el3", CPENC (3,6,C13,C0,2), 0),
4439 SR_SCXTNUM ("scxtnum_el0", CPENC (3,3,C13,C0,7), 0),
4440 SR_SCXTNUM ("scxtnum_el1", CPENC (3,0,C13,C0,7), 0),
4441 SR_SCXTNUM ("scxtnum_el2", CPENC (3,4,C13,C0,7), 0),
4442 SR_SCXTNUM ("scxtnum_el12", CPENC (3,5,C13,C0,7), 0),
4443 SR_SCXTNUM ("scxtnum_el3", CPENC (3,6,C13,C0,7), 0),
4444 SR_CORE ("teecr32_el1", CPENC (2,2,C0, C0,0), 0), /* See section 3.9.7.1. */
4445 SR_CORE ("cntfrq_el0", CPENC (3,3,C14,C0,0), 0),
4446 SR_CORE ("cntpct_el0", CPENC (3,3,C14,C0,1), F_REG_READ),
4447 SR_CORE ("cntvct_el0", CPENC (3,3,C14,C0,2), F_REG_READ),
4448 SR_CORE ("cntvoff_el2", CPENC (3,4,C14,C0,3), 0),
4449 SR_CORE ("cntkctl_el1", CPENC (3,0,C14,C1,0), 0),
4450 SR_V8_1 ("cntkctl_el12", CPENC (3,5,C14,C1,0), 0),
4451 SR_CORE ("cnthctl_el2", CPENC (3,4,C14,C1,0), 0),
4452 SR_CORE ("cntp_tval_el0", CPENC (3,3,C14,C2,0), 0),
4453 SR_V8_1 ("cntp_tval_el02", CPENC (3,5,C14,C2,0), 0),
4454 SR_CORE ("cntp_ctl_el0", CPENC (3,3,C14,C2,1), 0),
4455 SR_V8_1 ("cntp_ctl_el02", CPENC (3,5,C14,C2,1), 0),
4456 SR_CORE ("cntp_cval_el0", CPENC (3,3,C14,C2,2), 0),
4457 SR_V8_1 ("cntp_cval_el02", CPENC (3,5,C14,C2,2), 0),
4458 SR_CORE ("cntv_tval_el0", CPENC (3,3,C14,C3,0), 0),
4459 SR_V8_1 ("cntv_tval_el02", CPENC (3,5,C14,C3,0), 0),
4460 SR_CORE ("cntv_ctl_el0", CPENC (3,3,C14,C3,1), 0),
4461 SR_V8_1 ("cntv_ctl_el02", CPENC (3,5,C14,C3,1), 0),
4462 SR_CORE ("cntv_cval_el0", CPENC (3,3,C14,C3,2), 0),
4463 SR_V8_1 ("cntv_cval_el02", CPENC (3,5,C14,C3,2), 0),
4464 SR_CORE ("cnthp_tval_el2", CPENC (3,4,C14,C2,0), 0),
4465 SR_CORE ("cnthp_ctl_el2", CPENC (3,4,C14,C2,1), 0),
4466 SR_CORE ("cnthp_cval_el2", CPENC (3,4,C14,C2,2), 0),
4467 SR_CORE ("cntps_tval_el1", CPENC (3,7,C14,C2,0), 0),
4468 SR_CORE ("cntps_ctl_el1", CPENC (3,7,C14,C2,1), 0),
4469 SR_CORE ("cntps_cval_el1", CPENC (3,7,C14,C2,2), 0),
4470 SR_V8_1 ("cnthv_tval_el2", CPENC (3,4,C14,C3,0), 0),
4471 SR_V8_1 ("cnthv_ctl_el2", CPENC (3,4,C14,C3,1), 0),
4472 SR_V8_1 ("cnthv_cval_el2", CPENC (3,4,C14,C3,2), 0),
4473 SR_CORE ("dacr32_el2", CPENC (3,4,C3,C0,0), 0),
4474 SR_CORE ("ifsr32_el2", CPENC (3,4,C5,C0,1), 0),
4475 SR_CORE ("teehbr32_el1", CPENC (2,2,C1,C0,0), 0),
4476 SR_CORE ("sder32_el3", CPENC (3,6,C1,C1,1), 0),
4477 SR_CORE ("mdscr_el1", CPENC (2,0,C0,C2,2), 0),
4478 SR_CORE ("mdccsr_el0", CPENC (2,3,C0,C1,0), F_REG_READ),
4479 SR_CORE ("mdccint_el1", CPENC (2,0,C0,C2,0), 0),
4480 SR_CORE ("dbgdtr_el0", CPENC (2,3,C0,C4,0), 0),
4481 SR_CORE ("dbgdtrrx_el0", CPENC (2,3,C0,C5,0), F_REG_READ),
4482 SR_CORE ("dbgdtrtx_el0", CPENC (2,3,C0,C5,0), F_REG_WRITE),
4483 SR_CORE ("osdtrrx_el1", CPENC (2,0,C0,C0,2), 0),
4484 SR_CORE ("osdtrtx_el1", CPENC (2,0,C0,C3,2), 0),
4485 SR_CORE ("oseccr_el1", CPENC (2,0,C0,C6,2), 0),
4486 SR_CORE ("dbgvcr32_el2", CPENC (2,4,C0,C7,0), 0),
4487 SR_CORE ("dbgbvr0_el1", CPENC (2,0,C0,C0,4), 0),
4488 SR_CORE ("dbgbvr1_el1", CPENC (2,0,C0,C1,4), 0),
4489 SR_CORE ("dbgbvr2_el1", CPENC (2,0,C0,C2,4), 0),
4490 SR_CORE ("dbgbvr3_el1", CPENC (2,0,C0,C3,4), 0),
4491 SR_CORE ("dbgbvr4_el1", CPENC (2,0,C0,C4,4), 0),
4492 SR_CORE ("dbgbvr5_el1", CPENC (2,0,C0,C5,4), 0),
4493 SR_CORE ("dbgbvr6_el1", CPENC (2,0,C0,C6,4), 0),
4494 SR_CORE ("dbgbvr7_el1", CPENC (2,0,C0,C7,4), 0),
4495 SR_CORE ("dbgbvr8_el1", CPENC (2,0,C0,C8,4), 0),
4496 SR_CORE ("dbgbvr9_el1", CPENC (2,0,C0,C9,4), 0),
4497 SR_CORE ("dbgbvr10_el1", CPENC (2,0,C0,C10,4), 0),
4498 SR_CORE ("dbgbvr11_el1", CPENC (2,0,C0,C11,4), 0),
4499 SR_CORE ("dbgbvr12_el1", CPENC (2,0,C0,C12,4), 0),
4500 SR_CORE ("dbgbvr13_el1", CPENC (2,0,C0,C13,4), 0),
4501 SR_CORE ("dbgbvr14_el1", CPENC (2,0,C0,C14,4), 0),
4502 SR_CORE ("dbgbvr15_el1", CPENC (2,0,C0,C15,4), 0),
4503 SR_CORE ("dbgbcr0_el1", CPENC (2,0,C0,C0,5), 0),
4504 SR_CORE ("dbgbcr1_el1", CPENC (2,0,C0,C1,5), 0),
4505 SR_CORE ("dbgbcr2_el1", CPENC (2,0,C0,C2,5), 0),
4506 SR_CORE ("dbgbcr3_el1", CPENC (2,0,C0,C3,5), 0),
4507 SR_CORE ("dbgbcr4_el1", CPENC (2,0,C0,C4,5), 0),
4508 SR_CORE ("dbgbcr5_el1", CPENC (2,0,C0,C5,5), 0),
4509 SR_CORE ("dbgbcr6_el1", CPENC (2,0,C0,C6,5), 0),
4510 SR_CORE ("dbgbcr7_el1", CPENC (2,0,C0,C7,5), 0),
4511 SR_CORE ("dbgbcr8_el1", CPENC (2,0,C0,C8,5), 0),
4512 SR_CORE ("dbgbcr9_el1", CPENC (2,0,C0,C9,5), 0),
4513 SR_CORE ("dbgbcr10_el1", CPENC (2,0,C0,C10,5), 0),
4514 SR_CORE ("dbgbcr11_el1", CPENC (2,0,C0,C11,5), 0),
4515 SR_CORE ("dbgbcr12_el1", CPENC (2,0,C0,C12,5), 0),
4516 SR_CORE ("dbgbcr13_el1", CPENC (2,0,C0,C13,5), 0),
4517 SR_CORE ("dbgbcr14_el1", CPENC (2,0,C0,C14,5), 0),
4518 SR_CORE ("dbgbcr15_el1", CPENC (2,0,C0,C15,5), 0),
4519 SR_CORE ("dbgwvr0_el1", CPENC (2,0,C0,C0,6), 0),
4520 SR_CORE ("dbgwvr1_el1", CPENC (2,0,C0,C1,6), 0),
4521 SR_CORE ("dbgwvr2_el1", CPENC (2,0,C0,C2,6), 0),
4522 SR_CORE ("dbgwvr3_el1", CPENC (2,0,C0,C3,6), 0),
4523 SR_CORE ("dbgwvr4_el1", CPENC (2,0,C0,C4,6), 0),
4524 SR_CORE ("dbgwvr5_el1", CPENC (2,0,C0,C5,6), 0),
4525 SR_CORE ("dbgwvr6_el1", CPENC (2,0,C0,C6,6), 0),
4526 SR_CORE ("dbgwvr7_el1", CPENC (2,0,C0,C7,6), 0),
4527 SR_CORE ("dbgwvr8_el1", CPENC (2,0,C0,C8,6), 0),
4528 SR_CORE ("dbgwvr9_el1", CPENC (2,0,C0,C9,6), 0),
4529 SR_CORE ("dbgwvr10_el1", CPENC (2,0,C0,C10,6), 0),
4530 SR_CORE ("dbgwvr11_el1", CPENC (2,0,C0,C11,6), 0),
4531 SR_CORE ("dbgwvr12_el1", CPENC (2,0,C0,C12,6), 0),
4532 SR_CORE ("dbgwvr13_el1", CPENC (2,0,C0,C13,6), 0),
4533 SR_CORE ("dbgwvr14_el1", CPENC (2,0,C0,C14,6), 0),
4534 SR_CORE ("dbgwvr15_el1", CPENC (2,0,C0,C15,6), 0),
4535 SR_CORE ("dbgwcr0_el1", CPENC (2,0,C0,C0,7), 0),
4536 SR_CORE ("dbgwcr1_el1", CPENC (2,0,C0,C1,7), 0),
4537 SR_CORE ("dbgwcr2_el1", CPENC (2,0,C0,C2,7), 0),
4538 SR_CORE ("dbgwcr3_el1", CPENC (2,0,C0,C3,7), 0),
4539 SR_CORE ("dbgwcr4_el1", CPENC (2,0,C0,C4,7), 0),
4540 SR_CORE ("dbgwcr5_el1", CPENC (2,0,C0,C5,7), 0),
4541 SR_CORE ("dbgwcr6_el1", CPENC (2,0,C0,C6,7), 0),
4542 SR_CORE ("dbgwcr7_el1", CPENC (2,0,C0,C7,7), 0),
4543 SR_CORE ("dbgwcr8_el1", CPENC (2,0,C0,C8,7), 0),
4544 SR_CORE ("dbgwcr9_el1", CPENC (2,0,C0,C9,7), 0),
4545 SR_CORE ("dbgwcr10_el1", CPENC (2,0,C0,C10,7), 0),
4546 SR_CORE ("dbgwcr11_el1", CPENC (2,0,C0,C11,7), 0),
4547 SR_CORE ("dbgwcr12_el1", CPENC (2,0,C0,C12,7), 0),
4548 SR_CORE ("dbgwcr13_el1", CPENC (2,0,C0,C13,7), 0),
4549 SR_CORE ("dbgwcr14_el1", CPENC (2,0,C0,C14,7), 0),
4550 SR_CORE ("dbgwcr15_el1", CPENC (2,0,C0,C15,7), 0),
4551 SR_CORE ("mdrar_el1", CPENC (2,0,C1,C0,0), F_REG_READ),
4552 SR_CORE ("oslar_el1", CPENC (2,0,C1,C0,4), F_REG_WRITE),
4553 SR_CORE ("oslsr_el1", CPENC (2,0,C1,C1,4), F_REG_READ),
4554 SR_CORE ("osdlr_el1", CPENC (2,0,C1,C3,4), 0),
4555 SR_CORE ("dbgprcr_el1", CPENC (2,0,C1,C4,4), 0),
4556 SR_CORE ("dbgclaimset_el1", CPENC (2,0,C7,C8,6), 0),
4557 SR_CORE ("dbgclaimclr_el1", CPENC (2,0,C7,C9,6), 0),
4558 SR_CORE ("dbgauthstatus_el1", CPENC (2,0,C7,C14,6), F_REG_READ),
4559 SR_PROFILE ("pmblimitr_el1", CPENC (3,0,C9,C10,0), 0),
4560 SR_PROFILE ("pmbptr_el1", CPENC (3,0,C9,C10,1), 0),
4561 SR_PROFILE ("pmbsr_el1", CPENC (3,0,C9,C10,3), 0),
4562 SR_PROFILE ("pmbidr_el1", CPENC (3,0,C9,C10,7), F_REG_READ),
4563 SR_PROFILE ("pmscr_el1", CPENC (3,0,C9,C9,0), 0),
4564 SR_PROFILE ("pmsicr_el1", CPENC (3,0,C9,C9,2), 0),
4565 SR_PROFILE ("pmsirr_el1", CPENC (3,0,C9,C9,3), 0),
4566 SR_PROFILE ("pmsfcr_el1", CPENC (3,0,C9,C9,4), 0),
4567 SR_PROFILE ("pmsevfr_el1", CPENC (3,0,C9,C9,5), 0),
4568 SR_PROFILE ("pmslatfr_el1", CPENC (3,0,C9,C9,6), 0),
4569 SR_PROFILE ("pmsidr_el1", CPENC (3,0,C9,C9,7), 0),
4570 SR_PROFILE ("pmscr_el2", CPENC (3,4,C9,C9,0), 0),
4571 SR_PROFILE ("pmscr_el12", CPENC (3,5,C9,C9,0), 0),
4572 SR_CORE ("pmcr_el0", CPENC (3,3,C9,C12,0), 0),
4573 SR_CORE ("pmcntenset_el0", CPENC (3,3,C9,C12,1), 0),
4574 SR_CORE ("pmcntenclr_el0", CPENC (3,3,C9,C12,2), 0),
4575 SR_CORE ("pmovsclr_el0", CPENC (3,3,C9,C12,3), 0),
4576 SR_CORE ("pmswinc_el0", CPENC (3,3,C9,C12,4), F_REG_WRITE),
4577 SR_CORE ("pmselr_el0", CPENC (3,3,C9,C12,5), 0),
4578 SR_CORE ("pmceid0_el0", CPENC (3,3,C9,C12,6), F_REG_READ),
4579 SR_CORE ("pmceid1_el0", CPENC (3,3,C9,C12,7), F_REG_READ),
4580 SR_CORE ("pmccntr_el0", CPENC (3,3,C9,C13,0), 0),
4581 SR_CORE ("pmxevtyper_el0", CPENC (3,3,C9,C13,1), 0),
4582 SR_CORE ("pmxevcntr_el0", CPENC (3,3,C9,C13,2), 0),
4583 SR_CORE ("pmuserenr_el0", CPENC (3,3,C9,C14,0), 0),
4584 SR_CORE ("pmintenset_el1", CPENC (3,0,C9,C14,1), 0),
4585 SR_CORE ("pmintenclr_el1", CPENC (3,0,C9,C14,2), 0),
4586 SR_CORE ("pmovsset_el0", CPENC (3,3,C9,C14,3), 0),
4587 SR_CORE ("pmevcntr0_el0", CPENC (3,3,C14,C8,0), 0),
4588 SR_CORE ("pmevcntr1_el0", CPENC (3,3,C14,C8,1), 0),
4589 SR_CORE ("pmevcntr2_el0", CPENC (3,3,C14,C8,2), 0),
4590 SR_CORE ("pmevcntr3_el0", CPENC (3,3,C14,C8,3), 0),
4591 SR_CORE ("pmevcntr4_el0", CPENC (3,3,C14,C8,4), 0),
4592 SR_CORE ("pmevcntr5_el0", CPENC (3,3,C14,C8,5), 0),
4593 SR_CORE ("pmevcntr6_el0", CPENC (3,3,C14,C8,6), 0),
4594 SR_CORE ("pmevcntr7_el0", CPENC (3,3,C14,C8,7), 0),
4595 SR_CORE ("pmevcntr8_el0", CPENC (3,3,C14,C9,0), 0),
4596 SR_CORE ("pmevcntr9_el0", CPENC (3,3,C14,C9,1), 0),
4597 SR_CORE ("pmevcntr10_el0", CPENC (3,3,C14,C9,2), 0),
4598 SR_CORE ("pmevcntr11_el0", CPENC (3,3,C14,C9,3), 0),
4599 SR_CORE ("pmevcntr12_el0", CPENC (3,3,C14,C9,4), 0),
4600 SR_CORE ("pmevcntr13_el0", CPENC (3,3,C14,C9,5), 0),
4601 SR_CORE ("pmevcntr14_el0", CPENC (3,3,C14,C9,6), 0),
4602 SR_CORE ("pmevcntr15_el0", CPENC (3,3,C14,C9,7), 0),
4603 SR_CORE ("pmevcntr16_el0", CPENC (3,3,C14,C10,0), 0),
4604 SR_CORE ("pmevcntr17_el0", CPENC (3,3,C14,C10,1), 0),
4605 SR_CORE ("pmevcntr18_el0", CPENC (3,3,C14,C10,2), 0),
4606 SR_CORE ("pmevcntr19_el0", CPENC (3,3,C14,C10,3), 0),
4607 SR_CORE ("pmevcntr20_el0", CPENC (3,3,C14,C10,4), 0),
4608 SR_CORE ("pmevcntr21_el0", CPENC (3,3,C14,C10,5), 0),
4609 SR_CORE ("pmevcntr22_el0", CPENC (3,3,C14,C10,6), 0),
4610 SR_CORE ("pmevcntr23_el0", CPENC (3,3,C14,C10,7), 0),
4611 SR_CORE ("pmevcntr24_el0", CPENC (3,3,C14,C11,0), 0),
4612 SR_CORE ("pmevcntr25_el0", CPENC (3,3,C14,C11,1), 0),
4613 SR_CORE ("pmevcntr26_el0", CPENC (3,3,C14,C11,2), 0),
4614 SR_CORE ("pmevcntr27_el0", CPENC (3,3,C14,C11,3), 0),
4615 SR_CORE ("pmevcntr28_el0", CPENC (3,3,C14,C11,4), 0),
4616 SR_CORE ("pmevcntr29_el0", CPENC (3,3,C14,C11,5), 0),
4617 SR_CORE ("pmevcntr30_el0", CPENC (3,3,C14,C11,6), 0),
4618 SR_CORE ("pmevtyper0_el0", CPENC (3,3,C14,C12,0), 0),
4619 SR_CORE ("pmevtyper1_el0", CPENC (3,3,C14,C12,1), 0),
4620 SR_CORE ("pmevtyper2_el0", CPENC (3,3,C14,C12,2), 0),
4621 SR_CORE ("pmevtyper3_el0", CPENC (3,3,C14,C12,3), 0),
4622 SR_CORE ("pmevtyper4_el0", CPENC (3,3,C14,C12,4), 0),
4623 SR_CORE ("pmevtyper5_el0", CPENC (3,3,C14,C12,5), 0),
4624 SR_CORE ("pmevtyper6_el0", CPENC (3,3,C14,C12,6), 0),
4625 SR_CORE ("pmevtyper7_el0", CPENC (3,3,C14,C12,7), 0),
4626 SR_CORE ("pmevtyper8_el0", CPENC (3,3,C14,C13,0), 0),
4627 SR_CORE ("pmevtyper9_el0", CPENC (3,3,C14,C13,1), 0),
4628 SR_CORE ("pmevtyper10_el0", CPENC (3,3,C14,C13,2), 0),
4629 SR_CORE ("pmevtyper11_el0", CPENC (3,3,C14,C13,3), 0),
4630 SR_CORE ("pmevtyper12_el0", CPENC (3,3,C14,C13,4), 0),
4631 SR_CORE ("pmevtyper13_el0", CPENC (3,3,C14,C13,5), 0),
4632 SR_CORE ("pmevtyper14_el0", CPENC (3,3,C14,C13,6), 0),
4633 SR_CORE ("pmevtyper15_el0", CPENC (3,3,C14,C13,7), 0),
4634 SR_CORE ("pmevtyper16_el0", CPENC (3,3,C14,C14,0), 0),
4635 SR_CORE ("pmevtyper17_el0", CPENC (3,3,C14,C14,1), 0),
4636 SR_CORE ("pmevtyper18_el0", CPENC (3,3,C14,C14,2), 0),
4637 SR_CORE ("pmevtyper19_el0", CPENC (3,3,C14,C14,3), 0),
4638 SR_CORE ("pmevtyper20_el0", CPENC (3,3,C14,C14,4), 0),
4639 SR_CORE ("pmevtyper21_el0", CPENC (3,3,C14,C14,5), 0),
4640 SR_CORE ("pmevtyper22_el0", CPENC (3,3,C14,C14,6), 0),
4641 SR_CORE ("pmevtyper23_el0", CPENC (3,3,C14,C14,7), 0),
4642 SR_CORE ("pmevtyper24_el0", CPENC (3,3,C14,C15,0), 0),
4643 SR_CORE ("pmevtyper25_el0", CPENC (3,3,C14,C15,1), 0),
4644 SR_CORE ("pmevtyper26_el0", CPENC (3,3,C14,C15,2), 0),
4645 SR_CORE ("pmevtyper27_el0", CPENC (3,3,C14,C15,3), 0),
4646 SR_CORE ("pmevtyper28_el0", CPENC (3,3,C14,C15,4), 0),
4647 SR_CORE ("pmevtyper29_el0", CPENC (3,3,C14,C15,5), 0),
4648 SR_CORE ("pmevtyper30_el0", CPENC (3,3,C14,C15,6), 0),
4649 SR_CORE ("pmccfiltr_el0", CPENC (3,3,C14,C15,7), 0),
4650
4651 SR_V8_4 ("dit", CPEN_ (3,C2,5), 0),
4652 SR_V8_4 ("vstcr_el2", CPENC (3,4,C2,C6,2), 0),
4653 SR_V8_4_A ("vsttbr_el2", CPENC (3,4,C2,C6,0), 0),
4654 SR_V8_4 ("cnthvs_tval_el2", CPENC (3,4,C14,C4,0), 0),
4655 SR_V8_4 ("cnthvs_cval_el2", CPENC (3,4,C14,C4,2), 0),
4656 SR_V8_4 ("cnthvs_ctl_el2", CPENC (3,4,C14,C4,1), 0),
4657 SR_V8_4 ("cnthps_tval_el2", CPENC (3,4,C14,C5,0), 0),
4658 SR_V8_4 ("cnthps_cval_el2", CPENC (3,4,C14,C5,2), 0),
4659 SR_V8_4 ("cnthps_ctl_el2", CPENC (3,4,C14,C5,1), 0),
4660 SR_V8_4 ("sder32_el2", CPENC (3,4,C1,C3,1), 0),
4661 SR_V8_4 ("vncr_el2", CPENC (3,4,C2,C2,0), 0),
4662
4663 SR_CORE ("mpam0_el1", CPENC (3,0,C10,C5,1), 0),
4664 SR_CORE ("mpam1_el1", CPENC (3,0,C10,C5,0), 0),
4665 SR_CORE ("mpam1_el12", CPENC (3,5,C10,C5,0), 0),
4666 SR_CORE ("mpam2_el2", CPENC (3,4,C10,C5,0), 0),
4667 SR_CORE ("mpam3_el3", CPENC (3,6,C10,C5,0), 0),
4668 SR_CORE ("mpamhcr_el2", CPENC (3,4,C10,C4,0), 0),
4669 SR_CORE ("mpamidr_el1", CPENC (3,0,C10,C4,4), F_REG_READ),
4670 SR_CORE ("mpamvpm0_el2", CPENC (3,4,C10,C6,0), 0),
4671 SR_CORE ("mpamvpm1_el2", CPENC (3,4,C10,C6,1), 0),
4672 SR_CORE ("mpamvpm2_el2", CPENC (3,4,C10,C6,2), 0),
4673 SR_CORE ("mpamvpm3_el2", CPENC (3,4,C10,C6,3), 0),
4674 SR_CORE ("mpamvpm4_el2", CPENC (3,4,C10,C6,4), 0),
4675 SR_CORE ("mpamvpm5_el2", CPENC (3,4,C10,C6,5), 0),
4676 SR_CORE ("mpamvpm6_el2", CPENC (3,4,C10,C6,6), 0),
4677 SR_CORE ("mpamvpm7_el2", CPENC (3,4,C10,C6,7), 0),
4678 SR_CORE ("mpamvpmv_el2", CPENC (3,4,C10,C4,1), 0),
4679
4680 SR_V8_R ("mpuir_el1", CPENC (3,0,C0,C0,4), F_REG_READ),
4681 SR_V8_R ("mpuir_el2", CPENC (3,4,C0,C0,4), F_REG_READ),
4682 SR_V8_R ("prbar_el1", CPENC (3,0,C6,C8,0), 0),
4683 SR_V8_R ("prbar_el2", CPENC (3,4,C6,C8,0), 0),
4684
4685 #define ENC_BARLAR(x,n,lar) \
4686 CPENC (3, (x-1) << 2, C6, 8 | (n >> 1), ((n & 1) << 2) | lar)
4687
4688 #define PRBARn_ELx(x,n) SR_V8_R ("prbar" #n "_el" #x, ENC_BARLAR (x,n,0), 0)
4689 #define PRLARn_ELx(x,n) SR_V8_R ("prlar" #n "_el" #x, ENC_BARLAR (x,n,1), 0)
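/* For illustration: ENC_BARLAR (1,1,0) is CPENC (3, 0, C6, C8, 4), so
   PRBARn_ELx (1,1) yields the entry "prbar1_el1" with encoding s3_0_c6_c8_4;
   setting the LAR argument to 1 selects the matching prlar<n>_el<x>
   register instead.  */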
4690
4691 SR_EXPAND_EL12 (PRBARn_ELx)
4692 SR_V8_R ("prenr_el1", CPENC (3,0,C6,C1,1), 0),
4693 SR_V8_R ("prenr_el2", CPENC (3,4,C6,C1,1), 0),
4694 SR_V8_R ("prlar_el1", CPENC (3,0,C6,C8,1), 0),
4695 SR_V8_R ("prlar_el2", CPENC (3,4,C6,C8,1), 0),
4696 SR_EXPAND_EL12 (PRLARn_ELx)
4697 SR_V8_R ("prselr_el1", CPENC (3,0,C6,C2,1), 0),
4698 SR_V8_R ("prselr_el2", CPENC (3,4,C6,C2,1), 0),
4699 SR_V8_R ("vsctlr_el2", CPENC (3,4,C2,C0,0), 0),
4700
4701 SR_CORE("trbbaser_el1", CPENC (3,0,C9,C11,2), 0),
4702 SR_CORE("trbidr_el1", CPENC (3,0,C9,C11,7), F_REG_READ),
4703 SR_CORE("trblimitr_el1", CPENC (3,0,C9,C11,0), 0),
4704 SR_CORE("trbmar_el1", CPENC (3,0,C9,C11,4), 0),
4705 SR_CORE("trbptr_el1", CPENC (3,0,C9,C11,1), 0),
4706 SR_CORE("trbsr_el1", CPENC (3,0,C9,C11,3), 0),
4707 SR_CORE("trbtrg_el1", CPENC (3,0,C9,C11,6), 0),
4708
4709 SR_CORE ("trcextinselr0", CPENC (2,1,C0,C8,4), 0),
4710 SR_CORE ("trcextinselr1", CPENC (2,1,C0,C9,4), 0),
4711 SR_CORE ("trcextinselr2", CPENC (2,1,C0,C10,4), 0),
4712 SR_CORE ("trcextinselr3", CPENC (2,1,C0,C11,4), 0),
4713 SR_CORE ("trcrsr", CPENC (2,1,C0,C10,0), 0),
4714
4715 SR_CORE ("trcauthstatus", CPENC (2,1,C7,C14,6), F_REG_READ),
4716 SR_CORE ("trccidr0", CPENC (2,1,C7,C12,7), F_REG_READ),
4717 SR_CORE ("trccidr1", CPENC (2,1,C7,C13,7), F_REG_READ),
4718 SR_CORE ("trccidr2", CPENC (2,1,C7,C14,7), F_REG_READ),
4719 SR_CORE ("trccidr3", CPENC (2,1,C7,C15,7), F_REG_READ),
4720 SR_CORE ("trcdevaff0", CPENC (2,1,C7,C10,6), F_REG_READ),
4721 SR_CORE ("trcdevaff1", CPENC (2,1,C7,C11,6), F_REG_READ),
4722 SR_CORE ("trcdevarch", CPENC (2,1,C7,C15,6), F_REG_READ),
4723 SR_CORE ("trcdevid", CPENC (2,1,C7,C2,7), F_REG_READ),
4724 SR_CORE ("trcdevtype", CPENC (2,1,C7,C3,7), F_REG_READ),
4725 SR_CORE ("trcidr0", CPENC (2,1,C0,C8,7), F_REG_READ),
4726 SR_CORE ("trcidr1", CPENC (2,1,C0,C9,7), F_REG_READ),
4727 SR_CORE ("trcidr2", CPENC (2,1,C0,C10,7), F_REG_READ),
4728 SR_CORE ("trcidr3", CPENC (2,1,C0,C11,7), F_REG_READ),
4729 SR_CORE ("trcidr4", CPENC (2,1,C0,C12,7), F_REG_READ),
4730 SR_CORE ("trcidr5", CPENC (2,1,C0,C13,7), F_REG_READ),
4731 SR_CORE ("trcidr6", CPENC (2,1,C0,C14,7), F_REG_READ),
4732 SR_CORE ("trcidr7", CPENC (2,1,C0,C15,7), F_REG_READ),
4733 SR_CORE ("trcidr8", CPENC (2,1,C0,C0,6), F_REG_READ),
4734 SR_CORE ("trcidr9", CPENC (2,1,C0,C1,6), F_REG_READ),
4735 SR_CORE ("trcidr10", CPENC (2,1,C0,C2,6), F_REG_READ),
4736 SR_CORE ("trcidr11", CPENC (2,1,C0,C3,6), F_REG_READ),
4737 SR_CORE ("trcidr12", CPENC (2,1,C0,C4,6), F_REG_READ),
4738 SR_CORE ("trcidr13", CPENC (2,1,C0,C5,6), F_REG_READ),
4739 SR_CORE ("trclsr", CPENC (2,1,C7,C13,6), F_REG_READ),
4740 SR_CORE ("trcoslsr", CPENC (2,1,C1,C1,4), F_REG_READ),
4741 SR_CORE ("trcpdsr", CPENC (2,1,C1,C5,4), F_REG_READ),
4742 SR_CORE ("trcpidr0", CPENC (2,1,C7,C8,7), F_REG_READ),
4743 SR_CORE ("trcpidr1", CPENC (2,1,C7,C9,7), F_REG_READ),
4744 SR_CORE ("trcpidr2", CPENC (2,1,C7,C10,7), F_REG_READ),
4745 SR_CORE ("trcpidr3", CPENC (2,1,C7,C11,7), F_REG_READ),
4746 SR_CORE ("trcpidr4", CPENC (2,1,C7,C4,7), F_REG_READ),
4747 SR_CORE ("trcpidr5", CPENC (2,1,C7,C5,7), F_REG_READ),
4748 SR_CORE ("trcpidr6", CPENC (2,1,C7,C6,7), F_REG_READ),
4749 SR_CORE ("trcpidr7", CPENC (2,1,C7,C7,7), F_REG_READ),
4750 SR_CORE ("trcstatr", CPENC (2,1,C0,C3,0), F_REG_READ),
4751 SR_CORE ("trcacatr0", CPENC (2,1,C2,C0,2), 0),
4752 SR_CORE ("trcacatr1", CPENC (2,1,C2,C2,2), 0),
4753 SR_CORE ("trcacatr2", CPENC (2,1,C2,C4,2), 0),
4754 SR_CORE ("trcacatr3", CPENC (2,1,C2,C6,2), 0),
4755 SR_CORE ("trcacatr4", CPENC (2,1,C2,C8,2), 0),
4756 SR_CORE ("trcacatr5", CPENC (2,1,C2,C10,2), 0),
4757 SR_CORE ("trcacatr6", CPENC (2,1,C2,C12,2), 0),
4758 SR_CORE ("trcacatr7", CPENC (2,1,C2,C14,2), 0),
4759 SR_CORE ("trcacatr8", CPENC (2,1,C2,C0,3), 0),
4760 SR_CORE ("trcacatr9", CPENC (2,1,C2,C2,3), 0),
4761 SR_CORE ("trcacatr10", CPENC (2,1,C2,C4,3), 0),
4762 SR_CORE ("trcacatr11", CPENC (2,1,C2,C6,3), 0),
4763 SR_CORE ("trcacatr12", CPENC (2,1,C2,C8,3), 0),
4764 SR_CORE ("trcacatr13", CPENC (2,1,C2,C10,3), 0),
4765 SR_CORE ("trcacatr14", CPENC (2,1,C2,C12,3), 0),
4766 SR_CORE ("trcacatr15", CPENC (2,1,C2,C14,3), 0),
4767 SR_CORE ("trcacvr0", CPENC (2,1,C2,C0,0), 0),
4768 SR_CORE ("trcacvr1", CPENC (2,1,C2,C2,0), 0),
4769 SR_CORE ("trcacvr2", CPENC (2,1,C2,C4,0), 0),
4770 SR_CORE ("trcacvr3", CPENC (2,1,C2,C6,0), 0),
4771 SR_CORE ("trcacvr4", CPENC (2,1,C2,C8,0), 0),
4772 SR_CORE ("trcacvr5", CPENC (2,1,C2,C10,0), 0),
4773 SR_CORE ("trcacvr6", CPENC (2,1,C2,C12,0), 0),
4774 SR_CORE ("trcacvr7", CPENC (2,1,C2,C14,0), 0),
4775 SR_CORE ("trcacvr8", CPENC (2,1,C2,C0,1), 0),
4776 SR_CORE ("trcacvr9", CPENC (2,1,C2,C2,1), 0),
4777 SR_CORE ("trcacvr10", CPENC (2,1,C2,C4,1), 0),
4778 SR_CORE ("trcacvr11", CPENC (2,1,C2,C6,1), 0),
4779 SR_CORE ("trcacvr12", CPENC (2,1,C2,C8,1), 0),
4780 SR_CORE ("trcacvr13", CPENC (2,1,C2,C10,1), 0),
4781 SR_CORE ("trcacvr14", CPENC (2,1,C2,C12,1), 0),
4782 SR_CORE ("trcacvr15", CPENC (2,1,C2,C14,1), 0),
4783 SR_CORE ("trcauxctlr", CPENC (2,1,C0,C6,0), 0),
4784 SR_CORE ("trcbbctlr", CPENC (2,1,C0,C15,0), 0),
4785 SR_CORE ("trcccctlr", CPENC (2,1,C0,C14,0), 0),
4786 SR_CORE ("trccidcctlr0", CPENC (2,1,C3,C0,2), 0),
4787 SR_CORE ("trccidcctlr1", CPENC (2,1,C3,C1,2), 0),
4788 SR_CORE ("trccidcvr0", CPENC (2,1,C3,C0,0), 0),
4789 SR_CORE ("trccidcvr1", CPENC (2,1,C3,C2,0), 0),
4790 SR_CORE ("trccidcvr2", CPENC (2,1,C3,C4,0), 0),
4791 SR_CORE ("trccidcvr3", CPENC (2,1,C3,C6,0), 0),
4792 SR_CORE ("trccidcvr4", CPENC (2,1,C3,C8,0), 0),
4793 SR_CORE ("trccidcvr5", CPENC (2,1,C3,C10,0), 0),
4794 SR_CORE ("trccidcvr6", CPENC (2,1,C3,C12,0), 0),
4795 SR_CORE ("trccidcvr7", CPENC (2,1,C3,C14,0), 0),
4796 SR_CORE ("trcclaimclr", CPENC (2,1,C7,C9,6), 0),
4797 SR_CORE ("trcclaimset", CPENC (2,1,C7,C8,6), 0),
4798 SR_CORE ("trccntctlr0", CPENC (2,1,C0,C4,5), 0),
4799 SR_CORE ("trccntctlr1", CPENC (2,1,C0,C5,5), 0),
4800 SR_CORE ("trccntctlr2", CPENC (2,1,C0,C6,5), 0),
4801 SR_CORE ("trccntctlr3", CPENC (2,1,C0,C7,5), 0),
4802 SR_CORE ("trccntrldvr0", CPENC (2,1,C0,C0,5), 0),
4803 SR_CORE ("trccntrldvr1", CPENC (2,1,C0,C1,5), 0),
4804 SR_CORE ("trccntrldvr2", CPENC (2,1,C0,C2,5), 0),
4805 SR_CORE ("trccntrldvr3", CPENC (2,1,C0,C3,5), 0),
4806 SR_CORE ("trccntvr0", CPENC (2,1,C0,C8,5), 0),
4807 SR_CORE ("trccntvr1", CPENC (2,1,C0,C9,5), 0),
4808 SR_CORE ("trccntvr2", CPENC (2,1,C0,C10,5), 0),
4809 SR_CORE ("trccntvr3", CPENC (2,1,C0,C11,5), 0),
4810 SR_CORE ("trcconfigr", CPENC (2,1,C0,C4,0), 0),
4811 SR_CORE ("trcdvcmr0", CPENC (2,1,C2,C0,6), 0),
4812 SR_CORE ("trcdvcmr1", CPENC (2,1,C2,C4,6), 0),
4813 SR_CORE ("trcdvcmr2", CPENC (2,1,C2,C8,6), 0),
4814 SR_CORE ("trcdvcmr3", CPENC (2,1,C2,C12,6), 0),
4815 SR_CORE ("trcdvcmr4", CPENC (2,1,C2,C0,7), 0),
4816 SR_CORE ("trcdvcmr5", CPENC (2,1,C2,C4,7), 0),
4817 SR_CORE ("trcdvcmr6", CPENC (2,1,C2,C8,7), 0),
4818 SR_CORE ("trcdvcmr7", CPENC (2,1,C2,C12,7), 0),
4819 SR_CORE ("trcdvcvr0", CPENC (2,1,C2,C0,4), 0),
4820 SR_CORE ("trcdvcvr1", CPENC (2,1,C2,C4,4), 0),
4821 SR_CORE ("trcdvcvr2", CPENC (2,1,C2,C8,4), 0),
4822 SR_CORE ("trcdvcvr3", CPENC (2,1,C2,C12,4), 0),
4823 SR_CORE ("trcdvcvr4", CPENC (2,1,C2,C0,5), 0),
4824 SR_CORE ("trcdvcvr5", CPENC (2,1,C2,C4,5), 0),
4825 SR_CORE ("trcdvcvr6", CPENC (2,1,C2,C8,5), 0),
4826 SR_CORE ("trcdvcvr7", CPENC (2,1,C2,C12,5), 0),
4827 SR_CORE ("trceventctl0r", CPENC (2,1,C0,C8,0), 0),
4828 SR_CORE ("trceventctl1r", CPENC (2,1,C0,C9,0), 0),
4829 SR_CORE ("trcextinselr0", CPENC (2,1,C0,C8,4), 0),
4830 SR_CORE ("trcextinselr", CPENC (2,1,C0,C8,4), 0),
4831 SR_CORE ("trcextinselr1", CPENC (2,1,C0,C9,4), 0),
4832 SR_CORE ("trcextinselr2", CPENC (2,1,C0,C10,4), 0),
4833 SR_CORE ("trcextinselr3", CPENC (2,1,C0,C11,4), 0),
4834 SR_CORE ("trcimspec0", CPENC (2,1,C0,C0,7), 0),
4835 SR_CORE ("trcimspec0", CPENC (2,1,C0,C0,7), 0),
4836 SR_CORE ("trcimspec1", CPENC (2,1,C0,C1,7), 0),
4837 SR_CORE ("trcimspec2", CPENC (2,1,C0,C2,7), 0),
4838 SR_CORE ("trcimspec3", CPENC (2,1,C0,C3,7), 0),
4839 SR_CORE ("trcimspec4", CPENC (2,1,C0,C4,7), 0),
4840 SR_CORE ("trcimspec5", CPENC (2,1,C0,C5,7), 0),
4841 SR_CORE ("trcimspec6", CPENC (2,1,C0,C6,7), 0),
4842 SR_CORE ("trcimspec7", CPENC (2,1,C0,C7,7), 0),
4843 SR_CORE ("trcitctrl", CPENC (2,1,C7,C0,4), 0),
4844 SR_CORE ("trcpdcr", CPENC (2,1,C1,C4,4), 0),
4845 SR_CORE ("trcprgctlr", CPENC (2,1,C0,C1,0), 0),
4846 SR_CORE ("trcprocselr", CPENC (2,1,C0,C2,0), 0),
4847 SR_CORE ("trcqctlr", CPENC (2,1,C0,C1,1), 0),
4848 SR_CORE ("trcrsctlr2", CPENC (2,1,C1,C2,0), 0),
4849 SR_CORE ("trcrsctlr3", CPENC (2,1,C1,C3,0), 0),
4850 SR_CORE ("trcrsctlr4", CPENC (2,1,C1,C4,0), 0),
4851 SR_CORE ("trcrsctlr5", CPENC (2,1,C1,C5,0), 0),
4852 SR_CORE ("trcrsctlr6", CPENC (2,1,C1,C6,0), 0),
4853 SR_CORE ("trcrsctlr7", CPENC (2,1,C1,C7,0), 0),
4854 SR_CORE ("trcrsctlr8", CPENC (2,1,C1,C8,0), 0),
4855 SR_CORE ("trcrsctlr9", CPENC (2,1,C1,C9,0), 0),
4856 SR_CORE ("trcrsctlr10", CPENC (2,1,C1,C10,0), 0),
4857 SR_CORE ("trcrsctlr11", CPENC (2,1,C1,C11,0), 0),
4858 SR_CORE ("trcrsctlr12", CPENC (2,1,C1,C12,0), 0),
4859 SR_CORE ("trcrsctlr13", CPENC (2,1,C1,C13,0), 0),
4860 SR_CORE ("trcrsctlr14", CPENC (2,1,C1,C14,0), 0),
4861 SR_CORE ("trcrsctlr15", CPENC (2,1,C1,C15,0), 0),
4862 SR_CORE ("trcrsctlr16", CPENC (2,1,C1,C0,1), 0),
4863 SR_CORE ("trcrsctlr17", CPENC (2,1,C1,C1,1), 0),
4864 SR_CORE ("trcrsctlr18", CPENC (2,1,C1,C2,1), 0),
4865 SR_CORE ("trcrsctlr19", CPENC (2,1,C1,C3,1), 0),
4866 SR_CORE ("trcrsctlr20", CPENC (2,1,C1,C4,1), 0),
4867 SR_CORE ("trcrsctlr21", CPENC (2,1,C1,C5,1), 0),
4868 SR_CORE ("trcrsctlr22", CPENC (2,1,C1,C6,1), 0),
4869 SR_CORE ("trcrsctlr23", CPENC (2,1,C1,C7,1), 0),
4870 SR_CORE ("trcrsctlr24", CPENC (2,1,C1,C8,1), 0),
4871 SR_CORE ("trcrsctlr25", CPENC (2,1,C1,C9,1), 0),
4872 SR_CORE ("trcrsctlr26", CPENC (2,1,C1,C10,1), 0),
4873 SR_CORE ("trcrsctlr27", CPENC (2,1,C1,C11,1), 0),
4874 SR_CORE ("trcrsctlr28", CPENC (2,1,C1,C12,1), 0),
4875 SR_CORE ("trcrsctlr29", CPENC (2,1,C1,C13,1), 0),
4876 SR_CORE ("trcrsctlr30", CPENC (2,1,C1,C14,1), 0),
4877 SR_CORE ("trcrsctlr31", CPENC (2,1,C1,C15,1), 0),
4878 SR_CORE ("trcseqevr0", CPENC (2,1,C0,C0,4), 0),
4879 SR_CORE ("trcseqevr1", CPENC (2,1,C0,C1,4), 0),
4880 SR_CORE ("trcseqevr2", CPENC (2,1,C0,C2,4), 0),
4881 SR_CORE ("trcseqrstevr", CPENC (2,1,C0,C6,4), 0),
4882 SR_CORE ("trcseqstr", CPENC (2,1,C0,C7,4), 0),
4883 SR_CORE ("trcssccr0", CPENC (2,1,C1,C0,2), 0),
4884 SR_CORE ("trcssccr1", CPENC (2,1,C1,C1,2), 0),
4885 SR_CORE ("trcssccr2", CPENC (2,1,C1,C2,2), 0),
4886 SR_CORE ("trcssccr3", CPENC (2,1,C1,C3,2), 0),
4887 SR_CORE ("trcssccr4", CPENC (2,1,C1,C4,2), 0),
4888 SR_CORE ("trcssccr5", CPENC (2,1,C1,C5,2), 0),
4889 SR_CORE ("trcssccr6", CPENC (2,1,C1,C6,2), 0),
4890 SR_CORE ("trcssccr7", CPENC (2,1,C1,C7,2), 0),
4891 SR_CORE ("trcsscsr0", CPENC (2,1,C1,C8,2), 0),
4892 SR_CORE ("trcsscsr1", CPENC (2,1,C1,C9,2), 0),
4893 SR_CORE ("trcsscsr2", CPENC (2,1,C1,C10,2), 0),
4894 SR_CORE ("trcsscsr3", CPENC (2,1,C1,C11,2), 0),
4895 SR_CORE ("trcsscsr4", CPENC (2,1,C1,C12,2), 0),
4896 SR_CORE ("trcsscsr5", CPENC (2,1,C1,C13,2), 0),
4897 SR_CORE ("trcsscsr6", CPENC (2,1,C1,C14,2), 0),
4898 SR_CORE ("trcsscsr7", CPENC (2,1,C1,C15,2), 0),
4899 SR_CORE ("trcsspcicr0", CPENC (2,1,C1,C0,3), 0),
4900 SR_CORE ("trcsspcicr1", CPENC (2,1,C1,C1,3), 0),
4901 SR_CORE ("trcsspcicr2", CPENC (2,1,C1,C2,3), 0),
4902 SR_CORE ("trcsspcicr3", CPENC (2,1,C1,C3,3), 0),
4903 SR_CORE ("trcsspcicr4", CPENC (2,1,C1,C4,3), 0),
4904 SR_CORE ("trcsspcicr5", CPENC (2,1,C1,C5,3), 0),
4905 SR_CORE ("trcsspcicr6", CPENC (2,1,C1,C6,3), 0),
4906 SR_CORE ("trcsspcicr7", CPENC (2,1,C1,C7,3), 0),
4907 SR_CORE ("trcstallctlr", CPENC (2,1,C0,C11,0), 0),
4908 SR_CORE ("trcsyncpr", CPENC (2,1,C0,C13,0), 0),
4909 SR_CORE ("trctraceidr", CPENC (2,1,C0,C0,1), 0),
4910 SR_CORE ("trctsctlr", CPENC (2,1,C0,C12,0), 0),
4911 SR_CORE ("trcvdarcctlr", CPENC (2,1,C0,C10,2), 0),
4912 SR_CORE ("trcvdctlr", CPENC (2,1,C0,C8,2), 0),
4913 SR_CORE ("trcvdsacctlr", CPENC (2,1,C0,C9,2), 0),
4914 SR_CORE ("trcvictlr", CPENC (2,1,C0,C0,2), 0),
4915 SR_CORE ("trcviiectlr", CPENC (2,1,C0,C1,2), 0),
4916 SR_CORE ("trcvipcssctlr", CPENC (2,1,C0,C3,2), 0),
4917 SR_CORE ("trcvissctlr", CPENC (2,1,C0,C2,2), 0),
4918 SR_CORE ("trcvmidcctlr0", CPENC (2,1,C3,C2,2), 0),
4919 SR_CORE ("trcvmidcctlr1", CPENC (2,1,C3,C3,2), 0),
4920 SR_CORE ("trcvmidcvr0", CPENC (2,1,C3,C0,1), 0),
4921 SR_CORE ("trcvmidcvr1", CPENC (2,1,C3,C2,1), 0),
4922 SR_CORE ("trcvmidcvr2", CPENC (2,1,C3,C4,1), 0),
4923 SR_CORE ("trcvmidcvr3", CPENC (2,1,C3,C6,1), 0),
4924 SR_CORE ("trcvmidcvr4", CPENC (2,1,C3,C8,1), 0),
4925 SR_CORE ("trcvmidcvr5", CPENC (2,1,C3,C10,1), 0),
4926 SR_CORE ("trcvmidcvr6", CPENC (2,1,C3,C12,1), 0),
4927 SR_CORE ("trcvmidcvr7", CPENC (2,1,C3,C14,1), 0),
4928 SR_CORE ("trclar", CPENC (2,1,C7,C12,6), F_REG_WRITE),
4929 SR_CORE ("trcoslar", CPENC (2,1,C1,C0,4), F_REG_WRITE),
4930
4931 /* A64C system registers. */
4932 SR_MORELLO ("cctlr_el0", CPENC (3,3,C1,C2,2), 0),
4933 SR_MORELLO ("cctlr_el1", CPENC (3,0,C1,C2,2), 0),
4934 SR_MORELLO ("cctlr_el12", CPENC (3,5,C1,C2,2), 0),
4935 SR_MORELLO ("cctlr_el2", CPENC (3,4,C1,C2,2), 0),
4936 SR_MORELLO ("cctlr_el3", CPENC (3,6,C1,C2,2), 0),
4937 SR_MORELLO ("cdbgdtr_el0", CPENC (2,3,C0,C4,0), F_CAPREG),
4938 SR_MORELLO ("cdlr_el0", CPENC (3,3,C4,C5,1), F_CAPREG),
4939 SR_MORELLO ("celr_el1", CPENC (3,0,C4,C0,1), F_CAPREG),
4940 SR_MORELLO ("celr_el12", CPENC (3,5,C4,C0,1), F_CAPREG),
4941 SR_MORELLO ("celr_el2", CPENC (3,4,C4,C0,1), F_CAPREG),
4942 SR_MORELLO ("celr_el3", CPENC (3,6,C4,C0,1), F_CAPREG),
4943 SR_MORELLO ("chcr_el2", CPENC (3,4,C1,C2,3), 0),
4944 SR_MORELLO ("cid_el0", CPENC (3,3,C13,C0,7), F_CAPREG),
4945 SR_MORELLO ("cscr_el3", CPENC (3,6,C1,C2,2), 0),
4946 SR_MORELLO ("csp_el0", CPENC (3,0,C4,C1,0), F_CAPREG),
4947 SR_MORELLO ("csp_el1", CPENC (3,4,C4,C1,0), F_CAPREG),
4948 SR_MORELLO ("csp_el2", CPENC (3,6,C4,C1,0), F_CAPREG),
4949 SR_MORELLO ("ctpidr_el0", CPENC (3,3,C13,C0,2), F_CAPREG),
4950 SR_MORELLO ("ctpidr_el1", CPENC (3,0,C13,C0,4), F_CAPREG),
4951 SR_MORELLO ("ctpidr_el2", CPENC (3,4,C13,C0,2), F_CAPREG),
4952 SR_MORELLO ("ctpidr_el3", CPENC (3,6,C13,C0,2), F_CAPREG),
4953 SR_MORELLO ("ctpidrro_el0", CPENC (3,3,C13,C0,3), F_CAPREG),
4954 SR_MORELLO ("cvbar_el1", CPENC (3,0,C12,C0,0), F_CAPREG),
4955 SR_MORELLO ("cvbar_el12", CPENC (3,5,C12,C0,0), F_CAPREG),
4956 SR_MORELLO ("cvbar_el2", CPENC (3,4,C12,C0,0), F_CAPREG),
4957 SR_MORELLO ("cvbar_el3", CPENC (3,6,C12,C0,0), F_CAPREG),
4958 SR_MORELLO ("ddc", CPENC (3,3,C4,C1,1), F_CAPREG),
4959 SR_MORELLO ("ddc_el0", CPENC (3,0,C4,C1,1), F_CAPREG),
4960 SR_MORELLO ("ddc_el1", CPENC (3,4,C4,C1,1), F_CAPREG),
4961 SR_MORELLO ("ddc_el2", CPENC (3,6,C4,C1,1), F_CAPREG),
4962 SR_MORELLO ("rcsp_el0", CPENC (3,7,C4,C1,3), F_CAPREG),
4963 SR_MORELLO ("rctpidr_el0", CPENC (3,3,C13,C0,4), F_CAPREG),
4964 SR_MORELLO ("rddc_el0", CPENC (3,3,C4,C3,1), F_CAPREG),
4965 SR_MORELLO ("rsp_el0", CPENC (3,7,C4,C1,3), 0),
4966 SR_MORELLO ("rtpidr_el0", CPENC (3,3,C13,C0,4), 0),
4967 { 0, CPENC (0,0,0,0,0), 0, 0 }
4968 };
4969
4970 bfd_boolean
4971 aarch64_sys_reg_deprecated_p (const uint32_t reg_flags)
4972 {
4973 return (reg_flags & F_DEPRECATED) != 0;
4974 }
4975
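/* Return TRUE if the system register REG is compatible with instruction
   class ICLASS: capability registers (F_CAPREG) are only accepted by A64C
   instructions, and non-capability registers only by base instructions.  */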
4976 bfd_boolean
4977 aarch64_sys_reg_capreg_supported_p (enum aarch64_insn_class iclass,
4978 const aarch64_sys_reg *reg)
4979 {
4980 unsigned needs_capreg = iclass == a64c ? F_CAPREG : 0;
4981 return (reg->flags & F_CAPREG) == needs_capreg;
4982 }
4983
4984 /* Note that the encodings below are not in CPENC form; they are in
4985 op1:op2 form (op1 in the upper three bits, op2 in the lower three).
4986 The fields are encoded by ins_pstatefield, which just shifts the value
4987 by the width of the fields in a loop, so if you CPENC them only the
4988 first value is set and the rest are masked out to 0. As an example,
4989 for op1 = 3 and op2 = 2, CPENC would produce 0b110000000001000000
4990 (0x30040) while what you want is 0b011010 (0x1a). */
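/* For illustration of that packing (field values as in the Arm ARM):
   "daifset" below is op1 = 0b011, op2 = 0b110, packed as 0b011110 (0x1e),
   and "spsel" is op1 = 0b000, op2 = 0b101, packed as 0b000101 (0x05).  */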
4991 const aarch64_sys_reg aarch64_pstatefields [] =
4992 {
4993 SR_CORE ("spsel", 0x05, 0),
4994 SR_CORE ("daifset", 0x1e, 0),
4995 SR_CORE ("daifclr", 0x1f, 0),
4996 SR_PAN ("pan", 0x04, 0),
4997 SR_V8_2 ("uao", 0x03, 0),
4998 SR_SSBS ("ssbs", 0x19, 0),
4999 SR_V8_4 ("dit", 0x1a, 0),
5000 SR_MEMTAG ("tco", 0x1c, 0),
5001 { 0, CPENC (0,0,0,0,0), 0, 0 },
5002 };
5003
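/* Return TRUE if the PSTATE field described by REG is available when
   assembling for the feature set FEATURES.  Fields without F_ARCHEXT are
   always available.  */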
5004 bfd_boolean
5005 aarch64_pstatefield_supported_p (const aarch64_feature_set features,
5006 const aarch64_sys_reg *reg)
5007 {
5008 if (!(reg->flags & F_ARCHEXT))
5009 return TRUE;
5010
5011 return AARCH64_CPU_HAS_ALL_FEATURES (features, reg->features);
5012 }
5013
5014 const aarch64_sys_ins_reg aarch64_sys_regs_ic[] =
5015 {
5016 { "ialluis", CPENS(0,C7,C1,0), 0 },
5017 { "iallu", CPENS(0,C7,C5,0), 0 },
5018 { "ivau", CPENS (3, C7, C5, 1), F_HASXT },
5019 { 0, CPENS(0,0,0,0), 0 }
5020 };
5021
5022 const aarch64_sys_ins_reg aarch64_sys_regs_dc[] =
5023 {
5024 { "zva", CPENS (3, C7, C4, 1), F_HASXT },
5025 { "gva", CPENS (3, C7, C4, 3), F_HASXT | F_ARCHEXT },
5026 { "gzva", CPENS (3, C7, C4, 4), F_HASXT | F_ARCHEXT },
5027 { "ivac", CPENS (0, C7, C6, 1), F_HASXT },
5028 { "igvac", CPENS (0, C7, C6, 3), F_HASXT | F_ARCHEXT },
5029 { "igsw", CPENS (0, C7, C6, 4), F_HASXT | F_ARCHEXT },
5030 { "isw", CPENS (0, C7, C6, 2), F_HASXT },
5031 { "igdvac", CPENS (0, C7, C6, 5), F_HASXT | F_ARCHEXT },
5032 { "igdsw", CPENS (0, C7, C6, 6), F_HASXT | F_ARCHEXT },
5033 { "cvac", CPENS (3, C7, C10, 1), F_HASXT },
5034 { "cgvac", CPENS (3, C7, C10, 3), F_HASXT | F_ARCHEXT },
5035 { "cgdvac", CPENS (3, C7, C10, 5), F_HASXT | F_ARCHEXT },
5036 { "csw", CPENS (0, C7, C10, 2), F_HASXT },
5037 { "cgsw", CPENS (0, C7, C10, 4), F_HASXT | F_ARCHEXT },
5038 { "cgdsw", CPENS (0, C7, C10, 6), F_HASXT | F_ARCHEXT },
5039 { "cvau", CPENS (3, C7, C11, 1), F_HASXT },
5040 { "cvap", CPENS (3, C7, C12, 1), F_HASXT | F_ARCHEXT },
5041 { "cgvap", CPENS (3, C7, C12, 3), F_HASXT | F_ARCHEXT },
5042 { "cgdvap", CPENS (3, C7, C12, 5), F_HASXT | F_ARCHEXT },
5043 { "cvadp", CPENS (3, C7, C13, 1), F_HASXT | F_ARCHEXT },
5044 { "cgvadp", CPENS (3, C7, C13, 3), F_HASXT | F_ARCHEXT },
5045 { "cgdvadp", CPENS (3, C7, C13, 5), F_HASXT | F_ARCHEXT },
5046 { "civac", CPENS (3, C7, C14, 1), F_HASXT },
5047 { "cigvac", CPENS (3, C7, C14, 3), F_HASXT | F_ARCHEXT },
5048 { "cigdvac", CPENS (3, C7, C14, 5), F_HASXT | F_ARCHEXT },
5049 { "cisw", CPENS (0, C7, C14, 2), F_HASXT },
5050 { "cigsw", CPENS (0, C7, C14, 4), F_HASXT | F_ARCHEXT },
5051 { "cigdsw", CPENS (0, C7, C14, 6), F_HASXT | F_ARCHEXT },
5052 { 0, CPENS(0,0,0,0), 0 }
5053 };
5054
5055 const aarch64_sys_ins_reg aarch64_sys_regs_at[] =
5056 {
5057 { "s1e1r", CPENS (0, C7, C8, 0), F_HASXT },
5058 { "s1e1w", CPENS (0, C7, C8, 1), F_HASXT },
5059 { "s1e0r", CPENS (0, C7, C8, 2), F_HASXT },
5060 { "s1e0w", CPENS (0, C7, C8, 3), F_HASXT },
5061 { "s12e1r", CPENS (4, C7, C8, 4), F_HASXT },
5062 { "s12e1w", CPENS (4, C7, C8, 5), F_HASXT },
5063 { "s12e0r", CPENS (4, C7, C8, 6), F_HASXT },
5064 { "s12e0w", CPENS (4, C7, C8, 7), F_HASXT },
5065 { "s1e2r", CPENS (4, C7, C8, 0), F_HASXT },
5066 { "s1e2w", CPENS (4, C7, C8, 1), F_HASXT },
5067 { "s1e3r", CPENS (6, C7, C8, 0), F_HASXT },
5068 { "s1e3w", CPENS (6, C7, C8, 1), F_HASXT },
5069 { "s1e1rp", CPENS (0, C7, C9, 0), F_HASXT | F_ARCHEXT },
5070 { "s1e1wp", CPENS (0, C7, C9, 1), F_HASXT | F_ARCHEXT },
5071 { 0, CPENS(0,0,0,0), 0 }
5072 };
5073
5074 const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] =
5075 {
5076 { "vmalle1", CPENS(0,C8,C7,0), 0 },
5077 { "vae1", CPENS (0, C8, C7, 1), F_HASXT },
5078 { "aside1", CPENS (0, C8, C7, 2), F_HASXT },
5079 { "vaae1", CPENS (0, C8, C7, 3), F_HASXT },
5080 { "vmalle1is", CPENS(0,C8,C3,0), 0 },
5081 { "vae1is", CPENS (0, C8, C3, 1), F_HASXT },
5082 { "aside1is", CPENS (0, C8, C3, 2), F_HASXT },
5083 { "vaae1is", CPENS (0, C8, C3, 3), F_HASXT },
5084 { "ipas2e1is", CPENS (4, C8, C0, 1), F_HASXT },
5085 { "ipas2le1is",CPENS (4, C8, C0, 5), F_HASXT },
5086 { "ipas2e1", CPENS (4, C8, C4, 1), F_HASXT },
5087 { "ipas2le1", CPENS (4, C8, C4, 5), F_HASXT },
5088 { "vae2", CPENS (4, C8, C7, 1), F_HASXT },
5089 { "vae2is", CPENS (4, C8, C3, 1), F_HASXT },
5090 { "vmalls12e1",CPENS(4,C8,C7,6), 0 },
5091 { "vmalls12e1is",CPENS(4,C8,C3,6), 0 },
5092 { "vae3", CPENS (6, C8, C7, 1), F_HASXT },
5093 { "vae3is", CPENS (6, C8, C3, 1), F_HASXT },
5094 { "alle2", CPENS(4,C8,C7,0), 0 },
5095 { "alle2is", CPENS(4,C8,C3,0), 0 },
5096 { "alle1", CPENS(4,C8,C7,4), 0 },
5097 { "alle1is", CPENS(4,C8,C3,4), 0 },
5098 { "alle3", CPENS(6,C8,C7,0), 0 },
5099 { "alle3is", CPENS(6,C8,C3,0), 0 },
5100 { "vale1is", CPENS (0, C8, C3, 5), F_HASXT },
5101 { "vale2is", CPENS (4, C8, C3, 5), F_HASXT },
5102 { "vale3is", CPENS (6, C8, C3, 5), F_HASXT },
5103 { "vaale1is", CPENS (0, C8, C3, 7), F_HASXT },
5104 { "vale1", CPENS (0, C8, C7, 5), F_HASXT },
5105 { "vale2", CPENS (4, C8, C7, 5), F_HASXT },
5106 { "vale3", CPENS (6, C8, C7, 5), F_HASXT },
5107 { "vaale1", CPENS (0, C8, C7, 7), F_HASXT },
5108
5109 { "vmalle1os", CPENS (0, C8, C1, 0), F_ARCHEXT },
5110 { "vae1os", CPENS (0, C8, C1, 1), F_HASXT | F_ARCHEXT },
5111 { "aside1os", CPENS (0, C8, C1, 2), F_HASXT | F_ARCHEXT },
5112 { "vaae1os", CPENS (0, C8, C1, 3), F_HASXT | F_ARCHEXT },
5113 { "vale1os", CPENS (0, C8, C1, 5), F_HASXT | F_ARCHEXT },
5114 { "vaale1os", CPENS (0, C8, C1, 7), F_HASXT | F_ARCHEXT },
5115 { "ipas2e1os", CPENS (4, C8, C4, 0), F_HASXT | F_ARCHEXT },
5116 { "ipas2le1os", CPENS (4, C8, C4, 4), F_HASXT | F_ARCHEXT },
5117 { "vae2os", CPENS (4, C8, C1, 1), F_HASXT | F_ARCHEXT },
5118 { "vale2os", CPENS (4, C8, C1, 5), F_HASXT | F_ARCHEXT },
5119 { "vmalls12e1os", CPENS (4, C8, C1, 6), F_ARCHEXT },
5120 { "vae3os", CPENS (6, C8, C1, 1), F_HASXT | F_ARCHEXT },
5121 { "vale3os", CPENS (6, C8, C1, 5), F_HASXT | F_ARCHEXT },
5122 { "alle2os", CPENS (4, C8, C1, 0), F_ARCHEXT },
5123 { "alle1os", CPENS (4, C8, C1, 4), F_ARCHEXT },
5124 { "alle3os", CPENS (6, C8, C1, 0), F_ARCHEXT },
5125
5126 { "rvae1", CPENS (0, C8, C6, 1), F_HASXT | F_ARCHEXT },
5127 { "rvaae1", CPENS (0, C8, C6, 3), F_HASXT | F_ARCHEXT },
5128 { "rvale1", CPENS (0, C8, C6, 5), F_HASXT | F_ARCHEXT },
5129 { "rvaale1", CPENS (0, C8, C6, 7), F_HASXT | F_ARCHEXT },
5130 { "rvae1is", CPENS (0, C8, C2, 1), F_HASXT | F_ARCHEXT },
5131 { "rvaae1is", CPENS (0, C8, C2, 3), F_HASXT | F_ARCHEXT },
5132 { "rvale1is", CPENS (0, C8, C2, 5), F_HASXT | F_ARCHEXT },
5133 { "rvaale1is", CPENS (0, C8, C2, 7), F_HASXT | F_ARCHEXT },
5134 { "rvae1os", CPENS (0, C8, C5, 1), F_HASXT | F_ARCHEXT },
5135 { "rvaae1os", CPENS (0, C8, C5, 3), F_HASXT | F_ARCHEXT },
5136 { "rvale1os", CPENS (0, C8, C5, 5), F_HASXT | F_ARCHEXT },
5137 { "rvaale1os", CPENS (0, C8, C5, 7), F_HASXT | F_ARCHEXT },
5138 { "ripas2e1is", CPENS (4, C8, C0, 2), F_HASXT | F_ARCHEXT },
5139 { "ripas2le1is",CPENS (4, C8, C0, 6), F_HASXT | F_ARCHEXT },
5140 { "ripas2e1", CPENS (4, C8, C4, 2), F_HASXT | F_ARCHEXT },
5141 { "ripas2le1", CPENS (4, C8, C4, 6), F_HASXT | F_ARCHEXT },
5142 { "ripas2e1os", CPENS (4, C8, C4, 3), F_HASXT | F_ARCHEXT },
5143 { "ripas2le1os",CPENS (4, C8, C4, 7), F_HASXT | F_ARCHEXT },
5144 { "rvae2", CPENS (4, C8, C6, 1), F_HASXT | F_ARCHEXT },
5145 { "rvale2", CPENS (4, C8, C6, 5), F_HASXT | F_ARCHEXT },
5146 { "rvae2is", CPENS (4, C8, C2, 1), F_HASXT | F_ARCHEXT },
5147 { "rvale2is", CPENS (4, C8, C2, 5), F_HASXT | F_ARCHEXT },
5148 { "rvae2os", CPENS (4, C8, C5, 1), F_HASXT | F_ARCHEXT },
5149 { "rvale2os", CPENS (4, C8, C5, 5), F_HASXT | F_ARCHEXT },
5150 { "rvae3", CPENS (6, C8, C6, 1), F_HASXT | F_ARCHEXT },
5151 { "rvale3", CPENS (6, C8, C6, 5), F_HASXT | F_ARCHEXT },
5152 { "rvae3is", CPENS (6, C8, C2, 1), F_HASXT | F_ARCHEXT },
5153 { "rvale3is", CPENS (6, C8, C2, 5), F_HASXT | F_ARCHEXT },
5154 { "rvae3os", CPENS (6, C8, C5, 1), F_HASXT | F_ARCHEXT },
5155 { "rvale3os", CPENS (6, C8, C5, 5), F_HASXT | F_ARCHEXT },
5156
5157 { 0, CPENS(0,0,0,0), 0 }
5158 };
5159
5160 const aarch64_sys_ins_reg aarch64_sys_regs_sr[] =
5161 {
5162 /* RCTX is somewhat unique in that it takes a different op2 value
5163 depending on the instruction in which it is used (cfp/dvp/cpp).
5164 Thus op2 is masked out here and instead encoded directly in the
5165 aarch64_opcode_table entries for those instructions (see below). */
5166 { "rctx", CPENS(3,C7,C3,0), F_HASXT | F_ARCHEXT | F_REG_WRITE}, /* WO */
5167
5168 { 0, CPENS(0,0,0,0), 0 }
5169 };
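
/* As an illustrative note (the op2 assignments come from the FEAT_SPECRES
   definitions, not from this table): CFP RCTX uses op2 = 4, DVP RCTX uses
   op2 = 5 and CPP RCTX uses op2 = 7, all on the op1 = 3, CRn = C7, CRm = C3
   encoding given above.  */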
5170
5171 bfd_boolean
5172 aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *sys_ins_reg)
5173 {
5174 return (sys_ins_reg->flags & F_HASXT) != 0;
5175 }
5176
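/* Return TRUE if the system instruction operand described by REG_NAME,
   REG_VALUE, REG_FLAGS and REG_FEATURES is available for the feature set
   FEATURES.  Operands without F_ARCHEXT are always available, except that
   Armv8-R rejects any operand whose name ends in "_el3".  */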
5177 extern bfd_boolean
5178 aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
5179 const char *reg_name,
5180 aarch64_insn reg_value,
5181 uint32_t reg_flags,
5182 aarch64_feature_set reg_features)
5183 {
5184 /* Armv8-R has no EL3. */
5185 if (AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_R))
5186 {
5187 const char *suffix = strrchr (reg_name, '_');
5188 if (suffix && !strcmp (suffix, "_el3"))
5189 return FALSE;
5190 }
5191
5192 if (!(reg_flags & F_ARCHEXT))
5193 return TRUE;
5194
5195 if (reg_features
5196 && AARCH64_CPU_HAS_ALL_FEATURES (features, reg_features))
5197 return TRUE;
5198
5199 /* ARMv8.4 TLB instructions. */
5200 if ((reg_value == CPENS (0, C8, C1, 0)
5201 || reg_value == CPENS (0, C8, C1, 1)
5202 || reg_value == CPENS (0, C8, C1, 2)
5203 || reg_value == CPENS (0, C8, C1, 3)
5204 || reg_value == CPENS (0, C8, C1, 5)
5205 || reg_value == CPENS (0, C8, C1, 7)
5206 || reg_value == CPENS (4, C8, C4, 0)
5207 || reg_value == CPENS (4, C8, C4, 4)
5208 || reg_value == CPENS (4, C8, C1, 1)
5209 || reg_value == CPENS (4, C8, C1, 5)
5210 || reg_value == CPENS (4, C8, C1, 6)
5211 || reg_value == CPENS (6, C8, C1, 1)
5212 || reg_value == CPENS (6, C8, C1, 5)
5213 || reg_value == CPENS (4, C8, C1, 0)
5214 || reg_value == CPENS (4, C8, C1, 4)
5215 || reg_value == CPENS (6, C8, C1, 0)
5216 || reg_value == CPENS (0, C8, C6, 1)
5217 || reg_value == CPENS (0, C8, C6, 3)
5218 || reg_value == CPENS (0, C8, C6, 5)
5219 || reg_value == CPENS (0, C8, C6, 7)
5220 || reg_value == CPENS (0, C8, C2, 1)
5221 || reg_value == CPENS (0, C8, C2, 3)
5222 || reg_value == CPENS (0, C8, C2, 5)
5223 || reg_value == CPENS (0, C8, C2, 7)
5224 || reg_value == CPENS (0, C8, C5, 1)
5225 || reg_value == CPENS (0, C8, C5, 3)
5226 || reg_value == CPENS (0, C8, C5, 5)
5227 || reg_value == CPENS (0, C8, C5, 7)
5228 || reg_value == CPENS (4, C8, C0, 2)
5229 || reg_value == CPENS (4, C8, C0, 6)
5230 || reg_value == CPENS (4, C8, C4, 2)
5231 || reg_value == CPENS (4, C8, C4, 6)
5232 || reg_value == CPENS (4, C8, C4, 3)
5233 || reg_value == CPENS (4, C8, C4, 7)
5234 || reg_value == CPENS (4, C8, C6, 1)
5235 || reg_value == CPENS (4, C8, C6, 5)
5236 || reg_value == CPENS (4, C8, C2, 1)
5237 || reg_value == CPENS (4, C8, C2, 5)
5238 || reg_value == CPENS (4, C8, C5, 1)
5239 || reg_value == CPENS (4, C8, C5, 5)
5240 || reg_value == CPENS (6, C8, C6, 1)
5241 || reg_value == CPENS (6, C8, C6, 5)
5242 || reg_value == CPENS (6, C8, C2, 1)
5243 || reg_value == CPENS (6, C8, C2, 5)
5244 || reg_value == CPENS (6, C8, C5, 1)
5245 || reg_value == CPENS (6, C8, C5, 5))
5246 && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
5247 return TRUE;
5248
5249 /* DC CVAP. Values are from aarch64_sys_regs_dc. */
5250 if (reg_value == CPENS (3, C7, C12, 1)
5251 && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
5252 return TRUE;
5253
5254 /* DC CVADP. Values are from aarch64_sys_regs_dc. */
5255 if (reg_value == CPENS (3, C7, C13, 1)
5256 && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_CVADP))
5257 return TRUE;
5258
5259 /* DC <dc_op> for ARMv8.5-A Memory Tagging Extension. */
5260 if ((reg_value == CPENS (0, C7, C6, 3)
5261 || reg_value == CPENS (0, C7, C6, 4)
5262 || reg_value == CPENS (0, C7, C10, 4)
5263 || reg_value == CPENS (0, C7, C14, 4)
5264 || reg_value == CPENS (3, C7, C10, 3)
5265 || reg_value == CPENS (3, C7, C12, 3)
5266 || reg_value == CPENS (3, C7, C13, 3)
5267 || reg_value == CPENS (3, C7, C14, 3)
5268 || reg_value == CPENS (3, C7, C4, 3)
5269 || reg_value == CPENS (0, C7, C6, 5)
5270 || reg_value == CPENS (0, C7, C6, 6)
5271 || reg_value == CPENS (0, C7, C10, 6)
5272 || reg_value == CPENS (0, C7, C14, 6)
5273 || reg_value == CPENS (3, C7, C10, 5)
5274 || reg_value == CPENS (3, C7, C12, 5)
5275 || reg_value == CPENS (3, C7, C13, 5)
5276 || reg_value == CPENS (3, C7, C14, 5)
5277 || reg_value == CPENS (3, C7, C4, 4))
5278 && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_MEMTAG))
5279 return TRUE;
5280
5281 /* AT S1E1RP, AT S1E1WP. Values are from aarch64_sys_regs_at. */
5282 if ((reg_value == CPENS (0, C7, C9, 0)
5283 || reg_value == CPENS (0, C7, C9, 1))
5284 && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
5285 return TRUE;
5286
5287 /* CFP/DVP/CPP RCTX: Values are from aarch64_sys_regs_sr. */
5288 if (reg_value == CPENS (3, C7, C3, 0)
5289 && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PREDRES))
5290 return TRUE;
5291
5292 return FALSE;
5293 }
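
/* For instance, "vmalle1os" in aarch64_sys_regs_tlbi above is
   CPENS (0, C8, C1, 0) with F_ARCHEXT: the ARMv8.4 TLB check accepts it
   when FEATURES includes AARCH64_FEATURE_V8_4, while a plain ARMv8-A
   feature set makes this function return FALSE for it.  */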
5294
5295 #undef C0
5296 #undef C1
5297 #undef C2
5298 #undef C3
5299 #undef C4
5300 #undef C5
5301 #undef C6
5302 #undef C7
5303 #undef C8
5304 #undef C9
5305 #undef C10
5306 #undef C11
5307 #undef C12
5308 #undef C13
5309 #undef C14
5310 #undef C15
5311
5312 #define BIT(INSN,BT) (((INSN) >> (BT)) & 1)
5313 #define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1 << (((HI) - (LO)) + 1)) - 1))
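/* For example, BITS (insn, 9, 5) extracts a 5-bit register field such as Rn,
   and BIT (insn, 22) tests a single bit such as the load/store L bit.  */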
5314
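/* Verifier for LDPSW: flag as undefined (ERR_UND) a write-back form whose
   base register equals either transfer register (unless the base is
   register 31), and a load whose two transfer registers are the same.  */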
5315 static enum err_type
5316 verify_ldpsw (const struct aarch64_inst *inst ATTRIBUTE_UNUSED,
5317 const aarch64_insn insn, bfd_vma pc ATTRIBUTE_UNUSED,
5318 bfd_boolean encoding ATTRIBUTE_UNUSED,
5319 aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED,
5320 aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED)
5321 {
5322 int t = BITS (insn, 4, 0);
5323 int n = BITS (insn, 9, 5);
5324 int t2 = BITS (insn, 14, 10);
5325
5326 if (BIT (insn, 23))
5327 {
5328 /* Write back enabled. */
5329 if ((t == n || t2 == n) && n != 31)
5330 return ERR_UND;
5331 }
5332
5333 if (BIT (insn, 22))
5334 {
5335 /* Load */
5336 if (t == t2)
5337 return ERR_UND;
5338 }
5339
5340 return ERR_OK;
5341 }
5342
5343 /* Verifier for vector-by-element instructions with three operands, where
5344 the condition `if sz:L == 11 then UNDEFINED` holds. */
5345
5346 static enum err_type
5347 verify_elem_sd (const struct aarch64_inst *inst, const aarch64_insn insn,
5348 bfd_vma pc ATTRIBUTE_UNUSED, bfd_boolean encoding,
5349 aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED,
5350 aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED)
5351 {
5352 const aarch64_insn undef_pattern = 0x3;
5353 aarch64_insn value;
5354
5355 assert (inst->opcode);
5356 assert (inst->opcode->operands[2] == AARCH64_OPND_Em);
5357 value = encoding ? inst->value : insn;
5358 assert (value);
5359
5360 if (undef_pattern == extract_fields (value, 0, 2, FLD_sz, FLD_L))
5361 return ERR_UND;
5362
5363 return ERR_OK;
5364 }
5365
5366 /* Initialize the instruction sequence INSN_SEQUENCE with the instruction
5367 INST. If INST is NULL the given sequence is cleared, leaving no open
5368 dependency sequence. */
5369
5370 void
5371 init_insn_sequence (const struct aarch64_inst *inst,
5372 aarch64_instr_sequence *insn_sequence)
5373 {
5374 int num_req_entries = 0;
5375 insn_sequence->next_insn = 0;
5376 insn_sequence->num_insns = num_req_entries;
5377 if (insn_sequence->instr)
5378 XDELETE (insn_sequence->instr);
5379 insn_sequence->instr = NULL;
5380
5381 if (inst)
5382 {
5383 insn_sequence->instr = XNEW (aarch64_inst);
5384 memcpy (insn_sequence->instr, inst, sizeof (aarch64_inst));
5385 }
5386
5387 /* Handle all the cases here. May need to think of something smarter than
5388 a giant if/else chain if this grows. At that time, a lookup table may be
5389 best. */
5390 if (inst && inst->opcode->constraints & C_SCAN_MOVPRFX)
5391 num_req_entries = 1;
5392
5393 if (insn_sequence->current_insns)
5394 XDELETEVEC (insn_sequence->current_insns);
5395 insn_sequence->current_insns = NULL;
5396
5397 if (num_req_entries != 0)
5398 {
5399 size_t size = num_req_entries * sizeof (aarch64_inst);
5400 insn_sequence->current_insns
5401 = (aarch64_inst**) XNEWVEC (aarch64_inst, num_req_entries);
5402 memset (insn_sequence->current_insns, 0, size);
5403 }
5404 }
5405
5406
5407 /* This function verifies that the instruction INST adheres to its specified
5408 constraints. If it does then ERR_OK is returned; if not then ERR_VFI is
5409 returned and MISMATCH_DETAIL contains the reason why verification failed.
5410
5411 The function is called both during assembly and disassembly. If assembling
5412 then ENCODING will be TRUE, else FALSE. If disassembling, PC will be set
5413 and will contain the PC of the current instruction w.r.t. the section.
5414
5415 If not ENCODING and PC is 0 then we are at the start of a section. The
5416 constraints are verified against the given state INSN_SEQUENCE, which is
5417 updated as it transitions through the verification. */
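
/* A purely illustrative sketch of the C_SCAN_MOVPRFX checks performed below
   (SVE assembly; not taken from any testsuite):

     movprfx z0.s, p0/m, z1.s
     add     z0.s, p0/m, z0.s, z2.s    <-- accepted: same destination, same
                                           merging predicate, matching size

     movprfx z0.s, p0/m, z1.s
     add     z3.s, p0/m, z3.s, z2.s    <-- rejected with "output register of
                                           preceding `movprfx' not used in
                                           current instruction" (ERR_VFI)  */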
5418
5419 enum err_type
5420 verify_constraints (const struct aarch64_inst *inst,
5421 const aarch64_insn insn ATTRIBUTE_UNUSED,
5422 bfd_vma pc,
5423 bfd_boolean encoding,
5424 aarch64_operand_error *mismatch_detail,
5425 aarch64_instr_sequence *insn_sequence)
5426 {
5427 assert (inst);
5428 assert (inst->opcode);
5429
5430 const struct aarch64_opcode *opcode = inst->opcode;
5431 if (!opcode->constraints && !insn_sequence->instr)
5432 return ERR_OK;
5433
5434 assert (insn_sequence);
5435
5436 enum err_type res = ERR_OK;
5437
5438 /* This instruction puts a constraint on the insn_sequence. */
5439 if (opcode->flags & F_SCAN)
5440 {
5441 if (insn_sequence->instr)
5442 {
5443 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5444 mismatch_detail->error = _("instruction opens new dependency "
5445 "sequence without ending previous one");
5446 mismatch_detail->index = -1;
5447 mismatch_detail->non_fatal = TRUE;
5448 res = ERR_VFI;
5449 }
5450
5451 init_insn_sequence (inst, insn_sequence);
5452 return res;
5453 }
5454
5455 /* Verify constraints on an existing sequence. */
5456 if (insn_sequence->instr)
5457 {
5458 const struct aarch64_opcode* inst_opcode = insn_sequence->instr->opcode;
5459 /* If we're decoding and we hit PC=0 with an open sequence then we haven't
5460 closed a previous one that we should have. */
5461 if (!encoding && pc == 0)
5462 {
5463 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5464 mismatch_detail->error = _("previous `movprfx' sequence not closed");
5465 mismatch_detail->index = -1;
5466 mismatch_detail->non_fatal = TRUE;
5467 res = ERR_VFI;
5468 /* Reset the sequence. */
5469 init_insn_sequence (NULL, insn_sequence);
5470 return res;
5471 }
5472
5473 /* Validate C_SCAN_MOVPRFX constraints. Move this to a lookup table. */
5474 if (inst_opcode->constraints & C_SCAN_MOVPRFX)
5475 {
5476 /* Check to see if the MOVPRFX SVE instruction is followed by an SVE
5477 instruction for better error messages. */
5478 if (!opcode->avariant
5479 || !(*opcode->avariant &
5480 (AARCH64_FEATURE_SVE | AARCH64_FEATURE_SVE2)))
5481 {
5482 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5483 mismatch_detail->error = _("SVE instruction expected after "
5484 "`movprfx'");
5485 mismatch_detail->index = -1;
5486 mismatch_detail->non_fatal = TRUE;
5487 res = ERR_VFI;
5488 goto done;
5489 }
5490
5491 /* Check to see if the MOVPRFX SVE instruction is followed by an SVE
5492 instruction that is allowed to be used with a MOVPRFX. */
5493 if (!(opcode->constraints & C_SCAN_MOVPRFX))
5494 {
5495 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5496 mismatch_detail->error = _("SVE `movprfx' compatible instruction "
5497 "expected");
5498 mismatch_detail->index = -1;
5499 mismatch_detail->non_fatal = TRUE;
5500 res = ERR_VFI;
5501 goto done;
5502 }
5503
5504 /* Next check for usage of the predicate register. */
5505 aarch64_opnd_info blk_dest = insn_sequence->instr->operands[0];
5506 aarch64_opnd_info blk_pred, inst_pred;
5507 memset (&blk_pred, 0, sizeof (aarch64_opnd_info));
5508 memset (&inst_pred, 0, sizeof (aarch64_opnd_info));
5509 bfd_boolean predicated = FALSE;
5510 assert (blk_dest.type == AARCH64_OPND_SVE_Zd);
5511
5512 /* Determine if the movprfx instruction used is predicated or not. */
5513 if (insn_sequence->instr->operands[1].type == AARCH64_OPND_SVE_Pg3)
5514 {
5515 predicated = TRUE;
5516 blk_pred = insn_sequence->instr->operands[1];
5517 }
5518
5519 unsigned char max_elem_size = 0;
5520 unsigned char current_elem_size;
5521 int num_op_used = 0, last_op_usage = 0;
5522 int i, inst_pred_idx = -1;
5523 int num_ops = aarch64_num_of_operands (opcode);
5524 for (i = 0; i < num_ops; i++)
5525 {
5526 aarch64_opnd_info inst_op = inst->operands[i];
5527 switch (inst_op.type)
5528 {
5529 case AARCH64_OPND_SVE_Zd:
5530 case AARCH64_OPND_SVE_Zm_5:
5531 case AARCH64_OPND_SVE_Zm_16:
5532 case AARCH64_OPND_SVE_Zn:
5533 case AARCH64_OPND_SVE_Zt:
5534 case AARCH64_OPND_SVE_Vm:
5535 case AARCH64_OPND_SVE_Vn:
5536 case AARCH64_OPND_Va:
5537 case AARCH64_OPND_Vn:
5538 case AARCH64_OPND_Vm:
5539 case AARCH64_OPND_Sn:
5540 case AARCH64_OPND_Sm:
5541 if (inst_op.reg.regno == blk_dest.reg.regno)
5542 {
5543 num_op_used++;
5544 last_op_usage = i;
5545 }
5546 current_elem_size
5547 = aarch64_get_qualifier_esize (inst_op.qualifier);
5548 if (current_elem_size > max_elem_size)
5549 max_elem_size = current_elem_size;
5550 break;
5551 case AARCH64_OPND_SVE_Pd:
5552 case AARCH64_OPND_SVE_Pg3:
5553 case AARCH64_OPND_SVE_Pg4_5:
5554 case AARCH64_OPND_SVE_Pg4_10:
5555 case AARCH64_OPND_SVE_Pg4_16:
5556 case AARCH64_OPND_SVE_Pm:
5557 case AARCH64_OPND_SVE_Pn:
5558 case AARCH64_OPND_SVE_Pt:
5559 inst_pred = inst_op;
5560 inst_pred_idx = i;
5561 break;
5562 default:
5563 break;
5564 }
5565 }
5566
5567 assert (max_elem_size != 0);
5568 aarch64_opnd_info inst_dest = inst->operands[0];
5569 /* Determine the size that should be used to compare against the
5570 movprfx size. */
5571 current_elem_size
5572 = opcode->constraints & C_MAX_ELEM
5573 ? max_elem_size
5574 : aarch64_get_qualifier_esize (inst_dest.qualifier);
5575
5576 /* If movprfx is predicated do some extra checks. */
5577 if (predicated)
5578 {
5579 /* The instruction must be predicated. */
5580 if (inst_pred_idx < 0)
5581 {
5582 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5583 mismatch_detail->error = _("predicated instruction expected "
5584 "after `movprfx'");
5585 mismatch_detail->index = -1;
5586 mismatch_detail->non_fatal = TRUE;
5587 res = ERR_VFI;
5588 goto done;
5589 }
5590
5591 /* The instruction must have a merging predicate. */
5592 if (inst_pred.qualifier != AARCH64_OPND_QLF_P_M)
5593 {
5594 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5595 mismatch_detail->error = _("merging predicate expected due "
5596 "to preceding `movprfx'");
5597 mismatch_detail->index = inst_pred_idx;
5598 mismatch_detail->non_fatal = TRUE;
5599 res = ERR_VFI;
5600 goto done;
5601 }
5602
5603 /* The same predicate register must be used in the instruction. */
5604 if (blk_pred.reg.regno != inst_pred.reg.regno)
5605 {
5606 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5607 mismatch_detail->error = _("predicate register differs "
5608 "from that in preceding "
5609 "`movprfx'");
5610 mismatch_detail->index = inst_pred_idx;
5611 mismatch_detail->non_fatal = TRUE;
5612 res = ERR_VFI;
5613 goto done;
5614 }
5615 }
5616
5617 /* Destructive operations by definition allow one extra use of the
5618 destination register, as a source operand. */
5619 int allowed_usage
5620 = aarch64_is_destructive_by_operands (opcode) ? 2 : 1;
5621
5622 /* Operand is not used at all. */
5623 if (num_op_used == 0)
5624 {
5625 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5626 mismatch_detail->error = _("output register of preceding "
5627 "`movprfx' not used in current "
5628 "instruction");
5629 mismatch_detail->index = 0;
5630 mismatch_detail->non_fatal = TRUE;
5631 res = ERR_VFI;
5632 goto done;
5633 }
5634
5635 /* We now know it's used; now determine exactly where. */
5636 if (blk_dest.reg.regno != inst_dest.reg.regno)
5637 {
5638 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5639 mismatch_detail->error = _("output register of preceding "
5640 "`movprfx' expected as output");
5641 mismatch_detail->index = 0;
5642 mismatch_detail->non_fatal = TRUE;
5643 res = ERR_VFI;
5644 goto done;
5645 }
5646
5647 /* Operand used more than allowed for the specific opcode type. */
5648 if (num_op_used > allowed_usage)
5649 {
5650 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5651 mismatch_detail->error = _("output register of preceding "
5652 "`movprfx' used as input");
5653 mismatch_detail->index = last_op_usage;
5654 mismatch_detail->non_fatal = TRUE;
5655 res = ERR_VFI;
5656 goto done;
5657 }
5658
5659 /* Now the only thing left is the qualifiers checks. The register
5660 must have the same maximum element size. */
5661 if (inst_dest.qualifier
5662 && blk_dest.qualifier
5663 && current_elem_size
5664 != aarch64_get_qualifier_esize (blk_dest.qualifier))
5665 {
5666 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5667 mismatch_detail->error = _("register size not compatible with "
5668 "previous `movprfx'");
5669 mismatch_detail->index = 0;
5670 mismatch_detail->non_fatal = TRUE;
5671 res = ERR_VFI;
5672 goto done;
5673 }
5674 }
5675
5676 done:
5677 /* Add the new instruction to the sequence. */
5678 memcpy (insn_sequence->current_insns + insn_sequence->next_insn++,
5679 inst, sizeof (aarch64_inst));
5680
5681 /* Check if sequence is now full. */
5682 if (insn_sequence->next_insn >= insn_sequence->num_insns)
5683 {
5684 /* Sequence is full, but we don't have anything special to do for now,
5685 so clear and reset it. */
5686 init_insn_sequence (NULL, insn_sequence);
5687 }
5688 }
5689
5690 return res;
5691 }
5692
5693
5694 /* Return true if VALUE cannot be moved into an SVE register using DUP
5695 (with any element size, not just ESIZE) and if using DUPM would
5696 therefore be OK. ESIZE is the number of bytes in the immediate. */
5697
5698 bfd_boolean
5699 aarch64_sve_dupm_mov_immediate_p (uint64_t uvalue, int esize)
5700 {
5701 int64_t svalue = uvalue;
5702 uint64_t upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
5703
5704 if ((uvalue & ~upper) != uvalue && (uvalue | upper) != uvalue)
5705 return FALSE;
5706 if (esize <= 4 || (uint32_t) uvalue == (uint32_t) (uvalue >> 32))
5707 {
5708 svalue = (int32_t) uvalue;
5709 if (esize <= 2 || (uint16_t) uvalue == (uint16_t) (uvalue >> 16))
5710 {
5711 svalue = (int16_t) uvalue;
5712 if (esize == 1 || (uint8_t) uvalue == (uint8_t) (uvalue >> 8))
5713 return FALSE;
5714 }
5715 }
5716 if ((svalue & 0xff) == 0)
5717 svalue /= 256;
5718 return svalue < -128 || svalue >= 128;
5719 }
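
/* For example, aarch64_sve_dupm_mov_immediate_p (0xffff00ff, 4) returns TRUE:
   no DUP immediate (an 8-bit signed value, optionally shifted left by 8) can
   produce 0xffff00ff, whereas it is a valid bitmask immediate for DUPM.
   Conversely, aarch64_sve_dupm_mov_immediate_p (0x100, 4) returns FALSE,
   since DUP can encode 0x100 as 1 << 8.  (Worked examples for illustration
   only.)  */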
5720
5721 /* Include the opcode description table as well as the operand description
5722 table. */
5723 #define VERIFIER(x) verify_##x
5724 #include "aarch64-tbl.h"