]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blob - opcodes/aarch64-opc.c
Update year range in copyright notice of all files.
[thirdparty/binutils-gdb.git] / opcodes / aarch64-opc.c
1 /* aarch64-opc.c -- AArch64 opcode support.
2 Copyright (C) 2009-2017 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include <assert.h>
23 #include <stdlib.h>
24 #include <stdio.h>
25 #include <stdint.h>
26 #include <stdarg.h>
27 #include <inttypes.h>
28
29 #include "opintl.h"
30 #include "libiberty.h"
31
32 #include "aarch64-opc.h"
33
#ifdef DEBUG_AARCH64
/* Non-zero to enable verbose debug dumping (consumed by the DEBUG_TRACE
   machinery and the dump_* helpers below).  */
int debug_dump = FALSE;
#endif /* DEBUG_AARCH64 */
37
/* The enumeration strings associated with each value of a 5-bit SVE
   pattern operand.  A null entry indicates a reserved meaning; the array
   is indexed directly by the 5-bit immediate value.  */
const char *const aarch64_sve_pattern_array[32] = {
  /* 0-7.  */
  "pow2",
  "vl1",
  "vl2",
  "vl3",
  "vl4",
  "vl5",
  "vl6",
  "vl7",
  /* 8-15.  */
  "vl8",
  "vl16",
  "vl32",
  "vl64",
  "vl128",
  "vl256",
  0,
  0,
  /* 16-23.  */
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  /* 24-31.  */
  0,
  0,
  0,
  0,
  0,
  "mul4",
  "mul3",
  "all"
};
78
/* The enumeration strings associated with each value of a 4-bit SVE
   prefetch operand.  A null entry indicates a reserved meaning; the array
   is indexed directly by the 4-bit immediate value.  */
const char *const aarch64_sve_prfop_array[16] = {
  /* 0-7.  */
  "pldl1keep",
  "pldl1strm",
  "pldl2keep",
  "pldl2strm",
  "pldl3keep",
  "pldl3strm",
  0,
  0,
  /* 8-15.  */
  "pstl1keep",
  "pstl1strm",
  "pstl2keep",
  "pstl2strm",
  "pstl3keep",
  "pstl3strm",
  0,
  0
};
101
102 /* Helper functions to determine which operand to be used to encode/decode
103 the size:Q fields for AdvSIMD instructions. */
104
105 static inline bfd_boolean
106 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
107 {
108 return ((qualifier >= AARCH64_OPND_QLF_V_8B
109 && qualifier <= AARCH64_OPND_QLF_V_1Q) ? TRUE
110 : FALSE);
111 }
112
113 static inline bfd_boolean
114 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
115 {
116 return ((qualifier >= AARCH64_OPND_QLF_S_B
117 && qualifier <= AARCH64_OPND_QLF_S_Q) ? TRUE
118 : FALSE);
119 }
120
/* Classification of the qualifier-sequence shapes used by AdvSIMD
   instructions; used below to choose which operand carries the size:Q
   information.  */
enum data_pattern
{
  DP_UNKNOWN,
  DP_VECTOR_3SAME,		/* e.g. v.4s, v.4s, v.4s.  */
  DP_VECTOR_LONG,		/* e.g. v.8h, v.8b, v.8b.  */
  DP_VECTOR_WIDE,		/* e.g. v.8h, v.8h, v.8b.  */
  DP_VECTOR_ACROSS_LANES,	/* e.g. SADDLV <V><d>, <Vn>.<T>.  */
};
129
/* For each data pattern, the index of the operand whose qualifier
   determines the size:Q encoding; indexed by enum data_pattern, so the
   order here must match that enumeration.  */
static const char significant_operand_index [] =
{
  0,	/* DP_UNKNOWN, by default using operand 0.  */
  0,	/* DP_VECTOR_3SAME */
  1,	/* DP_VECTOR_LONG */
  2,	/* DP_VECTOR_WIDE */
  1,	/* DP_VECTOR_ACROSS_LANES */
};
138
139 /* Given a sequence of qualifiers in QUALIFIERS, determine and return
140 the data pattern.
141 N.B. QUALIFIERS is a possible sequence of qualifiers each of which
142 corresponds to one of a sequence of operands. */
143
144 static enum data_pattern
145 get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers)
146 {
147 if (vector_qualifier_p (qualifiers[0]) == TRUE)
148 {
149 /* e.g. v.4s, v.4s, v.4s
150 or v.4h, v.4h, v.h[3]. */
151 if (qualifiers[0] == qualifiers[1]
152 && vector_qualifier_p (qualifiers[2]) == TRUE
153 && (aarch64_get_qualifier_esize (qualifiers[0])
154 == aarch64_get_qualifier_esize (qualifiers[1]))
155 && (aarch64_get_qualifier_esize (qualifiers[0])
156 == aarch64_get_qualifier_esize (qualifiers[2])))
157 return DP_VECTOR_3SAME;
158 /* e.g. v.8h, v.8b, v.8b.
159 or v.4s, v.4h, v.h[2].
160 or v.8h, v.16b. */
161 if (vector_qualifier_p (qualifiers[1]) == TRUE
162 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
163 && (aarch64_get_qualifier_esize (qualifiers[0])
164 == aarch64_get_qualifier_esize (qualifiers[1]) << 1))
165 return DP_VECTOR_LONG;
166 /* e.g. v.8h, v.8h, v.8b. */
167 if (qualifiers[0] == qualifiers[1]
168 && vector_qualifier_p (qualifiers[2]) == TRUE
169 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
170 && (aarch64_get_qualifier_esize (qualifiers[0])
171 == aarch64_get_qualifier_esize (qualifiers[2]) << 1)
172 && (aarch64_get_qualifier_esize (qualifiers[0])
173 == aarch64_get_qualifier_esize (qualifiers[1])))
174 return DP_VECTOR_WIDE;
175 }
176 else if (fp_qualifier_p (qualifiers[0]) == TRUE)
177 {
178 /* e.g. SADDLV <V><d>, <Vn>.<T>. */
179 if (vector_qualifier_p (qualifiers[1]) == TRUE
180 && qualifiers[2] == AARCH64_OPND_QLF_NIL)
181 return DP_VECTOR_ACROSS_LANES;
182 }
183
184 return DP_UNKNOWN;
185 }
186
187 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
188 the AdvSIMD instructions. */
189 /* N.B. it is possible to do some optimization that doesn't call
190 get_data_pattern each time when we need to select an operand. We can
   either cache the calculated result or statically generate the data,
192 however, it is not obvious that the optimization will bring significant
193 benefit. */
194
195 int
196 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode)
197 {
198 return
199 significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])];
200 }
201 \f
/* Bit-field descriptors ({ lsb, width }) for every instruction field the
   encoder/decoder references.  NOTE(review): entries appear to be indexed
   by the FLD_* enumerators declared elsewhere (aarch64-opc.h) -- the order
   here must stay in sync with that enumeration; confirm before reordering.  */
const aarch64_field fields[] =
{
    { 0, 0 },	/* NIL.  */
    { 0, 4 },	/* cond2: condition in truly conditional-executed inst.  */
    { 0, 4 },	/* nzcv: flag bit specifier, encoded in the "nzcv" field.  */
    { 5, 5 },	/* defgh: d:e:f:g:h bits in AdvSIMD modified immediate.  */
    { 16, 3 },	/* abc: a:b:c bits in AdvSIMD modified immediate.  */
    { 5, 19 },	/* imm19: e.g. in CBZ.  */
    { 5, 19 },	/* immhi: e.g. in ADRP.  */
    { 29, 2 },	/* immlo: e.g. in ADRP.  */
    { 22, 2 },	/* size: in most AdvSIMD and floating-point instructions.  */
    { 10, 2 },	/* vldst_size: size field in the AdvSIMD load/store inst.  */
    { 29, 1 },	/* op: in AdvSIMD modified immediate instructions.  */
    { 30, 1 },	/* Q: in most AdvSIMD instructions.  */
    { 0, 5 },	/* Rt: in load/store instructions.  */
    { 0, 5 },	/* Rd: in many integer instructions.  */
    { 5, 5 },	/* Rn: in many integer instructions.  */
    { 10, 5 },	/* Rt2: in load/store pair instructions.  */
    { 10, 5 },	/* Ra: in fp instructions.  */
    { 5, 3 },	/* op2: in the system instructions.  */
    { 8, 4 },	/* CRm: in the system instructions.  */
    { 12, 4 },	/* CRn: in the system instructions.  */
    { 16, 3 },	/* op1: in the system instructions.  */
    { 19, 2 },	/* op0: in the system instructions.  */
    { 10, 3 },	/* imm3: in add/sub extended reg instructions.  */
    { 12, 4 },	/* cond: condition flags as a source operand.  */
    { 12, 4 },	/* opcode: in advsimd load/store instructions.  */
    { 12, 4 },	/* cmode: in advsimd modified immediate instructions.  */
    { 13, 3 },	/* asisdlso_opcode: opcode in advsimd ld/st single element.  */
    { 13, 2 },	/* len: in advsimd tbl/tbx instructions.  */
    { 16, 5 },	/* Rm: in ld/st reg offset and some integer inst.  */
    { 16, 5 },	/* Rs: in load/store exclusive instructions.  */
    { 13, 3 },	/* option: in ld/st reg offset + add/sub extended reg inst.  */
    { 12, 1 },	/* S: in load/store reg offset instructions.  */
    { 21, 2 },	/* hw: in move wide constant instructions.  */
    { 22, 2 },	/* opc: in load/store reg offset instructions.  */
    { 23, 1 },	/* opc1: in load/store reg offset instructions.  */
    { 22, 2 },	/* shift: in add/sub reg/imm shifted instructions.  */
    { 22, 2 },	/* type: floating point type field in fp data inst.  */
    { 30, 2 },	/* ldst_size: size field in ld/st reg offset inst.  */
    { 10, 6 },	/* imm6: in add/sub reg shifted instructions.  */
    { 11, 4 },	/* imm4: in advsimd ext and advsimd ins instructions.  */
    { 16, 5 },	/* imm5: in conditional compare (immediate) instructions.  */
    { 15, 7 },	/* imm7: in load/store pair pre/post index instructions.  */
    { 13, 8 },	/* imm8: in floating-point scalar move immediate inst.  */
    { 12, 9 },	/* imm9: in load/store pre/post index instructions.  */
    { 10, 12 },	/* imm12: in ld/st unsigned imm or add/sub shifted inst.  */
    { 5, 14 },	/* imm14: in test bit and branch instructions.  */
    { 5, 16 },	/* imm16: in exception instructions.  */
    { 0, 26 },	/* imm26: in unconditional branch instructions.  */
    { 10, 6 },	/* imms: in bitfield and logical immediate instructions.  */
    { 16, 6 },	/* immr: in bitfield and logical immediate instructions.  */
    { 16, 3 },	/* immb: in advsimd shift by immediate instructions.  */
    { 19, 4 },	/* immh: in advsimd shift by immediate instructions.  */
    { 22, 1 },	/* S: in LDRAA and LDRAB instructions.  */
    { 22, 1 },	/* N: in logical (immediate) instructions.  */
    { 11, 1 },	/* index: in ld/st inst deciding the pre/post-index.  */
    { 24, 1 },	/* index2: in ld/st pair inst deciding the pre/post-index.  */
    { 31, 1 },	/* sf: in integer data processing instructions.  */
    { 30, 1 },	/* lse_size: in LSE extension atomic instructions.  */
    { 11, 1 },	/* H: in advsimd scalar x indexed element instructions.  */
    { 21, 1 },	/* L: in advsimd scalar x indexed element instructions.  */
    { 20, 1 },	/* M: in advsimd scalar x indexed element instructions.  */
    { 31, 1 },	/* b5: in the test bit and branch instructions.  */
    { 19, 5 },	/* b40: in the test bit and branch instructions.  */
    { 10, 6 },	/* scale: in the fixed-point scalar to fp converting inst.  */
    { 4, 1 },	/* SVE_M_4: Merge/zero select, bit 4.  */
    { 14, 1 },	/* SVE_M_14: Merge/zero select, bit 14.  */
    { 16, 1 },	/* SVE_M_16: Merge/zero select, bit 16.  */
    { 17, 1 },	/* SVE_N: SVE equivalent of N.  */
    { 0, 4 },	/* SVE_Pd: p0-p15, bits [3,0].  */
    { 10, 3 },	/* SVE_Pg3: p0-p7, bits [12,10].  */
    { 5, 4 },	/* SVE_Pg4_5: p0-p15, bits [8,5].  */
    { 10, 4 },	/* SVE_Pg4_10: p0-p15, bits [13,10].  */
    { 16, 4 },	/* SVE_Pg4_16: p0-p15, bits [19,16].  */
    { 16, 4 },	/* SVE_Pm: p0-p15, bits [19,16].  */
    { 5, 4 },	/* SVE_Pn: p0-p15, bits [8,5].  */
    { 0, 4 },	/* SVE_Pt: p0-p15, bits [3,0].  */
    { 5, 5 },	/* SVE_Rm: SVE alternative position for Rm.  */
    { 16, 5 },	/* SVE_Rn: SVE alternative position for Rn.  */
    { 0, 5 },	/* SVE_Vd: Scalar SIMD&FP register, bits [4,0].  */
    { 5, 5 },	/* SVE_Vm: Scalar SIMD&FP register, bits [9,5].  */
    { 5, 5 },	/* SVE_Vn: Scalar SIMD&FP register, bits [9,5].  */
    { 5, 5 },	/* SVE_Za_5: SVE vector register, bits [9,5].  */
    { 16, 5 },	/* SVE_Za_16: SVE vector register, bits [20,16].  */
    { 0, 5 },	/* SVE_Zd: SVE vector register. bits [4,0].  */
    { 5, 5 },	/* SVE_Zm_5: SVE vector register, bits [9,5].  */
    { 16, 5 },	/* SVE_Zm_16: SVE vector register, bits [20,16].  */
    { 5, 5 },	/* SVE_Zn: SVE vector register, bits [9,5].  */
    { 0, 5 },	/* SVE_Zt: SVE vector register, bits [4,0].  */
    { 5, 1 },	/* SVE_i1: single-bit immediate.  */
    { 16, 3 },	/* SVE_imm3: 3-bit immediate field.  */
    { 16, 4 },	/* SVE_imm4: 4-bit immediate field.  */
    { 5, 5 },	/* SVE_imm5: 5-bit immediate field.  */
    { 16, 5 },	/* SVE_imm5b: secondary 5-bit immediate field.  */
    { 16, 6 },	/* SVE_imm6: 6-bit immediate field.  */
    { 14, 7 },	/* SVE_imm7: 7-bit immediate field.  */
    { 5, 8 },	/* SVE_imm8: 8-bit immediate field.  */
    { 5, 9 },	/* SVE_imm9: 9-bit immediate field.  */
    { 11, 6 },	/* SVE_immr: SVE equivalent of immr.  */
    { 5, 6 },	/* SVE_imms: SVE equivalent of imms.  */
    { 10, 2 },	/* SVE_msz: 2-bit shift amount for ADR.  */
    { 5, 5 },	/* SVE_pattern: vector pattern enumeration.  */
    { 0, 4 },	/* SVE_prfop: prefetch operation for SVE PRF[BHWD].  */
    { 22, 1 },	/* SVE_sz: 1-bit element size select.  */
    { 16, 4 },	/* SVE_tsz: triangular size select.  */
    { 22, 2 },	/* SVE_tszh: triangular size select high, bits [23,22].  */
    { 8, 2 },	/* SVE_tszl_8: triangular size select low, bits [9,8].  */
    { 19, 2 },	/* SVE_tszl_19: triangular size select low, bits [20,19].  */
    { 14, 1 },	/* SVE_xs_14: UXTW/SXTW select (bit 14).  */
    { 22, 1 },	/* SVE_xs_22: UXTW/SXTW select (bit 22).  */
    { 11, 2 },	/* rotate1: FCMLA immediate rotate.  */
    { 13, 2 },	/* rotate2: Indexed element FCMLA immediate rotate.  */
    { 12, 1 },	/* rotate3: FCADD immediate rotate.  */
};
317
/* Return the class of operand TYPE, as recorded in the generated
   aarch64_operands table.  */
enum aarch64_operand_class
aarch64_get_operand_class (enum aarch64_opnd type)
{
  return aarch64_operands[type].op_class;
}
323
/* Return the name string of operand TYPE, as recorded in the generated
   aarch64_operands table.  */
const char *
aarch64_get_operand_name (enum aarch64_opnd type)
{
  return aarch64_operands[type].name;
}
329
/* Return the description string of operand TYPE.
   This is usually used for diagnostic purposes.  */
const char *
aarch64_get_operand_desc (enum aarch64_opnd type)
{
  return aarch64_operands[type].desc;
}
337
/* Table of all conditional affixes.  Indexed by the 4-bit condition
   encoding (see get_cond_from_value below); each entry lists the accepted
   name spellings followed by that encoding.  */
const aarch64_cond aarch64_conds[16] =
{
  {{"eq", "none"}, 0x0},
  {{"ne", "any"}, 0x1},
  {{"cs", "hs", "nlast"}, 0x2},
  {{"cc", "lo", "ul", "last"}, 0x3},
  {{"mi", "first"}, 0x4},
  {{"pl", "nfrst"}, 0x5},
  {{"vs"}, 0x6},
  {{"vc"}, 0x7},
  {{"hi", "pmore"}, 0x8},
  {{"ls", "plast"}, 0x9},
  {{"ge", "tcont"}, 0xa},
  {{"lt", "tstop"}, 0xb},
  {{"gt"}, 0xc},
  {{"le"}, 0xd},
  {{"al"}, 0xe},
  {{"nv"}, 0xf},
};
358
359 const aarch64_cond *
360 get_cond_from_value (aarch64_insn value)
361 {
362 assert (value < 16);
363 return &aarch64_conds[(unsigned int) value];
364 }
365
/* Return the inverse of condition COND.  Flipping the least significant
   bit of the 4-bit encoding inverts the condition; the table above pairs
   each condition with its inverse (e.g. eq 0x0 <-> ne 0x1).  */
const aarch64_cond *
get_inverted_cond (const aarch64_cond *cond)
{
  return &aarch64_conds[cond->value ^ 0x1];
}
371
/* Table describing the operand extension/shifting operators; indexed by
   enum aarch64_modifier_kind -- the entry order must match that
   enumeration (aarch64_get_operand_modifier relies on it).

   The value column provides the most common values for encoding modifiers,
   which enables table-driven encoding/decoding for the modifiers.  */
const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
{
    {"none", 0x0},
    {"msl",  0x0},
    {"ror",  0x3},
    {"asr",  0x2},
    {"lsr",  0x1},
    {"lsl",  0x0},
    {"uxtb", 0x0},
    {"uxth", 0x1},
    {"uxtw", 0x2},
    {"uxtx", 0x3},
    {"sxtb", 0x4},
    {"sxth", 0x5},
    {"sxtw", 0x6},
    {"sxtx", 0x7},
    {"mul", 0x0},
    {"mul vl", 0x0},
    {NULL, 0},
};
397
/* Return the modifier kind corresponding to table entry DESC.  The
   pointer subtraction yields the entry's index, which is valid because
   aarch64_operand_modifiers is ordered to match the enumeration.  */
enum aarch64_modifier_kind
aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc)
{
  return desc - aarch64_operand_modifiers;
}
403
/* Return the common encoding value of modifier KIND (the value column of
   aarch64_operand_modifiers).  */
aarch64_insn
aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind)
{
  return aarch64_operand_modifiers[kind].value;
}
409
410 enum aarch64_modifier_kind
411 aarch64_get_operand_modifier_from_value (aarch64_insn value,
412 bfd_boolean extend_p)
413 {
414 if (extend_p == TRUE)
415 return AARCH64_MOD_UXTB + value;
416 else
417 return AARCH64_MOD_LSL - value;
418 }
419
420 bfd_boolean
421 aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
422 {
423 return (kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX)
424 ? TRUE : FALSE;
425 }
426
427 static inline bfd_boolean
428 aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
429 {
430 return (kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL)
431 ? TRUE : FALSE;
432 }
433
/* Table of memory barrier option names, indexed by the 4-bit encoded
   value.  Reserved encodings keep their raw "#0xNN" immediate spelling.  */
const struct aarch64_name_value_pair aarch64_barrier_options[16] =
{
    { "#0x00", 0x0 },
    { "oshld", 0x1 },
    { "oshst", 0x2 },
    { "osh",   0x3 },
    { "#0x04", 0x4 },
    { "nshld", 0x5 },
    { "nshst", 0x6 },
    { "nsh",   0x7 },
    { "#0x08", 0x8 },
    { "ishld", 0x9 },
    { "ishst", 0xa },
    { "ish",   0xb },
    { "#0x0c", 0xc },
    { "ld",    0xd },
    { "st",    0xe },
    { "sy",    0xf },
};
453
/* Table describing the operands supported by the aliases of the HINT
   instruction.

   The name column is the operand that is accepted for the alias.  The value
   column is the hint number of the alias.  The list of operands is
   terminated by NULL in the name column.  */

const struct aarch64_name_value_pair aarch64_hint_options[] =
{
  { "csync", 0x11 },    /* PSB CSYNC.  */
  { NULL, 0x0 },	/* Sentinel: end of list.  */
};
466
/* Table of PRFM prefetch operation names, indexed by the 5-bit prfop
   encoding built from three sub-fields:
     op -> op: load = 0 instruction = 1 store = 2
     l  -> level: 1-3
     t  -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1
   NULL names mark reserved encodings.  */
#define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
const struct aarch64_name_value_pair aarch64_prfops[32] =
{
  { "pldl1keep", B(0, 1, 0) },
  { "pldl1strm", B(0, 1, 1) },
  { "pldl2keep", B(0, 2, 0) },
  { "pldl2strm", B(0, 2, 1) },
  { "pldl3keep", B(0, 3, 0) },
  { "pldl3strm", B(0, 3, 1) },
  { NULL, 0x06 },
  { NULL, 0x07 },
  { "plil1keep", B(1, 1, 0) },
  { "plil1strm", B(1, 1, 1) },
  { "plil2keep", B(1, 2, 0) },
  { "plil2strm", B(1, 2, 1) },
  { "plil3keep", B(1, 3, 0) },
  { "plil3strm", B(1, 3, 1) },
  { NULL, 0x0e },
  { NULL, 0x0f },
  { "pstl1keep", B(2, 1, 0) },
  { "pstl1strm", B(2, 1, 1) },
  { "pstl2keep", B(2, 2, 0) },
  { "pstl2strm", B(2, 2, 1) },
  { "pstl3keep", B(2, 3, 0) },
  { "pstl3strm", B(2, 3, 1) },
  { NULL, 0x16 },
  { NULL, 0x17 },
  { NULL, 0x18 },
  { NULL, 0x19 },
  { NULL, 0x1a },
  { NULL, 0x1b },
  { NULL, 0x1c },
  { NULL, 0x1d },
  { NULL, 0x1e },
  { NULL, 0x1f },
};
#undef B
507 \f
508 /* Utilities on value constraint. */
509
/* Return non-zero iff VALUE lies in the inclusive range [LOW, HIGH].  */
static inline int
value_in_range_p (int64_t value, int low, int high)
{
  return value >= low && value <= high;
}
515
/* Return non-zero iff VALUE is an exact multiple of ALIGN.  */
static inline int
value_aligned_p (int64_t value, int align)
{
  return (value % align) ? 0 : 1;
}
522
523 /* A signed value fits in a field. */
524 static inline int
525 value_fit_signed_field_p (int64_t value, unsigned width)
526 {
527 assert (width < 32);
528 if (width < sizeof (value) * 8)
529 {
530 int64_t lim = (int64_t)1 << (width - 1);
531 if (value >= -lim && value < lim)
532 return 1;
533 }
534 return 0;
535 }
536
537 /* An unsigned value fits in a field. */
538 static inline int
539 value_fit_unsigned_field_p (int64_t value, unsigned width)
540 {
541 assert (width < 32);
542 if (width < sizeof (value) * 8)
543 {
544 int64_t lim = (int64_t)1 << width;
545 if (value >= 0 && value < lim)
546 return 1;
547 }
548 return 0;
549 }
550
/* Return 1 if OPERAND is SP or WSP, i.e. an integer register operand whose
   position permits the stack pointer and whose register number is 31 (the
   encoding shared by SP and the zero register).  */
int
aarch64_stack_pointer_p (const aarch64_opnd_info *operand)
{
  return ((aarch64_get_operand_class (operand->type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && operand_maybe_stack_pointer (aarch64_operands + operand->type)
	  && operand->reg.regno == 31);
}
560
/* Return 1 if OPERAND is XZR or WZR, i.e. an integer register operand whose
   position does not permit the stack pointer and whose register number is
   31 (the encoding shared by SP and the zero register).  */
int
aarch64_zero_register_p (const aarch64_opnd_info *operand)
{
  return ((aarch64_get_operand_class (operand->type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && !operand_maybe_stack_pointer (aarch64_operands + operand->type)
	  && operand->reg.regno == 31);
}
570
/* Return true if the operand *OPERAND that has the operand code
   OPERAND->TYPE and been qualified by OPERAND->QUALIFIER can be also
   qualified by the qualifier TARGET.

   The only cross-acceptable pairs are W <-> WSP and X <-> SP, and only
   when the operand (register number 31 in an SP-capable position, or an
   SP-capable operand type) actually admits the other spelling.  */

static inline int
operand_also_qualified_p (const struct aarch64_opnd_info *operand,
			  aarch64_opnd_qualifier_t target)
{
  switch (operand->qualifier)
    {
    case AARCH64_OPND_QLF_W:
      /* A W operand that is actually WSP also matches the WSP qualifier.  */
      if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand))
	return 1;
      break;
    case AARCH64_OPND_QLF_X:
      /* An X operand that is actually SP also matches the SP qualifier.  */
      if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand))
	return 1;
      break;
    case AARCH64_OPND_QLF_WSP:
      if (target == AARCH64_OPND_QLF_W
	  && operand_maybe_stack_pointer (aarch64_operands + operand->type))
	return 1;
      break;
    case AARCH64_OPND_QLF_SP:
      if (target == AARCH64_OPND_QLF_X
	  && operand_maybe_stack_pointer (aarch64_operands + operand->type))
	return 1;
      break;
    default:
      break;
    }

  return 0;
}
605
606 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
607 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
608
609 Return NIL if more than one expected qualifiers are found. */
610
611 aarch64_opnd_qualifier_t
612 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list,
613 int idx,
614 const aarch64_opnd_qualifier_t known_qlf,
615 int known_idx)
616 {
617 int i, saved_i;
618
619 /* Special case.
620
621 When the known qualifier is NIL, we have to assume that there is only
622 one qualifier sequence in the *QSEQ_LIST and return the corresponding
623 qualifier directly. One scenario is that for instruction
624 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
625 which has only one possible valid qualifier sequence
626 NIL, S_D
627 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
628 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
629
630 Because the qualifier NIL has dual roles in the qualifier sequence:
631 it can mean no qualifier for the operand, or the qualifer sequence is
632 not in use (when all qualifiers in the sequence are NILs), we have to
633 handle this special case here. */
634 if (known_qlf == AARCH64_OPND_NIL)
635 {
636 assert (qseq_list[0][known_idx] == AARCH64_OPND_NIL);
637 return qseq_list[0][idx];
638 }
639
640 for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
641 {
642 if (qseq_list[i][known_idx] == known_qlf)
643 {
644 if (saved_i != -1)
645 /* More than one sequences are found to have KNOWN_QLF at
646 KNOWN_IDX. */
647 return AARCH64_OPND_NIL;
648 saved_i = i;
649 }
650 }
651
652 return qseq_list[saved_i][idx];
653 }
654
/* Classification of operand qualifiers; selects how the three data fields
   of struct operand_qualifier_data are interpreted.  */
enum operand_qualifier_kind
{
  OQK_NIL,		/* No qualifier.  */
  OQK_OPD_VARIANT,	/* Register/vector variant: esize, nelem, encoding.  */
  OQK_VALUE_IN_RANGE,	/* Immediate constraint: lower bound, upper bound.  */
  OQK_MISC,		/* Miscellaneous.  */
};

/* Operand qualifier description.  */
struct operand_qualifier_data
{
  /* The usage of the three data fields depends on the qualifier kind
     (see the comments in aarch64_opnd_qualifiers below).  */
  int data0;
  int data1;
  int data2;
  /* Description.  */
  const char *desc;
  /* Kind.  */
  enum operand_qualifier_kind kind;
};
675
/* Indexed by the operand qualifier enumerators -- the entry order must
   match that enumeration.  */
struct operand_qualifier_data aarch64_opnd_qualifiers[] =
{
  {0, 0, 0, "NIL", OQK_NIL},

  /* Operand variant qualifiers.
     First 3 fields:
     element size, number of elements and common value for encoding.  */

  {4, 1, 0x0, "w", OQK_OPD_VARIANT},
  {8, 1, 0x1, "x", OQK_OPD_VARIANT},
  {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
  {8, 1, 0x1, "sp", OQK_OPD_VARIANT},

  {1, 1, 0x0, "b", OQK_OPD_VARIANT},
  {2, 1, 0x1, "h", OQK_OPD_VARIANT},
  {4, 1, 0x2, "s", OQK_OPD_VARIANT},
  {8, 1, 0x3, "d", OQK_OPD_VARIANT},
  {16, 1, 0x4, "q", OQK_OPD_VARIANT},

  {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
  {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
  {2, 2, 0x0, "2h", OQK_OPD_VARIANT},
  {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
  {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
  {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
  {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
  {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
  {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
  {16, 1, 0x8, "1q", OQK_OPD_VARIANT},

  {0, 0, 0, "z", OQK_OPD_VARIANT},
  {0, 0, 0, "m", OQK_OPD_VARIANT},

  /* Qualifiers constraining the value range.
     First 3 fields:
     Lower bound, higher bound, unused.  */

  {0, 15, 0, "CR",       OQK_VALUE_IN_RANGE},
  {0,  7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
  {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
  {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
  {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
  {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
  {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},

  /* Qualifiers for miscellaneous purpose.
     First 3 fields:
     unused, unused and unused.
     NOTE(review): these entries use a literal 0 kind (== OQK_NIL) rather
     than OQK_MISC; the two kind predicates below treat both the same, but
     confirm other users before relying on OQK_MISC here.  */

  {0, 0, 0, "lsl", 0},
  {0, 0, 0, "msl", 0},

  {0, 0, 0, "retrieving", 0},
};
731
732 static inline bfd_boolean
733 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
734 {
735 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT)
736 ? TRUE : FALSE;
737 }
738
739 static inline bfd_boolean
740 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
741 {
742 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE)
743 ? TRUE : FALSE;
744 }
745
/* Return the printable name of QUALIFIER (e.g. "4s"), mainly for use in
   diagnostics and debug output.  */
const char*
aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier)
{
  return aarch64_opnd_qualifiers[qualifier].desc;
}
751
/* Given an operand qualifier, return the expected data element size
   (in bytes) of a qualified operand.  Only valid for operand-variant
   qualifiers.  */
unsigned char
aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier)
{
  assert (operand_variant_qualifier_p (qualifier) == TRUE);
  return aarch64_opnd_qualifiers[qualifier].data0;
}
760
/* Given an operand qualifier, return the number of data elements it
   implies (e.g. 4 for the 4S arrangement).  Only valid for operand-variant
   qualifiers.  */
unsigned char
aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier)
{
  assert (operand_variant_qualifier_p (qualifier) == TRUE);
  return aarch64_opnd_qualifiers[qualifier].data1;
}
767
/* Return the common instruction-encoding value associated with QUALIFIER
   (the data2 field of the table).  Only valid for operand-variant
   qualifiers.  */
aarch64_insn
aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier)
{
  assert (operand_variant_qualifier_p (qualifier) == TRUE);
  return aarch64_opnd_qualifiers[qualifier].data2;
}
774
/* Return the inclusive lower bound of the value range imposed by
   QUALIFIER.  Only valid for value-in-range qualifiers.  */
static int
get_lower_bound (aarch64_opnd_qualifier_t qualifier)
{
  assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
  return aarch64_opnd_qualifiers[qualifier].data0;
}
781
/* Return the inclusive upper bound of the value range imposed by
   QUALIFIER.  Only valid for value-in-range qualifiers.  */
static int
get_upper_bound (aarch64_opnd_qualifier_t qualifier)
{
  assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
  return aarch64_opnd_qualifiers[qualifier].data1;
}
788
789 #ifdef DEBUG_AARCH64
/* printf-style debug helper: emit STR prefixed with "#### " and followed
   by a newline on stdout.  */
void
aarch64_verbose (const char *str, ...)
{
  va_list ap;
  va_start (ap, str);
  printf ("#### ");
  vprintf (str, ap);
  printf ("\n");
  va_end (ap);
}
800
/* Debug helper: print the names of a full qualifier sequence (one entry
   per operand) on a single line.  */
static inline void
dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
{
  int i;
  printf ("#### \t");
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
    printf ("%s,", aarch64_get_qualifier_name (*qualifier));
  printf ("\n");
}
810
/* Debug helper: print the qualifiers currently attached to the operands
   in OPND next to the candidate sequence QUALIFIER being matched against.  */
static void
dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
		       const aarch64_opnd_qualifier_t *qualifier)
{
  int i;
  aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];

  aarch64_verbose ("dump_match_qualifiers:");
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    curr[i] = opnd[i].qualifier;
  dump_qualifier_sequence (curr);
  aarch64_verbose ("against");
  dump_qualifier_sequence (qualifier);
}
825 #endif /* DEBUG_AARCH64 */
826
827 /* TODO improve this, we can have an extra field at the runtime to
828 store the number of operands rather than calculating it every time. */
829
830 int
831 aarch64_num_of_operands (const aarch64_opcode *opcode)
832 {
833 int i = 0;
834 const enum aarch64_opnd *opnds = opcode->operands;
835 while (opnds[i++] != AARCH64_OPND_NIL)
836 ;
837 --i;
838 assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM);
839 return i;
840 }
841
/* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
   If succeeds, fill the found sequence in *RET, return 1; otherwise return 0.

   N.B. on the entry, it is very likely that only some operands in *INST
   have had their qualifiers been established.

   If STOP_AT is not -1, the function will only try to match
   the qualifier sequence for operands before and including the operand
   of index STOP_AT; and on success *RET will only be filled with the first
   (STOP_AT+1) qualifiers.

   A couple of examples of the matching algorithm:

   X,W,NIL should match
   X,W,NIL

   NIL,NIL should match
   X  ,NIL

   Apart from serving the main encoding routine, this can also be called
   during or after the operand decoding.  */

int
aarch64_find_best_match (const aarch64_inst *inst,
			 const aarch64_opnd_qualifier_seq_t *qualifiers_list,
			 int stop_at, aarch64_opnd_qualifier_t *ret)
{
  int found = 0;
  int i, num_opnds;
  const aarch64_opnd_qualifier_t *qualifiers;

  num_opnds = aarch64_num_of_operands (inst->opcode);
  if (num_opnds == 0)
    {
      DEBUG_TRACE ("SUCCEED: no operand");
      return 1;
    }

  if (stop_at < 0 || stop_at >= num_opnds)
    stop_at = num_opnds - 1;

  /* For each pattern.  */
  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
    {
      int j;
      qualifiers = *qualifiers_list;

      /* Start as positive.  */
      found = 1;

      DEBUG_TRACE ("%d", i);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_match_qualifiers (inst->operands, qualifiers);
#endif

      /* Most opcodes have much fewer patterns in the list.
	 First NIL qualifier indicates the end in the list.  */
      if (empty_qualifier_sequence_p (qualifiers) == TRUE)
	{
	  /* An empty sequence terminates the list; only succeed here if it
	     is the very first entry (the opcode takes no qualifiers).  */
	  DEBUG_TRACE_IF (i == 0, "SUCCEED: empty qualifier list");
	  if (i)
	    found = 0;
	  break;
	}

      for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers)
	{
	  if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL)
	    {
	      /* Either the operand does not have qualifier, or the qualifier
		 for the operand needs to be deduced from the qualifier
		 sequence.
		 In the latter case, any constraint checking related with
		 the obtained qualifier should be done later in
		 operand_general_constraint_met_p.  */
	      continue;
	    }
	  else if (*qualifiers != inst->operands[j].qualifier)
	    {
	      /* Unless the target qualifier can also qualify the operand
		 (which has already had a non-nil qualifier), non-equal
		 qualifiers are generally un-matched.  */
	      if (operand_also_qualified_p (inst->operands + j, *qualifiers))
		continue;
	      else
		{
		  found = 0;
		  break;
		}
	    }
	  else
	    continue;	/* Equal qualifiers are certainly matched.  */
	}

      /* Qualifiers established.  */
      if (found == 1)
	break;
    }

  if (found == 1)
    {
      /* Fill the result in *RET.  QUALIFIERS_LIST was advanced in step with
	 the loop above, so it now points at the matched sequence.  */
      int j;
      qualifiers = *qualifiers_list;

      DEBUG_TRACE ("complete qualifiers using list %d", i);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_qualifier_sequence (qualifiers);
#endif

      for (j = 0; j <= stop_at; ++j, ++qualifiers)
	ret[j] = *qualifiers;
      for (; j < AARCH64_MAX_OPND_NUM; ++j)
	ret[j] = AARCH64_OPND_QLF_NIL;

      DEBUG_TRACE ("SUCCESS");
      return 1;
    }

  DEBUG_TRACE ("FAIL");
  return 0;
}
966
/* Operand qualifier matching and resolving.

   Return 1 if the operand qualifier(s) in *INST match one of the qualifier
   sequences in INST->OPCODE->qualifiers_list; otherwise return 0.

   If UPDATE_P == TRUE, update the qualifier(s) in *INST after the matching
   succeeds.  */

static int
match_operands_qualifier (aarch64_inst *inst, bfd_boolean update_p)
{
  int i, nops;
  aarch64_opnd_qualifier_seq_t qualifiers;

  if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1,
			       qualifiers))
    {
      DEBUG_TRACE ("matching FAIL");
      return 0;
    }

  if (inst->opcode->flags & F_STRICT)
    {
      /* Require an exact qualifier match, even for NIL qualifiers.  */
      nops = aarch64_num_of_operands (inst->opcode);
      for (i = 0; i < nops; ++i)
	if (inst->operands[i].qualifier != qualifiers[i])
	  return FALSE;
    }

  /* Update the qualifiers.  */
  if (update_p == TRUE)
    for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
      {
	if (inst->opcode->operands[i] == AARCH64_OPND_NIL)
	  break;
	DEBUG_TRACE_IF (inst->operands[i].qualifier != qualifiers[i],
			"update %s with %s for operand %d",
			aarch64_get_qualifier_name (inst->operands[i].qualifier),
			aarch64_get_qualifier_name (qualifiers[i]), i);
	inst->operands[i].qualifier = qualifiers[i];
      }

  DEBUG_TRACE ("matching SUCCESS");
  return 1;
}
1013
1014 /* Return TRUE if VALUE is a wide constant that can be moved into a general
1015 register by MOVZ.
1016
1017 IS32 indicates whether value is a 32-bit immediate or not.
1018 If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
1019 amount will be returned in *SHIFT_AMOUNT. */
1020
1021 bfd_boolean
1022 aarch64_wide_constant_p (int64_t value, int is32, unsigned int *shift_amount)
1023 {
1024 int amount;
1025
1026 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1027
1028 if (is32)
1029 {
1030 /* Allow all zeros or all ones in top 32-bits, so that
1031 32-bit constant expressions like ~0x80000000 are
1032 permitted. */
1033 uint64_t ext = value;
1034 if (ext >> 32 != 0 && ext >> 32 != (uint64_t) 0xffffffff)
1035 /* Immediate out of range. */
1036 return FALSE;
1037 value &= (int64_t) 0xffffffff;
1038 }
1039
1040 /* first, try movz then movn */
1041 amount = -1;
1042 if ((value & ((int64_t) 0xffff << 0)) == value)
1043 amount = 0;
1044 else if ((value & ((int64_t) 0xffff << 16)) == value)
1045 amount = 16;
1046 else if (!is32 && (value & ((int64_t) 0xffff << 32)) == value)
1047 amount = 32;
1048 else if (!is32 && (value & ((int64_t) 0xffff << 48)) == value)
1049 amount = 48;
1050
1051 if (amount == -1)
1052 {
1053 DEBUG_TRACE ("exit FALSE with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1054 return FALSE;
1055 }
1056
1057 if (shift_amount != NULL)
1058 *shift_amount = amount;
1059
1060 DEBUG_TRACE ("exit TRUE with amount %d", amount);
1061
1062 return TRUE;
1063 }
1064
1065 /* Build the accepted values for immediate logical SIMD instructions.
1066
1067 The standard encodings of the immediate value are:
1068 N imms immr SIMD size R S
1069 1 ssssss rrrrrr 64 UInt(rrrrrr) UInt(ssssss)
1070 0 0sssss 0rrrrr 32 UInt(rrrrr) UInt(sssss)
1071 0 10ssss 00rrrr 16 UInt(rrrr) UInt(ssss)
1072 0 110sss 000rrr 8 UInt(rrr) UInt(sss)
1073 0 1110ss 0000rr 4 UInt(rr) UInt(ss)
1074 0 11110s 00000r 2 UInt(r) UInt(s)
1075 where all-ones value of S is reserved.
1076
1077 Let's call E the SIMD size.
1078
1079 The immediate value is: S+1 bits '1' rotated to the right by R.
1080
1081 The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
1082 (remember S != E - 1). */
1083
/* Number of valid logical-immediate encodings:
   64*63 + 32*31 + 16*15 + 8*7 + 4*3 + 2*1 = 5334 (see comment above).  */
#define TOTAL_IMM_NB  5334

/* One table entry: the fully replicated 64-bit immediate value and its
   standard (N:immr:imms) bitfield encoding.  */
typedef struct
{
  uint64_t imm;
  aarch64_insn encoding;
} simd_imm_encoding;

/* Table of all valid logical immediates; filled lazily by
   build_immediate_table and sorted by the IMM field so that
   aarch64_logical_immediate_p can binary-search it.  */
static simd_imm_encoding simd_immediates[TOTAL_IMM_NB];
1093
1094 static int
1095 simd_imm_encoding_cmp(const void *i1, const void *i2)
1096 {
1097 const simd_imm_encoding *imm1 = (const simd_imm_encoding *)i1;
1098 const simd_imm_encoding *imm2 = (const simd_imm_encoding *)i2;
1099
1100 if (imm1->imm < imm2->imm)
1101 return -1;
1102 if (imm1->imm > imm2->imm)
1103 return +1;
1104 return 0;
1105 }
1106
1107 /* immediate bitfield standard encoding
1108 imm13<12> imm13<5:0> imm13<11:6> SIMD size R S
1109 1 ssssss rrrrrr 64 rrrrrr ssssss
1110 0 0sssss 0rrrrr 32 rrrrr sssss
1111 0 10ssss 00rrrr 16 rrrr ssss
1112 0 110sss 000rrr 8 rrr sss
1113 0 1110ss 0000rr 4 rr ss
1114 0 11110s 00000r 2 r s */
static inline int
encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
{
  /* Pack the fields per the table above: N in bit 12, immr (R) in
     bits 11:6 and imms (S) in bits 5:0.  */
  int encoding = is64 << 12;
  encoding |= r << 6;
  encoding |= s;
  return encoding;
}
1120
/* Fill simd_immediates[] with every valid logical immediate and its
   standard encoding, then sort the table by immediate value so that
   aarch64_logical_immediate_p can bsearch it.  Called once, lazily.  */
static void
build_immediate_table (void)
{
  uint32_t log_e, e, s, r, s_mask;
  uint64_t mask, imm;
  int nb_imms;
  int is64;

  nb_imms = 0;
  /* Element sizes 2, 4, 8, 16, 32 and 64 bits.  */
  for (log_e = 1; log_e <= 6; log_e++)
    {
      /* Get element size.  */
      e = 1u << log_e;
      if (log_e == 6)
	{
	  is64 = 1;
	  mask = 0xffffffffffffffffull;
	  s_mask = 0;
	}
      else
	{
	  is64 = 0;
	  mask = (1ull << e) - 1;
	  /* log_e  s_mask
	     1     ((1 << 4) - 1) << 2 = 111100
	     2     ((1 << 3) - 1) << 3 = 111000
	     3     ((1 << 2) - 1) << 4 = 110000
	     4     ((1 << 1) - 1) << 5 = 100000
	     5     ((1 << 0) - 1) << 6 = 000000  */
	  s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1);
	}
      /* S = E - 1 (all ones) is reserved, hence s < e - 1.  */
      for (s = 0; s < e - 1; s++)
	for (r = 0; r < e; r++)
	  {
	    /* s+1 consecutive bits to 1 (s < 63) */
	    imm = (1ull << (s + 1)) - 1;
	    /* rotate right by r */
	    if (r != 0)
	      imm = (imm >> r) | ((imm << (e - r)) & mask);
	    /* replicate the constant depending on SIMD size */
	    /* The cascading fall-through doubles the element repeatedly
	       until it fills the full 64 bits.  */
	    switch (log_e)
	      {
	      case 1: imm = (imm << 2) | imm;
		/* Fall through.  */
	      case 2: imm = (imm << 4) | imm;
		/* Fall through.  */
	      case 3: imm = (imm << 8) | imm;
		/* Fall through.  */
	      case 4: imm = (imm << 16) | imm;
		/* Fall through.  */
	      case 5: imm = (imm << 32) | imm;
		/* Fall through.  */
	      case 6: break;
	      default: abort ();
	      }
	    simd_immediates[nb_imms].imm = imm;
	    simd_immediates[nb_imms].encoding =
	      encode_immediate_bitfield(is64, s | s_mask, r);
	    nb_imms++;
	  }
    }
  /* Sanity-check against the closed-form count in the comment above.  */
  assert (nb_imms == TOTAL_IMM_NB);
  /* Sort by immediate value for the bsearch in
     aarch64_logical_immediate_p.  */
  qsort(simd_immediates, nb_imms,
	sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
}
1186
1187 /* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
1188 be accepted by logical (immediate) instructions
1189 e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1190
1191 ESIZE is the number of bytes in the decoded immediate value.
1192 If ENCODING is not NULL, on the return of TRUE, the standard encoding for
1193 VALUE will be returned in *ENCODING. */
1194
1195 bfd_boolean
1196 aarch64_logical_immediate_p (uint64_t value, int esize, aarch64_insn *encoding)
1197 {
1198 simd_imm_encoding imm_enc;
1199 const simd_imm_encoding *imm_encoding;
1200 static bfd_boolean initialized = FALSE;
1201 uint64_t upper;
1202 int i;
1203
1204 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), is32: %d", value,
1205 value, is32);
1206
1207 if (initialized == FALSE)
1208 {
1209 build_immediate_table ();
1210 initialized = TRUE;
1211 }
1212
1213 /* Allow all zeros or all ones in top bits, so that
1214 constant expressions like ~1 are permitted. */
1215 upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
1216 if ((value & ~upper) != value && (value | upper) != value)
1217 return FALSE;
1218
1219 /* Replicate to a full 64-bit value. */
1220 value &= ~upper;
1221 for (i = esize * 8; i < 64; i *= 2)
1222 value |= (value << i);
1223
1224 imm_enc.imm = value;
1225 imm_encoding = (const simd_imm_encoding *)
1226 bsearch(&imm_enc, simd_immediates, TOTAL_IMM_NB,
1227 sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1228 if (imm_encoding == NULL)
1229 {
1230 DEBUG_TRACE ("exit with FALSE");
1231 return FALSE;
1232 }
1233 if (encoding != NULL)
1234 *encoding = imm_encoding->encoding;
1235 DEBUG_TRACE ("exit with TRUE");
1236 return TRUE;
1237 }
1238
/* If 64-bit immediate IMM is in the format of
   "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
   where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
   of value "abcdefgh".  Otherwise return -1.  */
int
aarch64_shrink_expanded_imm8 (uint64_t imm)
{
  int result = 0;
  int bit;

  /* Walk the eight bytes from least to most significant; each must be
     exactly 0x00 or 0xff, contributing a 0 or 1 bit respectively.  */
  for (bit = 0; bit < 8; bit++, imm >>= 8)
    {
      uint32_t field = imm & 0xff;

      if (field == 0xff)
	result |= 1 << bit;
      else if (field != 0x00)
	return -1;
    }
  return result;
}
1260
1261 /* Utility inline functions for operand_general_constraint_met_p. */
1262
1263 static inline void
1264 set_error (aarch64_operand_error *mismatch_detail,
1265 enum aarch64_operand_error_kind kind, int idx,
1266 const char* error)
1267 {
1268 if (mismatch_detail == NULL)
1269 return;
1270 mismatch_detail->kind = kind;
1271 mismatch_detail->index = idx;
1272 mismatch_detail->error = error;
1273 }
1274
1275 static inline void
1276 set_syntax_error (aarch64_operand_error *mismatch_detail, int idx,
1277 const char* error)
1278 {
1279 if (mismatch_detail == NULL)
1280 return;
1281 set_error (mismatch_detail, AARCH64_OPDE_SYNTAX_ERROR, idx, error);
1282 }
1283
1284 static inline void
1285 set_out_of_range_error (aarch64_operand_error *mismatch_detail,
1286 int idx, int lower_bound, int upper_bound,
1287 const char* error)
1288 {
1289 if (mismatch_detail == NULL)
1290 return;
1291 set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error);
1292 mismatch_detail->data[0] = lower_bound;
1293 mismatch_detail->data[1] = upper_bound;
1294 }
1295
1296 static inline void
1297 set_imm_out_of_range_error (aarch64_operand_error *mismatch_detail,
1298 int idx, int lower_bound, int upper_bound)
1299 {
1300 if (mismatch_detail == NULL)
1301 return;
1302 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1303 _("immediate value"));
1304 }
1305
1306 static inline void
1307 set_offset_out_of_range_error (aarch64_operand_error *mismatch_detail,
1308 int idx, int lower_bound, int upper_bound)
1309 {
1310 if (mismatch_detail == NULL)
1311 return;
1312 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1313 _("immediate offset"));
1314 }
1315
1316 static inline void
1317 set_regno_out_of_range_error (aarch64_operand_error *mismatch_detail,
1318 int idx, int lower_bound, int upper_bound)
1319 {
1320 if (mismatch_detail == NULL)
1321 return;
1322 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1323 _("register number"));
1324 }
1325
1326 static inline void
1327 set_elem_idx_out_of_range_error (aarch64_operand_error *mismatch_detail,
1328 int idx, int lower_bound, int upper_bound)
1329 {
1330 if (mismatch_detail == NULL)
1331 return;
1332 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1333 _("register element index"));
1334 }
1335
1336 static inline void
1337 set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail,
1338 int idx, int lower_bound, int upper_bound)
1339 {
1340 if (mismatch_detail == NULL)
1341 return;
1342 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1343 _("shift amount"));
1344 }
1345
1346 /* Report that the MUL modifier in operand IDX should be in the range
1347 [LOWER_BOUND, UPPER_BOUND]. */
1348 static inline void
1349 set_multiplier_out_of_range_error (aarch64_operand_error *mismatch_detail,
1350 int idx, int lower_bound, int upper_bound)
1351 {
1352 if (mismatch_detail == NULL)
1353 return;
1354 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1355 _("multiplier"));
1356 }
1357
1358 static inline void
1359 set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx,
1360 int alignment)
1361 {
1362 if (mismatch_detail == NULL)
1363 return;
1364 set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL);
1365 mismatch_detail->data[0] = alignment;
1366 }
1367
1368 static inline void
1369 set_reg_list_error (aarch64_operand_error *mismatch_detail, int idx,
1370 int expected_num)
1371 {
1372 if (mismatch_detail == NULL)
1373 return;
1374 set_error (mismatch_detail, AARCH64_OPDE_REG_LIST, idx, NULL);
1375 mismatch_detail->data[0] = expected_num;
1376 }
1377
1378 static inline void
1379 set_other_error (aarch64_operand_error *mismatch_detail, int idx,
1380 const char* error)
1381 {
1382 if (mismatch_detail == NULL)
1383 return;
1384 set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error);
1385 }
1386
1387 /* General constraint checking based on operand code.
1388
1389 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1390 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1391
1392 This function has to be called after the qualifiers for all operands
1393 have been resolved.
1394
1395 Mismatching error message is returned in *MISMATCH_DETAIL upon request,
1396 i.e. when MISMATCH_DETAIL is non-NULL. This avoids the generation
1397 of error message during the disassembling where error message is not
1398 wanted. We avoid the dynamic construction of strings of error messages
1399 here (i.e. in libopcodes), as it is costly and complicated; instead, we
1400 use a combination of error code, static string and some integer data to
1401 represent an error. */
1402
1403 static int
1404 operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
1405 enum aarch64_opnd type,
1406 const aarch64_opcode *opcode,
1407 aarch64_operand_error *mismatch_detail)
1408 {
1409 unsigned num, modifiers, shift;
1410 unsigned char size;
1411 int64_t imm, min_value, max_value;
1412 uint64_t uvalue, mask;
1413 const aarch64_opnd_info *opnd = opnds + idx;
1414 aarch64_opnd_qualifier_t qualifier = opnd->qualifier;
1415
1416 assert (opcode->operands[idx] == opnd->type && opnd->type == type);
1417
1418 switch (aarch64_operands[type].op_class)
1419 {
1420 case AARCH64_OPND_CLASS_INT_REG:
1421 /* Check pair reg constraints for cas* instructions. */
1422 if (type == AARCH64_OPND_PAIRREG)
1423 {
1424 assert (idx == 1 || idx == 3);
1425 if (opnds[idx - 1].reg.regno % 2 != 0)
1426 {
1427 set_syntax_error (mismatch_detail, idx - 1,
1428 _("reg pair must start from even reg"));
1429 return 0;
1430 }
1431 if (opnds[idx].reg.regno != opnds[idx - 1].reg.regno + 1)
1432 {
1433 set_syntax_error (mismatch_detail, idx,
1434 _("reg pair must be contiguous"));
1435 return 0;
1436 }
1437 break;
1438 }
1439
1440 /* <Xt> may be optional in some IC and TLBI instructions. */
1441 if (type == AARCH64_OPND_Rt_SYS)
1442 {
1443 assert (idx == 1 && (aarch64_get_operand_class (opnds[0].type)
1444 == AARCH64_OPND_CLASS_SYSTEM));
1445 if (opnds[1].present
1446 && !aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1447 {
1448 set_other_error (mismatch_detail, idx, _("extraneous register"));
1449 return 0;
1450 }
1451 if (!opnds[1].present
1452 && aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1453 {
1454 set_other_error (mismatch_detail, idx, _("missing register"));
1455 return 0;
1456 }
1457 }
1458 switch (qualifier)
1459 {
1460 case AARCH64_OPND_QLF_WSP:
1461 case AARCH64_OPND_QLF_SP:
1462 if (!aarch64_stack_pointer_p (opnd))
1463 {
1464 set_other_error (mismatch_detail, idx,
1465 _("stack pointer register expected"));
1466 return 0;
1467 }
1468 break;
1469 default:
1470 break;
1471 }
1472 break;
1473
1474 case AARCH64_OPND_CLASS_SVE_REG:
1475 switch (type)
1476 {
1477 case AARCH64_OPND_SVE_Zn_INDEX:
1478 size = aarch64_get_qualifier_esize (opnd->qualifier);
1479 if (!value_in_range_p (opnd->reglane.index, 0, 64 / size - 1))
1480 {
1481 set_elem_idx_out_of_range_error (mismatch_detail, idx,
1482 0, 64 / size - 1);
1483 return 0;
1484 }
1485 break;
1486
1487 case AARCH64_OPND_SVE_ZnxN:
1488 case AARCH64_OPND_SVE_ZtxN:
1489 if (opnd->reglist.num_regs != get_opcode_dependent_value (opcode))
1490 {
1491 set_other_error (mismatch_detail, idx,
1492 _("invalid register list"));
1493 return 0;
1494 }
1495 break;
1496
1497 default:
1498 break;
1499 }
1500 break;
1501
1502 case AARCH64_OPND_CLASS_PRED_REG:
1503 if (opnd->reg.regno >= 8
1504 && get_operand_fields_width (get_operand_from_code (type)) == 3)
1505 {
1506 set_other_error (mismatch_detail, idx, _("p0-p7 expected"));
1507 return 0;
1508 }
1509 break;
1510
1511 case AARCH64_OPND_CLASS_COND:
1512 if (type == AARCH64_OPND_COND1
1513 && (opnds[idx].cond->value & 0xe) == 0xe)
1514 {
1515 /* Not allow AL or NV. */
1516 set_syntax_error (mismatch_detail, idx, NULL);
1517 }
1518 break;
1519
1520 case AARCH64_OPND_CLASS_ADDRESS:
1521 /* Check writeback. */
1522 switch (opcode->iclass)
1523 {
1524 case ldst_pos:
1525 case ldst_unscaled:
1526 case ldstnapair_offs:
1527 case ldstpair_off:
1528 case ldst_unpriv:
1529 if (opnd->addr.writeback == 1)
1530 {
1531 set_syntax_error (mismatch_detail, idx,
1532 _("unexpected address writeback"));
1533 return 0;
1534 }
1535 break;
1536 case ldst_imm10:
1537 if (opnd->addr.writeback == 1 && opnd->addr.preind != 1)
1538 {
1539 set_syntax_error (mismatch_detail, idx,
1540 _("unexpected address writeback"));
1541 return 0;
1542 }
1543 break;
1544 case ldst_imm9:
1545 case ldstpair_indexed:
1546 case asisdlsep:
1547 case asisdlsop:
1548 if (opnd->addr.writeback == 0)
1549 {
1550 set_syntax_error (mismatch_detail, idx,
1551 _("address writeback expected"));
1552 return 0;
1553 }
1554 break;
1555 default:
1556 assert (opnd->addr.writeback == 0);
1557 break;
1558 }
1559 switch (type)
1560 {
1561 case AARCH64_OPND_ADDR_SIMM7:
1562 /* Scaled signed 7 bits immediate offset. */
1563 /* Get the size of the data element that is accessed, which may be
1564 different from that of the source register size,
1565 e.g. in strb/ldrb. */
1566 size = aarch64_get_qualifier_esize (opnd->qualifier);
1567 if (!value_in_range_p (opnd->addr.offset.imm, -64 * size, 63 * size))
1568 {
1569 set_offset_out_of_range_error (mismatch_detail, idx,
1570 -64 * size, 63 * size);
1571 return 0;
1572 }
1573 if (!value_aligned_p (opnd->addr.offset.imm, size))
1574 {
1575 set_unaligned_error (mismatch_detail, idx, size);
1576 return 0;
1577 }
1578 break;
1579 case AARCH64_OPND_ADDR_SIMM9:
1580 /* Unscaled signed 9 bits immediate offset. */
1581 if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
1582 {
1583 set_offset_out_of_range_error (mismatch_detail, idx, -256, 255);
1584 return 0;
1585 }
1586 break;
1587
1588 case AARCH64_OPND_ADDR_SIMM9_2:
1589 /* Unscaled signed 9 bits immediate offset, which has to be negative
1590 or unaligned. */
1591 size = aarch64_get_qualifier_esize (qualifier);
1592 if ((value_in_range_p (opnd->addr.offset.imm, 0, 255)
1593 && !value_aligned_p (opnd->addr.offset.imm, size))
1594 || value_in_range_p (opnd->addr.offset.imm, -256, -1))
1595 return 1;
1596 set_other_error (mismatch_detail, idx,
1597 _("negative or unaligned offset expected"));
1598 return 0;
1599
1600 case AARCH64_OPND_ADDR_SIMM10:
1601 /* Scaled signed 10 bits immediate offset. */
1602 if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4088))
1603 {
1604 set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4088);
1605 return 0;
1606 }
1607 if (!value_aligned_p (opnd->addr.offset.imm, 8))
1608 {
1609 set_unaligned_error (mismatch_detail, idx, 8);
1610 return 0;
1611 }
1612 break;
1613
1614 case AARCH64_OPND_SIMD_ADDR_POST:
1615 /* AdvSIMD load/store multiple structures, post-index. */
1616 assert (idx == 1);
1617 if (opnd->addr.offset.is_reg)
1618 {
1619 if (value_in_range_p (opnd->addr.offset.regno, 0, 30))
1620 return 1;
1621 else
1622 {
1623 set_other_error (mismatch_detail, idx,
1624 _("invalid register offset"));
1625 return 0;
1626 }
1627 }
1628 else
1629 {
1630 const aarch64_opnd_info *prev = &opnds[idx-1];
1631 unsigned num_bytes; /* total number of bytes transferred. */
1632 /* The opcode dependent area stores the number of elements in
1633 each structure to be loaded/stored. */
1634 int is_ld1r = get_opcode_dependent_value (opcode) == 1;
1635 if (opcode->operands[0] == AARCH64_OPND_LVt_AL)
1636 /* Special handling of loading single structure to all lane. */
1637 num_bytes = (is_ld1r ? 1 : prev->reglist.num_regs)
1638 * aarch64_get_qualifier_esize (prev->qualifier);
1639 else
1640 num_bytes = prev->reglist.num_regs
1641 * aarch64_get_qualifier_esize (prev->qualifier)
1642 * aarch64_get_qualifier_nelem (prev->qualifier);
1643 if ((int) num_bytes != opnd->addr.offset.imm)
1644 {
1645 set_other_error (mismatch_detail, idx,
1646 _("invalid post-increment amount"));
1647 return 0;
1648 }
1649 }
1650 break;
1651
1652 case AARCH64_OPND_ADDR_REGOFF:
1653 /* Get the size of the data element that is accessed, which may be
1654 different from that of the source register size,
1655 e.g. in strb/ldrb. */
1656 size = aarch64_get_qualifier_esize (opnd->qualifier);
1657 /* It is either no shift or shift by the binary logarithm of SIZE. */
1658 if (opnd->shifter.amount != 0
1659 && opnd->shifter.amount != (int)get_logsz (size))
1660 {
1661 set_other_error (mismatch_detail, idx,
1662 _("invalid shift amount"));
1663 return 0;
1664 }
1665 /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
1666 operators. */
1667 switch (opnd->shifter.kind)
1668 {
1669 case AARCH64_MOD_UXTW:
1670 case AARCH64_MOD_LSL:
1671 case AARCH64_MOD_SXTW:
1672 case AARCH64_MOD_SXTX: break;
1673 default:
1674 set_other_error (mismatch_detail, idx,
1675 _("invalid extend/shift operator"));
1676 return 0;
1677 }
1678 break;
1679
1680 case AARCH64_OPND_ADDR_UIMM12:
1681 imm = opnd->addr.offset.imm;
1682 /* Get the size of the data element that is accessed, which may be
1683 different from that of the source register size,
1684 e.g. in strb/ldrb. */
1685 size = aarch64_get_qualifier_esize (qualifier);
1686 if (!value_in_range_p (opnd->addr.offset.imm, 0, 4095 * size))
1687 {
1688 set_offset_out_of_range_error (mismatch_detail, idx,
1689 0, 4095 * size);
1690 return 0;
1691 }
1692 if (!value_aligned_p (opnd->addr.offset.imm, size))
1693 {
1694 set_unaligned_error (mismatch_detail, idx, size);
1695 return 0;
1696 }
1697 break;
1698
1699 case AARCH64_OPND_ADDR_PCREL14:
1700 case AARCH64_OPND_ADDR_PCREL19:
1701 case AARCH64_OPND_ADDR_PCREL21:
1702 case AARCH64_OPND_ADDR_PCREL26:
1703 imm = opnd->imm.value;
1704 if (operand_need_shift_by_two (get_operand_from_code (type)))
1705 {
1706 /* The offset value in a PC-relative branch instruction is alway
1707 4-byte aligned and is encoded without the lowest 2 bits. */
1708 if (!value_aligned_p (imm, 4))
1709 {
1710 set_unaligned_error (mismatch_detail, idx, 4);
1711 return 0;
1712 }
1713 /* Right shift by 2 so that we can carry out the following check
1714 canonically. */
1715 imm >>= 2;
1716 }
1717 size = get_operand_fields_width (get_operand_from_code (type));
1718 if (!value_fit_signed_field_p (imm, size))
1719 {
1720 set_other_error (mismatch_detail, idx,
1721 _("immediate out of range"));
1722 return 0;
1723 }
1724 break;
1725
1726 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
1727 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
1728 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
1729 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
1730 min_value = -8;
1731 max_value = 7;
1732 sve_imm_offset_vl:
1733 assert (!opnd->addr.offset.is_reg);
1734 assert (opnd->addr.preind);
1735 num = 1 + get_operand_specific_data (&aarch64_operands[type]);
1736 min_value *= num;
1737 max_value *= num;
1738 if ((opnd->addr.offset.imm != 0 && !opnd->shifter.operator_present)
1739 || (opnd->shifter.operator_present
1740 && opnd->shifter.kind != AARCH64_MOD_MUL_VL))
1741 {
1742 set_other_error (mismatch_detail, idx,
1743 _("invalid addressing mode"));
1744 return 0;
1745 }
1746 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1747 {
1748 set_offset_out_of_range_error (mismatch_detail, idx,
1749 min_value, max_value);
1750 return 0;
1751 }
1752 if (!value_aligned_p (opnd->addr.offset.imm, num))
1753 {
1754 set_unaligned_error (mismatch_detail, idx, num);
1755 return 0;
1756 }
1757 break;
1758
1759 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
1760 min_value = -32;
1761 max_value = 31;
1762 goto sve_imm_offset_vl;
1763
1764 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
1765 min_value = -256;
1766 max_value = 255;
1767 goto sve_imm_offset_vl;
1768
1769 case AARCH64_OPND_SVE_ADDR_RI_U6:
1770 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
1771 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
1772 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
1773 min_value = 0;
1774 max_value = 63;
1775 sve_imm_offset:
1776 assert (!opnd->addr.offset.is_reg);
1777 assert (opnd->addr.preind);
1778 num = 1 << get_operand_specific_data (&aarch64_operands[type]);
1779 min_value *= num;
1780 max_value *= num;
1781 if (opnd->shifter.operator_present
1782 || opnd->shifter.amount_present)
1783 {
1784 set_other_error (mismatch_detail, idx,
1785 _("invalid addressing mode"));
1786 return 0;
1787 }
1788 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1789 {
1790 set_offset_out_of_range_error (mismatch_detail, idx,
1791 min_value, max_value);
1792 return 0;
1793 }
1794 if (!value_aligned_p (opnd->addr.offset.imm, num))
1795 {
1796 set_unaligned_error (mismatch_detail, idx, num);
1797 return 0;
1798 }
1799 break;
1800
1801 case AARCH64_OPND_SVE_ADDR_RR:
1802 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
1803 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
1804 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
1805 case AARCH64_OPND_SVE_ADDR_RX:
1806 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
1807 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
1808 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
1809 case AARCH64_OPND_SVE_ADDR_RZ:
1810 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
1811 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
1812 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
1813 modifiers = 1 << AARCH64_MOD_LSL;
1814 sve_rr_operand:
1815 assert (opnd->addr.offset.is_reg);
1816 assert (opnd->addr.preind);
1817 if ((aarch64_operands[type].flags & OPD_F_NO_ZR) != 0
1818 && opnd->addr.offset.regno == 31)
1819 {
1820 set_other_error (mismatch_detail, idx,
1821 _("index register xzr is not allowed"));
1822 return 0;
1823 }
1824 if (((1 << opnd->shifter.kind) & modifiers) == 0
1825 || (opnd->shifter.amount
1826 != get_operand_specific_data (&aarch64_operands[type])))
1827 {
1828 set_other_error (mismatch_detail, idx,
1829 _("invalid addressing mode"));
1830 return 0;
1831 }
1832 break;
1833
1834 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
1835 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
1836 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
1837 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
1838 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
1839 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
1840 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
1841 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
1842 modifiers = (1 << AARCH64_MOD_SXTW) | (1 << AARCH64_MOD_UXTW);
1843 goto sve_rr_operand;
1844
1845 case AARCH64_OPND_SVE_ADDR_ZI_U5:
1846 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
1847 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
1848 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
1849 min_value = 0;
1850 max_value = 31;
1851 goto sve_imm_offset;
1852
1853 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
1854 modifiers = 1 << AARCH64_MOD_LSL;
1855 sve_zz_operand:
1856 assert (opnd->addr.offset.is_reg);
1857 assert (opnd->addr.preind);
1858 if (((1 << opnd->shifter.kind) & modifiers) == 0
1859 || opnd->shifter.amount < 0
1860 || opnd->shifter.amount > 3)
1861 {
1862 set_other_error (mismatch_detail, idx,
1863 _("invalid addressing mode"));
1864 return 0;
1865 }
1866 break;
1867
1868 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
1869 modifiers = (1 << AARCH64_MOD_SXTW);
1870 goto sve_zz_operand;
1871
1872 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
1873 modifiers = 1 << AARCH64_MOD_UXTW;
1874 goto sve_zz_operand;
1875
1876 default:
1877 break;
1878 }
1879 break;
1880
1881 case AARCH64_OPND_CLASS_SIMD_REGLIST:
1882 if (type == AARCH64_OPND_LEt)
1883 {
1884 /* Get the upper bound for the element index. */
1885 num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
1886 if (!value_in_range_p (opnd->reglist.index, 0, num))
1887 {
1888 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
1889 return 0;
1890 }
1891 }
1892 /* The opcode dependent area stores the number of elements in
1893 each structure to be loaded/stored. */
1894 num = get_opcode_dependent_value (opcode);
1895 switch (type)
1896 {
1897 case AARCH64_OPND_LVt:
1898 assert (num >= 1 && num <= 4);
1899 /* Unless LD1/ST1, the number of registers should be equal to that
1900 of the structure elements. */
1901 if (num != 1 && opnd->reglist.num_regs != num)
1902 {
1903 set_reg_list_error (mismatch_detail, idx, num);
1904 return 0;
1905 }
1906 break;
1907 case AARCH64_OPND_LVt_AL:
1908 case AARCH64_OPND_LEt:
1909 assert (num >= 1 && num <= 4);
1910 /* The number of registers should be equal to that of the structure
1911 elements. */
1912 if (opnd->reglist.num_regs != num)
1913 {
1914 set_reg_list_error (mismatch_detail, idx, num);
1915 return 0;
1916 }
1917 break;
1918 default:
1919 break;
1920 }
1921 break;
1922
1923 case AARCH64_OPND_CLASS_IMMEDIATE:
1924 /* Constraint check on immediate operand. */
1925 imm = opnd->imm.value;
1926 /* E.g. imm_0_31 constrains value to be 0..31. */
1927 if (qualifier_value_in_range_constraint_p (qualifier)
1928 && !value_in_range_p (imm, get_lower_bound (qualifier),
1929 get_upper_bound (qualifier)))
1930 {
1931 set_imm_out_of_range_error (mismatch_detail, idx,
1932 get_lower_bound (qualifier),
1933 get_upper_bound (qualifier));
1934 return 0;
1935 }
1936
1937 switch (type)
1938 {
1939 case AARCH64_OPND_AIMM:
1940 if (opnd->shifter.kind != AARCH64_MOD_LSL)
1941 {
1942 set_other_error (mismatch_detail, idx,
1943 _("invalid shift operator"));
1944 return 0;
1945 }
1946 if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12)
1947 {
1948 set_other_error (mismatch_detail, idx,
1949 _("shift amount must be 0 or 12"));
1950 return 0;
1951 }
1952 if (!value_fit_unsigned_field_p (opnd->imm.value, 12))
1953 {
1954 set_other_error (mismatch_detail, idx,
1955 _("immediate out of range"));
1956 return 0;
1957 }
1958 break;
1959
1960 case AARCH64_OPND_HALF:
1961 assert (idx == 1 && opnds[0].type == AARCH64_OPND_Rd);
1962 if (opnd->shifter.kind != AARCH64_MOD_LSL)
1963 {
1964 set_other_error (mismatch_detail, idx,
1965 _("invalid shift operator"));
1966 return 0;
1967 }
1968 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
1969 if (!value_aligned_p (opnd->shifter.amount, 16))
1970 {
1971 set_other_error (mismatch_detail, idx,
1972 _("shift amount must be a multiple of 16"));
1973 return 0;
1974 }
1975 if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16))
1976 {
1977 set_sft_amount_out_of_range_error (mismatch_detail, idx,
1978 0, size * 8 - 16);
1979 return 0;
1980 }
1981 if (opnd->imm.value < 0)
1982 {
1983 set_other_error (mismatch_detail, idx,
1984 _("negative immediate value not allowed"));
1985 return 0;
1986 }
1987 if (!value_fit_unsigned_field_p (opnd->imm.value, 16))
1988 {
1989 set_other_error (mismatch_detail, idx,
1990 _("immediate out of range"));
1991 return 0;
1992 }
1993 break;
1994
1995 case AARCH64_OPND_IMM_MOV:
1996 {
1997 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
1998 imm = opnd->imm.value;
1999 assert (idx == 1);
2000 switch (opcode->op)
2001 {
2002 case OP_MOV_IMM_WIDEN:
2003 imm = ~imm;
2004 /* Fall through. */
2005 case OP_MOV_IMM_WIDE:
2006 if (!aarch64_wide_constant_p (imm, esize == 4, NULL))
2007 {
2008 set_other_error (mismatch_detail, idx,
2009 _("immediate out of range"));
2010 return 0;
2011 }
2012 break;
2013 case OP_MOV_IMM_LOG:
2014 if (!aarch64_logical_immediate_p (imm, esize, NULL))
2015 {
2016 set_other_error (mismatch_detail, idx,
2017 _("immediate out of range"));
2018 return 0;
2019 }
2020 break;
2021 default:
2022 assert (0);
2023 return 0;
2024 }
2025 }
2026 break;
2027
2028 case AARCH64_OPND_NZCV:
2029 case AARCH64_OPND_CCMP_IMM:
2030 case AARCH64_OPND_EXCEPTION:
2031 case AARCH64_OPND_UIMM4:
2032 case AARCH64_OPND_UIMM7:
2033 case AARCH64_OPND_UIMM3_OP1:
2034 case AARCH64_OPND_UIMM3_OP2:
2035 case AARCH64_OPND_SVE_UIMM3:
2036 case AARCH64_OPND_SVE_UIMM7:
2037 case AARCH64_OPND_SVE_UIMM8:
2038 case AARCH64_OPND_SVE_UIMM8_53:
2039 size = get_operand_fields_width (get_operand_from_code (type));
2040 assert (size < 32);
2041 if (!value_fit_unsigned_field_p (opnd->imm.value, size))
2042 {
2043 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2044 (1 << size) - 1);
2045 return 0;
2046 }
2047 break;
2048
2049 case AARCH64_OPND_SIMM5:
2050 case AARCH64_OPND_SVE_SIMM5:
2051 case AARCH64_OPND_SVE_SIMM5B:
2052 case AARCH64_OPND_SVE_SIMM6:
2053 case AARCH64_OPND_SVE_SIMM8:
2054 size = get_operand_fields_width (get_operand_from_code (type));
2055 assert (size < 32);
2056 if (!value_fit_signed_field_p (opnd->imm.value, size))
2057 {
2058 set_imm_out_of_range_error (mismatch_detail, idx,
2059 -(1 << (size - 1)),
2060 (1 << (size - 1)) - 1);
2061 return 0;
2062 }
2063 break;
2064
2065 case AARCH64_OPND_WIDTH:
2066 assert (idx > 1 && opnds[idx-1].type == AARCH64_OPND_IMM
2067 && opnds[0].type == AARCH64_OPND_Rd);
2068 size = get_upper_bound (qualifier);
2069 if (opnd->imm.value + opnds[idx-1].imm.value > size)
2070 /* lsb+width <= reg.size */
2071 {
2072 set_imm_out_of_range_error (mismatch_detail, idx, 1,
2073 size - opnds[idx-1].imm.value);
2074 return 0;
2075 }
2076 break;
2077
2078 case AARCH64_OPND_LIMM:
2079 case AARCH64_OPND_SVE_LIMM:
2080 {
2081 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2082 uint64_t uimm = opnd->imm.value;
2083 if (opcode->op == OP_BIC)
2084 uimm = ~uimm;
2085 if (aarch64_logical_immediate_p (uimm, esize, NULL) == FALSE)
2086 {
2087 set_other_error (mismatch_detail, idx,
2088 _("immediate out of range"));
2089 return 0;
2090 }
2091 }
2092 break;
2093
2094 case AARCH64_OPND_IMM0:
2095 case AARCH64_OPND_FPIMM0:
2096 if (opnd->imm.value != 0)
2097 {
2098 set_other_error (mismatch_detail, idx,
2099 _("immediate zero expected"));
2100 return 0;
2101 }
2102 break;
2103
2104 case AARCH64_OPND_IMM_ROT1:
2105 case AARCH64_OPND_IMM_ROT2:
2106 if (opnd->imm.value != 0
2107 && opnd->imm.value != 90
2108 && opnd->imm.value != 180
2109 && opnd->imm.value != 270)
2110 {
2111 set_other_error (mismatch_detail, idx,
2112 _("rotate expected to be 0, 90, 180 or 270"));
2113 return 0;
2114 }
2115 break;
2116
2117 case AARCH64_OPND_IMM_ROT3:
2118 if (opnd->imm.value != 90 && opnd->imm.value != 270)
2119 {
2120 set_other_error (mismatch_detail, idx,
2121 _("rotate expected to be 90 or 270"));
2122 return 0;
2123 }
2124 break;
2125
2126 case AARCH64_OPND_SHLL_IMM:
2127 assert (idx == 2);
2128 size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2129 if (opnd->imm.value != size)
2130 {
2131 set_other_error (mismatch_detail, idx,
2132 _("invalid shift amount"));
2133 return 0;
2134 }
2135 break;
2136
2137 case AARCH64_OPND_IMM_VLSL:
2138 size = aarch64_get_qualifier_esize (qualifier);
2139 if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1))
2140 {
2141 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2142 size * 8 - 1);
2143 return 0;
2144 }
2145 break;
2146
2147 case AARCH64_OPND_IMM_VLSR:
2148 size = aarch64_get_qualifier_esize (qualifier);
2149 if (!value_in_range_p (opnd->imm.value, 1, size * 8))
2150 {
2151 set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8);
2152 return 0;
2153 }
2154 break;
2155
2156 case AARCH64_OPND_SIMD_IMM:
2157 case AARCH64_OPND_SIMD_IMM_SFT:
2158 /* Qualifier check. */
2159 switch (qualifier)
2160 {
2161 case AARCH64_OPND_QLF_LSL:
2162 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2163 {
2164 set_other_error (mismatch_detail, idx,
2165 _("invalid shift operator"));
2166 return 0;
2167 }
2168 break;
2169 case AARCH64_OPND_QLF_MSL:
2170 if (opnd->shifter.kind != AARCH64_MOD_MSL)
2171 {
2172 set_other_error (mismatch_detail, idx,
2173 _("invalid shift operator"));
2174 return 0;
2175 }
2176 break;
2177 case AARCH64_OPND_QLF_NIL:
2178 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2179 {
2180 set_other_error (mismatch_detail, idx,
2181 _("shift is not permitted"));
2182 return 0;
2183 }
2184 break;
2185 default:
2186 assert (0);
2187 return 0;
2188 }
2189 /* Is the immediate valid? */
2190 assert (idx == 1);
2191 if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8)
2192 {
2193 /* uimm8 or simm8 */
2194 if (!value_in_range_p (opnd->imm.value, -128, 255))
2195 {
2196 set_imm_out_of_range_error (mismatch_detail, idx, -128, 255);
2197 return 0;
2198 }
2199 }
2200 else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0)
2201 {
2202 /* uimm64 is not
2203 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
2204 ffffffffgggggggghhhhhhhh'. */
2205 set_other_error (mismatch_detail, idx,
2206 _("invalid value for immediate"));
2207 return 0;
2208 }
2209 /* Is the shift amount valid? */
2210 switch (opnd->shifter.kind)
2211 {
2212 case AARCH64_MOD_LSL:
2213 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2214 if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8))
2215 {
2216 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0,
2217 (size - 1) * 8);
2218 return 0;
2219 }
2220 if (!value_aligned_p (opnd->shifter.amount, 8))
2221 {
2222 set_unaligned_error (mismatch_detail, idx, 8);
2223 return 0;
2224 }
2225 break;
2226 case AARCH64_MOD_MSL:
2227 /* Only 8 and 16 are valid shift amount. */
2228 if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16)
2229 {
2230 set_other_error (mismatch_detail, idx,
2231 _("shift amount must be 0 or 16"));
2232 return 0;
2233 }
2234 break;
2235 default:
2236 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2237 {
2238 set_other_error (mismatch_detail, idx,
2239 _("invalid shift operator"));
2240 return 0;
2241 }
2242 break;
2243 }
2244 break;
2245
2246 case AARCH64_OPND_FPIMM:
2247 case AARCH64_OPND_SIMD_FPIMM:
2248 case AARCH64_OPND_SVE_FPIMM8:
2249 if (opnd->imm.is_fp == 0)
2250 {
2251 set_other_error (mismatch_detail, idx,
2252 _("floating-point immediate expected"));
2253 return 0;
2254 }
2255 /* The value is expected to be an 8-bit floating-point constant with
2256 sign, 3-bit exponent and normalized 4 bits of precision, encoded
2257 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
2258 instruction). */
2259 if (!value_in_range_p (opnd->imm.value, 0, 255))
2260 {
2261 set_other_error (mismatch_detail, idx,
2262 _("immediate out of range"));
2263 return 0;
2264 }
2265 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2266 {
2267 set_other_error (mismatch_detail, idx,
2268 _("invalid shift operator"));
2269 return 0;
2270 }
2271 break;
2272
2273 case AARCH64_OPND_SVE_AIMM:
2274 min_value = 0;
2275 sve_aimm:
2276 assert (opnd->shifter.kind == AARCH64_MOD_LSL);
2277 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2278 mask = ~((uint64_t) -1 << (size * 4) << (size * 4));
2279 uvalue = opnd->imm.value;
2280 shift = opnd->shifter.amount;
2281 if (size == 1)
2282 {
2283 if (shift != 0)
2284 {
2285 set_other_error (mismatch_detail, idx,
2286 _("no shift amount allowed for"
2287 " 8-bit constants"));
2288 return 0;
2289 }
2290 }
2291 else
2292 {
2293 if (shift != 0 && shift != 8)
2294 {
2295 set_other_error (mismatch_detail, idx,
2296 _("shift amount must be 0 or 8"));
2297 return 0;
2298 }
2299 if (shift == 0 && (uvalue & 0xff) == 0)
2300 {
2301 shift = 8;
2302 uvalue = (int64_t) uvalue / 256;
2303 }
2304 }
2305 mask >>= shift;
2306 if ((uvalue & mask) != uvalue && (uvalue | ~mask) != uvalue)
2307 {
2308 set_other_error (mismatch_detail, idx,
2309 _("immediate too big for element size"));
2310 return 0;
2311 }
2312 uvalue = (uvalue - min_value) & mask;
2313 if (uvalue > 0xff)
2314 {
2315 set_other_error (mismatch_detail, idx,
2316 _("invalid arithmetic immediate"));
2317 return 0;
2318 }
2319 break;
2320
2321 case AARCH64_OPND_SVE_ASIMM:
2322 min_value = -128;
2323 goto sve_aimm;
2324
2325 case AARCH64_OPND_SVE_I1_HALF_ONE:
2326 assert (opnd->imm.is_fp);
2327 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x3f800000)
2328 {
2329 set_other_error (mismatch_detail, idx,
2330 _("floating-point value must be 0.5 or 1.0"));
2331 return 0;
2332 }
2333 break;
2334
2335 case AARCH64_OPND_SVE_I1_HALF_TWO:
2336 assert (opnd->imm.is_fp);
2337 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x40000000)
2338 {
2339 set_other_error (mismatch_detail, idx,
2340 _("floating-point value must be 0.5 or 2.0"));
2341 return 0;
2342 }
2343 break;
2344
2345 case AARCH64_OPND_SVE_I1_ZERO_ONE:
2346 assert (opnd->imm.is_fp);
2347 if (opnd->imm.value != 0 && opnd->imm.value != 0x3f800000)
2348 {
2349 set_other_error (mismatch_detail, idx,
2350 _("floating-point value must be 0.0 or 1.0"));
2351 return 0;
2352 }
2353 break;
2354
2355 case AARCH64_OPND_SVE_INV_LIMM:
2356 {
2357 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2358 uint64_t uimm = ~opnd->imm.value;
2359 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2360 {
2361 set_other_error (mismatch_detail, idx,
2362 _("immediate out of range"));
2363 return 0;
2364 }
2365 }
2366 break;
2367
2368 case AARCH64_OPND_SVE_LIMM_MOV:
2369 {
2370 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2371 uint64_t uimm = opnd->imm.value;
2372 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2373 {
2374 set_other_error (mismatch_detail, idx,
2375 _("immediate out of range"));
2376 return 0;
2377 }
2378 if (!aarch64_sve_dupm_mov_immediate_p (uimm, esize))
2379 {
2380 set_other_error (mismatch_detail, idx,
2381 _("invalid replicated MOV immediate"));
2382 return 0;
2383 }
2384 }
2385 break;
2386
2387 case AARCH64_OPND_SVE_PATTERN_SCALED:
2388 assert (opnd->shifter.kind == AARCH64_MOD_MUL);
2389 if (!value_in_range_p (opnd->shifter.amount, 1, 16))
2390 {
2391 set_multiplier_out_of_range_error (mismatch_detail, idx, 1, 16);
2392 return 0;
2393 }
2394 break;
2395
2396 case AARCH64_OPND_SVE_SHLIMM_PRED:
2397 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
2398 size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2399 if (!value_in_range_p (opnd->imm.value, 0, 8 * size - 1))
2400 {
2401 set_imm_out_of_range_error (mismatch_detail, idx,
2402 0, 8 * size - 1);
2403 return 0;
2404 }
2405 break;
2406
2407 case AARCH64_OPND_SVE_SHRIMM_PRED:
2408 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
2409 size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2410 if (!value_in_range_p (opnd->imm.value, 1, 8 * size))
2411 {
2412 set_imm_out_of_range_error (mismatch_detail, idx, 1, 8 * size);
2413 return 0;
2414 }
2415 break;
2416
2417 default:
2418 break;
2419 }
2420 break;
2421
2422 case AARCH64_OPND_CLASS_SYSTEM:
2423 switch (type)
2424 {
2425 case AARCH64_OPND_PSTATEFIELD:
2426 assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4);
2427 /* MSR UAO, #uimm4
2428 MSR PAN, #uimm4
2429 The immediate must be #0 or #1. */
2430 if ((opnd->pstatefield == 0x03 /* UAO. */
2431 || opnd->pstatefield == 0x04) /* PAN. */
2432 && opnds[1].imm.value > 1)
2433 {
2434 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
2435 return 0;
2436 }
2437 /* MSR SPSel, #uimm4
2438 Uses uimm4 as a control value to select the stack pointer: if
2439 bit 0 is set it selects the current exception level's stack
2440 pointer, if bit 0 is clear it selects shared EL0 stack pointer.
2441 Bits 1 to 3 of uimm4 are reserved and should be zero. */
2442 if (opnd->pstatefield == 0x05 /* spsel */ && opnds[1].imm.value > 1)
2443 {
2444 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
2445 return 0;
2446 }
2447 break;
2448 default:
2449 break;
2450 }
2451 break;
2452
2453 case AARCH64_OPND_CLASS_SIMD_ELEMENT:
2454 /* Get the upper bound for the element index. */
2455 if (opcode->op == OP_FCMLA_ELEM)
	/* FCMLA index range depends on the vector size of other operands
	   and is halved because complex numbers take two elements.  */
2458 num = aarch64_get_qualifier_nelem (opnds[0].qualifier)
2459 * aarch64_get_qualifier_esize (opnds[0].qualifier) / 2;
2460 else
2461 num = 16;
2462 num = num / aarch64_get_qualifier_esize (qualifier) - 1;
2463
2464 /* Index out-of-range. */
2465 if (!value_in_range_p (opnd->reglane.index, 0, num))
2466 {
2467 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
2468 return 0;
2469 }
2470 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
2471 <Vm> Is the vector register (V0-V31) or (V0-V15), whose
2472 number is encoded in "size:M:Rm":
2473 size <Vm>
2474 00 RESERVED
2475 01 0:Rm
2476 10 M:Rm
2477 11 RESERVED */
2478 if (type == AARCH64_OPND_Em && qualifier == AARCH64_OPND_QLF_S_H
2479 && !value_in_range_p (opnd->reglane.regno, 0, 15))
2480 {
2481 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
2482 return 0;
2483 }
2484 break;
2485
2486 case AARCH64_OPND_CLASS_MODIFIED_REG:
2487 assert (idx == 1 || idx == 2);
2488 switch (type)
2489 {
2490 case AARCH64_OPND_Rm_EXT:
2491 if (aarch64_extend_operator_p (opnd->shifter.kind) == FALSE
2492 && opnd->shifter.kind != AARCH64_MOD_LSL)
2493 {
2494 set_other_error (mismatch_detail, idx,
2495 _("extend operator expected"));
2496 return 0;
2497 }
2498 /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
2499 (i.e. SP), in which case it defaults to LSL. The LSL alias is
2500 only valid when "Rd" or "Rn" is '11111', and is preferred in that
2501 case. */
2502 if (!aarch64_stack_pointer_p (opnds + 0)
2503 && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1)))
2504 {
2505 if (!opnd->shifter.operator_present)
2506 {
2507 set_other_error (mismatch_detail, idx,
2508 _("missing extend operator"));
2509 return 0;
2510 }
2511 else if (opnd->shifter.kind == AARCH64_MOD_LSL)
2512 {
2513 set_other_error (mismatch_detail, idx,
2514 _("'LSL' operator not allowed"));
2515 return 0;
2516 }
2517 }
2518 assert (opnd->shifter.operator_present /* Default to LSL. */
2519 || opnd->shifter.kind == AARCH64_MOD_LSL);
2520 if (!value_in_range_p (opnd->shifter.amount, 0, 4))
2521 {
2522 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4);
2523 return 0;
2524 }
2525 /* In the 64-bit form, the final register operand is written as Wm
2526 for all but the (possibly omitted) UXTX/LSL and SXTX
2527 operators.
2528 N.B. GAS allows X register to be used with any operator as a
2529 programming convenience. */
2530 if (qualifier == AARCH64_OPND_QLF_X
2531 && opnd->shifter.kind != AARCH64_MOD_LSL
2532 && opnd->shifter.kind != AARCH64_MOD_UXTX
2533 && opnd->shifter.kind != AARCH64_MOD_SXTX)
2534 {
2535 set_other_error (mismatch_detail, idx, _("W register expected"));
2536 return 0;
2537 }
2538 break;
2539
2540 case AARCH64_OPND_Rm_SFT:
2541 /* ROR is not available to the shifted register operand in
2542 arithmetic instructions. */
2543 if (aarch64_shift_operator_p (opnd->shifter.kind) == FALSE)
2544 {
2545 set_other_error (mismatch_detail, idx,
2546 _("shift operator expected"));
2547 return 0;
2548 }
2549 if (opnd->shifter.kind == AARCH64_MOD_ROR
2550 && opcode->iclass != log_shift)
2551 {
2552 set_other_error (mismatch_detail, idx,
2553 _("'ROR' operator not allowed"));
2554 return 0;
2555 }
2556 num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63;
2557 if (!value_in_range_p (opnd->shifter.amount, 0, num))
2558 {
2559 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num);
2560 return 0;
2561 }
2562 break;
2563
2564 default:
2565 break;
2566 }
2567 break;
2568
2569 default:
2570 break;
2571 }
2572
2573 return 1;
2574 }
2575
2576 /* Main entrypoint for the operand constraint checking.
2577
2578 Return 1 if operands of *INST meet the constraint applied by the operand
2579 codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
2580 not NULL, return the detail of the error in *MISMATCH_DETAIL. N.B. when
2581 adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
2582 with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
2583 error kind when it is notified that an instruction does not pass the check).
2584
2585 Un-determined operand qualifiers may get established during the process. */
2586
2587 int
2588 aarch64_match_operands_constraint (aarch64_inst *inst,
2589 aarch64_operand_error *mismatch_detail)
2590 {
2591 int i;
2592
2593 DEBUG_TRACE ("enter");
2594
2595 /* Check for cases where a source register needs to be the same as the
2596 destination register. Do this before matching qualifiers since if
2597 an instruction has both invalid tying and invalid qualifiers,
2598 the error about qualifiers would suggest several alternative
2599 instructions that also have invalid tying. */
2600 i = inst->opcode->tied_operand;
2601 if (i > 0 && (inst->operands[0].reg.regno != inst->operands[i].reg.regno))
2602 {
2603 if (mismatch_detail)
2604 {
2605 mismatch_detail->kind = AARCH64_OPDE_UNTIED_OPERAND;
2606 mismatch_detail->index = i;
2607 mismatch_detail->error = NULL;
2608 }
2609 return 0;
2610 }
2611
2612 /* Match operands' qualifier.
2613 *INST has already had qualifier establish for some, if not all, of
2614 its operands; we need to find out whether these established
2615 qualifiers match one of the qualifier sequence in
2616 INST->OPCODE->QUALIFIERS_LIST. If yes, we will assign each operand
2617 with the corresponding qualifier in such a sequence.
2618 Only basic operand constraint checking is done here; the more thorough
2619 constraint checking will carried out by operand_general_constraint_met_p,
2620 which has be to called after this in order to get all of the operands'
2621 qualifiers established. */
2622 if (match_operands_qualifier (inst, TRUE /* update_p */) == 0)
2623 {
2624 DEBUG_TRACE ("FAIL on operand qualifier matching");
2625 if (mismatch_detail)
2626 {
2627 /* Return an error type to indicate that it is the qualifier
2628 matching failure; we don't care about which operand as there
2629 are enough information in the opcode table to reproduce it. */
2630 mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
2631 mismatch_detail->index = -1;
2632 mismatch_detail->error = NULL;
2633 }
2634 return 0;
2635 }
2636
2637 /* Match operands' constraint. */
2638 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2639 {
2640 enum aarch64_opnd type = inst->opcode->operands[i];
2641 if (type == AARCH64_OPND_NIL)
2642 break;
2643 if (inst->operands[i].skip)
2644 {
2645 DEBUG_TRACE ("skip the incomplete operand %d", i);
2646 continue;
2647 }
2648 if (operand_general_constraint_met_p (inst->operands, i, type,
2649 inst->opcode, mismatch_detail) == 0)
2650 {
2651 DEBUG_TRACE ("FAIL on operand %d", i);
2652 return 0;
2653 }
2654 }
2655
2656 DEBUG_TRACE ("PASS");
2657
2658 return 1;
2659 }
2660
2661 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
2662 Also updates the TYPE of each INST->OPERANDS with the corresponding
2663 value of OPCODE->OPERANDS.
2664
2665 Note that some operand qualifiers may need to be manually cleared by
2666 the caller before it further calls the aarch64_opcode_encode; by
2667 doing this, it helps the qualifier matching facilities work
2668 properly. */
2669
2670 const aarch64_opcode*
2671 aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode)
2672 {
2673 int i;
2674 const aarch64_opcode *old = inst->opcode;
2675
2676 inst->opcode = opcode;
2677
2678 /* Update the operand types. */
2679 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2680 {
2681 inst->operands[i].type = opcode->operands[i];
2682 if (opcode->operands[i] == AARCH64_OPND_NIL)
2683 break;
2684 }
2685
2686 DEBUG_TRACE ("replace %s with %s", old->name, opcode->name);
2687
2688 return old;
2689 }
2690
2691 int
2692 aarch64_operand_index (const enum aarch64_opnd *operands, enum aarch64_opnd operand)
2693 {
2694 int i;
2695 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2696 if (operands[i] == operand)
2697 return i;
2698 else if (operands[i] == AARCH64_OPND_NIL)
2699 break;
2700 return -1;
2701 }
2702 \f
/* R0...R30, followed by FOR31.  Helper for building the register-name
   tables below; R is a macro producing the name for register number N,
   FOR31 is the literal name used for register 31.  */
#define BANK(R, FOR31) \
  { R (0),  R (1),  R (2),  R (3),  R (4),  R (5),  R (6),  R (7),  \
    R (8),  R (9),  R (10), R (11), R (12), R (13), R (14), R (15), \
    R (16), R (17), R (18), R (19), R (20), R (21), R (22), R (23), \
    R (24), R (25), R (26), R (27), R (28), R (29), R (30), FOR31 }

/* [0][0]  32-bit integer regs with sp   Wn
   [0][1]  64-bit integer regs with sp   Xn  sf=1
   [1][0]  32-bit integer regs with #0   Wn
   [1][1]  64-bit integer regs with #0   Xn  sf=1
   First index: 0 = register 31 is SP/WSP, 1 = register 31 is the zero reg.
   Second index: 0 = 32-bit (Wn) names, 1 = 64-bit (Xn) names.  */
static const char *int_reg[2][2][32] = {
#define R32(X) "w" #X
#define R64(X) "x" #X
  { BANK (R32, "wsp"), BANK (R64, "sp") },
  { BANK (R32, "wzr"), BANK (R64, "xzr") }
#undef R64
#undef R32
};

/* Names of the SVE vector registers, first with .S suffixes,
   then with .D suffixes.  */

static const char *sve_reg[2][32] = {
#define ZS(X) "z" #X ".s"
#define ZD(X) "z" #X ".d"
  BANK (ZS, ZS (31)), BANK (ZD, ZD (31))
#undef ZD
#undef ZS
};
#undef BANK
2733
/* Return the integer register name.
   If SP_REG_P is not 0, R31 is an SP reg; otherwise R31 is the zero reg.  */
2736
2737 static inline const char *
2738 get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p)
2739 {
2740 const int has_zr = sp_reg_p ? 0 : 1;
2741 const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1;
2742 return int_reg[has_zr][is_64][regno];
2743 }
2744
2745 /* Like get_int_reg_name, but IS_64 is always 1. */
2746
2747 static inline const char *
2748 get_64bit_int_reg_name (int regno, int sp_reg_p)
2749 {
2750 const int has_zr = sp_reg_p ? 0 : 1;
2751 return int_reg[has_zr][1][regno];
2752 }
2753
2754 /* Get the name of the integer offset register in OPND, using the shift type
2755 to decide whether it's a word or doubleword. */
2756
2757 static inline const char *
2758 get_offset_int_reg_name (const aarch64_opnd_info *opnd)
2759 {
2760 switch (opnd->shifter.kind)
2761 {
2762 case AARCH64_MOD_UXTW:
2763 case AARCH64_MOD_SXTW:
2764 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_W, 0);
2765
2766 case AARCH64_MOD_LSL:
2767 case AARCH64_MOD_SXTX:
2768 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_X, 0);
2769
2770 default:
2771 abort ();
2772 }
2773 }
2774
2775 /* Get the name of the SVE vector offset register in OPND, using the operand
2776 qualifier to decide whether the suffix should be .S or .D. */
2777
2778 static inline const char *
2779 get_addr_sve_reg_name (int regno, aarch64_opnd_qualifier_t qualifier)
2780 {
2781 assert (qualifier == AARCH64_OPND_QLF_S_S
2782 || qualifier == AARCH64_OPND_QLF_S_D);
2783 return sve_reg[qualifier == AARCH64_OPND_QLF_S_D][regno];
2784 }
2785
/* Types for expanding an encoded 8-bit value to a floating-point value.
   Each union allows the bit pattern produced by expand_fp_imm to be
   reinterpreted as the corresponding floating-point type.  */

/* 64-bit pattern <-> double.  */
typedef union
{
  uint64_t i;
  double d;
} double_conv_t;

/* 32-bit pattern <-> float.  */
typedef union
{
  uint32_t i;
  float f;
} single_conv_t;

/* Half-precision immediates are expanded to single precision (see
   expand_fp_imm), so this also uses a 32-bit pattern and float.  */
typedef union
{
  uint32_t i;
  float f;
} half_conv_t;
2805
/* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
   normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
   (depending on the type of the instruction).  IMM8 will be expanded to a
   single-precision floating-point value (SIZE == 4) or a double-precision
   floating-point value (SIZE == 8).  A half-precision floating-point value
   (SIZE == 2) is expanded to a single-precision floating-point value.  The
   expanded value is returned.  */

static uint64_t
expand_fp_imm (int size, uint32_t imm8)
{
  /* Initialized to 0 so that an unsupported SIZE yields a well-defined
     return value even when asserts are compiled out (NDEBUG); previously
     IMM was returned uninitialized on that path.  */
  uint64_t imm = 0;
  uint32_t imm8_7, imm8_6_0, imm8_6, imm8_6_repl4;

  imm8_7 = (imm8 >> 7) & 0x01;	/* imm8<7>   */
  imm8_6_0 = imm8 & 0x7f;	/* imm8<6:0> */
  imm8_6 = imm8_6_0 >> 6;	/* imm8<6>   */
  imm8_6_repl4 = (imm8_6 << 3) | (imm8_6 << 2)
    | (imm8_6 << 1) | imm8_6;	/* Replicate(imm8<6>,4) */
  if (size == 8)
    {
      /* Build the high 32 bits of the double pattern, then shift into
	 place; the low 32 bits are all zero.  */
      imm = (imm8_7 << (63-32))		/* imm8<7>  */
	| ((imm8_6 ^ 1) << (62-32))	/* NOT(imm8<6>)	*/
	| (imm8_6_repl4 << (58-32)) | (imm8_6 << (57-32))
	| (imm8_6 << (56-32)) | (imm8_6 << (55-32)) /* Replicate(imm8<6>,7) */
	| (imm8_6_0 << (48-32));	/* imm8<6>:imm8<5:0>    */
      imm <<= 32;
    }
  else if (size == 4 || size == 2)
    {
      imm = (imm8_7 << 31)	/* imm8<7> */
	| ((imm8_6 ^ 1) << 30)	/* NOT(imm8<6>)		*/
	| (imm8_6_repl4 << 26)	/* Replicate(imm8<6>,4) */
	| (imm8_6_0 << 19);	/* imm8<6>:imm8<5:0>    */
    }
  else
    {
      /* An unsupported size.  */
      assert (0);
    }

  return imm;
}
2849
/* Produce the string representation of the register list operand *OPND
   in the buffer pointed by BUF of size SIZE.  PREFIX is the part of
   the register name that comes before the register number, such as "v".  */
static void
print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd,
		     const char *prefix)
{
  const int num_regs = opnd->reglist.num_regs;
  const int first_reg = opnd->reglist.first_regno;
  /* Register numbers wrap modulo 32 (mask 0x1f).  */
  const int last_reg = (first_reg + num_regs - 1) & 0x1f;
  const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
  char tb[8];	/* Temporary buffer.  */

  /* LEt operands must always carry an element index.  */
  assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
  assert (num_regs >= 1 && num_regs <= 4);

  /* Prepare the index if any.  */
  if (opnd->reglist.has_index)
    snprintf (tb, 8, "[%" PRIi64 "]", opnd->reglist.index);
  else
    tb[0] = '\0';

  /* The hyphenated form is preferred for disassembly if there are
     more than two registers in the list, and the register numbers
     are monotonically increasing in increments of one.  */
  if (num_regs > 2 && last_reg > first_reg)
    snprintf (buf, size, "{%s%d.%s-%s%d.%s}%s", prefix, first_reg, qlf_name,
	      prefix, last_reg, qlf_name, tb);
  else
    {
      /* Otherwise print each register explicitly; successive numbers
	 wrap modulo 32.  */
      const int reg0 = first_reg;
      const int reg1 = (first_reg + 1) & 0x1f;
      const int reg2 = (first_reg + 2) & 0x1f;
      const int reg3 = (first_reg + 3) & 0x1f;

      switch (num_regs)
	{
	case 1:
	  snprintf (buf, size, "{%s%d.%s}%s", prefix, reg0, qlf_name, tb);
	  break;
	case 2:
	  snprintf (buf, size, "{%s%d.%s, %s%d.%s}%s", prefix, reg0, qlf_name,
		    prefix, reg1, qlf_name, tb);
	  break;
	case 3:
	  snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s}%s",
		    prefix, reg0, qlf_name, prefix, reg1, qlf_name,
		    prefix, reg2, qlf_name, tb);
	  break;
	case 4:
	  snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s, %s%d.%s}%s",
		    prefix, reg0, qlf_name, prefix, reg1, qlf_name,
		    prefix, reg2, qlf_name, prefix, reg3, qlf_name, tb);
	  break;
	}
    }
}
2907
2908 /* Print the register+immediate address in OPND to BUF, which has SIZE
2909 characters. BASE is the name of the base register. */
2910
2911 static void
2912 print_immediate_offset_address (char *buf, size_t size,
2913 const aarch64_opnd_info *opnd,
2914 const char *base)
2915 {
2916 if (opnd->addr.writeback)
2917 {
2918 if (opnd->addr.preind)
2919 snprintf (buf, size, "[%s, #%d]!", base, opnd->addr.offset.imm);
2920 else
2921 snprintf (buf, size, "[%s], #%d", base, opnd->addr.offset.imm);
2922 }
2923 else
2924 {
2925 if (opnd->shifter.operator_present)
2926 {
2927 assert (opnd->shifter.kind == AARCH64_MOD_MUL_VL);
2928 snprintf (buf, size, "[%s, #%d, mul vl]",
2929 base, opnd->addr.offset.imm);
2930 }
2931 else if (opnd->addr.offset.imm)
2932 snprintf (buf, size, "[%s, #%d]", base, opnd->addr.offset.imm);
2933 else
2934 snprintf (buf, size, "[%s]", base);
2935 }
2936 }
2937
2938 /* Produce the string representation of the register offset address operand
2939 *OPND in the buffer pointed by BUF of size SIZE. BASE and OFFSET are
2940 the names of the base and offset registers. */
2941 static void
2942 print_register_offset_address (char *buf, size_t size,
2943 const aarch64_opnd_info *opnd,
2944 const char *base, const char *offset)
2945 {
2946 char tb[16]; /* Temporary buffer. */
2947 bfd_boolean print_extend_p = TRUE;
2948 bfd_boolean print_amount_p = TRUE;
2949 const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;
2950
2951 if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
2952 || !opnd->shifter.amount_present))
2953 {
2954 /* Not print the shift/extend amount when the amount is zero and
2955 when it is not the special case of 8-bit load/store instruction. */
2956 print_amount_p = FALSE;
2957 /* Likewise, no need to print the shift operator LSL in such a
2958 situation. */
2959 if (opnd->shifter.kind == AARCH64_MOD_LSL)
2960 print_extend_p = FALSE;
2961 }
2962
2963 /* Prepare for the extend/shift. */
2964 if (print_extend_p)
2965 {
2966 if (print_amount_p)
2967 snprintf (tb, sizeof (tb), ", %s #%" PRIi64, shift_name,
2968 opnd->shifter.amount);
2969 else
2970 snprintf (tb, sizeof (tb), ", %s", shift_name);
2971 }
2972 else
2973 tb[0] = '\0';
2974
2975 snprintf (buf, size, "[%s, %s%s]", base, offset, tb);
2976 }
2977
2978 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
2979 in *BUF. The caller should pass in the maximum size of *BUF in SIZE.
2980 PC, PCREL_P and ADDRESS are used to pass in and return information about
2981 the PC-relative address calculation, where the PC value is passed in
2982 PC. If the operand is pc-relative related, *PCREL_P (if PCREL_P non-NULL)
2983 will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
2984 calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
2985
2986 The function serves both the disassembler and the assembler diagnostics
2987 issuer, which is the reason why it lives in this file. */
2988
2989 void
2990 aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
2991 const aarch64_opcode *opcode,
2992 const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
2993 bfd_vma *address)
2994 {
2995 unsigned int i, num_conds;
2996 const char *name = NULL;
2997 const aarch64_opnd_info *opnd = opnds + idx;
2998 enum aarch64_modifier_kind kind;
2999 uint64_t addr, enum_value;
3000
3001 buf[0] = '\0';
3002 if (pcrel_p)
3003 *pcrel_p = 0;
3004
3005 switch (opnd->type)
3006 {
3007 case AARCH64_OPND_Rd:
3008 case AARCH64_OPND_Rn:
3009 case AARCH64_OPND_Rm:
3010 case AARCH64_OPND_Rt:
3011 case AARCH64_OPND_Rt2:
3012 case AARCH64_OPND_Rs:
3013 case AARCH64_OPND_Ra:
3014 case AARCH64_OPND_Rt_SYS:
3015 case AARCH64_OPND_PAIRREG:
3016 case AARCH64_OPND_SVE_Rm:
      /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
	 the <ic_op>, therefore we use opnd->present to override the
	 generic optional-ness information.  */
3020 if (opnd->type == AARCH64_OPND_Rt_SYS)
3021 {
3022 if (!opnd->present)
3023 break;
3024 }
3025 /* Omit the operand, e.g. RET. */
3026 else if (optional_operand_p (opcode, idx)
3027 && (opnd->reg.regno
3028 == get_optional_operand_default_value (opcode)))
3029 break;
3030 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3031 || opnd->qualifier == AARCH64_OPND_QLF_X);
3032 snprintf (buf, size, "%s",
3033 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3034 break;
3035
3036 case AARCH64_OPND_Rd_SP:
3037 case AARCH64_OPND_Rn_SP:
3038 case AARCH64_OPND_SVE_Rn_SP:
3039 case AARCH64_OPND_Rm_SP:
3040 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3041 || opnd->qualifier == AARCH64_OPND_QLF_WSP
3042 || opnd->qualifier == AARCH64_OPND_QLF_X
3043 || opnd->qualifier == AARCH64_OPND_QLF_SP);
3044 snprintf (buf, size, "%s",
3045 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 1));
3046 break;
3047
3048 case AARCH64_OPND_Rm_EXT:
3049 kind = opnd->shifter.kind;
3050 assert (idx == 1 || idx == 2);
3051 if ((aarch64_stack_pointer_p (opnds)
3052 || (idx == 2 && aarch64_stack_pointer_p (opnds + 1)))
3053 && ((opnd->qualifier == AARCH64_OPND_QLF_W
3054 && opnds[0].qualifier == AARCH64_OPND_QLF_W
3055 && kind == AARCH64_MOD_UXTW)
3056 || (opnd->qualifier == AARCH64_OPND_QLF_X
3057 && kind == AARCH64_MOD_UXTX)))
3058 {
3059 /* 'LSL' is the preferred form in this case. */
3060 kind = AARCH64_MOD_LSL;
3061 if (opnd->shifter.amount == 0)
3062 {
3063 /* Shifter omitted. */
3064 snprintf (buf, size, "%s",
3065 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3066 break;
3067 }
3068 }
3069 if (opnd->shifter.amount)
3070 snprintf (buf, size, "%s, %s #%" PRIi64,
3071 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3072 aarch64_operand_modifiers[kind].name,
3073 opnd->shifter.amount);
3074 else
3075 snprintf (buf, size, "%s, %s",
3076 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3077 aarch64_operand_modifiers[kind].name);
3078 break;
3079
3080 case AARCH64_OPND_Rm_SFT:
3081 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3082 || opnd->qualifier == AARCH64_OPND_QLF_X);
3083 if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL)
3084 snprintf (buf, size, "%s",
3085 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3086 else
3087 snprintf (buf, size, "%s, %s #%" PRIi64,
3088 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3089 aarch64_operand_modifiers[opnd->shifter.kind].name,
3090 opnd->shifter.amount);
3091 break;
3092
3093 case AARCH64_OPND_Fd:
3094 case AARCH64_OPND_Fn:
3095 case AARCH64_OPND_Fm:
3096 case AARCH64_OPND_Fa:
3097 case AARCH64_OPND_Ft:
3098 case AARCH64_OPND_Ft2:
3099 case AARCH64_OPND_Sd:
3100 case AARCH64_OPND_Sn:
3101 case AARCH64_OPND_Sm:
3102 case AARCH64_OPND_SVE_VZn:
3103 case AARCH64_OPND_SVE_Vd:
3104 case AARCH64_OPND_SVE_Vm:
3105 case AARCH64_OPND_SVE_Vn:
3106 snprintf (buf, size, "%s%d", aarch64_get_qualifier_name (opnd->qualifier),
3107 opnd->reg.regno);
3108 break;
3109
3110 case AARCH64_OPND_Vd:
3111 case AARCH64_OPND_Vn:
3112 case AARCH64_OPND_Vm:
3113 snprintf (buf, size, "v%d.%s", opnd->reg.regno,
3114 aarch64_get_qualifier_name (opnd->qualifier));
3115 break;
3116
3117 case AARCH64_OPND_Ed:
3118 case AARCH64_OPND_En:
3119 case AARCH64_OPND_Em:
3120 snprintf (buf, size, "v%d.%s[%" PRIi64 "]", opnd->reglane.regno,
3121 aarch64_get_qualifier_name (opnd->qualifier),
3122 opnd->reglane.index);
3123 break;
3124
3125 case AARCH64_OPND_VdD1:
3126 case AARCH64_OPND_VnD1:
3127 snprintf (buf, size, "v%d.d[1]", opnd->reg.regno);
3128 break;
3129
3130 case AARCH64_OPND_LVn:
3131 case AARCH64_OPND_LVt:
3132 case AARCH64_OPND_LVt_AL:
3133 case AARCH64_OPND_LEt:
3134 print_register_list (buf, size, opnd, "v");
3135 break;
3136
3137 case AARCH64_OPND_SVE_Pd:
3138 case AARCH64_OPND_SVE_Pg3:
3139 case AARCH64_OPND_SVE_Pg4_5:
3140 case AARCH64_OPND_SVE_Pg4_10:
3141 case AARCH64_OPND_SVE_Pg4_16:
3142 case AARCH64_OPND_SVE_Pm:
3143 case AARCH64_OPND_SVE_Pn:
3144 case AARCH64_OPND_SVE_Pt:
3145 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3146 snprintf (buf, size, "p%d", opnd->reg.regno);
3147 else if (opnd->qualifier == AARCH64_OPND_QLF_P_Z
3148 || opnd->qualifier == AARCH64_OPND_QLF_P_M)
3149 snprintf (buf, size, "p%d/%s", opnd->reg.regno,
3150 aarch64_get_qualifier_name (opnd->qualifier));
3151 else
3152 snprintf (buf, size, "p%d.%s", opnd->reg.regno,
3153 aarch64_get_qualifier_name (opnd->qualifier));
3154 break;
3155
3156 case AARCH64_OPND_SVE_Za_5:
3157 case AARCH64_OPND_SVE_Za_16:
3158 case AARCH64_OPND_SVE_Zd:
3159 case AARCH64_OPND_SVE_Zm_5:
3160 case AARCH64_OPND_SVE_Zm_16:
3161 case AARCH64_OPND_SVE_Zn:
3162 case AARCH64_OPND_SVE_Zt:
3163 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3164 snprintf (buf, size, "z%d", opnd->reg.regno);
3165 else
3166 snprintf (buf, size, "z%d.%s", opnd->reg.regno,
3167 aarch64_get_qualifier_name (opnd->qualifier));
3168 break;
3169
3170 case AARCH64_OPND_SVE_ZnxN:
3171 case AARCH64_OPND_SVE_ZtxN:
3172 print_register_list (buf, size, opnd, "z");
3173 break;
3174
3175 case AARCH64_OPND_SVE_Zn_INDEX:
3176 snprintf (buf, size, "z%d.%s[%" PRIi64 "]", opnd->reglane.regno,
3177 aarch64_get_qualifier_name (opnd->qualifier),
3178 opnd->reglane.index);
3179 break;
3180
3181 case AARCH64_OPND_CRn:
3182 case AARCH64_OPND_CRm:
3183 snprintf (buf, size, "C%" PRIi64, opnd->imm.value);
3184 break;
3185
3186 case AARCH64_OPND_IDX:
3187 case AARCH64_OPND_IMM:
3188 case AARCH64_OPND_WIDTH:
3189 case AARCH64_OPND_UIMM3_OP1:
3190 case AARCH64_OPND_UIMM3_OP2:
3191 case AARCH64_OPND_BIT_NUM:
3192 case AARCH64_OPND_IMM_VLSL:
3193 case AARCH64_OPND_IMM_VLSR:
3194 case AARCH64_OPND_SHLL_IMM:
3195 case AARCH64_OPND_IMM0:
3196 case AARCH64_OPND_IMMR:
3197 case AARCH64_OPND_IMMS:
3198 case AARCH64_OPND_FBITS:
3199 case AARCH64_OPND_SIMM5:
3200 case AARCH64_OPND_SVE_SHLIMM_PRED:
3201 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
3202 case AARCH64_OPND_SVE_SHRIMM_PRED:
3203 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
3204 case AARCH64_OPND_SVE_SIMM5:
3205 case AARCH64_OPND_SVE_SIMM5B:
3206 case AARCH64_OPND_SVE_SIMM6:
3207 case AARCH64_OPND_SVE_SIMM8:
3208 case AARCH64_OPND_SVE_UIMM3:
3209 case AARCH64_OPND_SVE_UIMM7:
3210 case AARCH64_OPND_SVE_UIMM8:
3211 case AARCH64_OPND_SVE_UIMM8_53:
3212 case AARCH64_OPND_IMM_ROT1:
3213 case AARCH64_OPND_IMM_ROT2:
3214 case AARCH64_OPND_IMM_ROT3:
3215 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3216 break;
3217
3218 case AARCH64_OPND_SVE_I1_HALF_ONE:
3219 case AARCH64_OPND_SVE_I1_HALF_TWO:
3220 case AARCH64_OPND_SVE_I1_ZERO_ONE:
3221 {
3222 single_conv_t c;
3223 c.i = opnd->imm.value;
3224 snprintf (buf, size, "#%.1f", c.f);
3225 break;
3226 }
3227
3228 case AARCH64_OPND_SVE_PATTERN:
3229 if (optional_operand_p (opcode, idx)
3230 && opnd->imm.value == get_optional_operand_default_value (opcode))
3231 break;
3232 enum_value = opnd->imm.value;
3233 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3234 if (aarch64_sve_pattern_array[enum_value])
3235 snprintf (buf, size, "%s", aarch64_sve_pattern_array[enum_value]);
3236 else
3237 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3238 break;
3239
3240 case AARCH64_OPND_SVE_PATTERN_SCALED:
3241 if (optional_operand_p (opcode, idx)
3242 && !opnd->shifter.operator_present
3243 && opnd->imm.value == get_optional_operand_default_value (opcode))
3244 break;
3245 enum_value = opnd->imm.value;
3246 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3247 if (aarch64_sve_pattern_array[opnd->imm.value])
3248 snprintf (buf, size, "%s", aarch64_sve_pattern_array[opnd->imm.value]);
3249 else
3250 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3251 if (opnd->shifter.operator_present)
3252 {
3253 size_t len = strlen (buf);
3254 snprintf (buf + len, size - len, ", %s #%" PRIi64,
3255 aarch64_operand_modifiers[opnd->shifter.kind].name,
3256 opnd->shifter.amount);
3257 }
3258 break;
3259
3260 case AARCH64_OPND_SVE_PRFOP:
3261 enum_value = opnd->imm.value;
3262 assert (enum_value < ARRAY_SIZE (aarch64_sve_prfop_array));
3263 if (aarch64_sve_prfop_array[enum_value])
3264 snprintf (buf, size, "%s", aarch64_sve_prfop_array[enum_value]);
3265 else
3266 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3267 break;
3268
3269 case AARCH64_OPND_IMM_MOV:
3270 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3271 {
3272 case 4: /* e.g. MOV Wd, #<imm32>. */
3273 {
3274 int imm32 = opnd->imm.value;
3275 snprintf (buf, size, "#0x%-20x\t// #%d", imm32, imm32);
3276 }
3277 break;
3278 case 8: /* e.g. MOV Xd, #<imm64>. */
3279 snprintf (buf, size, "#0x%-20" PRIx64 "\t// #%" PRIi64,
3280 opnd->imm.value, opnd->imm.value);
3281 break;
3282 default: assert (0);
3283 }
3284 break;
3285
3286 case AARCH64_OPND_FPIMM0:
3287 snprintf (buf, size, "#0.0");
3288 break;
3289
3290 case AARCH64_OPND_LIMM:
3291 case AARCH64_OPND_AIMM:
3292 case AARCH64_OPND_HALF:
3293 case AARCH64_OPND_SVE_INV_LIMM:
3294 case AARCH64_OPND_SVE_LIMM:
3295 case AARCH64_OPND_SVE_LIMM_MOV:
3296 if (opnd->shifter.amount)
3297 snprintf (buf, size, "#0x%" PRIx64 ", lsl #%" PRIi64, opnd->imm.value,
3298 opnd->shifter.amount);
3299 else
3300 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
3301 break;
3302
3303 case AARCH64_OPND_SIMD_IMM:
3304 case AARCH64_OPND_SIMD_IMM_SFT:
3305 if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
3306 || opnd->shifter.kind == AARCH64_MOD_NONE)
3307 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
3308 else
3309 snprintf (buf, size, "#0x%" PRIx64 ", %s #%" PRIi64, opnd->imm.value,
3310 aarch64_operand_modifiers[opnd->shifter.kind].name,
3311 opnd->shifter.amount);
3312 break;
3313
3314 case AARCH64_OPND_SVE_AIMM:
3315 case AARCH64_OPND_SVE_ASIMM:
3316 if (opnd->shifter.amount)
3317 snprintf (buf, size, "#%" PRIi64 ", lsl #%" PRIi64, opnd->imm.value,
3318 opnd->shifter.amount);
3319 else
3320 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3321 break;
3322
3323 case AARCH64_OPND_FPIMM:
3324 case AARCH64_OPND_SIMD_FPIMM:
3325 case AARCH64_OPND_SVE_FPIMM8:
3326 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3327 {
3328 case 2: /* e.g. FMOV <Hd>, #<imm>. */
3329 {
3330 half_conv_t c;
3331 c.i = expand_fp_imm (2, opnd->imm.value);
3332 snprintf (buf, size, "#%.18e", c.f);
3333 }
3334 break;
3335 case 4: /* e.g. FMOV <Vd>.4S, #<imm>. */
3336 {
3337 single_conv_t c;
3338 c.i = expand_fp_imm (4, opnd->imm.value);
3339 snprintf (buf, size, "#%.18e", c.f);
3340 }
3341 break;
3342 case 8: /* e.g. FMOV <Dd>, #<imm>. */
3343 {
3344 double_conv_t c;
3345 c.i = expand_fp_imm (8, opnd->imm.value);
3346 snprintf (buf, size, "#%.18e", c.d);
3347 }
3348 break;
3349 default: assert (0);
3350 }
3351 break;
3352
3353 case AARCH64_OPND_CCMP_IMM:
3354 case AARCH64_OPND_NZCV:
3355 case AARCH64_OPND_EXCEPTION:
3356 case AARCH64_OPND_UIMM4:
3357 case AARCH64_OPND_UIMM7:
3358 if (optional_operand_p (opcode, idx) == TRUE
3359 && (opnd->imm.value ==
3360 (int64_t) get_optional_operand_default_value (opcode)))
3361 /* Omit the operand, e.g. DCPS1. */
3362 break;
3363 snprintf (buf, size, "#0x%x", (unsigned int)opnd->imm.value);
3364 break;
3365
3366 case AARCH64_OPND_COND:
3367 case AARCH64_OPND_COND1:
3368 snprintf (buf, size, "%s", opnd->cond->names[0]);
3369 num_conds = ARRAY_SIZE (opnd->cond->names);
3370 for (i = 1; i < num_conds && opnd->cond->names[i]; ++i)
3371 {
3372 size_t len = strlen (buf);
3373 if (i == 1)
3374 snprintf (buf + len, size - len, " // %s = %s",
3375 opnd->cond->names[0], opnd->cond->names[i]);
3376 else
3377 snprintf (buf + len, size - len, ", %s",
3378 opnd->cond->names[i]);
3379 }
3380 break;
3381
3382 case AARCH64_OPND_ADDR_ADRP:
3383 addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff)
3384 + opnd->imm.value;
3385 if (pcrel_p)
3386 *pcrel_p = 1;
3387 if (address)
3388 *address = addr;
3389 /* This is not necessary during disassembling, as print_address_func
3390 in the disassemble_info will take care of the printing. But some
3391 other callers may still be interested in getting the string in *STR,
3392 so here we do snprintf regardless. */
3393 snprintf (buf, size, "#0x%" PRIx64, addr);
3394 break;
3395
3396 case AARCH64_OPND_ADDR_PCREL14:
3397 case AARCH64_OPND_ADDR_PCREL19:
3398 case AARCH64_OPND_ADDR_PCREL21:
3399 case AARCH64_OPND_ADDR_PCREL26:
3400 addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value;
3401 if (pcrel_p)
3402 *pcrel_p = 1;
3403 if (address)
3404 *address = addr;
3405 /* This is not necessary during disassembling, as print_address_func
3406 in the disassemble_info will take care of the printing. But some
3407 other callers may still be interested in getting the string in *STR,
3408 so here we do snprintf regardless. */
3409 snprintf (buf, size, "#0x%" PRIx64, addr);
3410 break;
3411
3412 case AARCH64_OPND_ADDR_SIMPLE:
3413 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
3414 case AARCH64_OPND_SIMD_ADDR_POST:
3415 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3416 if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST)
3417 {
3418 if (opnd->addr.offset.is_reg)
3419 snprintf (buf, size, "[%s], x%d", name, opnd->addr.offset.regno);
3420 else
3421 snprintf (buf, size, "[%s], #%d", name, opnd->addr.offset.imm);
3422 }
3423 else
3424 snprintf (buf, size, "[%s]", name);
3425 break;
3426
3427 case AARCH64_OPND_ADDR_REGOFF:
3428 case AARCH64_OPND_SVE_ADDR_RR:
3429 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
3430 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
3431 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
3432 case AARCH64_OPND_SVE_ADDR_RX:
3433 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
3434 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
3435 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
3436 print_register_offset_address
3437 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3438 get_offset_int_reg_name (opnd));
3439 break;
3440
3441 case AARCH64_OPND_SVE_ADDR_RZ:
3442 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
3443 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
3444 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
3445 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
3446 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
3447 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
3448 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
3449 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
3450 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
3451 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
3452 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
3453 print_register_offset_address
3454 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3455 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
3456 break;
3457
3458 case AARCH64_OPND_ADDR_SIMM7:
3459 case AARCH64_OPND_ADDR_SIMM9:
3460 case AARCH64_OPND_ADDR_SIMM9_2:
3461 case AARCH64_OPND_ADDR_SIMM10:
3462 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
3463 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
3464 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
3465 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
3466 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
3467 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
3468 case AARCH64_OPND_SVE_ADDR_RI_U6:
3469 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
3470 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
3471 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
3472 print_immediate_offset_address
3473 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1));
3474 break;
3475
3476 case AARCH64_OPND_SVE_ADDR_ZI_U5:
3477 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
3478 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
3479 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
3480 print_immediate_offset_address
3481 (buf, size, opnd,
3482 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier));
3483 break;
3484
3485 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
3486 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
3487 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
3488 print_register_offset_address
3489 (buf, size, opnd,
3490 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
3491 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
3492 break;
3493
3494 case AARCH64_OPND_ADDR_UIMM12:
3495 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3496 if (opnd->addr.offset.imm)
3497 snprintf (buf, size, "[%s, #%d]", name, opnd->addr.offset.imm);
3498 else
3499 snprintf (buf, size, "[%s]", name);
3500 break;
3501
3502 case AARCH64_OPND_SYSREG:
3503 for (i = 0; aarch64_sys_regs[i].name; ++i)
3504 if (aarch64_sys_regs[i].value == opnd->sysreg
3505 && ! aarch64_sys_reg_deprecated_p (&aarch64_sys_regs[i]))
3506 break;
3507 if (aarch64_sys_regs[i].name)
3508 snprintf (buf, size, "%s", aarch64_sys_regs[i].name);
3509 else
3510 {
3511 /* Implementation defined system register. */
3512 unsigned int value = opnd->sysreg;
3513 snprintf (buf, size, "s%u_%u_c%u_c%u_%u", (value >> 14) & 0x3,
3514 (value >> 11) & 0x7, (value >> 7) & 0xf, (value >> 3) & 0xf,
3515 value & 0x7);
3516 }
3517 break;
3518
3519 case AARCH64_OPND_PSTATEFIELD:
3520 for (i = 0; aarch64_pstatefields[i].name; ++i)
3521 if (aarch64_pstatefields[i].value == opnd->pstatefield)
3522 break;
3523 assert (aarch64_pstatefields[i].name);
3524 snprintf (buf, size, "%s", aarch64_pstatefields[i].name);
3525 break;
3526
3527 case AARCH64_OPND_SYSREG_AT:
3528 case AARCH64_OPND_SYSREG_DC:
3529 case AARCH64_OPND_SYSREG_IC:
3530 case AARCH64_OPND_SYSREG_TLBI:
3531 snprintf (buf, size, "%s", opnd->sysins_op->name);
3532 break;
3533
3534 case AARCH64_OPND_BARRIER:
3535 snprintf (buf, size, "%s", opnd->barrier->name);
3536 break;
3537
3538 case AARCH64_OPND_BARRIER_ISB:
3539 /* Operand can be omitted, e.g. in DCPS1. */
3540 if (! optional_operand_p (opcode, idx)
3541 || (opnd->barrier->value
3542 != get_optional_operand_default_value (opcode)))
3543 snprintf (buf, size, "#0x%x", opnd->barrier->value);
3544 break;
3545
3546 case AARCH64_OPND_PRFOP:
3547 if (opnd->prfop->name != NULL)
3548 snprintf (buf, size, "%s", opnd->prfop->name);
3549 else
3550 snprintf (buf, size, "#0x%02x", opnd->prfop->value);
3551 break;
3552
3553 case AARCH64_OPND_BARRIER_PSB:
3554 snprintf (buf, size, "%s", opnd->hint_option->name);
3555 break;
3556
3557 default:
3558 assert (0);
3559 }
3560 }
3561 \f
3562 #define CPENC(op0,op1,crn,crm,op2) \
3563 ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
3564 /* for 3.9.3 Instructions for Accessing Special Purpose Registers */
3565 #define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
3566 /* for 3.9.10 System Instructions */
3567 #define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))
3568
3569 #define C0 0
3570 #define C1 1
3571 #define C2 2
3572 #define C3 3
3573 #define C4 4
3574 #define C5 5
3575 #define C6 6
3576 #define C7 7
3577 #define C8 8
3578 #define C9 9
3579 #define C10 10
3580 #define C11 11
3581 #define C12 12
3582 #define C13 13
3583 #define C14 14
3584 #define C15 15
3585
3586 #ifdef F_DEPRECATED
3587 #undef F_DEPRECATED
3588 #endif
3589 #define F_DEPRECATED 0x1 /* Deprecated system register. */
3590
3591 #ifdef F_ARCHEXT
3592 #undef F_ARCHEXT
3593 #endif
3594 #define F_ARCHEXT 0x2 /* Architecture dependent system register. */
3595
3596 #ifdef F_HASXT
3597 #undef F_HASXT
3598 #endif
3599 #define F_HASXT 0x4 /* System instruction register <Xt>
3600 operand. */
3601
3602
3603 /* TODO: two more issues remain to be resolved:
3604 1. handle read-only and write-only system registers
3605 2. handle cpu-implementation-defined system registers. */
3606 const aarch64_sys_reg aarch64_sys_regs [] =
3607 {
3608 { "spsr_el1", CPEN_(0,C0,0), 0 }, /* = spsr_svc */
3609 { "spsr_el12", CPEN_ (5, C0, 0), F_ARCHEXT },
3610 { "elr_el1", CPEN_(0,C0,1), 0 },
3611 { "elr_el12", CPEN_ (5, C0, 1), F_ARCHEXT },
3612 { "sp_el0", CPEN_(0,C1,0), 0 },
3613 { "spsel", CPEN_(0,C2,0), 0 },
3614 { "daif", CPEN_(3,C2,1), 0 },
3615 { "currentel", CPEN_(0,C2,2), 0 }, /* RO */
3616 { "pan", CPEN_(0,C2,3), F_ARCHEXT },
3617 { "uao", CPEN_ (0, C2, 4), F_ARCHEXT },
3618 { "nzcv", CPEN_(3,C2,0), 0 },
3619 { "fpcr", CPEN_(3,C4,0), 0 },
3620 { "fpsr", CPEN_(3,C4,1), 0 },
3621 { "dspsr_el0", CPEN_(3,C5,0), 0 },
3622 { "dlr_el0", CPEN_(3,C5,1), 0 },
3623 { "spsr_el2", CPEN_(4,C0,0), 0 }, /* = spsr_hyp */
3624 { "elr_el2", CPEN_(4,C0,1), 0 },
3625 { "sp_el1", CPEN_(4,C1,0), 0 },
3626 { "spsr_irq", CPEN_(4,C3,0), 0 },
3627 { "spsr_abt", CPEN_(4,C3,1), 0 },
3628 { "spsr_und", CPEN_(4,C3,2), 0 },
3629 { "spsr_fiq", CPEN_(4,C3,3), 0 },
3630 { "spsr_el3", CPEN_(6,C0,0), 0 },
3631 { "elr_el3", CPEN_(6,C0,1), 0 },
3632 { "sp_el2", CPEN_(6,C1,0), 0 },
3633 { "spsr_svc", CPEN_(0,C0,0), F_DEPRECATED }, /* = spsr_el1 */
3634 { "spsr_hyp", CPEN_(4,C0,0), F_DEPRECATED }, /* = spsr_el2 */
3635 { "midr_el1", CPENC(3,0,C0,C0,0), 0 }, /* RO */
3636 { "ctr_el0", CPENC(3,3,C0,C0,1), 0 }, /* RO */
3637 { "mpidr_el1", CPENC(3,0,C0,C0,5), 0 }, /* RO */
3638 { "revidr_el1", CPENC(3,0,C0,C0,6), 0 }, /* RO */
3639 { "aidr_el1", CPENC(3,1,C0,C0,7), 0 }, /* RO */
3640 { "dczid_el0", CPENC(3,3,C0,C0,7), 0 }, /* RO */
3641 { "id_dfr0_el1", CPENC(3,0,C0,C1,2), 0 }, /* RO */
3642 { "id_pfr0_el1", CPENC(3,0,C0,C1,0), 0 }, /* RO */
3643 { "id_pfr1_el1", CPENC(3,0,C0,C1,1), 0 }, /* RO */
3644 { "id_afr0_el1", CPENC(3,0,C0,C1,3), 0 }, /* RO */
3645 { "id_mmfr0_el1", CPENC(3,0,C0,C1,4), 0 }, /* RO */
3646 { "id_mmfr1_el1", CPENC(3,0,C0,C1,5), 0 }, /* RO */
3647 { "id_mmfr2_el1", CPENC(3,0,C0,C1,6), 0 }, /* RO */
3648 { "id_mmfr3_el1", CPENC(3,0,C0,C1,7), 0 }, /* RO */
3649 { "id_mmfr4_el1", CPENC(3,0,C0,C2,6), 0 }, /* RO */
3650 { "id_isar0_el1", CPENC(3,0,C0,C2,0), 0 }, /* RO */
3651 { "id_isar1_el1", CPENC(3,0,C0,C2,1), 0 }, /* RO */
3652 { "id_isar2_el1", CPENC(3,0,C0,C2,2), 0 }, /* RO */
3653 { "id_isar3_el1", CPENC(3,0,C0,C2,3), 0 }, /* RO */
3654 { "id_isar4_el1", CPENC(3,0,C0,C2,4), 0 }, /* RO */
3655 { "id_isar5_el1", CPENC(3,0,C0,C2,5), 0 }, /* RO */
3656 { "mvfr0_el1", CPENC(3,0,C0,C3,0), 0 }, /* RO */
3657 { "mvfr1_el1", CPENC(3,0,C0,C3,1), 0 }, /* RO */
3658 { "mvfr2_el1", CPENC(3,0,C0,C3,2), 0 }, /* RO */
3659 { "ccsidr_el1", CPENC(3,1,C0,C0,0), 0 }, /* RO */
3660 { "id_aa64pfr0_el1", CPENC(3,0,C0,C4,0), 0 }, /* RO */
3661 { "id_aa64pfr1_el1", CPENC(3,0,C0,C4,1), 0 }, /* RO */
3662 { "id_aa64dfr0_el1", CPENC(3,0,C0,C5,0), 0 }, /* RO */
3663 { "id_aa64dfr1_el1", CPENC(3,0,C0,C5,1), 0 }, /* RO */
3664 { "id_aa64isar0_el1", CPENC(3,0,C0,C6,0), 0 }, /* RO */
3665 { "id_aa64isar1_el1", CPENC(3,0,C0,C6,1), 0 }, /* RO */
3666 { "id_aa64mmfr0_el1", CPENC(3,0,C0,C7,0), 0 }, /* RO */
3667 { "id_aa64mmfr1_el1", CPENC(3,0,C0,C7,1), 0 }, /* RO */
3668 { "id_aa64mmfr2_el1", CPENC (3, 0, C0, C7, 2), F_ARCHEXT }, /* RO */
3669 { "id_aa64afr0_el1", CPENC(3,0,C0,C5,4), 0 }, /* RO */
3670 { "id_aa64afr1_el1", CPENC(3,0,C0,C5,5), 0 }, /* RO */
3671 { "clidr_el1", CPENC(3,1,C0,C0,1), 0 }, /* RO */
3672 { "csselr_el1", CPENC(3,2,C0,C0,0), 0 }, /* RO */
3673 { "vpidr_el2", CPENC(3,4,C0,C0,0), 0 },
3674 { "vmpidr_el2", CPENC(3,4,C0,C0,5), 0 },
3675 { "sctlr_el1", CPENC(3,0,C1,C0,0), 0 },
3676 { "sctlr_el2", CPENC(3,4,C1,C0,0), 0 },
3677 { "sctlr_el3", CPENC(3,6,C1,C0,0), 0 },
3678 { "sctlr_el12", CPENC (3, 5, C1, C0, 0), F_ARCHEXT },
3679 { "actlr_el1", CPENC(3,0,C1,C0,1), 0 },
3680 { "actlr_el2", CPENC(3,4,C1,C0,1), 0 },
3681 { "actlr_el3", CPENC(3,6,C1,C0,1), 0 },
3682 { "cpacr_el1", CPENC(3,0,C1,C0,2), 0 },
3683 { "cpacr_el12", CPENC (3, 5, C1, C0, 2), F_ARCHEXT },
3684 { "cptr_el2", CPENC(3,4,C1,C1,2), 0 },
3685 { "cptr_el3", CPENC(3,6,C1,C1,2), 0 },
3686 { "scr_el3", CPENC(3,6,C1,C1,0), 0 },
3687 { "hcr_el2", CPENC(3,4,C1,C1,0), 0 },
3688 { "mdcr_el2", CPENC(3,4,C1,C1,1), 0 },
3689 { "mdcr_el3", CPENC(3,6,C1,C3,1), 0 },
3690 { "hstr_el2", CPENC(3,4,C1,C1,3), 0 },
3691 { "hacr_el2", CPENC(3,4,C1,C1,7), 0 },
3692 { "ttbr0_el1", CPENC(3,0,C2,C0,0), 0 },
3693 { "ttbr1_el1", CPENC(3,0,C2,C0,1), 0 },
3694 { "ttbr0_el2", CPENC(3,4,C2,C0,0), 0 },
3695 { "ttbr1_el2", CPENC (3, 4, C2, C0, 1), F_ARCHEXT },
3696 { "ttbr0_el3", CPENC(3,6,C2,C0,0), 0 },
3697 { "ttbr0_el12", CPENC (3, 5, C2, C0, 0), F_ARCHEXT },
3698 { "ttbr1_el12", CPENC (3, 5, C2, C0, 1), F_ARCHEXT },
3699 { "vttbr_el2", CPENC(3,4,C2,C1,0), 0 },
3700 { "tcr_el1", CPENC(3,0,C2,C0,2), 0 },
3701 { "tcr_el2", CPENC(3,4,C2,C0,2), 0 },
3702 { "tcr_el3", CPENC(3,6,C2,C0,2), 0 },
3703 { "tcr_el12", CPENC (3, 5, C2, C0, 2), F_ARCHEXT },
3704 { "vtcr_el2", CPENC(3,4,C2,C1,2), 0 },
3705 { "apiakeylo_el1", CPENC (3, 0, C2, C1, 0), F_ARCHEXT },
3706 { "apiakeyhi_el1", CPENC (3, 0, C2, C1, 1), F_ARCHEXT },
3707 { "apibkeylo_el1", CPENC (3, 0, C2, C1, 2), F_ARCHEXT },
3708 { "apibkeyhi_el1", CPENC (3, 0, C2, C1, 3), F_ARCHEXT },
3709 { "apdakeylo_el1", CPENC (3, 0, C2, C2, 0), F_ARCHEXT },
3710 { "apdakeyhi_el1", CPENC (3, 0, C2, C2, 1), F_ARCHEXT },
3711 { "apdbkeylo_el1", CPENC (3, 0, C2, C2, 2), F_ARCHEXT },
3712 { "apdbkeyhi_el1", CPENC (3, 0, C2, C2, 3), F_ARCHEXT },
3713 { "apgakeylo_el1", CPENC (3, 0, C2, C3, 0), F_ARCHEXT },
3714 { "apgakeyhi_el1", CPENC (3, 0, C2, C3, 1), F_ARCHEXT },
3715 { "afsr0_el1", CPENC(3,0,C5,C1,0), 0 },
3716 { "afsr1_el1", CPENC(3,0,C5,C1,1), 0 },
3717 { "afsr0_el2", CPENC(3,4,C5,C1,0), 0 },
3718 { "afsr1_el2", CPENC(3,4,C5,C1,1), 0 },
3719 { "afsr0_el3", CPENC(3,6,C5,C1,0), 0 },
3720 { "afsr0_el12", CPENC (3, 5, C5, C1, 0), F_ARCHEXT },
3721 { "afsr1_el3", CPENC(3,6,C5,C1,1), 0 },
3722 { "afsr1_el12", CPENC (3, 5, C5, C1, 1), F_ARCHEXT },
3723 { "esr_el1", CPENC(3,0,C5,C2,0), 0 },
3724 { "esr_el2", CPENC(3,4,C5,C2,0), 0 },
3725 { "esr_el3", CPENC(3,6,C5,C2,0), 0 },
3726 { "esr_el12", CPENC (3, 5, C5, C2, 0), F_ARCHEXT },
3727 { "vsesr_el2", CPENC (3, 4, C5, C2, 3), F_ARCHEXT }, /* RO */
3728 { "fpexc32_el2", CPENC(3,4,C5,C3,0), 0 },
3729 { "erridr_el1", CPENC (3, 0, C5, C3, 0), F_ARCHEXT }, /* RO */
3730 { "errselr_el1", CPENC (3, 0, C5, C3, 1), F_ARCHEXT },
3731 { "erxfr_el1", CPENC (3, 0, C5, C4, 0), F_ARCHEXT }, /* RO */
3732 { "erxctlr_el1", CPENC (3, 0, C5, C4, 1), F_ARCHEXT },
3733 { "erxstatus_el1", CPENC (3, 0, C5, C4, 2), F_ARCHEXT },
3734 { "erxaddr_el1", CPENC (3, 0, C5, C4, 3), F_ARCHEXT },
3735 { "erxmisc0_el1", CPENC (3, 0, C5, C5, 0), F_ARCHEXT },
3736 { "erxmisc1_el1", CPENC (3, 0, C5, C5, 1), F_ARCHEXT },
3737 { "far_el1", CPENC(3,0,C6,C0,0), 0 },
3738 { "far_el2", CPENC(3,4,C6,C0,0), 0 },
3739 { "far_el3", CPENC(3,6,C6,C0,0), 0 },
3740 { "far_el12", CPENC (3, 5, C6, C0, 0), F_ARCHEXT },
3741 { "hpfar_el2", CPENC(3,4,C6,C0,4), 0 },
3742 { "par_el1", CPENC(3,0,C7,C4,0), 0 },
3743 { "mair_el1", CPENC(3,0,C10,C2,0), 0 },
3744 { "mair_el2", CPENC(3,4,C10,C2,0), 0 },
3745 { "mair_el3", CPENC(3,6,C10,C2,0), 0 },
3746 { "mair_el12", CPENC (3, 5, C10, C2, 0), F_ARCHEXT },
3747 { "amair_el1", CPENC(3,0,C10,C3,0), 0 },
3748 { "amair_el2", CPENC(3,4,C10,C3,0), 0 },
3749 { "amair_el3", CPENC(3,6,C10,C3,0), 0 },
3750 { "amair_el12", CPENC (3, 5, C10, C3, 0), F_ARCHEXT },
3751 { "vbar_el1", CPENC(3,0,C12,C0,0), 0 },
3752 { "vbar_el2", CPENC(3,4,C12,C0,0), 0 },
3753 { "vbar_el3", CPENC(3,6,C12,C0,0), 0 },
3754 { "vbar_el12", CPENC (3, 5, C12, C0, 0), F_ARCHEXT },
3755 { "rvbar_el1", CPENC(3,0,C12,C0,1), 0 }, /* RO */
3756 { "rvbar_el2", CPENC(3,4,C12,C0,1), 0 }, /* RO */
3757 { "rvbar_el3", CPENC(3,6,C12,C0,1), 0 }, /* RO */
3758 { "rmr_el1", CPENC(3,0,C12,C0,2), 0 },
3759 { "rmr_el2", CPENC(3,4,C12,C0,2), 0 },
3760 { "rmr_el3", CPENC(3,6,C12,C0,2), 0 },
3761 { "isr_el1", CPENC(3,0,C12,C1,0), 0 }, /* RO */
3762 { "disr_el1", CPENC (3, 0, C12, C1, 1), F_ARCHEXT },
3763 { "vdisr_el2", CPENC (3, 4, C12, C1, 1), F_ARCHEXT },
3764 { "contextidr_el1", CPENC(3,0,C13,C0,1), 0 },
3765 { "contextidr_el2", CPENC (3, 4, C13, C0, 1), F_ARCHEXT },
3766 { "contextidr_el12", CPENC (3, 5, C13, C0, 1), F_ARCHEXT },
3767 { "tpidr_el0", CPENC(3,3,C13,C0,2), 0 },
3768 { "tpidrro_el0", CPENC(3,3,C13,C0,3), 0 }, /* RO */
3769 { "tpidr_el1", CPENC(3,0,C13,C0,4), 0 },
3770 { "tpidr_el2", CPENC(3,4,C13,C0,2), 0 },
3771 { "tpidr_el3", CPENC(3,6,C13,C0,2), 0 },
3772 { "teecr32_el1", CPENC(2,2,C0, C0,0), 0 }, /* See section 3.9.7.1 */
3773 { "cntfrq_el0", CPENC(3,3,C14,C0,0), 0 }, /* RO */
3774 { "cntpct_el0", CPENC(3,3,C14,C0,1), 0 }, /* RO */
3775 { "cntvct_el0", CPENC(3,3,C14,C0,2), 0 }, /* RO */
3776 { "cntvoff_el2", CPENC(3,4,C14,C0,3), 0 },
3777 { "cntkctl_el1", CPENC(3,0,C14,C1,0), 0 },
3778 { "cntkctl_el12", CPENC (3, 5, C14, C1, 0), F_ARCHEXT },
3779 { "cnthctl_el2", CPENC(3,4,C14,C1,0), 0 },
3780 { "cntp_tval_el0", CPENC(3,3,C14,C2,0), 0 },
3781 { "cntp_tval_el02", CPENC (3, 5, C14, C2, 0), F_ARCHEXT },
3782 { "cntp_ctl_el0", CPENC(3,3,C14,C2,1), 0 },
3783 { "cntp_ctl_el02", CPENC (3, 5, C14, C2, 1), F_ARCHEXT },
3784 { "cntp_cval_el0", CPENC(3,3,C14,C2,2), 0 },
3785 { "cntp_cval_el02", CPENC (3, 5, C14, C2, 2), F_ARCHEXT },
3786 { "cntv_tval_el0", CPENC(3,3,C14,C3,0), 0 },
3787 { "cntv_tval_el02", CPENC (3, 5, C14, C3, 0), F_ARCHEXT },
3788 { "cntv_ctl_el0", CPENC(3,3,C14,C3,1), 0 },
3789 { "cntv_ctl_el02", CPENC (3, 5, C14, C3, 1), F_ARCHEXT },
3790 { "cntv_cval_el0", CPENC(3,3,C14,C3,2), 0 },
3791 { "cntv_cval_el02", CPENC (3, 5, C14, C3, 2), F_ARCHEXT },
3792 { "cnthp_tval_el2", CPENC(3,4,C14,C2,0), 0 },
3793 { "cnthp_ctl_el2", CPENC(3,4,C14,C2,1), 0 },
3794 { "cnthp_cval_el2", CPENC(3,4,C14,C2,2), 0 },
3795 { "cntps_tval_el1", CPENC(3,7,C14,C2,0), 0 },
3796 { "cntps_ctl_el1", CPENC(3,7,C14,C2,1), 0 },
3797 { "cntps_cval_el1", CPENC(3,7,C14,C2,2), 0 },
3798 { "cnthv_tval_el2", CPENC (3, 4, C14, C3, 0), F_ARCHEXT },
3799 { "cnthv_ctl_el2", CPENC (3, 4, C14, C3, 1), F_ARCHEXT },
3800 { "cnthv_cval_el2", CPENC (3, 4, C14, C3, 2), F_ARCHEXT },
3801 { "dacr32_el2", CPENC(3,4,C3,C0,0), 0 },
3802 { "ifsr32_el2", CPENC(3,4,C5,C0,1), 0 },
3803 { "teehbr32_el1", CPENC(2,2,C1,C0,0), 0 },
3804 { "sder32_el3", CPENC(3,6,C1,C1,1), 0 },
3805 { "mdscr_el1", CPENC(2,0,C0, C2, 2), 0 },
3806 { "mdccsr_el0", CPENC(2,3,C0, C1, 0), 0 }, /* r */
3807 { "mdccint_el1", CPENC(2,0,C0, C2, 0), 0 },
3808 { "dbgdtr_el0", CPENC(2,3,C0, C4, 0), 0 },
3809 { "dbgdtrrx_el0", CPENC(2,3,C0, C5, 0), 0 }, /* r */
3810 { "dbgdtrtx_el0", CPENC(2,3,C0, C5, 0), 0 }, /* w */
3811 { "osdtrrx_el1", CPENC(2,0,C0, C0, 2), 0 }, /* r */
3812 { "osdtrtx_el1", CPENC(2,0,C0, C3, 2), 0 }, /* w */
3813 { "oseccr_el1", CPENC(2,0,C0, C6, 2), 0 },
3814 { "dbgvcr32_el2", CPENC(2,4,C0, C7, 0), 0 },
3815 { "dbgbvr0_el1", CPENC(2,0,C0, C0, 4), 0 },
3816 { "dbgbvr1_el1", CPENC(2,0,C0, C1, 4), 0 },
3817 { "dbgbvr2_el1", CPENC(2,0,C0, C2, 4), 0 },
3818 { "dbgbvr3_el1", CPENC(2,0,C0, C3, 4), 0 },
3819 { "dbgbvr4_el1", CPENC(2,0,C0, C4, 4), 0 },
3820 { "dbgbvr5_el1", CPENC(2,0,C0, C5, 4), 0 },
3821 { "dbgbvr6_el1", CPENC(2,0,C0, C6, 4), 0 },
3822 { "dbgbvr7_el1", CPENC(2,0,C0, C7, 4), 0 },
3823 { "dbgbvr8_el1", CPENC(2,0,C0, C8, 4), 0 },
3824 { "dbgbvr9_el1", CPENC(2,0,C0, C9, 4), 0 },
3825 { "dbgbvr10_el1", CPENC(2,0,C0, C10,4), 0 },
3826 { "dbgbvr11_el1", CPENC(2,0,C0, C11,4), 0 },
3827 { "dbgbvr12_el1", CPENC(2,0,C0, C12,4), 0 },
3828 { "dbgbvr13_el1", CPENC(2,0,C0, C13,4), 0 },
3829 { "dbgbvr14_el1", CPENC(2,0,C0, C14,4), 0 },
3830 { "dbgbvr15_el1", CPENC(2,0,C0, C15,4), 0 },
3831 { "dbgbcr0_el1", CPENC(2,0,C0, C0, 5), 0 },
3832 { "dbgbcr1_el1", CPENC(2,0,C0, C1, 5), 0 },
3833 { "dbgbcr2_el1", CPENC(2,0,C0, C2, 5), 0 },
3834 { "dbgbcr3_el1", CPENC(2,0,C0, C3, 5), 0 },
3835 { "dbgbcr4_el1", CPENC(2,0,C0, C4, 5), 0 },
3836 { "dbgbcr5_el1", CPENC(2,0,C0, C5, 5), 0 },
3837 { "dbgbcr6_el1", CPENC(2,0,C0, C6, 5), 0 },
3838 { "dbgbcr7_el1", CPENC(2,0,C0, C7, 5), 0 },
3839 { "dbgbcr8_el1", CPENC(2,0,C0, C8, 5), 0 },
3840 { "dbgbcr9_el1", CPENC(2,0,C0, C9, 5), 0 },
3841 { "dbgbcr10_el1", CPENC(2,0,C0, C10,5), 0 },
3842 { "dbgbcr11_el1", CPENC(2,0,C0, C11,5), 0 },
3843 { "dbgbcr12_el1", CPENC(2,0,C0, C12,5), 0 },
3844 { "dbgbcr13_el1", CPENC(2,0,C0, C13,5), 0 },
3845 { "dbgbcr14_el1", CPENC(2,0,C0, C14,5), 0 },
3846 { "dbgbcr15_el1", CPENC(2,0,C0, C15,5), 0 },
3847 { "dbgwvr0_el1", CPENC(2,0,C0, C0, 6), 0 },
3848 { "dbgwvr1_el1", CPENC(2,0,C0, C1, 6), 0 },
3849 { "dbgwvr2_el1", CPENC(2,0,C0, C2, 6), 0 },
3850 { "dbgwvr3_el1", CPENC(2,0,C0, C3, 6), 0 },
3851 { "dbgwvr4_el1", CPENC(2,0,C0, C4, 6), 0 },
3852 { "dbgwvr5_el1", CPENC(2,0,C0, C5, 6), 0 },
3853 { "dbgwvr6_el1", CPENC(2,0,C0, C6, 6), 0 },
3854 { "dbgwvr7_el1", CPENC(2,0,C0, C7, 6), 0 },
3855 { "dbgwvr8_el1", CPENC(2,0,C0, C8, 6), 0 },
3856 { "dbgwvr9_el1", CPENC(2,0,C0, C9, 6), 0 },
3857 { "dbgwvr10_el1", CPENC(2,0,C0, C10,6), 0 },
3858 { "dbgwvr11_el1", CPENC(2,0,C0, C11,6), 0 },
3859 { "dbgwvr12_el1", CPENC(2,0,C0, C12,6), 0 },
3860 { "dbgwvr13_el1", CPENC(2,0,C0, C13,6), 0 },
3861 { "dbgwvr14_el1", CPENC(2,0,C0, C14,6), 0 },
3862 { "dbgwvr15_el1", CPENC(2,0,C0, C15,6), 0 },
3863 { "dbgwcr0_el1", CPENC(2,0,C0, C0, 7), 0 },
3864 { "dbgwcr1_el1", CPENC(2,0,C0, C1, 7), 0 },
3865 { "dbgwcr2_el1", CPENC(2,0,C0, C2, 7), 0 },
3866 { "dbgwcr3_el1", CPENC(2,0,C0, C3, 7), 0 },
3867 { "dbgwcr4_el1", CPENC(2,0,C0, C4, 7), 0 },
3868 { "dbgwcr5_el1", CPENC(2,0,C0, C5, 7), 0 },
3869 { "dbgwcr6_el1", CPENC(2,0,C0, C6, 7), 0 },
3870 { "dbgwcr7_el1", CPENC(2,0,C0, C7, 7), 0 },
3871 { "dbgwcr8_el1", CPENC(2,0,C0, C8, 7), 0 },
3872 { "dbgwcr9_el1", CPENC(2,0,C0, C9, 7), 0 },
3873 { "dbgwcr10_el1", CPENC(2,0,C0, C10,7), 0 },
3874 { "dbgwcr11_el1", CPENC(2,0,C0, C11,7), 0 },
3875 { "dbgwcr12_el1", CPENC(2,0,C0, C12,7), 0 },
3876 { "dbgwcr13_el1", CPENC(2,0,C0, C13,7), 0 },
3877 { "dbgwcr14_el1", CPENC(2,0,C0, C14,7), 0 },
3878 { "dbgwcr15_el1", CPENC(2,0,C0, C15,7), 0 },
3879 { "mdrar_el1", CPENC(2,0,C1, C0, 0), 0 }, /* r */
3880 { "oslar_el1", CPENC(2,0,C1, C0, 4), 0 }, /* w */
3881 { "oslsr_el1", CPENC(2,0,C1, C1, 4), 0 }, /* r */
3882 { "osdlr_el1", CPENC(2,0,C1, C3, 4), 0 },
3883 { "dbgprcr_el1", CPENC(2,0,C1, C4, 4), 0 },
3884 { "dbgclaimset_el1", CPENC(2,0,C7, C8, 6), 0 },
3885 { "dbgclaimclr_el1", CPENC(2,0,C7, C9, 6), 0 },
3886 { "dbgauthstatus_el1", CPENC(2,0,C7, C14,6), 0 }, /* r */
3887 { "pmblimitr_el1", CPENC (3, 0, C9, C10, 0), F_ARCHEXT }, /* rw */
3888 { "pmbptr_el1", CPENC (3, 0, C9, C10, 1), F_ARCHEXT }, /* rw */
3889 { "pmbsr_el1", CPENC (3, 0, C9, C10, 3), F_ARCHEXT }, /* rw */
3890 { "pmbidr_el1", CPENC (3, 0, C9, C10, 7), F_ARCHEXT }, /* ro */
3891 { "pmscr_el1", CPENC (3, 0, C9, C9, 0), F_ARCHEXT }, /* rw */
3892 { "pmsicr_el1", CPENC (3, 0, C9, C9, 2), F_ARCHEXT }, /* rw */
3893 { "pmsirr_el1", CPENC (3, 0, C9, C9, 3), F_ARCHEXT }, /* rw */
3894 { "pmsfcr_el1", CPENC (3, 0, C9, C9, 4), F_ARCHEXT }, /* rw */
3895 { "pmsevfr_el1", CPENC (3, 0, C9, C9, 5), F_ARCHEXT }, /* rw */
3896 { "pmslatfr_el1", CPENC (3, 0, C9, C9, 6), F_ARCHEXT }, /* rw */
3897 { "pmsidr_el1", CPENC (3, 0, C9, C9, 7), F_ARCHEXT }, /* ro */
3898 { "pmscr_el2", CPENC (3, 4, C9, C9, 0), F_ARCHEXT }, /* rw */
3899 { "pmscr_el12", CPENC (3, 5, C9, C9, 0), F_ARCHEXT }, /* rw */
3900 { "pmcr_el0", CPENC(3,3,C9,C12, 0), 0 },
3901 { "pmcntenset_el0", CPENC(3,3,C9,C12, 1), 0 },
3902 { "pmcntenclr_el0", CPENC(3,3,C9,C12, 2), 0 },
3903 { "pmovsclr_el0", CPENC(3,3,C9,C12, 3), 0 },
3904 { "pmswinc_el0", CPENC(3,3,C9,C12, 4), 0 }, /* w */
3905 { "pmselr_el0", CPENC(3,3,C9,C12, 5), 0 },
3906 { "pmceid0_el0", CPENC(3,3,C9,C12, 6), 0 }, /* r */
3907 { "pmceid1_el0", CPENC(3,3,C9,C12, 7), 0 }, /* r */
3908 { "pmccntr_el0", CPENC(3,3,C9,C13, 0), 0 },
3909 { "pmxevtyper_el0", CPENC(3,3,C9,C13, 1), 0 },
3910 { "pmxevcntr_el0", CPENC(3,3,C9,C13, 2), 0 },
3911 { "pmuserenr_el0", CPENC(3,3,C9,C14, 0), 0 },
3912 { "pmintenset_el1", CPENC(3,0,C9,C14, 1), 0 },
3913 { "pmintenclr_el1", CPENC(3,0,C9,C14, 2), 0 },
3914 { "pmovsset_el0", CPENC(3,3,C9,C14, 3), 0 },
3915 { "pmevcntr0_el0", CPENC(3,3,C14,C8, 0), 0 },
3916 { "pmevcntr1_el0", CPENC(3,3,C14,C8, 1), 0 },
3917 { "pmevcntr2_el0", CPENC(3,3,C14,C8, 2), 0 },
3918 { "pmevcntr3_el0", CPENC(3,3,C14,C8, 3), 0 },
3919 { "pmevcntr4_el0", CPENC(3,3,C14,C8, 4), 0 },
3920 { "pmevcntr5_el0", CPENC(3,3,C14,C8, 5), 0 },
3921 { "pmevcntr6_el0", CPENC(3,3,C14,C8, 6), 0 },
3922 { "pmevcntr7_el0", CPENC(3,3,C14,C8, 7), 0 },
3923 { "pmevcntr8_el0", CPENC(3,3,C14,C9, 0), 0 },
3924 { "pmevcntr9_el0", CPENC(3,3,C14,C9, 1), 0 },
3925 { "pmevcntr10_el0", CPENC(3,3,C14,C9, 2), 0 },
3926 { "pmevcntr11_el0", CPENC(3,3,C14,C9, 3), 0 },
3927 { "pmevcntr12_el0", CPENC(3,3,C14,C9, 4), 0 },
3928 { "pmevcntr13_el0", CPENC(3,3,C14,C9, 5), 0 },
3929 { "pmevcntr14_el0", CPENC(3,3,C14,C9, 6), 0 },
3930 { "pmevcntr15_el0", CPENC(3,3,C14,C9, 7), 0 },
3931 { "pmevcntr16_el0", CPENC(3,3,C14,C10,0), 0 },
3932 { "pmevcntr17_el0", CPENC(3,3,C14,C10,1), 0 },
3933 { "pmevcntr18_el0", CPENC(3,3,C14,C10,2), 0 },
3934 { "pmevcntr19_el0", CPENC(3,3,C14,C10,3), 0 },
3935 { "pmevcntr20_el0", CPENC(3,3,C14,C10,4), 0 },
3936 { "pmevcntr21_el0", CPENC(3,3,C14,C10,5), 0 },
3937 { "pmevcntr22_el0", CPENC(3,3,C14,C10,6), 0 },
3938 { "pmevcntr23_el0", CPENC(3,3,C14,C10,7), 0 },
3939 { "pmevcntr24_el0", CPENC(3,3,C14,C11,0), 0 },
3940 { "pmevcntr25_el0", CPENC(3,3,C14,C11,1), 0 },
3941 { "pmevcntr26_el0", CPENC(3,3,C14,C11,2), 0 },
3942 { "pmevcntr27_el0", CPENC(3,3,C14,C11,3), 0 },
3943 { "pmevcntr28_el0", CPENC(3,3,C14,C11,4), 0 },
3944 { "pmevcntr29_el0", CPENC(3,3,C14,C11,5), 0 },
3945 { "pmevcntr30_el0", CPENC(3,3,C14,C11,6), 0 },
3946 { "pmevtyper0_el0", CPENC(3,3,C14,C12,0), 0 },
3947 { "pmevtyper1_el0", CPENC(3,3,C14,C12,1), 0 },
3948 { "pmevtyper2_el0", CPENC(3,3,C14,C12,2), 0 },
3949 { "pmevtyper3_el0", CPENC(3,3,C14,C12,3), 0 },
3950 { "pmevtyper4_el0", CPENC(3,3,C14,C12,4), 0 },
3951 { "pmevtyper5_el0", CPENC(3,3,C14,C12,5), 0 },
3952 { "pmevtyper6_el0", CPENC(3,3,C14,C12,6), 0 },
3953 { "pmevtyper7_el0", CPENC(3,3,C14,C12,7), 0 },
3954 { "pmevtyper8_el0", CPENC(3,3,C14,C13,0), 0 },
3955 { "pmevtyper9_el0", CPENC(3,3,C14,C13,1), 0 },
3956 { "pmevtyper10_el0", CPENC(3,3,C14,C13,2), 0 },
3957 { "pmevtyper11_el0", CPENC(3,3,C14,C13,3), 0 },
3958 { "pmevtyper12_el0", CPENC(3,3,C14,C13,4), 0 },
3959 { "pmevtyper13_el0", CPENC(3,3,C14,C13,5), 0 },
3960 { "pmevtyper14_el0", CPENC(3,3,C14,C13,6), 0 },
3961 { "pmevtyper15_el0", CPENC(3,3,C14,C13,7), 0 },
3962 { "pmevtyper16_el0", CPENC(3,3,C14,C14,0), 0 },
3963 { "pmevtyper17_el0", CPENC(3,3,C14,C14,1), 0 },
3964 { "pmevtyper18_el0", CPENC(3,3,C14,C14,2), 0 },
3965 { "pmevtyper19_el0", CPENC(3,3,C14,C14,3), 0 },
3966 { "pmevtyper20_el0", CPENC(3,3,C14,C14,4), 0 },
3967 { "pmevtyper21_el0", CPENC(3,3,C14,C14,5), 0 },
3968 { "pmevtyper22_el0", CPENC(3,3,C14,C14,6), 0 },
3969 { "pmevtyper23_el0", CPENC(3,3,C14,C14,7), 0 },
3970 { "pmevtyper24_el0", CPENC(3,3,C14,C15,0), 0 },
3971 { "pmevtyper25_el0", CPENC(3,3,C14,C15,1), 0 },
3972 { "pmevtyper26_el0", CPENC(3,3,C14,C15,2), 0 },
3973 { "pmevtyper27_el0", CPENC(3,3,C14,C15,3), 0 },
3974 { "pmevtyper28_el0", CPENC(3,3,C14,C15,4), 0 },
3975 { "pmevtyper29_el0", CPENC(3,3,C14,C15,5), 0 },
3976 { "pmevtyper30_el0", CPENC(3,3,C14,C15,6), 0 },
3977 { "pmccfiltr_el0", CPENC(3,3,C14,C15,7), 0 },
3978 { 0, CPENC(0,0,0,0,0), 0 },
3979 };
3980
3981 bfd_boolean
3982 aarch64_sys_reg_deprecated_p (const aarch64_sys_reg *reg)
3983 {
3984 return (reg->flags & F_DEPRECATED) != 0;
3985 }
3986
/* Return TRUE if the system register described by REG is available on
   an implementation with feature set FEATURES.  Registers that do not
   carry F_ARCHEXT belong to the base architecture and are always
   available; the rest are identified by their encoding (values taken
   from aarch64_sys_regs above) and matched against the architecture
   extension that introduced them.  */
bfd_boolean
aarch64_sys_reg_supported_p (const aarch64_feature_set features,
			     const aarch64_sys_reg *reg)
{
  /* Registers without an architecture-extension flag are always OK.  */
  if (!(reg->flags & F_ARCHEXT))
    return TRUE;

  /* PAN.  Values are from aarch64_sys_regs.  */
  if (reg->value == CPEN_(0,C2,3)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
    return FALSE;

  /* Virtualization host extensions: system registers.  */
  if ((reg->value == CPENC (3, 4, C2, C0, 1)
       || reg->value == CPENC (3, 4, C13, C0, 1)
       || reg->value == CPENC (3, 4, C14, C3, 0)
       || reg->value == CPENC (3, 4, C14, C3, 1)
       || reg->value == CPENC (3, 4, C14, C3, 2))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
    return FALSE;

  /* Virtualization host extensions: *_el12 names of *_el1 registers.  */
  if ((reg->value == CPEN_ (5, C0, 0)
       || reg->value == CPEN_ (5, C0, 1)
       || reg->value == CPENC (3, 5, C1, C0, 0)
       || reg->value == CPENC (3, 5, C1, C0, 2)
       || reg->value == CPENC (3, 5, C2, C0, 0)
       || reg->value == CPENC (3, 5, C2, C0, 1)
       || reg->value == CPENC (3, 5, C2, C0, 2)
       || reg->value == CPENC (3, 5, C5, C1, 0)
       || reg->value == CPENC (3, 5, C5, C1, 1)
       || reg->value == CPENC (3, 5, C5, C2, 0)
       || reg->value == CPENC (3, 5, C6, C0, 0)
       || reg->value == CPENC (3, 5, C10, C2, 0)
       || reg->value == CPENC (3, 5, C10, C3, 0)
       || reg->value == CPENC (3, 5, C12, C0, 0)
       || reg->value == CPENC (3, 5, C13, C0, 1)
       || reg->value == CPENC (3, 5, C14, C1, 0))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
    return FALSE;

  /* Virtualization host extensions: *_el02 names of *_el0 registers.  */
  if ((reg->value == CPENC (3, 5, C14, C2, 0)
       || reg->value == CPENC (3, 5, C14, C2, 1)
       || reg->value == CPENC (3, 5, C14, C2, 2)
       || reg->value == CPENC (3, 5, C14, C3, 0)
       || reg->value == CPENC (3, 5, C14, C3, 1)
       || reg->value == CPENC (3, 5, C14, C3, 2))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
    return FALSE;

  /* ARMv8.2 features.  */

  /* ID_AA64MMFR2_EL1.  */
  if (reg->value == CPENC (3, 0, C0, C7, 2)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return FALSE;

  /* PSTATE.UAO.  */
  if (reg->value == CPEN_ (0, C2, 4)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return FALSE;

  /* RAS extension.  */

  /* ERRIDR_EL1, ERRSELR_EL1, ERXFR_EL1, ERXCTLR_EL1, ERXSTATUS_EL1,
     ERXADDR_EL1, ERXMISC0_EL1 and ERXMISC1_EL1.
     NOTE(review): the C5,C3,2 and C5,C3,3 encodings below do not
     correspond to a named register in the list above -- presumably
     reserved slots in the same group; confirm against the Arm ARM.  */
  if ((reg->value == CPENC (3, 0, C5, C3, 0)
       || reg->value == CPENC (3, 0, C5, C3, 1)
       || reg->value == CPENC (3, 0, C5, C3, 2)
       || reg->value == CPENC (3, 0, C5, C3, 3)
       || reg->value == CPENC (3, 0, C5, C4, 0)
       || reg->value == CPENC (3, 0, C5, C4, 1)
       || reg->value == CPENC (3, 0, C5, C4, 2)
       || reg->value == CPENC (3, 0, C5, C4, 3)
       || reg->value == CPENC (3, 0, C5, C5, 0)
       || reg->value == CPENC (3, 0, C5, C5, 1))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
    return FALSE;

  /* VSESR_EL2, DISR_EL1 and VDISR_EL2.  */
  if ((reg->value == CPENC (3, 4, C5, C2, 3)
       || reg->value == CPENC (3, 0, C12, C1, 1)
       || reg->value == CPENC (3, 4, C12, C1, 1))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
    return FALSE;

  /* Statistical Profiling extension (pmb*_el1, pms*_el1, pmscr_el2,
     pmscr_el12; encodings match the F_ARCHEXT entries in
     aarch64_sys_regs above).  */
  if ((reg->value == CPENC (3, 0, C9, C10, 0)
       || reg->value == CPENC (3, 0, C9, C10, 1)
       || reg->value == CPENC (3, 0, C9, C10, 3)
       || reg->value == CPENC (3, 0, C9, C10, 7)
       || reg->value == CPENC (3, 0, C9, C9, 0)
       || reg->value == CPENC (3, 0, C9, C9, 2)
       || reg->value == CPENC (3, 0, C9, C9, 3)
       || reg->value == CPENC (3, 0, C9, C9, 4)
       || reg->value == CPENC (3, 0, C9, C9, 5)
       || reg->value == CPENC (3, 0, C9, C9, 6)
       || reg->value == CPENC (3, 0, C9, C9, 7)
       || reg->value == CPENC (3, 4, C9, C9, 0)
       || reg->value == CPENC (3, 5, C9, C9, 0))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PROFILE))
    return FALSE;

  /* ARMv8.3 Pointer authentication keys.  */
  if ((reg->value == CPENC (3, 0, C2, C1, 0)
       || reg->value == CPENC (3, 0, C2, C1, 1)
       || reg->value == CPENC (3, 0, C2, C1, 2)
       || reg->value == CPENC (3, 0, C2, C1, 3)
       || reg->value == CPENC (3, 0, C2, C2, 0)
       || reg->value == CPENC (3, 0, C2, C2, 1)
       || reg->value == CPENC (3, 0, C2, C2, 2)
       || reg->value == CPENC (3, 0, C2, C2, 3)
       || reg->value == CPENC (3, 0, C2, C3, 0)
       || reg->value == CPENC (3, 0, C2, C3, 1))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_3))
    return FALSE;

  /* Not matched by any extension check above: supported.  */
  return TRUE;
}
4107
/* PSTATE field operands.  The second element is the immediate
   encoding; entries flagged F_ARCHEXT require an architecture
   extension and are validated by aarch64_pstatefield_supported_p.
   The table is terminated by a null-name sentinel entry.  */
const aarch64_sys_reg aarch64_pstatefields [] =
{
  { "spsel",            0x05,   0 },
  { "daifset",          0x1e,   0 },
  { "daifclr",          0x1f,   0 },
  { "pan",              0x04,   F_ARCHEXT },
  { "uao",              0x03,   F_ARCHEXT },
  { 0,          CPENC(0,0,0,0,0), 0 },
};
4117
4118 bfd_boolean
4119 aarch64_pstatefield_supported_p (const aarch64_feature_set features,
4120 const aarch64_sys_reg *reg)
4121 {
4122 if (!(reg->flags & F_ARCHEXT))
4123 return TRUE;
4124
4125 /* PAN. Values are from aarch64_pstatefields. */
4126 if (reg->value == 0x04
4127 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
4128 return FALSE;
4129
4130 /* UAO. Values are from aarch64_pstatefields. */
4131 if (reg->value == 0x03
4132 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4133 return FALSE;
4134
4135 return TRUE;
4136 }
4137
4138 const aarch64_sys_ins_reg aarch64_sys_regs_ic[] =
4139 {
4140 { "ialluis", CPENS(0,C7,C1,0), 0 },
4141 { "iallu", CPENS(0,C7,C5,0), 0 },
4142 { "ivau", CPENS (3, C7, C5, 1), F_HASXT },
4143 { 0, CPENS(0,0,0,0), 0 }
4144 };
4145
4146 const aarch64_sys_ins_reg aarch64_sys_regs_dc[] =
4147 {
4148 { "zva", CPENS (3, C7, C4, 1), F_HASXT },
4149 { "ivac", CPENS (0, C7, C6, 1), F_HASXT },
4150 { "isw", CPENS (0, C7, C6, 2), F_HASXT },
4151 { "cvac", CPENS (3, C7, C10, 1), F_HASXT },
4152 { "csw", CPENS (0, C7, C10, 2), F_HASXT },
4153 { "cvau", CPENS (3, C7, C11, 1), F_HASXT },
4154 { "cvap", CPENS (3, C7, C12, 1), F_HASXT | F_ARCHEXT },
4155 { "civac", CPENS (3, C7, C14, 1), F_HASXT },
4156 { "cisw", CPENS (0, C7, C14, 2), F_HASXT },
4157 { 0, CPENS(0,0,0,0), 0 }
4158 };
4159
4160 const aarch64_sys_ins_reg aarch64_sys_regs_at[] =
4161 {
4162 { "s1e1r", CPENS (0, C7, C8, 0), F_HASXT },
4163 { "s1e1w", CPENS (0, C7, C8, 1), F_HASXT },
4164 { "s1e0r", CPENS (0, C7, C8, 2), F_HASXT },
4165 { "s1e0w", CPENS (0, C7, C8, 3), F_HASXT },
4166 { "s12e1r", CPENS (4, C7, C8, 4), F_HASXT },
4167 { "s12e1w", CPENS (4, C7, C8, 5), F_HASXT },
4168 { "s12e0r", CPENS (4, C7, C8, 6), F_HASXT },
4169 { "s12e0w", CPENS (4, C7, C8, 7), F_HASXT },
4170 { "s1e2r", CPENS (4, C7, C8, 0), F_HASXT },
4171 { "s1e2w", CPENS (4, C7, C8, 1), F_HASXT },
4172 { "s1e3r", CPENS (6, C7, C8, 0), F_HASXT },
4173 { "s1e3w", CPENS (6, C7, C8, 1), F_HASXT },
4174 { "s1e1rp", CPENS (0, C7, C9, 0), F_HASXT | F_ARCHEXT },
4175 { "s1e1wp", CPENS (0, C7, C9, 1), F_HASXT | F_ARCHEXT },
4176 { 0, CPENS(0,0,0,0), 0 }
4177 };
4178
4179 const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] =
4180 {
4181 { "vmalle1", CPENS(0,C8,C7,0), 0 },
4182 { "vae1", CPENS (0, C8, C7, 1), F_HASXT },
4183 { "aside1", CPENS (0, C8, C7, 2), F_HASXT },
4184 { "vaae1", CPENS (0, C8, C7, 3), F_HASXT },
4185 { "vmalle1is", CPENS(0,C8,C3,0), 0 },
4186 { "vae1is", CPENS (0, C8, C3, 1), F_HASXT },
4187 { "aside1is", CPENS (0, C8, C3, 2), F_HASXT },
4188 { "vaae1is", CPENS (0, C8, C3, 3), F_HASXT },
4189 { "ipas2e1is", CPENS (4, C8, C0, 1), F_HASXT },
4190 { "ipas2le1is",CPENS (4, C8, C0, 5), F_HASXT },
4191 { "ipas2e1", CPENS (4, C8, C4, 1), F_HASXT },
4192 { "ipas2le1", CPENS (4, C8, C4, 5), F_HASXT },
4193 { "vae2", CPENS (4, C8, C7, 1), F_HASXT },
4194 { "vae2is", CPENS (4, C8, C3, 1), F_HASXT },
4195 { "vmalls12e1",CPENS(4,C8,C7,6), 0 },
4196 { "vmalls12e1is",CPENS(4,C8,C3,6), 0 },
4197 { "vae3", CPENS (6, C8, C7, 1), F_HASXT },
4198 { "vae3is", CPENS (6, C8, C3, 1), F_HASXT },
4199 { "alle2", CPENS(4,C8,C7,0), 0 },
4200 { "alle2is", CPENS(4,C8,C3,0), 0 },
4201 { "alle1", CPENS(4,C8,C7,4), 0 },
4202 { "alle1is", CPENS(4,C8,C3,4), 0 },
4203 { "alle3", CPENS(6,C8,C7,0), 0 },
4204 { "alle3is", CPENS(6,C8,C3,0), 0 },
4205 { "vale1is", CPENS (0, C8, C3, 5), F_HASXT },
4206 { "vale2is", CPENS (4, C8, C3, 5), F_HASXT },
4207 { "vale3is", CPENS (6, C8, C3, 5), F_HASXT },
4208 { "vaale1is", CPENS (0, C8, C3, 7), F_HASXT },
4209 { "vale1", CPENS (0, C8, C7, 5), F_HASXT },
4210 { "vale2", CPENS (4, C8, C7, 5), F_HASXT },
4211 { "vale3", CPENS (6, C8, C7, 5), F_HASXT },
4212 { "vaale1", CPENS (0, C8, C7, 7), F_HASXT },
4213 { 0, CPENS(0,0,0,0), 0 }
4214 };
4215
4216 bfd_boolean
4217 aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *sys_ins_reg)
4218 {
4219 return (sys_ins_reg->flags & F_HASXT) != 0;
4220 }
4221
4222 extern bfd_boolean
4223 aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
4224 const aarch64_sys_ins_reg *reg)
4225 {
4226 if (!(reg->flags & F_ARCHEXT))
4227 return TRUE;
4228
4229 /* DC CVAP. Values are from aarch64_sys_regs_dc. */
4230 if (reg->value == CPENS (3, C7, C12, 1)
4231 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4232 return FALSE;
4233
4234 /* AT S1E1RP, AT S1E1WP. Values are from aarch64_sys_regs_at. */
4235 if ((reg->value == CPENS (0, C7, C9, 0)
4236 || reg->value == CPENS (0, C7, C9, 1))
4237 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4238 return FALSE;
4239
4240 return TRUE;
4241 }
4242
4243 #undef C0
4244 #undef C1
4245 #undef C2
4246 #undef C3
4247 #undef C4
4248 #undef C5
4249 #undef C6
4250 #undef C7
4251 #undef C8
4252 #undef C9
4253 #undef C10
4254 #undef C11
4255 #undef C12
4256 #undef C13
4257 #undef C14
4258 #undef C15
4259
/* Extract bit number BT from the instruction word INSN.  */
#define BIT(INSN,BT) (((INSN) >> (BT)) & 1)
/* Extract the inclusive bit field [HI:LO] from INSN.
   NOTE(review): the mask is built with a plain int `1', so fields
   wider than 30 bits would overflow -- the callers here only extract
   narrow fields.  */
#define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1 << (((HI) - (LO)) + 1)) - 1))
4262
4263 static bfd_boolean
4264 verify_ldpsw (const struct aarch64_opcode * opcode ATTRIBUTE_UNUSED,
4265 const aarch64_insn insn)
4266 {
4267 int t = BITS (insn, 4, 0);
4268 int n = BITS (insn, 9, 5);
4269 int t2 = BITS (insn, 14, 10);
4270
4271 if (BIT (insn, 23))
4272 {
4273 /* Write back enabled. */
4274 if ((t == n || t2 == n) && n != 31)
4275 return FALSE;
4276 }
4277
4278 if (BIT (insn, 22))
4279 {
4280 /* Load */
4281 if (t == t2)
4282 return FALSE;
4283 }
4284
4285 return TRUE;
4286 }
4287
/* Return true if VALUE cannot be moved into an SVE register using DUP
   (with any element size, not just ESIZE) and if using DUPM would
   therefore be OK.  ESIZE is the number of bytes in the immediate.  */

bfd_boolean
aarch64_sve_dupm_mov_immediate_p (uint64_t uvalue, int esize)
{
  int64_t svalue = uvalue;
  /* Mask of the bits above the low ESIZE * 8 bits.  The shift is done
     in two halves because a single shift by ESIZE * 8 would be
     undefined behavior when ESIZE == 8 (shift by the full width).  */
  uint64_t upper = (uint64_t) -1 << (esize * 4) << (esize * 4);

  /* The bits above the immediate must be either all zeros or all
     ones, i.e. UVALUE must be a zero- or sign-extension of its low
     ESIZE * 8 bits.  */
  if ((uvalue & ~upper) != uvalue && (uvalue | upper) != uvalue)
    return FALSE;
  /* Narrow SVALUE to the smallest element size whose replication
     still reproduces UVALUE; DUP could use that element size.  */
  if (esize <= 4 || (uint32_t) uvalue == (uint32_t) (uvalue >> 32))
    {
      svalue = (int32_t) uvalue;
      if (esize <= 2 || (uint16_t) uvalue == (uint16_t) (uvalue >> 16))
	{
	  svalue = (int16_t) uvalue;
	  /* A byte-replicated value is always representable by DUP,
	     so DUPM is not the move of choice.  */
	  if (esize == 1 || (uint8_t) uvalue == (uint8_t) (uvalue >> 8))
	    return FALSE;
	}
    }
  /* DUP's immediate may carry a left-shift by 8; fold that in.  The
     division is exact here because the low byte is zero.  */
  if ((svalue & 0xff) == 0)
    svalue /= 256;
  /* DUPM is wanted only when the value is outside DUP's signed 8-bit
     immediate range.  */
  return svalue < -128 || svalue >= 128;
}
4314
4315 /* Include the opcode description table as well as the operand description
4316 table. */
4317 #define VERIFIER(x) verify_##x
4318 #include "aarch64-tbl.h"