1 /* aarch64-opc.c -- AArch64 opcode support.
2 Copyright (C) 2009-2018 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of the GNU opcodes library.
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
30 #include "libiberty.h"
32 #include "aarch64-opc.h"
#ifdef DEBUG_AARCH64
/* Non-zero enables verbose debug tracing (used by the DEBUG_TRACE calls
   elsewhere in this file).  Off by default.  */
int debug_dump = FALSE;
#endif /* DEBUG_AARCH64 */
38 /* The enumeration strings associated with each value of a 5-bit SVE
39 pattern operand. A null entry indicates a reserved meaning. */
/* NOTE(review): the 32 initializer entries of this array are missing from
   this extract — restore them from the original source before building.  */
40 const char *const aarch64_sve_pattern_array
[32] = {
79 /* The enumeration strings associated with each value of a 4-bit SVE
80 prefetch operand. A null entry indicates a reserved meaning. */
/* NOTE(review): the 16 initializer entries of this array are missing from
   this extract — restore them from the original source before building.  */
81 const char *const aarch64_sve_prfop_array
[16] = {
102 /* Helper functions to determine which operand to be used to encode/decode
103 the size:Q fields for AdvSIMD instructions. */
105 static inline bfd_boolean
106 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier
)
108 return ((qualifier
>= AARCH64_OPND_QLF_V_8B
109 && qualifier
<= AARCH64_OPND_QLF_V_1Q
) ? TRUE
113 static inline bfd_boolean
114 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier
)
116 return ((qualifier
>= AARCH64_OPND_QLF_S_B
117 && qualifier
<= AARCH64_OPND_QLF_S_Q
) ? TRUE
/* NOTE(review): fragment of the data_pattern enumeration; the other
   enumerators and the enclosing braces are missing from this extract.  */
127 DP_VECTOR_ACROSS_LANES
,
/* For each value of enum data_pattern, the index of the operand whose
   qualifier determines the size:Q encoding.  Order must follow the
   data_pattern enumeration.  */
static const char significant_operand_index [] =
{
  0,	/* DP_UNKNOWN, by default using operand 0.  */
  0,	/* DP_VECTOR_3SAME */
  1,	/* DP_VECTOR_LONG */
  2,	/* DP_VECTOR_WIDE */
  1,	/* DP_VECTOR_ACROSS_LANES */
};
139 /* Given a sequence of qualifiers in QUALIFIERS, determine and return
141 N.B. QUALIFIERS is a possible sequence of qualifiers each of which
142 corresponds to one of a sequence of operands. */
144 static enum data_pattern
145 get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers
)
147 if (vector_qualifier_p (qualifiers
[0]) == TRUE
)
149 /* e.g. v.4s, v.4s, v.4s
150 or v.4h, v.4h, v.h[3]. */
151 if (qualifiers
[0] == qualifiers
[1]
152 && vector_qualifier_p (qualifiers
[2]) == TRUE
153 && (aarch64_get_qualifier_esize (qualifiers
[0])
154 == aarch64_get_qualifier_esize (qualifiers
[1]))
155 && (aarch64_get_qualifier_esize (qualifiers
[0])
156 == aarch64_get_qualifier_esize (qualifiers
[2])))
157 return DP_VECTOR_3SAME
;
158 /* e.g. v.8h, v.8b, v.8b.
159 or v.4s, v.4h, v.h[2].
161 if (vector_qualifier_p (qualifiers
[1]) == TRUE
162 && aarch64_get_qualifier_esize (qualifiers
[0]) != 0
163 && (aarch64_get_qualifier_esize (qualifiers
[0])
164 == aarch64_get_qualifier_esize (qualifiers
[1]) << 1))
165 return DP_VECTOR_LONG
;
166 /* e.g. v.8h, v.8h, v.8b. */
167 if (qualifiers
[0] == qualifiers
[1]
168 && vector_qualifier_p (qualifiers
[2]) == TRUE
169 && aarch64_get_qualifier_esize (qualifiers
[0]) != 0
170 && (aarch64_get_qualifier_esize (qualifiers
[0])
171 == aarch64_get_qualifier_esize (qualifiers
[2]) << 1)
172 && (aarch64_get_qualifier_esize (qualifiers
[0])
173 == aarch64_get_qualifier_esize (qualifiers
[1])))
174 return DP_VECTOR_WIDE
;
176 else if (fp_qualifier_p (qualifiers
[0]) == TRUE
)
178 /* e.g. SADDLV <V><d>, <Vn>.<T>. */
179 if (vector_qualifier_p (qualifiers
[1]) == TRUE
180 && qualifiers
[2] == AARCH64_OPND_QLF_NIL
)
181 return DP_VECTOR_ACROSS_LANES
;
187 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
188 the AdvSIMD instructions. */
189 /* N.B. it is possible to do some optimization that doesn't call
190 get_data_pattern each time when we need to select an operand. We can
191 either buffer the caculated the result or statically generate the data,
192 however, it is not obvious that the optimization will bring significant
196 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode
*opcode
)
199 significant_operand_index
[get_data_pattern (opcode
->qualifiers_list
[0])];
202 const aarch64_field fields
[] =
205 { 0, 4 }, /* cond2: condition in truly conditional-executed inst. */
206 { 0, 4 }, /* nzcv: flag bit specifier, encoded in the "nzcv" field. */
207 { 5, 5 }, /* defgh: d:e:f:g:h bits in AdvSIMD modified immediate. */
208 { 16, 3 }, /* abc: a:b:c bits in AdvSIMD modified immediate. */
209 { 5, 19 }, /* imm19: e.g. in CBZ. */
210 { 5, 19 }, /* immhi: e.g. in ADRP. */
211 { 29, 2 }, /* immlo: e.g. in ADRP. */
212 { 22, 2 }, /* size: in most AdvSIMD and floating-point instructions. */
213 { 10, 2 }, /* vldst_size: size field in the AdvSIMD load/store inst. */
214 { 29, 1 }, /* op: in AdvSIMD modified immediate instructions. */
215 { 30, 1 }, /* Q: in most AdvSIMD instructions. */
216 { 0, 5 }, /* Rt: in load/store instructions. */
217 { 0, 5 }, /* Rd: in many integer instructions. */
218 { 5, 5 }, /* Rn: in many integer instructions. */
219 { 10, 5 }, /* Rt2: in load/store pair instructions. */
220 { 10, 5 }, /* Ra: in fp instructions. */
221 { 5, 3 }, /* op2: in the system instructions. */
222 { 8, 4 }, /* CRm: in the system instructions. */
223 { 12, 4 }, /* CRn: in the system instructions. */
224 { 16, 3 }, /* op1: in the system instructions. */
225 { 19, 2 }, /* op0: in the system instructions. */
226 { 10, 3 }, /* imm3: in add/sub extended reg instructions. */
227 { 12, 4 }, /* cond: condition flags as a source operand. */
228 { 12, 4 }, /* opcode: in advsimd load/store instructions. */
229 { 12, 4 }, /* cmode: in advsimd modified immediate instructions. */
230 { 13, 3 }, /* asisdlso_opcode: opcode in advsimd ld/st single element. */
231 { 13, 2 }, /* len: in advsimd tbl/tbx instructions. */
232 { 16, 5 }, /* Rm: in ld/st reg offset and some integer inst. */
233 { 16, 5 }, /* Rs: in load/store exclusive instructions. */
234 { 13, 3 }, /* option: in ld/st reg offset + add/sub extended reg inst. */
235 { 12, 1 }, /* S: in load/store reg offset instructions. */
236 { 21, 2 }, /* hw: in move wide constant instructions. */
237 { 22, 2 }, /* opc: in load/store reg offset instructions. */
238 { 23, 1 }, /* opc1: in load/store reg offset instructions. */
239 { 22, 2 }, /* shift: in add/sub reg/imm shifted instructions. */
240 { 22, 2 }, /* type: floating point type field in fp data inst. */
241 { 30, 2 }, /* ldst_size: size field in ld/st reg offset inst. */
242 { 10, 6 }, /* imm6: in add/sub reg shifted instructions. */
243 { 15, 6 }, /* imm6_2: in rmif instructions. */
244 { 11, 4 }, /* imm4: in advsimd ext and advsimd ins instructions. */
245 { 0, 4 }, /* imm4_2: in rmif instructions. */
246 { 16, 5 }, /* imm5: in conditional compare (immediate) instructions. */
247 { 15, 7 }, /* imm7: in load/store pair pre/post index instructions. */
248 { 13, 8 }, /* imm8: in floating-point scalar move immediate inst. */
249 { 12, 9 }, /* imm9: in load/store pre/post index instructions. */
250 { 10, 12 }, /* imm12: in ld/st unsigned imm or add/sub shifted inst. */
251 { 5, 14 }, /* imm14: in test bit and branch instructions. */
252 { 5, 16 }, /* imm16: in exception instructions. */
253 { 0, 26 }, /* imm26: in unconditional branch instructions. */
254 { 10, 6 }, /* imms: in bitfield and logical immediate instructions. */
255 { 16, 6 }, /* immr: in bitfield and logical immediate instructions. */
256 { 16, 3 }, /* immb: in advsimd shift by immediate instructions. */
257 { 19, 4 }, /* immh: in advsimd shift by immediate instructions. */
258 { 22, 1 }, /* S: in LDRAA and LDRAB instructions. */
259 { 22, 1 }, /* N: in logical (immediate) instructions. */
260 { 11, 1 }, /* index: in ld/st inst deciding the pre/post-index. */
261 { 24, 1 }, /* index2: in ld/st pair inst deciding the pre/post-index. */
262 { 31, 1 }, /* sf: in integer data processing instructions. */
263 { 30, 1 }, /* lse_size: in LSE extension atomic instructions. */
264 { 11, 1 }, /* H: in advsimd scalar x indexed element instructions. */
265 { 21, 1 }, /* L: in advsimd scalar x indexed element instructions. */
266 { 20, 1 }, /* M: in advsimd scalar x indexed element instructions. */
267 { 31, 1 }, /* b5: in the test bit and branch instructions. */
268 { 19, 5 }, /* b40: in the test bit and branch instructions. */
269 { 10, 6 }, /* scale: in the fixed-point scalar to fp converting inst. */
270 { 4, 1 }, /* SVE_M_4: Merge/zero select, bit 4. */
271 { 14, 1 }, /* SVE_M_14: Merge/zero select, bit 14. */
272 { 16, 1 }, /* SVE_M_16: Merge/zero select, bit 16. */
273 { 17, 1 }, /* SVE_N: SVE equivalent of N. */
274 { 0, 4 }, /* SVE_Pd: p0-p15, bits [3,0]. */
275 { 10, 3 }, /* SVE_Pg3: p0-p7, bits [12,10]. */
276 { 5, 4 }, /* SVE_Pg4_5: p0-p15, bits [8,5]. */
277 { 10, 4 }, /* SVE_Pg4_10: p0-p15, bits [13,10]. */
278 { 16, 4 }, /* SVE_Pg4_16: p0-p15, bits [19,16]. */
279 { 16, 4 }, /* SVE_Pm: p0-p15, bits [19,16]. */
280 { 5, 4 }, /* SVE_Pn: p0-p15, bits [8,5]. */
281 { 0, 4 }, /* SVE_Pt: p0-p15, bits [3,0]. */
282 { 5, 5 }, /* SVE_Rm: SVE alternative position for Rm. */
283 { 16, 5 }, /* SVE_Rn: SVE alternative position for Rn. */
284 { 0, 5 }, /* SVE_Vd: Scalar SIMD&FP register, bits [4,0]. */
285 { 5, 5 }, /* SVE_Vm: Scalar SIMD&FP register, bits [9,5]. */
286 { 5, 5 }, /* SVE_Vn: Scalar SIMD&FP register, bits [9,5]. */
287 { 5, 5 }, /* SVE_Za_5: SVE vector register, bits [9,5]. */
288 { 16, 5 }, /* SVE_Za_16: SVE vector register, bits [20,16]. */
289 { 0, 5 }, /* SVE_Zd: SVE vector register. bits [4,0]. */
290 { 5, 5 }, /* SVE_Zm_5: SVE vector register, bits [9,5]. */
291 { 16, 5 }, /* SVE_Zm_16: SVE vector register, bits [20,16]. */
292 { 5, 5 }, /* SVE_Zn: SVE vector register, bits [9,5]. */
293 { 0, 5 }, /* SVE_Zt: SVE vector register, bits [4,0]. */
294 { 5, 1 }, /* SVE_i1: single-bit immediate. */
295 { 22, 1 }, /* SVE_i3h: high bit of 3-bit immediate. */
296 { 16, 3 }, /* SVE_imm3: 3-bit immediate field. */
297 { 16, 4 }, /* SVE_imm4: 4-bit immediate field. */
298 { 5, 5 }, /* SVE_imm5: 5-bit immediate field. */
299 { 16, 5 }, /* SVE_imm5b: secondary 5-bit immediate field. */
300 { 16, 6 }, /* SVE_imm6: 6-bit immediate field. */
301 { 14, 7 }, /* SVE_imm7: 7-bit immediate field. */
302 { 5, 8 }, /* SVE_imm8: 8-bit immediate field. */
303 { 5, 9 }, /* SVE_imm9: 9-bit immediate field. */
304 { 11, 6 }, /* SVE_immr: SVE equivalent of immr. */
305 { 5, 6 }, /* SVE_imms: SVE equivalent of imms. */
306 { 10, 2 }, /* SVE_msz: 2-bit shift amount for ADR. */
307 { 5, 5 }, /* SVE_pattern: vector pattern enumeration. */
308 { 0, 4 }, /* SVE_prfop: prefetch operation for SVE PRF[BHWD]. */
309 { 16, 1 }, /* SVE_rot1: 1-bit rotation amount. */
310 { 10, 2 }, /* SVE_rot2: 2-bit rotation amount. */
311 { 22, 1 }, /* SVE_sz: 1-bit element size select. */
312 { 16, 4 }, /* SVE_tsz: triangular size select. */
313 { 22, 2 }, /* SVE_tszh: triangular size select high, bits [23,22]. */
314 { 8, 2 }, /* SVE_tszl_8: triangular size select low, bits [9,8]. */
315 { 19, 2 }, /* SVE_tszl_19: triangular size select low, bits [20,19]. */
316 { 14, 1 }, /* SVE_xs_14: UXTW/SXTW select (bit 14). */
317 { 22, 1 }, /* SVE_xs_22: UXTW/SXTW select (bit 22). */
318 { 11, 2 }, /* rotate1: FCMLA immediate rotate. */
319 { 13, 2 }, /* rotate2: Indexed element FCMLA immediate rotate. */
320 { 12, 1 }, /* rotate3: FCADD immediate rotate. */
321 { 12, 2 }, /* SM3: Indexed element SM3 2 bits index immediate. */
324 enum aarch64_operand_class
325 aarch64_get_operand_class (enum aarch64_opnd type
)
327 return aarch64_operands
[type
].op_class
;
331 aarch64_get_operand_name (enum aarch64_opnd type
)
333 return aarch64_operands
[type
].name
;
336 /* Get operand description string.
337 This is usually for the diagnosis purpose. */
339 aarch64_get_operand_desc (enum aarch64_opnd type
)
341 return aarch64_operands
[type
].desc
;
344 /* Table of all conditional affixes. */
/* NOTE(review): the opening brace, the entries for encodings 0x6-0x7 and
   0xc-0xf, and the closing brace are missing from this extract — restore
   them from the original source before building.  */
345 const aarch64_cond aarch64_conds
[16] =
347 {{"eq", "none"}, 0x0},
348 {{"ne", "any"}, 0x1},
349 {{"cs", "hs", "nlast"}, 0x2},
350 {{"cc", "lo", "ul", "last"}, 0x3},
351 {{"mi", "first"}, 0x4},
352 {{"pl", "nfrst"}, 0x5},
355 {{"hi", "pmore"}, 0x8},
356 {{"ls", "plast"}, 0x9},
357 {{"ge", "tcont"}, 0xa},
358 {{"lt", "tstop"}, 0xb},
366 get_cond_from_value (aarch64_insn value
)
369 return &aarch64_conds
[(unsigned int) value
];
373 get_inverted_cond (const aarch64_cond
*cond
)
375 return &aarch64_conds
[cond
->value
^ 0x1];
378 /* Table describing the operand extension/shifting operators; indexed by
379 enum aarch64_modifier_kind.
381 The value column provides the most common values for encoding modifiers,
382 which enables table-driven encoding/decoding for the modifiers. */
383 const struct aarch64_name_value_pair aarch64_operand_modifiers
[] =
/* NOTE(review): the initializer entries (presumably the extend and shift
   operator name/value pairs) are missing from this extract — restore them
   from the original source before building.  */
404 enum aarch64_modifier_kind
405 aarch64_get_operand_modifier (const struct aarch64_name_value_pair
*desc
)
407 return desc
- aarch64_operand_modifiers
;
411 aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind
)
413 return aarch64_operand_modifiers
[kind
].value
;
416 enum aarch64_modifier_kind
417 aarch64_get_operand_modifier_from_value (aarch64_insn value
,
418 bfd_boolean extend_p
)
420 if (extend_p
== TRUE
)
421 return AARCH64_MOD_UXTB
+ value
;
423 return AARCH64_MOD_LSL
- value
;
427 aarch64_extend_operator_p (enum aarch64_modifier_kind kind
)
429 return (kind
> AARCH64_MOD_LSL
&& kind
<= AARCH64_MOD_SXTX
)
433 static inline bfd_boolean
434 aarch64_shift_operator_p (enum aarch64_modifier_kind kind
)
436 return (kind
>= AARCH64_MOD_ROR
&& kind
<= AARCH64_MOD_LSL
)
440 const struct aarch64_name_value_pair aarch64_barrier_options
[16] =
/* NOTE(review): the 16 initializer entries are missing from this
   extract — restore them from the original source before building.  */
460 /* Table describing the operands supported by the aliases of the HINT
463 The name column is the operand that is accepted for the alias. The value
464 column is the hint number of the alias. The list of operands is terminated
465 by NULL in the name column. */
467 const struct aarch64_name_value_pair aarch64_hint_options
[] =
469 /* BTI. This is also the F_DEFAULT entry for AARCH64_OPND_BTI_TARGET. */
470 { " ", HINT_ENCODE (HINT_OPD_F_NOPRINT
, 0x20) },
471 { "csync", HINT_OPD_CSYNC
}, /* PSB CSYNC. */
472 { "c", HINT_OPD_C
}, /* BTI C. */
473 { "j", HINT_OPD_J
}, /* BTI J. */
474 { "jc", HINT_OPD_JC
}, /* BTI JC. */
475 { NULL
, HINT_OPD_NULL
},
478 /* op -> op: load = 0 instruction = 1 store = 2
480 t -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1 */
481 #define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
/* NOTE(review): the opening brace, the reserved slots between the
   PLD/PLI/PST groups, and the closing brace are missing from this
   extract — restore them from the original source before building.  */
482 const struct aarch64_name_value_pair aarch64_prfops
[32] =
484 { "pldl1keep", B(0, 1, 0) },
485 { "pldl1strm", B(0, 1, 1) },
486 { "pldl2keep", B(0, 2, 0) },
487 { "pldl2strm", B(0, 2, 1) },
488 { "pldl3keep", B(0, 3, 0) },
489 { "pldl3strm", B(0, 3, 1) },
492 { "plil1keep", B(1, 1, 0) },
493 { "plil1strm", B(1, 1, 1) },
494 { "plil2keep", B(1, 2, 0) },
495 { "plil2strm", B(1, 2, 1) },
496 { "plil3keep", B(1, 3, 0) },
497 { "plil3strm", B(1, 3, 1) },
500 { "pstl1keep", B(2, 1, 0) },
501 { "pstl1strm", B(2, 1, 1) },
502 { "pstl2keep", B(2, 2, 0) },
503 { "pstl2strm", B(2, 2, 1) },
504 { "pstl3keep", B(2, 3, 0) },
505 { "pstl3strm", B(2, 3, 1) },
/* Utilities on value constraint.  */

/* Return 1 if VALUE lies in the closed interval [LOW, HIGH], 0 otherwise.  */
static inline int
value_in_range_p (int64_t value, int low, int high)
{
  return (value >= low && value <= high) ? 1 : 0;
}
/* Return true if VALUE is a multiple of ALIGN (ALIGN must be non-zero).  */
static inline int
value_aligned_p (int64_t value, int align)
{
  return (value % align) == 0;
}
534 /* A signed value fits in a field. */
536 value_fit_signed_field_p (int64_t value
, unsigned width
)
539 if (width
< sizeof (value
) * 8)
541 int64_t lim
= (int64_t)1 << (width
- 1);
542 if (value
>= -lim
&& value
< lim
)
548 /* An unsigned value fits in a field. */
550 value_fit_unsigned_field_p (int64_t value
, unsigned width
)
553 if (width
< sizeof (value
) * 8)
555 int64_t lim
= (int64_t)1 << width
;
556 if (value
>= 0 && value
< lim
)
562 /* Return 1 if OPERAND is SP or WSP. */
564 aarch64_stack_pointer_p (const aarch64_opnd_info
*operand
)
566 return ((aarch64_get_operand_class (operand
->type
)
567 == AARCH64_OPND_CLASS_INT_REG
)
568 && operand_maybe_stack_pointer (aarch64_operands
+ operand
->type
)
569 && operand
->reg
.regno
== 31);
572 /* Return 1 if OPERAND is XZR or WZP. */
574 aarch64_zero_register_p (const aarch64_opnd_info
*operand
)
576 return ((aarch64_get_operand_class (operand
->type
)
577 == AARCH64_OPND_CLASS_INT_REG
)
578 && !operand_maybe_stack_pointer (aarch64_operands
+ operand
->type
)
579 && operand
->reg
.regno
== 31);
582 /* Return true if the operand *OPERAND that has the operand code
583 OPERAND->TYPE and been qualified by OPERAND->QUALIFIER can be also
584 qualified by the qualifier TARGET. */
587 operand_also_qualified_p (const struct aarch64_opnd_info
*operand
,
588 aarch64_opnd_qualifier_t target
)
590 switch (operand
->qualifier
)
592 case AARCH64_OPND_QLF_W
:
593 if (target
== AARCH64_OPND_QLF_WSP
&& aarch64_stack_pointer_p (operand
))
596 case AARCH64_OPND_QLF_X
:
597 if (target
== AARCH64_OPND_QLF_SP
&& aarch64_stack_pointer_p (operand
))
600 case AARCH64_OPND_QLF_WSP
:
601 if (target
== AARCH64_OPND_QLF_W
602 && operand_maybe_stack_pointer (aarch64_operands
+ operand
->type
))
605 case AARCH64_OPND_QLF_SP
:
606 if (target
== AARCH64_OPND_QLF_X
607 && operand_maybe_stack_pointer (aarch64_operands
+ operand
->type
))
617 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
618 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
620 Return NIL if more than one expected qualifiers are found. */
622 aarch64_opnd_qualifier_t
623 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t
*qseq_list
,
625 const aarch64_opnd_qualifier_t known_qlf
,
632 When the known qualifier is NIL, we have to assume that there is only
633 one qualifier sequence in the *QSEQ_LIST and return the corresponding
634 qualifier directly. One scenario is that for instruction
635 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
636 which has only one possible valid qualifier sequence
638 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
639 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
641 Because the qualifier NIL has dual roles in the qualifier sequence:
642 it can mean no qualifier for the operand, or the qualifer sequence is
643 not in use (when all qualifiers in the sequence are NILs), we have to
644 handle this special case here. */
645 if (known_qlf
== AARCH64_OPND_NIL
)
647 assert (qseq_list
[0][known_idx
] == AARCH64_OPND_NIL
);
648 return qseq_list
[0][idx
];
651 for (i
= 0, saved_i
= -1; i
< AARCH64_MAX_QLF_SEQ_NUM
; ++i
)
653 if (qseq_list
[i
][known_idx
] == known_qlf
)
656 /* More than one sequences are found to have KNOWN_QLF at
658 return AARCH64_OPND_NIL
;
663 return qseq_list
[saved_i
][idx
];
666 enum operand_qualifier_kind
674 /* Operand qualifier description. */
675 struct operand_qualifier_data
677 /* The usage of the three data fields depends on the qualifier kind. */
684 enum operand_qualifier_kind kind
;
687 /* Indexed by the operand qualifier enumerators. */
688 struct operand_qualifier_data aarch64_opnd_qualifiers
[] =
690 {0, 0, 0, "NIL", OQK_NIL
},
692 /* Operand variant qualifiers.
694 element size, number of elements and common value for encoding. */
696 {4, 1, 0x0, "w", OQK_OPD_VARIANT
},
697 {8, 1, 0x1, "x", OQK_OPD_VARIANT
},
698 {4, 1, 0x0, "wsp", OQK_OPD_VARIANT
},
699 {8, 1, 0x1, "sp", OQK_OPD_VARIANT
},
701 {1, 1, 0x0, "b", OQK_OPD_VARIANT
},
702 {2, 1, 0x1, "h", OQK_OPD_VARIANT
},
703 {4, 1, 0x2, "s", OQK_OPD_VARIANT
},
704 {8, 1, 0x3, "d", OQK_OPD_VARIANT
},
705 {16, 1, 0x4, "q", OQK_OPD_VARIANT
},
706 {1, 4, 0x0, "4b", OQK_OPD_VARIANT
},
708 {1, 4, 0x0, "4b", OQK_OPD_VARIANT
},
709 {1, 8, 0x0, "8b", OQK_OPD_VARIANT
},
710 {1, 16, 0x1, "16b", OQK_OPD_VARIANT
},
711 {2, 2, 0x0, "2h", OQK_OPD_VARIANT
},
712 {2, 4, 0x2, "4h", OQK_OPD_VARIANT
},
713 {2, 8, 0x3, "8h", OQK_OPD_VARIANT
},
714 {4, 2, 0x4, "2s", OQK_OPD_VARIANT
},
715 {4, 4, 0x5, "4s", OQK_OPD_VARIANT
},
716 {8, 1, 0x6, "1d", OQK_OPD_VARIANT
},
717 {8, 2, 0x7, "2d", OQK_OPD_VARIANT
},
718 {16, 1, 0x8, "1q", OQK_OPD_VARIANT
},
720 {0, 0, 0, "z", OQK_OPD_VARIANT
},
721 {0, 0, 0, "m", OQK_OPD_VARIANT
},
723 /* Qualifiers constraining the value range.
725 Lower bound, higher bound, unused. */
727 {0, 15, 0, "CR", OQK_VALUE_IN_RANGE
},
728 {0, 7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE
},
729 {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE
},
730 {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE
},
731 {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE
},
732 {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE
},
733 {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE
},
735 /* Qualifiers for miscellaneous purpose.
737 unused, unused and unused. */
742 {0, 0, 0, "retrieving", 0},
745 static inline bfd_boolean
746 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier
)
748 return (aarch64_opnd_qualifiers
[qualifier
].kind
== OQK_OPD_VARIANT
)
752 static inline bfd_boolean
753 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier
)
755 return (aarch64_opnd_qualifiers
[qualifier
].kind
== OQK_VALUE_IN_RANGE
)
760 aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier
)
762 return aarch64_opnd_qualifiers
[qualifier
].desc
;
765 /* Given an operand qualifier, return the expected data element size
766 of a qualified operand. */
768 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier
)
770 assert (operand_variant_qualifier_p (qualifier
) == TRUE
);
771 return aarch64_opnd_qualifiers
[qualifier
].data0
;
775 aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier
)
777 assert (operand_variant_qualifier_p (qualifier
) == TRUE
);
778 return aarch64_opnd_qualifiers
[qualifier
].data1
;
782 aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier
)
784 assert (operand_variant_qualifier_p (qualifier
) == TRUE
);
785 return aarch64_opnd_qualifiers
[qualifier
].data2
;
789 get_lower_bound (aarch64_opnd_qualifier_t qualifier
)
791 assert (qualifier_value_in_range_constraint_p (qualifier
) == TRUE
);
792 return aarch64_opnd_qualifiers
[qualifier
].data0
;
796 get_upper_bound (aarch64_opnd_qualifier_t qualifier
)
798 assert (qualifier_value_in_range_constraint_p (qualifier
) == TRUE
);
799 return aarch64_opnd_qualifiers
[qualifier
].data1
;
804 aarch64_verbose (const char *str
, ...)
815 dump_qualifier_sequence (const aarch64_opnd_qualifier_t
*qualifier
)
819 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
, ++qualifier
)
820 printf ("%s,", aarch64_get_qualifier_name (*qualifier
));
825 dump_match_qualifiers (const struct aarch64_opnd_info
*opnd
,
826 const aarch64_opnd_qualifier_t
*qualifier
)
829 aarch64_opnd_qualifier_t curr
[AARCH64_MAX_OPND_NUM
];
831 aarch64_verbose ("dump_match_qualifiers:");
832 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
833 curr
[i
] = opnd
[i
].qualifier
;
834 dump_qualifier_sequence (curr
);
835 aarch64_verbose ("against");
836 dump_qualifier_sequence (qualifier
);
838 #endif /* DEBUG_AARCH64 */
840 /* This function checks if the given instruction INSN is a destructive
841 instruction based on the usage of the registers. It does not recognize
842 unary destructive instructions. */
844 aarch64_is_destructive_by_operands (const aarch64_opcode
*opcode
)
847 const enum aarch64_opnd
*opnds
= opcode
->operands
;
849 if (opnds
[0] == AARCH64_OPND_NIL
)
852 while (opnds
[++i
] != AARCH64_OPND_NIL
)
853 if (opnds
[i
] == opnds
[0])
859 /* TODO improve this, we can have an extra field at the runtime to
860 store the number of operands rather than calculating it every time. */
863 aarch64_num_of_operands (const aarch64_opcode
*opcode
)
866 const enum aarch64_opnd
*opnds
= opcode
->operands
;
867 while (opnds
[i
++] != AARCH64_OPND_NIL
)
870 assert (i
>= 0 && i
<= AARCH64_MAX_OPND_NUM
);
874 /* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
875 If succeeds, fill the found sequence in *RET, return 1; otherwise return 0.
877 N.B. on the entry, it is very likely that only some operands in *INST
878 have had their qualifiers been established.
880 If STOP_AT is not -1, the function will only try to match
881 the qualifier sequence for operands before and including the operand
882 of index STOP_AT; and on success *RET will only be filled with the first
883 (STOP_AT+1) qualifiers.
885 A couple examples of the matching algorithm:
893 Apart from serving the main encoding routine, this can also be called
894 during or after the operand decoding. */
897 aarch64_find_best_match (const aarch64_inst
*inst
,
898 const aarch64_opnd_qualifier_seq_t
*qualifiers_list
,
899 int stop_at
, aarch64_opnd_qualifier_t
*ret
)
903 const aarch64_opnd_qualifier_t
*qualifiers
;
905 num_opnds
= aarch64_num_of_operands (inst
->opcode
);
908 DEBUG_TRACE ("SUCCEED: no operand");
912 if (stop_at
< 0 || stop_at
>= num_opnds
)
913 stop_at
= num_opnds
- 1;
915 /* For each pattern. */
916 for (i
= 0; i
< AARCH64_MAX_QLF_SEQ_NUM
; ++i
, ++qualifiers_list
)
919 qualifiers
= *qualifiers_list
;
921 /* Start as positive. */
924 DEBUG_TRACE ("%d", i
);
927 dump_match_qualifiers (inst
->operands
, qualifiers
);
930 /* Most opcodes has much fewer patterns in the list.
931 First NIL qualifier indicates the end in the list. */
932 if (empty_qualifier_sequence_p (qualifiers
) == TRUE
)
934 DEBUG_TRACE_IF (i
== 0, "SUCCEED: empty qualifier list");
940 for (j
= 0; j
< num_opnds
&& j
<= stop_at
; ++j
, ++qualifiers
)
942 if (inst
->operands
[j
].qualifier
== AARCH64_OPND_QLF_NIL
)
944 /* Either the operand does not have qualifier, or the qualifier
945 for the operand needs to be deduced from the qualifier
947 In the latter case, any constraint checking related with
948 the obtained qualifier should be done later in
949 operand_general_constraint_met_p. */
952 else if (*qualifiers
!= inst
->operands
[j
].qualifier
)
954 /* Unless the target qualifier can also qualify the operand
955 (which has already had a non-nil qualifier), non-equal
956 qualifiers are generally un-matched. */
957 if (operand_also_qualified_p (inst
->operands
+ j
, *qualifiers
))
966 continue; /* Equal qualifiers are certainly matched. */
969 /* Qualifiers established. */
976 /* Fill the result in *RET. */
978 qualifiers
= *qualifiers_list
;
980 DEBUG_TRACE ("complete qualifiers using list %d", i
);
983 dump_qualifier_sequence (qualifiers
);
986 for (j
= 0; j
<= stop_at
; ++j
, ++qualifiers
)
987 ret
[j
] = *qualifiers
;
988 for (; j
< AARCH64_MAX_OPND_NUM
; ++j
)
989 ret
[j
] = AARCH64_OPND_QLF_NIL
;
991 DEBUG_TRACE ("SUCCESS");
995 DEBUG_TRACE ("FAIL");
999 /* Operand qualifier matching and resolving.
1001 Return 1 if the operand qualifier(s) in *INST match one of the qualifier
1002 sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
1004 if UPDATE_P == TRUE, update the qualifier(s) in *INST after the matching
1008 match_operands_qualifier (aarch64_inst
*inst
, bfd_boolean update_p
)
1011 aarch64_opnd_qualifier_seq_t qualifiers
;
1013 if (!aarch64_find_best_match (inst
, inst
->opcode
->qualifiers_list
, -1,
1016 DEBUG_TRACE ("matching FAIL");
1020 if (inst
->opcode
->flags
& F_STRICT
)
1022 /* Require an exact qualifier match, even for NIL qualifiers. */
1023 nops
= aarch64_num_of_operands (inst
->opcode
);
1024 for (i
= 0; i
< nops
; ++i
)
1025 if (inst
->operands
[i
].qualifier
!= qualifiers
[i
])
1029 /* Update the qualifiers. */
1030 if (update_p
== TRUE
)
1031 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
1033 if (inst
->opcode
->operands
[i
] == AARCH64_OPND_NIL
)
1035 DEBUG_TRACE_IF (inst
->operands
[i
].qualifier
!= qualifiers
[i
],
1036 "update %s with %s for operand %d",
1037 aarch64_get_qualifier_name (inst
->operands
[i
].qualifier
),
1038 aarch64_get_qualifier_name (qualifiers
[i
]), i
);
1039 inst
->operands
[i
].qualifier
= qualifiers
[i
];
1042 DEBUG_TRACE ("matching SUCCESS");
1046 /* Return TRUE if VALUE is a wide constant that can be moved into a general
1049 IS32 indicates whether value is a 32-bit immediate or not.
1050 If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
1051 amount will be returned in *SHIFT_AMOUNT. */
1054 aarch64_wide_constant_p (int64_t value
, int is32
, unsigned int *shift_amount
)
1058 DEBUG_TRACE ("enter with 0x%" PRIx64
"(%" PRIi64
")", value
, value
);
1062 /* Allow all zeros or all ones in top 32-bits, so that
1063 32-bit constant expressions like ~0x80000000 are
1065 uint64_t ext
= value
;
1066 if (ext
>> 32 != 0 && ext
>> 32 != (uint64_t) 0xffffffff)
1067 /* Immediate out of range. */
1069 value
&= (int64_t) 0xffffffff;
1072 /* first, try movz then movn */
1074 if ((value
& ((int64_t) 0xffff << 0)) == value
)
1076 else if ((value
& ((int64_t) 0xffff << 16)) == value
)
1078 else if (!is32
&& (value
& ((int64_t) 0xffff << 32)) == value
)
1080 else if (!is32
&& (value
& ((int64_t) 0xffff << 48)) == value
)
1085 DEBUG_TRACE ("exit FALSE with 0x%" PRIx64
"(%" PRIi64
")", value
, value
);
1089 if (shift_amount
!= NULL
)
1090 *shift_amount
= amount
;
1092 DEBUG_TRACE ("exit TRUE with amount %d", amount
);
1097 /* Build the accepted values for immediate logical SIMD instructions.
1099 The standard encodings of the immediate value are:
1100 N imms immr SIMD size R S
1101 1 ssssss rrrrrr 64 UInt(rrrrrr) UInt(ssssss)
1102 0 0sssss 0rrrrr 32 UInt(rrrrr) UInt(sssss)
1103 0 10ssss 00rrrr 16 UInt(rrrr) UInt(ssss)
1104 0 110sss 000rrr 8 UInt(rrr) UInt(sss)
1105 0 1110ss 0000rr 4 UInt(rr) UInt(ss)
1106 0 11110s 00000r 2 UInt(r) UInt(s)
1107 where all-ones value of S is reserved.
1109 Let's call E the SIMD size.
1111 The immediate value is: S+1 bits '1' rotated to the right by R.
1113 The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
1114 (remember S != E - 1). */
1116 #define TOTAL_IMM_NB 5334
1121 aarch64_insn encoding
;
1122 } simd_imm_encoding
;
1124 static simd_imm_encoding simd_immediates
[TOTAL_IMM_NB
];
1127 simd_imm_encoding_cmp(const void *i1
, const void *i2
)
1129 const simd_imm_encoding
*imm1
= (const simd_imm_encoding
*)i1
;
1130 const simd_imm_encoding
*imm2
= (const simd_imm_encoding
*)i2
;
1132 if (imm1
->imm
< imm2
->imm
)
1134 if (imm1
->imm
> imm2
->imm
)
/* immediate bitfield standard encoding
   imm13<12> imm13<5:0> imm13<11:6> SIMD size R      S
   1         ssssss     rrrrrr      64        rrrrrr ssssss
   0         0sssss     0rrrrr      32        rrrrr  sssss
   0         10ssss     00rrrr      16        rrrr   ssss
   0         110sss     000rrr      8         rrr    sss
   0         1110ss     0000rr      4         rr     ss
   0         11110s     00000r      2         r      s  */
static inline int
encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
{
  /* Pack as N:immr:imms = is64:r:s into a 13-bit value.  */
  return (is64 << 12) | (r << 6) | s;
}
1154 build_immediate_table (void)
1156 uint32_t log_e
, e
, s
, r
, s_mask
;
1162 for (log_e
= 1; log_e
<= 6; log_e
++)
1164 /* Get element size. */
1169 mask
= 0xffffffffffffffffull
;
1175 mask
= (1ull << e
) - 1;
1177 1 ((1 << 4) - 1) << 2 = 111100
1178 2 ((1 << 3) - 1) << 3 = 111000
1179 3 ((1 << 2) - 1) << 4 = 110000
1180 4 ((1 << 1) - 1) << 5 = 100000
1181 5 ((1 << 0) - 1) << 6 = 000000 */
1182 s_mask
= ((1u << (5 - log_e
)) - 1) << (log_e
+ 1);
1184 for (s
= 0; s
< e
- 1; s
++)
1185 for (r
= 0; r
< e
; r
++)
1187 /* s+1 consecutive bits to 1 (s < 63) */
1188 imm
= (1ull << (s
+ 1)) - 1;
1189 /* rotate right by r */
1191 imm
= (imm
>> r
) | ((imm
<< (e
- r
)) & mask
);
1192 /* replicate the constant depending on SIMD size */
1195 case 1: imm
= (imm
<< 2) | imm
;
1197 case 2: imm
= (imm
<< 4) | imm
;
1199 case 3: imm
= (imm
<< 8) | imm
;
1201 case 4: imm
= (imm
<< 16) | imm
;
1203 case 5: imm
= (imm
<< 32) | imm
;
1208 simd_immediates
[nb_imms
].imm
= imm
;
1209 simd_immediates
[nb_imms
].encoding
=
1210 encode_immediate_bitfield(is64
, s
| s_mask
, r
);
1214 assert (nb_imms
== TOTAL_IMM_NB
);
1215 qsort(simd_immediates
, nb_imms
,
1216 sizeof(simd_immediates
[0]), simd_imm_encoding_cmp
);
1219 /* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
1220 be accepted by logical (immediate) instructions
1221 e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1223 ESIZE is the number of bytes in the decoded immediate value.
1224 If ENCODING is not NULL, on the return of TRUE, the standard encoding for
1225 VALUE will be returned in *ENCODING. */
1228 aarch64_logical_immediate_p (uint64_t value
, int esize
, aarch64_insn
*encoding
)
1230 simd_imm_encoding imm_enc
;
1231 const simd_imm_encoding
*imm_encoding
;
1232 static bfd_boolean initialized
= FALSE
;
1236 DEBUG_TRACE ("enter with 0x%" PRIx64
"(%" PRIi64
"), esize: %d", value
,
1241 build_immediate_table ();
1245 /* Allow all zeros or all ones in top bits, so that
1246 constant expressions like ~1 are permitted. */
1247 upper
= (uint64_t) -1 << (esize
* 4) << (esize
* 4);
1248 if ((value
& ~upper
) != value
&& (value
| upper
) != value
)
1251 /* Replicate to a full 64-bit value. */
1253 for (i
= esize
* 8; i
< 64; i
*= 2)
1254 value
|= (value
<< i
);
1256 imm_enc
.imm
= value
;
1257 imm_encoding
= (const simd_imm_encoding
*)
1258 bsearch(&imm_enc
, simd_immediates
, TOTAL_IMM_NB
,
1259 sizeof(simd_immediates
[0]), simd_imm_encoding_cmp
);
1260 if (imm_encoding
== NULL
)
1262 DEBUG_TRACE ("exit with FALSE");
1265 if (encoding
!= NULL
)
1266 *encoding
= imm_encoding
->encoding
;
1267 DEBUG_TRACE ("exit with TRUE");
/* If 64-bit immediate IMM is in the format of
   "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
   where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
   of value "abcdefgh".  Otherwise return -1.  */
int
aarch64_shrink_expanded_imm8 (uint64_t imm)
{
  int i, ret;
  uint32_t byte;

  ret = 0;
  for (i = 0; i < 8; i++)
    {
      byte = (imm >> (8 * i)) & 0xff;
      if (byte == 0xff)
	ret |= 1 << i;
      else if (byte != 0x00)
	return -1;
    }
  return ret;
}
1293 /* Utility inline functions for operand_general_constraint_met_p. */
1296 set_error (aarch64_operand_error
*mismatch_detail
,
1297 enum aarch64_operand_error_kind kind
, int idx
,
1300 if (mismatch_detail
== NULL
)
1302 mismatch_detail
->kind
= kind
;
1303 mismatch_detail
->index
= idx
;
1304 mismatch_detail
->error
= error
;
1308 set_syntax_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1311 if (mismatch_detail
== NULL
)
1313 set_error (mismatch_detail
, AARCH64_OPDE_SYNTAX_ERROR
, idx
, error
);
1317 set_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1318 int idx
, int lower_bound
, int upper_bound
,
1321 if (mismatch_detail
== NULL
)
1323 set_error (mismatch_detail
, AARCH64_OPDE_OUT_OF_RANGE
, idx
, error
);
1324 mismatch_detail
->data
[0] = lower_bound
;
1325 mismatch_detail
->data
[1] = upper_bound
;
1329 set_imm_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1330 int idx
, int lower_bound
, int upper_bound
)
1332 if (mismatch_detail
== NULL
)
1334 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1335 _("immediate value"));
1339 set_offset_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1340 int idx
, int lower_bound
, int upper_bound
)
1342 if (mismatch_detail
== NULL
)
1344 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1345 _("immediate offset"));
1349 set_regno_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1350 int idx
, int lower_bound
, int upper_bound
)
1352 if (mismatch_detail
== NULL
)
1354 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1355 _("register number"));
1359 set_elem_idx_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1360 int idx
, int lower_bound
, int upper_bound
)
1362 if (mismatch_detail
== NULL
)
1364 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1365 _("register element index"));
1369 set_sft_amount_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1370 int idx
, int lower_bound
, int upper_bound
)
1372 if (mismatch_detail
== NULL
)
1374 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1378 /* Report that the MUL modifier in operand IDX should be in the range
1379 [LOWER_BOUND, UPPER_BOUND]. */
1381 set_multiplier_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1382 int idx
, int lower_bound
, int upper_bound
)
1384 if (mismatch_detail
== NULL
)
1386 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1391 set_unaligned_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1394 if (mismatch_detail
== NULL
)
1396 set_error (mismatch_detail
, AARCH64_OPDE_UNALIGNED
, idx
, NULL
);
1397 mismatch_detail
->data
[0] = alignment
;
1401 set_reg_list_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1404 if (mismatch_detail
== NULL
)
1406 set_error (mismatch_detail
, AARCH64_OPDE_REG_LIST
, idx
, NULL
);
1407 mismatch_detail
->data
[0] = expected_num
;
1411 set_other_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1414 if (mismatch_detail
== NULL
)
1416 set_error (mismatch_detail
, AARCH64_OPDE_OTHER_ERROR
, idx
, error
);
1419 /* General constraint checking based on operand code.
1421 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1422 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1424 This function has to be called after the qualifiers for all operands
1427 Mismatching error message is returned in *MISMATCH_DETAIL upon request,
1428 i.e. when MISMATCH_DETAIL is non-NULL. This avoids the generation
1429 of error message during the disassembling where error message is not
1430 wanted. We avoid the dynamic construction of strings of error messages
1431 here (i.e. in libopcodes), as it is costly and complicated; instead, we
1432 use a combination of error code, static string and some integer data to
1433 represent an error. */
1436 operand_general_constraint_met_p (const aarch64_opnd_info
*opnds
, int idx
,
1437 enum aarch64_opnd type
,
1438 const aarch64_opcode
*opcode
,
1439 aarch64_operand_error
*mismatch_detail
)
1441 unsigned num
, modifiers
, shift
;
1443 int64_t imm
, min_value
, max_value
;
1444 uint64_t uvalue
, mask
;
1445 const aarch64_opnd_info
*opnd
= opnds
+ idx
;
1446 aarch64_opnd_qualifier_t qualifier
= opnd
->qualifier
;
1448 assert (opcode
->operands
[idx
] == opnd
->type
&& opnd
->type
== type
);
1450 switch (aarch64_operands
[type
].op_class
)
1452 case AARCH64_OPND_CLASS_INT_REG
:
1453 /* Check pair reg constraints for cas* instructions. */
1454 if (type
== AARCH64_OPND_PAIRREG
)
1456 assert (idx
== 1 || idx
== 3);
1457 if (opnds
[idx
- 1].reg
.regno
% 2 != 0)
1459 set_syntax_error (mismatch_detail
, idx
- 1,
1460 _("reg pair must start from even reg"));
1463 if (opnds
[idx
].reg
.regno
!= opnds
[idx
- 1].reg
.regno
+ 1)
1465 set_syntax_error (mismatch_detail
, idx
,
1466 _("reg pair must be contiguous"));
1472 /* <Xt> may be optional in some IC and TLBI instructions. */
1473 if (type
== AARCH64_OPND_Rt_SYS
)
1475 assert (idx
== 1 && (aarch64_get_operand_class (opnds
[0].type
)
1476 == AARCH64_OPND_CLASS_SYSTEM
));
1477 if (opnds
[1].present
1478 && !aarch64_sys_ins_reg_has_xt (opnds
[0].sysins_op
))
1480 set_other_error (mismatch_detail
, idx
, _("extraneous register"));
1483 if (!opnds
[1].present
1484 && aarch64_sys_ins_reg_has_xt (opnds
[0].sysins_op
))
1486 set_other_error (mismatch_detail
, idx
, _("missing register"));
1492 case AARCH64_OPND_QLF_WSP
:
1493 case AARCH64_OPND_QLF_SP
:
1494 if (!aarch64_stack_pointer_p (opnd
))
1496 set_other_error (mismatch_detail
, idx
,
1497 _("stack pointer register expected"));
1506 case AARCH64_OPND_CLASS_SVE_REG
:
1509 case AARCH64_OPND_SVE_Zm3_INDEX
:
1510 case AARCH64_OPND_SVE_Zm3_22_INDEX
:
1511 case AARCH64_OPND_SVE_Zm4_INDEX
:
1512 size
= get_operand_fields_width (get_operand_from_code (type
));
1513 shift
= get_operand_specific_data (&aarch64_operands
[type
]);
1514 mask
= (1 << shift
) - 1;
1515 if (opnd
->reg
.regno
> mask
)
1517 assert (mask
== 7 || mask
== 15);
1518 set_other_error (mismatch_detail
, idx
,
1520 ? _("z0-z15 expected")
1521 : _("z0-z7 expected"));
1524 mask
= (1 << (size
- shift
)) - 1;
1525 if (!value_in_range_p (opnd
->reglane
.index
, 0, mask
))
1527 set_elem_idx_out_of_range_error (mismatch_detail
, idx
, 0, mask
);
1532 case AARCH64_OPND_SVE_Zn_INDEX
:
1533 size
= aarch64_get_qualifier_esize (opnd
->qualifier
);
1534 if (!value_in_range_p (opnd
->reglane
.index
, 0, 64 / size
- 1))
1536 set_elem_idx_out_of_range_error (mismatch_detail
, idx
,
1542 case AARCH64_OPND_SVE_ZnxN
:
1543 case AARCH64_OPND_SVE_ZtxN
:
1544 if (opnd
->reglist
.num_regs
!= get_opcode_dependent_value (opcode
))
1546 set_other_error (mismatch_detail
, idx
,
1547 _("invalid register list"));
1557 case AARCH64_OPND_CLASS_PRED_REG
:
1558 if (opnd
->reg
.regno
>= 8
1559 && get_operand_fields_width (get_operand_from_code (type
)) == 3)
1561 set_other_error (mismatch_detail
, idx
, _("p0-p7 expected"));
1566 case AARCH64_OPND_CLASS_COND
:
1567 if (type
== AARCH64_OPND_COND1
1568 && (opnds
[idx
].cond
->value
& 0xe) == 0xe)
1570 /* Not allow AL or NV. */
1571 set_syntax_error (mismatch_detail
, idx
, NULL
);
1575 case AARCH64_OPND_CLASS_ADDRESS
:
1576 /* Check writeback. */
1577 switch (opcode
->iclass
)
1581 case ldstnapair_offs
:
1584 if (opnd
->addr
.writeback
== 1)
1586 set_syntax_error (mismatch_detail
, idx
,
1587 _("unexpected address writeback"));
1592 if (opnd
->addr
.writeback
== 1 && opnd
->addr
.preind
!= 1)
1594 set_syntax_error (mismatch_detail
, idx
,
1595 _("unexpected address writeback"));
1600 case ldstpair_indexed
:
1603 if (opnd
->addr
.writeback
== 0)
1605 set_syntax_error (mismatch_detail
, idx
,
1606 _("address writeback expected"));
1611 assert (opnd
->addr
.writeback
== 0);
1616 case AARCH64_OPND_ADDR_SIMM7
:
1617 /* Scaled signed 7 bits immediate offset. */
1618 /* Get the size of the data element that is accessed, which may be
1619 different from that of the source register size,
1620 e.g. in strb/ldrb. */
1621 size
= aarch64_get_qualifier_esize (opnd
->qualifier
);
1622 if (!value_in_range_p (opnd
->addr
.offset
.imm
, -64 * size
, 63 * size
))
1624 set_offset_out_of_range_error (mismatch_detail
, idx
,
1625 -64 * size
, 63 * size
);
1628 if (!value_aligned_p (opnd
->addr
.offset
.imm
, size
))
1630 set_unaligned_error (mismatch_detail
, idx
, size
);
1634 case AARCH64_OPND_ADDR_OFFSET
:
1635 case AARCH64_OPND_ADDR_SIMM9
:
1636 /* Unscaled signed 9 bits immediate offset. */
1637 if (!value_in_range_p (opnd
->addr
.offset
.imm
, -256, 255))
1639 set_offset_out_of_range_error (mismatch_detail
, idx
, -256, 255);
1644 case AARCH64_OPND_ADDR_SIMM9_2
:
1645 /* Unscaled signed 9 bits immediate offset, which has to be negative
1647 size
= aarch64_get_qualifier_esize (qualifier
);
1648 if ((value_in_range_p (opnd
->addr
.offset
.imm
, 0, 255)
1649 && !value_aligned_p (opnd
->addr
.offset
.imm
, size
))
1650 || value_in_range_p (opnd
->addr
.offset
.imm
, -256, -1))
1652 set_other_error (mismatch_detail
, idx
,
1653 _("negative or unaligned offset expected"));
1656 case AARCH64_OPND_ADDR_SIMM10
:
1657 /* Scaled signed 10 bits immediate offset. */
1658 if (!value_in_range_p (opnd
->addr
.offset
.imm
, -4096, 4088))
1660 set_offset_out_of_range_error (mismatch_detail
, idx
, -4096, 4088);
1663 if (!value_aligned_p (opnd
->addr
.offset
.imm
, 8))
1665 set_unaligned_error (mismatch_detail
, idx
, 8);
1670 case AARCH64_OPND_SIMD_ADDR_POST
:
1671 /* AdvSIMD load/store multiple structures, post-index. */
1673 if (opnd
->addr
.offset
.is_reg
)
1675 if (value_in_range_p (opnd
->addr
.offset
.regno
, 0, 30))
1679 set_other_error (mismatch_detail
, idx
,
1680 _("invalid register offset"));
1686 const aarch64_opnd_info
*prev
= &opnds
[idx
-1];
1687 unsigned num_bytes
; /* total number of bytes transferred. */
1688 /* The opcode dependent area stores the number of elements in
1689 each structure to be loaded/stored. */
1690 int is_ld1r
= get_opcode_dependent_value (opcode
) == 1;
1691 if (opcode
->operands
[0] == AARCH64_OPND_LVt_AL
)
1692 /* Special handling of loading single structure to all lane. */
1693 num_bytes
= (is_ld1r
? 1 : prev
->reglist
.num_regs
)
1694 * aarch64_get_qualifier_esize (prev
->qualifier
);
1696 num_bytes
= prev
->reglist
.num_regs
1697 * aarch64_get_qualifier_esize (prev
->qualifier
)
1698 * aarch64_get_qualifier_nelem (prev
->qualifier
);
1699 if ((int) num_bytes
!= opnd
->addr
.offset
.imm
)
1701 set_other_error (mismatch_detail
, idx
,
1702 _("invalid post-increment amount"));
1708 case AARCH64_OPND_ADDR_REGOFF
:
1709 /* Get the size of the data element that is accessed, which may be
1710 different from that of the source register size,
1711 e.g. in strb/ldrb. */
1712 size
= aarch64_get_qualifier_esize (opnd
->qualifier
);
1713 /* It is either no shift or shift by the binary logarithm of SIZE. */
1714 if (opnd
->shifter
.amount
!= 0
1715 && opnd
->shifter
.amount
!= (int)get_logsz (size
))
1717 set_other_error (mismatch_detail
, idx
,
1718 _("invalid shift amount"));
1721 /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
1723 switch (opnd
->shifter
.kind
)
1725 case AARCH64_MOD_UXTW
:
1726 case AARCH64_MOD_LSL
:
1727 case AARCH64_MOD_SXTW
:
1728 case AARCH64_MOD_SXTX
: break;
1730 set_other_error (mismatch_detail
, idx
,
1731 _("invalid extend/shift operator"));
1736 case AARCH64_OPND_ADDR_UIMM12
:
1737 imm
= opnd
->addr
.offset
.imm
;
1738 /* Get the size of the data element that is accessed, which may be
1739 different from that of the source register size,
1740 e.g. in strb/ldrb. */
1741 size
= aarch64_get_qualifier_esize (qualifier
);
1742 if (!value_in_range_p (opnd
->addr
.offset
.imm
, 0, 4095 * size
))
1744 set_offset_out_of_range_error (mismatch_detail
, idx
,
1748 if (!value_aligned_p (opnd
->addr
.offset
.imm
, size
))
1750 set_unaligned_error (mismatch_detail
, idx
, size
);
1755 case AARCH64_OPND_ADDR_PCREL14
:
1756 case AARCH64_OPND_ADDR_PCREL19
:
1757 case AARCH64_OPND_ADDR_PCREL21
:
1758 case AARCH64_OPND_ADDR_PCREL26
:
1759 imm
= opnd
->imm
.value
;
1760 if (operand_need_shift_by_two (get_operand_from_code (type
)))
1762 /* The offset value in a PC-relative branch instruction is alway
1763 4-byte aligned and is encoded without the lowest 2 bits. */
1764 if (!value_aligned_p (imm
, 4))
1766 set_unaligned_error (mismatch_detail
, idx
, 4);
1769 /* Right shift by 2 so that we can carry out the following check
1773 size
= get_operand_fields_width (get_operand_from_code (type
));
1774 if (!value_fit_signed_field_p (imm
, size
))
1776 set_other_error (mismatch_detail
, idx
,
1777 _("immediate out of range"));
1782 case AARCH64_OPND_SVE_ADDR_RI_S4xVL
:
1783 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL
:
1784 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL
:
1785 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL
:
1789 assert (!opnd
->addr
.offset
.is_reg
);
1790 assert (opnd
->addr
.preind
);
1791 num
= 1 + get_operand_specific_data (&aarch64_operands
[type
]);
1794 if ((opnd
->addr
.offset
.imm
!= 0 && !opnd
->shifter
.operator_present
)
1795 || (opnd
->shifter
.operator_present
1796 && opnd
->shifter
.kind
!= AARCH64_MOD_MUL_VL
))
1798 set_other_error (mismatch_detail
, idx
,
1799 _("invalid addressing mode"));
1802 if (!value_in_range_p (opnd
->addr
.offset
.imm
, min_value
, max_value
))
1804 set_offset_out_of_range_error (mismatch_detail
, idx
,
1805 min_value
, max_value
);
1808 if (!value_aligned_p (opnd
->addr
.offset
.imm
, num
))
1810 set_unaligned_error (mismatch_detail
, idx
, num
);
1815 case AARCH64_OPND_SVE_ADDR_RI_S6xVL
:
1818 goto sve_imm_offset_vl
;
1820 case AARCH64_OPND_SVE_ADDR_RI_S9xVL
:
1823 goto sve_imm_offset_vl
;
1825 case AARCH64_OPND_SVE_ADDR_RI_U6
:
1826 case AARCH64_OPND_SVE_ADDR_RI_U6x2
:
1827 case AARCH64_OPND_SVE_ADDR_RI_U6x4
:
1828 case AARCH64_OPND_SVE_ADDR_RI_U6x8
:
1832 assert (!opnd
->addr
.offset
.is_reg
);
1833 assert (opnd
->addr
.preind
);
1834 num
= 1 << get_operand_specific_data (&aarch64_operands
[type
]);
1837 if (opnd
->shifter
.operator_present
1838 || opnd
->shifter
.amount_present
)
1840 set_other_error (mismatch_detail
, idx
,
1841 _("invalid addressing mode"));
1844 if (!value_in_range_p (opnd
->addr
.offset
.imm
, min_value
, max_value
))
1846 set_offset_out_of_range_error (mismatch_detail
, idx
,
1847 min_value
, max_value
);
1850 if (!value_aligned_p (opnd
->addr
.offset
.imm
, num
))
1852 set_unaligned_error (mismatch_detail
, idx
, num
);
1857 case AARCH64_OPND_SVE_ADDR_RI_S4x16
:
1860 goto sve_imm_offset
;
1862 case AARCH64_OPND_SVE_ADDR_R
:
1863 case AARCH64_OPND_SVE_ADDR_RR
:
1864 case AARCH64_OPND_SVE_ADDR_RR_LSL1
:
1865 case AARCH64_OPND_SVE_ADDR_RR_LSL2
:
1866 case AARCH64_OPND_SVE_ADDR_RR_LSL3
:
1867 case AARCH64_OPND_SVE_ADDR_RX
:
1868 case AARCH64_OPND_SVE_ADDR_RX_LSL1
:
1869 case AARCH64_OPND_SVE_ADDR_RX_LSL2
:
1870 case AARCH64_OPND_SVE_ADDR_RX_LSL3
:
1871 case AARCH64_OPND_SVE_ADDR_RZ
:
1872 case AARCH64_OPND_SVE_ADDR_RZ_LSL1
:
1873 case AARCH64_OPND_SVE_ADDR_RZ_LSL2
:
1874 case AARCH64_OPND_SVE_ADDR_RZ_LSL3
:
1875 modifiers
= 1 << AARCH64_MOD_LSL
;
1877 assert (opnd
->addr
.offset
.is_reg
);
1878 assert (opnd
->addr
.preind
);
1879 if ((aarch64_operands
[type
].flags
& OPD_F_NO_ZR
) != 0
1880 && opnd
->addr
.offset
.regno
== 31)
1882 set_other_error (mismatch_detail
, idx
,
1883 _("index register xzr is not allowed"));
1886 if (((1 << opnd
->shifter
.kind
) & modifiers
) == 0
1887 || (opnd
->shifter
.amount
1888 != get_operand_specific_data (&aarch64_operands
[type
])))
1890 set_other_error (mismatch_detail
, idx
,
1891 _("invalid addressing mode"));
1896 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14
:
1897 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22
:
1898 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14
:
1899 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22
:
1900 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14
:
1901 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22
:
1902 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14
:
1903 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22
:
1904 modifiers
= (1 << AARCH64_MOD_SXTW
) | (1 << AARCH64_MOD_UXTW
);
1905 goto sve_rr_operand
;
1907 case AARCH64_OPND_SVE_ADDR_ZI_U5
:
1908 case AARCH64_OPND_SVE_ADDR_ZI_U5x2
:
1909 case AARCH64_OPND_SVE_ADDR_ZI_U5x4
:
1910 case AARCH64_OPND_SVE_ADDR_ZI_U5x8
:
1913 goto sve_imm_offset
;
1915 case AARCH64_OPND_SVE_ADDR_ZZ_LSL
:
1916 modifiers
= 1 << AARCH64_MOD_LSL
;
1918 assert (opnd
->addr
.offset
.is_reg
);
1919 assert (opnd
->addr
.preind
);
1920 if (((1 << opnd
->shifter
.kind
) & modifiers
) == 0
1921 || opnd
->shifter
.amount
< 0
1922 || opnd
->shifter
.amount
> 3)
1924 set_other_error (mismatch_detail
, idx
,
1925 _("invalid addressing mode"));
1930 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW
:
1931 modifiers
= (1 << AARCH64_MOD_SXTW
);
1932 goto sve_zz_operand
;
1934 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW
:
1935 modifiers
= 1 << AARCH64_MOD_UXTW
;
1936 goto sve_zz_operand
;
1943 case AARCH64_OPND_CLASS_SIMD_REGLIST
:
1944 if (type
== AARCH64_OPND_LEt
)
1946 /* Get the upper bound for the element index. */
1947 num
= 16 / aarch64_get_qualifier_esize (qualifier
) - 1;
1948 if (!value_in_range_p (opnd
->reglist
.index
, 0, num
))
1950 set_elem_idx_out_of_range_error (mismatch_detail
, idx
, 0, num
);
1954 /* The opcode dependent area stores the number of elements in
1955 each structure to be loaded/stored. */
1956 num
= get_opcode_dependent_value (opcode
);
1959 case AARCH64_OPND_LVt
:
1960 assert (num
>= 1 && num
<= 4);
1961 /* Unless LD1/ST1, the number of registers should be equal to that
1962 of the structure elements. */
1963 if (num
!= 1 && opnd
->reglist
.num_regs
!= num
)
1965 set_reg_list_error (mismatch_detail
, idx
, num
);
1969 case AARCH64_OPND_LVt_AL
:
1970 case AARCH64_OPND_LEt
:
1971 assert (num
>= 1 && num
<= 4);
1972 /* The number of registers should be equal to that of the structure
1974 if (opnd
->reglist
.num_regs
!= num
)
1976 set_reg_list_error (mismatch_detail
, idx
, num
);
1985 case AARCH64_OPND_CLASS_IMMEDIATE
:
1986 /* Constraint check on immediate operand. */
1987 imm
= opnd
->imm
.value
;
1988 /* E.g. imm_0_31 constrains value to be 0..31. */
1989 if (qualifier_value_in_range_constraint_p (qualifier
)
1990 && !value_in_range_p (imm
, get_lower_bound (qualifier
),
1991 get_upper_bound (qualifier
)))
1993 set_imm_out_of_range_error (mismatch_detail
, idx
,
1994 get_lower_bound (qualifier
),
1995 get_upper_bound (qualifier
));
2001 case AARCH64_OPND_AIMM
:
2002 if (opnd
->shifter
.kind
!= AARCH64_MOD_LSL
)
2004 set_other_error (mismatch_detail
, idx
,
2005 _("invalid shift operator"));
2008 if (opnd
->shifter
.amount
!= 0 && opnd
->shifter
.amount
!= 12)
2010 set_other_error (mismatch_detail
, idx
,
2011 _("shift amount must be 0 or 12"));
2014 if (!value_fit_unsigned_field_p (opnd
->imm
.value
, 12))
2016 set_other_error (mismatch_detail
, idx
,
2017 _("immediate out of range"));
2022 case AARCH64_OPND_HALF
:
2023 assert (idx
== 1 && opnds
[0].type
== AARCH64_OPND_Rd
);
2024 if (opnd
->shifter
.kind
!= AARCH64_MOD_LSL
)
2026 set_other_error (mismatch_detail
, idx
,
2027 _("invalid shift operator"));
2030 size
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
2031 if (!value_aligned_p (opnd
->shifter
.amount
, 16))
2033 set_other_error (mismatch_detail
, idx
,
2034 _("shift amount must be a multiple of 16"));
2037 if (!value_in_range_p (opnd
->shifter
.amount
, 0, size
* 8 - 16))
2039 set_sft_amount_out_of_range_error (mismatch_detail
, idx
,
2043 if (opnd
->imm
.value
< 0)
2045 set_other_error (mismatch_detail
, idx
,
2046 _("negative immediate value not allowed"));
2049 if (!value_fit_unsigned_field_p (opnd
->imm
.value
, 16))
2051 set_other_error (mismatch_detail
, idx
,
2052 _("immediate out of range"));
2057 case AARCH64_OPND_IMM_MOV
:
2059 int esize
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
2060 imm
= opnd
->imm
.value
;
2064 case OP_MOV_IMM_WIDEN
:
2067 case OP_MOV_IMM_WIDE
:
2068 if (!aarch64_wide_constant_p (imm
, esize
== 4, NULL
))
2070 set_other_error (mismatch_detail
, idx
,
2071 _("immediate out of range"));
2075 case OP_MOV_IMM_LOG
:
2076 if (!aarch64_logical_immediate_p (imm
, esize
, NULL
))
2078 set_other_error (mismatch_detail
, idx
,
2079 _("immediate out of range"));
2090 case AARCH64_OPND_NZCV
:
2091 case AARCH64_OPND_CCMP_IMM
:
2092 case AARCH64_OPND_EXCEPTION
:
2093 case AARCH64_OPND_UIMM4
:
2094 case AARCH64_OPND_UIMM7
:
2095 case AARCH64_OPND_UIMM3_OP1
:
2096 case AARCH64_OPND_UIMM3_OP2
:
2097 case AARCH64_OPND_SVE_UIMM3
:
2098 case AARCH64_OPND_SVE_UIMM7
:
2099 case AARCH64_OPND_SVE_UIMM8
:
2100 case AARCH64_OPND_SVE_UIMM8_53
:
2101 size
= get_operand_fields_width (get_operand_from_code (type
));
2103 if (!value_fit_unsigned_field_p (opnd
->imm
.value
, size
))
2105 set_imm_out_of_range_error (mismatch_detail
, idx
, 0,
2111 case AARCH64_OPND_SIMM5
:
2112 case AARCH64_OPND_SVE_SIMM5
:
2113 case AARCH64_OPND_SVE_SIMM5B
:
2114 case AARCH64_OPND_SVE_SIMM6
:
2115 case AARCH64_OPND_SVE_SIMM8
:
2116 size
= get_operand_fields_width (get_operand_from_code (type
));
2118 if (!value_fit_signed_field_p (opnd
->imm
.value
, size
))
2120 set_imm_out_of_range_error (mismatch_detail
, idx
,
2122 (1 << (size
- 1)) - 1);
2127 case AARCH64_OPND_WIDTH
:
2128 assert (idx
> 1 && opnds
[idx
-1].type
== AARCH64_OPND_IMM
2129 && opnds
[0].type
== AARCH64_OPND_Rd
);
2130 size
= get_upper_bound (qualifier
);
2131 if (opnd
->imm
.value
+ opnds
[idx
-1].imm
.value
> size
)
2132 /* lsb+width <= reg.size */
2134 set_imm_out_of_range_error (mismatch_detail
, idx
, 1,
2135 size
- opnds
[idx
-1].imm
.value
);
2140 case AARCH64_OPND_LIMM
:
2141 case AARCH64_OPND_SVE_LIMM
:
2143 int esize
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
2144 uint64_t uimm
= opnd
->imm
.value
;
2145 if (opcode
->op
== OP_BIC
)
2147 if (!aarch64_logical_immediate_p (uimm
, esize
, NULL
))
2149 set_other_error (mismatch_detail
, idx
,
2150 _("immediate out of range"));
2156 case AARCH64_OPND_IMM0
:
2157 case AARCH64_OPND_FPIMM0
:
2158 if (opnd
->imm
.value
!= 0)
2160 set_other_error (mismatch_detail
, idx
,
2161 _("immediate zero expected"));
2166 case AARCH64_OPND_IMM_ROT1
:
2167 case AARCH64_OPND_IMM_ROT2
:
2168 case AARCH64_OPND_SVE_IMM_ROT2
:
2169 if (opnd
->imm
.value
!= 0
2170 && opnd
->imm
.value
!= 90
2171 && opnd
->imm
.value
!= 180
2172 && opnd
->imm
.value
!= 270)
2174 set_other_error (mismatch_detail
, idx
,
2175 _("rotate expected to be 0, 90, 180 or 270"));
2180 case AARCH64_OPND_IMM_ROT3
:
2181 case AARCH64_OPND_SVE_IMM_ROT1
:
2182 if (opnd
->imm
.value
!= 90 && opnd
->imm
.value
!= 270)
2184 set_other_error (mismatch_detail
, idx
,
2185 _("rotate expected to be 90 or 270"));
2190 case AARCH64_OPND_SHLL_IMM
:
2192 size
= 8 * aarch64_get_qualifier_esize (opnds
[idx
- 1].qualifier
);
2193 if (opnd
->imm
.value
!= size
)
2195 set_other_error (mismatch_detail
, idx
,
2196 _("invalid shift amount"));
2201 case AARCH64_OPND_IMM_VLSL
:
2202 size
= aarch64_get_qualifier_esize (qualifier
);
2203 if (!value_in_range_p (opnd
->imm
.value
, 0, size
* 8 - 1))
2205 set_imm_out_of_range_error (mismatch_detail
, idx
, 0,
2211 case AARCH64_OPND_IMM_VLSR
:
2212 size
= aarch64_get_qualifier_esize (qualifier
);
2213 if (!value_in_range_p (opnd
->imm
.value
, 1, size
* 8))
2215 set_imm_out_of_range_error (mismatch_detail
, idx
, 1, size
* 8);
2220 case AARCH64_OPND_SIMD_IMM
:
2221 case AARCH64_OPND_SIMD_IMM_SFT
:
2222 /* Qualifier check. */
2225 case AARCH64_OPND_QLF_LSL
:
2226 if (opnd
->shifter
.kind
!= AARCH64_MOD_LSL
)
2228 set_other_error (mismatch_detail
, idx
,
2229 _("invalid shift operator"));
2233 case AARCH64_OPND_QLF_MSL
:
2234 if (opnd
->shifter
.kind
!= AARCH64_MOD_MSL
)
2236 set_other_error (mismatch_detail
, idx
,
2237 _("invalid shift operator"));
2241 case AARCH64_OPND_QLF_NIL
:
2242 if (opnd
->shifter
.kind
!= AARCH64_MOD_NONE
)
2244 set_other_error (mismatch_detail
, idx
,
2245 _("shift is not permitted"));
2253 /* Is the immediate valid? */
2255 if (aarch64_get_qualifier_esize (opnds
[0].qualifier
) != 8)
2257 /* uimm8 or simm8 */
2258 if (!value_in_range_p (opnd
->imm
.value
, -128, 255))
2260 set_imm_out_of_range_error (mismatch_detail
, idx
, -128, 255);
2264 else if (aarch64_shrink_expanded_imm8 (opnd
->imm
.value
) < 0)
2267 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
2268 ffffffffgggggggghhhhhhhh'. */
2269 set_other_error (mismatch_detail
, idx
,
2270 _("invalid value for immediate"));
2273 /* Is the shift amount valid? */
2274 switch (opnd
->shifter
.kind
)
2276 case AARCH64_MOD_LSL
:
2277 size
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
2278 if (!value_in_range_p (opnd
->shifter
.amount
, 0, (size
- 1) * 8))
2280 set_sft_amount_out_of_range_error (mismatch_detail
, idx
, 0,
2284 if (!value_aligned_p (opnd
->shifter
.amount
, 8))
2286 set_unaligned_error (mismatch_detail
, idx
, 8);
2290 case AARCH64_MOD_MSL
:
2291 /* Only 8 and 16 are valid shift amount. */
2292 if (opnd
->shifter
.amount
!= 8 && opnd
->shifter
.amount
!= 16)
2294 set_other_error (mismatch_detail
, idx
,
2295 _("shift amount must be 0 or 16"));
2300 if (opnd
->shifter
.kind
!= AARCH64_MOD_NONE
)
2302 set_other_error (mismatch_detail
, idx
,
2303 _("invalid shift operator"));
2310 case AARCH64_OPND_FPIMM
:
2311 case AARCH64_OPND_SIMD_FPIMM
:
2312 case AARCH64_OPND_SVE_FPIMM8
:
2313 if (opnd
->imm
.is_fp
== 0)
2315 set_other_error (mismatch_detail
, idx
,
2316 _("floating-point immediate expected"));
2319 /* The value is expected to be an 8-bit floating-point constant with
2320 sign, 3-bit exponent and normalized 4 bits of precision, encoded
2321 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
2323 if (!value_in_range_p (opnd
->imm
.value
, 0, 255))
2325 set_other_error (mismatch_detail
, idx
,
2326 _("immediate out of range"));
2329 if (opnd
->shifter
.kind
!= AARCH64_MOD_NONE
)
2331 set_other_error (mismatch_detail
, idx
,
2332 _("invalid shift operator"));
2337 case AARCH64_OPND_SVE_AIMM
:
2340 assert (opnd
->shifter
.kind
== AARCH64_MOD_LSL
);
2341 size
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
2342 mask
= ~((uint64_t) -1 << (size
* 4) << (size
* 4));
2343 uvalue
= opnd
->imm
.value
;
2344 shift
= opnd
->shifter
.amount
;
2349 set_other_error (mismatch_detail
, idx
,
2350 _("no shift amount allowed for"
2351 " 8-bit constants"));
2357 if (shift
!= 0 && shift
!= 8)
2359 set_other_error (mismatch_detail
, idx
,
2360 _("shift amount must be 0 or 8"));
2363 if (shift
== 0 && (uvalue
& 0xff) == 0)
2366 uvalue
= (int64_t) uvalue
/ 256;
2370 if ((uvalue
& mask
) != uvalue
&& (uvalue
| ~mask
) != uvalue
)
2372 set_other_error (mismatch_detail
, idx
,
2373 _("immediate too big for element size"));
2376 uvalue
= (uvalue
- min_value
) & mask
;
2379 set_other_error (mismatch_detail
, idx
,
2380 _("invalid arithmetic immediate"));
2385 case AARCH64_OPND_SVE_ASIMM
:
2389 case AARCH64_OPND_SVE_I1_HALF_ONE
:
2390 assert (opnd
->imm
.is_fp
);
2391 if (opnd
->imm
.value
!= 0x3f000000 && opnd
->imm
.value
!= 0x3f800000)
2393 set_other_error (mismatch_detail
, idx
,
2394 _("floating-point value must be 0.5 or 1.0"));
2399 case AARCH64_OPND_SVE_I1_HALF_TWO
:
2400 assert (opnd
->imm
.is_fp
);
2401 if (opnd
->imm
.value
!= 0x3f000000 && opnd
->imm
.value
!= 0x40000000)
2403 set_other_error (mismatch_detail
, idx
,
2404 _("floating-point value must be 0.5 or 2.0"));
2409 case AARCH64_OPND_SVE_I1_ZERO_ONE
:
2410 assert (opnd
->imm
.is_fp
);
2411 if (opnd
->imm
.value
!= 0 && opnd
->imm
.value
!= 0x3f800000)
2413 set_other_error (mismatch_detail
, idx
,
2414 _("floating-point value must be 0.0 or 1.0"));
2419 case AARCH64_OPND_SVE_INV_LIMM
:
2421 int esize
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
2422 uint64_t uimm
= ~opnd
->imm
.value
;
2423 if (!aarch64_logical_immediate_p (uimm
, esize
, NULL
))
2425 set_other_error (mismatch_detail
, idx
,
2426 _("immediate out of range"));
2432 case AARCH64_OPND_SVE_LIMM_MOV
:
2434 int esize
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
2435 uint64_t uimm
= opnd
->imm
.value
;
2436 if (!aarch64_logical_immediate_p (uimm
, esize
, NULL
))
2438 set_other_error (mismatch_detail
, idx
,
2439 _("immediate out of range"));
2442 if (!aarch64_sve_dupm_mov_immediate_p (uimm
, esize
))
2444 set_other_error (mismatch_detail
, idx
,
2445 _("invalid replicated MOV immediate"));
2451 case AARCH64_OPND_SVE_PATTERN_SCALED
:
2452 assert (opnd
->shifter
.kind
== AARCH64_MOD_MUL
);
2453 if (!value_in_range_p (opnd
->shifter
.amount
, 1, 16))
2455 set_multiplier_out_of_range_error (mismatch_detail
, idx
, 1, 16);
2460 case AARCH64_OPND_SVE_SHLIMM_PRED
:
2461 case AARCH64_OPND_SVE_SHLIMM_UNPRED
:
2462 size
= aarch64_get_qualifier_esize (opnds
[idx
- 1].qualifier
);
2463 if (!value_in_range_p (opnd
->imm
.value
, 0, 8 * size
- 1))
2465 set_imm_out_of_range_error (mismatch_detail
, idx
,
2471 case AARCH64_OPND_SVE_SHRIMM_PRED
:
2472 case AARCH64_OPND_SVE_SHRIMM_UNPRED
:
2473 size
= aarch64_get_qualifier_esize (opnds
[idx
- 1].qualifier
);
2474 if (!value_in_range_p (opnd
->imm
.value
, 1, 8 * size
))
2476 set_imm_out_of_range_error (mismatch_detail
, idx
, 1, 8 * size
);
2486 case AARCH64_OPND_CLASS_SYSTEM
:
2489 case AARCH64_OPND_PSTATEFIELD
:
2490 assert (idx
== 0 && opnds
[1].type
== AARCH64_OPND_UIMM4
);
2494 The immediate must be #0 or #1. */
2495 if ((opnd
->pstatefield
== 0x03 /* UAO. */
2496 || opnd
->pstatefield
== 0x04 /* PAN. */
2497 || opnd
->pstatefield
== 0x19 /* SSBS. */
2498 || opnd
->pstatefield
== 0x1a) /* DIT. */
2499 && opnds
[1].imm
.value
> 1)
2501 set_imm_out_of_range_error (mismatch_detail
, idx
, 0, 1);
2504 /* MSR SPSel, #uimm4
2505 Uses uimm4 as a control value to select the stack pointer: if
2506 bit 0 is set it selects the current exception level's stack
2507 pointer, if bit 0 is clear it selects shared EL0 stack pointer.
2508 Bits 1 to 3 of uimm4 are reserved and should be zero. */
2509 if (opnd
->pstatefield
== 0x05 /* spsel */ && opnds
[1].imm
.value
> 1)
2511 set_imm_out_of_range_error (mismatch_detail
, idx
, 0, 1);
2520 case AARCH64_OPND_CLASS_SIMD_ELEMENT
:
2521 /* Get the upper bound for the element index. */
2522 if (opcode
->op
== OP_FCMLA_ELEM
)
2523 /* FCMLA index range depends on the vector size of other operands
2524 and is halfed because complex numbers take two elements. */
2525 num
= aarch64_get_qualifier_nelem (opnds
[0].qualifier
)
2526 * aarch64_get_qualifier_esize (opnds
[0].qualifier
) / 2;
2529 num
= num
/ aarch64_get_qualifier_esize (qualifier
) - 1;
2531 /* Index out-of-range. */
2532 if (!value_in_range_p (opnd
->reglane
.index
, 0, num
))
2534 set_elem_idx_out_of_range_error (mismatch_detail
, idx
, 0, num
);
2537 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
2538 <Vm> Is the vector register (V0-V31) or (V0-V15), whose
2539 number is encoded in "size:M:Rm":
2545 if (type
== AARCH64_OPND_Em16
&& qualifier
== AARCH64_OPND_QLF_S_H
2546 && !value_in_range_p (opnd
->reglane
.regno
, 0, 15))
2548 set_regno_out_of_range_error (mismatch_detail
, idx
, 0, 15);
2553 case AARCH64_OPND_CLASS_MODIFIED_REG
:
2554 assert (idx
== 1 || idx
== 2);
2557 case AARCH64_OPND_Rm_EXT
:
2558 if (!aarch64_extend_operator_p (opnd
->shifter
.kind
)
2559 && opnd
->shifter
.kind
!= AARCH64_MOD_LSL
)
2561 set_other_error (mismatch_detail
, idx
,
2562 _("extend operator expected"));
2565 /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
2566 (i.e. SP), in which case it defaults to LSL. The LSL alias is
2567 only valid when "Rd" or "Rn" is '11111', and is preferred in that
2569 if (!aarch64_stack_pointer_p (opnds
+ 0)
2570 && (idx
!= 2 || !aarch64_stack_pointer_p (opnds
+ 1)))
2572 if (!opnd
->shifter
.operator_present
)
2574 set_other_error (mismatch_detail
, idx
,
2575 _("missing extend operator"));
2578 else if (opnd
->shifter
.kind
== AARCH64_MOD_LSL
)
2580 set_other_error (mismatch_detail
, idx
,
2581 _("'LSL' operator not allowed"));
2585 assert (opnd
->shifter
.operator_present
/* Default to LSL. */
2586 || opnd
->shifter
.kind
== AARCH64_MOD_LSL
);
2587 if (!value_in_range_p (opnd
->shifter
.amount
, 0, 4))
2589 set_sft_amount_out_of_range_error (mismatch_detail
, idx
, 0, 4);
2592 /* In the 64-bit form, the final register operand is written as Wm
2593 for all but the (possibly omitted) UXTX/LSL and SXTX
2595 N.B. GAS allows X register to be used with any operator as a
2596 programming convenience. */
2597 if (qualifier
== AARCH64_OPND_QLF_X
2598 && opnd
->shifter
.kind
!= AARCH64_MOD_LSL
2599 && opnd
->shifter
.kind
!= AARCH64_MOD_UXTX
2600 && opnd
->shifter
.kind
!= AARCH64_MOD_SXTX
)
2602 set_other_error (mismatch_detail
, idx
, _("W register expected"));
2607 case AARCH64_OPND_Rm_SFT
:
2608 /* ROR is not available to the shifted register operand in
2609 arithmetic instructions. */
2610 if (!aarch64_shift_operator_p (opnd
->shifter
.kind
))
2612 set_other_error (mismatch_detail
, idx
,
2613 _("shift operator expected"));
2616 if (opnd
->shifter
.kind
== AARCH64_MOD_ROR
2617 && opcode
->iclass
!= log_shift
)
2619 set_other_error (mismatch_detail
, idx
,
2620 _("'ROR' operator not allowed"));
2623 num
= qualifier
== AARCH64_OPND_QLF_W
? 31 : 63;
2624 if (!value_in_range_p (opnd
->shifter
.amount
, 0, num
))
2626 set_sft_amount_out_of_range_error (mismatch_detail
, idx
, 0, num
);
/* Main entrypoint for the operand constraint checking.

   Return 1 if operands of *INST meet the constraint applied by the operand
   codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
   not NULL, return the detail of the error in *MISMATCH_DETAIL.  N.B. when
   adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
   with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
   error kind when it is notified that an instruction does not pass the check).

   Un-determined operand qualifiers may get established during the process.  */

int
aarch64_match_operands_constraint (aarch64_inst *inst,
                                   aarch64_operand_error *mismatch_detail)
{
  int i;

  DEBUG_TRACE ("enter");

  /* Check for cases where a source register needs to be the same as the
     destination register.  Do this before matching qualifiers since if
     an instruction has both invalid tying and invalid qualifiers,
     the error about qualifiers would suggest several alternative
     instructions that also have invalid tying.  */
  i = inst->opcode->tied_operand;
  if (i > 0 && (inst->operands[0].reg.regno != inst->operands[i].reg.regno))
    {
      if (mismatch_detail)
        {
          mismatch_detail->kind = AARCH64_OPDE_UNTIED_OPERAND;
          mismatch_detail->index = i;
          mismatch_detail->error = NULL;
        }
      return 0;
    }

  /* Match operands' qualifier.
     *INST has already had qualifier establish for some, if not all, of
     its operands; we need to find out whether these established
     qualifiers match one of the qualifier sequence in
     INST->OPCODE->QUALIFIERS_LIST.  If yes, we will assign each operand
     with the corresponding qualifier in such a sequence.
     Only basic operand constraint checking is done here; the more thorough
     constraint checking will carried out by operand_general_constraint_met_p,
     which has be to called after this in order to get all of the operands'
     qualifiers established.  */
  if (match_operands_qualifier (inst, TRUE /* update_p */) == 0)
    {
      DEBUG_TRACE ("FAIL on operand qualifier matching");
      if (mismatch_detail)
        {
          /* Return an error type to indicate that it is the qualifier
             matching failure; we don't care about which operand as there
             are enough information in the opcode table to reproduce it.  */
          mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
          mismatch_detail->index = -1;
          mismatch_detail->error = NULL;
        }
      return 0;
    }

  /* Match operands' constraint.  */
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      enum aarch64_opnd type = inst->opcode->operands[i];
      /* AARCH64_OPND_NIL terminates the operand list.  */
      if (type == AARCH64_OPND_NIL)
        break;
      if (inst->operands[i].skip)
        {
          DEBUG_TRACE ("skip the incomplete operand %d", i);
          continue;
        }
      if (operand_general_constraint_met_p (inst->operands, i, type,
                                            inst->opcode, mismatch_detail) == 0)
        {
          DEBUG_TRACE ("FAIL on operand %d", i);
          return 0;
        }
    }

  DEBUG_TRACE ("PASS");
  return 1;
}
2728 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
2729 Also updates the TYPE of each INST->OPERANDS with the corresponding
2730 value of OPCODE->OPERANDS.
2732 Note that some operand qualifiers may need to be manually cleared by
2733 the caller before it further calls the aarch64_opcode_encode; by
2734 doing this, it helps the qualifier matching facilities work
2737 const aarch64_opcode
*
2738 aarch64_replace_opcode (aarch64_inst
*inst
, const aarch64_opcode
*opcode
)
2741 const aarch64_opcode
*old
= inst
->opcode
;
2743 inst
->opcode
= opcode
;
2745 /* Update the operand types. */
2746 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
2748 inst
->operands
[i
].type
= opcode
->operands
[i
];
2749 if (opcode
->operands
[i
] == AARCH64_OPND_NIL
)
2753 DEBUG_TRACE ("replace %s with %s", old
->name
, opcode
->name
);
2759 aarch64_operand_index (const enum aarch64_opnd
*operands
, enum aarch64_opnd operand
)
2762 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
2763 if (operands
[i
] == operand
)
2765 else if (operands
[i
] == AARCH64_OPND_NIL
)
/* R0...R30, followed by FOR31.  */
#define BANK(R, FOR31) \
  { R (0),  R (1),  R (2),  R (3),  R (4),  R (5),  R (6),  R (7),  \
    R (8),  R (9),  R (10), R (11), R (12), R (13), R (14), R (15), \
    R (16), R (17), R (18), R (19), R (20), R (21), R (22), R (23), \
    R (24), R (25), R (26), R (27), R (28), R (29), R (30), FOR31 }

/* General purpose register names, indexed [has_zr][is_64][regno]:
   [0][0]  32-bit integer regs with sp   Wn
   [0][1]  64-bit integer regs with sp   Xn  sf=1
   [1][0]  32-bit integer regs with #0   Wn
   [1][1]  64-bit integer regs with #0   Xn  sf=1  */
static const char *int_reg[2][2][32] = {
#define R32(X) "w" #X
#define R64(X) "x" #X
  { BANK (R32, "wsp"), BANK (R64, "sp") },
  { BANK (R32, "wzr"), BANK (R64, "xzr") }
#undef R32
#undef R64
};
/* Names of the SVE vector registers, first with .S suffixes,
   then with .D suffixes.  Indexed [is_d][regno].  */

static const char *sve_reg[2][32] = {
#define ZS(X) "z" #X ".s"
#define ZD(X) "z" #X ".d"
  BANK (ZS, ZS (31)), BANK (ZD, ZD (31))
#undef ZS
#undef ZD
};
#undef BANK
2801 /* Return the integer register name.
2802 if SP_REG_P is not 0, R31 is an SP reg, other R31 is the zero reg. */
2804 static inline const char *
2805 get_int_reg_name (int regno
, aarch64_opnd_qualifier_t qualifier
, int sp_reg_p
)
2807 const int has_zr
= sp_reg_p
? 0 : 1;
2808 const int is_64
= aarch64_get_qualifier_esize (qualifier
) == 4 ? 0 : 1;
2809 return int_reg
[has_zr
][is_64
][regno
];
2812 /* Like get_int_reg_name, but IS_64 is always 1. */
2814 static inline const char *
2815 get_64bit_int_reg_name (int regno
, int sp_reg_p
)
2817 const int has_zr
= sp_reg_p
? 0 : 1;
2818 return int_reg
[has_zr
][1][regno
];
2821 /* Get the name of the integer offset register in OPND, using the shift type
2822 to decide whether it's a word or doubleword. */
2824 static inline const char *
2825 get_offset_int_reg_name (const aarch64_opnd_info
*opnd
)
2827 switch (opnd
->shifter
.kind
)
2829 case AARCH64_MOD_UXTW
:
2830 case AARCH64_MOD_SXTW
:
2831 return get_int_reg_name (opnd
->addr
.offset
.regno
, AARCH64_OPND_QLF_W
, 0);
2833 case AARCH64_MOD_LSL
:
2834 case AARCH64_MOD_SXTX
:
2835 return get_int_reg_name (opnd
->addr
.offset
.regno
, AARCH64_OPND_QLF_X
, 0);
2842 /* Get the name of the SVE vector offset register in OPND, using the operand
2843 qualifier to decide whether the suffix should be .S or .D. */
2845 static inline const char *
2846 get_addr_sve_reg_name (int regno
, aarch64_opnd_qualifier_t qualifier
)
2848 assert (qualifier
== AARCH64_OPND_QLF_S_S
2849 || qualifier
== AARCH64_OPND_QLF_S_D
);
2850 return sve_reg
[qualifier
== AARCH64_OPND_QLF_S_D
][regno
];
2853 /* Types for expanding an encoded 8-bit value to a floating-point value. */
/* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
   normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
   (depending on the type of the instruction).  IMM8 will be expanded to a
   single-precision floating-point value (SIZE == 4) or a double-precision
   floating-point value (SIZE == 8).  A half-precision floating-point value
   (SIZE == 2) is expanded to a single-precision floating-point value.  The
   expanded value is returned.  */

static uint64_t
expand_fp_imm (int size, uint32_t imm8)
{
  uint64_t result = 0;
  const uint64_t sign = (imm8 >> 7) & 0x01;	/* imm8<7>   */
  const uint64_t frac = imm8 & 0x7f;		/* imm8<6:0> */
  const uint64_t bit6 = frac >> 6;		/* imm8<6>   */
  const uint64_t bit6_x4 = bit6 ? 0xf : 0;	/* Replicate(imm8<6>,4) */

  if (size == 8)
    {
      result = (sign << 63)			/* imm8<7>  */
	| ((bit6 ^ 1) << 62)			/* NOT(imm8<6>) */
	| (bit6_x4 << 58)
	| (bit6 << 57) | (bit6 << 56)
	| (bit6 << 55)				/* Replicate(imm8<6>,7) */
	| (frac << 48);				/* imm8<6>:imm8<5:0> */
    }
  else if (size == 4 || size == 2)
    {
      result = (sign << 31)			/* imm8<7> */
	| ((bit6 ^ 1) << 30)			/* NOT(imm8<6>) */
	| (bit6_x4 << 26)			/* Replicate(imm8<6>,4) */
	| (frac << 19);				/* imm8<6>:imm8<5:0> */
    }
  else
    {
      /* An unsupported size.  */
      assert (0);
    }

  return result;
}
/* Produce the string representation of the register list operand *OPND
   in the buffer pointed by BUF of size SIZE.  PREFIX is the part of
   the register name that comes before the register number, such as "v".  */
static void
print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd,
                     const char *prefix)
{
  const int num_regs = opnd->reglist.num_regs;
  const int first_reg = opnd->reglist.first_regno;
  /* Register numbers wrap around modulo 32.  */
  const int last_reg = (first_reg + num_regs - 1) & 0x1f;
  const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
  char tb[8];  /* Temporary buffer.  */

  /* An element list (LEt) must always carry an index.  */
  assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
  assert (num_regs >= 1 && num_regs <= 4);

  /* Prepare the index if any.  */
  if (opnd->reglist.has_index)
    /* PR 21096: The %100 is to silence a warning about possible truncation.  */
    snprintf (tb, 8, "[%" PRIi64 "]", (opnd->reglist.index % 100));
  else
    tb[0] = '\0';

  /* The hyphenated form is preferred for disassembly if there are
     more than two registers in the list, and the register numbers
     are monotonically increasing in increments of one.  */
  if (num_regs > 2 && last_reg > first_reg)
    snprintf (buf, size, "{%s%d.%s-%s%d.%s}%s", prefix, first_reg, qlf_name,
              prefix, last_reg, qlf_name, tb);
  else
    {
      /* Spell each register out individually (wrapping modulo 32).  */
      const int reg0 = first_reg;
      const int reg1 = (first_reg + 1) & 0x1f;
      const int reg2 = (first_reg + 2) & 0x1f;
      const int reg3 = (first_reg + 3) & 0x1f;

      switch (num_regs)
        {
        case 1:
          snprintf (buf, size, "{%s%d.%s}%s", prefix, reg0, qlf_name, tb);
          break;
        case 2:
          snprintf (buf, size, "{%s%d.%s, %s%d.%s}%s", prefix, reg0, qlf_name,
                    prefix, reg1, qlf_name, tb);
          break;
        case 3:
          snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s}%s",
                    prefix, reg0, qlf_name, prefix, reg1, qlf_name,
                    prefix, reg2, qlf_name, tb);
          break;
        case 4:
          snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s, %s%d.%s}%s",
                    prefix, reg0, qlf_name, prefix, reg1, qlf_name,
                    prefix, reg2, qlf_name, prefix, reg3, qlf_name, tb);
          break;
        }
    }
}
/* Print the register+immediate address in OPND to BUF, which has SIZE
   characters.  BASE is the name of the base register.  */
static void
print_immediate_offset_address (char *buf, size_t size,
                                const aarch64_opnd_info *opnd,
                                const char *base)
{
  if (opnd->addr.writeback)
    {
      if (opnd->addr.preind)
        /* Pre-indexed form, e.g. [x0, #8]!  */
        snprintf (buf, size, "[%s, #%d]!", base, opnd->addr.offset.imm);
      else
        /* Post-indexed form, e.g. [x0], #8  */
        snprintf (buf, size, "[%s], #%d", base, opnd->addr.offset.imm);
    }
  else
    {
      if (opnd->shifter.operator_present)
        {
          /* The only shifter accepted on an immediate offset here is the
             SVE vector-length multiplier.  */
          assert (opnd->shifter.kind == AARCH64_MOD_MUL_VL);
          snprintf (buf, size, "[%s, #%d, mul vl]",
                    base, opnd->addr.offset.imm);
        }
      else if (opnd->addr.offset.imm)
        snprintf (buf, size, "[%s, #%d]", base, opnd->addr.offset.imm);
      else
        /* Omit a zero offset entirely.  */
        snprintf (buf, size, "[%s]", base);
    }
}
/* Produce the string representation of the register offset address operand
   *OPND in the buffer pointed by BUF of size SIZE.  BASE and OFFSET are
   the names of the base and offset registers.  */
static void
print_register_offset_address (char *buf, size_t size,
                               const aarch64_opnd_info *opnd,
                               const char *base, const char *offset)
{
  char tb[16];  /* Temporary buffer.  */
  bfd_boolean print_extend_p = TRUE;
  bfd_boolean print_amount_p = TRUE;
  const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;

  if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
                                || !opnd->shifter.amount_present))
    {
      /* Not print the shift/extend amount when the amount is zero and
         when it is not the special case of 8-bit load/store instruction.  */
      print_amount_p = FALSE;
      /* Likewise, no need to print the shift operator LSL in such a
         situation.  */
      if (opnd->shifter.kind == AARCH64_MOD_LSL)
        print_extend_p = FALSE;
    }

  /* Prepare for the extend/shift.  */
  if (print_extend_p)
    {
      if (print_amount_p)
        snprintf (tb, sizeof (tb), ", %s #%" PRIi64, shift_name,
  /* PR 21096: The %100 is to silence a warning about possible truncation.  */
                  (opnd->shifter.amount % 100));
      else
        snprintf (tb, sizeof (tb), ", %s", shift_name);
    }
  else
    tb[0] = '\0';

  snprintf (buf, size, "[%s, %s%s]", base, offset, tb);
}
3047 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
3048 in *BUF. The caller should pass in the maximum size of *BUF in SIZE.
3049 PC, PCREL_P and ADDRESS are used to pass in and return information about
3050 the PC-relative address calculation, where the PC value is passed in
3051 PC. If the operand is pc-relative related, *PCREL_P (if PCREL_P non-NULL)
3052 will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
3053 calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
3055 The function serves both the disassembler and the assembler diagnostics
3056 issuer, which is the reason why it lives in this file. */
3059 aarch64_print_operand (char *buf
, size_t size
, bfd_vma pc
,
3060 const aarch64_opcode
*opcode
,
3061 const aarch64_opnd_info
*opnds
, int idx
, int *pcrel_p
,
3062 bfd_vma
*address
, char** notes
)
3064 unsigned int i
, num_conds
;
3065 const char *name
= NULL
;
3066 const aarch64_opnd_info
*opnd
= opnds
+ idx
;
3067 enum aarch64_modifier_kind kind
;
3068 uint64_t addr
, enum_value
;
3076 case AARCH64_OPND_Rd
:
3077 case AARCH64_OPND_Rn
:
3078 case AARCH64_OPND_Rm
:
3079 case AARCH64_OPND_Rt
:
3080 case AARCH64_OPND_Rt2
:
3081 case AARCH64_OPND_Rs
:
3082 case AARCH64_OPND_Ra
:
3083 case AARCH64_OPND_Rt_SYS
:
3084 case AARCH64_OPND_PAIRREG
:
3085 case AARCH64_OPND_SVE_Rm
:
3086 /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
3087 the <ic_op>, therefore we use opnd->present to override the
3088 generic optional-ness information. */
3089 if (opnd
->type
== AARCH64_OPND_Rt_SYS
)
3094 /* Omit the operand, e.g. RET. */
3095 else if (optional_operand_p (opcode
, idx
)
3097 == get_optional_operand_default_value (opcode
)))
3099 assert (opnd
->qualifier
== AARCH64_OPND_QLF_W
3100 || opnd
->qualifier
== AARCH64_OPND_QLF_X
);
3101 snprintf (buf
, size
, "%s",
3102 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0));
3105 case AARCH64_OPND_Rd_SP
:
3106 case AARCH64_OPND_Rn_SP
:
3107 case AARCH64_OPND_SVE_Rn_SP
:
3108 case AARCH64_OPND_Rm_SP
:
3109 assert (opnd
->qualifier
== AARCH64_OPND_QLF_W
3110 || opnd
->qualifier
== AARCH64_OPND_QLF_WSP
3111 || opnd
->qualifier
== AARCH64_OPND_QLF_X
3112 || opnd
->qualifier
== AARCH64_OPND_QLF_SP
);
3113 snprintf (buf
, size
, "%s",
3114 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 1));
3117 case AARCH64_OPND_Rm_EXT
:
3118 kind
= opnd
->shifter
.kind
;
3119 assert (idx
== 1 || idx
== 2);
3120 if ((aarch64_stack_pointer_p (opnds
)
3121 || (idx
== 2 && aarch64_stack_pointer_p (opnds
+ 1)))
3122 && ((opnd
->qualifier
== AARCH64_OPND_QLF_W
3123 && opnds
[0].qualifier
== AARCH64_OPND_QLF_W
3124 && kind
== AARCH64_MOD_UXTW
)
3125 || (opnd
->qualifier
== AARCH64_OPND_QLF_X
3126 && kind
== AARCH64_MOD_UXTX
)))
3128 /* 'LSL' is the preferred form in this case. */
3129 kind
= AARCH64_MOD_LSL
;
3130 if (opnd
->shifter
.amount
== 0)
3132 /* Shifter omitted. */
3133 snprintf (buf
, size
, "%s",
3134 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0));
3138 if (opnd
->shifter
.amount
)
3139 snprintf (buf
, size
, "%s, %s #%" PRIi64
,
3140 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0),
3141 aarch64_operand_modifiers
[kind
].name
,
3142 opnd
->shifter
.amount
);
3144 snprintf (buf
, size
, "%s, %s",
3145 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0),
3146 aarch64_operand_modifiers
[kind
].name
);
3149 case AARCH64_OPND_Rm_SFT
:
3150 assert (opnd
->qualifier
== AARCH64_OPND_QLF_W
3151 || opnd
->qualifier
== AARCH64_OPND_QLF_X
);
3152 if (opnd
->shifter
.amount
== 0 && opnd
->shifter
.kind
== AARCH64_MOD_LSL
)
3153 snprintf (buf
, size
, "%s",
3154 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0));
3156 snprintf (buf
, size
, "%s, %s #%" PRIi64
,
3157 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0),
3158 aarch64_operand_modifiers
[opnd
->shifter
.kind
].name
,
3159 opnd
->shifter
.amount
);
3162 case AARCH64_OPND_Fd
:
3163 case AARCH64_OPND_Fn
:
3164 case AARCH64_OPND_Fm
:
3165 case AARCH64_OPND_Fa
:
3166 case AARCH64_OPND_Ft
:
3167 case AARCH64_OPND_Ft2
:
3168 case AARCH64_OPND_Sd
:
3169 case AARCH64_OPND_Sn
:
3170 case AARCH64_OPND_Sm
:
3171 case AARCH64_OPND_SVE_VZn
:
3172 case AARCH64_OPND_SVE_Vd
:
3173 case AARCH64_OPND_SVE_Vm
:
3174 case AARCH64_OPND_SVE_Vn
:
3175 snprintf (buf
, size
, "%s%d", aarch64_get_qualifier_name (opnd
->qualifier
),
3179 case AARCH64_OPND_Va
:
3180 case AARCH64_OPND_Vd
:
3181 case AARCH64_OPND_Vn
:
3182 case AARCH64_OPND_Vm
:
3183 snprintf (buf
, size
, "v%d.%s", opnd
->reg
.regno
,
3184 aarch64_get_qualifier_name (opnd
->qualifier
));
3187 case AARCH64_OPND_Ed
:
3188 case AARCH64_OPND_En
:
3189 case AARCH64_OPND_Em
:
3190 case AARCH64_OPND_Em16
:
3191 case AARCH64_OPND_SM3_IMM2
:
3192 snprintf (buf
, size
, "v%d.%s[%" PRIi64
"]", opnd
->reglane
.regno
,
3193 aarch64_get_qualifier_name (opnd
->qualifier
),
3194 opnd
->reglane
.index
);
3197 case AARCH64_OPND_VdD1
:
3198 case AARCH64_OPND_VnD1
:
3199 snprintf (buf
, size
, "v%d.d[1]", opnd
->reg
.regno
);
3202 case AARCH64_OPND_LVn
:
3203 case AARCH64_OPND_LVt
:
3204 case AARCH64_OPND_LVt_AL
:
3205 case AARCH64_OPND_LEt
:
3206 print_register_list (buf
, size
, opnd
, "v");
3209 case AARCH64_OPND_SVE_Pd
:
3210 case AARCH64_OPND_SVE_Pg3
:
3211 case AARCH64_OPND_SVE_Pg4_5
:
3212 case AARCH64_OPND_SVE_Pg4_10
:
3213 case AARCH64_OPND_SVE_Pg4_16
:
3214 case AARCH64_OPND_SVE_Pm
:
3215 case AARCH64_OPND_SVE_Pn
:
3216 case AARCH64_OPND_SVE_Pt
:
3217 if (opnd
->qualifier
== AARCH64_OPND_QLF_NIL
)
3218 snprintf (buf
, size
, "p%d", opnd
->reg
.regno
);
3219 else if (opnd
->qualifier
== AARCH64_OPND_QLF_P_Z
3220 || opnd
->qualifier
== AARCH64_OPND_QLF_P_M
)
3221 snprintf (buf
, size
, "p%d/%s", opnd
->reg
.regno
,
3222 aarch64_get_qualifier_name (opnd
->qualifier
));
3224 snprintf (buf
, size
, "p%d.%s", opnd
->reg
.regno
,
3225 aarch64_get_qualifier_name (opnd
->qualifier
));
3228 case AARCH64_OPND_SVE_Za_5
:
3229 case AARCH64_OPND_SVE_Za_16
:
3230 case AARCH64_OPND_SVE_Zd
:
3231 case AARCH64_OPND_SVE_Zm_5
:
3232 case AARCH64_OPND_SVE_Zm_16
:
3233 case AARCH64_OPND_SVE_Zn
:
3234 case AARCH64_OPND_SVE_Zt
:
3235 if (opnd
->qualifier
== AARCH64_OPND_QLF_NIL
)
3236 snprintf (buf
, size
, "z%d", opnd
->reg
.regno
);
3238 snprintf (buf
, size
, "z%d.%s", opnd
->reg
.regno
,
3239 aarch64_get_qualifier_name (opnd
->qualifier
));
3242 case AARCH64_OPND_SVE_ZnxN
:
3243 case AARCH64_OPND_SVE_ZtxN
:
3244 print_register_list (buf
, size
, opnd
, "z");
3247 case AARCH64_OPND_SVE_Zm3_INDEX
:
3248 case AARCH64_OPND_SVE_Zm3_22_INDEX
:
3249 case AARCH64_OPND_SVE_Zm4_INDEX
:
3250 case AARCH64_OPND_SVE_Zn_INDEX
:
3251 snprintf (buf
, size
, "z%d.%s[%" PRIi64
"]", opnd
->reglane
.regno
,
3252 aarch64_get_qualifier_name (opnd
->qualifier
),
3253 opnd
->reglane
.index
);
3256 case AARCH64_OPND_CRn
:
3257 case AARCH64_OPND_CRm
:
3258 snprintf (buf
, size
, "C%" PRIi64
, opnd
->imm
.value
);
3261 case AARCH64_OPND_IDX
:
3262 case AARCH64_OPND_MASK
:
3263 case AARCH64_OPND_IMM
:
3264 case AARCH64_OPND_IMM_2
:
3265 case AARCH64_OPND_WIDTH
:
3266 case AARCH64_OPND_UIMM3_OP1
:
3267 case AARCH64_OPND_UIMM3_OP2
:
3268 case AARCH64_OPND_BIT_NUM
:
3269 case AARCH64_OPND_IMM_VLSL
:
3270 case AARCH64_OPND_IMM_VLSR
:
3271 case AARCH64_OPND_SHLL_IMM
:
3272 case AARCH64_OPND_IMM0
:
3273 case AARCH64_OPND_IMMR
:
3274 case AARCH64_OPND_IMMS
:
3275 case AARCH64_OPND_FBITS
:
3276 case AARCH64_OPND_SIMM5
:
3277 case AARCH64_OPND_SVE_SHLIMM_PRED
:
3278 case AARCH64_OPND_SVE_SHLIMM_UNPRED
:
3279 case AARCH64_OPND_SVE_SHRIMM_PRED
:
3280 case AARCH64_OPND_SVE_SHRIMM_UNPRED
:
3281 case AARCH64_OPND_SVE_SIMM5
:
3282 case AARCH64_OPND_SVE_SIMM5B
:
3283 case AARCH64_OPND_SVE_SIMM6
:
3284 case AARCH64_OPND_SVE_SIMM8
:
3285 case AARCH64_OPND_SVE_UIMM3
:
3286 case AARCH64_OPND_SVE_UIMM7
:
3287 case AARCH64_OPND_SVE_UIMM8
:
3288 case AARCH64_OPND_SVE_UIMM8_53
:
3289 case AARCH64_OPND_IMM_ROT1
:
3290 case AARCH64_OPND_IMM_ROT2
:
3291 case AARCH64_OPND_IMM_ROT3
:
3292 case AARCH64_OPND_SVE_IMM_ROT1
:
3293 case AARCH64_OPND_SVE_IMM_ROT2
:
3294 snprintf (buf
, size
, "#%" PRIi64
, opnd
->imm
.value
);
3297 case AARCH64_OPND_SVE_I1_HALF_ONE
:
3298 case AARCH64_OPND_SVE_I1_HALF_TWO
:
3299 case AARCH64_OPND_SVE_I1_ZERO_ONE
:
3302 c
.i
= opnd
->imm
.value
;
3303 snprintf (buf
, size
, "#%.1f", c
.f
);
3307 case AARCH64_OPND_SVE_PATTERN
:
3308 if (optional_operand_p (opcode
, idx
)
3309 && opnd
->imm
.value
== get_optional_operand_default_value (opcode
))
3311 enum_value
= opnd
->imm
.value
;
3312 assert (enum_value
< ARRAY_SIZE (aarch64_sve_pattern_array
));
3313 if (aarch64_sve_pattern_array
[enum_value
])
3314 snprintf (buf
, size
, "%s", aarch64_sve_pattern_array
[enum_value
]);
3316 snprintf (buf
, size
, "#%" PRIi64
, opnd
->imm
.value
);
3319 case AARCH64_OPND_SVE_PATTERN_SCALED
:
3320 if (optional_operand_p (opcode
, idx
)
3321 && !opnd
->shifter
.operator_present
3322 && opnd
->imm
.value
== get_optional_operand_default_value (opcode
))
3324 enum_value
= opnd
->imm
.value
;
3325 assert (enum_value
< ARRAY_SIZE (aarch64_sve_pattern_array
));
3326 if (aarch64_sve_pattern_array
[opnd
->imm
.value
])
3327 snprintf (buf
, size
, "%s", aarch64_sve_pattern_array
[opnd
->imm
.value
]);
3329 snprintf (buf
, size
, "#%" PRIi64
, opnd
->imm
.value
);
3330 if (opnd
->shifter
.operator_present
)
3332 size_t len
= strlen (buf
);
3333 snprintf (buf
+ len
, size
- len
, ", %s #%" PRIi64
,
3334 aarch64_operand_modifiers
[opnd
->shifter
.kind
].name
,
3335 opnd
->shifter
.amount
);
3339 case AARCH64_OPND_SVE_PRFOP
:
3340 enum_value
= opnd
->imm
.value
;
3341 assert (enum_value
< ARRAY_SIZE (aarch64_sve_prfop_array
));
3342 if (aarch64_sve_prfop_array
[enum_value
])
3343 snprintf (buf
, size
, "%s", aarch64_sve_prfop_array
[enum_value
]);
3345 snprintf (buf
, size
, "#%" PRIi64
, opnd
->imm
.value
);
3348 case AARCH64_OPND_IMM_MOV
:
3349 switch (aarch64_get_qualifier_esize (opnds
[0].qualifier
))
3351 case 4: /* e.g. MOV Wd, #<imm32>. */
3353 int imm32
= opnd
->imm
.value
;
3354 snprintf (buf
, size
, "#0x%-20x\t// #%d", imm32
, imm32
);
3357 case 8: /* e.g. MOV Xd, #<imm64>. */
3358 snprintf (buf
, size
, "#0x%-20" PRIx64
"\t// #%" PRIi64
,
3359 opnd
->imm
.value
, opnd
->imm
.value
);
3361 default: assert (0);
3365 case AARCH64_OPND_FPIMM0
:
3366 snprintf (buf
, size
, "#0.0");
3369 case AARCH64_OPND_LIMM
:
3370 case AARCH64_OPND_AIMM
:
3371 case AARCH64_OPND_HALF
:
3372 case AARCH64_OPND_SVE_INV_LIMM
:
3373 case AARCH64_OPND_SVE_LIMM
:
3374 case AARCH64_OPND_SVE_LIMM_MOV
:
3375 if (opnd
->shifter
.amount
)
3376 snprintf (buf
, size
, "#0x%" PRIx64
", lsl #%" PRIi64
, opnd
->imm
.value
,
3377 opnd
->shifter
.amount
);
3379 snprintf (buf
, size
, "#0x%" PRIx64
, opnd
->imm
.value
);
3382 case AARCH64_OPND_SIMD_IMM
:
3383 case AARCH64_OPND_SIMD_IMM_SFT
:
3384 if ((! opnd
->shifter
.amount
&& opnd
->shifter
.kind
== AARCH64_MOD_LSL
)
3385 || opnd
->shifter
.kind
== AARCH64_MOD_NONE
)
3386 snprintf (buf
, size
, "#0x%" PRIx64
, opnd
->imm
.value
);
3388 snprintf (buf
, size
, "#0x%" PRIx64
", %s #%" PRIi64
, opnd
->imm
.value
,
3389 aarch64_operand_modifiers
[opnd
->shifter
.kind
].name
,
3390 opnd
->shifter
.amount
);
3393 case AARCH64_OPND_SVE_AIMM
:
3394 case AARCH64_OPND_SVE_ASIMM
:
3395 if (opnd
->shifter
.amount
)
3396 snprintf (buf
, size
, "#%" PRIi64
", lsl #%" PRIi64
, opnd
->imm
.value
,
3397 opnd
->shifter
.amount
);
3399 snprintf (buf
, size
, "#%" PRIi64
, opnd
->imm
.value
);
3402 case AARCH64_OPND_FPIMM
:
3403 case AARCH64_OPND_SIMD_FPIMM
:
3404 case AARCH64_OPND_SVE_FPIMM8
:
3405 switch (aarch64_get_qualifier_esize (opnds
[0].qualifier
))
3407 case 2: /* e.g. FMOV <Hd>, #<imm>. */
3410 c
.i
= expand_fp_imm (2, opnd
->imm
.value
);
3411 snprintf (buf
, size
, "#%.18e", c
.f
);
3414 case 4: /* e.g. FMOV <Vd>.4S, #<imm>. */
3417 c
.i
= expand_fp_imm (4, opnd
->imm
.value
);
3418 snprintf (buf
, size
, "#%.18e", c
.f
);
3421 case 8: /* e.g. FMOV <Sd>, #<imm>. */
3424 c
.i
= expand_fp_imm (8, opnd
->imm
.value
);
3425 snprintf (buf
, size
, "#%.18e", c
.d
);
3428 default: assert (0);
3432 case AARCH64_OPND_CCMP_IMM
:
3433 case AARCH64_OPND_NZCV
:
3434 case AARCH64_OPND_EXCEPTION
:
3435 case AARCH64_OPND_UIMM4
:
3436 case AARCH64_OPND_UIMM7
:
3437 if (optional_operand_p (opcode
, idx
) == TRUE
3438 && (opnd
->imm
.value
==
3439 (int64_t) get_optional_operand_default_value (opcode
)))
3440 /* Omit the operand, e.g. DCPS1. */
3442 snprintf (buf
, size
, "#0x%x", (unsigned int)opnd
->imm
.value
);
3445 case AARCH64_OPND_COND
:
3446 case AARCH64_OPND_COND1
:
3447 snprintf (buf
, size
, "%s", opnd
->cond
->names
[0]);
3448 num_conds
= ARRAY_SIZE (opnd
->cond
->names
);
3449 for (i
= 1; i
< num_conds
&& opnd
->cond
->names
[i
]; ++i
)
3451 size_t len
= strlen (buf
);
3453 snprintf (buf
+ len
, size
- len
, " // %s = %s",
3454 opnd
->cond
->names
[0], opnd
->cond
->names
[i
]);
3456 snprintf (buf
+ len
, size
- len
, ", %s",
3457 opnd
->cond
->names
[i
]);
3461 case AARCH64_OPND_ADDR_ADRP
:
3462 addr
= ((pc
+ AARCH64_PCREL_OFFSET
) & ~(uint64_t)0xfff)
3468 /* This is not necessary during the disassembling, as print_address_func
3469 in the disassemble_info will take care of the printing. But some
3470 other callers may be still interested in getting the string in *STR,
3471 so here we do snprintf regardless. */
3472 snprintf (buf
, size
, "#0x%" PRIx64
, addr
);
3475 case AARCH64_OPND_ADDR_PCREL14
:
3476 case AARCH64_OPND_ADDR_PCREL19
:
3477 case AARCH64_OPND_ADDR_PCREL21
:
3478 case AARCH64_OPND_ADDR_PCREL26
:
3479 addr
= pc
+ AARCH64_PCREL_OFFSET
+ opnd
->imm
.value
;
3484 /* This is not necessary during the disassembling, as print_address_func
3485 in the disassemble_info will take care of the printing. But some
3486 other callers may be still interested in getting the string in *STR,
3487 so here we do snprintf regardless. */
3488 snprintf (buf
, size
, "#0x%" PRIx64
, addr
);
3491 case AARCH64_OPND_ADDR_SIMPLE
:
3492 case AARCH64_OPND_SIMD_ADDR_SIMPLE
:
3493 case AARCH64_OPND_SIMD_ADDR_POST
:
3494 name
= get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1);
3495 if (opnd
->type
== AARCH64_OPND_SIMD_ADDR_POST
)
3497 if (opnd
->addr
.offset
.is_reg
)
3498 snprintf (buf
, size
, "[%s], x%d", name
, opnd
->addr
.offset
.regno
);
3500 snprintf (buf
, size
, "[%s], #%d", name
, opnd
->addr
.offset
.imm
);
3503 snprintf (buf
, size
, "[%s]", name
);
3506 case AARCH64_OPND_ADDR_REGOFF
:
3507 case AARCH64_OPND_SVE_ADDR_R
:
3508 case AARCH64_OPND_SVE_ADDR_RR
:
3509 case AARCH64_OPND_SVE_ADDR_RR_LSL1
:
3510 case AARCH64_OPND_SVE_ADDR_RR_LSL2
:
3511 case AARCH64_OPND_SVE_ADDR_RR_LSL3
:
3512 case AARCH64_OPND_SVE_ADDR_RX
:
3513 case AARCH64_OPND_SVE_ADDR_RX_LSL1
:
3514 case AARCH64_OPND_SVE_ADDR_RX_LSL2
:
3515 case AARCH64_OPND_SVE_ADDR_RX_LSL3
:
3516 print_register_offset_address
3517 (buf
, size
, opnd
, get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1),
3518 get_offset_int_reg_name (opnd
));
3521 case AARCH64_OPND_SVE_ADDR_RZ
:
3522 case AARCH64_OPND_SVE_ADDR_RZ_LSL1
:
3523 case AARCH64_OPND_SVE_ADDR_RZ_LSL2
:
3524 case AARCH64_OPND_SVE_ADDR_RZ_LSL3
:
3525 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14
:
3526 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22
:
3527 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14
:
3528 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22
:
3529 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14
:
3530 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22
:
3531 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14
:
3532 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22
:
3533 print_register_offset_address
3534 (buf
, size
, opnd
, get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1),
3535 get_addr_sve_reg_name (opnd
->addr
.offset
.regno
, opnd
->qualifier
));
3538 case AARCH64_OPND_ADDR_SIMM7
:
3539 case AARCH64_OPND_ADDR_SIMM9
:
3540 case AARCH64_OPND_ADDR_SIMM9_2
:
3541 case AARCH64_OPND_ADDR_SIMM10
:
3542 case AARCH64_OPND_ADDR_OFFSET
:
3543 case AARCH64_OPND_SVE_ADDR_RI_S4x16
:
3544 case AARCH64_OPND_SVE_ADDR_RI_S4xVL
:
3545 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL
:
3546 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL
:
3547 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL
:
3548 case AARCH64_OPND_SVE_ADDR_RI_S6xVL
:
3549 case AARCH64_OPND_SVE_ADDR_RI_S9xVL
:
3550 case AARCH64_OPND_SVE_ADDR_RI_U6
:
3551 case AARCH64_OPND_SVE_ADDR_RI_U6x2
:
3552 case AARCH64_OPND_SVE_ADDR_RI_U6x4
:
3553 case AARCH64_OPND_SVE_ADDR_RI_U6x8
:
3554 print_immediate_offset_address
3555 (buf
, size
, opnd
, get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1));
3558 case AARCH64_OPND_SVE_ADDR_ZI_U5
:
3559 case AARCH64_OPND_SVE_ADDR_ZI_U5x2
:
3560 case AARCH64_OPND_SVE_ADDR_ZI_U5x4
:
3561 case AARCH64_OPND_SVE_ADDR_ZI_U5x8
:
3562 print_immediate_offset_address
3564 get_addr_sve_reg_name (opnd
->addr
.base_regno
, opnd
->qualifier
));
3567 case AARCH64_OPND_SVE_ADDR_ZZ_LSL
:
3568 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW
:
3569 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW
:
3570 print_register_offset_address
3572 get_addr_sve_reg_name (opnd
->addr
.base_regno
, opnd
->qualifier
),
3573 get_addr_sve_reg_name (opnd
->addr
.offset
.regno
, opnd
->qualifier
));
3576 case AARCH64_OPND_ADDR_UIMM12
:
3577 name
= get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1);
3578 if (opnd
->addr
.offset
.imm
)
3579 snprintf (buf
, size
, "[%s, #%d]", name
, opnd
->addr
.offset
.imm
);
3581 snprintf (buf
, size
, "[%s]", name
);
3584 case AARCH64_OPND_SYSREG
:
3585 for (i
= 0; aarch64_sys_regs
[i
].name
; ++i
)
3587 bfd_boolean exact_match
3588 = (aarch64_sys_regs
[i
].flags
& opnd
->sysreg
.flags
)
3589 == opnd
->sysreg
.flags
;
3591 /* Try and find an exact match, But if that fails, return the first
3592 partial match that was found. */
3593 if (aarch64_sys_regs
[i
].value
== opnd
->sysreg
.value
3594 && ! aarch64_sys_reg_deprecated_p (&aarch64_sys_regs
[i
])
3595 && (name
== NULL
|| exact_match
))
3597 name
= aarch64_sys_regs
[i
].name
;
3605 /* If we didn't match exactly, that means the presense of a flag
3606 indicates what we didn't want for this instruction. e.g. If
3607 F_REG_READ is there, that means we were looking for a write
3608 register. See aarch64_ext_sysreg. */
3609 if (aarch64_sys_regs
[i
].flags
& F_REG_WRITE
)
3610 *notes
= _("reading from a write-only register");
3611 else if (aarch64_sys_regs
[i
].flags
& F_REG_READ
)
3612 *notes
= _("writing to a read-only register");
3617 snprintf (buf
, size
, "%s", name
);
3620 /* Implementation defined system register. */
3621 unsigned int value
= opnd
->sysreg
.value
;
3622 snprintf (buf
, size
, "s%u_%u_c%u_c%u_%u", (value
>> 14) & 0x3,
3623 (value
>> 11) & 0x7, (value
>> 7) & 0xf, (value
>> 3) & 0xf,
3628 case AARCH64_OPND_PSTATEFIELD
:
3629 for (i
= 0; aarch64_pstatefields
[i
].name
; ++i
)
3630 if (aarch64_pstatefields
[i
].value
== opnd
->pstatefield
)
3632 assert (aarch64_pstatefields
[i
].name
);
3633 snprintf (buf
, size
, "%s", aarch64_pstatefields
[i
].name
);
3636 case AARCH64_OPND_SYSREG_AT
:
3637 case AARCH64_OPND_SYSREG_DC
:
3638 case AARCH64_OPND_SYSREG_IC
:
3639 case AARCH64_OPND_SYSREG_TLBI
:
3640 case AARCH64_OPND_SYSREG_SR
:
3641 snprintf (buf
, size
, "%s", opnd
->sysins_op
->name
);
3644 case AARCH64_OPND_BARRIER
:
3645 snprintf (buf
, size
, "%s", opnd
->barrier
->name
);
3648 case AARCH64_OPND_BARRIER_ISB
:
3649 /* Operand can be omitted, e.g. in DCPS1. */
3650 if (! optional_operand_p (opcode
, idx
)
3651 || (opnd
->barrier
->value
3652 != get_optional_operand_default_value (opcode
)))
3653 snprintf (buf
, size
, "#0x%x", opnd
->barrier
->value
);
3656 case AARCH64_OPND_PRFOP
:
3657 if (opnd
->prfop
->name
!= NULL
)
3658 snprintf (buf
, size
, "%s", opnd
->prfop
->name
);
3660 snprintf (buf
, size
, "#0x%02x", opnd
->prfop
->value
);
3663 case AARCH64_OPND_BARRIER_PSB
:
3664 case AARCH64_OPND_BTI_TARGET
:
3665 if ((HINT_FLAG (opnd
->hint_option
->value
) & HINT_OPD_F_NOPRINT
) == 0)
3666 snprintf (buf
, size
, "%s", opnd
->hint_option
->name
);
/* Encode the (op0, op1, CRn, CRm, op2) fields of a system-register name
   into the 14-bit value used in MRS/MSR instructions.  The fields are
   packed high-to-low and the final >> 5 drops the unused low bits so the
   result is the contiguous o0:op1:CRn:CRm:op2 encoding.  */
#define CPENC(op0,op1,crn,crm,op2) \
  ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
/* for 3.9.3 Instructions for Accessing Special Purpose Registers */
#define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
/* for 3.9.10 System Instructions */
#define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))
/* Table of all named system registers, mapping each name to its
   CPENC-packed (op0,op1,CRn,CRm,op2) encoding plus F_* flags
   (F_ARCHEXT: needs an architecture extension, checked by
   aarch64_sys_reg_supported_p; F_REG_READ / F_REG_WRITE: read-only /
   write-only; F_DEPRECATED: accepted but deprecated).  Terminated by a
   null-name sentinel entry.
   TODO: one more issue needs to be resolved:
   1. handle cpu-implementation-defined system registers.  */
const aarch64_sys_reg aarch64_sys_regs [] =
{
  /* Special-purpose registers (CPEN_ encodings).  */
  { "spsr_el1",         CPEN_(0,C0,0),  0 }, /* = spsr_svc */
  { "spsr_el12",        CPEN_ (5, C0, 0), F_ARCHEXT },
  { "elr_el1",          CPEN_(0,C0,1),  0 },
  { "elr_el12",         CPEN_ (5, C0, 1), F_ARCHEXT },
  { "sp_el0",           CPEN_(0,C1,0),  0 },
  { "spsel",            CPEN_(0,C2,0),  0 },
  { "daif",             CPEN_(3,C2,1),  0 },
  { "currentel",        CPEN_(0,C2,2),  F_REG_READ }, /* RO */
  { "pan",              CPEN_(0,C2,3),  F_ARCHEXT },
  { "uao",              CPEN_ (0, C2, 4), F_ARCHEXT },
  { "nzcv",             CPEN_(3,C2,0),  0 },
  { "ssbs",             CPEN_(3,C2,6),  F_ARCHEXT },
  { "fpcr",             CPEN_(3,C4,0),  0 },
  { "fpsr",             CPEN_(3,C4,1),  0 },
  { "dspsr_el0",        CPEN_(3,C5,0),  0 },
  { "dlr_el0",          CPEN_(3,C5,1),  0 },
  { "spsr_el2",         CPEN_(4,C0,0),  0 }, /* = spsr_hyp */
  { "elr_el2",          CPEN_(4,C0,1),  0 },
  { "sp_el1",           CPEN_(4,C1,0),  0 },
  { "spsr_irq",         CPEN_(4,C3,0),  0 },
  { "spsr_abt",         CPEN_(4,C3,1),  0 },
  { "spsr_und",         CPEN_(4,C3,2),  0 },
  { "spsr_fiq",         CPEN_(4,C3,3),  0 },
  { "spsr_el3",         CPEN_(6,C0,0),  0 },
  { "elr_el3",          CPEN_(6,C0,1),  0 },
  { "sp_el2",           CPEN_(6,C1,0),  0 },
  { "spsr_svc",         CPEN_(0,C0,0),  F_DEPRECATED }, /* = spsr_el1 */
  { "spsr_hyp",         CPEN_(4,C0,0),  F_DEPRECATED }, /* = spsr_el2 */
  /* Identification registers (mostly read-only).  */
  { "midr_el1",         CPENC(3,0,C0,C0,0),  F_REG_READ }, /* RO */
  { "ctr_el0",          CPENC(3,3,C0,C0,1),  F_REG_READ }, /* RO */
  { "mpidr_el1",        CPENC(3,0,C0,C0,5),  F_REG_READ }, /* RO */
  { "revidr_el1",       CPENC(3,0,C0,C0,6),  F_REG_READ }, /* RO */
  { "aidr_el1",         CPENC(3,1,C0,C0,7),  F_REG_READ }, /* RO */
  { "dczid_el0",        CPENC(3,3,C0,C0,7),  F_REG_READ }, /* RO */
  { "id_dfr0_el1",      CPENC(3,0,C0,C1,2),  F_REG_READ }, /* RO */
  { "id_pfr0_el1",      CPENC(3,0,C0,C1,0),  F_REG_READ }, /* RO */
  { "id_pfr1_el1",      CPENC(3,0,C0,C1,1),  F_REG_READ }, /* RO */
  { "id_pfr2_el1",      CPENC(3,0,C0,C3,4),  F_ARCHEXT | F_REG_READ }, /* RO */
  { "id_afr0_el1",      CPENC(3,0,C0,C1,3),  F_REG_READ }, /* RO */
  { "id_mmfr0_el1",     CPENC(3,0,C0,C1,4),  F_REG_READ }, /* RO */
  { "id_mmfr1_el1",     CPENC(3,0,C0,C1,5),  F_REG_READ }, /* RO */
  { "id_mmfr2_el1",     CPENC(3,0,C0,C1,6),  F_REG_READ }, /* RO */
  { "id_mmfr3_el1",     CPENC(3,0,C0,C1,7),  F_REG_READ }, /* RO */
  { "id_mmfr4_el1",     CPENC(3,0,C0,C2,6),  F_REG_READ }, /* RO */
  { "id_isar0_el1",     CPENC(3,0,C0,C2,0),  F_REG_READ }, /* RO */
  { "id_isar1_el1",     CPENC(3,0,C0,C2,1),  F_REG_READ }, /* RO */
  { "id_isar2_el1",     CPENC(3,0,C0,C2,2),  F_REG_READ }, /* RO */
  { "id_isar3_el1",     CPENC(3,0,C0,C2,3),  F_REG_READ }, /* RO */
  { "id_isar4_el1",     CPENC(3,0,C0,C2,4),  F_REG_READ }, /* RO */
  { "id_isar5_el1",     CPENC(3,0,C0,C2,5),  F_REG_READ }, /* RO */
  { "mvfr0_el1",        CPENC(3,0,C0,C3,0),  F_REG_READ }, /* RO */
  { "mvfr1_el1",        CPENC(3,0,C0,C3,1),  F_REG_READ }, /* RO */
  { "mvfr2_el1",        CPENC(3,0,C0,C3,2),  F_REG_READ }, /* RO */
  { "ccsidr_el1",       CPENC(3,1,C0,C0,0),  F_REG_READ }, /* RO */
  { "id_aa64pfr0_el1",  CPENC(3,0,C0,C4,0),  F_REG_READ }, /* RO */
  { "id_aa64pfr1_el1",  CPENC(3,0,C0,C4,1),  F_REG_READ }, /* RO */
  { "id_aa64dfr0_el1",  CPENC(3,0,C0,C5,0),  F_REG_READ }, /* RO */
  { "id_aa64dfr1_el1",  CPENC(3,0,C0,C5,1),  F_REG_READ }, /* RO */
  { "id_aa64isar0_el1", CPENC(3,0,C0,C6,0),  F_REG_READ }, /* RO */
  { "id_aa64isar1_el1", CPENC(3,0,C0,C6,1),  F_REG_READ }, /* RO */
  { "id_aa64mmfr0_el1", CPENC(3,0,C0,C7,0),  F_REG_READ }, /* RO */
  { "id_aa64mmfr1_el1", CPENC(3,0,C0,C7,1),  F_REG_READ }, /* RO */
  { "id_aa64mmfr2_el1", CPENC (3, 0, C0, C7, 2), F_ARCHEXT | F_REG_READ }, /* RO */
  { "id_aa64afr0_el1",  CPENC(3,0,C0,C5,4),  F_REG_READ }, /* RO */
  { "id_aa64afr1_el1",  CPENC(3,0,C0,C5,5),  F_REG_READ }, /* RO */
  { "id_aa64zfr0_el1",  CPENC (3, 0, C0, C4, 4), F_ARCHEXT | F_REG_READ }, /* RO */
  { "clidr_el1",        CPENC(3,1,C0,C0,1),  F_REG_READ }, /* RO */
  { "csselr_el1",       CPENC(3,2,C0,C0,0),  0 },
  { "vpidr_el2",        CPENC(3,4,C0,C0,0),  0 },
  { "vmpidr_el2",       CPENC(3,4,C0,C0,5),  0 },
  /* System control and translation registers.  */
  { "sctlr_el1",        CPENC(3,0,C1,C0,0),  0 },
  { "sctlr_el2",        CPENC(3,4,C1,C0,0),  0 },
  { "sctlr_el3",        CPENC(3,6,C1,C0,0),  0 },
  { "sctlr_el12",       CPENC (3, 5, C1, C0, 0), F_ARCHEXT },
  { "actlr_el1",        CPENC(3,0,C1,C0,1),  0 },
  { "actlr_el2",        CPENC(3,4,C1,C0,1),  0 },
  { "actlr_el3",        CPENC(3,6,C1,C0,1),  0 },
  { "cpacr_el1",        CPENC(3,0,C1,C0,2),  0 },
  { "cpacr_el12",       CPENC (3, 5, C1, C0, 2), F_ARCHEXT },
  { "cptr_el2",         CPENC(3,4,C1,C1,2),  0 },
  { "cptr_el3",         CPENC(3,6,C1,C1,2),  0 },
  { "scr_el3",          CPENC(3,6,C1,C1,0),  0 },
  { "hcr_el2",          CPENC(3,4,C1,C1,0),  0 },
  { "mdcr_el2",         CPENC(3,4,C1,C1,1),  0 },
  { "mdcr_el3",         CPENC(3,6,C1,C3,1),  0 },
  { "hstr_el2",         CPENC(3,4,C1,C1,3),  0 },
  { "hacr_el2",         CPENC(3,4,C1,C1,7),  0 },
  /* SVE registers (F_ARCHEXT: AARCH64_FEATURE_SVE).  */
  { "zcr_el1",          CPENC (3, 0, C1, C2, 0), F_ARCHEXT },
  { "zcr_el12",         CPENC (3, 5, C1, C2, 0), F_ARCHEXT },
  { "zcr_el2",          CPENC (3, 4, C1, C2, 0), F_ARCHEXT },
  { "zcr_el3",          CPENC (3, 6, C1, C2, 0), F_ARCHEXT },
  { "zidr_el1",         CPENC (3, 0, C0, C0, 7), F_ARCHEXT },
  { "ttbr0_el1",        CPENC(3,0,C2,C0,0),  0 },
  { "ttbr1_el1",        CPENC(3,0,C2,C0,1),  0 },
  { "ttbr0_el2",        CPENC(3,4,C2,C0,0),  0 },
  { "ttbr1_el2",        CPENC (3, 4, C2, C0, 1), F_ARCHEXT },
  { "ttbr0_el3",        CPENC(3,6,C2,C0,0),  0 },
  { "ttbr0_el12",       CPENC (3, 5, C2, C0, 0), F_ARCHEXT },
  { "ttbr1_el12",       CPENC (3, 5, C2, C0, 1), F_ARCHEXT },
  { "vttbr_el2",        CPENC(3,4,C2,C1,0),  0 },
  { "tcr_el1",          CPENC(3,0,C2,C0,2),  0 },
  { "tcr_el2",          CPENC(3,4,C2,C0,2),  0 },
  { "tcr_el3",          CPENC(3,6,C2,C0,2),  0 },
  { "tcr_el12",         CPENC (3, 5, C2, C0, 2), F_ARCHEXT },
  { "vtcr_el2",         CPENC(3,4,C2,C1,2),  0 },
  /* ARMv8.3 pointer-authentication key registers.  */
  { "apiakeylo_el1",    CPENC (3, 0, C2, C1, 0), F_ARCHEXT },
  { "apiakeyhi_el1",    CPENC (3, 0, C2, C1, 1), F_ARCHEXT },
  { "apibkeylo_el1",    CPENC (3, 0, C2, C1, 2), F_ARCHEXT },
  { "apibkeyhi_el1",    CPENC (3, 0, C2, C1, 3), F_ARCHEXT },
  { "apdakeylo_el1",    CPENC (3, 0, C2, C2, 0), F_ARCHEXT },
  { "apdakeyhi_el1",    CPENC (3, 0, C2, C2, 1), F_ARCHEXT },
  { "apdbkeylo_el1",    CPENC (3, 0, C2, C2, 2), F_ARCHEXT },
  { "apdbkeyhi_el1",    CPENC (3, 0, C2, C2, 3), F_ARCHEXT },
  { "apgakeylo_el1",    CPENC (3, 0, C2, C3, 0), F_ARCHEXT },
  { "apgakeyhi_el1",    CPENC (3, 0, C2, C3, 1), F_ARCHEXT },
  /* Fault status/address registers.  */
  { "afsr0_el1",        CPENC(3,0,C5,C1,0),  0 },
  { "afsr1_el1",        CPENC(3,0,C5,C1,1),  0 },
  { "afsr0_el2",        CPENC(3,4,C5,C1,0),  0 },
  { "afsr1_el2",        CPENC(3,4,C5,C1,1),  0 },
  { "afsr0_el3",        CPENC(3,6,C5,C1,0),  0 },
  { "afsr0_el12",       CPENC (3, 5, C5, C1, 0), F_ARCHEXT },
  { "afsr1_el3",        CPENC(3,6,C5,C1,1),  0 },
  { "afsr1_el12",       CPENC (3, 5, C5, C1, 1), F_ARCHEXT },
  { "esr_el1",          CPENC(3,0,C5,C2,0),  0 },
  { "esr_el2",          CPENC(3,4,C5,C2,0),  0 },
  { "esr_el3",          CPENC(3,6,C5,C2,0),  0 },
  { "esr_el12",         CPENC (3, 5, C5, C2, 0), F_ARCHEXT },
  { "vsesr_el2",        CPENC (3, 4, C5, C2, 3), F_ARCHEXT },
  { "fpexc32_el2",      CPENC(3,4,C5,C3,0),  0 },
  /* RAS extension registers.  */
  { "erridr_el1",       CPENC (3, 0, C5, C3, 0), F_ARCHEXT | F_REG_READ }, /* RO */
  { "errselr_el1",      CPENC (3, 0, C5, C3, 1), F_ARCHEXT },
  { "erxfr_el1",        CPENC (3, 0, C5, C4, 0), F_ARCHEXT | F_REG_READ }, /* RO */
  { "erxctlr_el1",      CPENC (3, 0, C5, C4, 1), F_ARCHEXT },
  { "erxstatus_el1",    CPENC (3, 0, C5, C4, 2), F_ARCHEXT },
  { "erxaddr_el1",      CPENC (3, 0, C5, C4, 3), F_ARCHEXT },
  { "erxmisc0_el1",     CPENC (3, 0, C5, C5, 0), F_ARCHEXT },
  { "erxmisc1_el1",     CPENC (3, 0, C5, C5, 1), F_ARCHEXT },
  { "far_el1",          CPENC(3,0,C6,C0,0),  0 },
  { "far_el2",          CPENC(3,4,C6,C0,0),  0 },
  { "far_el3",          CPENC(3,6,C6,C0,0),  0 },
  { "far_el12",         CPENC (3, 5, C6, C0, 0), F_ARCHEXT },
  { "hpfar_el2",        CPENC(3,4,C6,C0,4),  0 },
  { "par_el1",          CPENC(3,0,C7,C4,0),  0 },
  /* Memory attribute and vector base registers.  */
  { "mair_el1",         CPENC(3,0,C10,C2,0), 0 },
  { "mair_el2",         CPENC(3,4,C10,C2,0), 0 },
  { "mair_el3",         CPENC(3,6,C10,C2,0), 0 },
  { "mair_el12",        CPENC (3, 5, C10, C2, 0), F_ARCHEXT },
  { "amair_el1",        CPENC(3,0,C10,C3,0), 0 },
  { "amair_el2",        CPENC(3,4,C10,C3,0), 0 },
  { "amair_el3",        CPENC(3,6,C10,C3,0), 0 },
  { "amair_el12",       CPENC (3, 5, C10, C3, 0), F_ARCHEXT },
  { "vbar_el1",         CPENC(3,0,C12,C0,0), 0 },
  { "vbar_el2",         CPENC(3,4,C12,C0,0), 0 },
  { "vbar_el3",         CPENC(3,6,C12,C0,0), 0 },
  { "vbar_el12",        CPENC (3, 5, C12, C0, 0), F_ARCHEXT },
  { "rvbar_el1",        CPENC(3,0,C12,C0,1), F_REG_READ }, /* RO */
  { "rvbar_el2",        CPENC(3,4,C12,C0,1), F_REG_READ }, /* RO */
  { "rvbar_el3",        CPENC(3,6,C12,C0,1), F_REG_READ }, /* RO */
  { "rmr_el1",          CPENC(3,0,C12,C0,2), 0 },
  { "rmr_el2",          CPENC(3,4,C12,C0,2), 0 },
  { "rmr_el3",          CPENC(3,6,C12,C0,2), 0 },
  { "isr_el1",          CPENC(3,0,C12,C1,0), F_REG_READ }, /* RO */
  { "disr_el1",         CPENC (3, 0, C12, C1, 1), F_ARCHEXT },
  { "vdisr_el2",        CPENC (3, 4, C12, C1, 1), F_ARCHEXT },
  /* Context ID, random number and thread ID registers.  */
  { "contextidr_el1",   CPENC(3,0,C13,C0,1), 0 },
  { "contextidr_el2",   CPENC (3, 4, C13, C0, 1), F_ARCHEXT },
  { "contextidr_el12",  CPENC (3, 5, C13, C0, 1), F_ARCHEXT },
  { "rndr",             CPENC(3,3,C2,C4,0),  F_ARCHEXT | F_REG_READ }, /* RO */
  { "rndrrs",           CPENC(3,3,C2,C4,1),  F_ARCHEXT | F_REG_READ }, /* RO */
  { "tpidr_el0",        CPENC(3,3,C13,C0,2), 0 },
  { "tpidrro_el0",      CPENC(3,3,C13,C0,3), 0 }, /* RW */
  { "tpidr_el1",        CPENC(3,0,C13,C0,4), 0 },
  { "tpidr_el2",        CPENC(3,4,C13,C0,2), 0 },
  { "tpidr_el3",        CPENC(3,6,C13,C0,2), 0 },
  { "scxtnum_el0",      CPENC(3,3,C13,C0,7), F_ARCHEXT },
  { "scxtnum_el1",      CPENC(3,0,C13,C0,7), F_ARCHEXT },
  { "scxtnum_el2",      CPENC(3,4,C13,C0,7), F_ARCHEXT },
  { "scxtnum_el12",     CPENC(3,5,C13,C0,7), F_ARCHEXT },
  { "scxtnum_el3",      CPENC(3,6,C13,C0,7), F_ARCHEXT },
  { "teecr32_el1",      CPENC(2,2,C0, C0,0), 0 }, /* See section 3.9.7.1 */
  /* Generic timer registers.  */
  { "cntfrq_el0",       CPENC(3,3,C14,C0,0), 0 }, /* RW */
  { "cntpct_el0",       CPENC(3,3,C14,C0,1), F_REG_READ }, /* RO */
  { "cntvct_el0",       CPENC(3,3,C14,C0,2), F_REG_READ }, /* RO */
  { "cntvoff_el2",      CPENC(3,4,C14,C0,3), 0 },
  { "cntkctl_el1",      CPENC(3,0,C14,C1,0), 0 },
  { "cntkctl_el12",     CPENC (3, 5, C14, C1, 0), F_ARCHEXT },
  { "cnthctl_el2",      CPENC(3,4,C14,C1,0), 0 },
  { "cntp_tval_el0",    CPENC(3,3,C14,C2,0), 0 },
  { "cntp_tval_el02",   CPENC (3, 5, C14, C2, 0), F_ARCHEXT },
  { "cntp_ctl_el0",     CPENC(3,3,C14,C2,1), 0 },
  { "cntp_ctl_el02",    CPENC (3, 5, C14, C2, 1), F_ARCHEXT },
  { "cntp_cval_el0",    CPENC(3,3,C14,C2,2), 0 },
  { "cntp_cval_el02",   CPENC (3, 5, C14, C2, 2), F_ARCHEXT },
  { "cntv_tval_el0",    CPENC(3,3,C14,C3,0), 0 },
  { "cntv_tval_el02",   CPENC (3, 5, C14, C3, 0), F_ARCHEXT },
  { "cntv_ctl_el0",     CPENC(3,3,C14,C3,1), 0 },
  { "cntv_ctl_el02",    CPENC (3, 5, C14, C3, 1), F_ARCHEXT },
  { "cntv_cval_el0",    CPENC(3,3,C14,C3,2), 0 },
  { "cntv_cval_el02",   CPENC (3, 5, C14, C3, 2), F_ARCHEXT },
  { "cnthp_tval_el2",   CPENC(3,4,C14,C2,0), 0 },
  { "cnthp_ctl_el2",    CPENC(3,4,C14,C2,1), 0 },
  { "cnthp_cval_el2",   CPENC(3,4,C14,C2,2), 0 },
  { "cntps_tval_el1",   CPENC(3,7,C14,C2,0), 0 },
  { "cntps_ctl_el1",    CPENC(3,7,C14,C2,1), 0 },
  { "cntps_cval_el1",   CPENC(3,7,C14,C2,2), 0 },
  { "cnthv_tval_el2",   CPENC (3, 4, C14, C3, 0), F_ARCHEXT },
  { "cnthv_ctl_el2",    CPENC (3, 4, C14, C3, 1), F_ARCHEXT },
  { "cnthv_cval_el2",   CPENC (3, 4, C14, C3, 2), F_ARCHEXT },
  { "dacr32_el2",       CPENC(3,4,C3,C0,0),  0 },
  { "ifsr32_el2",       CPENC(3,4,C5,C0,1),  0 },
  { "teehbr32_el1",     CPENC(2,2,C1,C0,0),  0 },
  { "sder32_el3",       CPENC(3,6,C1,C1,1),  0 },
  /* Debug registers (op0 == 2).  */
  { "mdscr_el1",        CPENC(2,0,C0, C2, 2), 0 },
  { "mdccsr_el0",       CPENC(2,3,C0, C1, 0), F_REG_READ  }, /* r */
  { "mdccint_el1",      CPENC(2,0,C0, C2, 0), 0 },
  { "dbgdtr_el0",       CPENC(2,3,C0, C4, 0), 0 },
  { "dbgdtrrx_el0",     CPENC(2,3,C0, C5, 0), F_REG_READ  }, /* r */
  { "dbgdtrtx_el0",     CPENC(2,3,C0, C5, 0), F_REG_WRITE }, /* w */
  { "osdtrrx_el1",      CPENC(2,0,C0, C0, 2), 0 },
  { "osdtrtx_el1",      CPENC(2,0,C0, C3, 2), 0 },
  { "oseccr_el1",       CPENC(2,0,C0, C6, 2), 0 },
  { "dbgvcr32_el2",     CPENC(2,4,C0, C7, 0), 0 },
  { "dbgbvr0_el1",      CPENC(2,0,C0, C0, 4), 0 },
  { "dbgbvr1_el1",      CPENC(2,0,C0, C1, 4), 0 },
  { "dbgbvr2_el1",      CPENC(2,0,C0, C2, 4), 0 },
  { "dbgbvr3_el1",      CPENC(2,0,C0, C3, 4), 0 },
  { "dbgbvr4_el1",      CPENC(2,0,C0, C4, 4), 0 },
  { "dbgbvr5_el1",      CPENC(2,0,C0, C5, 4), 0 },
  { "dbgbvr6_el1",      CPENC(2,0,C0, C6, 4), 0 },
  { "dbgbvr7_el1",      CPENC(2,0,C0, C7, 4), 0 },
  { "dbgbvr8_el1",      CPENC(2,0,C0, C8, 4), 0 },
  { "dbgbvr9_el1",      CPENC(2,0,C0, C9, 4), 0 },
  { "dbgbvr10_el1",     CPENC(2,0,C0, C10,4), 0 },
  { "dbgbvr11_el1",     CPENC(2,0,C0, C11,4), 0 },
  { "dbgbvr12_el1",     CPENC(2,0,C0, C12,4), 0 },
  { "dbgbvr13_el1",     CPENC(2,0,C0, C13,4), 0 },
  { "dbgbvr14_el1",     CPENC(2,0,C0, C14,4), 0 },
  { "dbgbvr15_el1",     CPENC(2,0,C0, C15,4), 0 },
  { "dbgbcr0_el1",      CPENC(2,0,C0, C0, 5), 0 },
  { "dbgbcr1_el1",      CPENC(2,0,C0, C1, 5), 0 },
  { "dbgbcr2_el1",      CPENC(2,0,C0, C2, 5), 0 },
  { "dbgbcr3_el1",      CPENC(2,0,C0, C3, 5), 0 },
  { "dbgbcr4_el1",      CPENC(2,0,C0, C4, 5), 0 },
  { "dbgbcr5_el1",      CPENC(2,0,C0, C5, 5), 0 },
  { "dbgbcr6_el1",      CPENC(2,0,C0, C6, 5), 0 },
  { "dbgbcr7_el1",      CPENC(2,0,C0, C7, 5), 0 },
  { "dbgbcr8_el1",      CPENC(2,0,C0, C8, 5), 0 },
  { "dbgbcr9_el1",      CPENC(2,0,C0, C9, 5), 0 },
  { "dbgbcr10_el1",     CPENC(2,0,C0, C10,5), 0 },
  { "dbgbcr11_el1",     CPENC(2,0,C0, C11,5), 0 },
  { "dbgbcr12_el1",     CPENC(2,0,C0, C12,5), 0 },
  { "dbgbcr13_el1",     CPENC(2,0,C0, C13,5), 0 },
  { "dbgbcr14_el1",     CPENC(2,0,C0, C14,5), 0 },
  { "dbgbcr15_el1",     CPENC(2,0,C0, C15,5), 0 },
  { "dbgwvr0_el1",      CPENC(2,0,C0, C0, 6), 0 },
  { "dbgwvr1_el1",      CPENC(2,0,C0, C1, 6), 0 },
  { "dbgwvr2_el1",      CPENC(2,0,C0, C2, 6), 0 },
  { "dbgwvr3_el1",      CPENC(2,0,C0, C3, 6), 0 },
  { "dbgwvr4_el1",      CPENC(2,0,C0, C4, 6), 0 },
  { "dbgwvr5_el1",      CPENC(2,0,C0, C5, 6), 0 },
  { "dbgwvr6_el1",      CPENC(2,0,C0, C6, 6), 0 },
  { "dbgwvr7_el1",      CPENC(2,0,C0, C7, 6), 0 },
  { "dbgwvr8_el1",      CPENC(2,0,C0, C8, 6), 0 },
  { "dbgwvr9_el1",      CPENC(2,0,C0, C9, 6), 0 },
  { "dbgwvr10_el1",     CPENC(2,0,C0, C10,6), 0 },
  { "dbgwvr11_el1",     CPENC(2,0,C0, C11,6), 0 },
  { "dbgwvr12_el1",     CPENC(2,0,C0, C12,6), 0 },
  { "dbgwvr13_el1",     CPENC(2,0,C0, C13,6), 0 },
  { "dbgwvr14_el1",     CPENC(2,0,C0, C14,6), 0 },
  { "dbgwvr15_el1",     CPENC(2,0,C0, C15,6), 0 },
  { "dbgwcr0_el1",      CPENC(2,0,C0, C0, 7), 0 },
  { "dbgwcr1_el1",      CPENC(2,0,C0, C1, 7), 0 },
  { "dbgwcr2_el1",      CPENC(2,0,C0, C2, 7), 0 },
  { "dbgwcr3_el1",      CPENC(2,0,C0, C3, 7), 0 },
  { "dbgwcr4_el1",      CPENC(2,0,C0, C4, 7), 0 },
  { "dbgwcr5_el1",      CPENC(2,0,C0, C5, 7), 0 },
  { "dbgwcr6_el1",      CPENC(2,0,C0, C6, 7), 0 },
  { "dbgwcr7_el1",      CPENC(2,0,C0, C7, 7), 0 },
  { "dbgwcr8_el1",      CPENC(2,0,C0, C8, 7), 0 },
  { "dbgwcr9_el1",      CPENC(2,0,C0, C9, 7), 0 },
  { "dbgwcr10_el1",     CPENC(2,0,C0, C10,7), 0 },
  { "dbgwcr11_el1",     CPENC(2,0,C0, C11,7), 0 },
  { "dbgwcr12_el1",     CPENC(2,0,C0, C12,7), 0 },
  { "dbgwcr13_el1",     CPENC(2,0,C0, C13,7), 0 },
  { "dbgwcr14_el1",     CPENC(2,0,C0, C14,7), 0 },
  { "dbgwcr15_el1",     CPENC(2,0,C0, C15,7), 0 },
  { "mdrar_el1",        CPENC(2,0,C1, C0, 0), F_REG_READ  }, /* r */
  { "oslar_el1",        CPENC(2,0,C1, C0, 4), F_REG_WRITE }, /* w */
  { "oslsr_el1",        CPENC(2,0,C1, C1, 4), F_REG_READ  }, /* r */
  { "osdlr_el1",        CPENC(2,0,C1, C3, 4), 0 },
  { "dbgprcr_el1",      CPENC(2,0,C1, C4, 4), 0 },
  { "dbgclaimset_el1",  CPENC(2,0,C7, C8, 6), 0 },
  { "dbgclaimclr_el1",  CPENC(2,0,C7, C9, 6), 0 },
  { "dbgauthstatus_el1", CPENC(2,0,C7, C14,6), F_REG_READ }, /* r */
  /* Statistical Profiling extension registers.  */
  { "pmblimitr_el1",    CPENC (3, 0, C9, C10, 0), F_ARCHEXT },  /* rw */
  { "pmbptr_el1",       CPENC (3, 0, C9, C10, 1), F_ARCHEXT },  /* rw */
  { "pmbsr_el1",        CPENC (3, 0, C9, C10, 3), F_ARCHEXT },  /* rw */
  { "pmbidr_el1",       CPENC (3, 0, C9, C10, 7), F_ARCHEXT | F_REG_READ },  /* ro */
  { "pmscr_el1",        CPENC (3, 0, C9, C9, 0),  F_ARCHEXT },  /* rw */
  { "pmsicr_el1",       CPENC (3, 0, C9, C9, 2),  F_ARCHEXT },  /* rw */
  { "pmsirr_el1",       CPENC (3, 0, C9, C9, 3),  F_ARCHEXT },  /* rw */
  { "pmsfcr_el1",       CPENC (3, 0, C9, C9, 4),  F_ARCHEXT },  /* rw */
  { "pmsevfr_el1",      CPENC (3, 0, C9, C9, 5),  F_ARCHEXT },  /* rw */
  { "pmslatfr_el1",     CPENC (3, 0, C9, C9, 6),  F_ARCHEXT },  /* rw */
  { "pmsidr_el1",       CPENC (3, 0, C9, C9, 7),  F_ARCHEXT },  /* rw */
  { "pmscr_el2",        CPENC (3, 4, C9, C9, 0),  F_ARCHEXT },  /* rw */
  { "pmscr_el12",       CPENC (3, 5, C9, C9, 0),  F_ARCHEXT },  /* rw */
  /* Performance monitor registers.  */
  { "pmcr_el0",         CPENC(3,3,C9,C12, 0), 0 },
  { "pmcntenset_el0",   CPENC(3,3,C9,C12, 1), 0 },
  { "pmcntenclr_el0",   CPENC(3,3,C9,C12, 2), 0 },
  { "pmovsclr_el0",     CPENC(3,3,C9,C12, 3), 0 },
  { "pmswinc_el0",      CPENC(3,3,C9,C12, 4), F_REG_WRITE }, /* w */
  { "pmselr_el0",       CPENC(3,3,C9,C12, 5), 0 },
  { "pmceid0_el0",      CPENC(3,3,C9,C12, 6), F_REG_READ  }, /* r */
  { "pmceid1_el0",      CPENC(3,3,C9,C12, 7), F_REG_READ  }, /* r */
  { "pmccntr_el0",      CPENC(3,3,C9,C13, 0), 0 },
  { "pmxevtyper_el0",   CPENC(3,3,C9,C13, 1), 0 },
  { "pmxevcntr_el0",    CPENC(3,3,C9,C13, 2), 0 },
  { "pmuserenr_el0",    CPENC(3,3,C9,C14, 0), 0 },
  { "pmintenset_el1",   CPENC(3,0,C9,C14, 1), 0 },
  { "pmintenclr_el1",   CPENC(3,0,C9,C14, 2), 0 },
  { "pmovsset_el0",     CPENC(3,3,C9,C14, 3), 0 },
  { "pmevcntr0_el0",    CPENC(3,3,C14,C8, 0), 0 },
  { "pmevcntr1_el0",    CPENC(3,3,C14,C8, 1), 0 },
  { "pmevcntr2_el0",    CPENC(3,3,C14,C8, 2), 0 },
  { "pmevcntr3_el0",    CPENC(3,3,C14,C8, 3), 0 },
  { "pmevcntr4_el0",    CPENC(3,3,C14,C8, 4), 0 },
  { "pmevcntr5_el0",    CPENC(3,3,C14,C8, 5), 0 },
  { "pmevcntr6_el0",    CPENC(3,3,C14,C8, 6), 0 },
  { "pmevcntr7_el0",    CPENC(3,3,C14,C8, 7), 0 },
  { "pmevcntr8_el0",    CPENC(3,3,C14,C9, 0), 0 },
  { "pmevcntr9_el0",    CPENC(3,3,C14,C9, 1), 0 },
  { "pmevcntr10_el0",   CPENC(3,3,C14,C9, 2), 0 },
  { "pmevcntr11_el0",   CPENC(3,3,C14,C9, 3), 0 },
  { "pmevcntr12_el0",   CPENC(3,3,C14,C9, 4), 0 },
  { "pmevcntr13_el0",   CPENC(3,3,C14,C9, 5), 0 },
  { "pmevcntr14_el0",   CPENC(3,3,C14,C9, 6), 0 },
  { "pmevcntr15_el0",   CPENC(3,3,C14,C9, 7), 0 },
  { "pmevcntr16_el0",   CPENC(3,3,C14,C10,0), 0 },
  { "pmevcntr17_el0",   CPENC(3,3,C14,C10,1), 0 },
  { "pmevcntr18_el0",   CPENC(3,3,C14,C10,2), 0 },
  { "pmevcntr19_el0",   CPENC(3,3,C14,C10,3), 0 },
  { "pmevcntr20_el0",   CPENC(3,3,C14,C10,4), 0 },
  { "pmevcntr21_el0",   CPENC(3,3,C14,C10,5), 0 },
  { "pmevcntr22_el0",   CPENC(3,3,C14,C10,6), 0 },
  { "pmevcntr23_el0",   CPENC(3,3,C14,C10,7), 0 },
  { "pmevcntr24_el0",   CPENC(3,3,C14,C11,0), 0 },
  { "pmevcntr25_el0",   CPENC(3,3,C14,C11,1), 0 },
  { "pmevcntr26_el0",   CPENC(3,3,C14,C11,2), 0 },
  { "pmevcntr27_el0",   CPENC(3,3,C14,C11,3), 0 },
  { "pmevcntr28_el0",   CPENC(3,3,C14,C11,4), 0 },
  { "pmevcntr29_el0",   CPENC(3,3,C14,C11,5), 0 },
  { "pmevcntr30_el0",   CPENC(3,3,C14,C11,6), 0 },
  { "pmevtyper0_el0",   CPENC(3,3,C14,C12,0), 0 },
  { "pmevtyper1_el0",   CPENC(3,3,C14,C12,1), 0 },
  { "pmevtyper2_el0",   CPENC(3,3,C14,C12,2), 0 },
  { "pmevtyper3_el0",   CPENC(3,3,C14,C12,3), 0 },
  { "pmevtyper4_el0",   CPENC(3,3,C14,C12,4), 0 },
  { "pmevtyper5_el0",   CPENC(3,3,C14,C12,5), 0 },
  { "pmevtyper6_el0",   CPENC(3,3,C14,C12,6), 0 },
  { "pmevtyper7_el0",   CPENC(3,3,C14,C12,7), 0 },
  { "pmevtyper8_el0",   CPENC(3,3,C14,C13,0), 0 },
  { "pmevtyper9_el0",   CPENC(3,3,C14,C13,1), 0 },
  { "pmevtyper10_el0",  CPENC(3,3,C14,C13,2), 0 },
  { "pmevtyper11_el0",  CPENC(3,3,C14,C13,3), 0 },
  { "pmevtyper12_el0",  CPENC(3,3,C14,C13,4), 0 },
  { "pmevtyper13_el0",  CPENC(3,3,C14,C13,5), 0 },
  { "pmevtyper14_el0",  CPENC(3,3,C14,C13,6), 0 },
  { "pmevtyper15_el0",  CPENC(3,3,C14,C13,7), 0 },
  { "pmevtyper16_el0",  CPENC(3,3,C14,C14,0), 0 },
  { "pmevtyper17_el0",  CPENC(3,3,C14,C14,1), 0 },
  { "pmevtyper18_el0",  CPENC(3,3,C14,C14,2), 0 },
  { "pmevtyper19_el0",  CPENC(3,3,C14,C14,3), 0 },
  { "pmevtyper20_el0",  CPENC(3,3,C14,C14,4), 0 },
  { "pmevtyper21_el0",  CPENC(3,3,C14,C14,5), 0 },
  { "pmevtyper22_el0",  CPENC(3,3,C14,C14,6), 0 },
  { "pmevtyper23_el0",  CPENC(3,3,C14,C14,7), 0 },
  { "pmevtyper24_el0",  CPENC(3,3,C14,C15,0), 0 },
  { "pmevtyper25_el0",  CPENC(3,3,C14,C15,1), 0 },
  { "pmevtyper26_el0",  CPENC(3,3,C14,C15,2), 0 },
  { "pmevtyper27_el0",  CPENC(3,3,C14,C15,3), 0 },
  { "pmevtyper28_el0",  CPENC(3,3,C14,C15,4), 0 },
  { "pmevtyper29_el0",  CPENC(3,3,C14,C15,5), 0 },
  { "pmevtyper30_el0",  CPENC(3,3,C14,C15,6), 0 },
  { "pmccfiltr_el0",    CPENC(3,3,C14,C15,7), 0 },
  /* ARMv8.4 registers.  */
  { "dit",              CPEN_ (3, C2, 5), F_ARCHEXT },
  { "vstcr_el2",        CPENC(3, 4, C2, C6, 2), F_ARCHEXT },
  { "vsttbr_el2",       CPENC(3, 4, C2, C6, 0), F_ARCHEXT },
  { "cnthvs_tval_el2",  CPENC(3, 4, C14, C4, 0), F_ARCHEXT },
  { "cnthvs_cval_el2",  CPENC(3, 4, C14, C4, 2), F_ARCHEXT },
  { "cnthvs_ctl_el2",   CPENC(3, 4, C14, C4, 1), F_ARCHEXT },
  { "cnthps_tval_el2",  CPENC(3, 4, C14, C5, 0), F_ARCHEXT },
  { "cnthps_cval_el2",  CPENC(3, 4, C14, C5, 2), F_ARCHEXT },
  { "cnthps_ctl_el2",   CPENC(3, 4, C14, C5, 1), F_ARCHEXT },
  { "sder32_el2",       CPENC(3, 4, C1, C3, 1), F_ARCHEXT },
  { "vncr_el2",         CPENC(3, 4, C2, C2, 0), F_ARCHEXT },
  /* Sentinel: terminates the table.  */
  { 0,                  CPENC(0,0,0,0,0), 0 },
};
4103 aarch64_sys_reg_deprecated_p (const aarch64_sys_reg
*reg
)
4105 return (reg
->flags
& F_DEPRECATED
) != 0;
4109 aarch64_sys_reg_supported_p (const aarch64_feature_set features
,
4110 const aarch64_sys_reg
*reg
)
4112 if (!(reg
->flags
& F_ARCHEXT
))
4115 /* PAN. Values are from aarch64_sys_regs. */
4116 if (reg
->value
== CPEN_(0,C2
,3)
4117 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_PAN
))
4120 /* SCXTNUM_ELx registers. */
4121 if ((reg
->value
== CPENC (3, 3, C13
, C0
, 7)
4122 || reg
->value
== CPENC (3, 0, C13
, C0
, 7)
4123 || reg
->value
== CPENC (3, 4, C13
, C0
, 7)
4124 || reg
->value
== CPENC (3, 6, C13
, C0
, 7)
4125 || reg
->value
== CPENC (3, 5, C13
, C0
, 7))
4126 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_SCXTNUM
))
4129 /* ID_PFR2_EL1 register. */
4130 if (reg
->value
== CPENC(3, 0, C0
, C3
, 4)
4131 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_ID_PFR2
))
4134 /* SSBS. Values are from aarch64_sys_regs. */
4135 if (reg
->value
== CPEN_(3,C2
,6)
4136 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_SSBS
))
4139 /* Virtualization host extensions: system registers. */
4140 if ((reg
->value
== CPENC (3, 4, C2
, C0
, 1)
4141 || reg
->value
== CPENC (3, 4, C13
, C0
, 1)
4142 || reg
->value
== CPENC (3, 4, C14
, C3
, 0)
4143 || reg
->value
== CPENC (3, 4, C14
, C3
, 1)
4144 || reg
->value
== CPENC (3, 4, C14
, C3
, 2))
4145 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_1
))
4148 /* Virtualization host extensions: *_el12 names of *_el1 registers. */
4149 if ((reg
->value
== CPEN_ (5, C0
, 0)
4150 || reg
->value
== CPEN_ (5, C0
, 1)
4151 || reg
->value
== CPENC (3, 5, C1
, C0
, 0)
4152 || reg
->value
== CPENC (3, 5, C1
, C0
, 2)
4153 || reg
->value
== CPENC (3, 5, C2
, C0
, 0)
4154 || reg
->value
== CPENC (3, 5, C2
, C0
, 1)
4155 || reg
->value
== CPENC (3, 5, C2
, C0
, 2)
4156 || reg
->value
== CPENC (3, 5, C5
, C1
, 0)
4157 || reg
->value
== CPENC (3, 5, C5
, C1
, 1)
4158 || reg
->value
== CPENC (3, 5, C5
, C2
, 0)
4159 || reg
->value
== CPENC (3, 5, C6
, C0
, 0)
4160 || reg
->value
== CPENC (3, 5, C10
, C2
, 0)
4161 || reg
->value
== CPENC (3, 5, C10
, C3
, 0)
4162 || reg
->value
== CPENC (3, 5, C12
, C0
, 0)
4163 || reg
->value
== CPENC (3, 5, C13
, C0
, 1)
4164 || reg
->value
== CPENC (3, 5, C14
, C1
, 0))
4165 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_1
))
4168 /* Virtualization host extensions: *_el02 names of *_el0 registers. */
4169 if ((reg
->value
== CPENC (3, 5, C14
, C2
, 0)
4170 || reg
->value
== CPENC (3, 5, C14
, C2
, 1)
4171 || reg
->value
== CPENC (3, 5, C14
, C2
, 2)
4172 || reg
->value
== CPENC (3, 5, C14
, C3
, 0)
4173 || reg
->value
== CPENC (3, 5, C14
, C3
, 1)
4174 || reg
->value
== CPENC (3, 5, C14
, C3
, 2))
4175 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_1
))
4178 /* ARMv8.2 features. */
4180 /* ID_AA64MMFR2_EL1. */
4181 if (reg
->value
== CPENC (3, 0, C0
, C7
, 2)
4182 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_2
))
4186 if (reg
->value
== CPEN_ (0, C2
, 4)
4187 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_2
))
4190 /* RAS extension. */
4192 /* ERRIDR_EL1, ERRSELR_EL1, ERXFR_EL1, ERXCTLR_EL1, ERXSTATUS_EL, ERXADDR_EL1,
4193 ERXMISC0_EL1 AND ERXMISC1_EL1. */
4194 if ((reg
->value
== CPENC (3, 0, C5
, C3
, 0)
4195 || reg
->value
== CPENC (3, 0, C5
, C3
, 1)
4196 || reg
->value
== CPENC (3, 0, C5
, C3
, 2)
4197 || reg
->value
== CPENC (3, 0, C5
, C3
, 3)
4198 || reg
->value
== CPENC (3, 0, C5
, C4
, 0)
4199 || reg
->value
== CPENC (3, 0, C5
, C4
, 1)
4200 || reg
->value
== CPENC (3, 0, C5
, C4
, 2)
4201 || reg
->value
== CPENC (3, 0, C5
, C4
, 3)
4202 || reg
->value
== CPENC (3, 0, C5
, C5
, 0)
4203 || reg
->value
== CPENC (3, 0, C5
, C5
, 1))
4204 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_RAS
))
4207 /* VSESR_EL2, DISR_EL1 and VDISR_EL2. */
4208 if ((reg
->value
== CPENC (3, 4, C5
, C2
, 3)
4209 || reg
->value
== CPENC (3, 0, C12
, C1
, 1)
4210 || reg
->value
== CPENC (3, 4, C12
, C1
, 1))
4211 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_RAS
))
4214 /* Statistical Profiling extension. */
4215 if ((reg
->value
== CPENC (3, 0, C9
, C10
, 0)
4216 || reg
->value
== CPENC (3, 0, C9
, C10
, 1)
4217 || reg
->value
== CPENC (3, 0, C9
, C10
, 3)
4218 || reg
->value
== CPENC (3, 0, C9
, C10
, 7)
4219 || reg
->value
== CPENC (3, 0, C9
, C9
, 0)
4220 || reg
->value
== CPENC (3, 0, C9
, C9
, 2)
4221 || reg
->value
== CPENC (3, 0, C9
, C9
, 3)
4222 || reg
->value
== CPENC (3, 0, C9
, C9
, 4)
4223 || reg
->value
== CPENC (3, 0, C9
, C9
, 5)
4224 || reg
->value
== CPENC (3, 0, C9
, C9
, 6)
4225 || reg
->value
== CPENC (3, 0, C9
, C9
, 7)
4226 || reg
->value
== CPENC (3, 4, C9
, C9
, 0)
4227 || reg
->value
== CPENC (3, 5, C9
, C9
, 0))
4228 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_PROFILE
))
4231 /* ARMv8.3 Pointer authentication keys. */
4232 if ((reg
->value
== CPENC (3, 0, C2
, C1
, 0)
4233 || reg
->value
== CPENC (3, 0, C2
, C1
, 1)
4234 || reg
->value
== CPENC (3, 0, C2
, C1
, 2)
4235 || reg
->value
== CPENC (3, 0, C2
, C1
, 3)
4236 || reg
->value
== CPENC (3, 0, C2
, C2
, 0)
4237 || reg
->value
== CPENC (3, 0, C2
, C2
, 1)
4238 || reg
->value
== CPENC (3, 0, C2
, C2
, 2)
4239 || reg
->value
== CPENC (3, 0, C2
, C2
, 3)
4240 || reg
->value
== CPENC (3, 0, C2
, C3
, 0)
4241 || reg
->value
== CPENC (3, 0, C2
, C3
, 1))
4242 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_3
))
4246 if ((reg
->value
== CPENC (3, 0, C0
, C4
, 4)
4247 || reg
->value
== CPENC (3, 0, C1
, C2
, 0)
4248 || reg
->value
== CPENC (3, 4, C1
, C2
, 0)
4249 || reg
->value
== CPENC (3, 6, C1
, C2
, 0)
4250 || reg
->value
== CPENC (3, 5, C1
, C2
, 0)
4251 || reg
->value
== CPENC (3, 0, C0
, C0
, 7))
4252 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_SVE
))
4255 /* ARMv8.4 features. */
4258 if (reg
->value
== CPEN_ (3, C2
, 5)
4259 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_4
))
4262 /* Virtualization extensions. */
4263 if ((reg
->value
== CPENC(3, 4, C2
, C6
, 2)
4264 || reg
->value
== CPENC(3, 4, C2
, C6
, 0)
4265 || reg
->value
== CPENC(3, 4, C14
, C4
, 0)
4266 || reg
->value
== CPENC(3, 4, C14
, C4
, 2)
4267 || reg
->value
== CPENC(3, 4, C14
, C4
, 1)
4268 || reg
->value
== CPENC(3, 4, C14
, C5
, 0)
4269 || reg
->value
== CPENC(3, 4, C14
, C5
, 2)
4270 || reg
->value
== CPENC(3, 4, C14
, C5
, 1)
4271 || reg
->value
== CPENC(3, 4, C1
, C3
, 1)
4272 || reg
->value
== CPENC(3, 4, C2
, C2
, 0))
4273 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_4
))
4276 /* ARMv8.4 TLB instructions. */
4277 if ((reg
->value
== CPENS (0, C8
, C1
, 0)
4278 || reg
->value
== CPENS (0, C8
, C1
, 1)
4279 || reg
->value
== CPENS (0, C8
, C1
, 2)
4280 || reg
->value
== CPENS (0, C8
, C1
, 3)
4281 || reg
->value
== CPENS (0, C8
, C1
, 5)
4282 || reg
->value
== CPENS (0, C8
, C1
, 7)
4283 || reg
->value
== CPENS (4, C8
, C4
, 0)
4284 || reg
->value
== CPENS (4, C8
, C4
, 4)
4285 || reg
->value
== CPENS (4, C8
, C1
, 1)
4286 || reg
->value
== CPENS (4, C8
, C1
, 5)
4287 || reg
->value
== CPENS (4, C8
, C1
, 6)
4288 || reg
->value
== CPENS (6, C8
, C1
, 1)
4289 || reg
->value
== CPENS (6, C8
, C1
, 5)
4290 || reg
->value
== CPENS (4, C8
, C1
, 0)
4291 || reg
->value
== CPENS (4, C8
, C1
, 4)
4292 || reg
->value
== CPENS (6, C8
, C1
, 0)
4293 || reg
->value
== CPENS (0, C8
, C6
, 1)
4294 || reg
->value
== CPENS (0, C8
, C6
, 3)
4295 || reg
->value
== CPENS (0, C8
, C6
, 5)
4296 || reg
->value
== CPENS (0, C8
, C6
, 7)
4297 || reg
->value
== CPENS (0, C8
, C2
, 1)
4298 || reg
->value
== CPENS (0, C8
, C2
, 3)
4299 || reg
->value
== CPENS (0, C8
, C2
, 5)
4300 || reg
->value
== CPENS (0, C8
, C2
, 7)
4301 || reg
->value
== CPENS (0, C8
, C5
, 1)
4302 || reg
->value
== CPENS (0, C8
, C5
, 3)
4303 || reg
->value
== CPENS (0, C8
, C5
, 5)
4304 || reg
->value
== CPENS (0, C8
, C5
, 7)
4305 || reg
->value
== CPENS (4, C8
, C0
, 2)
4306 || reg
->value
== CPENS (4, C8
, C0
, 6)
4307 || reg
->value
== CPENS (4, C8
, C4
, 2)
4308 || reg
->value
== CPENS (4, C8
, C4
, 6)
4309 || reg
->value
== CPENS (4, C8
, C4
, 3)
4310 || reg
->value
== CPENS (4, C8
, C4
, 7)
4311 || reg
->value
== CPENS (4, C8
, C6
, 1)
4312 || reg
->value
== CPENS (4, C8
, C6
, 5)
4313 || reg
->value
== CPENS (4, C8
, C2
, 1)
4314 || reg
->value
== CPENS (4, C8
, C2
, 5)
4315 || reg
->value
== CPENS (4, C8
, C5
, 1)
4316 || reg
->value
== CPENS (4, C8
, C5
, 5)
4317 || reg
->value
== CPENS (6, C8
, C6
, 1)
4318 || reg
->value
== CPENS (6, C8
, C6
, 5)
4319 || reg
->value
== CPENS (6, C8
, C2
, 1)
4320 || reg
->value
== CPENS (6, C8
, C2
, 5)
4321 || reg
->value
== CPENS (6, C8
, C5
, 1)
4322 || reg
->value
== CPENS (6, C8
, C5
, 5))
4323 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_4
))
4326 /* Random Number Instructions. For now they are available
4327 (and optional) only with ARMv8.5-A. */
4328 if ((reg
->value
== CPENC (3, 3, C2
, C4
, 0)
4329 || reg
->value
== CPENC (3, 3, C2
, C4
, 1))
4330 && !(AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_RNG
)
4331 && AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_5
)))
/* The CPENC below is fairly misleading, the fields
   here are not in CPENC form.  They are in op2op1 form.  The fields are encoded
   by ins_pstatefield, which just shifts the value by the width of the fields
   in a loop.  So if you CPENC them only the first value will be set, the rest
   are masked out to 0.  As an example.  op2 = 3, op1=2.  CPENC would produce a
   value of 0b110000000001000000 (0x30040) while what you want is
   0b011010 (0x1a).  */

/* PSTATE fields accessible via MSR (immediate).  Entries carrying F_ARCHEXT
   are gated on a CPU feature by aarch64_pstatefield_supported_p below.  */
const aarch64_sys_reg aarch64_pstatefields [] =
{
  { "spsel",            0x05,   0 },
  { "daifset",          0x1e,   0 },
  { "daifclr",          0x1f,   0 },
  { "pan",              0x04,   F_ARCHEXT },  /* Needs AARCH64_FEATURE_PAN.  */
  { "uao",              0x03,   F_ARCHEXT },  /* Needs AARCH64_FEATURE_V8_2.  */
  { "ssbs",             0x19,   F_ARCHEXT },  /* Needs AARCH64_FEATURE_SSBS.  */
  { "dit",              0x1a,   F_ARCHEXT },  /* Needs AARCH64_FEATURE_V8_4.  */
  { 0,          CPENC(0,0,0,0,0), 0 },
};
4357 aarch64_pstatefield_supported_p (const aarch64_feature_set features
,
4358 const aarch64_sys_reg
*reg
)
4360 if (!(reg
->flags
& F_ARCHEXT
))
4363 /* PAN. Values are from aarch64_pstatefields. */
4364 if (reg
->value
== 0x04
4365 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_PAN
))
4368 /* UAO. Values are from aarch64_pstatefields. */
4369 if (reg
->value
== 0x03
4370 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_2
))
4373 /* SSBS. Values are from aarch64_pstatefields. */
4374 if (reg
->value
== 0x19
4375 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_SSBS
))
4378 /* DIT. Values are from aarch64_pstatefields. */
4379 if (reg
->value
== 0x1a
4380 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_4
))
/* Operands of the IC (instruction cache maintenance) instruction.
   Encodings are CPENS (op1, CRn, CRm, op2); F_HASXT marks operations that
   take an <Xt> register operand (see aarch64_sys_ins_reg_has_xt).  */
const aarch64_sys_ins_reg aarch64_sys_regs_ic [] =
{
    { "ialluis", CPENS(0,C7,C1,0), 0 },
    { "iallu",   CPENS(0,C7,C5,0), 0 },
    { "ivau",    CPENS (3, C7, C5, 1), F_HASXT },
    { 0, CPENS(0,0,0,0), 0 }
};
/* Operands of the DC (data cache maintenance) instruction.
   Encodings are CPENS (op1, CRn, CRm, op2); F_HASXT marks operations that
   take an <Xt> register operand.  The F_ARCHEXT entries (cvap, cvadp) are
   gated on a CPU feature by aarch64_sys_ins_reg_supported_p below.  */
const aarch64_sys_ins_reg aarch64_sys_regs_dc [] =
{
    { "zva",	CPENS (3, C7, C4, 1),  F_HASXT },
    { "ivac",	CPENS (0, C7, C6, 1),  F_HASXT },
    { "isw",	CPENS (0, C7, C6, 2),  F_HASXT },
    { "cvac",	CPENS (3, C7, C10, 1), F_HASXT },
    { "csw",	CPENS (0, C7, C10, 2), F_HASXT },
    { "cvau",	CPENS (3, C7, C11, 1), F_HASXT },
    { "cvap",	CPENS (3, C7, C12, 1), F_HASXT | F_ARCHEXT },  /* V8.2.  */
    { "cvadp",	CPENS (3, C7, C13, 1), F_HASXT | F_ARCHEXT },  /* CVADP.  */
    { "civac",	CPENS (3, C7, C14, 1), F_HASXT },
    { "cisw",	CPENS (0, C7, C14, 2), F_HASXT },
    { 0, CPENS(0,0,0,0), 0 }
};
/* Operands of the AT (address translation) instruction.
   Encodings are CPENS (op1, CRn, CRm, op2); all take an <Xt> operand.
   The F_ARCHEXT entries (s1e1rp, s1e1wp) are gated on AARCH64_FEATURE_V8_2
   by aarch64_sys_ins_reg_supported_p below.  */
const aarch64_sys_ins_reg aarch64_sys_regs_at [] =
{
    { "s1e1r",  CPENS (0, C7, C8, 0), F_HASXT },
    { "s1e1w",  CPENS (0, C7, C8, 1), F_HASXT },
    { "s1e0r",  CPENS (0, C7, C8, 2), F_HASXT },
    { "s1e0w",  CPENS (0, C7, C8, 3), F_HASXT },
    { "s12e1r", CPENS (4, C7, C8, 4), F_HASXT },
    { "s12e1w", CPENS (4, C7, C8, 5), F_HASXT },
    { "s12e0r", CPENS (4, C7, C8, 6), F_HASXT },
    { "s12e0w", CPENS (4, C7, C8, 7), F_HASXT },
    { "s1e2r",  CPENS (4, C7, C8, 0), F_HASXT },
    { "s1e2w",  CPENS (4, C7, C8, 1), F_HASXT },
    { "s1e3r",  CPENS (6, C7, C8, 0), F_HASXT },
    { "s1e3w",  CPENS (6, C7, C8, 1), F_HASXT },
    { "s1e1rp", CPENS (0, C7, C9, 0), F_HASXT | F_ARCHEXT },
    { "s1e1wp", CPENS (0, C7, C9, 1), F_HASXT | F_ARCHEXT },
    { 0, CPENS(0,0,0,0), 0 }
};
/* Operands of the TLBI (TLB invalidate) instruction.
   Encodings are CPENS (op1, CRn, CRm, op2); F_HASXT marks operations that
   take an <Xt> register operand.  All F_ARCHEXT entries below are gated on
   AARCH64_FEATURE_V8_4 by the "ARMv8.4 TLB instructions" check earlier in
   this file.  */
const aarch64_sys_ins_reg aarch64_sys_regs_tlbi [] =
{
    { "vmalle1",    CPENS(0,C8,C7,0), 0 },
    { "vae1",       CPENS (0, C8, C7, 1), F_HASXT },
    { "aside1",     CPENS (0, C8, C7, 2), F_HASXT },
    { "vaae1",      CPENS (0, C8, C7, 3), F_HASXT },
    { "vmalle1is",  CPENS(0,C8,C3,0), 0 },
    { "vae1is",     CPENS (0, C8, C3, 1), F_HASXT },
    { "aside1is",   CPENS (0, C8, C3, 2), F_HASXT },
    { "vaae1is",    CPENS (0, C8, C3, 3), F_HASXT },
    { "ipas2e1is",  CPENS (4, C8, C0, 1), F_HASXT },
    { "ipas2le1is", CPENS (4, C8, C0, 5), F_HASXT },
    { "ipas2e1",    CPENS (4, C8, C4, 1), F_HASXT },
    { "ipas2le1",   CPENS (4, C8, C4, 5), F_HASXT },
    { "vae2",       CPENS (4, C8, C7, 1), F_HASXT },
    { "vae2is",     CPENS (4, C8, C3, 1), F_HASXT },
    { "vmalls12e1", CPENS(4,C8,C7,6), 0 },
    { "vmalls12e1is", CPENS(4,C8,C3,6), 0 },
    { "vae3",       CPENS (6, C8, C7, 1), F_HASXT },
    { "vae3is",     CPENS (6, C8, C3, 1), F_HASXT },
    { "alle2",      CPENS(4,C8,C7,0), 0 },
    { "alle2is",    CPENS(4,C8,C3,0), 0 },
    { "alle1",      CPENS(4,C8,C7,4), 0 },
    { "alle1is",    CPENS(4,C8,C3,4), 0 },
    { "alle3",      CPENS(6,C8,C7,0), 0 },
    { "alle3is",    CPENS(6,C8,C3,0), 0 },
    { "vale1is",    CPENS (0, C8, C3, 5), F_HASXT },
    { "vale2is",    CPENS (4, C8, C3, 5), F_HASXT },
    { "vale3is",    CPENS (6, C8, C3, 5), F_HASXT },
    { "vaale1is",   CPENS (0, C8, C3, 7), F_HASXT },
    { "vale1",      CPENS (0, C8, C7, 5), F_HASXT },
    { "vale2",      CPENS (4, C8, C7, 5), F_HASXT },
    { "vale3",      CPENS (6, C8, C7, 5), F_HASXT },
    { "vaale1",     CPENS (0, C8, C7, 7), F_HASXT },

    /* ARMv8.4-A outer-shareable (...os) variants.  */
    { "vmalle1os",    CPENS (0, C8, C1, 0), F_ARCHEXT },
    { "vae1os",       CPENS (0, C8, C1, 1), F_HASXT | F_ARCHEXT },
    { "aside1os",     CPENS (0, C8, C1, 2), F_HASXT | F_ARCHEXT },
    { "vaae1os",      CPENS (0, C8, C1, 3), F_HASXT | F_ARCHEXT },
    { "vale1os",      CPENS (0, C8, C1, 5), F_HASXT | F_ARCHEXT },
    { "vaale1os",     CPENS (0, C8, C1, 7), F_HASXT | F_ARCHEXT },
    { "ipas2e1os",    CPENS (4, C8, C4, 0), F_HASXT | F_ARCHEXT },
    { "ipas2le1os",   CPENS (4, C8, C4, 4), F_HASXT | F_ARCHEXT },
    { "vae2os",       CPENS (4, C8, C1, 1), F_HASXT | F_ARCHEXT },
    { "vale2os",      CPENS (4, C8, C1, 5), F_HASXT | F_ARCHEXT },
    { "vmalls12e1os", CPENS (4, C8, C1, 6), F_ARCHEXT },
    { "vae3os",       CPENS (6, C8, C1, 1), F_HASXT | F_ARCHEXT },
    { "vale3os",      CPENS (6, C8, C1, 5), F_HASXT | F_ARCHEXT },
    { "alle2os",      CPENS (4, C8, C1, 0), F_ARCHEXT },
    { "alle1os",      CPENS (4, C8, C1, 4), F_ARCHEXT },
    { "alle3os",      CPENS (6, C8, C1, 0), F_ARCHEXT },

    /* ARMv8.4-A r... variants; likewise V8_4-gated.  */
    { "rvae1",      CPENS (0, C8, C6, 1), F_HASXT | F_ARCHEXT },
    { "rvaae1",     CPENS (0, C8, C6, 3), F_HASXT | F_ARCHEXT },
    { "rvale1",     CPENS (0, C8, C6, 5), F_HASXT | F_ARCHEXT },
    { "rvaale1",    CPENS (0, C8, C6, 7), F_HASXT | F_ARCHEXT },
    { "rvae1is",    CPENS (0, C8, C2, 1), F_HASXT | F_ARCHEXT },
    { "rvaae1is",   CPENS (0, C8, C2, 3), F_HASXT | F_ARCHEXT },
    { "rvale1is",   CPENS (0, C8, C2, 5), F_HASXT | F_ARCHEXT },
    { "rvaale1is",  CPENS (0, C8, C2, 7), F_HASXT | F_ARCHEXT },
    { "rvae1os",    CPENS (0, C8, C5, 1), F_HASXT | F_ARCHEXT },
    { "rvaae1os",   CPENS (0, C8, C5, 3), F_HASXT | F_ARCHEXT },
    { "rvale1os",   CPENS (0, C8, C5, 5), F_HASXT | F_ARCHEXT },
    { "rvaale1os",  CPENS (0, C8, C5, 7), F_HASXT | F_ARCHEXT },
    { "ripas2e1is", CPENS (4, C8, C0, 2), F_HASXT | F_ARCHEXT },
    { "ripas2le1is",CPENS (4, C8, C0, 6), F_HASXT | F_ARCHEXT },
    { "ripas2e1",   CPENS (4, C8, C4, 2), F_HASXT | F_ARCHEXT },
    { "ripas2le1",  CPENS (4, C8, C4, 6), F_HASXT | F_ARCHEXT },
    { "ripas2e1os", CPENS (4, C8, C4, 3), F_HASXT | F_ARCHEXT },
    { "ripas2le1os",CPENS (4, C8, C4, 7), F_HASXT | F_ARCHEXT },
    { "rvae2",      CPENS (4, C8, C6, 1), F_HASXT | F_ARCHEXT },
    { "rvale2",     CPENS (4, C8, C6, 5), F_HASXT | F_ARCHEXT },
    { "rvae2is",    CPENS (4, C8, C2, 1), F_HASXT | F_ARCHEXT },
    { "rvale2is",   CPENS (4, C8, C2, 5), F_HASXT | F_ARCHEXT },
    { "rvae2os",    CPENS (4, C8, C5, 1), F_HASXT | F_ARCHEXT },
    { "rvale2os",   CPENS (4, C8, C5, 5), F_HASXT | F_ARCHEXT },
    { "rvae3",      CPENS (6, C8, C6, 1), F_HASXT | F_ARCHEXT },
    { "rvale3",     CPENS (6, C8, C6, 5), F_HASXT | F_ARCHEXT },
    { "rvae3is",    CPENS (6, C8, C2, 1), F_HASXT | F_ARCHEXT },
    { "rvale3is",   CPENS (6, C8, C2, 5), F_HASXT | F_ARCHEXT },
    { "rvae3os",    CPENS (6, C8, C5, 1), F_HASXT | F_ARCHEXT },
    { "rvale3os",   CPENS (6, C8, C5, 5), F_HASXT | F_ARCHEXT },

    { 0, CPENS(0,0,0,0), 0 }
};
/* Operands of the speculation-restriction instructions (CFP/DVP/CPP).
   Gated on AARCH64_FEATURE_PREDRES by aarch64_sys_ins_reg_supported_p.  */
const aarch64_sys_ins_reg aarch64_sys_regs_sr [] =
{
    /* RCTX is somewhat unique in a way that it has different values
       (op2) based on the instruction in which it is used (cfp/dvp/cpp).
       Thus op2 is masked out and instead encoded directly in the
       aarch64_opcode_table entries for the respective instructions.  */
    { "rctx",   CPENS(3,C7,C3,0), F_HASXT | F_ARCHEXT | F_REG_WRITE}, /* WO */

    { 0, CPENS(0,0,0,0), 0 }
};
4526 aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg
*sys_ins_reg
)
4528 return (sys_ins_reg
->flags
& F_HASXT
) != 0;
/* Return TRUE if the system instruction operand REG is usable with the CPU
   feature set FEATURES: either REG carries no F_ARCHEXT requirement, or the
   architecture extension it needs is present in FEATURES.  */
bfd_boolean
aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
				 const aarch64_sys_ins_reg *reg)
{
  /* Operands without an architecture-extension requirement are always
     supported.  */
  if (!(reg->flags & F_ARCHEXT))
    return TRUE;

  /* DC CVAP.  Values are from aarch64_sys_regs_dc.  */
  if (reg->value == CPENS (3, C7, C12, 1)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return FALSE;

  /* DC CVADP.  Values are from aarch64_sys_regs_dc.  */
  if (reg->value == CPENS (3, C7, C13, 1)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_CVADP))
    return FALSE;

  /* AT S1E1RP, AT S1E1WP.  Values are from aarch64_sys_regs_at.  */
  if ((reg->value == CPENS (0, C7, C9, 0)
       || reg->value == CPENS (0, C7, C9, 1))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return FALSE;

  /* CFP/DVP/CPP RCTX : Value are from aarch64_sys_regs_sr. */
  if (reg->value == CPENS (3, C7, C3, 0)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PREDRES))
    return FALSE;

  return TRUE;
}
/* Extract bit BT of INSN.  */
#define BIT(INSN,BT)     (((INSN) >> (BT)) & 1)
/* Extract the (inclusive) bit-field HI..LO of INSN.  */
#define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1 << (((HI) - (LO)) + 1)) - 1))

/* Opcode verifier for LDPSW: reject register combinations the architecture
   leaves undefined.  Returns ERR_UND for an undefined combination, ERR_OK
   otherwise.  Only INSN is inspected; the remaining parameters exist to
   match the common verifier signature.  */
static enum err_type
verify_ldpsw (const struct aarch64_inst *inst ATTRIBUTE_UNUSED,
	      const aarch64_insn insn, bfd_vma pc ATTRIBUTE_UNUSED,
	      bfd_boolean encoding ATTRIBUTE_UNUSED,
	      aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED,
	      aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED)
{
  int t  = BITS (insn, 4, 0);	/* First transfer register.  */
  int n  = BITS (insn, 9, 5);	/* Base register.  */
  int t2 = BITS (insn, 14, 10);	/* Second transfer register.  */

  /* NOTE(review): the two BIT() guards below test bits 23 (write-back) and
     22 (load) of INSN — confirm against the opcode encoding.  */
  if (BIT (insn, 23))
    {
      /* Write back enabled.  The base register may not equal either
	 transfer register unless it is the stack pointer (31).  */
      if ((t == n || t2 == n) && n != 31)
	return ERR_UND;
    }

  if (BIT (insn, 22))
    {
      /* Load: the two transfer registers must differ.  */
      if (t == t2)
	return ERR_UND;
    }

  return ERR_OK;
}
4610 /* Initialize an instruction sequence insn_sequence with the instruction INST.
4611 If INST is NULL the given insn_sequence is cleared and the sequence is left
4615 init_insn_sequence (const struct aarch64_inst
*inst
,
4616 aarch64_instr_sequence
*insn_sequence
)
4618 int num_req_entries
= 0;
4619 insn_sequence
->next_insn
= 0;
4620 insn_sequence
->num_insns
= num_req_entries
;
4621 if (insn_sequence
->instr
)
4622 XDELETE (insn_sequence
->instr
);
4623 insn_sequence
->instr
= NULL
;
4627 insn_sequence
->instr
= XNEW (aarch64_inst
);
4628 memcpy (insn_sequence
->instr
, inst
, sizeof (aarch64_inst
));
4631 /* Handle all the cases here. May need to think of something smarter than
4632 a giant if/else chain if this grows. At that time, a lookup table may be
4634 if (inst
&& inst
->opcode
->constraints
& C_SCAN_MOVPRFX
)
4635 num_req_entries
= 1;
4637 if (insn_sequence
->current_insns
)
4638 XDELETEVEC (insn_sequence
->current_insns
);
4639 insn_sequence
->current_insns
= NULL
;
4641 if (num_req_entries
!= 0)
4643 size_t size
= num_req_entries
* sizeof (aarch64_inst
);
4644 insn_sequence
->current_insns
4645 = (aarch64_inst
**) XNEWVEC (aarch64_inst
, num_req_entries
);
4646 memset (insn_sequence
->current_insns
, 0, size
);
/* This function verifies that the instruction INST adheres to its specified
   constraints.  If it does then ERR_OK is returned, if not then ERR_VFI is
   returned and MISMATCH_DETAIL contains the reason why verification failed.

   The function is called both during assembly and disassembly.  If assembling
   then ENCODING will be TRUE, else FALSE.  If dissassembling PC will be set
   and will contain the PC of the current instruction w.r.t to the section.

   If ENCODING and PC=0 then you are at a start of a section.  The constraints
   are verified against the given state insn_sequence which is updated as it
   transitions through the verification.  */

enum err_type
verify_constraints (const struct aarch64_inst *inst,
		    const aarch64_insn insn ATTRIBUTE_UNUSED,
		    bfd_vma pc,
		    bfd_boolean encoding,
		    aarch64_operand_error *mismatch_detail,
		    aarch64_instr_sequence *insn_sequence)
{
  assert (inst->opcode);

  const struct aarch64_opcode *opcode = inst->opcode;
  /* Fast path: this insn imposes no constraint and no sequence is open.  */
  if (!opcode->constraints && !insn_sequence->instr)
    return ERR_OK;

  assert (insn_sequence);

  enum err_type res = ERR_OK;

  /* This instruction puts a constraint on the insn_sequence.  */
  if (opcode->flags & F_SCAN)
    {
      /* Opening a new sequence while one is still open is diagnosed as a
	 non-fatal error; the new sequence then replaces the old one.  */
      if (insn_sequence->instr)
	{
	  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	  mismatch_detail->error = _("instruction opens new dependency "
				     "sequence without ending previous one");
	  mismatch_detail->index = -1;
	  mismatch_detail->non_fatal = TRUE;
	  res = ERR_VFI;
	}

      init_insn_sequence (inst, insn_sequence);
      return res;
    }

  /* Verify constraints on an existing sequence.  */
  if (insn_sequence->instr)
    {
      const struct aarch64_opcode* inst_opcode = insn_sequence->instr->opcode;
      /* If we're decoding and we hit PC=0 with an open sequence then we haven't
	 closed a previous one that we should have.  */
      if (!encoding && pc == 0)
	{
	  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	  mismatch_detail->error = _("previous `movprfx' sequence not closed");
	  mismatch_detail->index = -1;
	  mismatch_detail->non_fatal = TRUE;
	  res = ERR_VFI;
	  /* Reset the sequence.  */
	  init_insn_sequence (NULL, insn_sequence);
	  return res;
	}

      /* Validate C_SCAN_MOVPRFX constraints.  Move this to a lookup table.  */
      if (inst_opcode->constraints & C_SCAN_MOVPRFX)
	{
	  /* Check to see if the MOVPRFX SVE instruction is followed by an SVE
	     instruction for better error messages.  */
	  if (!opcode->avariant
	      || !(*opcode->avariant & AARCH64_FEATURE_SVE))
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("SVE instruction expected after "
					 "`movprfx'");
	      mismatch_detail->index = -1;
	      mismatch_detail->non_fatal = TRUE;
	      res = ERR_VFI;
	      goto done;
	    }

	  /* Check to see if the MOVPRFX SVE instruction is followed by an SVE
	     instruction that is allowed to be used with a MOVPRFX.  */
	  if (!(opcode->constraints & C_SCAN_MOVPRFX))
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("SVE `movprfx' compatible instruction "
					 "expected");
	      mismatch_detail->index = -1;
	      mismatch_detail->non_fatal = TRUE;
	      res = ERR_VFI;
	      goto done;
	    }

	  /* Next check for usage of the predicate register.  */
	  aarch64_opnd_info blk_dest = insn_sequence->instr->operands[0];
	  aarch64_opnd_info blk_pred, inst_pred;
	  memset (&blk_pred, 0, sizeof (aarch64_opnd_info));
	  memset (&inst_pred, 0, sizeof (aarch64_opnd_info));
	  bfd_boolean predicated = FALSE;
	  assert (blk_dest.type == AARCH64_OPND_SVE_Zd);

	  /* Determine if the movprfx instruction used is predicated or not.  */
	  if (insn_sequence->instr->operands[1].type == AARCH64_OPND_SVE_Pg3)
	    {
	      predicated = TRUE;
	      blk_pred = insn_sequence->instr->operands[1];
	    }

	  unsigned char max_elem_size = 0;
	  unsigned char current_elem_size;
	  int num_op_used = 0, last_op_usage = 0;
	  int i, inst_pred_idx = -1;
	  int num_ops = aarch64_num_of_operands (opcode);
	  /* Walk the operands of INST, counting uses of the movprfx
	     destination register, tracking the widest vector element size
	     seen, and remembering the (last) predicate operand.  */
	  for (i = 0; i < num_ops; i++)
	    {
	      aarch64_opnd_info inst_op = inst->operands[i];
	      switch (inst_op.type)
		{
		case AARCH64_OPND_SVE_Zd:
		case AARCH64_OPND_SVE_Zm_5:
		case AARCH64_OPND_SVE_Zm_16:
		case AARCH64_OPND_SVE_Zn:
		case AARCH64_OPND_SVE_Zt:
		case AARCH64_OPND_SVE_Vm:
		case AARCH64_OPND_SVE_Vn:
		case AARCH64_OPND_Va:
		case AARCH64_OPND_Vn:
		case AARCH64_OPND_Vm:
		case AARCH64_OPND_Sn:
		case AARCH64_OPND_Sm:
		case AARCH64_OPND_Rn:
		case AARCH64_OPND_Rm:
		case AARCH64_OPND_Rn_SP:
		case AARCH64_OPND_Rm_SP:
		  if (inst_op.reg.regno == blk_dest.reg.regno)
		    {
		      num_op_used++;
		      last_op_usage = i;
		    }
		  current_elem_size
		    = aarch64_get_qualifier_esize (inst_op.qualifier);
		  if (current_elem_size > max_elem_size)
		    max_elem_size = current_elem_size;
		  break;
		case AARCH64_OPND_SVE_Pd:
		case AARCH64_OPND_SVE_Pg3:
		case AARCH64_OPND_SVE_Pg4_5:
		case AARCH64_OPND_SVE_Pg4_10:
		case AARCH64_OPND_SVE_Pg4_16:
		case AARCH64_OPND_SVE_Pm:
		case AARCH64_OPND_SVE_Pn:
		case AARCH64_OPND_SVE_Pt:
		  inst_pred = inst_op;
		  inst_pred_idx = i;
		  break;
		default:
		  break;
		}
	    }

	  assert (max_elem_size != 0);
	  aarch64_opnd_info inst_dest = inst->operands[0];
	  /* Determine the size that should be used to compare against the
	     movprfx size.  */
	  current_elem_size
	    = opcode->constraints & C_MAX_ELEM
	      ? max_elem_size
	      : aarch64_get_qualifier_esize (inst_dest.qualifier);

	  /* If movprfx is predicated do some extra checks.  */
	  if (predicated)
	    {
	      /* The instruction must be predicated.  */
	      if (inst_pred_idx < 0)
		{
		  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
		  mismatch_detail->error = _("predicated instruction expected "
					     "after `movprfx'");
		  mismatch_detail->index = -1;
		  mismatch_detail->non_fatal = TRUE;
		  res = ERR_VFI;
		  goto done;
		}

	      /* The instruction must have a merging predicate.  */
	      if (inst_pred.qualifier != AARCH64_OPND_QLF_P_M)
		{
		  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
		  mismatch_detail->error = _("merging predicate expected due "
					     "to preceding `movprfx'");
		  mismatch_detail->index = inst_pred_idx;
		  mismatch_detail->non_fatal = TRUE;
		  res = ERR_VFI;
		  goto done;
		}

	      /* The same register must be used in instruction.  */
	      if (blk_pred.reg.regno != inst_pred.reg.regno)
		{
		  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
		  mismatch_detail->error = _("predicate register differs "
					     "from that in preceding "
					     "`movprfx'");
		  mismatch_detail->index = inst_pred_idx;
		  mismatch_detail->non_fatal = TRUE;
		  res = ERR_VFI;
		  goto done;
		}
	    }

	  /* Destructive operations by definition must allow one usage of the
	     same register.  */
	  int allowed_usage
	    = aarch64_is_destructive_by_operands (opcode) ? 2 : 1;

	  /* Operand is not used at all.  */
	  if (num_op_used == 0)
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("output register of preceding "
					 "`movprfx' not used in current "
					 "instruction");
	      mismatch_detail->index = 0;
	      mismatch_detail->non_fatal = TRUE;
	      res = ERR_VFI;
	      goto done;
	    }

	  /* We now know it's used, now determine exactly where it's used.  */
	  if (blk_dest.reg.regno != inst_dest.reg.regno)
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("output register of preceding "
					 "`movprfx' expected as output");
	      mismatch_detail->index = 0;
	      mismatch_detail->non_fatal = TRUE;
	      res = ERR_VFI;
	      goto done;
	    }

	  /* Operand used more than allowed for the specific opcode type.  */
	  if (num_op_used > allowed_usage)
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("output register of preceding "
					 "`movprfx' used as input");
	      mismatch_detail->index = last_op_usage;
	      mismatch_detail->non_fatal = TRUE;
	      res = ERR_VFI;
	      goto done;
	    }

	  /* Now the only thing left is the qualifiers checks.  The register
	     must have the same maximum element size.  */
	  if (inst_dest.qualifier
	      && blk_dest.qualifier
	      && current_elem_size
		 != aarch64_get_qualifier_esize (blk_dest.qualifier))
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("register size not compatible with "
					 "previous `movprfx'");
	      mismatch_detail->index = 0;
	      mismatch_detail->non_fatal = TRUE;
	      res = ERR_VFI;
	      goto done;
	    }
	}

done:
      /* Add the new instruction to the sequence.  */
      memcpy (insn_sequence->current_insns + insn_sequence->next_insn++,
	      inst, sizeof (aarch64_inst));

      /* Check if sequence is now full.  */
      if (insn_sequence->next_insn >= insn_sequence->num_insns)
	{
	  /* Sequence is full, but we don't have anything special to do for now,
	     so clear and reset it.  */
	  init_insn_sequence (NULL, insn_sequence);
	}
    }

  return res;
}
/* Return true if VALUE cannot be moved into an SVE register using DUP
   (with any element size, not just ESIZE) and if using DUPM would
   therefore be OK.  ESIZE is the number of bytes in the immediate.  */
bfd_boolean
aarch64_sve_dupm_mov_immediate_p (uint64_t uvalue, int esize)
{
  int64_t svalue = uvalue;
  /* Mask of the bits above the ESIZE-byte element (two shifts to avoid
     undefined behaviour when esize * 8 == 64).  */
  uint64_t upper = (uint64_t) -1 << (esize * 4) << (esize * 4);

  /* The bits outside the element must be all-zero or all-one (i.e. a
     sign/zero extension of the element); otherwise DUPM cannot encode
     the value either.  */
  if ((uvalue & ~upper) != uvalue && (uvalue | upper) != uvalue)
    return FALSE;
  /* Narrow SVALUE to the smallest element size that still represents the
     replicated value; if it fits in a single byte, DUP can handle it.  */
  if (esize <= 4 || (uint32_t) uvalue == (uint32_t) (uvalue >> 32))
    {
      svalue = (int32_t) uvalue;
      if (esize <= 2 || (uint16_t) uvalue == (uint16_t) (uvalue >> 16))
	{
	  svalue = (int16_t) uvalue;
	  if (esize == 1 || (uint8_t) uvalue == (uint8_t) (uvalue >> 8))
	    return FALSE;
	}
    }
  /* DUP also accepts a byte value shifted left by 8 ("LSL #8" form);
     strip that shift before the range test.  */
  if ((svalue & 0xff) == 0)
    svalue /= 256;
  /* Outside the signed 8-bit range DUP cannot encode it, so DUPM is OK.  */
  return svalue < -128 || svalue >= 128;
}
4967 /* Include the opcode description table as well as the operand description
4969 #define VERIFIER(x) verify_##x
4970 #include "aarch64-tbl.h"