1 /* aarch64-opc.c -- AArch64 opcode support.
2 Copyright (C) 2009-2017 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of the GNU opcodes library.
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
30 #include "libiberty.h"
32 #include "aarch64-opc.h"
35 int debug_dump
= FALSE
;
36 #endif /* DEBUG_AARCH64 */
38 /* The enumeration strings associated with each value of a 5-bit SVE
39 pattern operand. A null entry indicates a reserved meaning. */
40 const char *const aarch64_sve_pattern_array
[32] = {
79 /* The enumeration strings associated with each value of a 4-bit SVE
80 prefetch operand. A null entry indicates a reserved meaning. */
81 const char *const aarch64_sve_prfop_array
[16] = {
102 /* Helper functions to determine which operand to be used to encode/decode
103 the size:Q fields for AdvSIMD instructions. */
105 static inline bfd_boolean
106 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier
)
108 return ((qualifier
>= AARCH64_OPND_QLF_V_8B
109 && qualifier
<= AARCH64_OPND_QLF_V_1Q
) ? TRUE
113 static inline bfd_boolean
114 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier
)
116 return ((qualifier
>= AARCH64_OPND_QLF_S_B
117 && qualifier
<= AARCH64_OPND_QLF_S_Q
) ? TRUE
127 DP_VECTOR_ACROSS_LANES
,
/* For each data_pattern value, the index of the operand that carries
   the significant size:Q information.  */
static const char significant_operand_index[] =
{
  0,	/* DP_UNKNOWN, by default using operand 0.  */
  0,	/* DP_VECTOR_3SAME */
  1,	/* DP_VECTOR_LONG */
  2,	/* DP_VECTOR_WIDE */
  1,	/* DP_VECTOR_ACROSS_LANES */
};
139 /* Given a sequence of qualifiers in QUALIFIERS, determine and return
141 N.B. QUALIFIERS is a possible sequence of qualifiers each of which
142 corresponds to one of a sequence of operands. */
144 static enum data_pattern
145 get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers
)
147 if (vector_qualifier_p (qualifiers
[0]) == TRUE
)
149 /* e.g. v.4s, v.4s, v.4s
150 or v.4h, v.4h, v.h[3]. */
151 if (qualifiers
[0] == qualifiers
[1]
152 && vector_qualifier_p (qualifiers
[2]) == TRUE
153 && (aarch64_get_qualifier_esize (qualifiers
[0])
154 == aarch64_get_qualifier_esize (qualifiers
[1]))
155 && (aarch64_get_qualifier_esize (qualifiers
[0])
156 == aarch64_get_qualifier_esize (qualifiers
[2])))
157 return DP_VECTOR_3SAME
;
158 /* e.g. v.8h, v.8b, v.8b.
159 or v.4s, v.4h, v.h[2].
161 if (vector_qualifier_p (qualifiers
[1]) == TRUE
162 && aarch64_get_qualifier_esize (qualifiers
[0]) != 0
163 && (aarch64_get_qualifier_esize (qualifiers
[0])
164 == aarch64_get_qualifier_esize (qualifiers
[1]) << 1))
165 return DP_VECTOR_LONG
;
166 /* e.g. v.8h, v.8h, v.8b. */
167 if (qualifiers
[0] == qualifiers
[1]
168 && vector_qualifier_p (qualifiers
[2]) == TRUE
169 && aarch64_get_qualifier_esize (qualifiers
[0]) != 0
170 && (aarch64_get_qualifier_esize (qualifiers
[0])
171 == aarch64_get_qualifier_esize (qualifiers
[2]) << 1)
172 && (aarch64_get_qualifier_esize (qualifiers
[0])
173 == aarch64_get_qualifier_esize (qualifiers
[1])))
174 return DP_VECTOR_WIDE
;
176 else if (fp_qualifier_p (qualifiers
[0]) == TRUE
)
178 /* e.g. SADDLV <V><d>, <Vn>.<T>. */
179 if (vector_qualifier_p (qualifiers
[1]) == TRUE
180 && qualifiers
[2] == AARCH64_OPND_QLF_NIL
)
181 return DP_VECTOR_ACROSS_LANES
;
187 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
188 the AdvSIMD instructions. */
189 /* N.B. it is possible to do some optimization that doesn't call
190 get_data_pattern each time when we need to select an operand. We can
191 either buffer the caculated the result or statically generate the data,
192 however, it is not obvious that the optimization will bring significant
196 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode
*opcode
)
199 significant_operand_index
[get_data_pattern (opcode
->qualifiers_list
[0])];
202 const aarch64_field fields
[] =
205 { 0, 4 }, /* cond2: condition in truly conditional-executed inst. */
206 { 0, 4 }, /* nzcv: flag bit specifier, encoded in the "nzcv" field. */
207 { 5, 5 }, /* defgh: d:e:f:g:h bits in AdvSIMD modified immediate. */
208 { 16, 3 }, /* abc: a:b:c bits in AdvSIMD modified immediate. */
209 { 5, 19 }, /* imm19: e.g. in CBZ. */
210 { 5, 19 }, /* immhi: e.g. in ADRP. */
211 { 29, 2 }, /* immlo: e.g. in ADRP. */
212 { 22, 2 }, /* size: in most AdvSIMD and floating-point instructions. */
213 { 10, 2 }, /* vldst_size: size field in the AdvSIMD load/store inst. */
214 { 29, 1 }, /* op: in AdvSIMD modified immediate instructions. */
215 { 30, 1 }, /* Q: in most AdvSIMD instructions. */
216 { 0, 5 }, /* Rt: in load/store instructions. */
217 { 0, 5 }, /* Rd: in many integer instructions. */
218 { 5, 5 }, /* Rn: in many integer instructions. */
219 { 10, 5 }, /* Rt2: in load/store pair instructions. */
220 { 10, 5 }, /* Ra: in fp instructions. */
221 { 5, 3 }, /* op2: in the system instructions. */
222 { 8, 4 }, /* CRm: in the system instructions. */
223 { 12, 4 }, /* CRn: in the system instructions. */
224 { 16, 3 }, /* op1: in the system instructions. */
225 { 19, 2 }, /* op0: in the system instructions. */
226 { 10, 3 }, /* imm3: in add/sub extended reg instructions. */
227 { 12, 4 }, /* cond: condition flags as a source operand. */
228 { 12, 4 }, /* opcode: in advsimd load/store instructions. */
229 { 12, 4 }, /* cmode: in advsimd modified immediate instructions. */
230 { 13, 3 }, /* asisdlso_opcode: opcode in advsimd ld/st single element. */
231 { 13, 2 }, /* len: in advsimd tbl/tbx instructions. */
232 { 16, 5 }, /* Rm: in ld/st reg offset and some integer inst. */
233 { 16, 5 }, /* Rs: in load/store exclusive instructions. */
234 { 13, 3 }, /* option: in ld/st reg offset + add/sub extended reg inst. */
235 { 12, 1 }, /* S: in load/store reg offset instructions. */
236 { 21, 2 }, /* hw: in move wide constant instructions. */
237 { 22, 2 }, /* opc: in load/store reg offset instructions. */
238 { 23, 1 }, /* opc1: in load/store reg offset instructions. */
239 { 22, 2 }, /* shift: in add/sub reg/imm shifted instructions. */
240 { 22, 2 }, /* type: floating point type field in fp data inst. */
241 { 30, 2 }, /* ldst_size: size field in ld/st reg offset inst. */
242 { 10, 6 }, /* imm6: in add/sub reg shifted instructions. */
243 { 11, 4 }, /* imm4: in advsimd ext and advsimd ins instructions. */
244 { 16, 5 }, /* imm5: in conditional compare (immediate) instructions. */
245 { 15, 7 }, /* imm7: in load/store pair pre/post index instructions. */
246 { 13, 8 }, /* imm8: in floating-point scalar move immediate inst. */
247 { 12, 9 }, /* imm9: in load/store pre/post index instructions. */
248 { 10, 12 }, /* imm12: in ld/st unsigned imm or add/sub shifted inst. */
249 { 5, 14 }, /* imm14: in test bit and branch instructions. */
250 { 5, 16 }, /* imm16: in exception instructions. */
251 { 0, 26 }, /* imm26: in unconditional branch instructions. */
252 { 10, 6 }, /* imms: in bitfield and logical immediate instructions. */
253 { 16, 6 }, /* immr: in bitfield and logical immediate instructions. */
254 { 16, 3 }, /* immb: in advsimd shift by immediate instructions. */
255 { 19, 4 }, /* immh: in advsimd shift by immediate instructions. */
256 { 22, 1 }, /* S: in LDRAA and LDRAB instructions. */
257 { 22, 1 }, /* N: in logical (immediate) instructions. */
258 { 11, 1 }, /* index: in ld/st inst deciding the pre/post-index. */
259 { 24, 1 }, /* index2: in ld/st pair inst deciding the pre/post-index. */
260 { 31, 1 }, /* sf: in integer data processing instructions. */
261 { 30, 1 }, /* lse_size: in LSE extension atomic instructions. */
262 { 11, 1 }, /* H: in advsimd scalar x indexed element instructions. */
263 { 21, 1 }, /* L: in advsimd scalar x indexed element instructions. */
264 { 20, 1 }, /* M: in advsimd scalar x indexed element instructions. */
265 { 31, 1 }, /* b5: in the test bit and branch instructions. */
266 { 19, 5 }, /* b40: in the test bit and branch instructions. */
267 { 10, 6 }, /* scale: in the fixed-point scalar to fp converting inst. */
268 { 4, 1 }, /* SVE_M_4: Merge/zero select, bit 4. */
269 { 14, 1 }, /* SVE_M_14: Merge/zero select, bit 14. */
270 { 16, 1 }, /* SVE_M_16: Merge/zero select, bit 16. */
271 { 17, 1 }, /* SVE_N: SVE equivalent of N. */
272 { 0, 4 }, /* SVE_Pd: p0-p15, bits [3,0]. */
273 { 10, 3 }, /* SVE_Pg3: p0-p7, bits [12,10]. */
274 { 5, 4 }, /* SVE_Pg4_5: p0-p15, bits [8,5]. */
275 { 10, 4 }, /* SVE_Pg4_10: p0-p15, bits [13,10]. */
276 { 16, 4 }, /* SVE_Pg4_16: p0-p15, bits [19,16]. */
277 { 16, 4 }, /* SVE_Pm: p0-p15, bits [19,16]. */
278 { 5, 4 }, /* SVE_Pn: p0-p15, bits [8,5]. */
279 { 0, 4 }, /* SVE_Pt: p0-p15, bits [3,0]. */
280 { 5, 5 }, /* SVE_Rm: SVE alternative position for Rm. */
281 { 16, 5 }, /* SVE_Rn: SVE alternative position for Rn. */
282 { 0, 5 }, /* SVE_Vd: Scalar SIMD&FP register, bits [4,0]. */
283 { 5, 5 }, /* SVE_Vm: Scalar SIMD&FP register, bits [9,5]. */
284 { 5, 5 }, /* SVE_Vn: Scalar SIMD&FP register, bits [9,5]. */
285 { 5, 5 }, /* SVE_Za_5: SVE vector register, bits [9,5]. */
286 { 16, 5 }, /* SVE_Za_16: SVE vector register, bits [20,16]. */
287 { 0, 5 }, /* SVE_Zd: SVE vector register. bits [4,0]. */
288 { 5, 5 }, /* SVE_Zm_5: SVE vector register, bits [9,5]. */
289 { 16, 5 }, /* SVE_Zm_16: SVE vector register, bits [20,16]. */
290 { 5, 5 }, /* SVE_Zn: SVE vector register, bits [9,5]. */
291 { 0, 5 }, /* SVE_Zt: SVE vector register, bits [4,0]. */
292 { 5, 1 }, /* SVE_i1: single-bit immediate. */
293 { 16, 3 }, /* SVE_imm3: 3-bit immediate field. */
294 { 16, 4 }, /* SVE_imm4: 4-bit immediate field. */
295 { 5, 5 }, /* SVE_imm5: 5-bit immediate field. */
296 { 16, 5 }, /* SVE_imm5b: secondary 5-bit immediate field. */
297 { 16, 6 }, /* SVE_imm6: 6-bit immediate field. */
298 { 14, 7 }, /* SVE_imm7: 7-bit immediate field. */
299 { 5, 8 }, /* SVE_imm8: 8-bit immediate field. */
300 { 5, 9 }, /* SVE_imm9: 9-bit immediate field. */
301 { 11, 6 }, /* SVE_immr: SVE equivalent of immr. */
302 { 5, 6 }, /* SVE_imms: SVE equivalent of imms. */
303 { 10, 2 }, /* SVE_msz: 2-bit shift amount for ADR. */
304 { 5, 5 }, /* SVE_pattern: vector pattern enumeration. */
305 { 0, 4 }, /* SVE_prfop: prefetch operation for SVE PRF[BHWD]. */
306 { 22, 1 }, /* SVE_sz: 1-bit element size select. */
307 { 16, 4 }, /* SVE_tsz: triangular size select. */
308 { 22, 2 }, /* SVE_tszh: triangular size select high, bits [23,22]. */
309 { 8, 2 }, /* SVE_tszl_8: triangular size select low, bits [9,8]. */
310 { 19, 2 }, /* SVE_tszl_19: triangular size select low, bits [20,19]. */
311 { 14, 1 }, /* SVE_xs_14: UXTW/SXTW select (bit 14). */
312 { 22, 1 }, /* SVE_xs_22: UXTW/SXTW select (bit 22). */
313 { 11, 2 }, /* rotate1: FCMLA immediate rotate. */
314 { 13, 2 }, /* rotate2: Indexed element FCMLA immediate rotate. */
315 { 12, 1 }, /* rotate3: FCADD immediate rotate. */
318 enum aarch64_operand_class
319 aarch64_get_operand_class (enum aarch64_opnd type
)
321 return aarch64_operands
[type
].op_class
;
325 aarch64_get_operand_name (enum aarch64_opnd type
)
327 return aarch64_operands
[type
].name
;
330 /* Get operand description string.
331 This is usually for the diagnosis purpose. */
333 aarch64_get_operand_desc (enum aarch64_opnd type
)
335 return aarch64_operands
[type
].desc
;
338 /* Table of all conditional affixes. */
339 const aarch64_cond aarch64_conds
[16] =
341 {{"eq", "none"}, 0x0},
342 {{"ne", "any"}, 0x1},
343 {{"cs", "hs", "nlast"}, 0x2},
344 {{"cc", "lo", "ul", "last"}, 0x3},
345 {{"mi", "first"}, 0x4},
346 {{"pl", "nfrst"}, 0x5},
349 {{"hi", "pmore"}, 0x8},
350 {{"ls", "plast"}, 0x9},
351 {{"ge", "tcont"}, 0xa},
352 {{"lt", "tstop"}, 0xb},
360 get_cond_from_value (aarch64_insn value
)
363 return &aarch64_conds
[(unsigned int) value
];
367 get_inverted_cond (const aarch64_cond
*cond
)
369 return &aarch64_conds
[cond
->value
^ 0x1];
372 /* Table describing the operand extension/shifting operators; indexed by
373 enum aarch64_modifier_kind.
375 The value column provides the most common values for encoding modifiers,
376 which enables table-driven encoding/decoding for the modifiers. */
377 const struct aarch64_name_value_pair aarch64_operand_modifiers
[] =
398 enum aarch64_modifier_kind
399 aarch64_get_operand_modifier (const struct aarch64_name_value_pair
*desc
)
401 return desc
- aarch64_operand_modifiers
;
405 aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind
)
407 return aarch64_operand_modifiers
[kind
].value
;
410 enum aarch64_modifier_kind
411 aarch64_get_operand_modifier_from_value (aarch64_insn value
,
412 bfd_boolean extend_p
)
414 if (extend_p
== TRUE
)
415 return AARCH64_MOD_UXTB
+ value
;
417 return AARCH64_MOD_LSL
- value
;
421 aarch64_extend_operator_p (enum aarch64_modifier_kind kind
)
423 return (kind
> AARCH64_MOD_LSL
&& kind
<= AARCH64_MOD_SXTX
)
427 static inline bfd_boolean
428 aarch64_shift_operator_p (enum aarch64_modifier_kind kind
)
430 return (kind
>= AARCH64_MOD_ROR
&& kind
<= AARCH64_MOD_LSL
)
434 const struct aarch64_name_value_pair aarch64_barrier_options
[16] =
454 /* Table describing the operands supported by the aliases of the HINT
457 The name column is the operand that is accepted for the alias. The value
458 column is the hint number of the alias. The list of operands is terminated
459 by NULL in the name column. */
461 const struct aarch64_name_value_pair aarch64_hint_options
[] =
463 { "csync", 0x11 }, /* PSB CSYNC. */
467 /* op -> op: load = 0 instruction = 1 store = 2
469 t -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1 */
470 #define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
471 const struct aarch64_name_value_pair aarch64_prfops
[32] =
473 { "pldl1keep", B(0, 1, 0) },
474 { "pldl1strm", B(0, 1, 1) },
475 { "pldl2keep", B(0, 2, 0) },
476 { "pldl2strm", B(0, 2, 1) },
477 { "pldl3keep", B(0, 3, 0) },
478 { "pldl3strm", B(0, 3, 1) },
481 { "plil1keep", B(1, 1, 0) },
482 { "plil1strm", B(1, 1, 1) },
483 { "plil2keep", B(1, 2, 0) },
484 { "plil2strm", B(1, 2, 1) },
485 { "plil3keep", B(1, 3, 0) },
486 { "plil3strm", B(1, 3, 1) },
489 { "pstl1keep", B(2, 1, 0) },
490 { "pstl1strm", B(2, 1, 1) },
491 { "pstl2keep", B(2, 2, 0) },
492 { "pstl2strm", B(2, 2, 1) },
493 { "pstl3keep", B(2, 3, 0) },
494 { "pstl3strm", B(2, 3, 1) },
/* Utilities on value constraint.  */

/* Return 1 if VALUE lies in the inclusive range [LOW, HIGH], 0
   otherwise.  */
static inline int
value_in_range_p (int64_t value, int low, int high)
{
  if (value < low)
    return 0;
  return value <= high ? 1 : 0;
}
/* Return true if VALUE is a multiple of ALIGN.  */
static inline int
value_aligned_p (int64_t value, int align)
{
  int64_t remainder = value % align;
  return remainder == 0;
}
/* A signed value fits in a field.
   Return 1 if VALUE is representable as a WIDTH-bit two's-complement
   integer, 0 otherwise.  */
static inline int
value_fit_signed_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width >= sizeof (value) * 8)
    return 0;
  {
    int64_t lim = (int64_t) 1 << (width - 1);
    return (value >= -lim && value < lim) ? 1 : 0;
  }
}
/* An unsigned value fits in a field.
   Return 1 if VALUE is representable as a WIDTH-bit unsigned integer,
   0 otherwise.  */
static inline int
value_fit_unsigned_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width >= sizeof (value) * 8)
    return 0;
  {
    int64_t lim = (int64_t) 1 << width;
    return (value >= 0 && value < lim) ? 1 : 0;
  }
}
551 /* Return 1 if OPERAND is SP or WSP. */
553 aarch64_stack_pointer_p (const aarch64_opnd_info
*operand
)
555 return ((aarch64_get_operand_class (operand
->type
)
556 == AARCH64_OPND_CLASS_INT_REG
)
557 && operand_maybe_stack_pointer (aarch64_operands
+ operand
->type
)
558 && operand
->reg
.regno
== 31);
561 /* Return 1 if OPERAND is XZR or WZP. */
563 aarch64_zero_register_p (const aarch64_opnd_info
*operand
)
565 return ((aarch64_get_operand_class (operand
->type
)
566 == AARCH64_OPND_CLASS_INT_REG
)
567 && !operand_maybe_stack_pointer (aarch64_operands
+ operand
->type
)
568 && operand
->reg
.regno
== 31);
571 /* Return true if the operand *OPERAND that has the operand code
572 OPERAND->TYPE and been qualified by OPERAND->QUALIFIER can be also
573 qualified by the qualifier TARGET. */
576 operand_also_qualified_p (const struct aarch64_opnd_info
*operand
,
577 aarch64_opnd_qualifier_t target
)
579 switch (operand
->qualifier
)
581 case AARCH64_OPND_QLF_W
:
582 if (target
== AARCH64_OPND_QLF_WSP
&& aarch64_stack_pointer_p (operand
))
585 case AARCH64_OPND_QLF_X
:
586 if (target
== AARCH64_OPND_QLF_SP
&& aarch64_stack_pointer_p (operand
))
589 case AARCH64_OPND_QLF_WSP
:
590 if (target
== AARCH64_OPND_QLF_W
591 && operand_maybe_stack_pointer (aarch64_operands
+ operand
->type
))
594 case AARCH64_OPND_QLF_SP
:
595 if (target
== AARCH64_OPND_QLF_X
596 && operand_maybe_stack_pointer (aarch64_operands
+ operand
->type
))
606 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
607 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
609 Return NIL if more than one expected qualifiers are found. */
611 aarch64_opnd_qualifier_t
612 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t
*qseq_list
,
614 const aarch64_opnd_qualifier_t known_qlf
,
621 When the known qualifier is NIL, we have to assume that there is only
622 one qualifier sequence in the *QSEQ_LIST and return the corresponding
623 qualifier directly. One scenario is that for instruction
624 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
625 which has only one possible valid qualifier sequence
627 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
628 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
630 Because the qualifier NIL has dual roles in the qualifier sequence:
631 it can mean no qualifier for the operand, or the qualifer sequence is
632 not in use (when all qualifiers in the sequence are NILs), we have to
633 handle this special case here. */
634 if (known_qlf
== AARCH64_OPND_NIL
)
636 assert (qseq_list
[0][known_idx
] == AARCH64_OPND_NIL
);
637 return qseq_list
[0][idx
];
640 for (i
= 0, saved_i
= -1; i
< AARCH64_MAX_QLF_SEQ_NUM
; ++i
)
642 if (qseq_list
[i
][known_idx
] == known_qlf
)
645 /* More than one sequences are found to have KNOWN_QLF at
647 return AARCH64_OPND_NIL
;
652 return qseq_list
[saved_i
][idx
];
655 enum operand_qualifier_kind
663 /* Operand qualifier description. */
664 struct operand_qualifier_data
666 /* The usage of the three data fields depends on the qualifier kind. */
673 enum operand_qualifier_kind kind
;
676 /* Indexed by the operand qualifier enumerators. */
677 struct operand_qualifier_data aarch64_opnd_qualifiers
[] =
679 {0, 0, 0, "NIL", OQK_NIL
},
681 /* Operand variant qualifiers.
683 element size, number of elements and common value for encoding. */
685 {4, 1, 0x0, "w", OQK_OPD_VARIANT
},
686 {8, 1, 0x1, "x", OQK_OPD_VARIANT
},
687 {4, 1, 0x0, "wsp", OQK_OPD_VARIANT
},
688 {8, 1, 0x1, "sp", OQK_OPD_VARIANT
},
690 {1, 1, 0x0, "b", OQK_OPD_VARIANT
},
691 {2, 1, 0x1, "h", OQK_OPD_VARIANT
},
692 {4, 1, 0x2, "s", OQK_OPD_VARIANT
},
693 {8, 1, 0x3, "d", OQK_OPD_VARIANT
},
694 {16, 1, 0x4, "q", OQK_OPD_VARIANT
},
696 {1, 8, 0x0, "8b", OQK_OPD_VARIANT
},
697 {1, 16, 0x1, "16b", OQK_OPD_VARIANT
},
698 {2, 2, 0x0, "2h", OQK_OPD_VARIANT
},
699 {2, 4, 0x2, "4h", OQK_OPD_VARIANT
},
700 {2, 8, 0x3, "8h", OQK_OPD_VARIANT
},
701 {4, 2, 0x4, "2s", OQK_OPD_VARIANT
},
702 {4, 4, 0x5, "4s", OQK_OPD_VARIANT
},
703 {8, 1, 0x6, "1d", OQK_OPD_VARIANT
},
704 {8, 2, 0x7, "2d", OQK_OPD_VARIANT
},
705 {16, 1, 0x8, "1q", OQK_OPD_VARIANT
},
707 {0, 0, 0, "z", OQK_OPD_VARIANT
},
708 {0, 0, 0, "m", OQK_OPD_VARIANT
},
710 /* Qualifiers constraining the value range.
712 Lower bound, higher bound, unused. */
714 {0, 15, 0, "CR", OQK_VALUE_IN_RANGE
},
715 {0, 7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE
},
716 {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE
},
717 {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE
},
718 {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE
},
719 {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE
},
720 {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE
},
722 /* Qualifiers for miscellaneous purpose.
724 unused, unused and unused. */
729 {0, 0, 0, "retrieving", 0},
732 static inline bfd_boolean
733 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier
)
735 return (aarch64_opnd_qualifiers
[qualifier
].kind
== OQK_OPD_VARIANT
)
739 static inline bfd_boolean
740 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier
)
742 return (aarch64_opnd_qualifiers
[qualifier
].kind
== OQK_VALUE_IN_RANGE
)
747 aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier
)
749 return aarch64_opnd_qualifiers
[qualifier
].desc
;
752 /* Given an operand qualifier, return the expected data element size
753 of a qualified operand. */
755 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier
)
757 assert (operand_variant_qualifier_p (qualifier
) == TRUE
);
758 return aarch64_opnd_qualifiers
[qualifier
].data0
;
762 aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier
)
764 assert (operand_variant_qualifier_p (qualifier
) == TRUE
);
765 return aarch64_opnd_qualifiers
[qualifier
].data1
;
769 aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier
)
771 assert (operand_variant_qualifier_p (qualifier
) == TRUE
);
772 return aarch64_opnd_qualifiers
[qualifier
].data2
;
776 get_lower_bound (aarch64_opnd_qualifier_t qualifier
)
778 assert (qualifier_value_in_range_constraint_p (qualifier
) == TRUE
);
779 return aarch64_opnd_qualifiers
[qualifier
].data0
;
783 get_upper_bound (aarch64_opnd_qualifier_t qualifier
)
785 assert (qualifier_value_in_range_constraint_p (qualifier
) == TRUE
);
786 return aarch64_opnd_qualifiers
[qualifier
].data1
;
791 aarch64_verbose (const char *str
, ...)
802 dump_qualifier_sequence (const aarch64_opnd_qualifier_t
*qualifier
)
806 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
, ++qualifier
)
807 printf ("%s,", aarch64_get_qualifier_name (*qualifier
));
812 dump_match_qualifiers (const struct aarch64_opnd_info
*opnd
,
813 const aarch64_opnd_qualifier_t
*qualifier
)
816 aarch64_opnd_qualifier_t curr
[AARCH64_MAX_OPND_NUM
];
818 aarch64_verbose ("dump_match_qualifiers:");
819 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
820 curr
[i
] = opnd
[i
].qualifier
;
821 dump_qualifier_sequence (curr
);
822 aarch64_verbose ("against");
823 dump_qualifier_sequence (qualifier
);
825 #endif /* DEBUG_AARCH64 */
827 /* TODO improve this, we can have an extra field at the runtime to
828 store the number of operands rather than calculating it every time. */
831 aarch64_num_of_operands (const aarch64_opcode
*opcode
)
834 const enum aarch64_opnd
*opnds
= opcode
->operands
;
835 while (opnds
[i
++] != AARCH64_OPND_NIL
)
838 assert (i
>= 0 && i
<= AARCH64_MAX_OPND_NUM
);
842 /* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
843 If succeeds, fill the found sequence in *RET, return 1; otherwise return 0.
845 N.B. on the entry, it is very likely that only some operands in *INST
846 have had their qualifiers been established.
848 If STOP_AT is not -1, the function will only try to match
849 the qualifier sequence for operands before and including the operand
850 of index STOP_AT; and on success *RET will only be filled with the first
851 (STOP_AT+1) qualifiers.
853 A couple examples of the matching algorithm:
861 Apart from serving the main encoding routine, this can also be called
862 during or after the operand decoding. */
865 aarch64_find_best_match (const aarch64_inst
*inst
,
866 const aarch64_opnd_qualifier_seq_t
*qualifiers_list
,
867 int stop_at
, aarch64_opnd_qualifier_t
*ret
)
871 const aarch64_opnd_qualifier_t
*qualifiers
;
873 num_opnds
= aarch64_num_of_operands (inst
->opcode
);
876 DEBUG_TRACE ("SUCCEED: no operand");
880 if (stop_at
< 0 || stop_at
>= num_opnds
)
881 stop_at
= num_opnds
- 1;
883 /* For each pattern. */
884 for (i
= 0; i
< AARCH64_MAX_QLF_SEQ_NUM
; ++i
, ++qualifiers_list
)
887 qualifiers
= *qualifiers_list
;
889 /* Start as positive. */
892 DEBUG_TRACE ("%d", i
);
895 dump_match_qualifiers (inst
->operands
, qualifiers
);
898 /* Most opcodes has much fewer patterns in the list.
899 First NIL qualifier indicates the end in the list. */
900 if (empty_qualifier_sequence_p (qualifiers
) == TRUE
)
902 DEBUG_TRACE_IF (i
== 0, "SUCCEED: empty qualifier list");
908 for (j
= 0; j
< num_opnds
&& j
<= stop_at
; ++j
, ++qualifiers
)
910 if (inst
->operands
[j
].qualifier
== AARCH64_OPND_QLF_NIL
)
912 /* Either the operand does not have qualifier, or the qualifier
913 for the operand needs to be deduced from the qualifier
915 In the latter case, any constraint checking related with
916 the obtained qualifier should be done later in
917 operand_general_constraint_met_p. */
920 else if (*qualifiers
!= inst
->operands
[j
].qualifier
)
922 /* Unless the target qualifier can also qualify the operand
923 (which has already had a non-nil qualifier), non-equal
924 qualifiers are generally un-matched. */
925 if (operand_also_qualified_p (inst
->operands
+ j
, *qualifiers
))
934 continue; /* Equal qualifiers are certainly matched. */
937 /* Qualifiers established. */
944 /* Fill the result in *RET. */
946 qualifiers
= *qualifiers_list
;
948 DEBUG_TRACE ("complete qualifiers using list %d", i
);
951 dump_qualifier_sequence (qualifiers
);
954 for (j
= 0; j
<= stop_at
; ++j
, ++qualifiers
)
955 ret
[j
] = *qualifiers
;
956 for (; j
< AARCH64_MAX_OPND_NUM
; ++j
)
957 ret
[j
] = AARCH64_OPND_QLF_NIL
;
959 DEBUG_TRACE ("SUCCESS");
963 DEBUG_TRACE ("FAIL");
967 /* Operand qualifier matching and resolving.
969 Return 1 if the operand qualifier(s) in *INST match one of the qualifier
970 sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
972 if UPDATE_P == TRUE, update the qualifier(s) in *INST after the matching
976 match_operands_qualifier (aarch64_inst
*inst
, bfd_boolean update_p
)
979 aarch64_opnd_qualifier_seq_t qualifiers
;
981 if (!aarch64_find_best_match (inst
, inst
->opcode
->qualifiers_list
, -1,
984 DEBUG_TRACE ("matching FAIL");
988 if (inst
->opcode
->flags
& F_STRICT
)
990 /* Require an exact qualifier match, even for NIL qualifiers. */
991 nops
= aarch64_num_of_operands (inst
->opcode
);
992 for (i
= 0; i
< nops
; ++i
)
993 if (inst
->operands
[i
].qualifier
!= qualifiers
[i
])
997 /* Update the qualifiers. */
998 if (update_p
== TRUE
)
999 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
1001 if (inst
->opcode
->operands
[i
] == AARCH64_OPND_NIL
)
1003 DEBUG_TRACE_IF (inst
->operands
[i
].qualifier
!= qualifiers
[i
],
1004 "update %s with %s for operand %d",
1005 aarch64_get_qualifier_name (inst
->operands
[i
].qualifier
),
1006 aarch64_get_qualifier_name (qualifiers
[i
]), i
);
1007 inst
->operands
[i
].qualifier
= qualifiers
[i
];
1010 DEBUG_TRACE ("matching SUCCESS");
1014 /* Return TRUE if VALUE is a wide constant that can be moved into a general
1017 IS32 indicates whether value is a 32-bit immediate or not.
1018 If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
1019 amount will be returned in *SHIFT_AMOUNT. */
1022 aarch64_wide_constant_p (int64_t value
, int is32
, unsigned int *shift_amount
)
1026 DEBUG_TRACE ("enter with 0x%" PRIx64
"(%" PRIi64
")", value
, value
);
1030 /* Allow all zeros or all ones in top 32-bits, so that
1031 32-bit constant expressions like ~0x80000000 are
1033 uint64_t ext
= value
;
1034 if (ext
>> 32 != 0 && ext
>> 32 != (uint64_t) 0xffffffff)
1035 /* Immediate out of range. */
1037 value
&= (int64_t) 0xffffffff;
1040 /* first, try movz then movn */
1042 if ((value
& ((int64_t) 0xffff << 0)) == value
)
1044 else if ((value
& ((int64_t) 0xffff << 16)) == value
)
1046 else if (!is32
&& (value
& ((int64_t) 0xffff << 32)) == value
)
1048 else if (!is32
&& (value
& ((int64_t) 0xffff << 48)) == value
)
1053 DEBUG_TRACE ("exit FALSE with 0x%" PRIx64
"(%" PRIi64
")", value
, value
);
1057 if (shift_amount
!= NULL
)
1058 *shift_amount
= amount
;
1060 DEBUG_TRACE ("exit TRUE with amount %d", amount
);
1065 /* Build the accepted values for immediate logical SIMD instructions.
1067 The standard encodings of the immediate value are:
1068 N imms immr SIMD size R S
1069 1 ssssss rrrrrr 64 UInt(rrrrrr) UInt(ssssss)
1070 0 0sssss 0rrrrr 32 UInt(rrrrr) UInt(sssss)
1071 0 10ssss 00rrrr 16 UInt(rrrr) UInt(ssss)
1072 0 110sss 000rrr 8 UInt(rrr) UInt(sss)
1073 0 1110ss 0000rr 4 UInt(rr) UInt(ss)
1074 0 11110s 00000r 2 UInt(r) UInt(s)
1075 where all-ones value of S is reserved.
1077 Let's call E the SIMD size.
1079 The immediate value is: S+1 bits '1' rotated to the right by R.
1081 The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
1082 (remember S != E - 1). */
1084 #define TOTAL_IMM_NB 5334
1089 aarch64_insn encoding
;
1090 } simd_imm_encoding
;
1092 static simd_imm_encoding simd_immediates
[TOTAL_IMM_NB
];
1095 simd_imm_encoding_cmp(const void *i1
, const void *i2
)
1097 const simd_imm_encoding
*imm1
= (const simd_imm_encoding
*)i1
;
1098 const simd_imm_encoding
*imm2
= (const simd_imm_encoding
*)i2
;
1100 if (imm1
->imm
< imm2
->imm
)
1102 if (imm1
->imm
> imm2
->imm
)
/* immediate bitfield standard encoding
   imm13<12> imm13<5:0> imm13<11:6> SIMD size R             S
   1         ssssss     rrrrrr      64        rrrrrr        ssssss
   0         0sssss     0rrrrr      32        rrrrr         sssss
   0         10ssss     00rrrr      16        rrrr          ssss
   0         110sss     000rrr      8         rrr           sss
   0         1110ss     0000rr      4         rr            ss
   0         11110s     00000r      2         r             s  */
static inline uint32_t
encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
{
  uint32_t encoding = s;
  encoding |= r << 6;
  encoding |= (uint32_t) is64 << 12;
  return encoding;
}
1122 build_immediate_table (void)
1124 uint32_t log_e
, e
, s
, r
, s_mask
;
1130 for (log_e
= 1; log_e
<= 6; log_e
++)
1132 /* Get element size. */
1137 mask
= 0xffffffffffffffffull
;
1143 mask
= (1ull << e
) - 1;
1145 1 ((1 << 4) - 1) << 2 = 111100
1146 2 ((1 << 3) - 1) << 3 = 111000
1147 3 ((1 << 2) - 1) << 4 = 110000
1148 4 ((1 << 1) - 1) << 5 = 100000
1149 5 ((1 << 0) - 1) << 6 = 000000 */
1150 s_mask
= ((1u << (5 - log_e
)) - 1) << (log_e
+ 1);
1152 for (s
= 0; s
< e
- 1; s
++)
1153 for (r
= 0; r
< e
; r
++)
1155 /* s+1 consecutive bits to 1 (s < 63) */
1156 imm
= (1ull << (s
+ 1)) - 1;
1157 /* rotate right by r */
1159 imm
= (imm
>> r
) | ((imm
<< (e
- r
)) & mask
);
1160 /* replicate the constant depending on SIMD size */
1163 case 1: imm
= (imm
<< 2) | imm
;
1165 case 2: imm
= (imm
<< 4) | imm
;
1167 case 3: imm
= (imm
<< 8) | imm
;
1169 case 4: imm
= (imm
<< 16) | imm
;
1171 case 5: imm
= (imm
<< 32) | imm
;
1176 simd_immediates
[nb_imms
].imm
= imm
;
1177 simd_immediates
[nb_imms
].encoding
=
1178 encode_immediate_bitfield(is64
, s
| s_mask
, r
);
1182 assert (nb_imms
== TOTAL_IMM_NB
);
1183 qsort(simd_immediates
, nb_imms
,
1184 sizeof(simd_immediates
[0]), simd_imm_encoding_cmp
);
1187 /* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
1188 be accepted by logical (immediate) instructions
1189 e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1191 ESIZE is the number of bytes in the decoded immediate value.
1192 If ENCODING is not NULL, on the return of TRUE, the standard encoding for
1193 VALUE will be returned in *ENCODING. */
1196 aarch64_logical_immediate_p (uint64_t value
, int esize
, aarch64_insn
*encoding
)
1198 simd_imm_encoding imm_enc
;
1199 const simd_imm_encoding
*imm_encoding
;
1200 static bfd_boolean initialized
= FALSE
;
1204 DEBUG_TRACE ("enter with 0x%" PRIx64
"(%" PRIi64
"), is32: %d", value
,
1207 if (initialized
== FALSE
)
1209 build_immediate_table ();
1213 /* Allow all zeros or all ones in top bits, so that
1214 constant expressions like ~1 are permitted. */
1215 upper
= (uint64_t) -1 << (esize
* 4) << (esize
* 4);
1216 if ((value
& ~upper
) != value
&& (value
| upper
) != value
)
1219 /* Replicate to a full 64-bit value. */
1221 for (i
= esize
* 8; i
< 64; i
*= 2)
1222 value
|= (value
<< i
);
1224 imm_enc
.imm
= value
;
1225 imm_encoding
= (const simd_imm_encoding
*)
1226 bsearch(&imm_enc
, simd_immediates
, TOTAL_IMM_NB
,
1227 sizeof(simd_immediates
[0]), simd_imm_encoding_cmp
);
1228 if (imm_encoding
== NULL
)
1230 DEBUG_TRACE ("exit with FALSE");
1233 if (encoding
!= NULL
)
1234 *encoding
= imm_encoding
->encoding
;
1235 DEBUG_TRACE ("exit with TRUE");
/* If 64-bit immediate IMM is in the format of
   "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
   where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
   of value "abcdefgh".  Otherwise return -1.  */

int
aarch64_shrink_expanded_imm8 (uint64_t imm)
{
  int ret = 0;
  int byte_idx;

  /* Walk the eight bytes of IMM from least to most significant; byte N
     contributes bit N of the result.  */
  for (byte_idx = 0; byte_idx < 8; byte_idx++)
    {
      unsigned int byte = (imm >> (8 * byte_idx)) & 0xff;

      switch (byte)
	{
	case 0xff:
	  ret |= 1 << byte_idx;
	  break;
	case 0x00:
	  break;
	default:
	  /* A byte that is neither all-ones nor all-zeros means IMM is
	     not an expanded 8-bit immediate.  */
	  return -1;
	}
    }
  return ret;
}
1261 /* Utility inline functions for operand_general_constraint_met_p. */
1264 set_error (aarch64_operand_error
*mismatch_detail
,
1265 enum aarch64_operand_error_kind kind
, int idx
,
1268 if (mismatch_detail
== NULL
)
1270 mismatch_detail
->kind
= kind
;
1271 mismatch_detail
->index
= idx
;
1272 mismatch_detail
->error
= error
;
1276 set_syntax_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1279 if (mismatch_detail
== NULL
)
1281 set_error (mismatch_detail
, AARCH64_OPDE_SYNTAX_ERROR
, idx
, error
);
1285 set_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1286 int idx
, int lower_bound
, int upper_bound
,
1289 if (mismatch_detail
== NULL
)
1291 set_error (mismatch_detail
, AARCH64_OPDE_OUT_OF_RANGE
, idx
, error
);
1292 mismatch_detail
->data
[0] = lower_bound
;
1293 mismatch_detail
->data
[1] = upper_bound
;
1297 set_imm_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1298 int idx
, int lower_bound
, int upper_bound
)
1300 if (mismatch_detail
== NULL
)
1302 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1303 _("immediate value"));
1307 set_offset_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1308 int idx
, int lower_bound
, int upper_bound
)
1310 if (mismatch_detail
== NULL
)
1312 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1313 _("immediate offset"));
1317 set_regno_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1318 int idx
, int lower_bound
, int upper_bound
)
1320 if (mismatch_detail
== NULL
)
1322 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1323 _("register number"));
1327 set_elem_idx_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1328 int idx
, int lower_bound
, int upper_bound
)
1330 if (mismatch_detail
== NULL
)
1332 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1333 _("register element index"));
1337 set_sft_amount_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1338 int idx
, int lower_bound
, int upper_bound
)
1340 if (mismatch_detail
== NULL
)
1342 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1346 /* Report that the MUL modifier in operand IDX should be in the range
1347 [LOWER_BOUND, UPPER_BOUND]. */
1349 set_multiplier_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1350 int idx
, int lower_bound
, int upper_bound
)
1352 if (mismatch_detail
== NULL
)
1354 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1359 set_unaligned_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1362 if (mismatch_detail
== NULL
)
1364 set_error (mismatch_detail
, AARCH64_OPDE_UNALIGNED
, idx
, NULL
);
1365 mismatch_detail
->data
[0] = alignment
;
1369 set_reg_list_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1372 if (mismatch_detail
== NULL
)
1374 set_error (mismatch_detail
, AARCH64_OPDE_REG_LIST
, idx
, NULL
);
1375 mismatch_detail
->data
[0] = expected_num
;
1379 set_other_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1382 if (mismatch_detail
== NULL
)
1384 set_error (mismatch_detail
, AARCH64_OPDE_OTHER_ERROR
, idx
, error
);
1387 /* General constraint checking based on operand code.
1389 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1390 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1392 This function has to be called after the qualifiers for all operands
1395 Mismatching error message is returned in *MISMATCH_DETAIL upon request,
1396 i.e. when MISMATCH_DETAIL is non-NULL. This avoids the generation
1397 of error message during the disassembling where error message is not
1398 wanted. We avoid the dynamic construction of strings of error messages
1399 here (i.e. in libopcodes), as it is costly and complicated; instead, we
1400 use a combination of error code, static string and some integer data to
1401 represent an error. */
1404 operand_general_constraint_met_p (const aarch64_opnd_info
*opnds
, int idx
,
1405 enum aarch64_opnd type
,
1406 const aarch64_opcode
*opcode
,
1407 aarch64_operand_error
*mismatch_detail
)
1409 unsigned num
, modifiers
, shift
;
1411 int64_t imm
, min_value
, max_value
;
1412 uint64_t uvalue
, mask
;
1413 const aarch64_opnd_info
*opnd
= opnds
+ idx
;
1414 aarch64_opnd_qualifier_t qualifier
= opnd
->qualifier
;
1416 assert (opcode
->operands
[idx
] == opnd
->type
&& opnd
->type
== type
);
1418 switch (aarch64_operands
[type
].op_class
)
1420 case AARCH64_OPND_CLASS_INT_REG
:
1421 /* Check pair reg constraints for cas* instructions. */
1422 if (type
== AARCH64_OPND_PAIRREG
)
1424 assert (idx
== 1 || idx
== 3);
1425 if (opnds
[idx
- 1].reg
.regno
% 2 != 0)
1427 set_syntax_error (mismatch_detail
, idx
- 1,
1428 _("reg pair must start from even reg"));
1431 if (opnds
[idx
].reg
.regno
!= opnds
[idx
- 1].reg
.regno
+ 1)
1433 set_syntax_error (mismatch_detail
, idx
,
1434 _("reg pair must be contiguous"));
1440 /* <Xt> may be optional in some IC and TLBI instructions. */
1441 if (type
== AARCH64_OPND_Rt_SYS
)
1443 assert (idx
== 1 && (aarch64_get_operand_class (opnds
[0].type
)
1444 == AARCH64_OPND_CLASS_SYSTEM
));
1445 if (opnds
[1].present
1446 && !aarch64_sys_ins_reg_has_xt (opnds
[0].sysins_op
))
1448 set_other_error (mismatch_detail
, idx
, _("extraneous register"));
1451 if (!opnds
[1].present
1452 && aarch64_sys_ins_reg_has_xt (opnds
[0].sysins_op
))
1454 set_other_error (mismatch_detail
, idx
, _("missing register"));
1460 case AARCH64_OPND_QLF_WSP
:
1461 case AARCH64_OPND_QLF_SP
:
1462 if (!aarch64_stack_pointer_p (opnd
))
1464 set_other_error (mismatch_detail
, idx
,
1465 _("stack pointer register expected"));
1474 case AARCH64_OPND_CLASS_SVE_REG
:
1477 case AARCH64_OPND_SVE_Zn_INDEX
:
1478 size
= aarch64_get_qualifier_esize (opnd
->qualifier
);
1479 if (!value_in_range_p (opnd
->reglane
.index
, 0, 64 / size
- 1))
1481 set_elem_idx_out_of_range_error (mismatch_detail
, idx
,
1487 case AARCH64_OPND_SVE_ZnxN
:
1488 case AARCH64_OPND_SVE_ZtxN
:
1489 if (opnd
->reglist
.num_regs
!= get_opcode_dependent_value (opcode
))
1491 set_other_error (mismatch_detail
, idx
,
1492 _("invalid register list"));
1502 case AARCH64_OPND_CLASS_PRED_REG
:
1503 if (opnd
->reg
.regno
>= 8
1504 && get_operand_fields_width (get_operand_from_code (type
)) == 3)
1506 set_other_error (mismatch_detail
, idx
, _("p0-p7 expected"));
1511 case AARCH64_OPND_CLASS_COND
:
1512 if (type
== AARCH64_OPND_COND1
1513 && (opnds
[idx
].cond
->value
& 0xe) == 0xe)
1515 /* Not allow AL or NV. */
1516 set_syntax_error (mismatch_detail
, idx
, NULL
);
1520 case AARCH64_OPND_CLASS_ADDRESS
:
1521 /* Check writeback. */
1522 switch (opcode
->iclass
)
1526 case ldstnapair_offs
:
1529 if (opnd
->addr
.writeback
== 1)
1531 set_syntax_error (mismatch_detail
, idx
,
1532 _("unexpected address writeback"));
1537 if (opnd
->addr
.writeback
== 1 && opnd
->addr
.preind
!= 1)
1539 set_syntax_error (mismatch_detail
, idx
,
1540 _("unexpected address writeback"));
1545 case ldstpair_indexed
:
1548 if (opnd
->addr
.writeback
== 0)
1550 set_syntax_error (mismatch_detail
, idx
,
1551 _("address writeback expected"));
1556 assert (opnd
->addr
.writeback
== 0);
1561 case AARCH64_OPND_ADDR_SIMM7
:
1562 /* Scaled signed 7 bits immediate offset. */
1563 /* Get the size of the data element that is accessed, which may be
1564 different from that of the source register size,
1565 e.g. in strb/ldrb. */
1566 size
= aarch64_get_qualifier_esize (opnd
->qualifier
);
1567 if (!value_in_range_p (opnd
->addr
.offset
.imm
, -64 * size
, 63 * size
))
1569 set_offset_out_of_range_error (mismatch_detail
, idx
,
1570 -64 * size
, 63 * size
);
1573 if (!value_aligned_p (opnd
->addr
.offset
.imm
, size
))
1575 set_unaligned_error (mismatch_detail
, idx
, size
);
1579 case AARCH64_OPND_ADDR_SIMM9
:
1580 /* Unscaled signed 9 bits immediate offset. */
1581 if (!value_in_range_p (opnd
->addr
.offset
.imm
, -256, 255))
1583 set_offset_out_of_range_error (mismatch_detail
, idx
, -256, 255);
1588 case AARCH64_OPND_ADDR_SIMM9_2
:
1589 /* Unscaled signed 9 bits immediate offset, which has to be negative
1591 size
= aarch64_get_qualifier_esize (qualifier
);
1592 if ((value_in_range_p (opnd
->addr
.offset
.imm
, 0, 255)
1593 && !value_aligned_p (opnd
->addr
.offset
.imm
, size
))
1594 || value_in_range_p (opnd
->addr
.offset
.imm
, -256, -1))
1596 set_other_error (mismatch_detail
, idx
,
1597 _("negative or unaligned offset expected"));
1600 case AARCH64_OPND_ADDR_SIMM10
:
1601 /* Scaled signed 10 bits immediate offset. */
1602 if (!value_in_range_p (opnd
->addr
.offset
.imm
, -4096, 4088))
1604 set_offset_out_of_range_error (mismatch_detail
, idx
, -4096, 4088);
1607 if (!value_aligned_p (opnd
->addr
.offset
.imm
, 8))
1609 set_unaligned_error (mismatch_detail
, idx
, 8);
1614 case AARCH64_OPND_SIMD_ADDR_POST
:
1615 /* AdvSIMD load/store multiple structures, post-index. */
1617 if (opnd
->addr
.offset
.is_reg
)
1619 if (value_in_range_p (opnd
->addr
.offset
.regno
, 0, 30))
1623 set_other_error (mismatch_detail
, idx
,
1624 _("invalid register offset"));
1630 const aarch64_opnd_info
*prev
= &opnds
[idx
-1];
1631 unsigned num_bytes
; /* total number of bytes transferred. */
1632 /* The opcode dependent area stores the number of elements in
1633 each structure to be loaded/stored. */
1634 int is_ld1r
= get_opcode_dependent_value (opcode
) == 1;
1635 if (opcode
->operands
[0] == AARCH64_OPND_LVt_AL
)
1636 /* Special handling of loading single structure to all lane. */
1637 num_bytes
= (is_ld1r
? 1 : prev
->reglist
.num_regs
)
1638 * aarch64_get_qualifier_esize (prev
->qualifier
);
1640 num_bytes
= prev
->reglist
.num_regs
1641 * aarch64_get_qualifier_esize (prev
->qualifier
)
1642 * aarch64_get_qualifier_nelem (prev
->qualifier
);
1643 if ((int) num_bytes
!= opnd
->addr
.offset
.imm
)
1645 set_other_error (mismatch_detail
, idx
,
1646 _("invalid post-increment amount"));
1652 case AARCH64_OPND_ADDR_REGOFF
:
1653 /* Get the size of the data element that is accessed, which may be
1654 different from that of the source register size,
1655 e.g. in strb/ldrb. */
1656 size
= aarch64_get_qualifier_esize (opnd
->qualifier
);
1657 /* It is either no shift or shift by the binary logarithm of SIZE. */
1658 if (opnd
->shifter
.amount
!= 0
1659 && opnd
->shifter
.amount
!= (int)get_logsz (size
))
1661 set_other_error (mismatch_detail
, idx
,
1662 _("invalid shift amount"));
1665 /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
1667 switch (opnd
->shifter
.kind
)
1669 case AARCH64_MOD_UXTW
:
1670 case AARCH64_MOD_LSL
:
1671 case AARCH64_MOD_SXTW
:
1672 case AARCH64_MOD_SXTX
: break;
1674 set_other_error (mismatch_detail
, idx
,
1675 _("invalid extend/shift operator"));
1680 case AARCH64_OPND_ADDR_UIMM12
:
1681 imm
= opnd
->addr
.offset
.imm
;
1682 /* Get the size of the data element that is accessed, which may be
1683 different from that of the source register size,
1684 e.g. in strb/ldrb. */
1685 size
= aarch64_get_qualifier_esize (qualifier
);
1686 if (!value_in_range_p (opnd
->addr
.offset
.imm
, 0, 4095 * size
))
1688 set_offset_out_of_range_error (mismatch_detail
, idx
,
1692 if (!value_aligned_p (opnd
->addr
.offset
.imm
, size
))
1694 set_unaligned_error (mismatch_detail
, idx
, size
);
1699 case AARCH64_OPND_ADDR_PCREL14
:
1700 case AARCH64_OPND_ADDR_PCREL19
:
1701 case AARCH64_OPND_ADDR_PCREL21
:
1702 case AARCH64_OPND_ADDR_PCREL26
:
1703 imm
= opnd
->imm
.value
;
1704 if (operand_need_shift_by_two (get_operand_from_code (type
)))
1706 /* The offset value in a PC-relative branch instruction is alway
1707 4-byte aligned and is encoded without the lowest 2 bits. */
1708 if (!value_aligned_p (imm
, 4))
1710 set_unaligned_error (mismatch_detail
, idx
, 4);
1713 /* Right shift by 2 so that we can carry out the following check
1717 size
= get_operand_fields_width (get_operand_from_code (type
));
1718 if (!value_fit_signed_field_p (imm
, size
))
1720 set_other_error (mismatch_detail
, idx
,
1721 _("immediate out of range"));
1726 case AARCH64_OPND_SVE_ADDR_RI_S4xVL
:
1727 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL
:
1728 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL
:
1729 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL
:
1733 assert (!opnd
->addr
.offset
.is_reg
);
1734 assert (opnd
->addr
.preind
);
1735 num
= 1 + get_operand_specific_data (&aarch64_operands
[type
]);
1738 if ((opnd
->addr
.offset
.imm
!= 0 && !opnd
->shifter
.operator_present
)
1739 || (opnd
->shifter
.operator_present
1740 && opnd
->shifter
.kind
!= AARCH64_MOD_MUL_VL
))
1742 set_other_error (mismatch_detail
, idx
,
1743 _("invalid addressing mode"));
1746 if (!value_in_range_p (opnd
->addr
.offset
.imm
, min_value
, max_value
))
1748 set_offset_out_of_range_error (mismatch_detail
, idx
,
1749 min_value
, max_value
);
1752 if (!value_aligned_p (opnd
->addr
.offset
.imm
, num
))
1754 set_unaligned_error (mismatch_detail
, idx
, num
);
1759 case AARCH64_OPND_SVE_ADDR_RI_S6xVL
:
1762 goto sve_imm_offset_vl
;
1764 case AARCH64_OPND_SVE_ADDR_RI_S9xVL
:
1767 goto sve_imm_offset_vl
;
1769 case AARCH64_OPND_SVE_ADDR_RI_U6
:
1770 case AARCH64_OPND_SVE_ADDR_RI_U6x2
:
1771 case AARCH64_OPND_SVE_ADDR_RI_U6x4
:
1772 case AARCH64_OPND_SVE_ADDR_RI_U6x8
:
1776 assert (!opnd
->addr
.offset
.is_reg
);
1777 assert (opnd
->addr
.preind
);
1778 num
= 1 << get_operand_specific_data (&aarch64_operands
[type
]);
1781 if (opnd
->shifter
.operator_present
1782 || opnd
->shifter
.amount_present
)
1784 set_other_error (mismatch_detail
, idx
,
1785 _("invalid addressing mode"));
1788 if (!value_in_range_p (opnd
->addr
.offset
.imm
, min_value
, max_value
))
1790 set_offset_out_of_range_error (mismatch_detail
, idx
,
1791 min_value
, max_value
);
1794 if (!value_aligned_p (opnd
->addr
.offset
.imm
, num
))
1796 set_unaligned_error (mismatch_detail
, idx
, num
);
1801 case AARCH64_OPND_SVE_ADDR_RR
:
1802 case AARCH64_OPND_SVE_ADDR_RR_LSL1
:
1803 case AARCH64_OPND_SVE_ADDR_RR_LSL2
:
1804 case AARCH64_OPND_SVE_ADDR_RR_LSL3
:
1805 case AARCH64_OPND_SVE_ADDR_RX
:
1806 case AARCH64_OPND_SVE_ADDR_RX_LSL1
:
1807 case AARCH64_OPND_SVE_ADDR_RX_LSL2
:
1808 case AARCH64_OPND_SVE_ADDR_RX_LSL3
:
1809 case AARCH64_OPND_SVE_ADDR_RZ
:
1810 case AARCH64_OPND_SVE_ADDR_RZ_LSL1
:
1811 case AARCH64_OPND_SVE_ADDR_RZ_LSL2
:
1812 case AARCH64_OPND_SVE_ADDR_RZ_LSL3
:
1813 modifiers
= 1 << AARCH64_MOD_LSL
;
1815 assert (opnd
->addr
.offset
.is_reg
);
1816 assert (opnd
->addr
.preind
);
1817 if ((aarch64_operands
[type
].flags
& OPD_F_NO_ZR
) != 0
1818 && opnd
->addr
.offset
.regno
== 31)
1820 set_other_error (mismatch_detail
, idx
,
1821 _("index register xzr is not allowed"));
1824 if (((1 << opnd
->shifter
.kind
) & modifiers
) == 0
1825 || (opnd
->shifter
.amount
1826 != get_operand_specific_data (&aarch64_operands
[type
])))
1828 set_other_error (mismatch_detail
, idx
,
1829 _("invalid addressing mode"));
1834 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14
:
1835 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22
:
1836 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14
:
1837 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22
:
1838 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14
:
1839 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22
:
1840 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14
:
1841 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22
:
1842 modifiers
= (1 << AARCH64_MOD_SXTW
) | (1 << AARCH64_MOD_UXTW
);
1843 goto sve_rr_operand
;
1845 case AARCH64_OPND_SVE_ADDR_ZI_U5
:
1846 case AARCH64_OPND_SVE_ADDR_ZI_U5x2
:
1847 case AARCH64_OPND_SVE_ADDR_ZI_U5x4
:
1848 case AARCH64_OPND_SVE_ADDR_ZI_U5x8
:
1851 goto sve_imm_offset
;
1853 case AARCH64_OPND_SVE_ADDR_ZZ_LSL
:
1854 modifiers
= 1 << AARCH64_MOD_LSL
;
1856 assert (opnd
->addr
.offset
.is_reg
);
1857 assert (opnd
->addr
.preind
);
1858 if (((1 << opnd
->shifter
.kind
) & modifiers
) == 0
1859 || opnd
->shifter
.amount
< 0
1860 || opnd
->shifter
.amount
> 3)
1862 set_other_error (mismatch_detail
, idx
,
1863 _("invalid addressing mode"));
1868 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW
:
1869 modifiers
= (1 << AARCH64_MOD_SXTW
);
1870 goto sve_zz_operand
;
1872 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW
:
1873 modifiers
= 1 << AARCH64_MOD_UXTW
;
1874 goto sve_zz_operand
;
1881 case AARCH64_OPND_CLASS_SIMD_REGLIST
:
1882 if (type
== AARCH64_OPND_LEt
)
1884 /* Get the upper bound for the element index. */
1885 num
= 16 / aarch64_get_qualifier_esize (qualifier
) - 1;
1886 if (!value_in_range_p (opnd
->reglist
.index
, 0, num
))
1888 set_elem_idx_out_of_range_error (mismatch_detail
, idx
, 0, num
);
1892 /* The opcode dependent area stores the number of elements in
1893 each structure to be loaded/stored. */
1894 num
= get_opcode_dependent_value (opcode
);
1897 case AARCH64_OPND_LVt
:
1898 assert (num
>= 1 && num
<= 4);
1899 /* Unless LD1/ST1, the number of registers should be equal to that
1900 of the structure elements. */
1901 if (num
!= 1 && opnd
->reglist
.num_regs
!= num
)
1903 set_reg_list_error (mismatch_detail
, idx
, num
);
1907 case AARCH64_OPND_LVt_AL
:
1908 case AARCH64_OPND_LEt
:
1909 assert (num
>= 1 && num
<= 4);
1910 /* The number of registers should be equal to that of the structure
1912 if (opnd
->reglist
.num_regs
!= num
)
1914 set_reg_list_error (mismatch_detail
, idx
, num
);
1923 case AARCH64_OPND_CLASS_IMMEDIATE
:
1924 /* Constraint check on immediate operand. */
1925 imm
= opnd
->imm
.value
;
1926 /* E.g. imm_0_31 constrains value to be 0..31. */
1927 if (qualifier_value_in_range_constraint_p (qualifier
)
1928 && !value_in_range_p (imm
, get_lower_bound (qualifier
),
1929 get_upper_bound (qualifier
)))
1931 set_imm_out_of_range_error (mismatch_detail
, idx
,
1932 get_lower_bound (qualifier
),
1933 get_upper_bound (qualifier
));
1939 case AARCH64_OPND_AIMM
:
1940 if (opnd
->shifter
.kind
!= AARCH64_MOD_LSL
)
1942 set_other_error (mismatch_detail
, idx
,
1943 _("invalid shift operator"));
1946 if (opnd
->shifter
.amount
!= 0 && opnd
->shifter
.amount
!= 12)
1948 set_other_error (mismatch_detail
, idx
,
1949 _("shift amount must be 0 or 12"));
1952 if (!value_fit_unsigned_field_p (opnd
->imm
.value
, 12))
1954 set_other_error (mismatch_detail
, idx
,
1955 _("immediate out of range"));
1960 case AARCH64_OPND_HALF
:
1961 assert (idx
== 1 && opnds
[0].type
== AARCH64_OPND_Rd
);
1962 if (opnd
->shifter
.kind
!= AARCH64_MOD_LSL
)
1964 set_other_error (mismatch_detail
, idx
,
1965 _("invalid shift operator"));
1968 size
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
1969 if (!value_aligned_p (opnd
->shifter
.amount
, 16))
1971 set_other_error (mismatch_detail
, idx
,
1972 _("shift amount must be a multiple of 16"));
1975 if (!value_in_range_p (opnd
->shifter
.amount
, 0, size
* 8 - 16))
1977 set_sft_amount_out_of_range_error (mismatch_detail
, idx
,
1981 if (opnd
->imm
.value
< 0)
1983 set_other_error (mismatch_detail
, idx
,
1984 _("negative immediate value not allowed"));
1987 if (!value_fit_unsigned_field_p (opnd
->imm
.value
, 16))
1989 set_other_error (mismatch_detail
, idx
,
1990 _("immediate out of range"));
1995 case AARCH64_OPND_IMM_MOV
:
1997 int esize
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
1998 imm
= opnd
->imm
.value
;
2002 case OP_MOV_IMM_WIDEN
:
2005 case OP_MOV_IMM_WIDE
:
2006 if (!aarch64_wide_constant_p (imm
, esize
== 4, NULL
))
2008 set_other_error (mismatch_detail
, idx
,
2009 _("immediate out of range"));
2013 case OP_MOV_IMM_LOG
:
2014 if (!aarch64_logical_immediate_p (imm
, esize
, NULL
))
2016 set_other_error (mismatch_detail
, idx
,
2017 _("immediate out of range"));
2028 case AARCH64_OPND_NZCV
:
2029 case AARCH64_OPND_CCMP_IMM
:
2030 case AARCH64_OPND_EXCEPTION
:
2031 case AARCH64_OPND_UIMM4
:
2032 case AARCH64_OPND_UIMM7
:
2033 case AARCH64_OPND_UIMM3_OP1
:
2034 case AARCH64_OPND_UIMM3_OP2
:
2035 case AARCH64_OPND_SVE_UIMM3
:
2036 case AARCH64_OPND_SVE_UIMM7
:
2037 case AARCH64_OPND_SVE_UIMM8
:
2038 case AARCH64_OPND_SVE_UIMM8_53
:
2039 size
= get_operand_fields_width (get_operand_from_code (type
));
2041 if (!value_fit_unsigned_field_p (opnd
->imm
.value
, size
))
2043 set_imm_out_of_range_error (mismatch_detail
, idx
, 0,
2049 case AARCH64_OPND_SIMM5
:
2050 case AARCH64_OPND_SVE_SIMM5
:
2051 case AARCH64_OPND_SVE_SIMM5B
:
2052 case AARCH64_OPND_SVE_SIMM6
:
2053 case AARCH64_OPND_SVE_SIMM8
:
2054 size
= get_operand_fields_width (get_operand_from_code (type
));
2056 if (!value_fit_signed_field_p (opnd
->imm
.value
, size
))
2058 set_imm_out_of_range_error (mismatch_detail
, idx
,
2060 (1 << (size
- 1)) - 1);
2065 case AARCH64_OPND_WIDTH
:
2066 assert (idx
> 1 && opnds
[idx
-1].type
== AARCH64_OPND_IMM
2067 && opnds
[0].type
== AARCH64_OPND_Rd
);
2068 size
= get_upper_bound (qualifier
);
2069 if (opnd
->imm
.value
+ opnds
[idx
-1].imm
.value
> size
)
2070 /* lsb+width <= reg.size */
2072 set_imm_out_of_range_error (mismatch_detail
, idx
, 1,
2073 size
- opnds
[idx
-1].imm
.value
);
2078 case AARCH64_OPND_LIMM
:
2079 case AARCH64_OPND_SVE_LIMM
:
2081 int esize
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
2082 uint64_t uimm
= opnd
->imm
.value
;
2083 if (opcode
->op
== OP_BIC
)
2085 if (aarch64_logical_immediate_p (uimm
, esize
, NULL
) == FALSE
)
2087 set_other_error (mismatch_detail
, idx
,
2088 _("immediate out of range"));
2094 case AARCH64_OPND_IMM0
:
2095 case AARCH64_OPND_FPIMM0
:
2096 if (opnd
->imm
.value
!= 0)
2098 set_other_error (mismatch_detail
, idx
,
2099 _("immediate zero expected"));
2104 case AARCH64_OPND_IMM_ROT1
:
2105 case AARCH64_OPND_IMM_ROT2
:
2106 if (opnd
->imm
.value
!= 0
2107 && opnd
->imm
.value
!= 90
2108 && opnd
->imm
.value
!= 180
2109 && opnd
->imm
.value
!= 270)
2111 set_other_error (mismatch_detail
, idx
,
2112 _("rotate expected to be 0, 90, 180 or 270"));
2117 case AARCH64_OPND_IMM_ROT3
:
2118 if (opnd
->imm
.value
!= 90 && opnd
->imm
.value
!= 270)
2120 set_other_error (mismatch_detail
, idx
,
2121 _("rotate expected to be 90 or 270"));
2126 case AARCH64_OPND_SHLL_IMM
:
2128 size
= 8 * aarch64_get_qualifier_esize (opnds
[idx
- 1].qualifier
);
2129 if (opnd
->imm
.value
!= size
)
2131 set_other_error (mismatch_detail
, idx
,
2132 _("invalid shift amount"));
2137 case AARCH64_OPND_IMM_VLSL
:
2138 size
= aarch64_get_qualifier_esize (qualifier
);
2139 if (!value_in_range_p (opnd
->imm
.value
, 0, size
* 8 - 1))
2141 set_imm_out_of_range_error (mismatch_detail
, idx
, 0,
2147 case AARCH64_OPND_IMM_VLSR
:
2148 size
= aarch64_get_qualifier_esize (qualifier
);
2149 if (!value_in_range_p (opnd
->imm
.value
, 1, size
* 8))
2151 set_imm_out_of_range_error (mismatch_detail
, idx
, 1, size
* 8);
2156 case AARCH64_OPND_SIMD_IMM
:
2157 case AARCH64_OPND_SIMD_IMM_SFT
:
2158 /* Qualifier check. */
2161 case AARCH64_OPND_QLF_LSL
:
2162 if (opnd
->shifter
.kind
!= AARCH64_MOD_LSL
)
2164 set_other_error (mismatch_detail
, idx
,
2165 _("invalid shift operator"));
2169 case AARCH64_OPND_QLF_MSL
:
2170 if (opnd
->shifter
.kind
!= AARCH64_MOD_MSL
)
2172 set_other_error (mismatch_detail
, idx
,
2173 _("invalid shift operator"));
2177 case AARCH64_OPND_QLF_NIL
:
2178 if (opnd
->shifter
.kind
!= AARCH64_MOD_NONE
)
2180 set_other_error (mismatch_detail
, idx
,
2181 _("shift is not permitted"));
2189 /* Is the immediate valid? */
2191 if (aarch64_get_qualifier_esize (opnds
[0].qualifier
) != 8)
2193 /* uimm8 or simm8 */
2194 if (!value_in_range_p (opnd
->imm
.value
, -128, 255))
2196 set_imm_out_of_range_error (mismatch_detail
, idx
, -128, 255);
2200 else if (aarch64_shrink_expanded_imm8 (opnd
->imm
.value
) < 0)
2203 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
2204 ffffffffgggggggghhhhhhhh'. */
2205 set_other_error (mismatch_detail
, idx
,
2206 _("invalid value for immediate"));
2209 /* Is the shift amount valid? */
2210 switch (opnd
->shifter
.kind
)
2212 case AARCH64_MOD_LSL
:
2213 size
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
2214 if (!value_in_range_p (opnd
->shifter
.amount
, 0, (size
- 1) * 8))
2216 set_sft_amount_out_of_range_error (mismatch_detail
, idx
, 0,
2220 if (!value_aligned_p (opnd
->shifter
.amount
, 8))
2222 set_unaligned_error (mismatch_detail
, idx
, 8);
2226 case AARCH64_MOD_MSL
:
2227 /* Only 8 and 16 are valid shift amount. */
2228 if (opnd
->shifter
.amount
!= 8 && opnd
->shifter
.amount
!= 16)
2230 set_other_error (mismatch_detail
, idx
,
2231 _("shift amount must be 0 or 16"));
2236 if (opnd
->shifter
.kind
!= AARCH64_MOD_NONE
)
2238 set_other_error (mismatch_detail
, idx
,
2239 _("invalid shift operator"));
2246 case AARCH64_OPND_FPIMM
:
2247 case AARCH64_OPND_SIMD_FPIMM
:
2248 case AARCH64_OPND_SVE_FPIMM8
:
2249 if (opnd
->imm
.is_fp
== 0)
2251 set_other_error (mismatch_detail
, idx
,
2252 _("floating-point immediate expected"));
2255 /* The value is expected to be an 8-bit floating-point constant with
2256 sign, 3-bit exponent and normalized 4 bits of precision, encoded
2257 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
2259 if (!value_in_range_p (opnd
->imm
.value
, 0, 255))
2261 set_other_error (mismatch_detail
, idx
,
2262 _("immediate out of range"));
2265 if (opnd
->shifter
.kind
!= AARCH64_MOD_NONE
)
2267 set_other_error (mismatch_detail
, idx
,
2268 _("invalid shift operator"));
2273 case AARCH64_OPND_SVE_AIMM
:
2276 assert (opnd
->shifter
.kind
== AARCH64_MOD_LSL
);
2277 size
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
2278 mask
= ~((uint64_t) -1 << (size
* 4) << (size
* 4));
2279 uvalue
= opnd
->imm
.value
;
2280 shift
= opnd
->shifter
.amount
;
2285 set_other_error (mismatch_detail
, idx
,
2286 _("no shift amount allowed for"
2287 " 8-bit constants"));
2293 if (shift
!= 0 && shift
!= 8)
2295 set_other_error (mismatch_detail
, idx
,
2296 _("shift amount must be 0 or 8"));
2299 if (shift
== 0 && (uvalue
& 0xff) == 0)
2302 uvalue
= (int64_t) uvalue
/ 256;
2306 if ((uvalue
& mask
) != uvalue
&& (uvalue
| ~mask
) != uvalue
)
2308 set_other_error (mismatch_detail
, idx
,
2309 _("immediate too big for element size"));
2312 uvalue
= (uvalue
- min_value
) & mask
;
2315 set_other_error (mismatch_detail
, idx
,
2316 _("invalid arithmetic immediate"));
2321 case AARCH64_OPND_SVE_ASIMM
:
2325 case AARCH64_OPND_SVE_I1_HALF_ONE
:
2326 assert (opnd
->imm
.is_fp
);
2327 if (opnd
->imm
.value
!= 0x3f000000 && opnd
->imm
.value
!= 0x3f800000)
2329 set_other_error (mismatch_detail
, idx
,
2330 _("floating-point value must be 0.5 or 1.0"));
2335 case AARCH64_OPND_SVE_I1_HALF_TWO
:
2336 assert (opnd
->imm
.is_fp
);
2337 if (opnd
->imm
.value
!= 0x3f000000 && opnd
->imm
.value
!= 0x40000000)
2339 set_other_error (mismatch_detail
, idx
,
2340 _("floating-point value must be 0.5 or 2.0"));
2345 case AARCH64_OPND_SVE_I1_ZERO_ONE
:
2346 assert (opnd
->imm
.is_fp
);
2347 if (opnd
->imm
.value
!= 0 && opnd
->imm
.value
!= 0x3f800000)
2349 set_other_error (mismatch_detail
, idx
,
2350 _("floating-point value must be 0.0 or 1.0"));
2355 case AARCH64_OPND_SVE_INV_LIMM
:
2357 int esize
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
2358 uint64_t uimm
= ~opnd
->imm
.value
;
2359 if (!aarch64_logical_immediate_p (uimm
, esize
, NULL
))
2361 set_other_error (mismatch_detail
, idx
,
2362 _("immediate out of range"));
2368 case AARCH64_OPND_SVE_LIMM_MOV
:
2370 int esize
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
2371 uint64_t uimm
= opnd
->imm
.value
;
2372 if (!aarch64_logical_immediate_p (uimm
, esize
, NULL
))
2374 set_other_error (mismatch_detail
, idx
,
2375 _("immediate out of range"));
2378 if (!aarch64_sve_dupm_mov_immediate_p (uimm
, esize
))
2380 set_other_error (mismatch_detail
, idx
,
2381 _("invalid replicated MOV immediate"));
2387 case AARCH64_OPND_SVE_PATTERN_SCALED
:
2388 assert (opnd
->shifter
.kind
== AARCH64_MOD_MUL
);
2389 if (!value_in_range_p (opnd
->shifter
.amount
, 1, 16))
2391 set_multiplier_out_of_range_error (mismatch_detail
, idx
, 1, 16);
2396 case AARCH64_OPND_SVE_SHLIMM_PRED
:
2397 case AARCH64_OPND_SVE_SHLIMM_UNPRED
:
2398 size
= aarch64_get_qualifier_esize (opnds
[idx
- 1].qualifier
);
2399 if (!value_in_range_p (opnd
->imm
.value
, 0, 8 * size
- 1))
2401 set_imm_out_of_range_error (mismatch_detail
, idx
,
2407 case AARCH64_OPND_SVE_SHRIMM_PRED
:
2408 case AARCH64_OPND_SVE_SHRIMM_UNPRED
:
2409 size
= aarch64_get_qualifier_esize (opnds
[idx
- 1].qualifier
);
2410 if (!value_in_range_p (opnd
->imm
.value
, 1, 8 * size
))
2412 set_imm_out_of_range_error (mismatch_detail
, idx
, 1, 8 * size
);
2422 case AARCH64_OPND_CLASS_SYSTEM
:
2425 case AARCH64_OPND_PSTATEFIELD
:
2426 assert (idx
== 0 && opnds
[1].type
== AARCH64_OPND_UIMM4
);
2429 The immediate must be #0 or #1. */
2430 if ((opnd
->pstatefield
== 0x03 /* UAO. */
2431 || opnd
->pstatefield
== 0x04) /* PAN. */
2432 && opnds
[1].imm
.value
> 1)
2434 set_imm_out_of_range_error (mismatch_detail
, idx
, 0, 1);
2437 /* MSR SPSel, #uimm4
2438 Uses uimm4 as a control value to select the stack pointer: if
2439 bit 0 is set it selects the current exception level's stack
2440 pointer, if bit 0 is clear it selects shared EL0 stack pointer.
2441 Bits 1 to 3 of uimm4 are reserved and should be zero. */
2442 if (opnd
->pstatefield
== 0x05 /* spsel */ && opnds
[1].imm
.value
> 1)
2444 set_imm_out_of_range_error (mismatch_detail
, idx
, 0, 1);
2453 case AARCH64_OPND_CLASS_SIMD_ELEMENT
:
2454 /* Get the upper bound for the element index. */
2455 if (opcode
->op
== OP_FCMLA_ELEM
)
2456 /* FCMLA index range depends on the vector size of other operands
2457 and is halfed because complex numbers take two elements. */
2458 num
= aarch64_get_qualifier_nelem (opnds
[0].qualifier
)
2459 * aarch64_get_qualifier_esize (opnds
[0].qualifier
) / 2;
2462 num
= num
/ aarch64_get_qualifier_esize (qualifier
) - 1;
2464 /* Index out-of-range. */
2465 if (!value_in_range_p (opnd
->reglane
.index
, 0, num
))
2467 set_elem_idx_out_of_range_error (mismatch_detail
, idx
, 0, num
);
2470 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
2471 <Vm> Is the vector register (V0-V31) or (V0-V15), whose
2472 number is encoded in "size:M:Rm":
2478 if (type
== AARCH64_OPND_Em
&& qualifier
== AARCH64_OPND_QLF_S_H
2479 && !value_in_range_p (opnd
->reglane
.regno
, 0, 15))
2481 set_regno_out_of_range_error (mismatch_detail
, idx
, 0, 15);
2486 case AARCH64_OPND_CLASS_MODIFIED_REG
:
2487 assert (idx
== 1 || idx
== 2);
2490 case AARCH64_OPND_Rm_EXT
:
2491 if (aarch64_extend_operator_p (opnd
->shifter
.kind
) == FALSE
2492 && opnd
->shifter
.kind
!= AARCH64_MOD_LSL
)
2494 set_other_error (mismatch_detail
, idx
,
2495 _("extend operator expected"));
2498 /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
2499 (i.e. SP), in which case it defaults to LSL. The LSL alias is
2500 only valid when "Rd" or "Rn" is '11111', and is preferred in that
2502 if (!aarch64_stack_pointer_p (opnds
+ 0)
2503 && (idx
!= 2 || !aarch64_stack_pointer_p (opnds
+ 1)))
2505 if (!opnd
->shifter
.operator_present
)
2507 set_other_error (mismatch_detail
, idx
,
2508 _("missing extend operator"));
2511 else if (opnd
->shifter
.kind
== AARCH64_MOD_LSL
)
2513 set_other_error (mismatch_detail
, idx
,
2514 _("'LSL' operator not allowed"));
2518 assert (opnd
->shifter
.operator_present
/* Default to LSL. */
2519 || opnd
->shifter
.kind
== AARCH64_MOD_LSL
);
2520 if (!value_in_range_p (opnd
->shifter
.amount
, 0, 4))
2522 set_sft_amount_out_of_range_error (mismatch_detail
, idx
, 0, 4);
2525 /* In the 64-bit form, the final register operand is written as Wm
2526 for all but the (possibly omitted) UXTX/LSL and SXTX
2528 N.B. GAS allows X register to be used with any operator as a
2529 programming convenience. */
2530 if (qualifier
== AARCH64_OPND_QLF_X
2531 && opnd
->shifter
.kind
!= AARCH64_MOD_LSL
2532 && opnd
->shifter
.kind
!= AARCH64_MOD_UXTX
2533 && opnd
->shifter
.kind
!= AARCH64_MOD_SXTX
)
2535 set_other_error (mismatch_detail
, idx
, _("W register expected"));
2540 case AARCH64_OPND_Rm_SFT
:
2541 /* ROR is not available to the shifted register operand in
2542 arithmetic instructions. */
2543 if (aarch64_shift_operator_p (opnd
->shifter
.kind
) == FALSE
)
2545 set_other_error (mismatch_detail
, idx
,
2546 _("shift operator expected"));
2549 if (opnd
->shifter
.kind
== AARCH64_MOD_ROR
2550 && opcode
->iclass
!= log_shift
)
2552 set_other_error (mismatch_detail
, idx
,
2553 _("'ROR' operator not allowed"));
2556 num
= qualifier
== AARCH64_OPND_QLF_W
? 31 : 63;
2557 if (!value_in_range_p (opnd
->shifter
.amount
, 0, num
))
2559 set_sft_amount_out_of_range_error (mismatch_detail
, idx
, 0, num
);
2576 /* Main entrypoint for the operand constraint checking.
2578 Return 1 if operands of *INST meet the constraint applied by the operand
2579 codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
2580 not NULL, return the detail of the error in *MISMATCH_DETAIL. N.B. when
2581 adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
2582 with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
2583 error kind when it is notified that an instruction does not pass the check).
2585 Un-determined operand qualifiers may get established during the process. */
2588 aarch64_match_operands_constraint (aarch64_inst
*inst
,
2589 aarch64_operand_error
*mismatch_detail
)
2593 DEBUG_TRACE ("enter");
2595 /* Check for cases where a source register needs to be the same as the
2596 destination register. Do this before matching qualifiers since if
2597 an instruction has both invalid tying and invalid qualifiers,
2598 the error about qualifiers would suggest several alternative
2599 instructions that also have invalid tying. */
2600 i
= inst
->opcode
->tied_operand
;
2601 if (i
> 0 && (inst
->operands
[0].reg
.regno
!= inst
->operands
[i
].reg
.regno
))
2603 if (mismatch_detail
)
2605 mismatch_detail
->kind
= AARCH64_OPDE_UNTIED_OPERAND
;
2606 mismatch_detail
->index
= i
;
2607 mismatch_detail
->error
= NULL
;
2612 /* Match operands' qualifier.
2613 *INST has already had qualifier establish for some, if not all, of
2614 its operands; we need to find out whether these established
2615 qualifiers match one of the qualifier sequence in
2616 INST->OPCODE->QUALIFIERS_LIST. If yes, we will assign each operand
2617 with the corresponding qualifier in such a sequence.
2618 Only basic operand constraint checking is done here; the more thorough
2619 constraint checking will carried out by operand_general_constraint_met_p,
2620 which has be to called after this in order to get all of the operands'
2621 qualifiers established. */
2622 if (match_operands_qualifier (inst
, TRUE
/* update_p */) == 0)
2624 DEBUG_TRACE ("FAIL on operand qualifier matching");
2625 if (mismatch_detail
)
2627 /* Return an error type to indicate that it is the qualifier
2628 matching failure; we don't care about which operand as there
2629 are enough information in the opcode table to reproduce it. */
2630 mismatch_detail
->kind
= AARCH64_OPDE_INVALID_VARIANT
;
2631 mismatch_detail
->index
= -1;
2632 mismatch_detail
->error
= NULL
;
2637 /* Match operands' constraint. */
2638 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
2640 enum aarch64_opnd type
= inst
->opcode
->operands
[i
];
2641 if (type
== AARCH64_OPND_NIL
)
2643 if (inst
->operands
[i
].skip
)
2645 DEBUG_TRACE ("skip the incomplete operand %d", i
);
2648 if (operand_general_constraint_met_p (inst
->operands
, i
, type
,
2649 inst
->opcode
, mismatch_detail
) == 0)
2651 DEBUG_TRACE ("FAIL on operand %d", i
);
2656 DEBUG_TRACE ("PASS");
2661 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
2662 Also updates the TYPE of each INST->OPERANDS with the corresponding
2663 value of OPCODE->OPERANDS.
2665 Note that some operand qualifiers may need to be manually cleared by
2666 the caller before it further calls the aarch64_opcode_encode; by
2667 doing this, it helps the qualifier matching facilities work
2670 const aarch64_opcode
*
2671 aarch64_replace_opcode (aarch64_inst
*inst
, const aarch64_opcode
*opcode
)
2674 const aarch64_opcode
*old
= inst
->opcode
;
2676 inst
->opcode
= opcode
;
2678 /* Update the operand types. */
2679 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
2681 inst
->operands
[i
].type
= opcode
->operands
[i
];
2682 if (opcode
->operands
[i
] == AARCH64_OPND_NIL
)
2686 DEBUG_TRACE ("replace %s with %s", old
->name
, opcode
->name
);
2692 aarch64_operand_index (const enum aarch64_opnd
*operands
, enum aarch64_opnd operand
)
2695 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
2696 if (operands
[i
] == operand
)
2698 else if (operands
[i
] == AARCH64_OPND_NIL
)
/* R0...R30, followed by FOR31.  */
#define BANK(R, FOR31) \
  { R (0),  R (1),  R (2),  R (3),  R (4),  R (5),  R (6),  R (7),  \
    R (8),  R (9),  R (10), R (11), R (12), R (13), R (14), R (15), \
    R (16), R (17), R (18), R (19), R (20), R (21), R (22), R (23), \
    R (24), R (25), R (26), R (27), R (28), R (29), R (30), FOR31 }

/* [0][0]  32-bit integer regs with sp   Wn
   [0][1]  64-bit integer regs with sp   Xn  sf=1
   [1][0]  32-bit integer regs with #0   Wn
   [1][1]  64-bit integer regs with #0   Xn  sf=1 */
static const char *int_reg[2][2][32] = {
#define R32(X) "w" #X
#define R64(X) "x" #X
  { BANK (R32, "wsp"), BANK (R64, "sp") },
  { BANK (R32, "wzr"), BANK (R64, "xzr") }
};
#undef R64
#undef R32
2722 /* Names of the SVE vector registers, first with .S suffixes,
2723 then with .D suffixes. */
2725 static const char *sve_reg
[2][32] = {
2726 #define ZS(X) "z" #X ".s"
2727 #define ZD(X) "z" #X ".d"
2728 BANK (ZS
, ZS (31)), BANK (ZD
, ZD (31))
2734 /* Return the integer register name.
2735 if SP_REG_P is not 0, R31 is an SP reg, other R31 is the zero reg. */
2737 static inline const char *
2738 get_int_reg_name (int regno
, aarch64_opnd_qualifier_t qualifier
, int sp_reg_p
)
2740 const int has_zr
= sp_reg_p
? 0 : 1;
2741 const int is_64
= aarch64_get_qualifier_esize (qualifier
) == 4 ? 0 : 1;
2742 return int_reg
[has_zr
][is_64
][regno
];
2745 /* Like get_int_reg_name, but IS_64 is always 1. */
2747 static inline const char *
2748 get_64bit_int_reg_name (int regno
, int sp_reg_p
)
2750 const int has_zr
= sp_reg_p
? 0 : 1;
2751 return int_reg
[has_zr
][1][regno
];
2754 /* Get the name of the integer offset register in OPND, using the shift type
2755 to decide whether it's a word or doubleword. */
2757 static inline const char *
2758 get_offset_int_reg_name (const aarch64_opnd_info
*opnd
)
2760 switch (opnd
->shifter
.kind
)
2762 case AARCH64_MOD_UXTW
:
2763 case AARCH64_MOD_SXTW
:
2764 return get_int_reg_name (opnd
->addr
.offset
.regno
, AARCH64_OPND_QLF_W
, 0);
2766 case AARCH64_MOD_LSL
:
2767 case AARCH64_MOD_SXTX
:
2768 return get_int_reg_name (opnd
->addr
.offset
.regno
, AARCH64_OPND_QLF_X
, 0);
2775 /* Get the name of the SVE vector offset register in OPND, using the operand
2776 qualifier to decide whether the suffix should be .S or .D. */
2778 static inline const char *
2779 get_addr_sve_reg_name (int regno
, aarch64_opnd_qualifier_t qualifier
)
2781 assert (qualifier
== AARCH64_OPND_QLF_S_S
2782 || qualifier
== AARCH64_OPND_QLF_S_D
);
2783 return sve_reg
[qualifier
== AARCH64_OPND_QLF_S_D
][regno
];
2786 /* Types for expanding an encoded 8-bit value to a floating-point value. */
/* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
   normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
   (depending on the type of the instruction).  IMM8 will be expanded to a
   single-precision floating-point value (SIZE == 4) or a double-precision
   floating-point value (SIZE == 8).  A half-precision floating-point value
   (SIZE == 2) is expanded to a single-precision floating-point value.  The
   expanded value is returned.  */

static uint64_t
expand_fp_imm (int size, uint32_t imm8)
{
  uint64_t imm = 0;
  uint32_t imm8_7, imm8_6_0, imm8_6, imm8_6_repl4;

  imm8_7 = (imm8 >> 7) & 0x01;	/* imm8<7>   */
  imm8_6_0 = imm8 & 0x7f;	/* imm8<6:0> */
  imm8_6 = imm8_6_0 >> 6;	/* imm8<6>   */
  imm8_6_repl4 = (imm8_6 << 3) | (imm8_6 << 2)
    | (imm8_6 << 1) | imm8_6;	/* Replicate(imm8<6>,4) */

  if (size == 8)
    {
      /* Build the top 32 bits, then shift into place.  */
      imm = (imm8_7 << (63-32))		/* imm8<7>  */
	| ((imm8_6 ^ 1) << (62-32))	/* NOT(imm8<6>)	*/
	| (imm8_6_repl4 << (58-32)) | (imm8_6 << (57-32))
	| (imm8_6 << (56-32)) | (imm8_6 << (55-32)) /* Replicate(imm8<6>,7) */
	| (imm8_6_0 << (48-32));	/* imm8<6>:imm8<5:0>  */
      imm <<= 32;
    }
  else if (size == 4 || size == 2)
    {
      imm = (imm8_7 << 31)	/* imm8<7>              */
	| ((imm8_6 ^ 1) << 30)	/* NOT(imm8<6>)         */
	| (imm8_6_repl4 << 26)	/* Replicate(imm8<6>,4) */
	| (imm8_6_0 << 19);	/* imm8<6>:imm8<5:0>    */
    }
  else
    {
      /* An unsupported size.  */
      assert (0);
    }

  return imm;
}
2850 /* Produce the string representation of the register list operand *OPND
2851 in the buffer pointed by BUF of size SIZE. PREFIX is the part of
2852 the register name that comes before the register number, such as "v". */
2854 print_register_list (char *buf
, size_t size
, const aarch64_opnd_info
*opnd
,
2857 const int num_regs
= opnd
->reglist
.num_regs
;
2858 const int first_reg
= opnd
->reglist
.first_regno
;
2859 const int last_reg
= (first_reg
+ num_regs
- 1) & 0x1f;
2860 const char *qlf_name
= aarch64_get_qualifier_name (opnd
->qualifier
);
2861 char tb
[8]; /* Temporary buffer. */
2863 assert (opnd
->type
!= AARCH64_OPND_LEt
|| opnd
->reglist
.has_index
);
2864 assert (num_regs
>= 1 && num_regs
<= 4);
2866 /* Prepare the index if any. */
2867 if (opnd
->reglist
.has_index
)
2868 snprintf (tb
, 8, "[%" PRIi64
"]", opnd
->reglist
.index
);
2872 /* The hyphenated form is preferred for disassembly if there are
2873 more than two registers in the list, and the register numbers
2874 are monotonically increasing in increments of one. */
2875 if (num_regs
> 2 && last_reg
> first_reg
)
2876 snprintf (buf
, size
, "{%s%d.%s-%s%d.%s}%s", prefix
, first_reg
, qlf_name
,
2877 prefix
, last_reg
, qlf_name
, tb
);
2880 const int reg0
= first_reg
;
2881 const int reg1
= (first_reg
+ 1) & 0x1f;
2882 const int reg2
= (first_reg
+ 2) & 0x1f;
2883 const int reg3
= (first_reg
+ 3) & 0x1f;
2888 snprintf (buf
, size
, "{%s%d.%s}%s", prefix
, reg0
, qlf_name
, tb
);
2891 snprintf (buf
, size
, "{%s%d.%s, %s%d.%s}%s", prefix
, reg0
, qlf_name
,
2892 prefix
, reg1
, qlf_name
, tb
);
2895 snprintf (buf
, size
, "{%s%d.%s, %s%d.%s, %s%d.%s}%s",
2896 prefix
, reg0
, qlf_name
, prefix
, reg1
, qlf_name
,
2897 prefix
, reg2
, qlf_name
, tb
);
2900 snprintf (buf
, size
, "{%s%d.%s, %s%d.%s, %s%d.%s, %s%d.%s}%s",
2901 prefix
, reg0
, qlf_name
, prefix
, reg1
, qlf_name
,
2902 prefix
, reg2
, qlf_name
, prefix
, reg3
, qlf_name
, tb
);
2908 /* Print the register+immediate address in OPND to BUF, which has SIZE
2909 characters. BASE is the name of the base register. */
2912 print_immediate_offset_address (char *buf
, size_t size
,
2913 const aarch64_opnd_info
*opnd
,
2916 if (opnd
->addr
.writeback
)
2918 if (opnd
->addr
.preind
)
2919 snprintf (buf
, size
, "[%s, #%d]!", base
, opnd
->addr
.offset
.imm
);
2921 snprintf (buf
, size
, "[%s], #%d", base
, opnd
->addr
.offset
.imm
);
2925 if (opnd
->shifter
.operator_present
)
2927 assert (opnd
->shifter
.kind
== AARCH64_MOD_MUL_VL
);
2928 snprintf (buf
, size
, "[%s, #%d, mul vl]",
2929 base
, opnd
->addr
.offset
.imm
);
2931 else if (opnd
->addr
.offset
.imm
)
2932 snprintf (buf
, size
, "[%s, #%d]", base
, opnd
->addr
.offset
.imm
);
2934 snprintf (buf
, size
, "[%s]", base
);
2938 /* Produce the string representation of the register offset address operand
2939 *OPND in the buffer pointed by BUF of size SIZE. BASE and OFFSET are
2940 the names of the base and offset registers. */
2942 print_register_offset_address (char *buf
, size_t size
,
2943 const aarch64_opnd_info
*opnd
,
2944 const char *base
, const char *offset
)
2946 char tb
[16]; /* Temporary buffer. */
2947 bfd_boolean print_extend_p
= TRUE
;
2948 bfd_boolean print_amount_p
= TRUE
;
2949 const char *shift_name
= aarch64_operand_modifiers
[opnd
->shifter
.kind
].name
;
2951 if (!opnd
->shifter
.amount
&& (opnd
->qualifier
!= AARCH64_OPND_QLF_S_B
2952 || !opnd
->shifter
.amount_present
))
2954 /* Not print the shift/extend amount when the amount is zero and
2955 when it is not the special case of 8-bit load/store instruction. */
2956 print_amount_p
= FALSE
;
2957 /* Likewise, no need to print the shift operator LSL in such a
2959 if (opnd
->shifter
.kind
== AARCH64_MOD_LSL
)
2960 print_extend_p
= FALSE
;
2963 /* Prepare for the extend/shift. */
2967 snprintf (tb
, sizeof (tb
), ", %s #%" PRIi64
, shift_name
,
2968 opnd
->shifter
.amount
);
2970 snprintf (tb
, sizeof (tb
), ", %s", shift_name
);
2975 snprintf (buf
, size
, "[%s, %s%s]", base
, offset
, tb
);
2978 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
2979 in *BUF. The caller should pass in the maximum size of *BUF in SIZE.
2980 PC, PCREL_P and ADDRESS are used to pass in and return information about
2981 the PC-relative address calculation, where the PC value is passed in
2982 PC. If the operand is pc-relative related, *PCREL_P (if PCREL_P non-NULL)
2983 will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
2984 calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
2986 The function serves both the disassembler and the assembler diagnostics
2987 issuer, which is the reason why it lives in this file. */
2990 aarch64_print_operand (char *buf
, size_t size
, bfd_vma pc
,
2991 const aarch64_opcode
*opcode
,
2992 const aarch64_opnd_info
*opnds
, int idx
, int *pcrel_p
,
2995 unsigned int i
, num_conds
;
2996 const char *name
= NULL
;
2997 const aarch64_opnd_info
*opnd
= opnds
+ idx
;
2998 enum aarch64_modifier_kind kind
;
2999 uint64_t addr
, enum_value
;
3007 case AARCH64_OPND_Rd
:
3008 case AARCH64_OPND_Rn
:
3009 case AARCH64_OPND_Rm
:
3010 case AARCH64_OPND_Rt
:
3011 case AARCH64_OPND_Rt2
:
3012 case AARCH64_OPND_Rs
:
3013 case AARCH64_OPND_Ra
:
3014 case AARCH64_OPND_Rt_SYS
:
3015 case AARCH64_OPND_PAIRREG
:
3016 case AARCH64_OPND_SVE_Rm
:
3017 /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
3018 the <ic_op>, therefore we we use opnd->present to override the
3019 generic optional-ness information. */
3020 if (opnd
->type
== AARCH64_OPND_Rt_SYS
)
3025 /* Omit the operand, e.g. RET. */
3026 else if (optional_operand_p (opcode
, idx
)
3028 == get_optional_operand_default_value (opcode
)))
3030 assert (opnd
->qualifier
== AARCH64_OPND_QLF_W
3031 || opnd
->qualifier
== AARCH64_OPND_QLF_X
);
3032 snprintf (buf
, size
, "%s",
3033 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0));
3036 case AARCH64_OPND_Rd_SP
:
3037 case AARCH64_OPND_Rn_SP
:
3038 case AARCH64_OPND_SVE_Rn_SP
:
3039 case AARCH64_OPND_Rm_SP
:
3040 assert (opnd
->qualifier
== AARCH64_OPND_QLF_W
3041 || opnd
->qualifier
== AARCH64_OPND_QLF_WSP
3042 || opnd
->qualifier
== AARCH64_OPND_QLF_X
3043 || opnd
->qualifier
== AARCH64_OPND_QLF_SP
);
3044 snprintf (buf
, size
, "%s",
3045 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 1));
3048 case AARCH64_OPND_Rm_EXT
:
3049 kind
= opnd
->shifter
.kind
;
3050 assert (idx
== 1 || idx
== 2);
3051 if ((aarch64_stack_pointer_p (opnds
)
3052 || (idx
== 2 && aarch64_stack_pointer_p (opnds
+ 1)))
3053 && ((opnd
->qualifier
== AARCH64_OPND_QLF_W
3054 && opnds
[0].qualifier
== AARCH64_OPND_QLF_W
3055 && kind
== AARCH64_MOD_UXTW
)
3056 || (opnd
->qualifier
== AARCH64_OPND_QLF_X
3057 && kind
== AARCH64_MOD_UXTX
)))
3059 /* 'LSL' is the preferred form in this case. */
3060 kind
= AARCH64_MOD_LSL
;
3061 if (opnd
->shifter
.amount
== 0)
3063 /* Shifter omitted. */
3064 snprintf (buf
, size
, "%s",
3065 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0));
3069 if (opnd
->shifter
.amount
)
3070 snprintf (buf
, size
, "%s, %s #%" PRIi64
,
3071 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0),
3072 aarch64_operand_modifiers
[kind
].name
,
3073 opnd
->shifter
.amount
);
3075 snprintf (buf
, size
, "%s, %s",
3076 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0),
3077 aarch64_operand_modifiers
[kind
].name
);
3080 case AARCH64_OPND_Rm_SFT
:
3081 assert (opnd
->qualifier
== AARCH64_OPND_QLF_W
3082 || opnd
->qualifier
== AARCH64_OPND_QLF_X
);
3083 if (opnd
->shifter
.amount
== 0 && opnd
->shifter
.kind
== AARCH64_MOD_LSL
)
3084 snprintf (buf
, size
, "%s",
3085 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0));
3087 snprintf (buf
, size
, "%s, %s #%" PRIi64
,
3088 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0),
3089 aarch64_operand_modifiers
[opnd
->shifter
.kind
].name
,
3090 opnd
->shifter
.amount
);
3093 case AARCH64_OPND_Fd
:
3094 case AARCH64_OPND_Fn
:
3095 case AARCH64_OPND_Fm
:
3096 case AARCH64_OPND_Fa
:
3097 case AARCH64_OPND_Ft
:
3098 case AARCH64_OPND_Ft2
:
3099 case AARCH64_OPND_Sd
:
3100 case AARCH64_OPND_Sn
:
3101 case AARCH64_OPND_Sm
:
3102 case AARCH64_OPND_SVE_VZn
:
3103 case AARCH64_OPND_SVE_Vd
:
3104 case AARCH64_OPND_SVE_Vm
:
3105 case AARCH64_OPND_SVE_Vn
:
3106 snprintf (buf
, size
, "%s%d", aarch64_get_qualifier_name (opnd
->qualifier
),
3110 case AARCH64_OPND_Vd
:
3111 case AARCH64_OPND_Vn
:
3112 case AARCH64_OPND_Vm
:
3113 snprintf (buf
, size
, "v%d.%s", opnd
->reg
.regno
,
3114 aarch64_get_qualifier_name (opnd
->qualifier
));
3117 case AARCH64_OPND_Ed
:
3118 case AARCH64_OPND_En
:
3119 case AARCH64_OPND_Em
:
3120 snprintf (buf
, size
, "v%d.%s[%" PRIi64
"]", opnd
->reglane
.regno
,
3121 aarch64_get_qualifier_name (opnd
->qualifier
),
3122 opnd
->reglane
.index
);
3125 case AARCH64_OPND_VdD1
:
3126 case AARCH64_OPND_VnD1
:
3127 snprintf (buf
, size
, "v%d.d[1]", opnd
->reg
.regno
);
3130 case AARCH64_OPND_LVn
:
3131 case AARCH64_OPND_LVt
:
3132 case AARCH64_OPND_LVt_AL
:
3133 case AARCH64_OPND_LEt
:
3134 print_register_list (buf
, size
, opnd
, "v");
3137 case AARCH64_OPND_SVE_Pd
:
3138 case AARCH64_OPND_SVE_Pg3
:
3139 case AARCH64_OPND_SVE_Pg4_5
:
3140 case AARCH64_OPND_SVE_Pg4_10
:
3141 case AARCH64_OPND_SVE_Pg4_16
:
3142 case AARCH64_OPND_SVE_Pm
:
3143 case AARCH64_OPND_SVE_Pn
:
3144 case AARCH64_OPND_SVE_Pt
:
3145 if (opnd
->qualifier
== AARCH64_OPND_QLF_NIL
)
3146 snprintf (buf
, size
, "p%d", opnd
->reg
.regno
);
3147 else if (opnd
->qualifier
== AARCH64_OPND_QLF_P_Z
3148 || opnd
->qualifier
== AARCH64_OPND_QLF_P_M
)
3149 snprintf (buf
, size
, "p%d/%s", opnd
->reg
.regno
,
3150 aarch64_get_qualifier_name (opnd
->qualifier
));
3152 snprintf (buf
, size
, "p%d.%s", opnd
->reg
.regno
,
3153 aarch64_get_qualifier_name (opnd
->qualifier
));
3156 case AARCH64_OPND_SVE_Za_5
:
3157 case AARCH64_OPND_SVE_Za_16
:
3158 case AARCH64_OPND_SVE_Zd
:
3159 case AARCH64_OPND_SVE_Zm_5
:
3160 case AARCH64_OPND_SVE_Zm_16
:
3161 case AARCH64_OPND_SVE_Zn
:
3162 case AARCH64_OPND_SVE_Zt
:
3163 if (opnd
->qualifier
== AARCH64_OPND_QLF_NIL
)
3164 snprintf (buf
, size
, "z%d", opnd
->reg
.regno
);
3166 snprintf (buf
, size
, "z%d.%s", opnd
->reg
.regno
,
3167 aarch64_get_qualifier_name (opnd
->qualifier
));
3170 case AARCH64_OPND_SVE_ZnxN
:
3171 case AARCH64_OPND_SVE_ZtxN
:
3172 print_register_list (buf
, size
, opnd
, "z");
3175 case AARCH64_OPND_SVE_Zn_INDEX
:
3176 snprintf (buf
, size
, "z%d.%s[%" PRIi64
"]", opnd
->reglane
.regno
,
3177 aarch64_get_qualifier_name (opnd
->qualifier
),
3178 opnd
->reglane
.index
);
3181 case AARCH64_OPND_CRn
:
3182 case AARCH64_OPND_CRm
:
3183 snprintf (buf
, size
, "C%" PRIi64
, opnd
->imm
.value
);
3186 case AARCH64_OPND_IDX
:
3187 case AARCH64_OPND_IMM
:
3188 case AARCH64_OPND_WIDTH
:
3189 case AARCH64_OPND_UIMM3_OP1
:
3190 case AARCH64_OPND_UIMM3_OP2
:
3191 case AARCH64_OPND_BIT_NUM
:
3192 case AARCH64_OPND_IMM_VLSL
:
3193 case AARCH64_OPND_IMM_VLSR
:
3194 case AARCH64_OPND_SHLL_IMM
:
3195 case AARCH64_OPND_IMM0
:
3196 case AARCH64_OPND_IMMR
:
3197 case AARCH64_OPND_IMMS
:
3198 case AARCH64_OPND_FBITS
:
3199 case AARCH64_OPND_SIMM5
:
3200 case AARCH64_OPND_SVE_SHLIMM_PRED
:
3201 case AARCH64_OPND_SVE_SHLIMM_UNPRED
:
3202 case AARCH64_OPND_SVE_SHRIMM_PRED
:
3203 case AARCH64_OPND_SVE_SHRIMM_UNPRED
:
3204 case AARCH64_OPND_SVE_SIMM5
:
3205 case AARCH64_OPND_SVE_SIMM5B
:
3206 case AARCH64_OPND_SVE_SIMM6
:
3207 case AARCH64_OPND_SVE_SIMM8
:
3208 case AARCH64_OPND_SVE_UIMM3
:
3209 case AARCH64_OPND_SVE_UIMM7
:
3210 case AARCH64_OPND_SVE_UIMM8
:
3211 case AARCH64_OPND_SVE_UIMM8_53
:
3212 case AARCH64_OPND_IMM_ROT1
:
3213 case AARCH64_OPND_IMM_ROT2
:
3214 case AARCH64_OPND_IMM_ROT3
:
3215 snprintf (buf
, size
, "#%" PRIi64
, opnd
->imm
.value
);
3218 case AARCH64_OPND_SVE_I1_HALF_ONE
:
3219 case AARCH64_OPND_SVE_I1_HALF_TWO
:
3220 case AARCH64_OPND_SVE_I1_ZERO_ONE
:
3223 c
.i
= opnd
->imm
.value
;
3224 snprintf (buf
, size
, "#%.1f", c
.f
);
3228 case AARCH64_OPND_SVE_PATTERN
:
3229 if (optional_operand_p (opcode
, idx
)
3230 && opnd
->imm
.value
== get_optional_operand_default_value (opcode
))
3232 enum_value
= opnd
->imm
.value
;
3233 assert (enum_value
< ARRAY_SIZE (aarch64_sve_pattern_array
));
3234 if (aarch64_sve_pattern_array
[enum_value
])
3235 snprintf (buf
, size
, "%s", aarch64_sve_pattern_array
[enum_value
]);
3237 snprintf (buf
, size
, "#%" PRIi64
, opnd
->imm
.value
);
3240 case AARCH64_OPND_SVE_PATTERN_SCALED
:
3241 if (optional_operand_p (opcode
, idx
)
3242 && !opnd
->shifter
.operator_present
3243 && opnd
->imm
.value
== get_optional_operand_default_value (opcode
))
3245 enum_value
= opnd
->imm
.value
;
3246 assert (enum_value
< ARRAY_SIZE (aarch64_sve_pattern_array
));
3247 if (aarch64_sve_pattern_array
[opnd
->imm
.value
])
3248 snprintf (buf
, size
, "%s", aarch64_sve_pattern_array
[opnd
->imm
.value
]);
3250 snprintf (buf
, size
, "#%" PRIi64
, opnd
->imm
.value
);
3251 if (opnd
->shifter
.operator_present
)
3253 size_t len
= strlen (buf
);
3254 snprintf (buf
+ len
, size
- len
, ", %s #%" PRIi64
,
3255 aarch64_operand_modifiers
[opnd
->shifter
.kind
].name
,
3256 opnd
->shifter
.amount
);
3260 case AARCH64_OPND_SVE_PRFOP
:
3261 enum_value
= opnd
->imm
.value
;
3262 assert (enum_value
< ARRAY_SIZE (aarch64_sve_prfop_array
));
3263 if (aarch64_sve_prfop_array
[enum_value
])
3264 snprintf (buf
, size
, "%s", aarch64_sve_prfop_array
[enum_value
]);
3266 snprintf (buf
, size
, "#%" PRIi64
, opnd
->imm
.value
);
3269 case AARCH64_OPND_IMM_MOV
:
3270 switch (aarch64_get_qualifier_esize (opnds
[0].qualifier
))
3272 case 4: /* e.g. MOV Wd, #<imm32>. */
3274 int imm32
= opnd
->imm
.value
;
3275 snprintf (buf
, size
, "#0x%-20x\t// #%d", imm32
, imm32
);
3278 case 8: /* e.g. MOV Xd, #<imm64>. */
3279 snprintf (buf
, size
, "#0x%-20" PRIx64
"\t// #%" PRIi64
,
3280 opnd
->imm
.value
, opnd
->imm
.value
);
3282 default: assert (0);
3286 case AARCH64_OPND_FPIMM0
:
3287 snprintf (buf
, size
, "#0.0");
3290 case AARCH64_OPND_LIMM
:
3291 case AARCH64_OPND_AIMM
:
3292 case AARCH64_OPND_HALF
:
3293 case AARCH64_OPND_SVE_INV_LIMM
:
3294 case AARCH64_OPND_SVE_LIMM
:
3295 case AARCH64_OPND_SVE_LIMM_MOV
:
3296 if (opnd
->shifter
.amount
)
3297 snprintf (buf
, size
, "#0x%" PRIx64
", lsl #%" PRIi64
, opnd
->imm
.value
,
3298 opnd
->shifter
.amount
);
3300 snprintf (buf
, size
, "#0x%" PRIx64
, opnd
->imm
.value
);
3303 case AARCH64_OPND_SIMD_IMM
:
3304 case AARCH64_OPND_SIMD_IMM_SFT
:
3305 if ((! opnd
->shifter
.amount
&& opnd
->shifter
.kind
== AARCH64_MOD_LSL
)
3306 || opnd
->shifter
.kind
== AARCH64_MOD_NONE
)
3307 snprintf (buf
, size
, "#0x%" PRIx64
, opnd
->imm
.value
);
3309 snprintf (buf
, size
, "#0x%" PRIx64
", %s #%" PRIi64
, opnd
->imm
.value
,
3310 aarch64_operand_modifiers
[opnd
->shifter
.kind
].name
,
3311 opnd
->shifter
.amount
);
3314 case AARCH64_OPND_SVE_AIMM
:
3315 case AARCH64_OPND_SVE_ASIMM
:
3316 if (opnd
->shifter
.amount
)
3317 snprintf (buf
, size
, "#%" PRIi64
", lsl #%" PRIi64
, opnd
->imm
.value
,
3318 opnd
->shifter
.amount
);
3320 snprintf (buf
, size
, "#%" PRIi64
, opnd
->imm
.value
);
3323 case AARCH64_OPND_FPIMM
:
3324 case AARCH64_OPND_SIMD_FPIMM
:
3325 case AARCH64_OPND_SVE_FPIMM8
:
3326 switch (aarch64_get_qualifier_esize (opnds
[0].qualifier
))
3328 case 2: /* e.g. FMOV <Hd>, #<imm>. */
3331 c
.i
= expand_fp_imm (2, opnd
->imm
.value
);
3332 snprintf (buf
, size
, "#%.18e", c
.f
);
3335 case 4: /* e.g. FMOV <Vd>.4S, #<imm>. */
3338 c
.i
= expand_fp_imm (4, opnd
->imm
.value
);
3339 snprintf (buf
, size
, "#%.18e", c
.f
);
3342 case 8: /* e.g. FMOV <Sd>, #<imm>. */
3345 c
.i
= expand_fp_imm (8, opnd
->imm
.value
);
3346 snprintf (buf
, size
, "#%.18e", c
.d
);
3349 default: assert (0);
3353 case AARCH64_OPND_CCMP_IMM
:
3354 case AARCH64_OPND_NZCV
:
3355 case AARCH64_OPND_EXCEPTION
:
3356 case AARCH64_OPND_UIMM4
:
3357 case AARCH64_OPND_UIMM7
:
3358 if (optional_operand_p (opcode
, idx
) == TRUE
3359 && (opnd
->imm
.value
==
3360 (int64_t) get_optional_operand_default_value (opcode
)))
3361 /* Omit the operand, e.g. DCPS1. */
3363 snprintf (buf
, size
, "#0x%x", (unsigned int)opnd
->imm
.value
);
3366 case AARCH64_OPND_COND
:
3367 case AARCH64_OPND_COND1
:
3368 snprintf (buf
, size
, "%s", opnd
->cond
->names
[0]);
3369 num_conds
= ARRAY_SIZE (opnd
->cond
->names
);
3370 for (i
= 1; i
< num_conds
&& opnd
->cond
->names
[i
]; ++i
)
3372 size_t len
= strlen (buf
);
3374 snprintf (buf
+ len
, size
- len
, " // %s = %s",
3375 opnd
->cond
->names
[0], opnd
->cond
->names
[i
]);
3377 snprintf (buf
+ len
, size
- len
, ", %s",
3378 opnd
->cond
->names
[i
]);
3382 case AARCH64_OPND_ADDR_ADRP
:
3383 addr
= ((pc
+ AARCH64_PCREL_OFFSET
) & ~(uint64_t)0xfff)
3389 /* This is not necessary during the disassembling, as print_address_func
3390 in the disassemble_info will take care of the printing. But some
3391 other callers may be still interested in getting the string in *STR,
3392 so here we do snprintf regardless. */
3393 snprintf (buf
, size
, "#0x%" PRIx64
, addr
);
3396 case AARCH64_OPND_ADDR_PCREL14
:
3397 case AARCH64_OPND_ADDR_PCREL19
:
3398 case AARCH64_OPND_ADDR_PCREL21
:
3399 case AARCH64_OPND_ADDR_PCREL26
:
3400 addr
= pc
+ AARCH64_PCREL_OFFSET
+ opnd
->imm
.value
;
3405 /* This is not necessary during the disassembling, as print_address_func
3406 in the disassemble_info will take care of the printing. But some
3407 other callers may be still interested in getting the string in *STR,
3408 so here we do snprintf regardless. */
3409 snprintf (buf
, size
, "#0x%" PRIx64
, addr
);
3412 case AARCH64_OPND_ADDR_SIMPLE
:
3413 case AARCH64_OPND_SIMD_ADDR_SIMPLE
:
3414 case AARCH64_OPND_SIMD_ADDR_POST
:
3415 name
= get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1);
3416 if (opnd
->type
== AARCH64_OPND_SIMD_ADDR_POST
)
3418 if (opnd
->addr
.offset
.is_reg
)
3419 snprintf (buf
, size
, "[%s], x%d", name
, opnd
->addr
.offset
.regno
);
3421 snprintf (buf
, size
, "[%s], #%d", name
, opnd
->addr
.offset
.imm
);
3424 snprintf (buf
, size
, "[%s]", name
);
3427 case AARCH64_OPND_ADDR_REGOFF
:
3428 case AARCH64_OPND_SVE_ADDR_RR
:
3429 case AARCH64_OPND_SVE_ADDR_RR_LSL1
:
3430 case AARCH64_OPND_SVE_ADDR_RR_LSL2
:
3431 case AARCH64_OPND_SVE_ADDR_RR_LSL3
:
3432 case AARCH64_OPND_SVE_ADDR_RX
:
3433 case AARCH64_OPND_SVE_ADDR_RX_LSL1
:
3434 case AARCH64_OPND_SVE_ADDR_RX_LSL2
:
3435 case AARCH64_OPND_SVE_ADDR_RX_LSL3
:
3436 print_register_offset_address
3437 (buf
, size
, opnd
, get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1),
3438 get_offset_int_reg_name (opnd
));
3441 case AARCH64_OPND_SVE_ADDR_RZ
:
3442 case AARCH64_OPND_SVE_ADDR_RZ_LSL1
:
3443 case AARCH64_OPND_SVE_ADDR_RZ_LSL2
:
3444 case AARCH64_OPND_SVE_ADDR_RZ_LSL3
:
3445 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14
:
3446 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22
:
3447 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14
:
3448 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22
:
3449 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14
:
3450 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22
:
3451 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14
:
3452 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22
:
3453 print_register_offset_address
3454 (buf
, size
, opnd
, get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1),
3455 get_addr_sve_reg_name (opnd
->addr
.offset
.regno
, opnd
->qualifier
));
3458 case AARCH64_OPND_ADDR_SIMM7
:
3459 case AARCH64_OPND_ADDR_SIMM9
:
3460 case AARCH64_OPND_ADDR_SIMM9_2
:
3461 case AARCH64_OPND_ADDR_SIMM10
:
3462 case AARCH64_OPND_SVE_ADDR_RI_S4xVL
:
3463 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL
:
3464 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL
:
3465 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL
:
3466 case AARCH64_OPND_SVE_ADDR_RI_S6xVL
:
3467 case AARCH64_OPND_SVE_ADDR_RI_S9xVL
:
3468 case AARCH64_OPND_SVE_ADDR_RI_U6
:
3469 case AARCH64_OPND_SVE_ADDR_RI_U6x2
:
3470 case AARCH64_OPND_SVE_ADDR_RI_U6x4
:
3471 case AARCH64_OPND_SVE_ADDR_RI_U6x8
:
3472 print_immediate_offset_address
3473 (buf
, size
, opnd
, get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1));
3476 case AARCH64_OPND_SVE_ADDR_ZI_U5
:
3477 case AARCH64_OPND_SVE_ADDR_ZI_U5x2
:
3478 case AARCH64_OPND_SVE_ADDR_ZI_U5x4
:
3479 case AARCH64_OPND_SVE_ADDR_ZI_U5x8
:
3480 print_immediate_offset_address
3482 get_addr_sve_reg_name (opnd
->addr
.base_regno
, opnd
->qualifier
));
3485 case AARCH64_OPND_SVE_ADDR_ZZ_LSL
:
3486 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW
:
3487 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW
:
3488 print_register_offset_address
3490 get_addr_sve_reg_name (opnd
->addr
.base_regno
, opnd
->qualifier
),
3491 get_addr_sve_reg_name (opnd
->addr
.offset
.regno
, opnd
->qualifier
));
3494 case AARCH64_OPND_ADDR_UIMM12
:
3495 name
= get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1);
3496 if (opnd
->addr
.offset
.imm
)
3497 snprintf (buf
, size
, "[%s, #%d]", name
, opnd
->addr
.offset
.imm
);
3499 snprintf (buf
, size
, "[%s]", name
);
3502 case AARCH64_OPND_SYSREG
:
3503 for (i
= 0; aarch64_sys_regs
[i
].name
; ++i
)
3504 if (aarch64_sys_regs
[i
].value
== opnd
->sysreg
3505 && ! aarch64_sys_reg_deprecated_p (&aarch64_sys_regs
[i
]))
3507 if (aarch64_sys_regs
[i
].name
)
3508 snprintf (buf
, size
, "%s", aarch64_sys_regs
[i
].name
);
3511 /* Implementation defined system register. */
3512 unsigned int value
= opnd
->sysreg
;
3513 snprintf (buf
, size
, "s%u_%u_c%u_c%u_%u", (value
>> 14) & 0x3,
3514 (value
>> 11) & 0x7, (value
>> 7) & 0xf, (value
>> 3) & 0xf,
3519 case AARCH64_OPND_PSTATEFIELD
:
3520 for (i
= 0; aarch64_pstatefields
[i
].name
; ++i
)
3521 if (aarch64_pstatefields
[i
].value
== opnd
->pstatefield
)
3523 assert (aarch64_pstatefields
[i
].name
);
3524 snprintf (buf
, size
, "%s", aarch64_pstatefields
[i
].name
);
3527 case AARCH64_OPND_SYSREG_AT
:
3528 case AARCH64_OPND_SYSREG_DC
:
3529 case AARCH64_OPND_SYSREG_IC
:
3530 case AARCH64_OPND_SYSREG_TLBI
:
3531 snprintf (buf
, size
, "%s", opnd
->sysins_op
->name
);
3534 case AARCH64_OPND_BARRIER
:
3535 snprintf (buf
, size
, "%s", opnd
->barrier
->name
);
3538 case AARCH64_OPND_BARRIER_ISB
:
3539 /* Operand can be omitted, e.g. in DCPS1. */
3540 if (! optional_operand_p (opcode
, idx
)
3541 || (opnd
->barrier
->value
3542 != get_optional_operand_default_value (opcode
)))
3543 snprintf (buf
, size
, "#0x%x", opnd
->barrier
->value
);
3546 case AARCH64_OPND_PRFOP
:
3547 if (opnd
->prfop
->name
!= NULL
)
3548 snprintf (buf
, size
, "%s", opnd
->prfop
->name
);
3550 snprintf (buf
, size
, "#0x%02x", opnd
->prfop
->value
);
3553 case AARCH64_OPND_BARRIER_PSB
:
3554 snprintf (buf
, size
, "%s", opnd
->hint_option
->name
);
3562 #define CPENC(op0,op1,crn,crm,op2) \
3563 ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
3564 /* for 3.9.3 Instructions for Accessing Special Purpose Registers */
3565 #define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
3566 /* for 3.9.10 System Instructions */
3567 #define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))
3589 #define F_DEPRECATED 0x1 /* Deprecated system register. */
3594 #define F_ARCHEXT 0x2 /* Architecture dependent system register. */
3599 #define F_HASXT 0x4 /* System instruction register <Xt>
/* TODO: two more issues still need to be resolved:
   1. handle read-only and write-only system registers;
   2. handle cpu-implementation-defined system registers.  */
/* Table of named AArch64 system registers.  Each entry gives the
   assembler name, the packed CPENC/CPEN_ encoding, and flags
   (F_DEPRECATED / F_ARCHEXT).  Trailing comments mark access
   restrictions (RO, r, w) and aliases.  Terminated by a null entry.  */
const aarch64_sys_reg aarch64_sys_regs [] =
{
  /* Special-purpose registers (CPEN_ encodings).  */
  { "spsr_el1", CPEN_(0,C0,0), 0 }, /* = spsr_svc */
  { "spsr_el12", CPEN_ (5, C0, 0), F_ARCHEXT },
  { "elr_el1", CPEN_(0,C0,1), 0 },
  { "elr_el12", CPEN_ (5, C0, 1), F_ARCHEXT },
  { "sp_el0", CPEN_(0,C1,0), 0 },
  { "spsel", CPEN_(0,C2,0), 0 },
  { "daif", CPEN_(3,C2,1), 0 },
  { "currentel", CPEN_(0,C2,2), 0 }, /* RO */
  { "pan", CPEN_(0,C2,3), F_ARCHEXT },
  { "uao", CPEN_ (0, C2, 4), F_ARCHEXT },
  { "nzcv", CPEN_(3,C2,0), 0 },
  { "fpcr", CPEN_(3,C4,0), 0 },
  { "fpsr", CPEN_(3,C4,1), 0 },
  { "dspsr_el0", CPEN_(3,C5,0), 0 },
  { "dlr_el0", CPEN_(3,C5,1), 0 },
  { "spsr_el2", CPEN_(4,C0,0), 0 }, /* = spsr_hyp */
  { "elr_el2", CPEN_(4,C0,1), 0 },
  { "sp_el1", CPEN_(4,C1,0), 0 },
  { "spsr_irq", CPEN_(4,C3,0), 0 },
  { "spsr_abt", CPEN_(4,C3,1), 0 },
  { "spsr_und", CPEN_(4,C3,2), 0 },
  { "spsr_fiq", CPEN_(4,C3,3), 0 },
  { "spsr_el3", CPEN_(6,C0,0), 0 },
  { "elr_el3", CPEN_(6,C0,1), 0 },
  { "sp_el2", CPEN_(6,C1,0), 0 },
  { "spsr_svc", CPEN_(0,C0,0), F_DEPRECATED }, /* = spsr_el1 */
  { "spsr_hyp", CPEN_(4,C0,0), F_DEPRECATED }, /* = spsr_el2 */
  /* Identification registers (marked RO where read-only).  */
  { "midr_el1", CPENC(3,0,C0,C0,0), 0 }, /* RO */
  { "ctr_el0", CPENC(3,3,C0,C0,1), 0 }, /* RO */
  { "mpidr_el1", CPENC(3,0,C0,C0,5), 0 }, /* RO */
  { "revidr_el1", CPENC(3,0,C0,C0,6), 0 }, /* RO */
  { "aidr_el1", CPENC(3,1,C0,C0,7), 0 }, /* RO */
  { "dczid_el0", CPENC(3,3,C0,C0,7), 0 }, /* RO */
  { "id_dfr0_el1", CPENC(3,0,C0,C1,2), 0 }, /* RO */
  { "id_pfr0_el1", CPENC(3,0,C0,C1,0), 0 }, /* RO */
  { "id_pfr1_el1", CPENC(3,0,C0,C1,1), 0 }, /* RO */
  { "id_afr0_el1", CPENC(3,0,C0,C1,3), 0 }, /* RO */
  { "id_mmfr0_el1", CPENC(3,0,C0,C1,4), 0 }, /* RO */
  { "id_mmfr1_el1", CPENC(3,0,C0,C1,5), 0 }, /* RO */
  { "id_mmfr2_el1", CPENC(3,0,C0,C1,6), 0 }, /* RO */
  { "id_mmfr3_el1", CPENC(3,0,C0,C1,7), 0 }, /* RO */
  { "id_mmfr4_el1", CPENC(3,0,C0,C2,6), 0 }, /* RO */
  { "id_isar0_el1", CPENC(3,0,C0,C2,0), 0 }, /* RO */
  { "id_isar1_el1", CPENC(3,0,C0,C2,1), 0 }, /* RO */
  { "id_isar2_el1", CPENC(3,0,C0,C2,2), 0 }, /* RO */
  { "id_isar3_el1", CPENC(3,0,C0,C2,3), 0 }, /* RO */
  { "id_isar4_el1", CPENC(3,0,C0,C2,4), 0 }, /* RO */
  { "id_isar5_el1", CPENC(3,0,C0,C2,5), 0 }, /* RO */
  { "mvfr0_el1", CPENC(3,0,C0,C3,0), 0 }, /* RO */
  { "mvfr1_el1", CPENC(3,0,C0,C3,1), 0 }, /* RO */
  { "mvfr2_el1", CPENC(3,0,C0,C3,2), 0 }, /* RO */
  { "ccsidr_el1", CPENC(3,1,C0,C0,0), 0 }, /* RO */
  { "id_aa64pfr0_el1", CPENC(3,0,C0,C4,0), 0 }, /* RO */
  { "id_aa64pfr1_el1", CPENC(3,0,C0,C4,1), 0 }, /* RO */
  { "id_aa64dfr0_el1", CPENC(3,0,C0,C5,0), 0 }, /* RO */
  { "id_aa64dfr1_el1", CPENC(3,0,C0,C5,1), 0 }, /* RO */
  { "id_aa64isar0_el1", CPENC(3,0,C0,C6,0), 0 }, /* RO */
  { "id_aa64isar1_el1", CPENC(3,0,C0,C6,1), 0 }, /* RO */
  { "id_aa64mmfr0_el1", CPENC(3,0,C0,C7,0), 0 }, /* RO */
  { "id_aa64mmfr1_el1", CPENC(3,0,C0,C7,1), 0 }, /* RO */
  { "id_aa64mmfr2_el1", CPENC (3, 0, C0, C7, 2), F_ARCHEXT }, /* RO */
  { "id_aa64afr0_el1", CPENC(3,0,C0,C5,4), 0 }, /* RO */
  { "id_aa64afr1_el1", CPENC(3,0,C0,C5,5), 0 }, /* RO */
  { "clidr_el1", CPENC(3,1,C0,C0,1), 0 }, /* RO */
  { "csselr_el1", CPENC(3,2,C0,C0,0), 0 }, /* RO */
  { "vpidr_el2", CPENC(3,4,C0,C0,0), 0 },
  { "vmpidr_el2", CPENC(3,4,C0,C0,5), 0 },
  /* System control and configuration registers.  */
  { "sctlr_el1", CPENC(3,0,C1,C0,0), 0 },
  { "sctlr_el2", CPENC(3,4,C1,C0,0), 0 },
  { "sctlr_el3", CPENC(3,6,C1,C0,0), 0 },
  { "sctlr_el12", CPENC (3, 5, C1, C0, 0), F_ARCHEXT },
  { "actlr_el1", CPENC(3,0,C1,C0,1), 0 },
  { "actlr_el2", CPENC(3,4,C1,C0,1), 0 },
  { "actlr_el3", CPENC(3,6,C1,C0,1), 0 },
  { "cpacr_el1", CPENC(3,0,C1,C0,2), 0 },
  { "cpacr_el12", CPENC (3, 5, C1, C0, 2), F_ARCHEXT },
  { "cptr_el2", CPENC(3,4,C1,C1,2), 0 },
  { "cptr_el3", CPENC(3,6,C1,C1,2), 0 },
  { "scr_el3", CPENC(3,6,C1,C1,0), 0 },
  { "hcr_el2", CPENC(3,4,C1,C1,0), 0 },
  { "mdcr_el2", CPENC(3,4,C1,C1,1), 0 },
  { "mdcr_el3", CPENC(3,6,C1,C3,1), 0 },
  { "hstr_el2", CPENC(3,4,C1,C1,3), 0 },
  { "hacr_el2", CPENC(3,4,C1,C1,7), 0 },
  /* Translation-table base and control registers.  */
  { "ttbr0_el1", CPENC(3,0,C2,C0,0), 0 },
  { "ttbr1_el1", CPENC(3,0,C2,C0,1), 0 },
  { "ttbr0_el2", CPENC(3,4,C2,C0,0), 0 },
  { "ttbr1_el2", CPENC (3, 4, C2, C0, 1), F_ARCHEXT },
  { "ttbr0_el3", CPENC(3,6,C2,C0,0), 0 },
  { "ttbr0_el12", CPENC (3, 5, C2, C0, 0), F_ARCHEXT },
  { "ttbr1_el12", CPENC (3, 5, C2, C0, 1), F_ARCHEXT },
  { "vttbr_el2", CPENC(3,4,C2,C1,0), 0 },
  { "tcr_el1", CPENC(3,0,C2,C0,2), 0 },
  { "tcr_el2", CPENC(3,4,C2,C0,2), 0 },
  { "tcr_el3", CPENC(3,6,C2,C0,2), 0 },
  { "tcr_el12", CPENC (3, 5, C2, C0, 2), F_ARCHEXT },
  { "vtcr_el2", CPENC(3,4,C2,C1,2), 0 },
  /* Pointer-authentication key registers (F_ARCHEXT).  */
  { "apiakeylo_el1", CPENC (3, 0, C2, C1, 0), F_ARCHEXT },
  { "apiakeyhi_el1", CPENC (3, 0, C2, C1, 1), F_ARCHEXT },
  { "apibkeylo_el1", CPENC (3, 0, C2, C1, 2), F_ARCHEXT },
  { "apibkeyhi_el1", CPENC (3, 0, C2, C1, 3), F_ARCHEXT },
  { "apdakeylo_el1", CPENC (3, 0, C2, C2, 0), F_ARCHEXT },
  { "apdakeyhi_el1", CPENC (3, 0, C2, C2, 1), F_ARCHEXT },
  { "apdbkeylo_el1", CPENC (3, 0, C2, C2, 2), F_ARCHEXT },
  { "apdbkeyhi_el1", CPENC (3, 0, C2, C2, 3), F_ARCHEXT },
  { "apgakeylo_el1", CPENC (3, 0, C2, C3, 0), F_ARCHEXT },
  { "apgakeyhi_el1", CPENC (3, 0, C2, C3, 1), F_ARCHEXT },
  /* Fault and exception-syndrome registers.  */
  { "afsr0_el1", CPENC(3,0,C5,C1,0), 0 },
  { "afsr1_el1", CPENC(3,0,C5,C1,1), 0 },
  { "afsr0_el2", CPENC(3,4,C5,C1,0), 0 },
  { "afsr1_el2", CPENC(3,4,C5,C1,1), 0 },
  { "afsr0_el3", CPENC(3,6,C5,C1,0), 0 },
  { "afsr0_el12", CPENC (3, 5, C5, C1, 0), F_ARCHEXT },
  { "afsr1_el3", CPENC(3,6,C5,C1,1), 0 },
  { "afsr1_el12", CPENC (3, 5, C5, C1, 1), F_ARCHEXT },
  { "esr_el1", CPENC(3,0,C5,C2,0), 0 },
  { "esr_el2", CPENC(3,4,C5,C2,0), 0 },
  { "esr_el3", CPENC(3,6,C5,C2,0), 0 },
  { "esr_el12", CPENC (3, 5, C5, C2, 0), F_ARCHEXT },
  { "vsesr_el2", CPENC (3, 4, C5, C2, 3), F_ARCHEXT }, /* RO */
  { "fpexc32_el2", CPENC(3,4,C5,C3,0), 0 },
  /* RAS error-record registers (F_ARCHEXT).  */
  { "erridr_el1", CPENC (3, 0, C5, C3, 0), F_ARCHEXT }, /* RO */
  { "errselr_el1", CPENC (3, 0, C5, C3, 1), F_ARCHEXT },
  { "erxfr_el1", CPENC (3, 0, C5, C4, 0), F_ARCHEXT }, /* RO */
  { "erxctlr_el1", CPENC (3, 0, C5, C4, 1), F_ARCHEXT },
  { "erxstatus_el1", CPENC (3, 0, C5, C4, 2), F_ARCHEXT },
  { "erxaddr_el1", CPENC (3, 0, C5, C4, 3), F_ARCHEXT },
  { "erxmisc0_el1", CPENC (3, 0, C5, C5, 0), F_ARCHEXT },
  { "erxmisc1_el1", CPENC (3, 0, C5, C5, 1), F_ARCHEXT },
  { "far_el1", CPENC(3,0,C6,C0,0), 0 },
  { "far_el2", CPENC(3,4,C6,C0,0), 0 },
  { "far_el3", CPENC(3,6,C6,C0,0), 0 },
  { "far_el12", CPENC (3, 5, C6, C0, 0), F_ARCHEXT },
  { "hpfar_el2", CPENC(3,4,C6,C0,4), 0 },
  { "par_el1", CPENC(3,0,C7,C4,0), 0 },
  /* Memory-attribute registers.  */
  { "mair_el1", CPENC(3,0,C10,C2,0), 0 },
  { "mair_el2", CPENC(3,4,C10,C2,0), 0 },
  { "mair_el3", CPENC(3,6,C10,C2,0), 0 },
  { "mair_el12", CPENC (3, 5, C10, C2, 0), F_ARCHEXT },
  { "amair_el1", CPENC(3,0,C10,C3,0), 0 },
  { "amair_el2", CPENC(3,4,C10,C3,0), 0 },
  { "amair_el3", CPENC(3,6,C10,C3,0), 0 },
  { "amair_el12", CPENC (3, 5, C10, C3, 0), F_ARCHEXT },
  /* Vector-base and reset registers.  */
  { "vbar_el1", CPENC(3,0,C12,C0,0), 0 },
  { "vbar_el2", CPENC(3,4,C12,C0,0), 0 },
  { "vbar_el3", CPENC(3,6,C12,C0,0), 0 },
  { "vbar_el12", CPENC (3, 5, C12, C0, 0), F_ARCHEXT },
  { "rvbar_el1", CPENC(3,0,C12,C0,1), 0 }, /* RO */
  { "rvbar_el2", CPENC(3,4,C12,C0,1), 0 }, /* RO */
  { "rvbar_el3", CPENC(3,6,C12,C0,1), 0 }, /* RO */
  { "rmr_el1", CPENC(3,0,C12,C0,2), 0 },
  { "rmr_el2", CPENC(3,4,C12,C0,2), 0 },
  { "rmr_el3", CPENC(3,6,C12,C0,2), 0 },
  { "isr_el1", CPENC(3,0,C12,C1,0), 0 }, /* RO */
  { "disr_el1", CPENC (3, 0, C12, C1, 1), F_ARCHEXT },
  { "vdisr_el2", CPENC (3, 4, C12, C1, 1), F_ARCHEXT },
  /* Context and thread-pointer registers.  */
  { "contextidr_el1", CPENC(3,0,C13,C0,1), 0 },
  { "contextidr_el2", CPENC (3, 4, C13, C0, 1), F_ARCHEXT },
  { "contextidr_el12", CPENC (3, 5, C13, C0, 1), F_ARCHEXT },
  { "tpidr_el0", CPENC(3,3,C13,C0,2), 0 },
  { "tpidrro_el0", CPENC(3,3,C13,C0,3), 0 }, /* RO */
  { "tpidr_el1", CPENC(3,0,C13,C0,4), 0 },
  { "tpidr_el2", CPENC(3,4,C13,C0,2), 0 },
  { "tpidr_el3", CPENC(3,6,C13,C0,2), 0 },
  { "teecr32_el1", CPENC(2,2,C0, C0,0), 0 }, /* See section 3.9.7.1 */
  /* Generic-timer registers.  */
  { "cntfrq_el0", CPENC(3,3,C14,C0,0), 0 }, /* RO */
  { "cntpct_el0", CPENC(3,3,C14,C0,1), 0 }, /* RO */
  { "cntvct_el0", CPENC(3,3,C14,C0,2), 0 }, /* RO */
  { "cntvoff_el2", CPENC(3,4,C14,C0,3), 0 },
  { "cntkctl_el1", CPENC(3,0,C14,C1,0), 0 },
  { "cntkctl_el12", CPENC (3, 5, C14, C1, 0), F_ARCHEXT },
  { "cnthctl_el2", CPENC(3,4,C14,C1,0), 0 },
  { "cntp_tval_el0", CPENC(3,3,C14,C2,0), 0 },
  { "cntp_tval_el02", CPENC (3, 5, C14, C2, 0), F_ARCHEXT },
  { "cntp_ctl_el0", CPENC(3,3,C14,C2,1), 0 },
  { "cntp_ctl_el02", CPENC (3, 5, C14, C2, 1), F_ARCHEXT },
  { "cntp_cval_el0", CPENC(3,3,C14,C2,2), 0 },
  { "cntp_cval_el02", CPENC (3, 5, C14, C2, 2), F_ARCHEXT },
  { "cntv_tval_el0", CPENC(3,3,C14,C3,0), 0 },
  { "cntv_tval_el02", CPENC (3, 5, C14, C3, 0), F_ARCHEXT },
  { "cntv_ctl_el0", CPENC(3,3,C14,C3,1), 0 },
  { "cntv_ctl_el02", CPENC (3, 5, C14, C3, 1), F_ARCHEXT },
  { "cntv_cval_el0", CPENC(3,3,C14,C3,2), 0 },
  { "cntv_cval_el02", CPENC (3, 5, C14, C3, 2), F_ARCHEXT },
  { "cnthp_tval_el2", CPENC(3,4,C14,C2,0), 0 },
  { "cnthp_ctl_el2", CPENC(3,4,C14,C2,1), 0 },
  { "cnthp_cval_el2", CPENC(3,4,C14,C2,2), 0 },
  { "cntps_tval_el1", CPENC(3,7,C14,C2,0), 0 },
  { "cntps_ctl_el1", CPENC(3,7,C14,C2,1), 0 },
  { "cntps_cval_el1", CPENC(3,7,C14,C2,2), 0 },
  { "cnthv_tval_el2", CPENC (3, 4, C14, C3, 0), F_ARCHEXT },
  { "cnthv_ctl_el2", CPENC (3, 4, C14, C3, 1), F_ARCHEXT },
  { "cnthv_cval_el2", CPENC (3, 4, C14, C3, 2), F_ARCHEXT },
  { "dacr32_el2", CPENC(3,4,C3,C0,0), 0 },
  { "ifsr32_el2", CPENC(3,4,C5,C0,1), 0 },
  { "teehbr32_el1", CPENC(2,2,C1,C0,0), 0 },
  { "sder32_el3", CPENC(3,6,C1,C1,1), 0 },
  /* Debug registers (op0 == 2).  */
  { "mdscr_el1", CPENC(2,0,C0, C2, 2), 0 },
  { "mdccsr_el0", CPENC(2,3,C0, C1, 0), 0 }, /* r */
  { "mdccint_el1", CPENC(2,0,C0, C2, 0), 0 },
  { "dbgdtr_el0", CPENC(2,3,C0, C4, 0), 0 },
  { "dbgdtrrx_el0", CPENC(2,3,C0, C5, 0), 0 }, /* r */
  { "dbgdtrtx_el0", CPENC(2,3,C0, C5, 0), 0 }, /* w */
  { "osdtrrx_el1", CPENC(2,0,C0, C0, 2), 0 }, /* r */
  { "osdtrtx_el1", CPENC(2,0,C0, C3, 2), 0 }, /* w */
  { "oseccr_el1", CPENC(2,0,C0, C6, 2), 0 },
  { "dbgvcr32_el2", CPENC(2,4,C0, C7, 0), 0 },
  /* Debug breakpoint value/control registers 0-15.  */
  { "dbgbvr0_el1", CPENC(2,0,C0, C0, 4), 0 },
  { "dbgbvr1_el1", CPENC(2,0,C0, C1, 4), 0 },
  { "dbgbvr2_el1", CPENC(2,0,C0, C2, 4), 0 },
  { "dbgbvr3_el1", CPENC(2,0,C0, C3, 4), 0 },
  { "dbgbvr4_el1", CPENC(2,0,C0, C4, 4), 0 },
  { "dbgbvr5_el1", CPENC(2,0,C0, C5, 4), 0 },
  { "dbgbvr6_el1", CPENC(2,0,C0, C6, 4), 0 },
  { "dbgbvr7_el1", CPENC(2,0,C0, C7, 4), 0 },
  { "dbgbvr8_el1", CPENC(2,0,C0, C8, 4), 0 },
  { "dbgbvr9_el1", CPENC(2,0,C0, C9, 4), 0 },
  { "dbgbvr10_el1", CPENC(2,0,C0, C10,4), 0 },
  { "dbgbvr11_el1", CPENC(2,0,C0, C11,4), 0 },
  { "dbgbvr12_el1", CPENC(2,0,C0, C12,4), 0 },
  { "dbgbvr13_el1", CPENC(2,0,C0, C13,4), 0 },
  { "dbgbvr14_el1", CPENC(2,0,C0, C14,4), 0 },
  { "dbgbvr15_el1", CPENC(2,0,C0, C15,4), 0 },
  { "dbgbcr0_el1", CPENC(2,0,C0, C0, 5), 0 },
  { "dbgbcr1_el1", CPENC(2,0,C0, C1, 5), 0 },
  { "dbgbcr2_el1", CPENC(2,0,C0, C2, 5), 0 },
  { "dbgbcr3_el1", CPENC(2,0,C0, C3, 5), 0 },
  { "dbgbcr4_el1", CPENC(2,0,C0, C4, 5), 0 },
  { "dbgbcr5_el1", CPENC(2,0,C0, C5, 5), 0 },
  { "dbgbcr6_el1", CPENC(2,0,C0, C6, 5), 0 },
  { "dbgbcr7_el1", CPENC(2,0,C0, C7, 5), 0 },
  { "dbgbcr8_el1", CPENC(2,0,C0, C8, 5), 0 },
  { "dbgbcr9_el1", CPENC(2,0,C0, C9, 5), 0 },
  { "dbgbcr10_el1", CPENC(2,0,C0, C10,5), 0 },
  { "dbgbcr11_el1", CPENC(2,0,C0, C11,5), 0 },
  { "dbgbcr12_el1", CPENC(2,0,C0, C12,5), 0 },
  { "dbgbcr13_el1", CPENC(2,0,C0, C13,5), 0 },
  { "dbgbcr14_el1", CPENC(2,0,C0, C14,5), 0 },
  { "dbgbcr15_el1", CPENC(2,0,C0, C15,5), 0 },
  /* Debug watchpoint value/control registers 0-15.  */
  { "dbgwvr0_el1", CPENC(2,0,C0, C0, 6), 0 },
  { "dbgwvr1_el1", CPENC(2,0,C0, C1, 6), 0 },
  { "dbgwvr2_el1", CPENC(2,0,C0, C2, 6), 0 },
  { "dbgwvr3_el1", CPENC(2,0,C0, C3, 6), 0 },
  { "dbgwvr4_el1", CPENC(2,0,C0, C4, 6), 0 },
  { "dbgwvr5_el1", CPENC(2,0,C0, C5, 6), 0 },
  { "dbgwvr6_el1", CPENC(2,0,C0, C6, 6), 0 },
  { "dbgwvr7_el1", CPENC(2,0,C0, C7, 6), 0 },
  { "dbgwvr8_el1", CPENC(2,0,C0, C8, 6), 0 },
  { "dbgwvr9_el1", CPENC(2,0,C0, C9, 6), 0 },
  { "dbgwvr10_el1", CPENC(2,0,C0, C10,6), 0 },
  { "dbgwvr11_el1", CPENC(2,0,C0, C11,6), 0 },
  { "dbgwvr12_el1", CPENC(2,0,C0, C12,6), 0 },
  { "dbgwvr13_el1", CPENC(2,0,C0, C13,6), 0 },
  { "dbgwvr14_el1", CPENC(2,0,C0, C14,6), 0 },
  { "dbgwvr15_el1", CPENC(2,0,C0, C15,6), 0 },
  { "dbgwcr0_el1", CPENC(2,0,C0, C0, 7), 0 },
  { "dbgwcr1_el1", CPENC(2,0,C0, C1, 7), 0 },
  { "dbgwcr2_el1", CPENC(2,0,C0, C2, 7), 0 },
  { "dbgwcr3_el1", CPENC(2,0,C0, C3, 7), 0 },
  { "dbgwcr4_el1", CPENC(2,0,C0, C4, 7), 0 },
  { "dbgwcr5_el1", CPENC(2,0,C0, C5, 7), 0 },
  { "dbgwcr6_el1", CPENC(2,0,C0, C6, 7), 0 },
  { "dbgwcr7_el1", CPENC(2,0,C0, C7, 7), 0 },
  { "dbgwcr8_el1", CPENC(2,0,C0, C8, 7), 0 },
  { "dbgwcr9_el1", CPENC(2,0,C0, C9, 7), 0 },
  { "dbgwcr10_el1", CPENC(2,0,C0, C10,7), 0 },
  { "dbgwcr11_el1", CPENC(2,0,C0, C11,7), 0 },
  { "dbgwcr12_el1", CPENC(2,0,C0, C12,7), 0 },
  { "dbgwcr13_el1", CPENC(2,0,C0, C13,7), 0 },
  { "dbgwcr14_el1", CPENC(2,0,C0, C14,7), 0 },
  { "dbgwcr15_el1", CPENC(2,0,C0, C15,7), 0 },
  { "mdrar_el1", CPENC(2,0,C1, C0, 0), 0 }, /* r */
  { "oslar_el1", CPENC(2,0,C1, C0, 4), 0 }, /* w */
  { "oslsr_el1", CPENC(2,0,C1, C1, 4), 0 }, /* r */
  { "osdlr_el1", CPENC(2,0,C1, C3, 4), 0 },
  { "dbgprcr_el1", CPENC(2,0,C1, C4, 4), 0 },
  { "dbgclaimset_el1", CPENC(2,0,C7, C8, 6), 0 },
  { "dbgclaimclr_el1", CPENC(2,0,C7, C9, 6), 0 },
  { "dbgauthstatus_el1", CPENC(2,0,C7, C14,6), 0 }, /* r */
  /* Statistical Profiling registers (F_ARCHEXT).  */
  { "pmblimitr_el1", CPENC (3, 0, C9, C10, 0), F_ARCHEXT }, /* rw */
  { "pmbptr_el1", CPENC (3, 0, C9, C10, 1), F_ARCHEXT }, /* rw */
  { "pmbsr_el1", CPENC (3, 0, C9, C10, 3), F_ARCHEXT }, /* rw */
  { "pmbidr_el1", CPENC (3, 0, C9, C10, 7), F_ARCHEXT }, /* ro */
  { "pmscr_el1", CPENC (3, 0, C9, C9, 0), F_ARCHEXT }, /* rw */
  { "pmsicr_el1", CPENC (3, 0, C9, C9, 2), F_ARCHEXT }, /* rw */
  { "pmsirr_el1", CPENC (3, 0, C9, C9, 3), F_ARCHEXT }, /* rw */
  { "pmsfcr_el1", CPENC (3, 0, C9, C9, 4), F_ARCHEXT }, /* rw */
  { "pmsevfr_el1", CPENC (3, 0, C9, C9, 5), F_ARCHEXT }, /* rw */
  { "pmslatfr_el1", CPENC (3, 0, C9, C9, 6), F_ARCHEXT }, /* rw */
  { "pmsidr_el1", CPENC (3, 0, C9, C9, 7), F_ARCHEXT }, /* ro */
  { "pmscr_el2", CPENC (3, 4, C9, C9, 0), F_ARCHEXT }, /* rw */
  { "pmscr_el12", CPENC (3, 5, C9, C9, 0), F_ARCHEXT }, /* rw */
  /* Performance-monitor registers.  */
  { "pmcr_el0", CPENC(3,3,C9,C12, 0), 0 },
  { "pmcntenset_el0", CPENC(3,3,C9,C12, 1), 0 },
  { "pmcntenclr_el0", CPENC(3,3,C9,C12, 2), 0 },
  { "pmovsclr_el0", CPENC(3,3,C9,C12, 3), 0 },
  { "pmswinc_el0", CPENC(3,3,C9,C12, 4), 0 }, /* w */
  { "pmselr_el0", CPENC(3,3,C9,C12, 5), 0 },
  { "pmceid0_el0", CPENC(3,3,C9,C12, 6), 0 }, /* r */
  { "pmceid1_el0", CPENC(3,3,C9,C12, 7), 0 }, /* r */
  { "pmccntr_el0", CPENC(3,3,C9,C13, 0), 0 },
  { "pmxevtyper_el0", CPENC(3,3,C9,C13, 1), 0 },
  { "pmxevcntr_el0", CPENC(3,3,C9,C13, 2), 0 },
  { "pmuserenr_el0", CPENC(3,3,C9,C14, 0), 0 },
  { "pmintenset_el1", CPENC(3,0,C9,C14, 1), 0 },
  { "pmintenclr_el1", CPENC(3,0,C9,C14, 2), 0 },
  { "pmovsset_el0", CPENC(3,3,C9,C14, 3), 0 },
  /* Performance-monitor event counter registers 0-30.  */
  { "pmevcntr0_el0", CPENC(3,3,C14,C8, 0), 0 },
  { "pmevcntr1_el0", CPENC(3,3,C14,C8, 1), 0 },
  { "pmevcntr2_el0", CPENC(3,3,C14,C8, 2), 0 },
  { "pmevcntr3_el0", CPENC(3,3,C14,C8, 3), 0 },
  { "pmevcntr4_el0", CPENC(3,3,C14,C8, 4), 0 },
  { "pmevcntr5_el0", CPENC(3,3,C14,C8, 5), 0 },
  { "pmevcntr6_el0", CPENC(3,3,C14,C8, 6), 0 },
  { "pmevcntr7_el0", CPENC(3,3,C14,C8, 7), 0 },
  { "pmevcntr8_el0", CPENC(3,3,C14,C9, 0), 0 },
  { "pmevcntr9_el0", CPENC(3,3,C14,C9, 1), 0 },
  { "pmevcntr10_el0", CPENC(3,3,C14,C9, 2), 0 },
  { "pmevcntr11_el0", CPENC(3,3,C14,C9, 3), 0 },
  { "pmevcntr12_el0", CPENC(3,3,C14,C9, 4), 0 },
  { "pmevcntr13_el0", CPENC(3,3,C14,C9, 5), 0 },
  { "pmevcntr14_el0", CPENC(3,3,C14,C9, 6), 0 },
  { "pmevcntr15_el0", CPENC(3,3,C14,C9, 7), 0 },
  { "pmevcntr16_el0", CPENC(3,3,C14,C10,0), 0 },
  { "pmevcntr17_el0", CPENC(3,3,C14,C10,1), 0 },
  { "pmevcntr18_el0", CPENC(3,3,C14,C10,2), 0 },
  { "pmevcntr19_el0", CPENC(3,3,C14,C10,3), 0 },
  { "pmevcntr20_el0", CPENC(3,3,C14,C10,4), 0 },
  { "pmevcntr21_el0", CPENC(3,3,C14,C10,5), 0 },
  { "pmevcntr22_el0", CPENC(3,3,C14,C10,6), 0 },
  { "pmevcntr23_el0", CPENC(3,3,C14,C10,7), 0 },
  { "pmevcntr24_el0", CPENC(3,3,C14,C11,0), 0 },
  { "pmevcntr25_el0", CPENC(3,3,C14,C11,1), 0 },
  { "pmevcntr26_el0", CPENC(3,3,C14,C11,2), 0 },
  { "pmevcntr27_el0", CPENC(3,3,C14,C11,3), 0 },
  { "pmevcntr28_el0", CPENC(3,3,C14,C11,4), 0 },
  { "pmevcntr29_el0", CPENC(3,3,C14,C11,5), 0 },
  { "pmevcntr30_el0", CPENC(3,3,C14,C11,6), 0 },
  /* Performance-monitor event type registers 0-30.  */
  { "pmevtyper0_el0", CPENC(3,3,C14,C12,0), 0 },
  { "pmevtyper1_el0", CPENC(3,3,C14,C12,1), 0 },
  { "pmevtyper2_el0", CPENC(3,3,C14,C12,2), 0 },
  { "pmevtyper3_el0", CPENC(3,3,C14,C12,3), 0 },
  { "pmevtyper4_el0", CPENC(3,3,C14,C12,4), 0 },
  { "pmevtyper5_el0", CPENC(3,3,C14,C12,5), 0 },
  { "pmevtyper6_el0", CPENC(3,3,C14,C12,6), 0 },
  { "pmevtyper7_el0", CPENC(3,3,C14,C12,7), 0 },
  { "pmevtyper8_el0", CPENC(3,3,C14,C13,0), 0 },
  { "pmevtyper9_el0", CPENC(3,3,C14,C13,1), 0 },
  { "pmevtyper10_el0", CPENC(3,3,C14,C13,2), 0 },
  { "pmevtyper11_el0", CPENC(3,3,C14,C13,3), 0 },
  { "pmevtyper12_el0", CPENC(3,3,C14,C13,4), 0 },
  { "pmevtyper13_el0", CPENC(3,3,C14,C13,5), 0 },
  { "pmevtyper14_el0", CPENC(3,3,C14,C13,6), 0 },
  { "pmevtyper15_el0", CPENC(3,3,C14,C13,7), 0 },
  { "pmevtyper16_el0", CPENC(3,3,C14,C14,0), 0 },
  { "pmevtyper17_el0", CPENC(3,3,C14,C14,1), 0 },
  { "pmevtyper18_el0", CPENC(3,3,C14,C14,2), 0 },
  { "pmevtyper19_el0", CPENC(3,3,C14,C14,3), 0 },
  { "pmevtyper20_el0", CPENC(3,3,C14,C14,4), 0 },
  { "pmevtyper21_el0", CPENC(3,3,C14,C14,5), 0 },
  { "pmevtyper22_el0", CPENC(3,3,C14,C14,6), 0 },
  { "pmevtyper23_el0", CPENC(3,3,C14,C14,7), 0 },
  { "pmevtyper24_el0", CPENC(3,3,C14,C15,0), 0 },
  { "pmevtyper25_el0", CPENC(3,3,C14,C15,1), 0 },
  { "pmevtyper26_el0", CPENC(3,3,C14,C15,2), 0 },
  { "pmevtyper27_el0", CPENC(3,3,C14,C15,3), 0 },
  { "pmevtyper28_el0", CPENC(3,3,C14,C15,4), 0 },
  { "pmevtyper29_el0", CPENC(3,3,C14,C15,5), 0 },
  { "pmevtyper30_el0", CPENC(3,3,C14,C15,6), 0 },
  { "pmccfiltr_el0", CPENC(3,3,C14,C15,7), 0 },
  { 0, CPENC(0,0,0,0,0), 0 },
};
3982 aarch64_sys_reg_deprecated_p (const aarch64_sys_reg
*reg
)
3984 return (reg
->flags
& F_DEPRECATED
) != 0;
3988 aarch64_sys_reg_supported_p (const aarch64_feature_set features
,
3989 const aarch64_sys_reg
*reg
)
3991 if (!(reg
->flags
& F_ARCHEXT
))
3994 /* PAN. Values are from aarch64_sys_regs. */
3995 if (reg
->value
== CPEN_(0,C2
,3)
3996 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_PAN
))
3999 /* Virtualization host extensions: system registers. */
4000 if ((reg
->value
== CPENC (3, 4, C2
, C0
, 1)
4001 || reg
->value
== CPENC (3, 4, C13
, C0
, 1)
4002 || reg
->value
== CPENC (3, 4, C14
, C3
, 0)
4003 || reg
->value
== CPENC (3, 4, C14
, C3
, 1)
4004 || reg
->value
== CPENC (3, 4, C14
, C3
, 2))
4005 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_1
))
4008 /* Virtualization host extensions: *_el12 names of *_el1 registers. */
4009 if ((reg
->value
== CPEN_ (5, C0
, 0)
4010 || reg
->value
== CPEN_ (5, C0
, 1)
4011 || reg
->value
== CPENC (3, 5, C1
, C0
, 0)
4012 || reg
->value
== CPENC (3, 5, C1
, C0
, 2)
4013 || reg
->value
== CPENC (3, 5, C2
, C0
, 0)
4014 || reg
->value
== CPENC (3, 5, C2
, C0
, 1)
4015 || reg
->value
== CPENC (3, 5, C2
, C0
, 2)
4016 || reg
->value
== CPENC (3, 5, C5
, C1
, 0)
4017 || reg
->value
== CPENC (3, 5, C5
, C1
, 1)
4018 || reg
->value
== CPENC (3, 5, C5
, C2
, 0)
4019 || reg
->value
== CPENC (3, 5, C6
, C0
, 0)
4020 || reg
->value
== CPENC (3, 5, C10
, C2
, 0)
4021 || reg
->value
== CPENC (3, 5, C10
, C3
, 0)
4022 || reg
->value
== CPENC (3, 5, C12
, C0
, 0)
4023 || reg
->value
== CPENC (3, 5, C13
, C0
, 1)
4024 || reg
->value
== CPENC (3, 5, C14
, C1
, 0))
4025 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_1
))
4028 /* Virtualization host extensions: *_el02 names of *_el0 registers. */
4029 if ((reg
->value
== CPENC (3, 5, C14
, C2
, 0)
4030 || reg
->value
== CPENC (3, 5, C14
, C2
, 1)
4031 || reg
->value
== CPENC (3, 5, C14
, C2
, 2)
4032 || reg
->value
== CPENC (3, 5, C14
, C3
, 0)
4033 || reg
->value
== CPENC (3, 5, C14
, C3
, 1)
4034 || reg
->value
== CPENC (3, 5, C14
, C3
, 2))
4035 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_1
))
4038 /* ARMv8.2 features. */
4040 /* ID_AA64MMFR2_EL1. */
4041 if (reg
->value
== CPENC (3, 0, C0
, C7
, 2)
4042 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_2
))
4046 if (reg
->value
== CPEN_ (0, C2
, 4)
4047 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_2
))
4050 /* RAS extension. */
4052 /* ERRIDR_EL1, ERRSELR_EL1, ERXFR_EL1, ERXCTLR_EL1, ERXSTATUS_EL, ERXADDR_EL1,
4053 ERXMISC0_EL1 AND ERXMISC1_EL1. */
4054 if ((reg
->value
== CPENC (3, 0, C5
, C3
, 0)
4055 || reg
->value
== CPENC (3, 0, C5
, C3
, 1)
4056 || reg
->value
== CPENC (3, 0, C5
, C3
, 2)
4057 || reg
->value
== CPENC (3, 0, C5
, C3
, 3)
4058 || reg
->value
== CPENC (3, 0, C5
, C4
, 0)
4059 || reg
->value
== CPENC (3, 0, C5
, C4
, 1)
4060 || reg
->value
== CPENC (3, 0, C5
, C4
, 2)
4061 || reg
->value
== CPENC (3, 0, C5
, C4
, 3)
4062 || reg
->value
== CPENC (3, 0, C5
, C5
, 0)
4063 || reg
->value
== CPENC (3, 0, C5
, C5
, 1))
4064 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_RAS
))
4067 /* VSESR_EL2, DISR_EL1 and VDISR_EL2. */
4068 if ((reg
->value
== CPENC (3, 4, C5
, C2
, 3)
4069 || reg
->value
== CPENC (3, 0, C12
, C1
, 1)
4070 || reg
->value
== CPENC (3, 4, C12
, C1
, 1))
4071 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_RAS
))
4074 /* Statistical Profiling extension. */
4075 if ((reg
->value
== CPENC (3, 0, C9
, C10
, 0)
4076 || reg
->value
== CPENC (3, 0, C9
, C10
, 1)
4077 || reg
->value
== CPENC (3, 0, C9
, C10
, 3)
4078 || reg
->value
== CPENC (3, 0, C9
, C10
, 7)
4079 || reg
->value
== CPENC (3, 0, C9
, C9
, 0)
4080 || reg
->value
== CPENC (3, 0, C9
, C9
, 2)
4081 || reg
->value
== CPENC (3, 0, C9
, C9
, 3)
4082 || reg
->value
== CPENC (3, 0, C9
, C9
, 4)
4083 || reg
->value
== CPENC (3, 0, C9
, C9
, 5)
4084 || reg
->value
== CPENC (3, 0, C9
, C9
, 6)
4085 || reg
->value
== CPENC (3, 0, C9
, C9
, 7)
4086 || reg
->value
== CPENC (3, 4, C9
, C9
, 0)
4087 || reg
->value
== CPENC (3, 5, C9
, C9
, 0))
4088 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_PROFILE
))
4091 /* ARMv8.3 Pointer authentication keys. */
4092 if ((reg
->value
== CPENC (3, 0, C2
, C1
, 0)
4093 || reg
->value
== CPENC (3, 0, C2
, C1
, 1)
4094 || reg
->value
== CPENC (3, 0, C2
, C1
, 2)
4095 || reg
->value
== CPENC (3, 0, C2
, C1
, 3)
4096 || reg
->value
== CPENC (3, 0, C2
, C2
, 0)
4097 || reg
->value
== CPENC (3, 0, C2
, C2
, 1)
4098 || reg
->value
== CPENC (3, 0, C2
, C2
, 2)
4099 || reg
->value
== CPENC (3, 0, C2
, C2
, 3)
4100 || reg
->value
== CPENC (3, 0, C2
, C3
, 0)
4101 || reg
->value
== CPENC (3, 0, C2
, C3
, 1))
4102 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_3
))
/* Table of PSTATE field names for MSR (immediate), with their operand
   encodings and availability flags.  Terminated by a null entry.  */
const aarch64_sys_reg aarch64_pstatefields [] =
{
  { "spsel",   0x05, 0 },
  { "daifset", 0x1e, 0 },
  { "daifclr", 0x1f, 0 },
  { "pan",     0x04, F_ARCHEXT },
  { "uao",     0x03, F_ARCHEXT },
  { 0, CPENC(0,0,0,0,0), 0 },
};
4119 aarch64_pstatefield_supported_p (const aarch64_feature_set features
,
4120 const aarch64_sys_reg
*reg
)
4122 if (!(reg
->flags
& F_ARCHEXT
))
4125 /* PAN. Values are from aarch64_pstatefields. */
4126 if (reg
->value
== 0x04
4127 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_PAN
))
4130 /* UAO. Values are from aarch64_pstatefields. */
4131 if (reg
->value
== 0x03
4132 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_2
))
/* IC (instruction-cache maintenance) operations.  F_HASXT marks
   operations that take an <Xt> register operand.  */
const aarch64_sys_ins_reg aarch64_sys_regs_ic [] =
{
    { "ialluis", CPENS(0,C7,C1,0), 0 },
    { "iallu",   CPENS(0,C7,C5,0), 0 },
    { "ivau",    CPENS (3, C7, C5, 1), F_HASXT },
    { 0, CPENS(0,0,0,0), 0 }
};
/* DC (data-cache maintenance) operations.  F_HASXT marks operations
   that take an <Xt> register operand; F_ARCHEXT marks operations that
   are architecture-extension dependent.  */
const aarch64_sys_ins_reg aarch64_sys_regs_dc [] =
{
    { "zva",   CPENS (3, C7, C4, 1),  F_HASXT },
    { "ivac",  CPENS (0, C7, C6, 1),  F_HASXT },
    { "isw",   CPENS (0, C7, C6, 2),  F_HASXT },
    { "cvac",  CPENS (3, C7, C10, 1), F_HASXT },
    { "csw",   CPENS (0, C7, C10, 2), F_HASXT },
    { "cvau",  CPENS (3, C7, C11, 1), F_HASXT },
    { "cvap",  CPENS (3, C7, C12, 1), F_HASXT | F_ARCHEXT },
    { "civac", CPENS (3, C7, C14, 1), F_HASXT },
    { "cisw",  CPENS (0, C7, C14, 2), F_HASXT },
    { 0, CPENS(0,0,0,0), 0 }
};
/* AT (address-translation) operations.  All take an <Xt> operand
   (F_HASXT); s1e1rp/s1e1wp additionally depend on an architecture
   extension (F_ARCHEXT).  */
const aarch64_sys_ins_reg aarch64_sys_regs_at [] =
{
    { "s1e1r",  CPENS (0, C7, C8, 0), F_HASXT },
    { "s1e1w",  CPENS (0, C7, C8, 1), F_HASXT },
    { "s1e0r",  CPENS (0, C7, C8, 2), F_HASXT },
    { "s1e0w",  CPENS (0, C7, C8, 3), F_HASXT },
    { "s12e1r", CPENS (4, C7, C8, 4), F_HASXT },
    { "s12e1w", CPENS (4, C7, C8, 5), F_HASXT },
    { "s12e0r", CPENS (4, C7, C8, 6), F_HASXT },
    { "s12e0w", CPENS (4, C7, C8, 7), F_HASXT },
    { "s1e2r",  CPENS (4, C7, C8, 0), F_HASXT },
    { "s1e2w",  CPENS (4, C7, C8, 1), F_HASXT },
    { "s1e3r",  CPENS (6, C7, C8, 0), F_HASXT },
    { "s1e3w",  CPENS (6, C7, C8, 1), F_HASXT },
    { "s1e1rp", CPENS (0, C7, C9, 0), F_HASXT | F_ARCHEXT },
    { "s1e1wp", CPENS (0, C7, C9, 1), F_HASXT | F_ARCHEXT },
    { 0, CPENS(0,0,0,0), 0 }
};
/* TLBI (TLB-invalidation) operations.  F_HASXT marks operations that
   take an <Xt> register operand.  */
const aarch64_sys_ins_reg aarch64_sys_regs_tlbi [] =
{
    { "vmalle1",   CPENS(0,C8,C7,0), 0 },
    { "vae1",      CPENS (0, C8, C7, 1), F_HASXT },
    { "aside1",    CPENS (0, C8, C7, 2), F_HASXT },
    { "vaae1",     CPENS (0, C8, C7, 3), F_HASXT },
    { "vmalle1is", CPENS(0,C8,C3,0), 0 },
    { "vae1is",    CPENS (0, C8, C3, 1), F_HASXT },
    { "aside1is",  CPENS (0, C8, C3, 2), F_HASXT },
    { "vaae1is",   CPENS (0, C8, C3, 3), F_HASXT },
    { "ipas2e1is", CPENS (4, C8, C0, 1), F_HASXT },
    { "ipas2le1is",CPENS (4, C8, C0, 5), F_HASXT },
    { "ipas2e1",   CPENS (4, C8, C4, 1), F_HASXT },
    { "ipas2le1",  CPENS (4, C8, C4, 5), F_HASXT },
    { "vae2",      CPENS (4, C8, C7, 1), F_HASXT },
    { "vae2is",    CPENS (4, C8, C3, 1), F_HASXT },
    { "vmalls12e1",CPENS(4,C8,C7,6), 0 },
    { "vmalls12e1is",CPENS(4,C8,C3,6), 0 },
    { "vae3",      CPENS (6, C8, C7, 1), F_HASXT },
    { "vae3is",    CPENS (6, C8, C3, 1), F_HASXT },
    { "alle2",     CPENS(4,C8,C7,0), 0 },
    { "alle2is",   CPENS(4,C8,C3,0), 0 },
    { "alle1",     CPENS(4,C8,C7,4), 0 },
    { "alle1is",   CPENS(4,C8,C3,4), 0 },
    { "alle3",     CPENS(6,C8,C7,0), 0 },
    { "alle3is",   CPENS(6,C8,C3,0), 0 },
    { "vale1is",   CPENS (0, C8, C3, 5), F_HASXT },
    { "vale2is",   CPENS (4, C8, C3, 5), F_HASXT },
    { "vale3is",   CPENS (6, C8, C3, 5), F_HASXT },
    { "vaale1is",  CPENS (0, C8, C3, 7), F_HASXT },
    { "vale1",     CPENS (0, C8, C7, 5), F_HASXT },
    { "vale2",     CPENS (4, C8, C7, 5), F_HASXT },
    { "vale3",     CPENS (6, C8, C7, 5), F_HASXT },
    { "vaale1",    CPENS (0, C8, C7, 7), F_HASXT },
    { 0, CPENS(0,0,0,0), 0 }
};
4217 aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg
*sys_ins_reg
)
4219 return (sys_ins_reg
->flags
& F_HASXT
) != 0;
4223 aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features
,
4224 const aarch64_sys_ins_reg
*reg
)
4226 if (!(reg
->flags
& F_ARCHEXT
))
4229 /* DC CVAP. Values are from aarch64_sys_regs_dc. */
4230 if (reg
->value
== CPENS (3, C7
, C12
, 1)
4231 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_2
))
4234 /* AT S1E1RP, AT S1E1WP. Values are from aarch64_sys_regs_at. */
4235 if ((reg
->value
== CPENS (0, C7
, C9
, 0)
4236 || reg
->value
== CPENS (0, C7
, C9
, 1))
4237 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_2
))
/* Extract the single bit at position BT of INSN.  */
#define BIT(INSN,BT)     (((INSN) >> (BT)) & 1)
/* Extract the inclusive bit-field [HI:LO] of INSN.  */
#define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1 << (((HI) - (LO)) + 1)) - 1))
4264 verify_ldpsw (const struct aarch64_opcode
* opcode ATTRIBUTE_UNUSED
,
4265 const aarch64_insn insn
)
4267 int t
= BITS (insn
, 4, 0);
4268 int n
= BITS (insn
, 9, 5);
4269 int t2
= BITS (insn
, 14, 10);
4273 /* Write back enabled. */
4274 if ((t
== n
|| t2
== n
) && n
!= 31)
4288 /* Return true if VALUE cannot be moved into an SVE register using DUP
4289 (with any element size, not just ESIZE) and if using DUPM would
4290 therefore be OK. ESIZE is the number of bytes in the immediate. */
4293 aarch64_sve_dupm_mov_immediate_p (uint64_t uvalue
, int esize
)
4295 int64_t svalue
= uvalue
;
4296 uint64_t upper
= (uint64_t) -1 << (esize
* 4) << (esize
* 4);
4298 if ((uvalue
& ~upper
) != uvalue
&& (uvalue
| upper
) != uvalue
)
4300 if (esize
<= 4 || (uint32_t) uvalue
== (uint32_t) (uvalue
>> 32))
4302 svalue
= (int32_t) uvalue
;
4303 if (esize
<= 2 || (uint16_t) uvalue
== (uint16_t) (uvalue
>> 16))
4305 svalue
= (int16_t) uvalue
;
4306 if (esize
== 1 || (uint8_t) uvalue
== (uint8_t) (uvalue
>> 8))
4310 if ((svalue
& 0xff) == 0)
4312 return svalue
< -128 || svalue
>= 128;
4315 /* Include the opcode description table as well as the operand description
4317 #define VERIFIER(x) verify_##x
4318 #include "aarch64-tbl.h"