1 /* aarch64-opc.c -- AArch64 opcode support.
2 Copyright (C) 2009-2016 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of the GNU opcodes library.
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
30 #include "libiberty.h"
32 #include "aarch64-opc.h"
35 int debug_dump
= FALSE
;
36 #endif /* DEBUG_AARCH64 */
38 /* The enumeration strings associated with each value of a 5-bit SVE
39 pattern operand. A null entry indicates a reserved meaning. */
40 const char *const aarch64_sve_pattern_array
[32] = {
79 /* The enumeration strings associated with each value of a 4-bit SVE
80 prefetch operand. A null entry indicates a reserved meaning. */
81 const char *const aarch64_sve_prfop_array
[16] = {
102 /* Helper functions to determine which operand to be used to encode/decode
103 the size:Q fields for AdvSIMD instructions. */
105 static inline bfd_boolean
106 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier
)
108 return ((qualifier
>= AARCH64_OPND_QLF_V_8B
109 && qualifier
<= AARCH64_OPND_QLF_V_1Q
) ? TRUE
113 static inline bfd_boolean
114 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier
)
116 return ((qualifier
>= AARCH64_OPND_QLF_S_B
117 && qualifier
<= AARCH64_OPND_QLF_S_Q
) ? TRUE
127 DP_VECTOR_ACROSS_LANES
,
/* Index of the operand whose qualifier decides the size:Q encoding,
   indexed by enum data_pattern.  */
static const char significant_operand_index [] =
{
  0,	/* DP_UNKNOWN, by default using operand 0.  */
  0,	/* DP_VECTOR_3SAME */
  1,	/* DP_VECTOR_LONG */
  2,	/* DP_VECTOR_WIDE */
  1,	/* DP_VECTOR_ACROSS_LANES */
};
139 /* Given a sequence of qualifiers in QUALIFIERS, determine and return
141 N.B. QUALIFIERS is a possible sequence of qualifiers each of which
142 corresponds to one of a sequence of operands. */
144 static enum data_pattern
145 get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers
)
147 if (vector_qualifier_p (qualifiers
[0]) == TRUE
)
149 /* e.g. v.4s, v.4s, v.4s
150 or v.4h, v.4h, v.h[3]. */
151 if (qualifiers
[0] == qualifiers
[1]
152 && vector_qualifier_p (qualifiers
[2]) == TRUE
153 && (aarch64_get_qualifier_esize (qualifiers
[0])
154 == aarch64_get_qualifier_esize (qualifiers
[1]))
155 && (aarch64_get_qualifier_esize (qualifiers
[0])
156 == aarch64_get_qualifier_esize (qualifiers
[2])))
157 return DP_VECTOR_3SAME
;
158 /* e.g. v.8h, v.8b, v.8b.
159 or v.4s, v.4h, v.h[2].
161 if (vector_qualifier_p (qualifiers
[1]) == TRUE
162 && aarch64_get_qualifier_esize (qualifiers
[0]) != 0
163 && (aarch64_get_qualifier_esize (qualifiers
[0])
164 == aarch64_get_qualifier_esize (qualifiers
[1]) << 1))
165 return DP_VECTOR_LONG
;
166 /* e.g. v.8h, v.8h, v.8b. */
167 if (qualifiers
[0] == qualifiers
[1]
168 && vector_qualifier_p (qualifiers
[2]) == TRUE
169 && aarch64_get_qualifier_esize (qualifiers
[0]) != 0
170 && (aarch64_get_qualifier_esize (qualifiers
[0])
171 == aarch64_get_qualifier_esize (qualifiers
[2]) << 1)
172 && (aarch64_get_qualifier_esize (qualifiers
[0])
173 == aarch64_get_qualifier_esize (qualifiers
[1])))
174 return DP_VECTOR_WIDE
;
176 else if (fp_qualifier_p (qualifiers
[0]) == TRUE
)
178 /* e.g. SADDLV <V><d>, <Vn>.<T>. */
179 if (vector_qualifier_p (qualifiers
[1]) == TRUE
180 && qualifiers
[2] == AARCH64_OPND_QLF_NIL
)
181 return DP_VECTOR_ACROSS_LANES
;
187 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
188 the AdvSIMD instructions. */
189 /* N.B. it is possible to do some optimization that doesn't call
190 get_data_pattern each time when we need to select an operand. We can
191 either buffer the caculated the result or statically generate the data,
192 however, it is not obvious that the optimization will bring significant
196 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode
*opcode
)
199 significant_operand_index
[get_data_pattern (opcode
->qualifiers_list
[0])];
202 const aarch64_field fields
[] =
205 { 0, 4 }, /* cond2: condition in truly conditional-executed inst. */
206 { 0, 4 }, /* nzcv: flag bit specifier, encoded in the "nzcv" field. */
207 { 5, 5 }, /* defgh: d:e:f:g:h bits in AdvSIMD modified immediate. */
208 { 16, 3 }, /* abc: a:b:c bits in AdvSIMD modified immediate. */
209 { 5, 19 }, /* imm19: e.g. in CBZ. */
210 { 5, 19 }, /* immhi: e.g. in ADRP. */
211 { 29, 2 }, /* immlo: e.g. in ADRP. */
212 { 22, 2 }, /* size: in most AdvSIMD and floating-point instructions. */
213 { 10, 2 }, /* vldst_size: size field in the AdvSIMD load/store inst. */
214 { 29, 1 }, /* op: in AdvSIMD modified immediate instructions. */
215 { 30, 1 }, /* Q: in most AdvSIMD instructions. */
216 { 0, 5 }, /* Rt: in load/store instructions. */
217 { 0, 5 }, /* Rd: in many integer instructions. */
218 { 5, 5 }, /* Rn: in many integer instructions. */
219 { 10, 5 }, /* Rt2: in load/store pair instructions. */
220 { 10, 5 }, /* Ra: in fp instructions. */
221 { 5, 3 }, /* op2: in the system instructions. */
222 { 8, 4 }, /* CRm: in the system instructions. */
223 { 12, 4 }, /* CRn: in the system instructions. */
224 { 16, 3 }, /* op1: in the system instructions. */
225 { 19, 2 }, /* op0: in the system instructions. */
226 { 10, 3 }, /* imm3: in add/sub extended reg instructions. */
227 { 12, 4 }, /* cond: condition flags as a source operand. */
228 { 12, 4 }, /* opcode: in advsimd load/store instructions. */
229 { 12, 4 }, /* cmode: in advsimd modified immediate instructions. */
230 { 13, 3 }, /* asisdlso_opcode: opcode in advsimd ld/st single element. */
231 { 13, 2 }, /* len: in advsimd tbl/tbx instructions. */
232 { 16, 5 }, /* Rm: in ld/st reg offset and some integer inst. */
233 { 16, 5 }, /* Rs: in load/store exclusive instructions. */
234 { 13, 3 }, /* option: in ld/st reg offset + add/sub extended reg inst. */
235 { 12, 1 }, /* S: in load/store reg offset instructions. */
236 { 21, 2 }, /* hw: in move wide constant instructions. */
237 { 22, 2 }, /* opc: in load/store reg offset instructions. */
238 { 23, 1 }, /* opc1: in load/store reg offset instructions. */
239 { 22, 2 }, /* shift: in add/sub reg/imm shifted instructions. */
240 { 22, 2 }, /* type: floating point type field in fp data inst. */
241 { 30, 2 }, /* ldst_size: size field in ld/st reg offset inst. */
242 { 10, 6 }, /* imm6: in add/sub reg shifted instructions. */
243 { 11, 4 }, /* imm4: in advsimd ext and advsimd ins instructions. */
244 { 16, 5 }, /* imm5: in conditional compare (immediate) instructions. */
245 { 15, 7 }, /* imm7: in load/store pair pre/post index instructions. */
246 { 13, 8 }, /* imm8: in floating-point scalar move immediate inst. */
247 { 12, 9 }, /* imm9: in load/store pre/post index instructions. */
248 { 10, 12 }, /* imm12: in ld/st unsigned imm or add/sub shifted inst. */
249 { 5, 14 }, /* imm14: in test bit and branch instructions. */
250 { 5, 16 }, /* imm16: in exception instructions. */
251 { 0, 26 }, /* imm26: in unconditional branch instructions. */
252 { 10, 6 }, /* imms: in bitfield and logical immediate instructions. */
253 { 16, 6 }, /* immr: in bitfield and logical immediate instructions. */
254 { 16, 3 }, /* immb: in advsimd shift by immediate instructions. */
255 { 19, 4 }, /* immh: in advsimd shift by immediate instructions. */
256 { 22, 1 }, /* N: in logical (immediate) instructions. */
257 { 11, 1 }, /* index: in ld/st inst deciding the pre/post-index. */
258 { 24, 1 }, /* index2: in ld/st pair inst deciding the pre/post-index. */
259 { 31, 1 }, /* sf: in integer data processing instructions. */
260 { 30, 1 }, /* lse_size: in LSE extension atomic instructions. */
261 { 11, 1 }, /* H: in advsimd scalar x indexed element instructions. */
262 { 21, 1 }, /* L: in advsimd scalar x indexed element instructions. */
263 { 20, 1 }, /* M: in advsimd scalar x indexed element instructions. */
264 { 31, 1 }, /* b5: in the test bit and branch instructions. */
265 { 19, 5 }, /* b40: in the test bit and branch instructions. */
266 { 10, 6 }, /* scale: in the fixed-point scalar to fp converting inst. */
267 { 0, 4 }, /* SVE_Pd: p0-p15, bits [3,0]. */
268 { 10, 3 }, /* SVE_Pg3: p0-p7, bits [12,10]. */
269 { 5, 4 }, /* SVE_Pg4_5: p0-p15, bits [8,5]. */
270 { 10, 4 }, /* SVE_Pg4_10: p0-p15, bits [13,10]. */
271 { 16, 4 }, /* SVE_Pg4_16: p0-p15, bits [19,16]. */
272 { 16, 4 }, /* SVE_Pm: p0-p15, bits [19,16]. */
273 { 5, 4 }, /* SVE_Pn: p0-p15, bits [8,5]. */
274 { 0, 4 }, /* SVE_Pt: p0-p15, bits [3,0]. */
275 { 5, 5 }, /* SVE_Za_5: SVE vector register, bits [9,5]. */
276 { 16, 5 }, /* SVE_Za_16: SVE vector register, bits [20,16]. */
277 { 0, 5 }, /* SVE_Zd: SVE vector register. bits [4,0]. */
278 { 5, 5 }, /* SVE_Zm_5: SVE vector register, bits [9,5]. */
279 { 16, 5 }, /* SVE_Zm_16: SVE vector register, bits [20,16]. */
280 { 5, 5 }, /* SVE_Zn: SVE vector register, bits [9,5]. */
281 { 0, 5 }, /* SVE_Zt: SVE vector register, bits [4,0]. */
282 { 16, 4 }, /* SVE_imm4: 4-bit immediate field. */
283 { 16, 6 }, /* SVE_imm6: 6-bit immediate field. */
284 { 10, 2 }, /* SVE_msz: 2-bit shift amount for ADR. */
285 { 5, 5 }, /* SVE_pattern: vector pattern enumeration. */
286 { 0, 4 }, /* SVE_prfop: prefetch operation for SVE PRF[BHWD]. */
287 { 22, 2 }, /* SVE_tszh: triangular size select high, bits [23,22]. */
288 { 14, 1 }, /* SVE_xs_14: UXTW/SXTW select (bit 14). */
289 { 22, 1 } /* SVE_xs_22: UXTW/SXTW select (bit 22). */
292 enum aarch64_operand_class
293 aarch64_get_operand_class (enum aarch64_opnd type
)
295 return aarch64_operands
[type
].op_class
;
299 aarch64_get_operand_name (enum aarch64_opnd type
)
301 return aarch64_operands
[type
].name
;
304 /* Get operand description string.
305 This is usually for the diagnosis purpose. */
307 aarch64_get_operand_desc (enum aarch64_opnd type
)
309 return aarch64_operands
[type
].desc
;
312 /* Table of all conditional affixes. */
313 const aarch64_cond aarch64_conds
[16] =
318 {{"cc", "lo", "ul"}, 0x3},
334 get_cond_from_value (aarch64_insn value
)
337 return &aarch64_conds
[(unsigned int) value
];
341 get_inverted_cond (const aarch64_cond
*cond
)
343 return &aarch64_conds
[cond
->value
^ 0x1];
346 /* Table describing the operand extension/shifting operators; indexed by
347 enum aarch64_modifier_kind.
349 The value column provides the most common values for encoding modifiers,
350 which enables table-driven encoding/decoding for the modifiers. */
351 const struct aarch64_name_value_pair aarch64_operand_modifiers
[] =
372 enum aarch64_modifier_kind
373 aarch64_get_operand_modifier (const struct aarch64_name_value_pair
*desc
)
375 return desc
- aarch64_operand_modifiers
;
379 aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind
)
381 return aarch64_operand_modifiers
[kind
].value
;
384 enum aarch64_modifier_kind
385 aarch64_get_operand_modifier_from_value (aarch64_insn value
,
386 bfd_boolean extend_p
)
388 if (extend_p
== TRUE
)
389 return AARCH64_MOD_UXTB
+ value
;
391 return AARCH64_MOD_LSL
- value
;
395 aarch64_extend_operator_p (enum aarch64_modifier_kind kind
)
397 return (kind
> AARCH64_MOD_LSL
&& kind
<= AARCH64_MOD_SXTX
)
401 static inline bfd_boolean
402 aarch64_shift_operator_p (enum aarch64_modifier_kind kind
)
404 return (kind
>= AARCH64_MOD_ROR
&& kind
<= AARCH64_MOD_LSL
)
408 const struct aarch64_name_value_pair aarch64_barrier_options
[16] =
428 /* Table describing the operands supported by the aliases of the HINT
431 The name column is the operand that is accepted for the alias. The value
432 column is the hint number of the alias. The list of operands is terminated
433 by NULL in the name column. */
435 const struct aarch64_name_value_pair aarch64_hint_options
[] =
437 { "csync", 0x11 }, /* PSB CSYNC. */
441 /* op -> op: load = 0 instruction = 1 store = 2
443 t -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1 */
444 #define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
445 const struct aarch64_name_value_pair aarch64_prfops
[32] =
447 { "pldl1keep", B(0, 1, 0) },
448 { "pldl1strm", B(0, 1, 1) },
449 { "pldl2keep", B(0, 2, 0) },
450 { "pldl2strm", B(0, 2, 1) },
451 { "pldl3keep", B(0, 3, 0) },
452 { "pldl3strm", B(0, 3, 1) },
455 { "plil1keep", B(1, 1, 0) },
456 { "plil1strm", B(1, 1, 1) },
457 { "plil2keep", B(1, 2, 0) },
458 { "plil2strm", B(1, 2, 1) },
459 { "plil3keep", B(1, 3, 0) },
460 { "plil3strm", B(1, 3, 1) },
463 { "pstl1keep", B(2, 1, 0) },
464 { "pstl1strm", B(2, 1, 1) },
465 { "pstl2keep", B(2, 2, 0) },
466 { "pstl2strm", B(2, 2, 1) },
467 { "pstl3keep", B(2, 3, 0) },
468 { "pstl3strm", B(2, 3, 1) },
/* Utilities on value constraint.  */

/* Return 1 iff LOW <= VALUE <= HIGH (bounds inclusive).  */
static int
value_in_range_p (int64_t value, int low, int high)
{
  return (value >= low && value <= high) ? 1 : 0;
}
/* Return non-zero if VALUE is a multiple of ALIGN.  ALIGN must be
   non-zero (division by zero otherwise).  */
static int
value_aligned_p (int64_t value, int align)
{
  return (value % align) == 0;
}
/* A signed value fits in a field.
   Return 1 iff VALUE is representable as a WIDTH-bit two's-complement
   signed field, i.e. -2^(WIDTH-1) <= VALUE < 2^(WIDTH-1).  */
static int
value_fit_signed_field_p (int64_t value, unsigned width)
{
  /* NOTE(review): guard restored from a garbled source — widths here are
     instruction fields, always well below 32.  */
  assert (width < 32);
  if (width < sizeof (value) * 8)
    {
      int64_t lim = (int64_t) 1 << (width - 1);
      if (value >= -lim && value < lim)
	return 1;
    }
  return 0;
}
/* An unsigned value fits in a field.
   Return 1 iff 0 <= VALUE < 2^WIDTH.  */
static int
value_fit_unsigned_field_p (int64_t value, unsigned width)
{
  /* NOTE(review): guard restored from a garbled source.  */
  assert (width < 32);
  if (width < sizeof (value) * 8)
    {
      int64_t lim = (int64_t) 1 << width;
      if (value >= 0 && value < lim)
	return 1;
    }
  return 0;
}
525 /* Return 1 if OPERAND is SP or WSP. */
527 aarch64_stack_pointer_p (const aarch64_opnd_info
*operand
)
529 return ((aarch64_get_operand_class (operand
->type
)
530 == AARCH64_OPND_CLASS_INT_REG
)
531 && operand_maybe_stack_pointer (aarch64_operands
+ operand
->type
)
532 && operand
->reg
.regno
== 31);
535 /* Return 1 if OPERAND is XZR or WZP. */
537 aarch64_zero_register_p (const aarch64_opnd_info
*operand
)
539 return ((aarch64_get_operand_class (operand
->type
)
540 == AARCH64_OPND_CLASS_INT_REG
)
541 && !operand_maybe_stack_pointer (aarch64_operands
+ operand
->type
)
542 && operand
->reg
.regno
== 31);
545 /* Return true if the operand *OPERAND that has the operand code
546 OPERAND->TYPE and been qualified by OPERAND->QUALIFIER can be also
547 qualified by the qualifier TARGET. */
550 operand_also_qualified_p (const struct aarch64_opnd_info
*operand
,
551 aarch64_opnd_qualifier_t target
)
553 switch (operand
->qualifier
)
555 case AARCH64_OPND_QLF_W
:
556 if (target
== AARCH64_OPND_QLF_WSP
&& aarch64_stack_pointer_p (operand
))
559 case AARCH64_OPND_QLF_X
:
560 if (target
== AARCH64_OPND_QLF_SP
&& aarch64_stack_pointer_p (operand
))
563 case AARCH64_OPND_QLF_WSP
:
564 if (target
== AARCH64_OPND_QLF_W
565 && operand_maybe_stack_pointer (aarch64_operands
+ operand
->type
))
568 case AARCH64_OPND_QLF_SP
:
569 if (target
== AARCH64_OPND_QLF_X
570 && operand_maybe_stack_pointer (aarch64_operands
+ operand
->type
))
580 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
581 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
583 Return NIL if more than one expected qualifiers are found. */
585 aarch64_opnd_qualifier_t
586 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t
*qseq_list
,
588 const aarch64_opnd_qualifier_t known_qlf
,
595 When the known qualifier is NIL, we have to assume that there is only
596 one qualifier sequence in the *QSEQ_LIST and return the corresponding
597 qualifier directly. One scenario is that for instruction
598 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
599 which has only one possible valid qualifier sequence
601 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
602 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
604 Because the qualifier NIL has dual roles in the qualifier sequence:
605 it can mean no qualifier for the operand, or the qualifer sequence is
606 not in use (when all qualifiers in the sequence are NILs), we have to
607 handle this special case here. */
608 if (known_qlf
== AARCH64_OPND_NIL
)
610 assert (qseq_list
[0][known_idx
] == AARCH64_OPND_NIL
);
611 return qseq_list
[0][idx
];
614 for (i
= 0, saved_i
= -1; i
< AARCH64_MAX_QLF_SEQ_NUM
; ++i
)
616 if (qseq_list
[i
][known_idx
] == known_qlf
)
619 /* More than one sequences are found to have KNOWN_QLF at
621 return AARCH64_OPND_NIL
;
626 return qseq_list
[saved_i
][idx
];
629 enum operand_qualifier_kind
637 /* Operand qualifier description. */
638 struct operand_qualifier_data
640 /* The usage of the three data fields depends on the qualifier kind. */
647 enum operand_qualifier_kind kind
;
650 /* Indexed by the operand qualifier enumerators. */
651 struct operand_qualifier_data aarch64_opnd_qualifiers
[] =
653 {0, 0, 0, "NIL", OQK_NIL
},
655 /* Operand variant qualifiers.
657 element size, number of elements and common value for encoding. */
659 {4, 1, 0x0, "w", OQK_OPD_VARIANT
},
660 {8, 1, 0x1, "x", OQK_OPD_VARIANT
},
661 {4, 1, 0x0, "wsp", OQK_OPD_VARIANT
},
662 {8, 1, 0x1, "sp", OQK_OPD_VARIANT
},
664 {1, 1, 0x0, "b", OQK_OPD_VARIANT
},
665 {2, 1, 0x1, "h", OQK_OPD_VARIANT
},
666 {4, 1, 0x2, "s", OQK_OPD_VARIANT
},
667 {8, 1, 0x3, "d", OQK_OPD_VARIANT
},
668 {16, 1, 0x4, "q", OQK_OPD_VARIANT
},
670 {1, 8, 0x0, "8b", OQK_OPD_VARIANT
},
671 {1, 16, 0x1, "16b", OQK_OPD_VARIANT
},
672 {2, 2, 0x0, "2h", OQK_OPD_VARIANT
},
673 {2, 4, 0x2, "4h", OQK_OPD_VARIANT
},
674 {2, 8, 0x3, "8h", OQK_OPD_VARIANT
},
675 {4, 2, 0x4, "2s", OQK_OPD_VARIANT
},
676 {4, 4, 0x5, "4s", OQK_OPD_VARIANT
},
677 {8, 1, 0x6, "1d", OQK_OPD_VARIANT
},
678 {8, 2, 0x7, "2d", OQK_OPD_VARIANT
},
679 {16, 1, 0x8, "1q", OQK_OPD_VARIANT
},
681 {0, 0, 0, "z", OQK_OPD_VARIANT
},
682 {0, 0, 0, "m", OQK_OPD_VARIANT
},
684 /* Qualifiers constraining the value range.
686 Lower bound, higher bound, unused. */
688 {0, 7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE
},
689 {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE
},
690 {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE
},
691 {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE
},
692 {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE
},
693 {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE
},
695 /* Qualifiers for miscellaneous purpose.
697 unused, unused and unused. */
702 {0, 0, 0, "retrieving", 0},
705 static inline bfd_boolean
706 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier
)
708 return (aarch64_opnd_qualifiers
[qualifier
].kind
== OQK_OPD_VARIANT
)
712 static inline bfd_boolean
713 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier
)
715 return (aarch64_opnd_qualifiers
[qualifier
].kind
== OQK_VALUE_IN_RANGE
)
720 aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier
)
722 return aarch64_opnd_qualifiers
[qualifier
].desc
;
725 /* Given an operand qualifier, return the expected data element size
726 of a qualified operand. */
728 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier
)
730 assert (operand_variant_qualifier_p (qualifier
) == TRUE
);
731 return aarch64_opnd_qualifiers
[qualifier
].data0
;
735 aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier
)
737 assert (operand_variant_qualifier_p (qualifier
) == TRUE
);
738 return aarch64_opnd_qualifiers
[qualifier
].data1
;
742 aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier
)
744 assert (operand_variant_qualifier_p (qualifier
) == TRUE
);
745 return aarch64_opnd_qualifiers
[qualifier
].data2
;
749 get_lower_bound (aarch64_opnd_qualifier_t qualifier
)
751 assert (qualifier_value_in_range_constraint_p (qualifier
) == TRUE
);
752 return aarch64_opnd_qualifiers
[qualifier
].data0
;
756 get_upper_bound (aarch64_opnd_qualifier_t qualifier
)
758 assert (qualifier_value_in_range_constraint_p (qualifier
) == TRUE
);
759 return aarch64_opnd_qualifiers
[qualifier
].data1
;
764 aarch64_verbose (const char *str
, ...)
775 dump_qualifier_sequence (const aarch64_opnd_qualifier_t
*qualifier
)
779 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
, ++qualifier
)
780 printf ("%s,", aarch64_get_qualifier_name (*qualifier
));
785 dump_match_qualifiers (const struct aarch64_opnd_info
*opnd
,
786 const aarch64_opnd_qualifier_t
*qualifier
)
789 aarch64_opnd_qualifier_t curr
[AARCH64_MAX_OPND_NUM
];
791 aarch64_verbose ("dump_match_qualifiers:");
792 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
793 curr
[i
] = opnd
[i
].qualifier
;
794 dump_qualifier_sequence (curr
);
795 aarch64_verbose ("against");
796 dump_qualifier_sequence (qualifier
);
798 #endif /* DEBUG_AARCH64 */
800 /* TODO improve this, we can have an extra field at the runtime to
801 store the number of operands rather than calculating it every time. */
804 aarch64_num_of_operands (const aarch64_opcode
*opcode
)
807 const enum aarch64_opnd
*opnds
= opcode
->operands
;
808 while (opnds
[i
++] != AARCH64_OPND_NIL
)
811 assert (i
>= 0 && i
<= AARCH64_MAX_OPND_NUM
);
815 /* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
816 If succeeds, fill the found sequence in *RET, return 1; otherwise return 0.
818 N.B. on the entry, it is very likely that only some operands in *INST
819 have had their qualifiers been established.
821 If STOP_AT is not -1, the function will only try to match
822 the qualifier sequence for operands before and including the operand
823 of index STOP_AT; and on success *RET will only be filled with the first
824 (STOP_AT+1) qualifiers.
826 A couple examples of the matching algorithm:
834 Apart from serving the main encoding routine, this can also be called
835 during or after the operand decoding. */
838 aarch64_find_best_match (const aarch64_inst
*inst
,
839 const aarch64_opnd_qualifier_seq_t
*qualifiers_list
,
840 int stop_at
, aarch64_opnd_qualifier_t
*ret
)
844 const aarch64_opnd_qualifier_t
*qualifiers
;
846 num_opnds
= aarch64_num_of_operands (inst
->opcode
);
849 DEBUG_TRACE ("SUCCEED: no operand");
853 if (stop_at
< 0 || stop_at
>= num_opnds
)
854 stop_at
= num_opnds
- 1;
856 /* For each pattern. */
857 for (i
= 0; i
< AARCH64_MAX_QLF_SEQ_NUM
; ++i
, ++qualifiers_list
)
860 qualifiers
= *qualifiers_list
;
862 /* Start as positive. */
865 DEBUG_TRACE ("%d", i
);
868 dump_match_qualifiers (inst
->operands
, qualifiers
);
871 /* Most opcodes has much fewer patterns in the list.
872 First NIL qualifier indicates the end in the list. */
873 if (empty_qualifier_sequence_p (qualifiers
) == TRUE
)
875 DEBUG_TRACE_IF (i
== 0, "SUCCEED: empty qualifier list");
881 for (j
= 0; j
< num_opnds
&& j
<= stop_at
; ++j
, ++qualifiers
)
883 if (inst
->operands
[j
].qualifier
== AARCH64_OPND_QLF_NIL
)
885 /* Either the operand does not have qualifier, or the qualifier
886 for the operand needs to be deduced from the qualifier
888 In the latter case, any constraint checking related with
889 the obtained qualifier should be done later in
890 operand_general_constraint_met_p. */
893 else if (*qualifiers
!= inst
->operands
[j
].qualifier
)
895 /* Unless the target qualifier can also qualify the operand
896 (which has already had a non-nil qualifier), non-equal
897 qualifiers are generally un-matched. */
898 if (operand_also_qualified_p (inst
->operands
+ j
, *qualifiers
))
907 continue; /* Equal qualifiers are certainly matched. */
910 /* Qualifiers established. */
917 /* Fill the result in *RET. */
919 qualifiers
= *qualifiers_list
;
921 DEBUG_TRACE ("complete qualifiers using list %d", i
);
924 dump_qualifier_sequence (qualifiers
);
927 for (j
= 0; j
<= stop_at
; ++j
, ++qualifiers
)
928 ret
[j
] = *qualifiers
;
929 for (; j
< AARCH64_MAX_OPND_NUM
; ++j
)
930 ret
[j
] = AARCH64_OPND_QLF_NIL
;
932 DEBUG_TRACE ("SUCCESS");
936 DEBUG_TRACE ("FAIL");
940 /* Operand qualifier matching and resolving.
942 Return 1 if the operand qualifier(s) in *INST match one of the qualifier
943 sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
945 if UPDATE_P == TRUE, update the qualifier(s) in *INST after the matching
949 match_operands_qualifier (aarch64_inst
*inst
, bfd_boolean update_p
)
952 aarch64_opnd_qualifier_seq_t qualifiers
;
954 if (!aarch64_find_best_match (inst
, inst
->opcode
->qualifiers_list
, -1,
957 DEBUG_TRACE ("matching FAIL");
961 if (inst
->opcode
->flags
& F_STRICT
)
963 /* Require an exact qualifier match, even for NIL qualifiers. */
964 nops
= aarch64_num_of_operands (inst
->opcode
);
965 for (i
= 0; i
< nops
; ++i
)
966 if (inst
->operands
[i
].qualifier
!= qualifiers
[i
])
970 /* Update the qualifiers. */
971 if (update_p
== TRUE
)
972 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
974 if (inst
->opcode
->operands
[i
] == AARCH64_OPND_NIL
)
976 DEBUG_TRACE_IF (inst
->operands
[i
].qualifier
!= qualifiers
[i
],
977 "update %s with %s for operand %d",
978 aarch64_get_qualifier_name (inst
->operands
[i
].qualifier
),
979 aarch64_get_qualifier_name (qualifiers
[i
]), i
);
980 inst
->operands
[i
].qualifier
= qualifiers
[i
];
983 DEBUG_TRACE ("matching SUCCESS");
987 /* Return TRUE if VALUE is a wide constant that can be moved into a general
990 IS32 indicates whether value is a 32-bit immediate or not.
991 If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
992 amount will be returned in *SHIFT_AMOUNT. */
995 aarch64_wide_constant_p (int64_t value
, int is32
, unsigned int *shift_amount
)
999 DEBUG_TRACE ("enter with 0x%" PRIx64
"(%" PRIi64
")", value
, value
);
1003 /* Allow all zeros or all ones in top 32-bits, so that
1004 32-bit constant expressions like ~0x80000000 are
1006 uint64_t ext
= value
;
1007 if (ext
>> 32 != 0 && ext
>> 32 != (uint64_t) 0xffffffff)
1008 /* Immediate out of range. */
1010 value
&= (int64_t) 0xffffffff;
1013 /* first, try movz then movn */
1015 if ((value
& ((int64_t) 0xffff << 0)) == value
)
1017 else if ((value
& ((int64_t) 0xffff << 16)) == value
)
1019 else if (!is32
&& (value
& ((int64_t) 0xffff << 32)) == value
)
1021 else if (!is32
&& (value
& ((int64_t) 0xffff << 48)) == value
)
1026 DEBUG_TRACE ("exit FALSE with 0x%" PRIx64
"(%" PRIi64
")", value
, value
);
1030 if (shift_amount
!= NULL
)
1031 *shift_amount
= amount
;
1033 DEBUG_TRACE ("exit TRUE with amount %d", amount
);
1038 /* Build the accepted values for immediate logical SIMD instructions.
1040 The standard encodings of the immediate value are:
1041 N imms immr SIMD size R S
1042 1 ssssss rrrrrr 64 UInt(rrrrrr) UInt(ssssss)
1043 0 0sssss 0rrrrr 32 UInt(rrrrr) UInt(sssss)
1044 0 10ssss 00rrrr 16 UInt(rrrr) UInt(ssss)
1045 0 110sss 000rrr 8 UInt(rrr) UInt(sss)
1046 0 1110ss 0000rr 4 UInt(rr) UInt(ss)
1047 0 11110s 00000r 2 UInt(r) UInt(s)
1048 where all-ones value of S is reserved.
1050 Let's call E the SIMD size.
1052 The immediate value is: S+1 bits '1' rotated to the right by R.
1054 The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
1055 (remember S != E - 1). */
1057 #define TOTAL_IMM_NB 5334
1062 aarch64_insn encoding
;
1063 } simd_imm_encoding
;
1065 static simd_imm_encoding simd_immediates
[TOTAL_IMM_NB
];
1068 simd_imm_encoding_cmp(const void *i1
, const void *i2
)
1070 const simd_imm_encoding
*imm1
= (const simd_imm_encoding
*)i1
;
1071 const simd_imm_encoding
*imm2
= (const simd_imm_encoding
*)i2
;
1073 if (imm1
->imm
< imm2
->imm
)
1075 if (imm1
->imm
> imm2
->imm
)
/* immediate bitfield standard encoding
   imm13<12> imm13<5:0> imm13<11:6> SIMD size R      S
   1         ssssss     rrrrrr       64       rrrrrr ssssss
   0         0sssss     0rrrrr       32       rrrrr  sssss
   0         10ssss     00rrrr       16       rrrr   ssss
   0         110sss     000rrr        8       rrr    sss
   0         1110ss     0000rr        4       rr     ss
   0         11110s     00000r        2       r      s  */

/* Pack IS64 (N bit), rotation R (immr) and size/length S (imms) into the
   13-bit N:immr:imms layout above.  */
static inline int
encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
{
  return (is64 << 12) | (r << 6) | s;
}
1095 build_immediate_table (void)
1097 uint32_t log_e
, e
, s
, r
, s_mask
;
1103 for (log_e
= 1; log_e
<= 6; log_e
++)
1105 /* Get element size. */
1110 mask
= 0xffffffffffffffffull
;
1116 mask
= (1ull << e
) - 1;
1118 1 ((1 << 4) - 1) << 2 = 111100
1119 2 ((1 << 3) - 1) << 3 = 111000
1120 3 ((1 << 2) - 1) << 4 = 110000
1121 4 ((1 << 1) - 1) << 5 = 100000
1122 5 ((1 << 0) - 1) << 6 = 000000 */
1123 s_mask
= ((1u << (5 - log_e
)) - 1) << (log_e
+ 1);
1125 for (s
= 0; s
< e
- 1; s
++)
1126 for (r
= 0; r
< e
; r
++)
1128 /* s+1 consecutive bits to 1 (s < 63) */
1129 imm
= (1ull << (s
+ 1)) - 1;
1130 /* rotate right by r */
1132 imm
= (imm
>> r
) | ((imm
<< (e
- r
)) & mask
);
1133 /* replicate the constant depending on SIMD size */
1136 case 1: imm
= (imm
<< 2) | imm
;
1137 case 2: imm
= (imm
<< 4) | imm
;
1138 case 3: imm
= (imm
<< 8) | imm
;
1139 case 4: imm
= (imm
<< 16) | imm
;
1140 case 5: imm
= (imm
<< 32) | imm
;
1144 simd_immediates
[nb_imms
].imm
= imm
;
1145 simd_immediates
[nb_imms
].encoding
=
1146 encode_immediate_bitfield(is64
, s
| s_mask
, r
);
1150 assert (nb_imms
== TOTAL_IMM_NB
);
1151 qsort(simd_immediates
, nb_imms
,
1152 sizeof(simd_immediates
[0]), simd_imm_encoding_cmp
);
1155 /* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
1156 be accepted by logical (immediate) instructions
1157 e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1159 ESIZE is the number of bytes in the decoded immediate value.
1160 If ENCODING is not NULL, on the return of TRUE, the standard encoding for
1161 VALUE will be returned in *ENCODING. */
1164 aarch64_logical_immediate_p (uint64_t value
, int esize
, aarch64_insn
*encoding
)
1166 simd_imm_encoding imm_enc
;
1167 const simd_imm_encoding
*imm_encoding
;
1168 static bfd_boolean initialized
= FALSE
;
1172 DEBUG_TRACE ("enter with 0x%" PRIx64
"(%" PRIi64
"), is32: %d", value
,
1175 if (initialized
== FALSE
)
1177 build_immediate_table ();
1181 /* Allow all zeros or all ones in top bits, so that
1182 constant expressions like ~1 are permitted. */
1183 upper
= (uint64_t) -1 << (esize
* 4) << (esize
* 4);
1184 if ((value
& ~upper
) != value
&& (value
| upper
) != value
)
1187 /* Replicate to a full 64-bit value. */
1189 for (i
= esize
* 8; i
< 64; i
*= 2)
1190 value
|= (value
<< i
);
1192 imm_enc
.imm
= value
;
1193 imm_encoding
= (const simd_imm_encoding
*)
1194 bsearch(&imm_enc
, simd_immediates
, TOTAL_IMM_NB
,
1195 sizeof(simd_immediates
[0]), simd_imm_encoding_cmp
);
1196 if (imm_encoding
== NULL
)
1198 DEBUG_TRACE ("exit with FALSE");
1201 if (encoding
!= NULL
)
1202 *encoding
= imm_encoding
->encoding
;
1203 DEBUG_TRACE ("exit with TRUE");
/* If 64-bit immediate IMM is in the format of
   "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
   where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
   of value "abcdefgh".  Otherwise return -1.  */
int
aarch64_shrink_expanded_imm8 (uint64_t imm)
{
  int i, ret;
  uint32_t byte;

  ret = 0;
  for (i = 0; i < 8; i++)
    {
      byte = (imm >> (8 * i)) & 0xff;
      if (byte == 0xff)
	ret |= 1 << i;
      else if (byte != 0x00)
	/* Each byte must be all-ones or all-zeros.  */
	return -1;
    }
  return ret;
}
1229 /* Utility inline functions for operand_general_constraint_met_p. */
1232 set_error (aarch64_operand_error
*mismatch_detail
,
1233 enum aarch64_operand_error_kind kind
, int idx
,
1236 if (mismatch_detail
== NULL
)
1238 mismatch_detail
->kind
= kind
;
1239 mismatch_detail
->index
= idx
;
1240 mismatch_detail
->error
= error
;
1244 set_syntax_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1247 if (mismatch_detail
== NULL
)
1249 set_error (mismatch_detail
, AARCH64_OPDE_SYNTAX_ERROR
, idx
, error
);
1253 set_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1254 int idx
, int lower_bound
, int upper_bound
,
1257 if (mismatch_detail
== NULL
)
1259 set_error (mismatch_detail
, AARCH64_OPDE_OUT_OF_RANGE
, idx
, error
);
1260 mismatch_detail
->data
[0] = lower_bound
;
1261 mismatch_detail
->data
[1] = upper_bound
;
1265 set_imm_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1266 int idx
, int lower_bound
, int upper_bound
)
1268 if (mismatch_detail
== NULL
)
1270 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1271 _("immediate value"));
1275 set_offset_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1276 int idx
, int lower_bound
, int upper_bound
)
1278 if (mismatch_detail
== NULL
)
1280 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1281 _("immediate offset"));
1285 set_regno_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1286 int idx
, int lower_bound
, int upper_bound
)
1288 if (mismatch_detail
== NULL
)
1290 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1291 _("register number"));
1295 set_elem_idx_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1296 int idx
, int lower_bound
, int upper_bound
)
1298 if (mismatch_detail
== NULL
)
1300 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1301 _("register element index"));
1305 set_sft_amount_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1306 int idx
, int lower_bound
, int upper_bound
)
1308 if (mismatch_detail
== NULL
)
1310 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1314 /* Report that the MUL modifier in operand IDX should be in the range
1315 [LOWER_BOUND, UPPER_BOUND]. */
1317 set_multiplier_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1318 int idx
, int lower_bound
, int upper_bound
)
1320 if (mismatch_detail
== NULL
)
1322 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1327 set_unaligned_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1330 if (mismatch_detail
== NULL
)
1332 set_error (mismatch_detail
, AARCH64_OPDE_UNALIGNED
, idx
, NULL
);
1333 mismatch_detail
->data
[0] = alignment
;
1337 set_reg_list_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1340 if (mismatch_detail
== NULL
)
1342 set_error (mismatch_detail
, AARCH64_OPDE_REG_LIST
, idx
, NULL
);
1343 mismatch_detail
->data
[0] = expected_num
;
1347 set_other_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1350 if (mismatch_detail
== NULL
)
1352 set_error (mismatch_detail
, AARCH64_OPDE_OTHER_ERROR
, idx
, error
);
1355 /* General constraint checking based on operand code.
1357 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1358 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1360 This function has to be called after the qualifiers for all operands
1363 Mismatching error message is returned in *MISMATCH_DETAIL upon request,
1364 i.e. when MISMATCH_DETAIL is non-NULL. This avoids the generation
1365 of error message during the disassembling where error message is not
1366 wanted. We avoid the dynamic construction of strings of error messages
1367 here (i.e. in libopcodes), as it is costly and complicated; instead, we
1368 use a combination of error code, static string and some integer data to
1369 represent an error. */
1372 operand_general_constraint_met_p (const aarch64_opnd_info
*opnds
, int idx
,
1373 enum aarch64_opnd type
,
1374 const aarch64_opcode
*opcode
,
1375 aarch64_operand_error
*mismatch_detail
)
1377 unsigned num
, modifiers
;
1379 int64_t imm
, min_value
, max_value
;
1380 const aarch64_opnd_info
*opnd
= opnds
+ idx
;
1381 aarch64_opnd_qualifier_t qualifier
= opnd
->qualifier
;
1383 assert (opcode
->operands
[idx
] == opnd
->type
&& opnd
->type
== type
);
1385 switch (aarch64_operands
[type
].op_class
)
1387 case AARCH64_OPND_CLASS_INT_REG
:
1388 /* Check pair reg constraints for cas* instructions. */
1389 if (type
== AARCH64_OPND_PAIRREG
)
1391 assert (idx
== 1 || idx
== 3);
1392 if (opnds
[idx
- 1].reg
.regno
% 2 != 0)
1394 set_syntax_error (mismatch_detail
, idx
- 1,
1395 _("reg pair must start from even reg"));
1398 if (opnds
[idx
].reg
.regno
!= opnds
[idx
- 1].reg
.regno
+ 1)
1400 set_syntax_error (mismatch_detail
, idx
,
1401 _("reg pair must be contiguous"));
1407 /* <Xt> may be optional in some IC and TLBI instructions. */
1408 if (type
== AARCH64_OPND_Rt_SYS
)
1410 assert (idx
== 1 && (aarch64_get_operand_class (opnds
[0].type
)
1411 == AARCH64_OPND_CLASS_SYSTEM
));
1412 if (opnds
[1].present
1413 && !aarch64_sys_ins_reg_has_xt (opnds
[0].sysins_op
))
1415 set_other_error (mismatch_detail
, idx
, _("extraneous register"));
1418 if (!opnds
[1].present
1419 && aarch64_sys_ins_reg_has_xt (opnds
[0].sysins_op
))
1421 set_other_error (mismatch_detail
, idx
, _("missing register"));
1427 case AARCH64_OPND_QLF_WSP
:
1428 case AARCH64_OPND_QLF_SP
:
1429 if (!aarch64_stack_pointer_p (opnd
))
1431 set_other_error (mismatch_detail
, idx
,
1432 _("stack pointer register expected"));
1441 case AARCH64_OPND_CLASS_SVE_REG
:
1444 case AARCH64_OPND_SVE_Zn_INDEX
:
1445 size
= aarch64_get_qualifier_esize (opnd
->qualifier
);
1446 if (!value_in_range_p (opnd
->reglane
.index
, 0, 64 / size
- 1))
1448 set_elem_idx_out_of_range_error (mismatch_detail
, idx
,
1454 case AARCH64_OPND_SVE_ZnxN
:
1455 case AARCH64_OPND_SVE_ZtxN
:
1456 if (opnd
->reglist
.num_regs
!= get_opcode_dependent_value (opcode
))
1458 set_other_error (mismatch_detail
, idx
,
1459 _("invalid register list"));
1469 case AARCH64_OPND_CLASS_PRED_REG
:
1470 if (opnd
->reg
.regno
>= 8
1471 && get_operand_fields_width (get_operand_from_code (type
)) == 3)
1473 set_other_error (mismatch_detail
, idx
, _("p0-p7 expected"));
1478 case AARCH64_OPND_CLASS_COND
:
1479 if (type
== AARCH64_OPND_COND1
1480 && (opnds
[idx
].cond
->value
& 0xe) == 0xe)
1482 /* Not allow AL or NV. */
1483 set_syntax_error (mismatch_detail
, idx
, NULL
);
1487 case AARCH64_OPND_CLASS_ADDRESS
:
1488 /* Check writeback. */
1489 switch (opcode
->iclass
)
1493 case ldstnapair_offs
:
1496 if (opnd
->addr
.writeback
== 1)
1498 set_syntax_error (mismatch_detail
, idx
,
1499 _("unexpected address writeback"));
1504 case ldstpair_indexed
:
1507 if (opnd
->addr
.writeback
== 0)
1509 set_syntax_error (mismatch_detail
, idx
,
1510 _("address writeback expected"));
1515 assert (opnd
->addr
.writeback
== 0);
1520 case AARCH64_OPND_ADDR_SIMM7
:
1521 /* Scaled signed 7 bits immediate offset. */
1522 /* Get the size of the data element that is accessed, which may be
1523 different from that of the source register size,
1524 e.g. in strb/ldrb. */
1525 size
= aarch64_get_qualifier_esize (opnd
->qualifier
);
1526 if (!value_in_range_p (opnd
->addr
.offset
.imm
, -64 * size
, 63 * size
))
1528 set_offset_out_of_range_error (mismatch_detail
, idx
,
1529 -64 * size
, 63 * size
);
1532 if (!value_aligned_p (opnd
->addr
.offset
.imm
, size
))
1534 set_unaligned_error (mismatch_detail
, idx
, size
);
1538 case AARCH64_OPND_ADDR_SIMM9
:
1539 /* Unscaled signed 9 bits immediate offset. */
1540 if (!value_in_range_p (opnd
->addr
.offset
.imm
, -256, 255))
1542 set_offset_out_of_range_error (mismatch_detail
, idx
, -256, 255);
1547 case AARCH64_OPND_ADDR_SIMM9_2
:
1548 /* Unscaled signed 9 bits immediate offset, which has to be negative
1550 size
= aarch64_get_qualifier_esize (qualifier
);
1551 if ((value_in_range_p (opnd
->addr
.offset
.imm
, 0, 255)
1552 && !value_aligned_p (opnd
->addr
.offset
.imm
, size
))
1553 || value_in_range_p (opnd
->addr
.offset
.imm
, -256, -1))
1555 set_other_error (mismatch_detail
, idx
,
1556 _("negative or unaligned offset expected"));
1559 case AARCH64_OPND_SIMD_ADDR_POST
:
1560 /* AdvSIMD load/store multiple structures, post-index. */
1562 if (opnd
->addr
.offset
.is_reg
)
1564 if (value_in_range_p (opnd
->addr
.offset
.regno
, 0, 30))
1568 set_other_error (mismatch_detail
, idx
,
1569 _("invalid register offset"));
1575 const aarch64_opnd_info
*prev
= &opnds
[idx
-1];
1576 unsigned num_bytes
; /* total number of bytes transferred. */
1577 /* The opcode dependent area stores the number of elements in
1578 each structure to be loaded/stored. */
1579 int is_ld1r
= get_opcode_dependent_value (opcode
) == 1;
1580 if (opcode
->operands
[0] == AARCH64_OPND_LVt_AL
)
1581 /* Special handling of loading single structure to all lane. */
1582 num_bytes
= (is_ld1r
? 1 : prev
->reglist
.num_regs
)
1583 * aarch64_get_qualifier_esize (prev
->qualifier
);
1585 num_bytes
= prev
->reglist
.num_regs
1586 * aarch64_get_qualifier_esize (prev
->qualifier
)
1587 * aarch64_get_qualifier_nelem (prev
->qualifier
);
1588 if ((int) num_bytes
!= opnd
->addr
.offset
.imm
)
1590 set_other_error (mismatch_detail
, idx
,
1591 _("invalid post-increment amount"));
1597 case AARCH64_OPND_ADDR_REGOFF
:
1598 /* Get the size of the data element that is accessed, which may be
1599 different from that of the source register size,
1600 e.g. in strb/ldrb. */
1601 size
= aarch64_get_qualifier_esize (opnd
->qualifier
);
1602 /* It is either no shift or shift by the binary logarithm of SIZE. */
1603 if (opnd
->shifter
.amount
!= 0
1604 && opnd
->shifter
.amount
!= (int)get_logsz (size
))
1606 set_other_error (mismatch_detail
, idx
,
1607 _("invalid shift amount"));
1610 /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
1612 switch (opnd
->shifter
.kind
)
1614 case AARCH64_MOD_UXTW
:
1615 case AARCH64_MOD_LSL
:
1616 case AARCH64_MOD_SXTW
:
1617 case AARCH64_MOD_SXTX
: break;
1619 set_other_error (mismatch_detail
, idx
,
1620 _("invalid extend/shift operator"));
1625 case AARCH64_OPND_ADDR_UIMM12
:
1626 imm
= opnd
->addr
.offset
.imm
;
1627 /* Get the size of the data element that is accessed, which may be
1628 different from that of the source register size,
1629 e.g. in strb/ldrb. */
1630 size
= aarch64_get_qualifier_esize (qualifier
);
1631 if (!value_in_range_p (opnd
->addr
.offset
.imm
, 0, 4095 * size
))
1633 set_offset_out_of_range_error (mismatch_detail
, idx
,
1637 if (!value_aligned_p (opnd
->addr
.offset
.imm
, size
))
1639 set_unaligned_error (mismatch_detail
, idx
, size
);
1644 case AARCH64_OPND_ADDR_PCREL14
:
1645 case AARCH64_OPND_ADDR_PCREL19
:
1646 case AARCH64_OPND_ADDR_PCREL21
:
1647 case AARCH64_OPND_ADDR_PCREL26
:
1648 imm
= opnd
->imm
.value
;
1649 if (operand_need_shift_by_two (get_operand_from_code (type
)))
1651 /* The offset value in a PC-relative branch instruction is alway
1652 4-byte aligned and is encoded without the lowest 2 bits. */
1653 if (!value_aligned_p (imm
, 4))
1655 set_unaligned_error (mismatch_detail
, idx
, 4);
1658 /* Right shift by 2 so that we can carry out the following check
1662 size
= get_operand_fields_width (get_operand_from_code (type
));
1663 if (!value_fit_signed_field_p (imm
, size
))
1665 set_other_error (mismatch_detail
, idx
,
1666 _("immediate out of range"));
1671 case AARCH64_OPND_SVE_ADDR_RI_S4xVL
:
1672 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL
:
1673 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL
:
1674 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL
:
1678 assert (!opnd
->addr
.offset
.is_reg
);
1679 assert (opnd
->addr
.preind
);
1680 num
= 1 + get_operand_specific_data (&aarch64_operands
[type
]);
1683 if ((opnd
->addr
.offset
.imm
!= 0 && !opnd
->shifter
.operator_present
)
1684 || (opnd
->shifter
.operator_present
1685 && opnd
->shifter
.kind
!= AARCH64_MOD_MUL_VL
))
1687 set_other_error (mismatch_detail
, idx
,
1688 _("invalid addressing mode"));
1691 if (!value_in_range_p (opnd
->addr
.offset
.imm
, min_value
, max_value
))
1693 set_offset_out_of_range_error (mismatch_detail
, idx
,
1694 min_value
, max_value
);
1697 if (!value_aligned_p (opnd
->addr
.offset
.imm
, num
))
1699 set_unaligned_error (mismatch_detail
, idx
, num
);
1704 case AARCH64_OPND_SVE_ADDR_RI_S6xVL
:
1707 goto sve_imm_offset_vl
;
1709 case AARCH64_OPND_SVE_ADDR_RI_S9xVL
:
1712 goto sve_imm_offset_vl
;
1714 case AARCH64_OPND_SVE_ADDR_RI_U6
:
1715 case AARCH64_OPND_SVE_ADDR_RI_U6x2
:
1716 case AARCH64_OPND_SVE_ADDR_RI_U6x4
:
1717 case AARCH64_OPND_SVE_ADDR_RI_U6x8
:
1721 assert (!opnd
->addr
.offset
.is_reg
);
1722 assert (opnd
->addr
.preind
);
1723 num
= 1 << get_operand_specific_data (&aarch64_operands
[type
]);
1726 if (opnd
->shifter
.operator_present
1727 || opnd
->shifter
.amount_present
)
1729 set_other_error (mismatch_detail
, idx
,
1730 _("invalid addressing mode"));
1733 if (!value_in_range_p (opnd
->addr
.offset
.imm
, min_value
, max_value
))
1735 set_offset_out_of_range_error (mismatch_detail
, idx
,
1736 min_value
, max_value
);
1739 if (!value_aligned_p (opnd
->addr
.offset
.imm
, num
))
1741 set_unaligned_error (mismatch_detail
, idx
, num
);
1746 case AARCH64_OPND_SVE_ADDR_RR
:
1747 case AARCH64_OPND_SVE_ADDR_RR_LSL1
:
1748 case AARCH64_OPND_SVE_ADDR_RR_LSL2
:
1749 case AARCH64_OPND_SVE_ADDR_RR_LSL3
:
1750 case AARCH64_OPND_SVE_ADDR_RX
:
1751 case AARCH64_OPND_SVE_ADDR_RX_LSL1
:
1752 case AARCH64_OPND_SVE_ADDR_RX_LSL2
:
1753 case AARCH64_OPND_SVE_ADDR_RX_LSL3
:
1754 case AARCH64_OPND_SVE_ADDR_RZ
:
1755 case AARCH64_OPND_SVE_ADDR_RZ_LSL1
:
1756 case AARCH64_OPND_SVE_ADDR_RZ_LSL2
:
1757 case AARCH64_OPND_SVE_ADDR_RZ_LSL3
:
1758 modifiers
= 1 << AARCH64_MOD_LSL
;
1760 assert (opnd
->addr
.offset
.is_reg
);
1761 assert (opnd
->addr
.preind
);
1762 if ((aarch64_operands
[type
].flags
& OPD_F_NO_ZR
) != 0
1763 && opnd
->addr
.offset
.regno
== 31)
1765 set_other_error (mismatch_detail
, idx
,
1766 _("index register xzr is not allowed"));
1769 if (((1 << opnd
->shifter
.kind
) & modifiers
) == 0
1770 || (opnd
->shifter
.amount
1771 != get_operand_specific_data (&aarch64_operands
[type
])))
1773 set_other_error (mismatch_detail
, idx
,
1774 _("invalid addressing mode"));
1779 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14
:
1780 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22
:
1781 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14
:
1782 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22
:
1783 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14
:
1784 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22
:
1785 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14
:
1786 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22
:
1787 modifiers
= (1 << AARCH64_MOD_SXTW
) | (1 << AARCH64_MOD_UXTW
);
1788 goto sve_rr_operand
;
1790 case AARCH64_OPND_SVE_ADDR_ZI_U5
:
1791 case AARCH64_OPND_SVE_ADDR_ZI_U5x2
:
1792 case AARCH64_OPND_SVE_ADDR_ZI_U5x4
:
1793 case AARCH64_OPND_SVE_ADDR_ZI_U5x8
:
1796 goto sve_imm_offset
;
1798 case AARCH64_OPND_SVE_ADDR_ZZ_LSL
:
1799 modifiers
= 1 << AARCH64_MOD_LSL
;
1801 assert (opnd
->addr
.offset
.is_reg
);
1802 assert (opnd
->addr
.preind
);
1803 if (((1 << opnd
->shifter
.kind
) & modifiers
) == 0
1804 || opnd
->shifter
.amount
< 0
1805 || opnd
->shifter
.amount
> 3)
1807 set_other_error (mismatch_detail
, idx
,
1808 _("invalid addressing mode"));
1813 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW
:
1814 modifiers
= (1 << AARCH64_MOD_SXTW
);
1815 goto sve_zz_operand
;
1817 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW
:
1818 modifiers
= 1 << AARCH64_MOD_UXTW
;
1819 goto sve_zz_operand
;
1826 case AARCH64_OPND_CLASS_SIMD_REGLIST
:
1827 if (type
== AARCH64_OPND_LEt
)
1829 /* Get the upper bound for the element index. */
1830 num
= 16 / aarch64_get_qualifier_esize (qualifier
) - 1;
1831 if (!value_in_range_p (opnd
->reglist
.index
, 0, num
))
1833 set_elem_idx_out_of_range_error (mismatch_detail
, idx
, 0, num
);
1837 /* The opcode dependent area stores the number of elements in
1838 each structure to be loaded/stored. */
1839 num
= get_opcode_dependent_value (opcode
);
1842 case AARCH64_OPND_LVt
:
1843 assert (num
>= 1 && num
<= 4);
1844 /* Unless LD1/ST1, the number of registers should be equal to that
1845 of the structure elements. */
1846 if (num
!= 1 && opnd
->reglist
.num_regs
!= num
)
1848 set_reg_list_error (mismatch_detail
, idx
, num
);
1852 case AARCH64_OPND_LVt_AL
:
1853 case AARCH64_OPND_LEt
:
1854 assert (num
>= 1 && num
<= 4);
1855 /* The number of registers should be equal to that of the structure
1857 if (opnd
->reglist
.num_regs
!= num
)
1859 set_reg_list_error (mismatch_detail
, idx
, num
);
1868 case AARCH64_OPND_CLASS_IMMEDIATE
:
1869 /* Constraint check on immediate operand. */
1870 imm
= opnd
->imm
.value
;
1871 /* E.g. imm_0_31 constrains value to be 0..31. */
1872 if (qualifier_value_in_range_constraint_p (qualifier
)
1873 && !value_in_range_p (imm
, get_lower_bound (qualifier
),
1874 get_upper_bound (qualifier
)))
1876 set_imm_out_of_range_error (mismatch_detail
, idx
,
1877 get_lower_bound (qualifier
),
1878 get_upper_bound (qualifier
));
1884 case AARCH64_OPND_AIMM
:
1885 if (opnd
->shifter
.kind
!= AARCH64_MOD_LSL
)
1887 set_other_error (mismatch_detail
, idx
,
1888 _("invalid shift operator"));
1891 if (opnd
->shifter
.amount
!= 0 && opnd
->shifter
.amount
!= 12)
1893 set_other_error (mismatch_detail
, idx
,
1894 _("shift amount expected to be 0 or 12"));
1897 if (!value_fit_unsigned_field_p (opnd
->imm
.value
, 12))
1899 set_other_error (mismatch_detail
, idx
,
1900 _("immediate out of range"));
1905 case AARCH64_OPND_HALF
:
1906 assert (idx
== 1 && opnds
[0].type
== AARCH64_OPND_Rd
);
1907 if (opnd
->shifter
.kind
!= AARCH64_MOD_LSL
)
1909 set_other_error (mismatch_detail
, idx
,
1910 _("invalid shift operator"));
1913 size
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
1914 if (!value_aligned_p (opnd
->shifter
.amount
, 16))
1916 set_other_error (mismatch_detail
, idx
,
1917 _("shift amount should be a multiple of 16"));
1920 if (!value_in_range_p (opnd
->shifter
.amount
, 0, size
* 8 - 16))
1922 set_sft_amount_out_of_range_error (mismatch_detail
, idx
,
1926 if (opnd
->imm
.value
< 0)
1928 set_other_error (mismatch_detail
, idx
,
1929 _("negative immediate value not allowed"));
1932 if (!value_fit_unsigned_field_p (opnd
->imm
.value
, 16))
1934 set_other_error (mismatch_detail
, idx
,
1935 _("immediate out of range"));
1940 case AARCH64_OPND_IMM_MOV
:
1942 int esize
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
1943 imm
= opnd
->imm
.value
;
1947 case OP_MOV_IMM_WIDEN
:
1949 /* Fall through... */
1950 case OP_MOV_IMM_WIDE
:
1951 if (!aarch64_wide_constant_p (imm
, esize
== 4, NULL
))
1953 set_other_error (mismatch_detail
, idx
,
1954 _("immediate out of range"));
1958 case OP_MOV_IMM_LOG
:
1959 if (!aarch64_logical_immediate_p (imm
, esize
, NULL
))
1961 set_other_error (mismatch_detail
, idx
,
1962 _("immediate out of range"));
1973 case AARCH64_OPND_NZCV
:
1974 case AARCH64_OPND_CCMP_IMM
:
1975 case AARCH64_OPND_EXCEPTION
:
1976 case AARCH64_OPND_UIMM4
:
1977 case AARCH64_OPND_UIMM7
:
1978 case AARCH64_OPND_UIMM3_OP1
:
1979 case AARCH64_OPND_UIMM3_OP2
:
1980 size
= get_operand_fields_width (get_operand_from_code (type
));
1982 if (!value_fit_unsigned_field_p (opnd
->imm
.value
, size
))
1984 set_imm_out_of_range_error (mismatch_detail
, idx
, 0,
1990 case AARCH64_OPND_WIDTH
:
1991 assert (idx
> 1 && opnds
[idx
-1].type
== AARCH64_OPND_IMM
1992 && opnds
[0].type
== AARCH64_OPND_Rd
);
1993 size
= get_upper_bound (qualifier
);
1994 if (opnd
->imm
.value
+ opnds
[idx
-1].imm
.value
> size
)
1995 /* lsb+width <= reg.size */
1997 set_imm_out_of_range_error (mismatch_detail
, idx
, 1,
1998 size
- opnds
[idx
-1].imm
.value
);
2003 case AARCH64_OPND_LIMM
:
2005 int esize
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
2006 uint64_t uimm
= opnd
->imm
.value
;
2007 if (opcode
->op
== OP_BIC
)
2009 if (aarch64_logical_immediate_p (uimm
, esize
, NULL
) == FALSE
)
2011 set_other_error (mismatch_detail
, idx
,
2012 _("immediate out of range"));
2018 case AARCH64_OPND_IMM0
:
2019 case AARCH64_OPND_FPIMM0
:
2020 if (opnd
->imm
.value
!= 0)
2022 set_other_error (mismatch_detail
, idx
,
2023 _("immediate zero expected"));
2028 case AARCH64_OPND_SHLL_IMM
:
2030 size
= 8 * aarch64_get_qualifier_esize (opnds
[idx
- 1].qualifier
);
2031 if (opnd
->imm
.value
!= size
)
2033 set_other_error (mismatch_detail
, idx
,
2034 _("invalid shift amount"));
2039 case AARCH64_OPND_IMM_VLSL
:
2040 size
= aarch64_get_qualifier_esize (qualifier
);
2041 if (!value_in_range_p (opnd
->imm
.value
, 0, size
* 8 - 1))
2043 set_imm_out_of_range_error (mismatch_detail
, idx
, 0,
2049 case AARCH64_OPND_IMM_VLSR
:
2050 size
= aarch64_get_qualifier_esize (qualifier
);
2051 if (!value_in_range_p (opnd
->imm
.value
, 1, size
* 8))
2053 set_imm_out_of_range_error (mismatch_detail
, idx
, 1, size
* 8);
2058 case AARCH64_OPND_SIMD_IMM
:
2059 case AARCH64_OPND_SIMD_IMM_SFT
:
2060 /* Qualifier check. */
2063 case AARCH64_OPND_QLF_LSL
:
2064 if (opnd
->shifter
.kind
!= AARCH64_MOD_LSL
)
2066 set_other_error (mismatch_detail
, idx
,
2067 _("invalid shift operator"));
2071 case AARCH64_OPND_QLF_MSL
:
2072 if (opnd
->shifter
.kind
!= AARCH64_MOD_MSL
)
2074 set_other_error (mismatch_detail
, idx
,
2075 _("invalid shift operator"));
2079 case AARCH64_OPND_QLF_NIL
:
2080 if (opnd
->shifter
.kind
!= AARCH64_MOD_NONE
)
2082 set_other_error (mismatch_detail
, idx
,
2083 _("shift is not permitted"));
2091 /* Is the immediate valid? */
2093 if (aarch64_get_qualifier_esize (opnds
[0].qualifier
) != 8)
2095 /* uimm8 or simm8 */
2096 if (!value_in_range_p (opnd
->imm
.value
, -128, 255))
2098 set_imm_out_of_range_error (mismatch_detail
, idx
, -128, 255);
2102 else if (aarch64_shrink_expanded_imm8 (opnd
->imm
.value
) < 0)
2105 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
2106 ffffffffgggggggghhhhhhhh'. */
2107 set_other_error (mismatch_detail
, idx
,
2108 _("invalid value for immediate"));
2111 /* Is the shift amount valid? */
2112 switch (opnd
->shifter
.kind
)
2114 case AARCH64_MOD_LSL
:
2115 size
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
2116 if (!value_in_range_p (opnd
->shifter
.amount
, 0, (size
- 1) * 8))
2118 set_sft_amount_out_of_range_error (mismatch_detail
, idx
, 0,
2122 if (!value_aligned_p (opnd
->shifter
.amount
, 8))
2124 set_unaligned_error (mismatch_detail
, idx
, 8);
2128 case AARCH64_MOD_MSL
:
2129 /* Only 8 and 16 are valid shift amount. */
2130 if (opnd
->shifter
.amount
!= 8 && opnd
->shifter
.amount
!= 16)
2132 set_other_error (mismatch_detail
, idx
,
2133 _("shift amount expected to be 0 or 16"));
2138 if (opnd
->shifter
.kind
!= AARCH64_MOD_NONE
)
2140 set_other_error (mismatch_detail
, idx
,
2141 _("invalid shift operator"));
2148 case AARCH64_OPND_FPIMM
:
2149 case AARCH64_OPND_SIMD_FPIMM
:
2150 if (opnd
->imm
.is_fp
== 0)
2152 set_other_error (mismatch_detail
, idx
,
2153 _("floating-point immediate expected"));
2156 /* The value is expected to be an 8-bit floating-point constant with
2157 sign, 3-bit exponent and normalized 4 bits of precision, encoded
2158 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
2160 if (!value_in_range_p (opnd
->imm
.value
, 0, 255))
2162 set_other_error (mismatch_detail
, idx
,
2163 _("immediate out of range"));
2166 if (opnd
->shifter
.kind
!= AARCH64_MOD_NONE
)
2168 set_other_error (mismatch_detail
, idx
,
2169 _("invalid shift operator"));
2174 case AARCH64_OPND_SVE_PATTERN_SCALED
:
2175 assert (opnd
->shifter
.kind
== AARCH64_MOD_MUL
);
2176 if (!value_in_range_p (opnd
->shifter
.amount
, 1, 16))
2178 set_multiplier_out_of_range_error (mismatch_detail
, idx
, 1, 16);
2188 case AARCH64_OPND_CLASS_CP_REG
:
2189 /* Cn or Cm: 4-bit opcode field named for historical reasons.
2190 valid range: C0 - C15. */
2191 if (opnd
->reg
.regno
> 15)
2193 set_regno_out_of_range_error (mismatch_detail
, idx
, 0, 15);
2198 case AARCH64_OPND_CLASS_SYSTEM
:
2201 case AARCH64_OPND_PSTATEFIELD
:
2202 assert (idx
== 0 && opnds
[1].type
== AARCH64_OPND_UIMM4
);
2205 The immediate must be #0 or #1. */
2206 if ((opnd
->pstatefield
== 0x03 /* UAO. */
2207 || opnd
->pstatefield
== 0x04) /* PAN. */
2208 && opnds
[1].imm
.value
> 1)
2210 set_imm_out_of_range_error (mismatch_detail
, idx
, 0, 1);
2213 /* MSR SPSel, #uimm4
2214 Uses uimm4 as a control value to select the stack pointer: if
2215 bit 0 is set it selects the current exception level's stack
2216 pointer, if bit 0 is clear it selects shared EL0 stack pointer.
2217 Bits 1 to 3 of uimm4 are reserved and should be zero. */
2218 if (opnd
->pstatefield
== 0x05 /* spsel */ && opnds
[1].imm
.value
> 1)
2220 set_imm_out_of_range_error (mismatch_detail
, idx
, 0, 1);
2229 case AARCH64_OPND_CLASS_SIMD_ELEMENT
:
2230 /* Get the upper bound for the element index. */
2231 num
= 16 / aarch64_get_qualifier_esize (qualifier
) - 1;
2232 /* Index out-of-range. */
2233 if (!value_in_range_p (opnd
->reglane
.index
, 0, num
))
2235 set_elem_idx_out_of_range_error (mismatch_detail
, idx
, 0, num
);
2238 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
2239 <Vm> Is the vector register (V0-V31) or (V0-V15), whose
2240 number is encoded in "size:M:Rm":
2246 if (type
== AARCH64_OPND_Em
&& qualifier
== AARCH64_OPND_QLF_S_H
2247 && !value_in_range_p (opnd
->reglane
.regno
, 0, 15))
2249 set_regno_out_of_range_error (mismatch_detail
, idx
, 0, 15);
2254 case AARCH64_OPND_CLASS_MODIFIED_REG
:
2255 assert (idx
== 1 || idx
== 2);
2258 case AARCH64_OPND_Rm_EXT
:
2259 if (aarch64_extend_operator_p (opnd
->shifter
.kind
) == FALSE
2260 && opnd
->shifter
.kind
!= AARCH64_MOD_LSL
)
2262 set_other_error (mismatch_detail
, idx
,
2263 _("extend operator expected"));
2266 /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
2267 (i.e. SP), in which case it defaults to LSL. The LSL alias is
2268 only valid when "Rd" or "Rn" is '11111', and is preferred in that
2270 if (!aarch64_stack_pointer_p (opnds
+ 0)
2271 && (idx
!= 2 || !aarch64_stack_pointer_p (opnds
+ 1)))
2273 if (!opnd
->shifter
.operator_present
)
2275 set_other_error (mismatch_detail
, idx
,
2276 _("missing extend operator"));
2279 else if (opnd
->shifter
.kind
== AARCH64_MOD_LSL
)
2281 set_other_error (mismatch_detail
, idx
,
2282 _("'LSL' operator not allowed"));
2286 assert (opnd
->shifter
.operator_present
/* Default to LSL. */
2287 || opnd
->shifter
.kind
== AARCH64_MOD_LSL
);
2288 if (!value_in_range_p (opnd
->shifter
.amount
, 0, 4))
2290 set_sft_amount_out_of_range_error (mismatch_detail
, idx
, 0, 4);
2293 /* In the 64-bit form, the final register operand is written as Wm
2294 for all but the (possibly omitted) UXTX/LSL and SXTX
2296 N.B. GAS allows X register to be used with any operator as a
2297 programming convenience. */
2298 if (qualifier
== AARCH64_OPND_QLF_X
2299 && opnd
->shifter
.kind
!= AARCH64_MOD_LSL
2300 && opnd
->shifter
.kind
!= AARCH64_MOD_UXTX
2301 && opnd
->shifter
.kind
!= AARCH64_MOD_SXTX
)
2303 set_other_error (mismatch_detail
, idx
, _("W register expected"));
2308 case AARCH64_OPND_Rm_SFT
:
2309 /* ROR is not available to the shifted register operand in
2310 arithmetic instructions. */
2311 if (aarch64_shift_operator_p (opnd
->shifter
.kind
) == FALSE
)
2313 set_other_error (mismatch_detail
, idx
,
2314 _("shift operator expected"));
2317 if (opnd
->shifter
.kind
== AARCH64_MOD_ROR
2318 && opcode
->iclass
!= log_shift
)
2320 set_other_error (mismatch_detail
, idx
,
2321 _("'ROR' operator not allowed"));
2324 num
= qualifier
== AARCH64_OPND_QLF_W
? 31 : 63;
2325 if (!value_in_range_p (opnd
->shifter
.amount
, 0, num
))
2327 set_sft_amount_out_of_range_error (mismatch_detail
, idx
, 0, num
);
2344 /* Main entrypoint for the operand constraint checking.
2346 Return 1 if operands of *INST meet the constraint applied by the operand
2347 codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
2348 not NULL, return the detail of the error in *MISMATCH_DETAIL. N.B. when
2349 adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
2350 with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
2351 error kind when it is notified that an instruction does not pass the check).
2353 Un-determined operand qualifiers may get established during the process. */
2356 aarch64_match_operands_constraint (aarch64_inst
*inst
,
2357 aarch64_operand_error
*mismatch_detail
)
2361 DEBUG_TRACE ("enter");
2363 /* Check for cases where a source register needs to be the same as the
2364 destination register. Do this before matching qualifiers since if
2365 an instruction has both invalid tying and invalid qualifiers,
2366 the error about qualifiers would suggest several alternative
2367 instructions that also have invalid tying. */
2368 i
= inst
->opcode
->tied_operand
;
2369 if (i
> 0 && (inst
->operands
[0].reg
.regno
!= inst
->operands
[i
].reg
.regno
))
2371 if (mismatch_detail
)
2373 mismatch_detail
->kind
= AARCH64_OPDE_UNTIED_OPERAND
;
2374 mismatch_detail
->index
= i
;
2375 mismatch_detail
->error
= NULL
;
2380 /* Match operands' qualifier.
2381 *INST has already had qualifier establish for some, if not all, of
2382 its operands; we need to find out whether these established
2383 qualifiers match one of the qualifier sequence in
2384 INST->OPCODE->QUALIFIERS_LIST. If yes, we will assign each operand
2385 with the corresponding qualifier in such a sequence.
2386 Only basic operand constraint checking is done here; the more thorough
2387 constraint checking will carried out by operand_general_constraint_met_p,
2388 which has be to called after this in order to get all of the operands'
2389 qualifiers established. */
2390 if (match_operands_qualifier (inst
, TRUE
/* update_p */) == 0)
2392 DEBUG_TRACE ("FAIL on operand qualifier matching");
2393 if (mismatch_detail
)
2395 /* Return an error type to indicate that it is the qualifier
2396 matching failure; we don't care about which operand as there
2397 are enough information in the opcode table to reproduce it. */
2398 mismatch_detail
->kind
= AARCH64_OPDE_INVALID_VARIANT
;
2399 mismatch_detail
->index
= -1;
2400 mismatch_detail
->error
= NULL
;
2405 /* Match operands' constraint. */
2406 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
2408 enum aarch64_opnd type
= inst
->opcode
->operands
[i
];
2409 if (type
== AARCH64_OPND_NIL
)
2411 if (inst
->operands
[i
].skip
)
2413 DEBUG_TRACE ("skip the incomplete operand %d", i
);
2416 if (operand_general_constraint_met_p (inst
->operands
, i
, type
,
2417 inst
->opcode
, mismatch_detail
) == 0)
2419 DEBUG_TRACE ("FAIL on operand %d", i
);
2424 DEBUG_TRACE ("PASS");
2429 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
2430 Also updates the TYPE of each INST->OPERANDS with the corresponding
2431 value of OPCODE->OPERANDS.
2433 Note that some operand qualifiers may need to be manually cleared by
2434 the caller before it further calls the aarch64_opcode_encode; by
2435 doing this, it helps the qualifier matching facilities work
2438 const aarch64_opcode
*
2439 aarch64_replace_opcode (aarch64_inst
*inst
, const aarch64_opcode
*opcode
)
2442 const aarch64_opcode
*old
= inst
->opcode
;
2444 inst
->opcode
= opcode
;
2446 /* Update the operand types. */
2447 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
2449 inst
->operands
[i
].type
= opcode
->operands
[i
];
2450 if (opcode
->operands
[i
] == AARCH64_OPND_NIL
)
2454 DEBUG_TRACE ("replace %s with %s", old
->name
, opcode
->name
);
2460 aarch64_operand_index (const enum aarch64_opnd
*operands
, enum aarch64_opnd operand
)
2463 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
2464 if (operands
[i
] == operand
)
2466 else if (operands
[i
] == AARCH64_OPND_NIL
)
/* R0...R30, followed by FOR31.  */
#define BANK(R, FOR31) \
  { R  (0), R  (1), R  (2), R  (3), R  (4), R  (5), R  (6), R  (7), \
    R  (8), R  (9), R (10), R (11), R (12), R (13), R (14), R (15), \
    R (16), R (17), R (18), R (19), R (20), R (21), R (22), R (23), \
    R (24), R (25), R (26), R (27), R (28), R (29), R (30), FOR31 }
/* [0][0]  32-bit integer regs with sp   Wn
   [0][1]  64-bit integer regs with sp   Xn  sf=1
   [1][0]  32-bit integer regs with #0   Wn
   [1][1]  64-bit integer regs with #0   Xn  sf=1  */
static const char *int_reg[2][2][32] = {
#define R32(X) "w" #X
#define R64(X) "x" #X
  { BANK (R32, "wsp"), BANK (R64, "sp") },
  { BANK (R32, "wzr"), BANK (R64, "xzr") }
#undef R64
#undef R32
};
2490 /* Names of the SVE vector registers, first with .S suffixes,
2491 then with .D suffixes. */
2493 static const char *sve_reg
[2][32] = {
2494 #define ZS(X) "z" #X ".s"
2495 #define ZD(X) "z" #X ".d"
2496 BANK (ZS
, ZS (31)), BANK (ZD
, ZD (31))
2502 /* Return the integer register name.
2503 if SP_REG_P is not 0, R31 is an SP reg, other R31 is the zero reg. */
2505 static inline const char *
2506 get_int_reg_name (int regno
, aarch64_opnd_qualifier_t qualifier
, int sp_reg_p
)
2508 const int has_zr
= sp_reg_p
? 0 : 1;
2509 const int is_64
= aarch64_get_qualifier_esize (qualifier
) == 4 ? 0 : 1;
2510 return int_reg
[has_zr
][is_64
][regno
];
2513 /* Like get_int_reg_name, but IS_64 is always 1. */
2515 static inline const char *
2516 get_64bit_int_reg_name (int regno
, int sp_reg_p
)
2518 const int has_zr
= sp_reg_p
? 0 : 1;
2519 return int_reg
[has_zr
][1][regno
];
2522 /* Get the name of the integer offset register in OPND, using the shift type
2523 to decide whether it's a word or doubleword. */
2525 static inline const char *
2526 get_offset_int_reg_name (const aarch64_opnd_info
*opnd
)
2528 switch (opnd
->shifter
.kind
)
2530 case AARCH64_MOD_UXTW
:
2531 case AARCH64_MOD_SXTW
:
2532 return get_int_reg_name (opnd
->addr
.offset
.regno
, AARCH64_OPND_QLF_W
, 0);
2534 case AARCH64_MOD_LSL
:
2535 case AARCH64_MOD_SXTX
:
2536 return get_int_reg_name (opnd
->addr
.offset
.regno
, AARCH64_OPND_QLF_X
, 0);
2543 /* Get the name of the SVE vector offset register in OPND, using the operand
2544 qualifier to decide whether the suffix should be .S or .D. */
2546 static inline const char *
2547 get_addr_sve_reg_name (int regno
, aarch64_opnd_qualifier_t qualifier
)
2549 assert (qualifier
== AARCH64_OPND_QLF_S_S
2550 || qualifier
== AARCH64_OPND_QLF_S_D
);
2551 return sve_reg
[qualifier
== AARCH64_OPND_QLF_S_D
][regno
];
2554 /* Types for expanding an encoded 8-bit value to a floating-point value. */
/* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
   normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
   (depending on the type of the instruction).  IMM8 will be expanded to a
   single-precision floating-point value (SIZE == 4) or a double-precision
   floating-point value (SIZE == 8).  A half-precision floating-point value
   (SIZE == 2) is expanded to a single-precision floating-point value.  The
   expanded value is returned.  */

static uint64_t
expand_fp_imm (int size, uint32_t imm8)
{
  uint64_t imm = 0;
  uint32_t imm8_7, imm8_6_0, imm8_6, imm8_6_repl4;

  imm8_7 = (imm8 >> 7) & 0x01;	/* imm8<7>   */
  imm8_6_0 = imm8 & 0x7f;	/* imm8<6:0> */
  imm8_6 = imm8_6_0 >> 6;	/* imm8<6>   */
  imm8_6_repl4 = (imm8_6 << 3) | (imm8_6 << 2)
    | (imm8_6 << 1) | imm8_6;	/* Replicate(imm8<6>,4) */

  if (size == 8)
    {
      /* Build the top 32 bits of the double; the bottom 32 bits are 0.  */
      imm = (imm8_7 << (63-32))		/* imm8<7>  */
	| ((imm8_6 ^ 1) << (62-32))	/* NOT(imm8<6>)  */
	| (imm8_6_repl4 << (58-32)) | (imm8_6 << (57-32))
	| (imm8_6 << (56-32)) | (imm8_6 << (55-32)) /* Replicate(imm8<6>,7) */
	| (imm8_6_0 << (48-32));	/* imm8<6>:imm8<5:0>    */
      imm <<= 32;
    }
  else if (size == 4 || size == 2)
    {
      imm = (imm8_7 << 31)	/* imm8<7>              */
	| ((imm8_6 ^ 1) << 30)	/* NOT(imm8<6>)         */
	| (imm8_6_repl4 << 26)	/* Replicate(imm8<6>,4) */
	| (imm8_6_0 << 19);	/* imm8<6>:imm8<5:0>    */
    }
  else
    {
      /* An unsupported size.  */
      assert (0);
    }

  return imm;
}
2618 /* Produce the string representation of the register list operand *OPND
2619 in the buffer pointed by BUF of size SIZE. PREFIX is the part of
2620 the register name that comes before the register number, such as "v". */
2622 print_register_list (char *buf
, size_t size
, const aarch64_opnd_info
*opnd
,
2625 const int num_regs
= opnd
->reglist
.num_regs
;
2626 const int first_reg
= opnd
->reglist
.first_regno
;
2627 const int last_reg
= (first_reg
+ num_regs
- 1) & 0x1f;
2628 const char *qlf_name
= aarch64_get_qualifier_name (opnd
->qualifier
);
2629 char tb
[8]; /* Temporary buffer. */
2631 assert (opnd
->type
!= AARCH64_OPND_LEt
|| opnd
->reglist
.has_index
);
2632 assert (num_regs
>= 1 && num_regs
<= 4);
2634 /* Prepare the index if any. */
2635 if (opnd
->reglist
.has_index
)
2636 snprintf (tb
, 8, "[%" PRIi64
"]", opnd
->reglist
.index
);
2640 /* The hyphenated form is preferred for disassembly if there are
2641 more than two registers in the list, and the register numbers
2642 are monotonically increasing in increments of one. */
2643 if (num_regs
> 2 && last_reg
> first_reg
)
2644 snprintf (buf
, size
, "{%s%d.%s-%s%d.%s}%s", prefix
, first_reg
, qlf_name
,
2645 prefix
, last_reg
, qlf_name
, tb
);
2648 const int reg0
= first_reg
;
2649 const int reg1
= (first_reg
+ 1) & 0x1f;
2650 const int reg2
= (first_reg
+ 2) & 0x1f;
2651 const int reg3
= (first_reg
+ 3) & 0x1f;
2656 snprintf (buf
, size
, "{%s%d.%s}%s", prefix
, reg0
, qlf_name
, tb
);
2659 snprintf (buf
, size
, "{%s%d.%s, %s%d.%s}%s", prefix
, reg0
, qlf_name
,
2660 prefix
, reg1
, qlf_name
, tb
);
2663 snprintf (buf
, size
, "{%s%d.%s, %s%d.%s, %s%d.%s}%s",
2664 prefix
, reg0
, qlf_name
, prefix
, reg1
, qlf_name
,
2665 prefix
, reg2
, qlf_name
, tb
);
2668 snprintf (buf
, size
, "{%s%d.%s, %s%d.%s, %s%d.%s, %s%d.%s}%s",
2669 prefix
, reg0
, qlf_name
, prefix
, reg1
, qlf_name
,
2670 prefix
, reg2
, qlf_name
, prefix
, reg3
, qlf_name
, tb
);
2676 /* Print the register+immediate address in OPND to BUF, which has SIZE
2677 characters. BASE is the name of the base register. */
2680 print_immediate_offset_address (char *buf
, size_t size
,
2681 const aarch64_opnd_info
*opnd
,
2684 if (opnd
->addr
.writeback
)
2686 if (opnd
->addr
.preind
)
2687 snprintf (buf
, size
, "[%s,#%d]!", base
, opnd
->addr
.offset
.imm
);
2689 snprintf (buf
, size
, "[%s],#%d", base
, opnd
->addr
.offset
.imm
);
2693 if (opnd
->shifter
.operator_present
)
2695 assert (opnd
->shifter
.kind
== AARCH64_MOD_MUL_VL
);
2696 snprintf (buf
, size
, "[%s,#%d,mul vl]",
2697 base
, opnd
->addr
.offset
.imm
);
2699 else if (opnd
->addr
.offset
.imm
)
2700 snprintf (buf
, size
, "[%s,#%d]", base
, opnd
->addr
.offset
.imm
);
2702 snprintf (buf
, size
, "[%s]", base
);
2706 /* Produce the string representation of the register offset address operand
2707 *OPND in the buffer pointed by BUF of size SIZE. BASE and OFFSET are
2708 the names of the base and offset registers. */
2710 print_register_offset_address (char *buf
, size_t size
,
2711 const aarch64_opnd_info
*opnd
,
2712 const char *base
, const char *offset
)
2714 char tb
[16]; /* Temporary buffer. */
2715 bfd_boolean print_extend_p
= TRUE
;
2716 bfd_boolean print_amount_p
= TRUE
;
2717 const char *shift_name
= aarch64_operand_modifiers
[opnd
->shifter
.kind
].name
;
2719 if (!opnd
->shifter
.amount
&& (opnd
->qualifier
!= AARCH64_OPND_QLF_S_B
2720 || !opnd
->shifter
.amount_present
))
2722 /* Not print the shift/extend amount when the amount is zero and
2723 when it is not the special case of 8-bit load/store instruction. */
2724 print_amount_p
= FALSE
;
2725 /* Likewise, no need to print the shift operator LSL in such a
2727 if (opnd
->shifter
.kind
== AARCH64_MOD_LSL
)
2728 print_extend_p
= FALSE
;
2731 /* Prepare for the extend/shift. */
2735 snprintf (tb
, sizeof (tb
), ",%s #%" PRIi64
, shift_name
,
2736 opnd
->shifter
.amount
);
2738 snprintf (tb
, sizeof (tb
), ",%s", shift_name
);
2743 snprintf (buf
, size
, "[%s,%s%s]", base
, offset
, tb
);
2746 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
2747 in *BUF. The caller should pass in the maximum size of *BUF in SIZE.
2748 PC, PCREL_P and ADDRESS are used to pass in and return information about
2749 the PC-relative address calculation, where the PC value is passed in
2750 PC. If the operand is pc-relative related, *PCREL_P (if PCREL_P non-NULL)
2751 will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
2752 calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
2754 The function serves both the disassembler and the assembler diagnostics
2755 issuer, which is the reason why it lives in this file. */
2758 aarch64_print_operand (char *buf
, size_t size
, bfd_vma pc
,
2759 const aarch64_opcode
*opcode
,
2760 const aarch64_opnd_info
*opnds
, int idx
, int *pcrel_p
,
2764 const char *name
= NULL
;
2765 const aarch64_opnd_info
*opnd
= opnds
+ idx
;
2766 enum aarch64_modifier_kind kind
;
2767 uint64_t addr
, enum_value
;
2775 case AARCH64_OPND_Rd
:
2776 case AARCH64_OPND_Rn
:
2777 case AARCH64_OPND_Rm
:
2778 case AARCH64_OPND_Rt
:
2779 case AARCH64_OPND_Rt2
:
2780 case AARCH64_OPND_Rs
:
2781 case AARCH64_OPND_Ra
:
2782 case AARCH64_OPND_Rt_SYS
:
2783 case AARCH64_OPND_PAIRREG
:
2784 /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
2785 the <ic_op>, therefore we we use opnd->present to override the
2786 generic optional-ness information. */
2787 if (opnd
->type
== AARCH64_OPND_Rt_SYS
&& !opnd
->present
)
2789 /* Omit the operand, e.g. RET. */
2790 if (optional_operand_p (opcode
, idx
)
2791 && opnd
->reg
.regno
== get_optional_operand_default_value (opcode
))
2793 assert (opnd
->qualifier
== AARCH64_OPND_QLF_W
2794 || opnd
->qualifier
== AARCH64_OPND_QLF_X
);
2795 snprintf (buf
, size
, "%s",
2796 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0));
2799 case AARCH64_OPND_Rd_SP
:
2800 case AARCH64_OPND_Rn_SP
:
2801 assert (opnd
->qualifier
== AARCH64_OPND_QLF_W
2802 || opnd
->qualifier
== AARCH64_OPND_QLF_WSP
2803 || opnd
->qualifier
== AARCH64_OPND_QLF_X
2804 || opnd
->qualifier
== AARCH64_OPND_QLF_SP
);
2805 snprintf (buf
, size
, "%s",
2806 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 1));
2809 case AARCH64_OPND_Rm_EXT
:
2810 kind
= opnd
->shifter
.kind
;
2811 assert (idx
== 1 || idx
== 2);
2812 if ((aarch64_stack_pointer_p (opnds
)
2813 || (idx
== 2 && aarch64_stack_pointer_p (opnds
+ 1)))
2814 && ((opnd
->qualifier
== AARCH64_OPND_QLF_W
2815 && opnds
[0].qualifier
== AARCH64_OPND_QLF_W
2816 && kind
== AARCH64_MOD_UXTW
)
2817 || (opnd
->qualifier
== AARCH64_OPND_QLF_X
2818 && kind
== AARCH64_MOD_UXTX
)))
2820 /* 'LSL' is the preferred form in this case. */
2821 kind
= AARCH64_MOD_LSL
;
2822 if (opnd
->shifter
.amount
== 0)
2824 /* Shifter omitted. */
2825 snprintf (buf
, size
, "%s",
2826 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0));
2830 if (opnd
->shifter
.amount
)
2831 snprintf (buf
, size
, "%s, %s #%" PRIi64
,
2832 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0),
2833 aarch64_operand_modifiers
[kind
].name
,
2834 opnd
->shifter
.amount
);
2836 snprintf (buf
, size
, "%s, %s",
2837 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0),
2838 aarch64_operand_modifiers
[kind
].name
);
2841 case AARCH64_OPND_Rm_SFT
:
2842 assert (opnd
->qualifier
== AARCH64_OPND_QLF_W
2843 || opnd
->qualifier
== AARCH64_OPND_QLF_X
);
2844 if (opnd
->shifter
.amount
== 0 && opnd
->shifter
.kind
== AARCH64_MOD_LSL
)
2845 snprintf (buf
, size
, "%s",
2846 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0));
2848 snprintf (buf
, size
, "%s, %s #%" PRIi64
,
2849 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0),
2850 aarch64_operand_modifiers
[opnd
->shifter
.kind
].name
,
2851 opnd
->shifter
.amount
);
2854 case AARCH64_OPND_Fd
:
2855 case AARCH64_OPND_Fn
:
2856 case AARCH64_OPND_Fm
:
2857 case AARCH64_OPND_Fa
:
2858 case AARCH64_OPND_Ft
:
2859 case AARCH64_OPND_Ft2
:
2860 case AARCH64_OPND_Sd
:
2861 case AARCH64_OPND_Sn
:
2862 case AARCH64_OPND_Sm
:
2863 snprintf (buf
, size
, "%s%d", aarch64_get_qualifier_name (opnd
->qualifier
),
2867 case AARCH64_OPND_Vd
:
2868 case AARCH64_OPND_Vn
:
2869 case AARCH64_OPND_Vm
:
2870 snprintf (buf
, size
, "v%d.%s", opnd
->reg
.regno
,
2871 aarch64_get_qualifier_name (opnd
->qualifier
));
2874 case AARCH64_OPND_Ed
:
2875 case AARCH64_OPND_En
:
2876 case AARCH64_OPND_Em
:
2877 snprintf (buf
, size
, "v%d.%s[%" PRIi64
"]", opnd
->reglane
.regno
,
2878 aarch64_get_qualifier_name (opnd
->qualifier
),
2879 opnd
->reglane
.index
);
2882 case AARCH64_OPND_VdD1
:
2883 case AARCH64_OPND_VnD1
:
2884 snprintf (buf
, size
, "v%d.d[1]", opnd
->reg
.regno
);
2887 case AARCH64_OPND_LVn
:
2888 case AARCH64_OPND_LVt
:
2889 case AARCH64_OPND_LVt_AL
:
2890 case AARCH64_OPND_LEt
:
2891 print_register_list (buf
, size
, opnd
, "v");
2894 case AARCH64_OPND_SVE_Pd
:
2895 case AARCH64_OPND_SVE_Pg3
:
2896 case AARCH64_OPND_SVE_Pg4_5
:
2897 case AARCH64_OPND_SVE_Pg4_10
:
2898 case AARCH64_OPND_SVE_Pg4_16
:
2899 case AARCH64_OPND_SVE_Pm
:
2900 case AARCH64_OPND_SVE_Pn
:
2901 case AARCH64_OPND_SVE_Pt
:
2902 if (opnd
->qualifier
== AARCH64_OPND_QLF_NIL
)
2903 snprintf (buf
, size
, "p%d", opnd
->reg
.regno
);
2904 else if (opnd
->qualifier
== AARCH64_OPND_QLF_P_Z
2905 || opnd
->qualifier
== AARCH64_OPND_QLF_P_M
)
2906 snprintf (buf
, size
, "p%d/%s", opnd
->reg
.regno
,
2907 aarch64_get_qualifier_name (opnd
->qualifier
));
2909 snprintf (buf
, size
, "p%d.%s", opnd
->reg
.regno
,
2910 aarch64_get_qualifier_name (opnd
->qualifier
));
2913 case AARCH64_OPND_SVE_Za_5
:
2914 case AARCH64_OPND_SVE_Za_16
:
2915 case AARCH64_OPND_SVE_Zd
:
2916 case AARCH64_OPND_SVE_Zm_5
:
2917 case AARCH64_OPND_SVE_Zm_16
:
2918 case AARCH64_OPND_SVE_Zn
:
2919 case AARCH64_OPND_SVE_Zt
:
2920 if (opnd
->qualifier
== AARCH64_OPND_QLF_NIL
)
2921 snprintf (buf
, size
, "z%d", opnd
->reg
.regno
);
2923 snprintf (buf
, size
, "z%d.%s", opnd
->reg
.regno
,
2924 aarch64_get_qualifier_name (opnd
->qualifier
));
2927 case AARCH64_OPND_SVE_ZnxN
:
2928 case AARCH64_OPND_SVE_ZtxN
:
2929 print_register_list (buf
, size
, opnd
, "z");
2932 case AARCH64_OPND_SVE_Zn_INDEX
:
2933 snprintf (buf
, size
, "z%d.%s[%" PRIi64
"]", opnd
->reglane
.regno
,
2934 aarch64_get_qualifier_name (opnd
->qualifier
),
2935 opnd
->reglane
.index
);
2938 case AARCH64_OPND_Cn
:
2939 case AARCH64_OPND_Cm
:
2940 snprintf (buf
, size
, "C%d", opnd
->reg
.regno
);
2943 case AARCH64_OPND_IDX
:
2944 case AARCH64_OPND_IMM
:
2945 case AARCH64_OPND_WIDTH
:
2946 case AARCH64_OPND_UIMM3_OP1
:
2947 case AARCH64_OPND_UIMM3_OP2
:
2948 case AARCH64_OPND_BIT_NUM
:
2949 case AARCH64_OPND_IMM_VLSL
:
2950 case AARCH64_OPND_IMM_VLSR
:
2951 case AARCH64_OPND_SHLL_IMM
:
2952 case AARCH64_OPND_IMM0
:
2953 case AARCH64_OPND_IMMR
:
2954 case AARCH64_OPND_IMMS
:
2955 case AARCH64_OPND_FBITS
:
2956 snprintf (buf
, size
, "#%" PRIi64
, opnd
->imm
.value
);
2959 case AARCH64_OPND_SVE_PATTERN
:
2960 if (optional_operand_p (opcode
, idx
)
2961 && opnd
->imm
.value
== get_optional_operand_default_value (opcode
))
2963 enum_value
= opnd
->imm
.value
;
2964 assert (enum_value
< ARRAY_SIZE (aarch64_sve_pattern_array
));
2965 if (aarch64_sve_pattern_array
[enum_value
])
2966 snprintf (buf
, size
, "%s", aarch64_sve_pattern_array
[enum_value
]);
2968 snprintf (buf
, size
, "#%" PRIi64
, opnd
->imm
.value
);
2971 case AARCH64_OPND_SVE_PATTERN_SCALED
:
2972 if (optional_operand_p (opcode
, idx
)
2973 && !opnd
->shifter
.operator_present
2974 && opnd
->imm
.value
== get_optional_operand_default_value (opcode
))
2976 enum_value
= opnd
->imm
.value
;
2977 assert (enum_value
< ARRAY_SIZE (aarch64_sve_pattern_array
));
2978 if (aarch64_sve_pattern_array
[opnd
->imm
.value
])
2979 snprintf (buf
, size
, "%s", aarch64_sve_pattern_array
[opnd
->imm
.value
]);
2981 snprintf (buf
, size
, "#%" PRIi64
, opnd
->imm
.value
);
2982 if (opnd
->shifter
.operator_present
)
2984 size_t len
= strlen (buf
);
2985 snprintf (buf
+ len
, size
- len
, ", %s #%" PRIi64
,
2986 aarch64_operand_modifiers
[opnd
->shifter
.kind
].name
,
2987 opnd
->shifter
.amount
);
2991 case AARCH64_OPND_SVE_PRFOP
:
2992 enum_value
= opnd
->imm
.value
;
2993 assert (enum_value
< ARRAY_SIZE (aarch64_sve_prfop_array
));
2994 if (aarch64_sve_prfop_array
[enum_value
])
2995 snprintf (buf
, size
, "%s", aarch64_sve_prfop_array
[enum_value
]);
2997 snprintf (buf
, size
, "#%" PRIi64
, opnd
->imm
.value
);
3000 case AARCH64_OPND_IMM_MOV
:
3001 switch (aarch64_get_qualifier_esize (opnds
[0].qualifier
))
3003 case 4: /* e.g. MOV Wd, #<imm32>. */
3005 int imm32
= opnd
->imm
.value
;
3006 snprintf (buf
, size
, "#0x%-20x\t// #%d", imm32
, imm32
);
3009 case 8: /* e.g. MOV Xd, #<imm64>. */
3010 snprintf (buf
, size
, "#0x%-20" PRIx64
"\t// #%" PRIi64
,
3011 opnd
->imm
.value
, opnd
->imm
.value
);
3013 default: assert (0);
3017 case AARCH64_OPND_FPIMM0
:
3018 snprintf (buf
, size
, "#0.0");
3021 case AARCH64_OPND_LIMM
:
3022 case AARCH64_OPND_AIMM
:
3023 case AARCH64_OPND_HALF
:
3024 if (opnd
->shifter
.amount
)
3025 snprintf (buf
, size
, "#0x%" PRIx64
", lsl #%" PRIi64
, opnd
->imm
.value
,
3026 opnd
->shifter
.amount
);
3028 snprintf (buf
, size
, "#0x%" PRIx64
, opnd
->imm
.value
);
3031 case AARCH64_OPND_SIMD_IMM
:
3032 case AARCH64_OPND_SIMD_IMM_SFT
:
3033 if ((! opnd
->shifter
.amount
&& opnd
->shifter
.kind
== AARCH64_MOD_LSL
)
3034 || opnd
->shifter
.kind
== AARCH64_MOD_NONE
)
3035 snprintf (buf
, size
, "#0x%" PRIx64
, opnd
->imm
.value
);
3037 snprintf (buf
, size
, "#0x%" PRIx64
", %s #%" PRIi64
, opnd
->imm
.value
,
3038 aarch64_operand_modifiers
[opnd
->shifter
.kind
].name
,
3039 opnd
->shifter
.amount
);
3042 case AARCH64_OPND_FPIMM
:
3043 case AARCH64_OPND_SIMD_FPIMM
:
3044 switch (aarch64_get_qualifier_esize (opnds
[0].qualifier
))
3046 case 2: /* e.g. FMOV <Hd>, #<imm>. */
3049 c
.i
= expand_fp_imm (2, opnd
->imm
.value
);
3050 snprintf (buf
, size
, "#%.18e", c
.f
);
3053 case 4: /* e.g. FMOV <Vd>.4S, #<imm>. */
3056 c
.i
= expand_fp_imm (4, opnd
->imm
.value
);
3057 snprintf (buf
, size
, "#%.18e", c
.f
);
3060 case 8: /* e.g. FMOV <Sd>, #<imm>. */
3063 c
.i
= expand_fp_imm (8, opnd
->imm
.value
);
3064 snprintf (buf
, size
, "#%.18e", c
.d
);
3067 default: assert (0);
3071 case AARCH64_OPND_CCMP_IMM
:
3072 case AARCH64_OPND_NZCV
:
3073 case AARCH64_OPND_EXCEPTION
:
3074 case AARCH64_OPND_UIMM4
:
3075 case AARCH64_OPND_UIMM7
:
3076 if (optional_operand_p (opcode
, idx
) == TRUE
3077 && (opnd
->imm
.value
==
3078 (int64_t) get_optional_operand_default_value (opcode
)))
3079 /* Omit the operand, e.g. DCPS1. */
3081 snprintf (buf
, size
, "#0x%x", (unsigned int)opnd
->imm
.value
);
3084 case AARCH64_OPND_COND
:
3085 case AARCH64_OPND_COND1
:
3086 snprintf (buf
, size
, "%s", opnd
->cond
->names
[0]);
3089 case AARCH64_OPND_ADDR_ADRP
:
3090 addr
= ((pc
+ AARCH64_PCREL_OFFSET
) & ~(uint64_t)0xfff)
3096 /* This is not necessary during the disassembling, as print_address_func
3097 in the disassemble_info will take care of the printing. But some
3098 other callers may be still interested in getting the string in *STR,
3099 so here we do snprintf regardless. */
3100 snprintf (buf
, size
, "#0x%" PRIx64
, addr
);
3103 case AARCH64_OPND_ADDR_PCREL14
:
3104 case AARCH64_OPND_ADDR_PCREL19
:
3105 case AARCH64_OPND_ADDR_PCREL21
:
3106 case AARCH64_OPND_ADDR_PCREL26
:
3107 addr
= pc
+ AARCH64_PCREL_OFFSET
+ opnd
->imm
.value
;
3112 /* This is not necessary during the disassembling, as print_address_func
3113 in the disassemble_info will take care of the printing. But some
3114 other callers may be still interested in getting the string in *STR,
3115 so here we do snprintf regardless. */
3116 snprintf (buf
, size
, "#0x%" PRIx64
, addr
);
3119 case AARCH64_OPND_ADDR_SIMPLE
:
3120 case AARCH64_OPND_SIMD_ADDR_SIMPLE
:
3121 case AARCH64_OPND_SIMD_ADDR_POST
:
3122 name
= get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1);
3123 if (opnd
->type
== AARCH64_OPND_SIMD_ADDR_POST
)
3125 if (opnd
->addr
.offset
.is_reg
)
3126 snprintf (buf
, size
, "[%s], x%d", name
, opnd
->addr
.offset
.regno
);
3128 snprintf (buf
, size
, "[%s], #%d", name
, opnd
->addr
.offset
.imm
);
3131 snprintf (buf
, size
, "[%s]", name
);
3134 case AARCH64_OPND_ADDR_REGOFF
:
3135 case AARCH64_OPND_SVE_ADDR_RR
:
3136 case AARCH64_OPND_SVE_ADDR_RR_LSL1
:
3137 case AARCH64_OPND_SVE_ADDR_RR_LSL2
:
3138 case AARCH64_OPND_SVE_ADDR_RR_LSL3
:
3139 case AARCH64_OPND_SVE_ADDR_RX
:
3140 case AARCH64_OPND_SVE_ADDR_RX_LSL1
:
3141 case AARCH64_OPND_SVE_ADDR_RX_LSL2
:
3142 case AARCH64_OPND_SVE_ADDR_RX_LSL3
:
3143 print_register_offset_address
3144 (buf
, size
, opnd
, get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1),
3145 get_offset_int_reg_name (opnd
));
3148 case AARCH64_OPND_SVE_ADDR_RZ
:
3149 case AARCH64_OPND_SVE_ADDR_RZ_LSL1
:
3150 case AARCH64_OPND_SVE_ADDR_RZ_LSL2
:
3151 case AARCH64_OPND_SVE_ADDR_RZ_LSL3
:
3152 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14
:
3153 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22
:
3154 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14
:
3155 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22
:
3156 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14
:
3157 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22
:
3158 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14
:
3159 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22
:
3160 print_register_offset_address
3161 (buf
, size
, opnd
, get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1),
3162 get_addr_sve_reg_name (opnd
->addr
.offset
.regno
, opnd
->qualifier
));
3165 case AARCH64_OPND_ADDR_SIMM7
:
3166 case AARCH64_OPND_ADDR_SIMM9
:
3167 case AARCH64_OPND_ADDR_SIMM9_2
:
3168 case AARCH64_OPND_SVE_ADDR_RI_S4xVL
:
3169 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL
:
3170 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL
:
3171 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL
:
3172 case AARCH64_OPND_SVE_ADDR_RI_S6xVL
:
3173 case AARCH64_OPND_SVE_ADDR_RI_S9xVL
:
3174 case AARCH64_OPND_SVE_ADDR_RI_U6
:
3175 case AARCH64_OPND_SVE_ADDR_RI_U6x2
:
3176 case AARCH64_OPND_SVE_ADDR_RI_U6x4
:
3177 case AARCH64_OPND_SVE_ADDR_RI_U6x8
:
3178 print_immediate_offset_address
3179 (buf
, size
, opnd
, get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1));
3182 case AARCH64_OPND_SVE_ADDR_ZI_U5
:
3183 case AARCH64_OPND_SVE_ADDR_ZI_U5x2
:
3184 case AARCH64_OPND_SVE_ADDR_ZI_U5x4
:
3185 case AARCH64_OPND_SVE_ADDR_ZI_U5x8
:
3186 print_immediate_offset_address
3188 get_addr_sve_reg_name (opnd
->addr
.base_regno
, opnd
->qualifier
));
3191 case AARCH64_OPND_SVE_ADDR_ZZ_LSL
:
3192 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW
:
3193 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW
:
3194 print_register_offset_address
3196 get_addr_sve_reg_name (opnd
->addr
.base_regno
, opnd
->qualifier
),
3197 get_addr_sve_reg_name (opnd
->addr
.offset
.regno
, opnd
->qualifier
));
3200 case AARCH64_OPND_ADDR_UIMM12
:
3201 name
= get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1);
3202 if (opnd
->addr
.offset
.imm
)
3203 snprintf (buf
, size
, "[%s,#%d]", name
, opnd
->addr
.offset
.imm
);
3205 snprintf (buf
, size
, "[%s]", name
);
3208 case AARCH64_OPND_SYSREG
:
3209 for (i
= 0; aarch64_sys_regs
[i
].name
; ++i
)
3210 if (aarch64_sys_regs
[i
].value
== opnd
->sysreg
3211 && ! aarch64_sys_reg_deprecated_p (&aarch64_sys_regs
[i
]))
3213 if (aarch64_sys_regs
[i
].name
)
3214 snprintf (buf
, size
, "%s", aarch64_sys_regs
[i
].name
);
3217 /* Implementation defined system register. */
3218 unsigned int value
= opnd
->sysreg
;
3219 snprintf (buf
, size
, "s%u_%u_c%u_c%u_%u", (value
>> 14) & 0x3,
3220 (value
>> 11) & 0x7, (value
>> 7) & 0xf, (value
>> 3) & 0xf,
3225 case AARCH64_OPND_PSTATEFIELD
:
3226 for (i
= 0; aarch64_pstatefields
[i
].name
; ++i
)
3227 if (aarch64_pstatefields
[i
].value
== opnd
->pstatefield
)
3229 assert (aarch64_pstatefields
[i
].name
);
3230 snprintf (buf
, size
, "%s", aarch64_pstatefields
[i
].name
);
3233 case AARCH64_OPND_SYSREG_AT
:
3234 case AARCH64_OPND_SYSREG_DC
:
3235 case AARCH64_OPND_SYSREG_IC
:
3236 case AARCH64_OPND_SYSREG_TLBI
:
3237 snprintf (buf
, size
, "%s", opnd
->sysins_op
->name
);
3240 case AARCH64_OPND_BARRIER
:
3241 snprintf (buf
, size
, "%s", opnd
->barrier
->name
);
3244 case AARCH64_OPND_BARRIER_ISB
:
3245 /* Operand can be omitted, e.g. in DCPS1. */
3246 if (! optional_operand_p (opcode
, idx
)
3247 || (opnd
->barrier
->value
3248 != get_optional_operand_default_value (opcode
)))
3249 snprintf (buf
, size
, "#0x%x", opnd
->barrier
->value
);
3252 case AARCH64_OPND_PRFOP
:
3253 if (opnd
->prfop
->name
!= NULL
)
3254 snprintf (buf
, size
, "%s", opnd
->prfop
->name
);
3256 snprintf (buf
, size
, "#0x%02x", opnd
->prfop
->value
);
3259 case AARCH64_OPND_BARRIER_PSB
:
3260 snprintf (buf
, size
, "%s", opnd
->hint_option
->name
);
/* Encode a system register as op0:op1:CRn:CRm:op2 packed into the low
   14 bits (the >> 5 strips the unused low field positions).  */
#define CPENC(op0,op1,crn,crm,op2) \
  ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
/* for 3.9.3 Instructions for Accessing Special Purpose Registers */
#define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
/* for 3.9.10 System Instructions */
#define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))

#define F_DEPRECATED	0x1	/* Deprecated system register.  */
#define F_ARCHEXT	0x2	/* Architecture dependent system register.  */
#define F_HASXT		0x4	/* System instruction register <Xt>
				   operand.  */
3309 /* TODO there are two more issues need to be resolved
3310 1. handle read-only and write-only system registers
3311 2. handle cpu-implementation-defined system registers. */
3312 const aarch64_sys_reg aarch64_sys_regs
[] =
3314 { "spsr_el1", CPEN_(0,C0
,0), 0 }, /* = spsr_svc */
3315 { "spsr_el12", CPEN_ (5, C0
, 0), F_ARCHEXT
},
3316 { "elr_el1", CPEN_(0,C0
,1), 0 },
3317 { "elr_el12", CPEN_ (5, C0
, 1), F_ARCHEXT
},
3318 { "sp_el0", CPEN_(0,C1
,0), 0 },
3319 { "spsel", CPEN_(0,C2
,0), 0 },
3320 { "daif", CPEN_(3,C2
,1), 0 },
3321 { "currentel", CPEN_(0,C2
,2), 0 }, /* RO */
3322 { "pan", CPEN_(0,C2
,3), F_ARCHEXT
},
3323 { "uao", CPEN_ (0, C2
, 4), F_ARCHEXT
},
3324 { "nzcv", CPEN_(3,C2
,0), 0 },
3325 { "fpcr", CPEN_(3,C4
,0), 0 },
3326 { "fpsr", CPEN_(3,C4
,1), 0 },
3327 { "dspsr_el0", CPEN_(3,C5
,0), 0 },
3328 { "dlr_el0", CPEN_(3,C5
,1), 0 },
3329 { "spsr_el2", CPEN_(4,C0
,0), 0 }, /* = spsr_hyp */
3330 { "elr_el2", CPEN_(4,C0
,1), 0 },
3331 { "sp_el1", CPEN_(4,C1
,0), 0 },
3332 { "spsr_irq", CPEN_(4,C3
,0), 0 },
3333 { "spsr_abt", CPEN_(4,C3
,1), 0 },
3334 { "spsr_und", CPEN_(4,C3
,2), 0 },
3335 { "spsr_fiq", CPEN_(4,C3
,3), 0 },
3336 { "spsr_el3", CPEN_(6,C0
,0), 0 },
3337 { "elr_el3", CPEN_(6,C0
,1), 0 },
3338 { "sp_el2", CPEN_(6,C1
,0), 0 },
3339 { "spsr_svc", CPEN_(0,C0
,0), F_DEPRECATED
}, /* = spsr_el1 */
3340 { "spsr_hyp", CPEN_(4,C0
,0), F_DEPRECATED
}, /* = spsr_el2 */
3341 { "midr_el1", CPENC(3,0,C0
,C0
,0), 0 }, /* RO */
3342 { "ctr_el0", CPENC(3,3,C0
,C0
,1), 0 }, /* RO */
3343 { "mpidr_el1", CPENC(3,0,C0
,C0
,5), 0 }, /* RO */
3344 { "revidr_el1", CPENC(3,0,C0
,C0
,6), 0 }, /* RO */
3345 { "aidr_el1", CPENC(3,1,C0
,C0
,7), 0 }, /* RO */
3346 { "dczid_el0", CPENC(3,3,C0
,C0
,7), 0 }, /* RO */
3347 { "id_dfr0_el1", CPENC(3,0,C0
,C1
,2), 0 }, /* RO */
3348 { "id_pfr0_el1", CPENC(3,0,C0
,C1
,0), 0 }, /* RO */
3349 { "id_pfr1_el1", CPENC(3,0,C0
,C1
,1), 0 }, /* RO */
3350 { "id_afr0_el1", CPENC(3,0,C0
,C1
,3), 0 }, /* RO */
3351 { "id_mmfr0_el1", CPENC(3,0,C0
,C1
,4), 0 }, /* RO */
3352 { "id_mmfr1_el1", CPENC(3,0,C0
,C1
,5), 0 }, /* RO */
3353 { "id_mmfr2_el1", CPENC(3,0,C0
,C1
,6), 0 }, /* RO */
3354 { "id_mmfr3_el1", CPENC(3,0,C0
,C1
,7), 0 }, /* RO */
3355 { "id_mmfr4_el1", CPENC(3,0,C0
,C2
,6), 0 }, /* RO */
3356 { "id_isar0_el1", CPENC(3,0,C0
,C2
,0), 0 }, /* RO */
3357 { "id_isar1_el1", CPENC(3,0,C0
,C2
,1), 0 }, /* RO */
3358 { "id_isar2_el1", CPENC(3,0,C0
,C2
,2), 0 }, /* RO */
3359 { "id_isar3_el1", CPENC(3,0,C0
,C2
,3), 0 }, /* RO */
3360 { "id_isar4_el1", CPENC(3,0,C0
,C2
,4), 0 }, /* RO */
3361 { "id_isar5_el1", CPENC(3,0,C0
,C2
,5), 0 }, /* RO */
3362 { "mvfr0_el1", CPENC(3,0,C0
,C3
,0), 0 }, /* RO */
3363 { "mvfr1_el1", CPENC(3,0,C0
,C3
,1), 0 }, /* RO */
3364 { "mvfr2_el1", CPENC(3,0,C0
,C3
,2), 0 }, /* RO */
3365 { "ccsidr_el1", CPENC(3,1,C0
,C0
,0), 0 }, /* RO */
3366 { "id_aa64pfr0_el1", CPENC(3,0,C0
,C4
,0), 0 }, /* RO */
3367 { "id_aa64pfr1_el1", CPENC(3,0,C0
,C4
,1), 0 }, /* RO */
3368 { "id_aa64dfr0_el1", CPENC(3,0,C0
,C5
,0), 0 }, /* RO */
3369 { "id_aa64dfr1_el1", CPENC(3,0,C0
,C5
,1), 0 }, /* RO */
3370 { "id_aa64isar0_el1", CPENC(3,0,C0
,C6
,0), 0 }, /* RO */
3371 { "id_aa64isar1_el1", CPENC(3,0,C0
,C6
,1), 0 }, /* RO */
3372 { "id_aa64mmfr0_el1", CPENC(3,0,C0
,C7
,0), 0 }, /* RO */
3373 { "id_aa64mmfr1_el1", CPENC(3,0,C0
,C7
,1), 0 }, /* RO */
3374 { "id_aa64mmfr2_el1", CPENC (3, 0, C0
, C7
, 2), F_ARCHEXT
}, /* RO */
3375 { "id_aa64afr0_el1", CPENC(3,0,C0
,C5
,4), 0 }, /* RO */
3376 { "id_aa64afr1_el1", CPENC(3,0,C0
,C5
,5), 0 }, /* RO */
3377 { "clidr_el1", CPENC(3,1,C0
,C0
,1), 0 }, /* RO */
3378 { "csselr_el1", CPENC(3,2,C0
,C0
,0), 0 }, /* RO */
3379 { "vpidr_el2", CPENC(3,4,C0
,C0
,0), 0 },
3380 { "vmpidr_el2", CPENC(3,4,C0
,C0
,5), 0 },
3381 { "sctlr_el1", CPENC(3,0,C1
,C0
,0), 0 },
3382 { "sctlr_el2", CPENC(3,4,C1
,C0
,0), 0 },
3383 { "sctlr_el3", CPENC(3,6,C1
,C0
,0), 0 },
3384 { "sctlr_el12", CPENC (3, 5, C1
, C0
, 0), F_ARCHEXT
},
3385 { "actlr_el1", CPENC(3,0,C1
,C0
,1), 0 },
3386 { "actlr_el2", CPENC(3,4,C1
,C0
,1), 0 },
3387 { "actlr_el3", CPENC(3,6,C1
,C0
,1), 0 },
3388 { "cpacr_el1", CPENC(3,0,C1
,C0
,2), 0 },
3389 { "cpacr_el12", CPENC (3, 5, C1
, C0
, 2), F_ARCHEXT
},
3390 { "cptr_el2", CPENC(3,4,C1
,C1
,2), 0 },
3391 { "cptr_el3", CPENC(3,6,C1
,C1
,2), 0 },
3392 { "scr_el3", CPENC(3,6,C1
,C1
,0), 0 },
3393 { "hcr_el2", CPENC(3,4,C1
,C1
,0), 0 },
3394 { "mdcr_el2", CPENC(3,4,C1
,C1
,1), 0 },
3395 { "mdcr_el3", CPENC(3,6,C1
,C3
,1), 0 },
3396 { "hstr_el2", CPENC(3,4,C1
,C1
,3), 0 },
3397 { "hacr_el2", CPENC(3,4,C1
,C1
,7), 0 },
3398 { "ttbr0_el1", CPENC(3,0,C2
,C0
,0), 0 },
3399 { "ttbr1_el1", CPENC(3,0,C2
,C0
,1), 0 },
3400 { "ttbr0_el2", CPENC(3,4,C2
,C0
,0), 0 },
3401 { "ttbr1_el2", CPENC (3, 4, C2
, C0
, 1), F_ARCHEXT
},
3402 { "ttbr0_el3", CPENC(3,6,C2
,C0
,0), 0 },
3403 { "ttbr0_el12", CPENC (3, 5, C2
, C0
, 0), F_ARCHEXT
},
3404 { "ttbr1_el12", CPENC (3, 5, C2
, C0
, 1), F_ARCHEXT
},
3405 { "vttbr_el2", CPENC(3,4,C2
,C1
,0), 0 },
3406 { "tcr_el1", CPENC(3,0,C2
,C0
,2), 0 },
3407 { "tcr_el2", CPENC(3,4,C2
,C0
,2), 0 },
3408 { "tcr_el3", CPENC(3,6,C2
,C0
,2), 0 },
3409 { "tcr_el12", CPENC (3, 5, C2
, C0
, 2), F_ARCHEXT
},
3410 { "vtcr_el2", CPENC(3,4,C2
,C1
,2), 0 },
3411 { "afsr0_el1", CPENC(3,0,C5
,C1
,0), 0 },
3412 { "afsr1_el1", CPENC(3,0,C5
,C1
,1), 0 },
3413 { "afsr0_el2", CPENC(3,4,C5
,C1
,0), 0 },
3414 { "afsr1_el2", CPENC(3,4,C5
,C1
,1), 0 },
3415 { "afsr0_el3", CPENC(3,6,C5
,C1
,0), 0 },
3416 { "afsr0_el12", CPENC (3, 5, C5
, C1
, 0), F_ARCHEXT
},
3417 { "afsr1_el3", CPENC(3,6,C5
,C1
,1), 0 },
3418 { "afsr1_el12", CPENC (3, 5, C5
, C1
, 1), F_ARCHEXT
},
3419 { "esr_el1", CPENC(3,0,C5
,C2
,0), 0 },
3420 { "esr_el2", CPENC(3,4,C5
,C2
,0), 0 },
3421 { "esr_el3", CPENC(3,6,C5
,C2
,0), 0 },
3422 { "esr_el12", CPENC (3, 5, C5
, C2
, 0), F_ARCHEXT
},
3423 { "vsesr_el2", CPENC (3, 4, C5
, C2
, 3), F_ARCHEXT
}, /* RO */
3424 { "fpexc32_el2", CPENC(3,4,C5
,C3
,0), 0 },
3425 { "erridr_el1", CPENC (3, 0, C5
, C3
, 0), F_ARCHEXT
}, /* RO */
3426 { "errselr_el1", CPENC (3, 0, C5
, C3
, 1), F_ARCHEXT
},
3427 { "erxfr_el1", CPENC (3, 0, C5
, C4
, 0), F_ARCHEXT
}, /* RO */
3428 { "erxctlr_el1", CPENC (3, 0, C5
, C4
, 1), F_ARCHEXT
},
3429 { "erxstatus_el1", CPENC (3, 0, C5
, C4
, 2), F_ARCHEXT
},
3430 { "erxaddr_el1", CPENC (3, 0, C5
, C4
, 3), F_ARCHEXT
},
3431 { "erxmisc0_el1", CPENC (3, 0, C5
, C5
, 0), F_ARCHEXT
},
3432 { "erxmisc1_el1", CPENC (3, 0, C5
, C5
, 1), F_ARCHEXT
},
3433 { "far_el1", CPENC(3,0,C6
,C0
,0), 0 },
3434 { "far_el2", CPENC(3,4,C6
,C0
,0), 0 },
3435 { "far_el3", CPENC(3,6,C6
,C0
,0), 0 },
3436 { "far_el12", CPENC (3, 5, C6
, C0
, 0), F_ARCHEXT
},
3437 { "hpfar_el2", CPENC(3,4,C6
,C0
,4), 0 },
3438 { "par_el1", CPENC(3,0,C7
,C4
,0), 0 },
3439 { "mair_el1", CPENC(3,0,C10
,C2
,0), 0 },
3440 { "mair_el2", CPENC(3,4,C10
,C2
,0), 0 },
3441 { "mair_el3", CPENC(3,6,C10
,C2
,0), 0 },
3442 { "mair_el12", CPENC (3, 5, C10
, C2
, 0), F_ARCHEXT
},
3443 { "amair_el1", CPENC(3,0,C10
,C3
,0), 0 },
3444 { "amair_el2", CPENC(3,4,C10
,C3
,0), 0 },
3445 { "amair_el3", CPENC(3,6,C10
,C3
,0), 0 },
3446 { "amair_el12", CPENC (3, 5, C10
, C3
, 0), F_ARCHEXT
},
3447 { "vbar_el1", CPENC(3,0,C12
,C0
,0), 0 },
3448 { "vbar_el2", CPENC(3,4,C12
,C0
,0), 0 },
3449 { "vbar_el3", CPENC(3,6,C12
,C0
,0), 0 },
3450 { "vbar_el12", CPENC (3, 5, C12
, C0
, 0), F_ARCHEXT
},
3451 { "rvbar_el1", CPENC(3,0,C12
,C0
,1), 0 }, /* RO */
3452 { "rvbar_el2", CPENC(3,4,C12
,C0
,1), 0 }, /* RO */
3453 { "rvbar_el3", CPENC(3,6,C12
,C0
,1), 0 }, /* RO */
3454 { "rmr_el1", CPENC(3,0,C12
,C0
,2), 0 },
3455 { "rmr_el2", CPENC(3,4,C12
,C0
,2), 0 },
3456 { "rmr_el3", CPENC(3,6,C12
,C0
,2), 0 },
3457 { "isr_el1", CPENC(3,0,C12
,C1
,0), 0 }, /* RO */
3458 { "disr_el1", CPENC (3, 0, C12
, C1
, 1), F_ARCHEXT
},
3459 { "vdisr_el2", CPENC (3, 4, C12
, C1
, 1), F_ARCHEXT
},
3460 { "contextidr_el1", CPENC(3,0,C13
,C0
,1), 0 },
3461 { "contextidr_el2", CPENC (3, 4, C13
, C0
, 1), F_ARCHEXT
},
3462 { "contextidr_el12", CPENC (3, 5, C13
, C0
, 1), F_ARCHEXT
},
3463 { "tpidr_el0", CPENC(3,3,C13
,C0
,2), 0 },
3464 { "tpidrro_el0", CPENC(3,3,C13
,C0
,3), 0 }, /* RO */
3465 { "tpidr_el1", CPENC(3,0,C13
,C0
,4), 0 },
3466 { "tpidr_el2", CPENC(3,4,C13
,C0
,2), 0 },
3467 { "tpidr_el3", CPENC(3,6,C13
,C0
,2), 0 },
3468 { "teecr32_el1", CPENC(2,2,C0
, C0
,0), 0 }, /* See section 3.9.7.1 */
3469 { "cntfrq_el0", CPENC(3,3,C14
,C0
,0), 0 }, /* RO */
3470 { "cntpct_el0", CPENC(3,3,C14
,C0
,1), 0 }, /* RO */
3471 { "cntvct_el0", CPENC(3,3,C14
,C0
,2), 0 }, /* RO */
3472 { "cntvoff_el2", CPENC(3,4,C14
,C0
,3), 0 },
3473 { "cntkctl_el1", CPENC(3,0,C14
,C1
,0), 0 },
3474 { "cntkctl_el12", CPENC (3, 5, C14
, C1
, 0), F_ARCHEXT
},
3475 { "cnthctl_el2", CPENC(3,4,C14
,C1
,0), 0 },
3476 { "cntp_tval_el0", CPENC(3,3,C14
,C2
,0), 0 },
3477 { "cntp_tval_el02", CPENC (3, 5, C14
, C2
, 0), F_ARCHEXT
},
3478 { "cntp_ctl_el0", CPENC(3,3,C14
,C2
,1), 0 },
3479 { "cntp_ctl_el02", CPENC (3, 5, C14
, C2
, 1), F_ARCHEXT
},
3480 { "cntp_cval_el0", CPENC(3,3,C14
,C2
,2), 0 },
3481 { "cntp_cval_el02", CPENC (3, 5, C14
, C2
, 2), F_ARCHEXT
},
3482 { "cntv_tval_el0", CPENC(3,3,C14
,C3
,0), 0 },
3483 { "cntv_tval_el02", CPENC (3, 5, C14
, C3
, 0), F_ARCHEXT
},
3484 { "cntv_ctl_el0", CPENC(3,3,C14
,C3
,1), 0 },
3485 { "cntv_ctl_el02", CPENC (3, 5, C14
, C3
, 1), F_ARCHEXT
},
3486 { "cntv_cval_el0", CPENC(3,3,C14
,C3
,2), 0 },
3487 { "cntv_cval_el02", CPENC (3, 5, C14
, C3
, 2), F_ARCHEXT
},
3488 { "cnthp_tval_el2", CPENC(3,4,C14
,C2
,0), 0 },
3489 { "cnthp_ctl_el2", CPENC(3,4,C14
,C2
,1), 0 },
3490 { "cnthp_cval_el2", CPENC(3,4,C14
,C2
,2), 0 },
3491 { "cntps_tval_el1", CPENC(3,7,C14
,C2
,0), 0 },
3492 { "cntps_ctl_el1", CPENC(3,7,C14
,C2
,1), 0 },
3493 { "cntps_cval_el1", CPENC(3,7,C14
,C2
,2), 0 },
3494 { "cnthv_tval_el2", CPENC (3, 4, C14
, C3
, 0), F_ARCHEXT
},
3495 { "cnthv_ctl_el2", CPENC (3, 4, C14
, C3
, 1), F_ARCHEXT
},
3496 { "cnthv_cval_el2", CPENC (3, 4, C14
, C3
, 2), F_ARCHEXT
},
3497 { "dacr32_el2", CPENC(3,4,C3
,C0
,0), 0 },
3498 { "ifsr32_el2", CPENC(3,4,C5
,C0
,1), 0 },
3499 { "teehbr32_el1", CPENC(2,2,C1
,C0
,0), 0 },
3500 { "sder32_el3", CPENC(3,6,C1
,C1
,1), 0 },
3501 { "mdscr_el1", CPENC(2,0,C0
, C2
, 2), 0 },
3502 { "mdccsr_el0", CPENC(2,3,C0
, C1
, 0), 0 }, /* r */
3503 { "mdccint_el1", CPENC(2,0,C0
, C2
, 0), 0 },
3504 { "dbgdtr_el0", CPENC(2,3,C0
, C4
, 0), 0 },
3505 { "dbgdtrrx_el0", CPENC(2,3,C0
, C5
, 0), 0 }, /* r */
3506 { "dbgdtrtx_el0", CPENC(2,3,C0
, C5
, 0), 0 }, /* w */
3507 { "osdtrrx_el1", CPENC(2,0,C0
, C0
, 2), 0 }, /* r */
3508 { "osdtrtx_el1", CPENC(2,0,C0
, C3
, 2), 0 }, /* w */
3509 { "oseccr_el1", CPENC(2,0,C0
, C6
, 2), 0 },
3510 { "dbgvcr32_el2", CPENC(2,4,C0
, C7
, 0), 0 },
3511 { "dbgbvr0_el1", CPENC(2,0,C0
, C0
, 4), 0 },
3512 { "dbgbvr1_el1", CPENC(2,0,C0
, C1
, 4), 0 },
3513 { "dbgbvr2_el1", CPENC(2,0,C0
, C2
, 4), 0 },
3514 { "dbgbvr3_el1", CPENC(2,0,C0
, C3
, 4), 0 },
3515 { "dbgbvr4_el1", CPENC(2,0,C0
, C4
, 4), 0 },
3516 { "dbgbvr5_el1", CPENC(2,0,C0
, C5
, 4), 0 },
3517 { "dbgbvr6_el1", CPENC(2,0,C0
, C6
, 4), 0 },
3518 { "dbgbvr7_el1", CPENC(2,0,C0
, C7
, 4), 0 },
3519 { "dbgbvr8_el1", CPENC(2,0,C0
, C8
, 4), 0 },
3520 { "dbgbvr9_el1", CPENC(2,0,C0
, C9
, 4), 0 },
3521 { "dbgbvr10_el1", CPENC(2,0,C0
, C10
,4), 0 },
3522 { "dbgbvr11_el1", CPENC(2,0,C0
, C11
,4), 0 },
3523 { "dbgbvr12_el1", CPENC(2,0,C0
, C12
,4), 0 },
3524 { "dbgbvr13_el1", CPENC(2,0,C0
, C13
,4), 0 },
3525 { "dbgbvr14_el1", CPENC(2,0,C0
, C14
,4), 0 },
3526 { "dbgbvr15_el1", CPENC(2,0,C0
, C15
,4), 0 },
3527 { "dbgbcr0_el1", CPENC(2,0,C0
, C0
, 5), 0 },
3528 { "dbgbcr1_el1", CPENC(2,0,C0
, C1
, 5), 0 },
3529 { "dbgbcr2_el1", CPENC(2,0,C0
, C2
, 5), 0 },
3530 { "dbgbcr3_el1", CPENC(2,0,C0
, C3
, 5), 0 },
3531 { "dbgbcr4_el1", CPENC(2,0,C0
, C4
, 5), 0 },
3532 { "dbgbcr5_el1", CPENC(2,0,C0
, C5
, 5), 0 },
3533 { "dbgbcr6_el1", CPENC(2,0,C0
, C6
, 5), 0 },
3534 { "dbgbcr7_el1", CPENC(2,0,C0
, C7
, 5), 0 },
3535 { "dbgbcr8_el1", CPENC(2,0,C0
, C8
, 5), 0 },
3536 { "dbgbcr9_el1", CPENC(2,0,C0
, C9
, 5), 0 },
3537 { "dbgbcr10_el1", CPENC(2,0,C0
, C10
,5), 0 },
3538 { "dbgbcr11_el1", CPENC(2,0,C0
, C11
,5), 0 },
3539 { "dbgbcr12_el1", CPENC(2,0,C0
, C12
,5), 0 },
3540 { "dbgbcr13_el1", CPENC(2,0,C0
, C13
,5), 0 },
3541 { "dbgbcr14_el1", CPENC(2,0,C0
, C14
,5), 0 },
3542 { "dbgbcr15_el1", CPENC(2,0,C0
, C15
,5), 0 },
3543 { "dbgwvr0_el1", CPENC(2,0,C0
, C0
, 6), 0 },
3544 { "dbgwvr1_el1", CPENC(2,0,C0
, C1
, 6), 0 },
3545 { "dbgwvr2_el1", CPENC(2,0,C0
, C2
, 6), 0 },
3546 { "dbgwvr3_el1", CPENC(2,0,C0
, C3
, 6), 0 },
3547 { "dbgwvr4_el1", CPENC(2,0,C0
, C4
, 6), 0 },
3548 { "dbgwvr5_el1", CPENC(2,0,C0
, C5
, 6), 0 },
3549 { "dbgwvr6_el1", CPENC(2,0,C0
, C6
, 6), 0 },
3550 { "dbgwvr7_el1", CPENC(2,0,C0
, C7
, 6), 0 },
3551 { "dbgwvr8_el1", CPENC(2,0,C0
, C8
, 6), 0 },
3552 { "dbgwvr9_el1", CPENC(2,0,C0
, C9
, 6), 0 },
3553 { "dbgwvr10_el1", CPENC(2,0,C0
, C10
,6), 0 },
3554 { "dbgwvr11_el1", CPENC(2,0,C0
, C11
,6), 0 },
3555 { "dbgwvr12_el1", CPENC(2,0,C0
, C12
,6), 0 },
3556 { "dbgwvr13_el1", CPENC(2,0,C0
, C13
,6), 0 },
3557 { "dbgwvr14_el1", CPENC(2,0,C0
, C14
,6), 0 },
3558 { "dbgwvr15_el1", CPENC(2,0,C0
, C15
,6), 0 },
3559 { "dbgwcr0_el1", CPENC(2,0,C0
, C0
, 7), 0 },
3560 { "dbgwcr1_el1", CPENC(2,0,C0
, C1
, 7), 0 },
3561 { "dbgwcr2_el1", CPENC(2,0,C0
, C2
, 7), 0 },
3562 { "dbgwcr3_el1", CPENC(2,0,C0
, C3
, 7), 0 },
3563 { "dbgwcr4_el1", CPENC(2,0,C0
, C4
, 7), 0 },
3564 { "dbgwcr5_el1", CPENC(2,0,C0
, C5
, 7), 0 },
3565 { "dbgwcr6_el1", CPENC(2,0,C0
, C6
, 7), 0 },
3566 { "dbgwcr7_el1", CPENC(2,0,C0
, C7
, 7), 0 },
3567 { "dbgwcr8_el1", CPENC(2,0,C0
, C8
, 7), 0 },
3568 { "dbgwcr9_el1", CPENC(2,0,C0
, C9
, 7), 0 },
3569 { "dbgwcr10_el1", CPENC(2,0,C0
, C10
,7), 0 },
3570 { "dbgwcr11_el1", CPENC(2,0,C0
, C11
,7), 0 },
3571 { "dbgwcr12_el1", CPENC(2,0,C0
, C12
,7), 0 },
3572 { "dbgwcr13_el1", CPENC(2,0,C0
, C13
,7), 0 },
3573 { "dbgwcr14_el1", CPENC(2,0,C0
, C14
,7), 0 },
3574 { "dbgwcr15_el1", CPENC(2,0,C0
, C15
,7), 0 },
3575 { "mdrar_el1", CPENC(2,0,C1
, C0
, 0), 0 }, /* r */
3576 { "oslar_el1", CPENC(2,0,C1
, C0
, 4), 0 }, /* w */
3577 { "oslsr_el1", CPENC(2,0,C1
, C1
, 4), 0 }, /* r */
3578 { "osdlr_el1", CPENC(2,0,C1
, C3
, 4), 0 },
3579 { "dbgprcr_el1", CPENC(2,0,C1
, C4
, 4), 0 },
3580 { "dbgclaimset_el1", CPENC(2,0,C7
, C8
, 6), 0 },
3581 { "dbgclaimclr_el1", CPENC(2,0,C7
, C9
, 6), 0 },
3582 { "dbgauthstatus_el1", CPENC(2,0,C7
, C14
,6), 0 }, /* r */
3583 { "pmblimitr_el1", CPENC (3, 0, C9
, C10
, 0), F_ARCHEXT
}, /* rw */
3584 { "pmbptr_el1", CPENC (3, 0, C9
, C10
, 1), F_ARCHEXT
}, /* rw */
3585 { "pmbsr_el1", CPENC (3, 0, C9
, C10
, 3), F_ARCHEXT
}, /* rw */
3586 { "pmbidr_el1", CPENC (3, 0, C9
, C10
, 7), F_ARCHEXT
}, /* ro */
3587 { "pmscr_el1", CPENC (3, 0, C9
, C9
, 0), F_ARCHEXT
}, /* rw */
3588 { "pmsicr_el1", CPENC (3, 0, C9
, C9
, 2), F_ARCHEXT
}, /* rw */
3589 { "pmsirr_el1", CPENC (3, 0, C9
, C9
, 3), F_ARCHEXT
}, /* rw */
3590 { "pmsfcr_el1", CPENC (3, 0, C9
, C9
, 4), F_ARCHEXT
}, /* rw */
3591 { "pmsevfr_el1", CPENC (3, 0, C9
, C9
, 5), F_ARCHEXT
}, /* rw */
3592 { "pmslatfr_el1", CPENC (3, 0, C9
, C9
, 6), F_ARCHEXT
}, /* rw */
3593 { "pmsidr_el1", CPENC (3, 0, C9
, C9
, 7), F_ARCHEXT
}, /* ro */
3594 { "pmscr_el2", CPENC (3, 4, C9
, C9
, 0), F_ARCHEXT
}, /* rw */
3595 { "pmscr_el12", CPENC (3, 5, C9
, C9
, 0), F_ARCHEXT
}, /* rw */
3596 { "pmcr_el0", CPENC(3,3,C9
,C12
, 0), 0 },
3597 { "pmcntenset_el0", CPENC(3,3,C9
,C12
, 1), 0 },
3598 { "pmcntenclr_el0", CPENC(3,3,C9
,C12
, 2), 0 },
3599 { "pmovsclr_el0", CPENC(3,3,C9
,C12
, 3), 0 },
3600 { "pmswinc_el0", CPENC(3,3,C9
,C12
, 4), 0 }, /* w */
3601 { "pmselr_el0", CPENC(3,3,C9
,C12
, 5), 0 },
3602 { "pmceid0_el0", CPENC(3,3,C9
,C12
, 6), 0 }, /* r */
3603 { "pmceid1_el0", CPENC(3,3,C9
,C12
, 7), 0 }, /* r */
3604 { "pmccntr_el0", CPENC(3,3,C9
,C13
, 0), 0 },
3605 { "pmxevtyper_el0", CPENC(3,3,C9
,C13
, 1), 0 },
3606 { "pmxevcntr_el0", CPENC(3,3,C9
,C13
, 2), 0 },
3607 { "pmuserenr_el0", CPENC(3,3,C9
,C14
, 0), 0 },
3608 { "pmintenset_el1", CPENC(3,0,C9
,C14
, 1), 0 },
3609 { "pmintenclr_el1", CPENC(3,0,C9
,C14
, 2), 0 },
3610 { "pmovsset_el0", CPENC(3,3,C9
,C14
, 3), 0 },
3611 { "pmevcntr0_el0", CPENC(3,3,C14
,C8
, 0), 0 },
3612 { "pmevcntr1_el0", CPENC(3,3,C14
,C8
, 1), 0 },
3613 { "pmevcntr2_el0", CPENC(3,3,C14
,C8
, 2), 0 },
3614 { "pmevcntr3_el0", CPENC(3,3,C14
,C8
, 3), 0 },
3615 { "pmevcntr4_el0", CPENC(3,3,C14
,C8
, 4), 0 },
3616 { "pmevcntr5_el0", CPENC(3,3,C14
,C8
, 5), 0 },
3617 { "pmevcntr6_el0", CPENC(3,3,C14
,C8
, 6), 0 },
3618 { "pmevcntr7_el0", CPENC(3,3,C14
,C8
, 7), 0 },
3619 { "pmevcntr8_el0", CPENC(3,3,C14
,C9
, 0), 0 },
3620 { "pmevcntr9_el0", CPENC(3,3,C14
,C9
, 1), 0 },
3621 { "pmevcntr10_el0", CPENC(3,3,C14
,C9
, 2), 0 },
3622 { "pmevcntr11_el0", CPENC(3,3,C14
,C9
, 3), 0 },
3623 { "pmevcntr12_el0", CPENC(3,3,C14
,C9
, 4), 0 },
3624 { "pmevcntr13_el0", CPENC(3,3,C14
,C9
, 5), 0 },
3625 { "pmevcntr14_el0", CPENC(3,3,C14
,C9
, 6), 0 },
3626 { "pmevcntr15_el0", CPENC(3,3,C14
,C9
, 7), 0 },
3627 { "pmevcntr16_el0", CPENC(3,3,C14
,C10
,0), 0 },
3628 { "pmevcntr17_el0", CPENC(3,3,C14
,C10
,1), 0 },
3629 { "pmevcntr18_el0", CPENC(3,3,C14
,C10
,2), 0 },
3630 { "pmevcntr19_el0", CPENC(3,3,C14
,C10
,3), 0 },
3631 { "pmevcntr20_el0", CPENC(3,3,C14
,C10
,4), 0 },
3632 { "pmevcntr21_el0", CPENC(3,3,C14
,C10
,5), 0 },
3633 { "pmevcntr22_el0", CPENC(3,3,C14
,C10
,6), 0 },
3634 { "pmevcntr23_el0", CPENC(3,3,C14
,C10
,7), 0 },
3635 { "pmevcntr24_el0", CPENC(3,3,C14
,C11
,0), 0 },
3636 { "pmevcntr25_el0", CPENC(3,3,C14
,C11
,1), 0 },
3637 { "pmevcntr26_el0", CPENC(3,3,C14
,C11
,2), 0 },
3638 { "pmevcntr27_el0", CPENC(3,3,C14
,C11
,3), 0 },
3639 { "pmevcntr28_el0", CPENC(3,3,C14
,C11
,4), 0 },
3640 { "pmevcntr29_el0", CPENC(3,3,C14
,C11
,5), 0 },
3641 { "pmevcntr30_el0", CPENC(3,3,C14
,C11
,6), 0 },
3642 { "pmevtyper0_el0", CPENC(3,3,C14
,C12
,0), 0 },
3643 { "pmevtyper1_el0", CPENC(3,3,C14
,C12
,1), 0 },
3644 { "pmevtyper2_el0", CPENC(3,3,C14
,C12
,2), 0 },
3645 { "pmevtyper3_el0", CPENC(3,3,C14
,C12
,3), 0 },
3646 { "pmevtyper4_el0", CPENC(3,3,C14
,C12
,4), 0 },
3647 { "pmevtyper5_el0", CPENC(3,3,C14
,C12
,5), 0 },
3648 { "pmevtyper6_el0", CPENC(3,3,C14
,C12
,6), 0 },
3649 { "pmevtyper7_el0", CPENC(3,3,C14
,C12
,7), 0 },
3650 { "pmevtyper8_el0", CPENC(3,3,C14
,C13
,0), 0 },
3651 { "pmevtyper9_el0", CPENC(3,3,C14
,C13
,1), 0 },
3652 { "pmevtyper10_el0", CPENC(3,3,C14
,C13
,2), 0 },
3653 { "pmevtyper11_el0", CPENC(3,3,C14
,C13
,3), 0 },
3654 { "pmevtyper12_el0", CPENC(3,3,C14
,C13
,4), 0 },
3655 { "pmevtyper13_el0", CPENC(3,3,C14
,C13
,5), 0 },
3656 { "pmevtyper14_el0", CPENC(3,3,C14
,C13
,6), 0 },
3657 { "pmevtyper15_el0", CPENC(3,3,C14
,C13
,7), 0 },
3658 { "pmevtyper16_el0", CPENC(3,3,C14
,C14
,0), 0 },
3659 { "pmevtyper17_el0", CPENC(3,3,C14
,C14
,1), 0 },
3660 { "pmevtyper18_el0", CPENC(3,3,C14
,C14
,2), 0 },
3661 { "pmevtyper19_el0", CPENC(3,3,C14
,C14
,3), 0 },
3662 { "pmevtyper20_el0", CPENC(3,3,C14
,C14
,4), 0 },
3663 { "pmevtyper21_el0", CPENC(3,3,C14
,C14
,5), 0 },
3664 { "pmevtyper22_el0", CPENC(3,3,C14
,C14
,6), 0 },
3665 { "pmevtyper23_el0", CPENC(3,3,C14
,C14
,7), 0 },
3666 { "pmevtyper24_el0", CPENC(3,3,C14
,C15
,0), 0 },
3667 { "pmevtyper25_el0", CPENC(3,3,C14
,C15
,1), 0 },
3668 { "pmevtyper26_el0", CPENC(3,3,C14
,C15
,2), 0 },
3669 { "pmevtyper27_el0", CPENC(3,3,C14
,C15
,3), 0 },
3670 { "pmevtyper28_el0", CPENC(3,3,C14
,C15
,4), 0 },
3671 { "pmevtyper29_el0", CPENC(3,3,C14
,C15
,5), 0 },
3672 { "pmevtyper30_el0", CPENC(3,3,C14
,C15
,6), 0 },
3673 { "pmccfiltr_el0", CPENC(3,3,C14
,C15
,7), 0 },
3674 { 0, CPENC(0,0,0,0,0), 0 },
3678 aarch64_sys_reg_deprecated_p (const aarch64_sys_reg
*reg
)
3680 return (reg
->flags
& F_DEPRECATED
) != 0;
3684 aarch64_sys_reg_supported_p (const aarch64_feature_set features
,
3685 const aarch64_sys_reg
*reg
)
3687 if (!(reg
->flags
& F_ARCHEXT
))
3690 /* PAN. Values are from aarch64_sys_regs. */
3691 if (reg
->value
== CPEN_(0,C2
,3)
3692 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_PAN
))
3695 /* Virtualization host extensions: system registers. */
3696 if ((reg
->value
== CPENC (3, 4, C2
, C0
, 1)
3697 || reg
->value
== CPENC (3, 4, C13
, C0
, 1)
3698 || reg
->value
== CPENC (3, 4, C14
, C3
, 0)
3699 || reg
->value
== CPENC (3, 4, C14
, C3
, 1)
3700 || reg
->value
== CPENC (3, 4, C14
, C3
, 2))
3701 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_1
))
3704 /* Virtualization host extensions: *_el12 names of *_el1 registers. */
3705 if ((reg
->value
== CPEN_ (5, C0
, 0)
3706 || reg
->value
== CPEN_ (5, C0
, 1)
3707 || reg
->value
== CPENC (3, 5, C1
, C0
, 0)
3708 || reg
->value
== CPENC (3, 5, C1
, C0
, 2)
3709 || reg
->value
== CPENC (3, 5, C2
, C0
, 0)
3710 || reg
->value
== CPENC (3, 5, C2
, C0
, 1)
3711 || reg
->value
== CPENC (3, 5, C2
, C0
, 2)
3712 || reg
->value
== CPENC (3, 5, C5
, C1
, 0)
3713 || reg
->value
== CPENC (3, 5, C5
, C1
, 1)
3714 || reg
->value
== CPENC (3, 5, C5
, C2
, 0)
3715 || reg
->value
== CPENC (3, 5, C6
, C0
, 0)
3716 || reg
->value
== CPENC (3, 5, C10
, C2
, 0)
3717 || reg
->value
== CPENC (3, 5, C10
, C3
, 0)
3718 || reg
->value
== CPENC (3, 5, C12
, C0
, 0)
3719 || reg
->value
== CPENC (3, 5, C13
, C0
, 1)
3720 || reg
->value
== CPENC (3, 5, C14
, C1
, 0))
3721 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_1
))
3724 /* Virtualization host extensions: *_el02 names of *_el0 registers. */
3725 if ((reg
->value
== CPENC (3, 5, C14
, C2
, 0)
3726 || reg
->value
== CPENC (3, 5, C14
, C2
, 1)
3727 || reg
->value
== CPENC (3, 5, C14
, C2
, 2)
3728 || reg
->value
== CPENC (3, 5, C14
, C3
, 0)
3729 || reg
->value
== CPENC (3, 5, C14
, C3
, 1)
3730 || reg
->value
== CPENC (3, 5, C14
, C3
, 2))
3731 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_1
))
3734 /* ARMv8.2 features. */
3736 /* ID_AA64MMFR2_EL1. */
3737 if (reg
->value
== CPENC (3, 0, C0
, C7
, 2)
3738 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_2
))
3742 if (reg
->value
== CPEN_ (0, C2
, 4)
3743 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_2
))
3746 /* RAS extension. */
3748 /* ERRIDR_EL1, ERRSELR_EL1, ERXFR_EL1, ERXCTLR_EL1, ERXSTATUS_EL, ERXADDR_EL1,
3749 ERXMISC0_EL1 AND ERXMISC1_EL1. */
3750 if ((reg
->value
== CPENC (3, 0, C5
, C3
, 0)
3751 || reg
->value
== CPENC (3, 0, C5
, C3
, 1)
3752 || reg
->value
== CPENC (3, 0, C5
, C3
, 2)
3753 || reg
->value
== CPENC (3, 0, C5
, C3
, 3)
3754 || reg
->value
== CPENC (3, 0, C5
, C4
, 0)
3755 || reg
->value
== CPENC (3, 0, C5
, C4
, 1)
3756 || reg
->value
== CPENC (3, 0, C5
, C4
, 2)
3757 || reg
->value
== CPENC (3, 0, C5
, C4
, 3)
3758 || reg
->value
== CPENC (3, 0, C5
, C5
, 0)
3759 || reg
->value
== CPENC (3, 0, C5
, C5
, 1))
3760 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_RAS
))
3763 /* VSESR_EL2, DISR_EL1 and VDISR_EL2. */
3764 if ((reg
->value
== CPENC (3, 4, C5
, C2
, 3)
3765 || reg
->value
== CPENC (3, 0, C12
, C1
, 1)
3766 || reg
->value
== CPENC (3, 4, C12
, C1
, 1))
3767 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_RAS
))
3770 /* Statistical Profiling extension. */
3771 if ((reg
->value
== CPENC (3, 0, C9
, C10
, 0)
3772 || reg
->value
== CPENC (3, 0, C9
, C10
, 1)
3773 || reg
->value
== CPENC (3, 0, C9
, C10
, 3)
3774 || reg
->value
== CPENC (3, 0, C9
, C10
, 7)
3775 || reg
->value
== CPENC (3, 0, C9
, C9
, 0)
3776 || reg
->value
== CPENC (3, 0, C9
, C9
, 2)
3777 || reg
->value
== CPENC (3, 0, C9
, C9
, 3)
3778 || reg
->value
== CPENC (3, 0, C9
, C9
, 4)
3779 || reg
->value
== CPENC (3, 0, C9
, C9
, 5)
3780 || reg
->value
== CPENC (3, 0, C9
, C9
, 6)
3781 || reg
->value
== CPENC (3, 0, C9
, C9
, 7)
3782 || reg
->value
== CPENC (3, 4, C9
, C9
, 0)
3783 || reg
->value
== CPENC (3, 5, C9
, C9
, 0))
3784 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_PROFILE
))
3790 const aarch64_sys_reg aarch64_pstatefields
[] =
3792 { "spsel", 0x05, 0 },
3793 { "daifset", 0x1e, 0 },
3794 { "daifclr", 0x1f, 0 },
3795 { "pan", 0x04, F_ARCHEXT
},
3796 { "uao", 0x03, F_ARCHEXT
},
3797 { 0, CPENC(0,0,0,0,0), 0 },
3801 aarch64_pstatefield_supported_p (const aarch64_feature_set features
,
3802 const aarch64_sys_reg
*reg
)
3804 if (!(reg
->flags
& F_ARCHEXT
))
3807 /* PAN. Values are from aarch64_pstatefields. */
3808 if (reg
->value
== 0x04
3809 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_PAN
))
3812 /* UAO. Values are from aarch64_pstatefields. */
3813 if (reg
->value
== 0x03
3814 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_2
))
3820 const aarch64_sys_ins_reg aarch64_sys_regs_ic
[] =
3822 { "ialluis", CPENS(0,C7
,C1
,0), 0 },
3823 { "iallu", CPENS(0,C7
,C5
,0), 0 },
3824 { "ivau", CPENS (3, C7
, C5
, 1), F_HASXT
},
3825 { 0, CPENS(0,0,0,0), 0 }
3828 const aarch64_sys_ins_reg aarch64_sys_regs_dc
[] =
3830 { "zva", CPENS (3, C7
, C4
, 1), F_HASXT
},
3831 { "ivac", CPENS (0, C7
, C6
, 1), F_HASXT
},
3832 { "isw", CPENS (0, C7
, C6
, 2), F_HASXT
},
3833 { "cvac", CPENS (3, C7
, C10
, 1), F_HASXT
},
3834 { "csw", CPENS (0, C7
, C10
, 2), F_HASXT
},
3835 { "cvau", CPENS (3, C7
, C11
, 1), F_HASXT
},
3836 { "cvap", CPENS (3, C7
, C12
, 1), F_HASXT
| F_ARCHEXT
},
3837 { "civac", CPENS (3, C7
, C14
, 1), F_HASXT
},
3838 { "cisw", CPENS (0, C7
, C14
, 2), F_HASXT
},
3839 { 0, CPENS(0,0,0,0), 0 }
3842 const aarch64_sys_ins_reg aarch64_sys_regs_at
[] =
3844 { "s1e1r", CPENS (0, C7
, C8
, 0), F_HASXT
},
3845 { "s1e1w", CPENS (0, C7
, C8
, 1), F_HASXT
},
3846 { "s1e0r", CPENS (0, C7
, C8
, 2), F_HASXT
},
3847 { "s1e0w", CPENS (0, C7
, C8
, 3), F_HASXT
},
3848 { "s12e1r", CPENS (4, C7
, C8
, 4), F_HASXT
},
3849 { "s12e1w", CPENS (4, C7
, C8
, 5), F_HASXT
},
3850 { "s12e0r", CPENS (4, C7
, C8
, 6), F_HASXT
},
3851 { "s12e0w", CPENS (4, C7
, C8
, 7), F_HASXT
},
3852 { "s1e2r", CPENS (4, C7
, C8
, 0), F_HASXT
},
3853 { "s1e2w", CPENS (4, C7
, C8
, 1), F_HASXT
},
3854 { "s1e3r", CPENS (6, C7
, C8
, 0), F_HASXT
},
3855 { "s1e3w", CPENS (6, C7
, C8
, 1), F_HASXT
},
3856 { "s1e1rp", CPENS (0, C7
, C9
, 0), F_HASXT
| F_ARCHEXT
},
3857 { "s1e1wp", CPENS (0, C7
, C9
, 1), F_HASXT
| F_ARCHEXT
},
3858 { 0, CPENS(0,0,0,0), 0 }
3861 const aarch64_sys_ins_reg aarch64_sys_regs_tlbi
[] =
3863 { "vmalle1", CPENS(0,C8
,C7
,0), 0 },
3864 { "vae1", CPENS (0, C8
, C7
, 1), F_HASXT
},
3865 { "aside1", CPENS (0, C8
, C7
, 2), F_HASXT
},
3866 { "vaae1", CPENS (0, C8
, C7
, 3), F_HASXT
},
3867 { "vmalle1is", CPENS(0,C8
,C3
,0), 0 },
3868 { "vae1is", CPENS (0, C8
, C3
, 1), F_HASXT
},
3869 { "aside1is", CPENS (0, C8
, C3
, 2), F_HASXT
},
3870 { "vaae1is", CPENS (0, C8
, C3
, 3), F_HASXT
},
3871 { "ipas2e1is", CPENS (4, C8
, C0
, 1), F_HASXT
},
3872 { "ipas2le1is",CPENS (4, C8
, C0
, 5), F_HASXT
},
3873 { "ipas2e1", CPENS (4, C8
, C4
, 1), F_HASXT
},
3874 { "ipas2le1", CPENS (4, C8
, C4
, 5), F_HASXT
},
3875 { "vae2", CPENS (4, C8
, C7
, 1), F_HASXT
},
3876 { "vae2is", CPENS (4, C8
, C3
, 1), F_HASXT
},
3877 { "vmalls12e1",CPENS(4,C8
,C7
,6), 0 },
3878 { "vmalls12e1is",CPENS(4,C8
,C3
,6), 0 },
3879 { "vae3", CPENS (6, C8
, C7
, 1), F_HASXT
},
3880 { "vae3is", CPENS (6, C8
, C3
, 1), F_HASXT
},
3881 { "alle2", CPENS(4,C8
,C7
,0), 0 },
3882 { "alle2is", CPENS(4,C8
,C3
,0), 0 },
3883 { "alle1", CPENS(4,C8
,C7
,4), 0 },
3884 { "alle1is", CPENS(4,C8
,C3
,4), 0 },
3885 { "alle3", CPENS(6,C8
,C7
,0), 0 },
3886 { "alle3is", CPENS(6,C8
,C3
,0), 0 },
3887 { "vale1is", CPENS (0, C8
, C3
, 5), F_HASXT
},
3888 { "vale2is", CPENS (4, C8
, C3
, 5), F_HASXT
},
3889 { "vale3is", CPENS (6, C8
, C3
, 5), F_HASXT
},
3890 { "vaale1is", CPENS (0, C8
, C3
, 7), F_HASXT
},
3891 { "vale1", CPENS (0, C8
, C7
, 5), F_HASXT
},
3892 { "vale2", CPENS (4, C8
, C7
, 5), F_HASXT
},
3893 { "vale3", CPENS (6, C8
, C7
, 5), F_HASXT
},
3894 { "vaale1", CPENS (0, C8
, C7
, 7), F_HASXT
},
3895 { 0, CPENS(0,0,0,0), 0 }
3899 aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg
*sys_ins_reg
)
3901 return (sys_ins_reg
->flags
& F_HASXT
) != 0;
3905 aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features
,
3906 const aarch64_sys_ins_reg
*reg
)
3908 if (!(reg
->flags
& F_ARCHEXT
))
3911 /* DC CVAP. Values are from aarch64_sys_regs_dc. */
3912 if (reg
->value
== CPENS (3, C7
, C12
, 1)
3913 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_2
))
3916 /* AT S1E1RP, AT S1E1WP. Values are from aarch64_sys_regs_at. */
3917 if ((reg
->value
== CPENS (0, C7
, C9
, 0)
3918 || reg
->value
== CPENS (0, C7
, C9
, 1))
3919 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_2
))
3942 #define BIT(INSN,BT) (((INSN) >> (BT)) & 1)
3943 #define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1 << (((HI) - (LO)) + 1)) - 1))
3946 verify_ldpsw (const struct aarch64_opcode
* opcode ATTRIBUTE_UNUSED
,
3947 const aarch64_insn insn
)
3949 int t
= BITS (insn
, 4, 0);
3950 int n
= BITS (insn
, 9, 5);
3951 int t2
= BITS (insn
, 14, 10);
3955 /* Write back enabled. */
3956 if ((t
== n
|| t2
== n
) && n
!= 31)
/* Include the opcode description table as well as the operand description
   table.  */
3972 #define VERIFIER(x) verify_##x
3973 #include "aarch64-tbl.h"