1 /* aarch64-opc.c -- AArch64 opcode support.
2 Copyright (C) 2009-2023 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of the GNU opcodes library.
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
30 #include "libiberty.h"
32 #include "aarch64-opc.h"
35 int debug_dump
= false;
36 #endif /* DEBUG_AARCH64 */
38 /* The enumeration strings associated with each value of a 5-bit SVE
39 pattern operand. A null entry indicates a reserved meaning. */
40 const char *const aarch64_sve_pattern_array
[32] = {
79 /* The enumeration strings associated with each value of a 4-bit SVE
80 prefetch operand. A null entry indicates a reserved meaning. */
81 const char *const aarch64_sve_prfop_array
[16] = {
102 /* The enumeration strings associated with each value of a 6-bit RPRFM
104 const char *const aarch64_rprfmop_array
[64] = {
113 /* Vector length multiples for a predicate-as-counter operand. Used in things
114 like AARCH64_OPND_SME_VLxN_10. */
115 const char *const aarch64_sme_vlxn_array
[2] = {
120 /* Helper functions to determine which operand to be used to encode/decode
121 the size:Q fields for AdvSIMD instructions. */
124 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier
)
126 return (qualifier
>= AARCH64_OPND_QLF_V_8B
127 && qualifier
<= AARCH64_OPND_QLF_V_1Q
);
131 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier
)
133 return (qualifier
>= AARCH64_OPND_QLF_S_B
134 && qualifier
<= AARCH64_OPND_QLF_S_Q
);
143 DP_VECTOR_ACROSS_LANES
,
/* Operand index (within an instruction's operand list) of the operand
   whose qualifier is significant for encoding/decoding the size:Q
   fields; indexed by enum data_pattern.  Keep in sync with that enum.  */
static const char significant_operand_index[] =
{
  0,	/* DP_UNKNOWN, by default using operand 0.  */
  0,	/* DP_VECTOR_3SAME */
  1,	/* DP_VECTOR_LONG */
  2,	/* DP_VECTOR_WIDE */
  1,	/* DP_VECTOR_ACROSS_LANES */
};
155 /* Given a sequence of qualifiers in QUALIFIERS, determine and return
157 N.B. QUALIFIERS is a possible sequence of qualifiers each of which
158 corresponds to one of a sequence of operands. */
160 static enum data_pattern
161 get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers
)
163 if (vector_qualifier_p (qualifiers
[0]))
165 /* e.g. v.4s, v.4s, v.4s
166 or v.4h, v.4h, v.h[3]. */
167 if (qualifiers
[0] == qualifiers
[1]
168 && vector_qualifier_p (qualifiers
[2])
169 && (aarch64_get_qualifier_esize (qualifiers
[0])
170 == aarch64_get_qualifier_esize (qualifiers
[1]))
171 && (aarch64_get_qualifier_esize (qualifiers
[0])
172 == aarch64_get_qualifier_esize (qualifiers
[2])))
173 return DP_VECTOR_3SAME
;
174 /* e.g. v.8h, v.8b, v.8b.
175 or v.4s, v.4h, v.h[2].
177 if (vector_qualifier_p (qualifiers
[1])
178 && aarch64_get_qualifier_esize (qualifiers
[0]) != 0
179 && (aarch64_get_qualifier_esize (qualifiers
[0])
180 == aarch64_get_qualifier_esize (qualifiers
[1]) << 1))
181 return DP_VECTOR_LONG
;
182 /* e.g. v.8h, v.8h, v.8b. */
183 if (qualifiers
[0] == qualifiers
[1]
184 && vector_qualifier_p (qualifiers
[2])
185 && aarch64_get_qualifier_esize (qualifiers
[0]) != 0
186 && (aarch64_get_qualifier_esize (qualifiers
[0])
187 == aarch64_get_qualifier_esize (qualifiers
[2]) << 1)
188 && (aarch64_get_qualifier_esize (qualifiers
[0])
189 == aarch64_get_qualifier_esize (qualifiers
[1])))
190 return DP_VECTOR_WIDE
;
192 else if (fp_qualifier_p (qualifiers
[0]))
194 /* e.g. SADDLV <V><d>, <Vn>.<T>. */
195 if (vector_qualifier_p (qualifiers
[1])
196 && qualifiers
[2] == AARCH64_OPND_QLF_NIL
)
197 return DP_VECTOR_ACROSS_LANES
;
203 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
204 the AdvSIMD instructions. */
205 /* N.B. it is possible to do some optimization that doesn't call
206 get_data_pattern each time when we need to select an operand. We can
207 either buffer the caculated the result or statically generate the data,
208 however, it is not obvious that the optimization will bring significant
212 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode
*opcode
)
215 significant_operand_index
[get_data_pattern (opcode
->qualifiers_list
[0])];
218 /* Instruction bit-fields.
219 + Keep synced with 'enum aarch64_field_kind'. */
220 const aarch64_field fields
[] =
223 { 8, 4 }, /* CRm: in the system instructions. */
224 { 10, 2 }, /* CRm_dsb_nxs: 2-bit imm. encoded in CRm<3:2>. */
225 { 12, 4 }, /* CRn: in the system instructions. */
226 { 10, 8 }, /* CSSC_imm8. */
227 { 11, 1 }, /* H: in advsimd scalar x indexed element instructions. */
228 { 21, 1 }, /* L: in advsimd scalar x indexed element instructions. */
229 { 0, 5 }, /* LSE128_Rt: Shared input+output operand register. */
230 { 16, 5 }, /* LSE128_Rt2: Shared input+output operand register 2. */
231 { 20, 1 }, /* M: in advsimd scalar x indexed element instructions. */
232 { 22, 1 }, /* N: in logical (immediate) instructions. */
233 { 30, 1 }, /* Q: in most AdvSIMD instructions. */
234 { 10, 5 }, /* Ra: in fp instructions. */
235 { 0, 5 }, /* Rd: in many integer instructions. */
236 { 16, 5 }, /* Rm: in ld/st reg offset and some integer inst. */
237 { 5, 5 }, /* Rn: in many integer instructions. */
238 { 16, 5 }, /* Rs: in load/store exclusive instructions. */
239 { 0, 5 }, /* Rt: in load/store instructions. */
240 { 10, 5 }, /* Rt2: in load/store pair instructions. */
241 { 12, 1 }, /* S: in load/store reg offset instructions. */
242 { 12, 2 }, /* SM3_imm2: Indexed element SM3 2 bits index immediate. */
243 { 1, 3 }, /* SME_Pdx2: predicate register, multiple of 2, [3:1]. */
244 { 13, 3 }, /* SME_Pm: second source scalable predicate register P0-P7. */
245 { 0, 3 }, /* SME_PNd3: PN0-PN7, bits [2:0]. */
246 { 5, 3 }, /* SME_PNn3: PN0-PN7, bits [7:5]. */
247 { 16, 1 }, /* SME_Q: Q class bit, bit 16. */
248 { 16, 2 }, /* SME_Rm: index base register W12-W15 [17:16]. */
249 { 13, 2 }, /* SME_Rv: vector select register W12-W15, bits [14:13]. */
250 { 15, 1 }, /* SME_V: (horizontal / vertical tiles), bit 15. */
251 { 10, 1 }, /* SME_VL_10: VLx2 or VLx4, bit [10]. */
252 { 13, 1 }, /* SME_VL_13: VLx2 or VLx4, bit [13]. */
253 { 0, 2 }, /* SME_ZAda_2b: tile ZA0-ZA3. */
254 { 0, 3 }, /* SME_ZAda_3b: tile ZA0-ZA7. */
255 { 1, 4 }, /* SME_Zdn2: Z0-Z31, multiple of 2, bits [4:1]. */
256 { 2, 3 }, /* SME_Zdn4: Z0-Z31, multiple of 4, bits [4:2]. */
257 { 16, 4 }, /* SME_Zm: Z0-Z15, bits [19:16]. */
258 { 17, 4 }, /* SME_Zm2: Z0-Z31, multiple of 2, bits [20:17]. */
259 { 18, 3 }, /* SME_Zm4: Z0-Z31, multiple of 4, bits [20:18]. */
260 { 6, 4 }, /* SME_Zn2: Z0-Z31, multiple of 2, bits [9:6]. */
261 { 7, 3 }, /* SME_Zn4: Z0-Z31, multiple of 4, bits [9:7]. */
262 { 4, 1 }, /* SME_ZtT: upper bit of Zt, bit [4]. */
263 { 0, 3 }, /* SME_Zt3: lower 3 bits of Zt, bits [2:0]. */
264 { 0, 2 }, /* SME_Zt2: lower 2 bits of Zt, bits [1:0]. */
265 { 23, 1 }, /* SME_i1: immediate field, bit 23. */
266 { 12, 2 }, /* SME_size_12: bits [13:12]. */
267 { 22, 2 }, /* SME_size_22: size<1>, size<0> class field, [23:22]. */
268 { 23, 1 }, /* SME_sz_23: bit [23]. */
269 { 22, 1 }, /* SME_tszh: immediate and qualifier field, bit 22. */
270 { 18, 3 }, /* SME_tszl: immediate and qualifier field, bits [20:18]. */
271 { 0, 8 }, /* SME_zero_mask: list of up to 8 tile names separated by commas [7:0]. */
272 { 4, 1 }, /* SVE_M_4: Merge/zero select, bit 4. */
273 { 14, 1 }, /* SVE_M_14: Merge/zero select, bit 14. */
274 { 16, 1 }, /* SVE_M_16: Merge/zero select, bit 16. */
275 { 17, 1 }, /* SVE_N: SVE equivalent of N. */
276 { 0, 4 }, /* SVE_Pd: p0-p15, bits [3,0]. */
277 { 10, 3 }, /* SVE_Pg3: p0-p7, bits [12,10]. */
278 { 5, 4 }, /* SVE_Pg4_5: p0-p15, bits [8,5]. */
279 { 10, 4 }, /* SVE_Pg4_10: p0-p15, bits [13,10]. */
280 { 16, 4 }, /* SVE_Pg4_16: p0-p15, bits [19,16]. */
281 { 16, 4 }, /* SVE_Pm: p0-p15, bits [19,16]. */
282 { 5, 4 }, /* SVE_Pn: p0-p15, bits [8,5]. */
283 { 0, 4 }, /* SVE_Pt: p0-p15, bits [3,0]. */
284 { 5, 5 }, /* SVE_Rm: SVE alternative position for Rm. */
285 { 16, 5 }, /* SVE_Rn: SVE alternative position for Rn. */
286 { 0, 5 }, /* SVE_Vd: Scalar SIMD&FP register, bits [4,0]. */
287 { 5, 5 }, /* SVE_Vm: Scalar SIMD&FP register, bits [9,5]. */
288 { 5, 5 }, /* SVE_Vn: Scalar SIMD&FP register, bits [9,5]. */
289 { 5, 5 }, /* SVE_Za_5: SVE vector register, bits [9,5]. */
290 { 16, 5 }, /* SVE_Za_16: SVE vector register, bits [20,16]. */
291 { 0, 5 }, /* SVE_Zd: SVE vector register. bits [4,0]. */
292 { 5, 5 }, /* SVE_Zm_5: SVE vector register, bits [9,5]. */
293 { 16, 5 }, /* SVE_Zm_16: SVE vector register, bits [20,16]. */
294 { 5, 5 }, /* SVE_Zn: SVE vector register, bits [9,5]. */
295 { 0, 5 }, /* SVE_Zt: SVE vector register, bits [4,0]. */
296 { 5, 1 }, /* SVE_i1: single-bit immediate. */
297 { 20, 1 }, /* SVE_i2h: high bit of 2bit immediate, bits. */
298 { 22, 1 }, /* SVE_i3h: high bit of 3-bit immediate. */
299 { 19, 2 }, /* SVE_i3h2: two high bits of 3bit immediate, bits [20,19]. */
300 { 11, 1 }, /* SVE_i3l: low bit of 3-bit immediate. */
301 { 16, 3 }, /* SVE_imm3: 3-bit immediate field. */
302 { 16, 4 }, /* SVE_imm4: 4-bit immediate field. */
303 { 5, 5 }, /* SVE_imm5: 5-bit immediate field. */
304 { 16, 5 }, /* SVE_imm5b: secondary 5-bit immediate field. */
305 { 16, 6 }, /* SVE_imm6: 6-bit immediate field. */
306 { 14, 7 }, /* SVE_imm7: 7-bit immediate field. */
307 { 5, 8 }, /* SVE_imm8: 8-bit immediate field. */
308 { 5, 9 }, /* SVE_imm9: 9-bit immediate field. */
309 { 11, 6 }, /* SVE_immr: SVE equivalent of immr. */
310 { 5, 6 }, /* SVE_imms: SVE equivalent of imms. */
311 { 10, 2 }, /* SVE_msz: 2-bit shift amount for ADR. */
312 { 5, 5 }, /* SVE_pattern: vector pattern enumeration. */
313 { 0, 4 }, /* SVE_prfop: prefetch operation for SVE PRF[BHWD]. */
314 { 16, 1 }, /* SVE_rot1: 1-bit rotation amount. */
315 { 10, 2 }, /* SVE_rot2: 2-bit rotation amount. */
316 { 10, 1 }, /* SVE_rot3: 1-bit rotation amount at bit 10. */
317 { 17, 2 }, /* SVE_size: 2-bit element size, bits [18,17]. */
318 { 22, 1 }, /* SVE_sz: 1-bit element size select. */
319 { 30, 1 }, /* SVE_sz2: 1-bit element size select. */
320 { 16, 4 }, /* SVE_tsz: triangular size select. */
321 { 22, 2 }, /* SVE_tszh: triangular size select high, bits [23,22]. */
322 { 8, 2 }, /* SVE_tszl_8: triangular size select low, bits [9,8]. */
323 { 19, 2 }, /* SVE_tszl_19: triangular size select low, bits [20,19]. */
324 { 14, 1 }, /* SVE_xs_14: UXTW/SXTW select (bit 14). */
325 { 22, 1 }, /* SVE_xs_22: UXTW/SXTW select (bit 22). */
326 { 22, 1 }, /* S_imm10: in LDRAA and LDRAB instructions. */
327 { 16, 3 }, /* abc: a:b:c bits in AdvSIMD modified immediate. */
328 { 13, 3 }, /* asisdlso_opcode: opcode in advsimd ld/st single element. */
329 { 19, 5 }, /* b40: in the test bit and branch instructions. */
330 { 31, 1 }, /* b5: in the test bit and branch instructions. */
331 { 12, 4 }, /* cmode: in advsimd modified immediate instructions. */
332 { 12, 4 }, /* cond: condition flags as a source operand. */
333 { 0, 4 }, /* cond2: condition in truly conditional-executed inst. */
334 { 5, 5 }, /* defgh: d:e:f:g:h bits in AdvSIMD modified immediate. */
335 { 21, 2 }, /* hw: in move wide constant instructions. */
336 { 0, 1 }, /* imm1_0: general immediate in bits [0]. */
337 { 2, 1 }, /* imm1_2: general immediate in bits [2]. */
338 { 8, 1 }, /* imm1_8: general immediate in bits [8]. */
339 { 10, 1 }, /* imm1_10: general immediate in bits [10]. */
340 { 15, 1 }, /* imm1_15: general immediate in bits [15]. */
341 { 16, 1 }, /* imm1_16: general immediate in bits [16]. */
342 { 0, 2 }, /* imm2_0: general immediate in bits [1:0]. */
343 { 1, 2 }, /* imm2_1: general immediate in bits [2:1]. */
344 { 8, 2 }, /* imm2_8: general immediate in bits [9:8]. */
345 { 10, 2 }, /* imm2_10: 2-bit immediate, bits [11:10] */
346 { 12, 2 }, /* imm2_12: 2-bit immediate, bits [13:12] */
347 { 15, 2 }, /* imm2_15: 2-bit immediate, bits [16:15] */
348 { 16, 2 }, /* imm2_16: 2-bit immediate, bits [17:16] */
349 { 19, 2 }, /* imm2_19: 2-bit immediate, bits [20:19] */
350 { 0, 3 }, /* imm3_0: general immediate in bits [2:0]. */
351 { 5, 3 }, /* imm3_5: general immediate in bits [7:5]. */
352 { 10, 3 }, /* imm3_10: in add/sub extended reg instructions. */
353 { 12, 3 }, /* imm3_12: general immediate in bits [14:12]. */
354 { 14, 3 }, /* imm3_14: general immediate in bits [16:14]. */
355 { 15, 3 }, /* imm3_15: general immediate in bits [17:15]. */
356 { 0, 4 }, /* imm4_0: in rmif instructions. */
357 { 5, 4 }, /* imm4_5: in SME instructions. */
358 { 10, 4 }, /* imm4_10: in adddg/subg instructions. */
359 { 11, 4 }, /* imm4_11: in advsimd ext and advsimd ins instructions. */
360 { 14, 4 }, /* imm4_14: general immediate in bits [17:14]. */
361 { 16, 5 }, /* imm5: in conditional compare (immediate) instructions. */
362 { 10, 6 }, /* imm6_10: in add/sub reg shifted instructions. */
363 { 15, 6 }, /* imm6_15: in rmif instructions. */
364 { 15, 7 }, /* imm7: in load/store pair pre/post index instructions. */
365 { 13, 8 }, /* imm8: in floating-point scalar move immediate inst. */
366 { 12, 9 }, /* imm9: in load/store pre/post index instructions. */
367 { 10, 12 }, /* imm12: in ld/st unsigned imm or add/sub shifted inst. */
368 { 5, 14 }, /* imm14: in test bit and branch instructions. */
369 { 0, 16 }, /* imm16_0: in udf instruction. */
370 { 5, 16 }, /* imm16_5: in exception instructions. */
371 { 5, 19 }, /* imm19: e.g. in CBZ. */
372 { 0, 26 }, /* imm26: in unconditional branch instructions. */
373 { 16, 3 }, /* immb: in advsimd shift by immediate instructions. */
374 { 19, 4 }, /* immh: in advsimd shift by immediate instructions. */
375 { 5, 19 }, /* immhi: e.g. in ADRP. */
376 { 29, 2 }, /* immlo: e.g. in ADRP. */
377 { 16, 6 }, /* immr: in bitfield and logical immediate instructions. */
378 { 10, 6 }, /* imms: in bitfield and logical immediate instructions. */
379 { 11, 1 }, /* index: in ld/st inst deciding the pre/post-index. */
380 { 24, 1 }, /* index2: in ld/st pair inst deciding the pre/post-index. */
381 { 30, 2 }, /* ldst_size: size field in ld/st reg offset inst. */
382 { 13, 2 }, /* len: in advsimd tbl/tbx instructions. */
383 { 30, 1 }, /* lse_sz: in LSE extension atomic instructions. */
384 { 0, 4 }, /* nzcv: flag bit specifier, encoded in the "nzcv" field. */
385 { 29, 1 }, /* op: in AdvSIMD modified immediate instructions. */
386 { 19, 2 }, /* op0: in the system instructions. */
387 { 16, 3 }, /* op1: in the system instructions. */
388 { 5, 3 }, /* op2: in the system instructions. */
389 { 22, 2 }, /* opc: in load/store reg offset instructions. */
390 { 23, 1 }, /* opc1: in load/store reg offset instructions. */
391 { 12, 4 }, /* opcode: in advsimd load/store instructions. */
392 { 13, 3 }, /* option: in ld/st reg offset + add/sub extended reg inst. */
393 { 11, 2 }, /* rotate1: FCMLA immediate rotate. */
394 { 13, 2 }, /* rotate2: Indexed element FCMLA immediate rotate. */
395 { 12, 1 }, /* rotate3: FCADD immediate rotate. */
396 { 10, 6 }, /* scale: in the fixed-point scalar to fp converting inst. */
397 { 31, 1 }, /* sf: in integer data processing instructions. */
398 { 22, 2 }, /* shift: in add/sub reg/imm shifted instructions. */
399 { 22, 2 }, /* size: in most AdvSIMD and floating-point instructions. */
400 { 22, 1 }, /* sz: 1-bit element size select. */
401 { 22, 2 }, /* type: floating point type field in fp data inst. */
402 { 10, 2 }, /* vldst_size: size field in the AdvSIMD load/store inst. */
405 enum aarch64_operand_class
406 aarch64_get_operand_class (enum aarch64_opnd type
)
408 return aarch64_operands
[type
].op_class
;
412 aarch64_get_operand_name (enum aarch64_opnd type
)
414 return aarch64_operands
[type
].name
;
417 /* Get operand description string.
418 This is usually for the diagnosis purpose. */
420 aarch64_get_operand_desc (enum aarch64_opnd type
)
422 return aarch64_operands
[type
].desc
;
425 /* Table of all conditional affixes. */
426 const aarch64_cond aarch64_conds
[16] =
428 {{"eq", "none"}, 0x0},
429 {{"ne", "any"}, 0x1},
430 {{"cs", "hs", "nlast"}, 0x2},
431 {{"cc", "lo", "ul", "last"}, 0x3},
432 {{"mi", "first"}, 0x4},
433 {{"pl", "nfrst"}, 0x5},
436 {{"hi", "pmore"}, 0x8},
437 {{"ls", "plast"}, 0x9},
438 {{"ge", "tcont"}, 0xa},
439 {{"lt", "tstop"}, 0xb},
447 get_cond_from_value (aarch64_insn value
)
450 return &aarch64_conds
[(unsigned int) value
];
454 get_inverted_cond (const aarch64_cond
*cond
)
456 return &aarch64_conds
[cond
->value
^ 0x1];
459 /* Table describing the operand extension/shifting operators; indexed by
460 enum aarch64_modifier_kind.
462 The value column provides the most common values for encoding modifiers,
463 which enables table-driven encoding/decoding for the modifiers. */
464 const struct aarch64_name_value_pair aarch64_operand_modifiers
[] =
485 enum aarch64_modifier_kind
486 aarch64_get_operand_modifier (const struct aarch64_name_value_pair
*desc
)
488 return desc
- aarch64_operand_modifiers
;
492 aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind
)
494 return aarch64_operand_modifiers
[kind
].value
;
497 enum aarch64_modifier_kind
498 aarch64_get_operand_modifier_from_value (aarch64_insn value
,
502 return AARCH64_MOD_UXTB
+ value
;
504 return AARCH64_MOD_LSL
- value
;
508 aarch64_extend_operator_p (enum aarch64_modifier_kind kind
)
510 return kind
> AARCH64_MOD_LSL
&& kind
<= AARCH64_MOD_SXTX
;
514 aarch64_shift_operator_p (enum aarch64_modifier_kind kind
)
516 return kind
>= AARCH64_MOD_ROR
&& kind
<= AARCH64_MOD_LSL
;
519 const struct aarch64_name_value_pair aarch64_barrier_options
[16] =
539 const struct aarch64_name_value_pair aarch64_barrier_dsb_nxs_options
[4] =
540 { /* CRm<3:2> #imm */
541 { "oshnxs", 16 }, /* 00 16 */
542 { "nshnxs", 20 }, /* 01 20 */
543 { "ishnxs", 24 }, /* 10 24 */
544 { "synxs", 28 }, /* 11 28 */
547 /* Table describing the operands supported by the aliases of the HINT
550 The name column is the operand that is accepted for the alias. The value
551 column is the hint number of the alias. The list of operands is terminated
552 by NULL in the name column. */
554 const struct aarch64_name_value_pair aarch64_hint_options
[] =
556 /* BTI. This is also the F_DEFAULT entry for AARCH64_OPND_BTI_TARGET. */
557 { " ", HINT_ENCODE (HINT_OPD_F_NOPRINT
, 0x20) },
558 { "csync", HINT_OPD_CSYNC
}, /* PSB CSYNC. */
559 { "dsync", HINT_OPD_DSYNC
}, /* GCSB DSYNC. */
560 { "c", HINT_OPD_C
}, /* BTI C. */
561 { "j", HINT_OPD_J
}, /* BTI J. */
562 { "jc", HINT_OPD_JC
}, /* BTI JC. */
563 { NULL
, HINT_OPD_NULL
},
566 /* op -> op: load = 0 instruction = 1 store = 2
568 t -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1 */
569 #define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
570 const struct aarch64_name_value_pair aarch64_prfops
[32] =
572 { "pldl1keep", B(0, 1, 0) },
573 { "pldl1strm", B(0, 1, 1) },
574 { "pldl2keep", B(0, 2, 0) },
575 { "pldl2strm", B(0, 2, 1) },
576 { "pldl3keep", B(0, 3, 0) },
577 { "pldl3strm", B(0, 3, 1) },
578 { "pldslckeep", B(0, 4, 0) },
579 { "pldslcstrm", B(0, 4, 1) },
580 { "plil1keep", B(1, 1, 0) },
581 { "plil1strm", B(1, 1, 1) },
582 { "plil2keep", B(1, 2, 0) },
583 { "plil2strm", B(1, 2, 1) },
584 { "plil3keep", B(1, 3, 0) },
585 { "plil3strm", B(1, 3, 1) },
586 { "plislckeep", B(1, 4, 0) },
587 { "plislcstrm", B(1, 4, 1) },
588 { "pstl1keep", B(2, 1, 0) },
589 { "pstl1strm", B(2, 1, 1) },
590 { "pstl2keep", B(2, 2, 0) },
591 { "pstl2strm", B(2, 2, 1) },
592 { "pstl3keep", B(2, 3, 0) },
593 { "pstl3strm", B(2, 3, 1) },
594 { "pstslckeep", B(2, 4, 0) },
595 { "pstslcstrm", B(2, 4, 1) },
/* Utilities on value constraint.  */

/* Return 1 if VALUE lies in the closed interval [LOW, HIGH], 0
   otherwise.  */
static int
value_in_range_p (int64_t value, int low, int high)
{
  return value >= low && value <= high;
}
/* Return true if VALUE is a multiple of ALIGN.  ALIGN must be
   non-zero; zero is considered aligned to any ALIGN.  */
static int
value_aligned_p (int64_t value, int align)
{
  return (value % align) == 0;
}
/* A signed value fits in a field.
   Return true if VALUE is representable as a two's-complement signed
   integer of WIDTH bits, i.e. -2^(WIDTH-1) <= VALUE < 2^(WIDTH-1).  */
static bool
value_fit_signed_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width < sizeof (value) * 8)
    {
      /* Shift in uint64_t to avoid UB for large widths.  */
      int64_t lim = (uint64_t) 1 << (width - 1);
      if (value >= -lim && value < lim)
	return true;
    }
  return false;
}
/* An unsigned value fits in a field.
   Return true if VALUE is representable as an unsigned integer of
   WIDTH bits, i.e. 0 <= VALUE < 2^WIDTH.  */
static bool
value_fit_unsigned_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width < sizeof (value) * 8)
    {
      /* Shift in uint64_t to avoid UB for large widths.  */
      int64_t lim = (uint64_t) 1 << width;
      if (value >= 0 && value < lim)
	return true;
    }
  return false;
}
650 /* Return 1 if OPERAND is SP or WSP. */
652 aarch64_stack_pointer_p (const aarch64_opnd_info
*operand
)
654 return ((aarch64_get_operand_class (operand
->type
)
655 == AARCH64_OPND_CLASS_INT_REG
)
656 && operand_maybe_stack_pointer (aarch64_operands
+ operand
->type
)
657 && operand
->reg
.regno
== 31);
660 /* Return 1 if OPERAND is XZR or WZP. */
662 aarch64_zero_register_p (const aarch64_opnd_info
*operand
)
664 return ((aarch64_get_operand_class (operand
->type
)
665 == AARCH64_OPND_CLASS_INT_REG
)
666 && !operand_maybe_stack_pointer (aarch64_operands
+ operand
->type
)
667 && operand
->reg
.regno
== 31);
670 /* Return true if the operand *OPERAND that has the operand code
671 OPERAND->TYPE and been qualified by OPERAND->QUALIFIER can be also
672 qualified by the qualifier TARGET. */
675 operand_also_qualified_p (const struct aarch64_opnd_info
*operand
,
676 aarch64_opnd_qualifier_t target
)
678 switch (operand
->qualifier
)
680 case AARCH64_OPND_QLF_W
:
681 if (target
== AARCH64_OPND_QLF_WSP
&& aarch64_stack_pointer_p (operand
))
684 case AARCH64_OPND_QLF_X
:
685 if (target
== AARCH64_OPND_QLF_SP
&& aarch64_stack_pointer_p (operand
))
688 case AARCH64_OPND_QLF_WSP
:
689 if (target
== AARCH64_OPND_QLF_W
690 && operand_maybe_stack_pointer (aarch64_operands
+ operand
->type
))
693 case AARCH64_OPND_QLF_SP
:
694 if (target
== AARCH64_OPND_QLF_X
695 && operand_maybe_stack_pointer (aarch64_operands
+ operand
->type
))
705 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
706 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
708 Return NIL if more than one expected qualifiers are found. */
710 aarch64_opnd_qualifier_t
711 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t
*qseq_list
,
713 const aarch64_opnd_qualifier_t known_qlf
,
720 When the known qualifier is NIL, we have to assume that there is only
721 one qualifier sequence in the *QSEQ_LIST and return the corresponding
722 qualifier directly. One scenario is that for instruction
723 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
724 which has only one possible valid qualifier sequence
726 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
727 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
729 Because the qualifier NIL has dual roles in the qualifier sequence:
730 it can mean no qualifier for the operand, or the qualifer sequence is
731 not in use (when all qualifiers in the sequence are NILs), we have to
732 handle this special case here. */
733 if (known_qlf
== AARCH64_OPND_NIL
)
735 assert (qseq_list
[0][known_idx
] == AARCH64_OPND_NIL
);
736 return qseq_list
[0][idx
];
739 for (i
= 0, saved_i
= -1; i
< AARCH64_MAX_QLF_SEQ_NUM
; ++i
)
741 if (qseq_list
[i
][known_idx
] == known_qlf
)
744 /* More than one sequences are found to have KNOWN_QLF at
746 return AARCH64_OPND_NIL
;
751 return qseq_list
[saved_i
][idx
];
754 enum operand_qualifier_kind
762 /* Operand qualifier description. */
763 struct operand_qualifier_data
765 /* The usage of the three data fields depends on the qualifier kind. */
772 enum operand_qualifier_kind kind
;
775 /* Indexed by the operand qualifier enumerators. */
776 struct operand_qualifier_data aarch64_opnd_qualifiers
[] =
778 {0, 0, 0, "NIL", OQK_NIL
},
780 /* Operand variant qualifiers.
782 element size, number of elements and common value for encoding. */
784 {4, 1, 0x0, "w", OQK_OPD_VARIANT
},
785 {8, 1, 0x1, "x", OQK_OPD_VARIANT
},
786 {4, 1, 0x0, "wsp", OQK_OPD_VARIANT
},
787 {8, 1, 0x1, "sp", OQK_OPD_VARIANT
},
789 {1, 1, 0x0, "b", OQK_OPD_VARIANT
},
790 {2, 1, 0x1, "h", OQK_OPD_VARIANT
},
791 {4, 1, 0x2, "s", OQK_OPD_VARIANT
},
792 {8, 1, 0x3, "d", OQK_OPD_VARIANT
},
793 {16, 1, 0x4, "q", OQK_OPD_VARIANT
},
794 {4, 1, 0x0, "4b", OQK_OPD_VARIANT
},
795 {4, 1, 0x0, "2h", OQK_OPD_VARIANT
},
797 {1, 4, 0x0, "4b", OQK_OPD_VARIANT
},
798 {1, 8, 0x0, "8b", OQK_OPD_VARIANT
},
799 {1, 16, 0x1, "16b", OQK_OPD_VARIANT
},
800 {2, 2, 0x0, "2h", OQK_OPD_VARIANT
},
801 {2, 4, 0x2, "4h", OQK_OPD_VARIANT
},
802 {2, 8, 0x3, "8h", OQK_OPD_VARIANT
},
803 {4, 2, 0x4, "2s", OQK_OPD_VARIANT
},
804 {4, 4, 0x5, "4s", OQK_OPD_VARIANT
},
805 {8, 1, 0x6, "1d", OQK_OPD_VARIANT
},
806 {8, 2, 0x7, "2d", OQK_OPD_VARIANT
},
807 {16, 1, 0x8, "1q", OQK_OPD_VARIANT
},
809 {0, 0, 0, "z", OQK_OPD_VARIANT
},
810 {0, 0, 0, "m", OQK_OPD_VARIANT
},
812 /* Qualifier for scaled immediate for Tag granule (stg,st2g,etc). */
813 {16, 0, 0, "tag", OQK_OPD_VARIANT
},
815 /* Qualifiers constraining the value range.
817 Lower bound, higher bound, unused. */
819 {0, 15, 0, "CR", OQK_VALUE_IN_RANGE
},
820 {0, 7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE
},
821 {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE
},
822 {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE
},
823 {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE
},
824 {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE
},
825 {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE
},
827 /* Qualifiers for miscellaneous purpose.
829 unused, unused and unused. */
834 {0, 0, 0, "retrieving", 0},
838 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier
)
840 return aarch64_opnd_qualifiers
[qualifier
].kind
== OQK_OPD_VARIANT
;
844 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier
)
846 return aarch64_opnd_qualifiers
[qualifier
].kind
== OQK_VALUE_IN_RANGE
;
850 aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier
)
852 return aarch64_opnd_qualifiers
[qualifier
].desc
;
855 /* Given an operand qualifier, return the expected data element size
856 of a qualified operand. */
858 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier
)
860 assert (operand_variant_qualifier_p (qualifier
));
861 return aarch64_opnd_qualifiers
[qualifier
].data0
;
865 aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier
)
867 assert (operand_variant_qualifier_p (qualifier
));
868 return aarch64_opnd_qualifiers
[qualifier
].data1
;
872 aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier
)
874 assert (operand_variant_qualifier_p (qualifier
));
875 return aarch64_opnd_qualifiers
[qualifier
].data2
;
879 get_lower_bound (aarch64_opnd_qualifier_t qualifier
)
881 assert (qualifier_value_in_range_constraint_p (qualifier
));
882 return aarch64_opnd_qualifiers
[qualifier
].data0
;
886 get_upper_bound (aarch64_opnd_qualifier_t qualifier
)
888 assert (qualifier_value_in_range_constraint_p (qualifier
));
889 return aarch64_opnd_qualifiers
[qualifier
].data1
;
894 aarch64_verbose (const char *str
, ...)
905 dump_qualifier_sequence (const aarch64_opnd_qualifier_t
*qualifier
)
909 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
, ++qualifier
)
910 printf ("%s,", aarch64_get_qualifier_name (*qualifier
));
915 dump_match_qualifiers (const struct aarch64_opnd_info
*opnd
,
916 const aarch64_opnd_qualifier_t
*qualifier
)
919 aarch64_opnd_qualifier_t curr
[AARCH64_MAX_OPND_NUM
];
921 aarch64_verbose ("dump_match_qualifiers:");
922 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
923 curr
[i
] = opnd
[i
].qualifier
;
924 dump_qualifier_sequence (curr
);
925 aarch64_verbose ("against");
926 dump_qualifier_sequence (qualifier
);
928 #endif /* DEBUG_AARCH64 */
930 /* This function checks if the given instruction INSN is a destructive
931 instruction based on the usage of the registers. It does not recognize
932 unary destructive instructions. */
934 aarch64_is_destructive_by_operands (const aarch64_opcode
*opcode
)
937 const enum aarch64_opnd
*opnds
= opcode
->operands
;
939 if (opnds
[0] == AARCH64_OPND_NIL
)
942 while (opnds
[++i
] != AARCH64_OPND_NIL
)
943 if (opnds
[i
] == opnds
[0])
949 /* TODO improve this, we can have an extra field at the runtime to
950 store the number of operands rather than calculating it every time. */
953 aarch64_num_of_operands (const aarch64_opcode
*opcode
)
956 const enum aarch64_opnd
*opnds
= opcode
->operands
;
957 while (opnds
[i
++] != AARCH64_OPND_NIL
)
960 assert (i
>= 0 && i
<= AARCH64_MAX_OPND_NUM
);
964 /* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
965 If succeeds, fill the found sequence in *RET, return 1; otherwise return 0.
967 Store the smallest number of non-matching qualifiers in *INVALID_COUNT.
968 This is always 0 if the function succeeds.
970 N.B. on the entry, it is very likely that only some operands in *INST
971 have had their qualifiers been established.
973 If STOP_AT is not -1, the function will only try to match
974 the qualifier sequence for operands before and including the operand
975 of index STOP_AT; and on success *RET will only be filled with the first
976 (STOP_AT+1) qualifiers.
978 A couple examples of the matching algorithm:
986 Apart from serving the main encoding routine, this can also be called
987 during or after the operand decoding. */
990 aarch64_find_best_match (const aarch64_inst
*inst
,
991 const aarch64_opnd_qualifier_seq_t
*qualifiers_list
,
992 int stop_at
, aarch64_opnd_qualifier_t
*ret
,
995 int i
, num_opnds
, invalid
, min_invalid
;
996 const aarch64_opnd_qualifier_t
*qualifiers
;
998 num_opnds
= aarch64_num_of_operands (inst
->opcode
);
1001 DEBUG_TRACE ("SUCCEED: no operand");
1006 if (stop_at
< 0 || stop_at
>= num_opnds
)
1007 stop_at
= num_opnds
- 1;
1009 /* For each pattern. */
1010 min_invalid
= num_opnds
;
1011 for (i
= 0; i
< AARCH64_MAX_QLF_SEQ_NUM
; ++i
, ++qualifiers_list
)
1014 qualifiers
= *qualifiers_list
;
1016 /* Start as positive. */
1019 DEBUG_TRACE ("%d", i
);
1020 #ifdef DEBUG_AARCH64
1022 dump_match_qualifiers (inst
->operands
, qualifiers
);
1025 /* The first entry should be taken literally, even if it's an empty
1026 qualifier sequence. (This matters for strict testing.) In other
1027 positions an empty sequence acts as a terminator. */
1028 if (i
> 0 && empty_qualifier_sequence_p (qualifiers
))
1031 for (j
= 0; j
< num_opnds
&& j
<= stop_at
; ++j
, ++qualifiers
)
1033 if (inst
->operands
[j
].qualifier
== AARCH64_OPND_QLF_NIL
1034 && !(inst
->opcode
->flags
& F_STRICT
))
1036 /* Either the operand does not have qualifier, or the qualifier
1037 for the operand needs to be deduced from the qualifier
1039 In the latter case, any constraint checking related with
1040 the obtained qualifier should be done later in
1041 operand_general_constraint_met_p. */
1044 else if (*qualifiers
!= inst
->operands
[j
].qualifier
)
1046 /* Unless the target qualifier can also qualify the operand
1047 (which has already had a non-nil qualifier), non-equal
1048 qualifiers are generally un-matched. */
1049 if (operand_also_qualified_p (inst
->operands
+ j
, *qualifiers
))
1055 continue; /* Equal qualifiers are certainly matched. */
1058 if (min_invalid
> invalid
)
1059 min_invalid
= invalid
;
1061 /* Qualifiers established. */
1062 if (min_invalid
== 0)
1066 *invalid_count
= min_invalid
;
1067 if (min_invalid
== 0)
1069 /* Fill the result in *RET. */
1071 qualifiers
= *qualifiers_list
;
1073 DEBUG_TRACE ("complete qualifiers using list %d", i
);
1074 #ifdef DEBUG_AARCH64
1076 dump_qualifier_sequence (qualifiers
);
1079 for (j
= 0; j
<= stop_at
; ++j
, ++qualifiers
)
1080 ret
[j
] = *qualifiers
;
1081 for (; j
< AARCH64_MAX_OPND_NUM
; ++j
)
1082 ret
[j
] = AARCH64_OPND_QLF_NIL
;
1084 DEBUG_TRACE ("SUCCESS");
1088 DEBUG_TRACE ("FAIL");
1092 /* Operand qualifier matching and resolving.
1094 Return 1 if the operand qualifier(s) in *INST match one of the qualifier
1095 sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
1097 Store the smallest number of non-matching qualifiers in *INVALID_COUNT.
1098 This is always 0 if the function succeeds.
1100 if UPDATE_P, update the qualifier(s) in *INST after the matching
1104 match_operands_qualifier (aarch64_inst
*inst
, bool update_p
,
1108 aarch64_opnd_qualifier_seq_t qualifiers
;
1110 if (!aarch64_find_best_match (inst
, inst
->opcode
->qualifiers_list
, -1,
1111 qualifiers
, invalid_count
))
1113 DEBUG_TRACE ("matching FAIL");
1117 /* Update the qualifiers. */
1119 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
1121 if (inst
->opcode
->operands
[i
] == AARCH64_OPND_NIL
)
1123 DEBUG_TRACE_IF (inst
->operands
[i
].qualifier
!= qualifiers
[i
],
1124 "update %s with %s for operand %d",
1125 aarch64_get_qualifier_name (inst
->operands
[i
].qualifier
),
1126 aarch64_get_qualifier_name (qualifiers
[i
]), i
);
1127 inst
->operands
[i
].qualifier
= qualifiers
[i
];
1130 DEBUG_TRACE ("matching SUCCESS");
/* Return TRUE if VALUE is a wide constant that can be moved into a general
   purpose register (i.e. is usable in a MOVZ/MOVN immediate).

   IS32 indicates whether value is a 32-bit immediate or not.
   If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
   amount will be returned in *SHIFT_AMOUNT.  */

bool
aarch64_wide_constant_p (uint64_t value, int is32, unsigned int *shift_amount)
{
  int amount;

  DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value);

  if (is32)
    {
      /* Allow all zeros or all ones in top 32-bits, so that
	 32-bit constant expressions like ~0x80000000 are
	 permitted.  */
      if (value >> 32 != 0 && value >> 32 != 0xffffffff)
	/* Immediate out of range.  */
	return false;
      value &= 0xffffffff;
    }

  /* first, try movz then movn */
  amount = -1;
  if ((value & ((uint64_t) 0xffff << 0)) == value)
    amount = 0;
  else if ((value & ((uint64_t) 0xffff << 16)) == value)
    amount = 16;
  else if (!is32 && (value & ((uint64_t) 0xffff << 32)) == value)
    amount = 32;
  else if (!is32 && (value & ((uint64_t) 0xffff << 48)) == value)
    amount = 48;

  if (amount == -1)
    {
      DEBUG_TRACE ("exit false with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
      return false;
    }

  if (shift_amount != NULL)
    *shift_amount = amount;

  DEBUG_TRACE ("exit true with amount %d", amount);
  return true;
}
1184 /* Build the accepted values for immediate logical SIMD instructions.
1186 The standard encodings of the immediate value are:
1187 N imms immr SIMD size R S
1188 1 ssssss rrrrrr 64 UInt(rrrrrr) UInt(ssssss)
1189 0 0sssss 0rrrrr 32 UInt(rrrrr) UInt(sssss)
1190 0 10ssss 00rrrr 16 UInt(rrrr) UInt(ssss)
1191 0 110sss 000rrr 8 UInt(rrr) UInt(sss)
1192 0 1110ss 0000rr 4 UInt(rr) UInt(ss)
1193 0 11110s 00000r 2 UInt(r) UInt(s)
1194 where all-ones value of S is reserved.
1196 Let's call E the SIMD size.
1198 The immediate value is: S+1 bits '1' rotated to the right by R.
1200 The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
1201 (remember S != E - 1). */
1203 #define TOTAL_IMM_NB 5334
1208 aarch64_insn encoding
;
1209 } simd_imm_encoding
;
1211 static simd_imm_encoding simd_immediates
[TOTAL_IMM_NB
];
1214 simd_imm_encoding_cmp(const void *i1
, const void *i2
)
1216 const simd_imm_encoding
*imm1
= (const simd_imm_encoding
*)i1
;
1217 const simd_imm_encoding
*imm2
= (const simd_imm_encoding
*)i2
;
1219 if (imm1
->imm
< imm2
->imm
)
1221 if (imm1
->imm
> imm2
->imm
)
/* immediate bitfield standard encoding
   imm13<12> imm13<5:0> imm13<11:6> SIMD size R      S
   1         ssssss     rrrrrr      64        rrrrrr ssssss
   0         0sssss     0rrrrr      32        rrrrr  sssss
   0         10ssss     00rrrr      16        rrrr   ssss
   0         110sss     000rrr      8         rrr    sss
   0         1110ss     0000rr      4         rr     ss
   0         11110s     00000r      2         r      s  */
static inline uint32_t
encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
{
  /* Pack N (bit 12), immr (bits 11:6) and imms (bits 5:0).  */
  return (is64 << 12) | (r << 6) | s;
}
1241 build_immediate_table (void)
1243 uint32_t log_e
, e
, s
, r
, s_mask
;
1249 for (log_e
= 1; log_e
<= 6; log_e
++)
1251 /* Get element size. */
1256 mask
= 0xffffffffffffffffull
;
1262 mask
= (1ull << e
) - 1;
1264 1 ((1 << 4) - 1) << 2 = 111100
1265 2 ((1 << 3) - 1) << 3 = 111000
1266 3 ((1 << 2) - 1) << 4 = 110000
1267 4 ((1 << 1) - 1) << 5 = 100000
1268 5 ((1 << 0) - 1) << 6 = 000000 */
1269 s_mask
= ((1u << (5 - log_e
)) - 1) << (log_e
+ 1);
1271 for (s
= 0; s
< e
- 1; s
++)
1272 for (r
= 0; r
< e
; r
++)
1274 /* s+1 consecutive bits to 1 (s < 63) */
1275 imm
= (1ull << (s
+ 1)) - 1;
1276 /* rotate right by r */
1278 imm
= (imm
>> r
) | ((imm
<< (e
- r
)) & mask
);
1279 /* replicate the constant depending on SIMD size */
1282 case 1: imm
= (imm
<< 2) | imm
;
1284 case 2: imm
= (imm
<< 4) | imm
;
1286 case 3: imm
= (imm
<< 8) | imm
;
1288 case 4: imm
= (imm
<< 16) | imm
;
1290 case 5: imm
= (imm
<< 32) | imm
;
1295 simd_immediates
[nb_imms
].imm
= imm
;
1296 simd_immediates
[nb_imms
].encoding
=
1297 encode_immediate_bitfield(is64
, s
| s_mask
, r
);
1301 assert (nb_imms
== TOTAL_IMM_NB
);
1302 qsort(simd_immediates
, nb_imms
,
1303 sizeof(simd_immediates
[0]), simd_imm_encoding_cmp
);
1306 /* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
1307 be accepted by logical (immediate) instructions
1308 e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1310 ESIZE is the number of bytes in the decoded immediate value.
1311 If ENCODING is not NULL, on the return of TRUE, the standard encoding for
1312 VALUE will be returned in *ENCODING. */
1315 aarch64_logical_immediate_p (uint64_t value
, int esize
, aarch64_insn
*encoding
)
1317 simd_imm_encoding imm_enc
;
1318 const simd_imm_encoding
*imm_encoding
;
1319 static bool initialized
= false;
1323 DEBUG_TRACE ("enter with 0x%" PRIx64
"(%" PRIi64
"), esize: %d", value
,
1328 build_immediate_table ();
1332 /* Allow all zeros or all ones in top bits, so that
1333 constant expressions like ~1 are permitted. */
1334 upper
= (uint64_t) -1 << (esize
* 4) << (esize
* 4);
1335 if ((value
& ~upper
) != value
&& (value
| upper
) != value
)
1338 /* Replicate to a full 64-bit value. */
1340 for (i
= esize
* 8; i
< 64; i
*= 2)
1341 value
|= (value
<< i
);
1343 imm_enc
.imm
= value
;
1344 imm_encoding
= (const simd_imm_encoding
*)
1345 bsearch(&imm_enc
, simd_immediates
, TOTAL_IMM_NB
,
1346 sizeof(simd_immediates
[0]), simd_imm_encoding_cmp
);
1347 if (imm_encoding
== NULL
)
1349 DEBUG_TRACE ("exit with false");
1352 if (encoding
!= NULL
)
1353 *encoding
= imm_encoding
->encoding
;
1354 DEBUG_TRACE ("exit with true");
/* If 64-bit immediate IMM is in the format of
   "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
   where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
   of value "abcdefgh".  Otherwise return -1.  */

int
aarch64_shrink_expanded_imm8 (uint64_t imm)
{
  int i, ret;
  uint32_t byte;

  ret = 0;
  for (i = 0; i < 8; i++)
    {
      byte = (imm >> (8 * i)) & 0xff;
      if (byte == 0xff)
	/* Byte i is all ones: bit i of the result is set.  */
	ret |= 1 << i;
      else if (byte != 0x00)
	/* A byte that is neither 0x00 nor 0xff is not an expanded imm8.  */
	return -1;
    }
  return ret;
}
1380 /* Utility inline functions for operand_general_constraint_met_p. */
1383 set_error (aarch64_operand_error
*mismatch_detail
,
1384 enum aarch64_operand_error_kind kind
, int idx
,
1387 if (mismatch_detail
== NULL
)
1389 mismatch_detail
->kind
= kind
;
1390 mismatch_detail
->index
= idx
;
1391 mismatch_detail
->error
= error
;
1395 set_syntax_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1398 if (mismatch_detail
== NULL
)
1400 set_error (mismatch_detail
, AARCH64_OPDE_SYNTAX_ERROR
, idx
, error
);
1404 set_invalid_regno_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1405 const char *prefix
, int lower_bound
, int upper_bound
)
1407 if (mismatch_detail
== NULL
)
1409 set_error (mismatch_detail
, AARCH64_OPDE_INVALID_REGNO
, idx
, NULL
);
1410 mismatch_detail
->data
[0].s
= prefix
;
1411 mismatch_detail
->data
[1].i
= lower_bound
;
1412 mismatch_detail
->data
[2].i
= upper_bound
;
1416 set_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1417 int idx
, int lower_bound
, int upper_bound
,
1420 if (mismatch_detail
== NULL
)
1422 set_error (mismatch_detail
, AARCH64_OPDE_OUT_OF_RANGE
, idx
, error
);
1423 mismatch_detail
->data
[0].i
= lower_bound
;
1424 mismatch_detail
->data
[1].i
= upper_bound
;
1428 set_imm_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1429 int idx
, int lower_bound
, int upper_bound
)
1431 if (mismatch_detail
== NULL
)
1433 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1434 _("immediate value"));
1438 set_offset_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1439 int idx
, int lower_bound
, int upper_bound
)
1441 if (mismatch_detail
== NULL
)
1443 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1444 _("immediate offset"));
1448 set_regno_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1449 int idx
, int lower_bound
, int upper_bound
)
1451 if (mismatch_detail
== NULL
)
1453 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1454 _("register number"));
1458 set_elem_idx_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1459 int idx
, int lower_bound
, int upper_bound
)
1461 if (mismatch_detail
== NULL
)
1463 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1464 _("register element index"));
1468 set_sft_amount_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1469 int idx
, int lower_bound
, int upper_bound
)
1471 if (mismatch_detail
== NULL
)
1473 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1477 /* Report that the MUL modifier in operand IDX should be in the range
1478 [LOWER_BOUND, UPPER_BOUND]. */
1480 set_multiplier_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1481 int idx
, int lower_bound
, int upper_bound
)
1483 if (mismatch_detail
== NULL
)
1485 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1490 set_unaligned_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1493 if (mismatch_detail
== NULL
)
1495 set_error (mismatch_detail
, AARCH64_OPDE_UNALIGNED
, idx
, NULL
);
1496 mismatch_detail
->data
[0].i
= alignment
;
1500 set_reg_list_length_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1503 if (mismatch_detail
== NULL
)
1505 set_error (mismatch_detail
, AARCH64_OPDE_REG_LIST_LENGTH
, idx
, NULL
);
1506 mismatch_detail
->data
[0].i
= 1 << expected_num
;
1510 set_reg_list_stride_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1513 if (mismatch_detail
== NULL
)
1515 set_error (mismatch_detail
, AARCH64_OPDE_REG_LIST_STRIDE
, idx
, NULL
);
1516 mismatch_detail
->data
[0].i
= 1 << expected_num
;
1520 set_invalid_vg_size (aarch64_operand_error
*mismatch_detail
,
1521 int idx
, int expected
)
1523 if (mismatch_detail
== NULL
)
1525 set_error (mismatch_detail
, AARCH64_OPDE_INVALID_VG_SIZE
, idx
, NULL
);
1526 mismatch_detail
->data
[0].i
= expected
;
1530 set_other_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1533 if (mismatch_detail
== NULL
)
1535 set_error (mismatch_detail
, AARCH64_OPDE_OTHER_ERROR
, idx
, error
);
1538 /* Check that indexed register operand OPND has a register in the range
1539 [MIN_REGNO, MAX_REGNO] and an index in the range [MIN_INDEX, MAX_INDEX].
1540 PREFIX is the register prefix, such as "z" for SVE vector registers. */
1543 check_reglane (const aarch64_opnd_info
*opnd
,
1544 aarch64_operand_error
*mismatch_detail
, int idx
,
1545 const char *prefix
, int min_regno
, int max_regno
,
1546 int min_index
, int max_index
)
1548 if (!value_in_range_p (opnd
->reglane
.regno
, min_regno
, max_regno
))
1550 set_invalid_regno_error (mismatch_detail
, idx
, prefix
, min_regno
,
1554 if (!value_in_range_p (opnd
->reglane
.index
, min_index
, max_index
))
1556 set_elem_idx_out_of_range_error (mismatch_detail
, idx
, min_index
,
1563 /* Check that register list operand OPND has NUM_REGS registers and a
1564 register stride of STRIDE. */
1567 check_reglist (const aarch64_opnd_info
*opnd
,
1568 aarch64_operand_error
*mismatch_detail
, int idx
,
1569 int num_regs
, int stride
)
1571 if (opnd
->reglist
.num_regs
!= num_regs
)
1573 set_reg_list_length_error (mismatch_detail
, idx
, num_regs
);
1576 if (opnd
->reglist
.stride
!= stride
)
1578 set_reg_list_stride_error (mismatch_detail
, idx
, stride
);
1584 /* Check that indexed ZA operand OPND has:
1586 - a selection register in the range [MIN_WREG, MIN_WREG + 3]
1588 - RANGE_SIZE consecutive immediate offsets.
1590 - an initial immediate offset that is a multiple of RANGE_SIZE
1591 in the range [0, MAX_VALUE * RANGE_SIZE]
1593 - a vector group size of GROUP_SIZE. */
1596 check_za_access (const aarch64_opnd_info
*opnd
,
1597 aarch64_operand_error
*mismatch_detail
, int idx
,
1598 int min_wreg
, int max_value
, unsigned int range_size
,
1601 if (!value_in_range_p (opnd
->indexed_za
.index
.regno
, min_wreg
, min_wreg
+ 3))
1604 set_other_error (mismatch_detail
, idx
,
1605 _("expected a selection register in the"
1607 else if (min_wreg
== 8)
1608 set_other_error (mismatch_detail
, idx
,
1609 _("expected a selection register in the"
1616 int max_index
= max_value
* range_size
;
1617 if (!value_in_range_p (opnd
->indexed_za
.index
.imm
, 0, max_index
))
1619 set_offset_out_of_range_error (mismatch_detail
, idx
, 0, max_index
);
1623 if ((opnd
->indexed_za
.index
.imm
% range_size
) != 0)
1625 assert (range_size
== 2 || range_size
== 4);
1626 set_other_error (mismatch_detail
, idx
,
1628 ? _("starting offset is not a multiple of 2")
1629 : _("starting offset is not a multiple of 4"));
1633 if (opnd
->indexed_za
.index
.countm1
!= range_size
- 1)
1635 if (range_size
== 1)
1636 set_other_error (mismatch_detail
, idx
,
1637 _("expected a single offset rather than"
1639 else if (range_size
== 2)
1640 set_other_error (mismatch_detail
, idx
,
1641 _("expected a range of two offsets"));
1642 else if (range_size
== 4)
1643 set_other_error (mismatch_detail
, idx
,
1644 _("expected a range of four offsets"));
1650 /* The vector group specifier is optional in assembly code. */
1651 if (opnd
->indexed_za
.group_size
!= 0
1652 && opnd
->indexed_za
.group_size
!= group_size
)
1654 set_invalid_vg_size (mismatch_detail
, idx
, group_size
);
1661 /* General constraint checking based on operand code.
1663 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1664 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1666 This function has to be called after the qualifiers for all operands
1669 Mismatching error message is returned in *MISMATCH_DETAIL upon request,
1670 i.e. when MISMATCH_DETAIL is non-NULL. This avoids the generation
1671 of error message during the disassembling where error message is not
1672 wanted. We avoid the dynamic construction of strings of error messages
1673 here (i.e. in libopcodes), as it is costly and complicated; instead, we
1674 use a combination of error code, static string and some integer data to
1675 represent an error. */
1678 operand_general_constraint_met_p (const aarch64_opnd_info
*opnds
, int idx
,
1679 enum aarch64_opnd type
,
1680 const aarch64_opcode
*opcode
,
1681 aarch64_operand_error
*mismatch_detail
)
1683 unsigned num
, modifiers
, shift
;
1685 int64_t imm
, min_value
, max_value
;
1686 uint64_t uvalue
, mask
;
1687 const aarch64_opnd_info
*opnd
= opnds
+ idx
;
1688 aarch64_opnd_qualifier_t qualifier
= opnd
->qualifier
;
1691 assert (opcode
->operands
[idx
] == opnd
->type
&& opnd
->type
== type
);
1693 switch (aarch64_operands
[type
].op_class
)
1695 case AARCH64_OPND_CLASS_INT_REG
:
1696 /* Check pair reg constraints for cas* instructions. */
1697 if (type
== AARCH64_OPND_PAIRREG
)
1699 assert (idx
== 1 || idx
== 3);
1700 if (opnds
[idx
- 1].reg
.regno
% 2 != 0)
1702 set_syntax_error (mismatch_detail
, idx
- 1,
1703 _("reg pair must start from even reg"));
1706 if (opnds
[idx
].reg
.regno
!= opnds
[idx
- 1].reg
.regno
+ 1)
1708 set_syntax_error (mismatch_detail
, idx
,
1709 _("reg pair must be contiguous"));
1715 /* <Xt> may be optional in some IC and TLBI instructions. */
1716 if (type
== AARCH64_OPND_Rt_SYS
)
1718 assert (idx
== 1 && (aarch64_get_operand_class (opnds
[0].type
)
1719 == AARCH64_OPND_CLASS_SYSTEM
));
1720 if (opnds
[1].present
1721 && !aarch64_sys_ins_reg_has_xt (opnds
[0].sysins_op
))
1723 set_other_error (mismatch_detail
, idx
, _("extraneous register"));
1726 if (!opnds
[1].present
1727 && aarch64_sys_ins_reg_has_xt (opnds
[0].sysins_op
))
1729 set_other_error (mismatch_detail
, idx
, _("missing register"));
1735 case AARCH64_OPND_QLF_WSP
:
1736 case AARCH64_OPND_QLF_SP
:
1737 if (!aarch64_stack_pointer_p (opnd
))
1739 set_other_error (mismatch_detail
, idx
,
1740 _("stack pointer register expected"));
1749 case AARCH64_OPND_CLASS_SVE_REG
:
1752 case AARCH64_OPND_SVE_Zm3_INDEX
:
1753 case AARCH64_OPND_SVE_Zm3_22_INDEX
:
1754 case AARCH64_OPND_SVE_Zm3_19_INDEX
:
1755 case AARCH64_OPND_SVE_Zm3_11_INDEX
:
1756 case AARCH64_OPND_SVE_Zm4_11_INDEX
:
1757 case AARCH64_OPND_SVE_Zm4_INDEX
:
1758 size
= get_operand_fields_width (get_operand_from_code (type
));
1759 shift
= get_operand_specific_data (&aarch64_operands
[type
]);
1760 if (!check_reglane (opnd
, mismatch_detail
, idx
,
1761 "z", 0, (1 << shift
) - 1,
1762 0, (1u << (size
- shift
)) - 1))
1766 case AARCH64_OPND_SVE_Zn_INDEX
:
1767 size
= aarch64_get_qualifier_esize (opnd
->qualifier
);
1768 if (!check_reglane (opnd
, mismatch_detail
, idx
, "z", 0, 31,
1773 case AARCH64_OPND_SME_PNn3_INDEX1
:
1774 case AARCH64_OPND_SME_PNn3_INDEX2
:
1775 size
= get_operand_field_width (get_operand_from_code (type
), 1);
1776 if (!check_reglane (opnd
, mismatch_detail
, idx
, "pn", 8, 15,
1777 0, (1 << size
) - 1))
1781 case AARCH64_OPND_SME_Zn_INDEX1_16
:
1782 case AARCH64_OPND_SME_Zn_INDEX2_15
:
1783 case AARCH64_OPND_SME_Zn_INDEX2_16
:
1784 case AARCH64_OPND_SME_Zn_INDEX3_14
:
1785 case AARCH64_OPND_SME_Zn_INDEX3_15
:
1786 case AARCH64_OPND_SME_Zn_INDEX4_14
:
1787 size
= get_operand_fields_width (get_operand_from_code (type
)) - 5;
1788 if (!check_reglane (opnd
, mismatch_detail
, idx
, "z", 0, 31,
1789 0, (1 << size
) - 1))
1793 case AARCH64_OPND_SME_Zm_INDEX1
:
1794 case AARCH64_OPND_SME_Zm_INDEX2
:
1795 case AARCH64_OPND_SME_Zm_INDEX3_1
:
1796 case AARCH64_OPND_SME_Zm_INDEX3_2
:
1797 case AARCH64_OPND_SME_Zm_INDEX3_10
:
1798 case AARCH64_OPND_SME_Zm_INDEX4_1
:
1799 case AARCH64_OPND_SME_Zm_INDEX4_10
:
1800 size
= get_operand_fields_width (get_operand_from_code (type
)) - 4;
1801 if (!check_reglane (opnd
, mismatch_detail
, idx
, "z", 0, 15,
1802 0, (1 << size
) - 1))
1806 case AARCH64_OPND_SME_Zm
:
1807 if (opnd
->reg
.regno
> 15)
1809 set_invalid_regno_error (mismatch_detail
, idx
, "z", 0, 15);
1814 case AARCH64_OPND_SME_PnT_Wm_imm
:
1815 size
= aarch64_get_qualifier_esize (opnd
->qualifier
);
1816 max_value
= 16 / size
- 1;
1817 if (!check_za_access (opnd
, mismatch_detail
, idx
,
1818 12, max_value
, 1, 0))
1827 case AARCH64_OPND_CLASS_SVE_REGLIST
:
1830 case AARCH64_OPND_SME_Pdx2
:
1831 case AARCH64_OPND_SME_Zdnx2
:
1832 case AARCH64_OPND_SME_Zdnx4
:
1833 case AARCH64_OPND_SME_Zmx2
:
1834 case AARCH64_OPND_SME_Zmx4
:
1835 case AARCH64_OPND_SME_Znx2
:
1836 case AARCH64_OPND_SME_Znx4
:
1837 num
= get_operand_specific_data (&aarch64_operands
[type
]);
1838 if (!check_reglist (opnd
, mismatch_detail
, idx
, num
, 1))
1840 if ((opnd
->reglist
.first_regno
% num
) != 0)
1842 set_other_error (mismatch_detail
, idx
,
1843 _("start register out of range"));
1848 case AARCH64_OPND_SME_Ztx2_STRIDED
:
1849 case AARCH64_OPND_SME_Ztx4_STRIDED
:
1850 /* 2-register lists have a stride of 8 and 4-register lists
1851 have a stride of 4. */
1852 num
= get_operand_specific_data (&aarch64_operands
[type
]);
1853 if (!check_reglist (opnd
, mismatch_detail
, idx
, num
, 16 / num
))
1855 num
= 16 | (opnd
->reglist
.stride
- 1);
1856 if ((opnd
->reglist
.first_regno
& ~num
) != 0)
1858 set_other_error (mismatch_detail
, idx
,
1859 _("start register out of range"));
1864 case AARCH64_OPND_SME_PdxN
:
1865 case AARCH64_OPND_SVE_ZnxN
:
1866 case AARCH64_OPND_SVE_ZtxN
:
1867 num
= get_opcode_dependent_value (opcode
);
1868 if (!check_reglist (opnd
, mismatch_detail
, idx
, num
, 1))
1877 case AARCH64_OPND_CLASS_ZA_ACCESS
:
1880 case AARCH64_OPND_SME_ZA_HV_idx_src
:
1881 case AARCH64_OPND_SME_ZA_HV_idx_dest
:
1882 case AARCH64_OPND_SME_ZA_HV_idx_ldstr
:
1883 size
= aarch64_get_qualifier_esize (opnd
->qualifier
);
1884 max_value
= 16 / size
- 1;
1885 if (!check_za_access (opnd
, mismatch_detail
, idx
, 12, max_value
, 1,
1886 get_opcode_dependent_value (opcode
)))
1890 case AARCH64_OPND_SME_ZA_array_off4
:
1891 if (!check_za_access (opnd
, mismatch_detail
, idx
, 12, 15, 1,
1892 get_opcode_dependent_value (opcode
)))
1896 case AARCH64_OPND_SME_ZA_array_off3_0
:
1897 case AARCH64_OPND_SME_ZA_array_off3_5
:
1898 if (!check_za_access (opnd
, mismatch_detail
, idx
, 8, 7, 1,
1899 get_opcode_dependent_value (opcode
)))
1903 case AARCH64_OPND_SME_ZA_array_off1x4
:
1904 if (!check_za_access (opnd
, mismatch_detail
, idx
, 8, 1, 4,
1905 get_opcode_dependent_value (opcode
)))
1909 case AARCH64_OPND_SME_ZA_array_off2x2
:
1910 if (!check_za_access (opnd
, mismatch_detail
, idx
, 8, 3, 2,
1911 get_opcode_dependent_value (opcode
)))
1915 case AARCH64_OPND_SME_ZA_array_off2x4
:
1916 if (!check_za_access (opnd
, mismatch_detail
, idx
, 8, 3, 4,
1917 get_opcode_dependent_value (opcode
)))
1921 case AARCH64_OPND_SME_ZA_array_off3x2
:
1922 if (!check_za_access (opnd
, mismatch_detail
, idx
, 8, 7, 2,
1923 get_opcode_dependent_value (opcode
)))
1927 case AARCH64_OPND_SME_ZA_HV_idx_srcxN
:
1928 case AARCH64_OPND_SME_ZA_HV_idx_destxN
:
1929 size
= aarch64_get_qualifier_esize (opnd
->qualifier
);
1930 num
= get_opcode_dependent_value (opcode
);
1931 max_value
= 16 / num
/ size
;
1934 if (!check_za_access (opnd
, mismatch_detail
, idx
,
1935 12, max_value
, num
, 0))
1944 case AARCH64_OPND_CLASS_PRED_REG
:
1947 case AARCH64_OPND_SME_PNd3
:
1948 case AARCH64_OPND_SME_PNg3
:
1949 if (opnd
->reg
.regno
< 8)
1951 set_invalid_regno_error (mismatch_detail
, idx
, "pn", 8, 15);
1957 if (opnd
->reg
.regno
>= 8
1958 && get_operand_fields_width (get_operand_from_code (type
)) == 3)
1960 set_invalid_regno_error (mismatch_detail
, idx
, "p", 0, 7);
1967 case AARCH64_OPND_CLASS_COND
:
1968 if (type
== AARCH64_OPND_COND1
1969 && (opnds
[idx
].cond
->value
& 0xe) == 0xe)
1971 /* Not allow AL or NV. */
1972 set_syntax_error (mismatch_detail
, idx
, NULL
);
1976 case AARCH64_OPND_CLASS_ADDRESS
:
1977 /* Check writeback. */
1978 switch (opcode
->iclass
)
1982 case ldstnapair_offs
:
1985 if (opnd
->addr
.writeback
== 1)
1987 set_syntax_error (mismatch_detail
, idx
,
1988 _("unexpected address writeback"));
1993 if (opnd
->addr
.writeback
== 1 && opnd
->addr
.preind
!= 1)
1995 set_syntax_error (mismatch_detail
, idx
,
1996 _("unexpected address writeback"));
2001 case ldstpair_indexed
:
2004 if (opnd
->addr
.writeback
== 0)
2006 set_syntax_error (mismatch_detail
, idx
,
2007 _("address writeback expected"));
2012 assert (opnd
->addr
.writeback
== 0);
2017 case AARCH64_OPND_ADDR_SIMM7
:
2018 /* Scaled signed 7 bits immediate offset. */
2019 /* Get the size of the data element that is accessed, which may be
2020 different from that of the source register size,
2021 e.g. in strb/ldrb. */
2022 size
= aarch64_get_qualifier_esize (opnd
->qualifier
);
2023 if (!value_in_range_p (opnd
->addr
.offset
.imm
, -64 * size
, 63 * size
))
2025 set_offset_out_of_range_error (mismatch_detail
, idx
,
2026 -64 * size
, 63 * size
);
2029 if (!value_aligned_p (opnd
->addr
.offset
.imm
, size
))
2031 set_unaligned_error (mismatch_detail
, idx
, size
);
2035 case AARCH64_OPND_ADDR_OFFSET
:
2036 case AARCH64_OPND_ADDR_SIMM9
:
2037 /* Unscaled signed 9 bits immediate offset. */
2038 if (!value_in_range_p (opnd
->addr
.offset
.imm
, -256, 255))
2040 set_offset_out_of_range_error (mismatch_detail
, idx
, -256, 255);
2045 case AARCH64_OPND_ADDR_SIMM9_2
:
2046 /* Unscaled signed 9 bits immediate offset, which has to be negative
2048 size
= aarch64_get_qualifier_esize (qualifier
);
2049 if ((value_in_range_p (opnd
->addr
.offset
.imm
, 0, 255)
2050 && !value_aligned_p (opnd
->addr
.offset
.imm
, size
))
2051 || value_in_range_p (opnd
->addr
.offset
.imm
, -256, -1))
2053 set_other_error (mismatch_detail
, idx
,
2054 _("negative or unaligned offset expected"));
2057 case AARCH64_OPND_ADDR_SIMM10
:
2058 /* Scaled signed 10 bits immediate offset. */
2059 if (!value_in_range_p (opnd
->addr
.offset
.imm
, -4096, 4088))
2061 set_offset_out_of_range_error (mismatch_detail
, idx
, -4096, 4088);
2064 if (!value_aligned_p (opnd
->addr
.offset
.imm
, 8))
2066 set_unaligned_error (mismatch_detail
, idx
, 8);
2071 case AARCH64_OPND_ADDR_SIMM11
:
2072 /* Signed 11 bits immediate offset (multiple of 16). */
2073 if (!value_in_range_p (opnd
->addr
.offset
.imm
, -1024, 1008))
2075 set_offset_out_of_range_error (mismatch_detail
, idx
, -1024, 1008);
2079 if (!value_aligned_p (opnd
->addr
.offset
.imm
, 16))
2081 set_unaligned_error (mismatch_detail
, idx
, 16);
2086 case AARCH64_OPND_ADDR_SIMM13
:
2087 /* Signed 13 bits immediate offset (multiple of 16). */
2088 if (!value_in_range_p (opnd
->addr
.offset
.imm
, -4096, 4080))
2090 set_offset_out_of_range_error (mismatch_detail
, idx
, -4096, 4080);
2094 if (!value_aligned_p (opnd
->addr
.offset
.imm
, 16))
2096 set_unaligned_error (mismatch_detail
, idx
, 16);
2101 case AARCH64_OPND_SIMD_ADDR_POST
:
2102 /* AdvSIMD load/store multiple structures, post-index. */
2104 if (opnd
->addr
.offset
.is_reg
)
2106 if (value_in_range_p (opnd
->addr
.offset
.regno
, 0, 30))
2110 set_other_error (mismatch_detail
, idx
,
2111 _("invalid register offset"));
2117 const aarch64_opnd_info
*prev
= &opnds
[idx
-1];
2118 unsigned num_bytes
; /* total number of bytes transferred. */
2119 /* The opcode dependent area stores the number of elements in
2120 each structure to be loaded/stored. */
2121 int is_ld1r
= get_opcode_dependent_value (opcode
) == 1;
2122 if (opcode
->operands
[0] == AARCH64_OPND_LVt_AL
)
2123 /* Special handling of loading single structure to all lane. */
2124 num_bytes
= (is_ld1r
? 1 : prev
->reglist
.num_regs
)
2125 * aarch64_get_qualifier_esize (prev
->qualifier
);
2127 num_bytes
= prev
->reglist
.num_regs
2128 * aarch64_get_qualifier_esize (prev
->qualifier
)
2129 * aarch64_get_qualifier_nelem (prev
->qualifier
);
2130 if ((int) num_bytes
!= opnd
->addr
.offset
.imm
)
2132 set_other_error (mismatch_detail
, idx
,
2133 _("invalid post-increment amount"));
2139 case AARCH64_OPND_ADDR_REGOFF
:
2140 /* Get the size of the data element that is accessed, which may be
2141 different from that of the source register size,
2142 e.g. in strb/ldrb. */
2143 size
= aarch64_get_qualifier_esize (opnd
->qualifier
);
2144 /* It is either no shift or shift by the binary logarithm of SIZE. */
2145 if (opnd
->shifter
.amount
!= 0
2146 && opnd
->shifter
.amount
!= (int)get_logsz (size
))
2148 set_other_error (mismatch_detail
, idx
,
2149 _("invalid shift amount"));
2152 /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
2154 switch (opnd
->shifter
.kind
)
2156 case AARCH64_MOD_UXTW
:
2157 case AARCH64_MOD_LSL
:
2158 case AARCH64_MOD_SXTW
:
2159 case AARCH64_MOD_SXTX
: break;
2161 set_other_error (mismatch_detail
, idx
,
2162 _("invalid extend/shift operator"));
2167 case AARCH64_OPND_ADDR_UIMM12
:
2168 imm
= opnd
->addr
.offset
.imm
;
2169 /* Get the size of the data element that is accessed, which may be
2170 different from that of the source register size,
2171 e.g. in strb/ldrb. */
2172 size
= aarch64_get_qualifier_esize (qualifier
);
2173 if (!value_in_range_p (opnd
->addr
.offset
.imm
, 0, 4095 * size
))
2175 set_offset_out_of_range_error (mismatch_detail
, idx
,
2179 if (!value_aligned_p (opnd
->addr
.offset
.imm
, size
))
2181 set_unaligned_error (mismatch_detail
, idx
, size
);
2186 case AARCH64_OPND_ADDR_PCREL14
:
2187 case AARCH64_OPND_ADDR_PCREL19
:
2188 case AARCH64_OPND_ADDR_PCREL21
:
2189 case AARCH64_OPND_ADDR_PCREL26
:
2190 imm
= opnd
->imm
.value
;
2191 if (operand_need_shift_by_two (get_operand_from_code (type
)))
2193 /* The offset value in a PC-relative branch instruction is alway
2194 4-byte aligned and is encoded without the lowest 2 bits. */
2195 if (!value_aligned_p (imm
, 4))
2197 set_unaligned_error (mismatch_detail
, idx
, 4);
2200 /* Right shift by 2 so that we can carry out the following check
2204 size
= get_operand_fields_width (get_operand_from_code (type
));
2205 if (!value_fit_signed_field_p (imm
, size
))
2207 set_other_error (mismatch_detail
, idx
,
2208 _("immediate out of range"));
2213 case AARCH64_OPND_SME_ADDR_RI_U4xVL
:
2214 if (!value_in_range_p (opnd
->addr
.offset
.imm
, 0, 15))
2216 set_offset_out_of_range_error (mismatch_detail
, idx
, 0, 15);
2221 case AARCH64_OPND_SVE_ADDR_RI_S4xVL
:
2222 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL
:
2223 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL
:
2224 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL
:
2228 assert (!opnd
->addr
.offset
.is_reg
);
2229 assert (opnd
->addr
.preind
);
2230 num
= 1 + get_operand_specific_data (&aarch64_operands
[type
]);
2233 if ((opnd
->addr
.offset
.imm
!= 0 && !opnd
->shifter
.operator_present
)
2234 || (opnd
->shifter
.operator_present
2235 && opnd
->shifter
.kind
!= AARCH64_MOD_MUL_VL
))
2237 set_other_error (mismatch_detail
, idx
,
2238 _("invalid addressing mode"));
2241 if (!value_in_range_p (opnd
->addr
.offset
.imm
, min_value
, max_value
))
2243 set_offset_out_of_range_error (mismatch_detail
, idx
,
2244 min_value
, max_value
);
2247 if (!value_aligned_p (opnd
->addr
.offset
.imm
, num
))
2249 set_unaligned_error (mismatch_detail
, idx
, num
);
2254 case AARCH64_OPND_SVE_ADDR_RI_S6xVL
:
2257 goto sve_imm_offset_vl
;
2259 case AARCH64_OPND_SVE_ADDR_RI_S9xVL
:
2262 goto sve_imm_offset_vl
;
2264 case AARCH64_OPND_SVE_ADDR_RI_U6
:
2265 case AARCH64_OPND_SVE_ADDR_RI_U6x2
:
2266 case AARCH64_OPND_SVE_ADDR_RI_U6x4
:
2267 case AARCH64_OPND_SVE_ADDR_RI_U6x8
:
2271 assert (!opnd
->addr
.offset
.is_reg
);
2272 assert (opnd
->addr
.preind
);
2273 num
= 1 << get_operand_specific_data (&aarch64_operands
[type
]);
2276 if (opnd
->shifter
.operator_present
2277 || opnd
->shifter
.amount_present
)
2279 set_other_error (mismatch_detail
, idx
,
2280 _("invalid addressing mode"));
2283 if (!value_in_range_p (opnd
->addr
.offset
.imm
, min_value
, max_value
))
2285 set_offset_out_of_range_error (mismatch_detail
, idx
,
2286 min_value
, max_value
);
2289 if (!value_aligned_p (opnd
->addr
.offset
.imm
, num
))
2291 set_unaligned_error (mismatch_detail
, idx
, num
);
2296 case AARCH64_OPND_SVE_ADDR_RI_S4x16
:
2297 case AARCH64_OPND_SVE_ADDR_RI_S4x32
:
2300 goto sve_imm_offset
;
2302 case AARCH64_OPND_SVE_ADDR_ZX
:
2303 /* Everything is already ensured by parse_operands or
2304 aarch64_ext_sve_addr_rr_lsl (because this is a very specific
2306 assert (opnd
->addr
.offset
.is_reg
);
2307 assert (opnd
->addr
.preind
);
2308 assert ((aarch64_operands
[type
].flags
& OPD_F_NO_ZR
) == 0);
2309 assert (opnd
->shifter
.kind
== AARCH64_MOD_LSL
);
2310 assert (opnd
->shifter
.operator_present
== 0);
2313 case AARCH64_OPND_SVE_ADDR_R
:
2314 case AARCH64_OPND_SVE_ADDR_RR
:
2315 case AARCH64_OPND_SVE_ADDR_RR_LSL1
:
2316 case AARCH64_OPND_SVE_ADDR_RR_LSL2
:
2317 case AARCH64_OPND_SVE_ADDR_RR_LSL3
:
2318 case AARCH64_OPND_SVE_ADDR_RR_LSL4
:
2319 case AARCH64_OPND_SVE_ADDR_RX
:
2320 case AARCH64_OPND_SVE_ADDR_RX_LSL1
:
2321 case AARCH64_OPND_SVE_ADDR_RX_LSL2
:
2322 case AARCH64_OPND_SVE_ADDR_RX_LSL3
:
2323 case AARCH64_OPND_SVE_ADDR_RZ
:
2324 case AARCH64_OPND_SVE_ADDR_RZ_LSL1
:
2325 case AARCH64_OPND_SVE_ADDR_RZ_LSL2
:
2326 case AARCH64_OPND_SVE_ADDR_RZ_LSL3
:
2327 modifiers
= 1 << AARCH64_MOD_LSL
;
2329 assert (opnd
->addr
.offset
.is_reg
);
2330 assert (opnd
->addr
.preind
);
2331 if ((aarch64_operands
[type
].flags
& OPD_F_NO_ZR
) != 0
2332 && opnd
->addr
.offset
.regno
== 31)
2334 set_other_error (mismatch_detail
, idx
,
2335 _("index register xzr is not allowed"));
2338 if (((1 << opnd
->shifter
.kind
) & modifiers
) == 0
2339 || (opnd
->shifter
.amount
2340 != get_operand_specific_data (&aarch64_operands
[type
])))
2342 set_other_error (mismatch_detail
, idx
,
2343 _("invalid addressing mode"));
2348 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14
:
2349 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22
:
2350 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14
:
2351 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22
:
2352 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14
:
2353 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22
:
2354 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14
:
2355 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22
:
2356 modifiers
= (1 << AARCH64_MOD_SXTW
) | (1 << AARCH64_MOD_UXTW
);
2357 goto sve_rr_operand
;
2359 case AARCH64_OPND_SVE_ADDR_ZI_U5
:
2360 case AARCH64_OPND_SVE_ADDR_ZI_U5x2
:
2361 case AARCH64_OPND_SVE_ADDR_ZI_U5x4
:
2362 case AARCH64_OPND_SVE_ADDR_ZI_U5x8
:
2365 goto sve_imm_offset
;
2367 case AARCH64_OPND_SVE_ADDR_ZZ_LSL
:
2368 modifiers
= 1 << AARCH64_MOD_LSL
;
2370 assert (opnd
->addr
.offset
.is_reg
);
2371 assert (opnd
->addr
.preind
);
2372 if (((1 << opnd
->shifter
.kind
) & modifiers
) == 0
2373 || opnd
->shifter
.amount
< 0
2374 || opnd
->shifter
.amount
> 3)
2376 set_other_error (mismatch_detail
, idx
,
2377 _("invalid addressing mode"));
2382 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW
:
2383 modifiers
= (1 << AARCH64_MOD_SXTW
);
2384 goto sve_zz_operand
;
2386 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW
:
2387 modifiers
= 1 << AARCH64_MOD_UXTW
;
2388 goto sve_zz_operand
;
2395 case AARCH64_OPND_CLASS_SIMD_REGLIST
:
2396 if (type
== AARCH64_OPND_LEt
)
2398 /* Get the upper bound for the element index. */
2399 num
= 16 / aarch64_get_qualifier_esize (qualifier
) - 1;
2400 if (!value_in_range_p (opnd
->reglist
.index
, 0, num
))
2402 set_elem_idx_out_of_range_error (mismatch_detail
, idx
, 0, num
);
2406 /* The opcode dependent area stores the number of elements in
2407 each structure to be loaded/stored. */
2408 num
= get_opcode_dependent_value (opcode
);
2411 case AARCH64_OPND_LVt
:
2412 assert (num
>= 1 && num
<= 4);
2413 /* Unless LD1/ST1, the number of registers should be equal to that
2414 of the structure elements. */
2415 if (num
!= 1 && !check_reglist (opnd
, mismatch_detail
, idx
, num
, 1))
2418 case AARCH64_OPND_LVt_AL
:
2419 case AARCH64_OPND_LEt
:
2420 assert (num
>= 1 && num
<= 4);
2421 /* The number of registers should be equal to that of the structure
2423 if (!check_reglist (opnd
, mismatch_detail
, idx
, num
, 1))
2429 if (opnd
->reglist
.stride
!= 1)
2431 set_reg_list_stride_error (mismatch_detail
, idx
, 1);
2436 case AARCH64_OPND_CLASS_IMMEDIATE
:
2437 /* Constraint check on immediate operand. */
2438 imm
= opnd
->imm
.value
;
2439 /* E.g. imm_0_31 constrains value to be 0..31. */
2440 if (qualifier_value_in_range_constraint_p (qualifier
)
2441 && !value_in_range_p (imm
, get_lower_bound (qualifier
),
2442 get_upper_bound (qualifier
)))
2444 set_imm_out_of_range_error (mismatch_detail
, idx
,
2445 get_lower_bound (qualifier
),
2446 get_upper_bound (qualifier
));
2452 case AARCH64_OPND_AIMM
:
2453 if (opnd
->shifter
.kind
!= AARCH64_MOD_LSL
)
2455 set_other_error (mismatch_detail
, idx
,
2456 _("invalid shift operator"));
2459 if (opnd
->shifter
.amount
!= 0 && opnd
->shifter
.amount
!= 12)
2461 set_other_error (mismatch_detail
, idx
,
2462 _("shift amount must be 0 or 12"));
2465 if (!value_fit_unsigned_field_p (opnd
->imm
.value
, 12))
2467 set_other_error (mismatch_detail
, idx
,
2468 _("immediate out of range"));
2473 case AARCH64_OPND_HALF
:
2474 assert (idx
== 1 && opnds
[0].type
== AARCH64_OPND_Rd
);
2475 if (opnd
->shifter
.kind
!= AARCH64_MOD_LSL
)
2477 set_other_error (mismatch_detail
, idx
,
2478 _("invalid shift operator"));
2481 size
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
2482 if (!value_aligned_p (opnd
->shifter
.amount
, 16))
2484 set_other_error (mismatch_detail
, idx
,
2485 _("shift amount must be a multiple of 16"));
2488 if (!value_in_range_p (opnd
->shifter
.amount
, 0, size
* 8 - 16))
2490 set_sft_amount_out_of_range_error (mismatch_detail
, idx
,
2494 if (opnd
->imm
.value
< 0)
2496 set_other_error (mismatch_detail
, idx
,
2497 _("negative immediate value not allowed"));
2500 if (!value_fit_unsigned_field_p (opnd
->imm
.value
, 16))
2502 set_other_error (mismatch_detail
, idx
,
2503 _("immediate out of range"));
2508 case AARCH64_OPND_IMM_MOV
:
2510 int esize
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
2511 imm
= opnd
->imm
.value
;
2515 case OP_MOV_IMM_WIDEN
:
2518 case OP_MOV_IMM_WIDE
:
2519 if (!aarch64_wide_constant_p (imm
, esize
== 4, NULL
))
2521 set_other_error (mismatch_detail
, idx
,
2522 _("immediate out of range"));
2526 case OP_MOV_IMM_LOG
:
2527 if (!aarch64_logical_immediate_p (imm
, esize
, NULL
))
2529 set_other_error (mismatch_detail
, idx
,
2530 _("immediate out of range"));
2541 case AARCH64_OPND_NZCV
:
2542 case AARCH64_OPND_CCMP_IMM
:
2543 case AARCH64_OPND_EXCEPTION
:
2544 case AARCH64_OPND_UNDEFINED
:
2545 case AARCH64_OPND_TME_UIMM16
:
2546 case AARCH64_OPND_UIMM4
:
2547 case AARCH64_OPND_UIMM4_ADDG
:
2548 case AARCH64_OPND_UIMM7
:
2549 case AARCH64_OPND_UIMM3_OP1
:
2550 case AARCH64_OPND_UIMM3_OP2
:
2551 case AARCH64_OPND_SVE_UIMM3
:
2552 case AARCH64_OPND_SVE_UIMM7
:
2553 case AARCH64_OPND_SVE_UIMM8
:
2554 case AARCH64_OPND_SVE_UIMM8_53
:
2555 case AARCH64_OPND_CSSC_UIMM8
:
2556 size
= get_operand_fields_width (get_operand_from_code (type
));
2558 if (!value_fit_unsigned_field_p (opnd
->imm
.value
, size
))
2560 set_imm_out_of_range_error (mismatch_detail
, idx
, 0,
2566 case AARCH64_OPND_UIMM10
:
2567 /* Scaled unsigned 10 bits immediate offset. */
2568 if (!value_in_range_p (opnd
->imm
.value
, 0, 1008))
2570 set_imm_out_of_range_error (mismatch_detail
, idx
, 0, 1008);
2574 if (!value_aligned_p (opnd
->imm
.value
, 16))
2576 set_unaligned_error (mismatch_detail
, idx
, 16);
2581 case AARCH64_OPND_SIMM5
:
2582 case AARCH64_OPND_SVE_SIMM5
:
2583 case AARCH64_OPND_SVE_SIMM5B
:
2584 case AARCH64_OPND_SVE_SIMM6
:
2585 case AARCH64_OPND_SVE_SIMM8
:
2586 case AARCH64_OPND_CSSC_SIMM8
:
2587 size
= get_operand_fields_width (get_operand_from_code (type
));
2589 if (!value_fit_signed_field_p (opnd
->imm
.value
, size
))
2591 set_imm_out_of_range_error (mismatch_detail
, idx
,
2593 (1 << (size
- 1)) - 1);
2598 case AARCH64_OPND_WIDTH
:
2599 assert (idx
> 1 && opnds
[idx
-1].type
== AARCH64_OPND_IMM
2600 && opnds
[0].type
== AARCH64_OPND_Rd
);
2601 size
= get_upper_bound (qualifier
);
2602 if (opnd
->imm
.value
+ opnds
[idx
-1].imm
.value
> size
)
2603 /* lsb+width <= reg.size */
2605 set_imm_out_of_range_error (mismatch_detail
, idx
, 1,
2606 size
- opnds
[idx
-1].imm
.value
);
2611 case AARCH64_OPND_LIMM
:
2612 case AARCH64_OPND_SVE_LIMM
:
2614 int esize
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
2615 uint64_t uimm
= opnd
->imm
.value
;
2616 if (opcode
->op
== OP_BIC
)
2618 if (!aarch64_logical_immediate_p (uimm
, esize
, NULL
))
2620 set_other_error (mismatch_detail
, idx
,
2621 _("immediate out of range"));
2627 case AARCH64_OPND_IMM0
:
2628 case AARCH64_OPND_FPIMM0
:
2629 if (opnd
->imm
.value
!= 0)
2631 set_other_error (mismatch_detail
, idx
,
2632 _("immediate zero expected"));
2637 case AARCH64_OPND_IMM_ROT1
:
2638 case AARCH64_OPND_IMM_ROT2
:
2639 case AARCH64_OPND_SVE_IMM_ROT2
:
2640 if (opnd
->imm
.value
!= 0
2641 && opnd
->imm
.value
!= 90
2642 && opnd
->imm
.value
!= 180
2643 && opnd
->imm
.value
!= 270)
2645 set_other_error (mismatch_detail
, idx
,
2646 _("rotate expected to be 0, 90, 180 or 270"));
2651 case AARCH64_OPND_IMM_ROT3
:
2652 case AARCH64_OPND_SVE_IMM_ROT1
:
2653 case AARCH64_OPND_SVE_IMM_ROT3
:
2654 if (opnd
->imm
.value
!= 90 && opnd
->imm
.value
!= 270)
2656 set_other_error (mismatch_detail
, idx
,
2657 _("rotate expected to be 90 or 270"));
2662 case AARCH64_OPND_SHLL_IMM
:
2664 size
= 8 * aarch64_get_qualifier_esize (opnds
[idx
- 1].qualifier
);
2665 if (opnd
->imm
.value
!= size
)
2667 set_other_error (mismatch_detail
, idx
,
2668 _("invalid shift amount"));
2673 case AARCH64_OPND_IMM_VLSL
:
2674 size
= aarch64_get_qualifier_esize (qualifier
);
2675 if (!value_in_range_p (opnd
->imm
.value
, 0, size
* 8 - 1))
2677 set_imm_out_of_range_error (mismatch_detail
, idx
, 0,
2683 case AARCH64_OPND_IMM_VLSR
:
2684 size
= aarch64_get_qualifier_esize (qualifier
);
2685 if (!value_in_range_p (opnd
->imm
.value
, 1, size
* 8))
2687 set_imm_out_of_range_error (mismatch_detail
, idx
, 1, size
* 8);
2692 case AARCH64_OPND_SIMD_IMM
:
2693 case AARCH64_OPND_SIMD_IMM_SFT
:
2694 /* Qualifier check. */
2697 case AARCH64_OPND_QLF_LSL
:
2698 if (opnd
->shifter
.kind
!= AARCH64_MOD_LSL
)
2700 set_other_error (mismatch_detail
, idx
,
2701 _("invalid shift operator"));
2705 case AARCH64_OPND_QLF_MSL
:
2706 if (opnd
->shifter
.kind
!= AARCH64_MOD_MSL
)
2708 set_other_error (mismatch_detail
, idx
,
2709 _("invalid shift operator"));
2713 case AARCH64_OPND_QLF_NIL
:
2714 if (opnd
->shifter
.kind
!= AARCH64_MOD_NONE
)
2716 set_other_error (mismatch_detail
, idx
,
2717 _("shift is not permitted"));
2725 /* Is the immediate valid? */
2727 if (aarch64_get_qualifier_esize (opnds
[0].qualifier
) != 8)
2729 /* uimm8 or simm8 */
2730 if (!value_in_range_p (opnd
->imm
.value
, -128, 255))
2732 set_imm_out_of_range_error (mismatch_detail
, idx
, -128, 255);
2736 else if (aarch64_shrink_expanded_imm8 (opnd
->imm
.value
) < 0)
2739 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
2740 ffffffffgggggggghhhhhhhh'. */
2741 set_other_error (mismatch_detail
, idx
,
2742 _("invalid value for immediate"));
2745 /* Is the shift amount valid? */
2746 switch (opnd
->shifter
.kind
)
2748 case AARCH64_MOD_LSL
:
2749 size
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
2750 if (!value_in_range_p (opnd
->shifter
.amount
, 0, (size
- 1) * 8))
2752 set_sft_amount_out_of_range_error (mismatch_detail
, idx
, 0,
2756 if (!value_aligned_p (opnd
->shifter
.amount
, 8))
2758 set_unaligned_error (mismatch_detail
, idx
, 8);
2762 case AARCH64_MOD_MSL
:
2763 /* Only 8 and 16 are valid shift amount. */
2764 if (opnd
->shifter
.amount
!= 8 && opnd
->shifter
.amount
!= 16)
2766 set_other_error (mismatch_detail
, idx
,
2767 _("shift amount must be 0 or 16"));
2772 if (opnd
->shifter
.kind
!= AARCH64_MOD_NONE
)
2774 set_other_error (mismatch_detail
, idx
,
2775 _("invalid shift operator"));
2782 case AARCH64_OPND_FPIMM
:
2783 case AARCH64_OPND_SIMD_FPIMM
:
2784 case AARCH64_OPND_SVE_FPIMM8
:
2785 if (opnd
->imm
.is_fp
== 0)
2787 set_other_error (mismatch_detail
, idx
,
2788 _("floating-point immediate expected"));
2791 /* The value is expected to be an 8-bit floating-point constant with
2792 sign, 3-bit exponent and normalized 4 bits of precision, encoded
2793 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
2795 if (!value_in_range_p (opnd
->imm
.value
, 0, 255))
2797 set_other_error (mismatch_detail
, idx
,
2798 _("immediate out of range"));
2801 if (opnd
->shifter
.kind
!= AARCH64_MOD_NONE
)
2803 set_other_error (mismatch_detail
, idx
,
2804 _("invalid shift operator"));
2809 case AARCH64_OPND_SVE_AIMM
:
2812 assert (opnd
->shifter
.kind
== AARCH64_MOD_LSL
);
2813 size
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
2814 mask
= ~((uint64_t) -1 << (size
* 4) << (size
* 4));
2815 uvalue
= opnd
->imm
.value
;
2816 shift
= opnd
->shifter
.amount
;
2821 set_other_error (mismatch_detail
, idx
,
2822 _("no shift amount allowed for"
2823 " 8-bit constants"));
2829 if (shift
!= 0 && shift
!= 8)
2831 set_other_error (mismatch_detail
, idx
,
2832 _("shift amount must be 0 or 8"));
2835 if (shift
== 0 && (uvalue
& 0xff) == 0)
2838 uvalue
= (int64_t) uvalue
/ 256;
2842 if ((uvalue
& mask
) != uvalue
&& (uvalue
| ~mask
) != uvalue
)
2844 set_other_error (mismatch_detail
, idx
,
2845 _("immediate too big for element size"));
2848 uvalue
= (uvalue
- min_value
) & mask
;
2851 set_other_error (mismatch_detail
, idx
,
2852 _("invalid arithmetic immediate"));
2857 case AARCH64_OPND_SVE_ASIMM
:
2861 case AARCH64_OPND_SVE_I1_HALF_ONE
:
2862 assert (opnd
->imm
.is_fp
);
2863 if (opnd
->imm
.value
!= 0x3f000000 && opnd
->imm
.value
!= 0x3f800000)
2865 set_other_error (mismatch_detail
, idx
,
2866 _("floating-point value must be 0.5 or 1.0"));
2871 case AARCH64_OPND_SVE_I1_HALF_TWO
:
2872 assert (opnd
->imm
.is_fp
);
2873 if (opnd
->imm
.value
!= 0x3f000000 && opnd
->imm
.value
!= 0x40000000)
2875 set_other_error (mismatch_detail
, idx
,
2876 _("floating-point value must be 0.5 or 2.0"));
2881 case AARCH64_OPND_SVE_I1_ZERO_ONE
:
2882 assert (opnd
->imm
.is_fp
);
2883 if (opnd
->imm
.value
!= 0 && opnd
->imm
.value
!= 0x3f800000)
2885 set_other_error (mismatch_detail
, idx
,
2886 _("floating-point value must be 0.0 or 1.0"));
2891 case AARCH64_OPND_SVE_INV_LIMM
:
2893 int esize
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
2894 uint64_t uimm
= ~opnd
->imm
.value
;
2895 if (!aarch64_logical_immediate_p (uimm
, esize
, NULL
))
2897 set_other_error (mismatch_detail
, idx
,
2898 _("immediate out of range"));
2904 case AARCH64_OPND_SVE_LIMM_MOV
:
2906 int esize
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
2907 uint64_t uimm
= opnd
->imm
.value
;
2908 if (!aarch64_logical_immediate_p (uimm
, esize
, NULL
))
2910 set_other_error (mismatch_detail
, idx
,
2911 _("immediate out of range"));
2914 if (!aarch64_sve_dupm_mov_immediate_p (uimm
, esize
))
2916 set_other_error (mismatch_detail
, idx
,
2917 _("invalid replicated MOV immediate"));
2923 case AARCH64_OPND_SVE_PATTERN_SCALED
:
2924 assert (opnd
->shifter
.kind
== AARCH64_MOD_MUL
);
2925 if (!value_in_range_p (opnd
->shifter
.amount
, 1, 16))
2927 set_multiplier_out_of_range_error (mismatch_detail
, idx
, 1, 16);
2932 case AARCH64_OPND_SVE_SHLIMM_PRED
:
2933 case AARCH64_OPND_SVE_SHLIMM_UNPRED
:
2934 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22
:
2935 size
= aarch64_get_qualifier_esize (opnds
[idx
- 1].qualifier
);
2936 if (!value_in_range_p (opnd
->imm
.value
, 0, 8 * size
- 1))
2938 set_imm_out_of_range_error (mismatch_detail
, idx
,
2944 case AARCH64_OPND_SME_SHRIMM4
:
2945 size
= 1 << get_operand_fields_width (get_operand_from_code (type
));
2946 if (!value_in_range_p (opnd
->imm
.value
, 1, size
))
2948 set_imm_out_of_range_error (mismatch_detail
, idx
, 1, size
);
2953 case AARCH64_OPND_SME_SHRIMM5
:
2954 case AARCH64_OPND_SVE_SHRIMM_PRED
:
2955 case AARCH64_OPND_SVE_SHRIMM_UNPRED
:
2956 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22
:
2957 num
= (type
== AARCH64_OPND_SVE_SHRIMM_UNPRED_22
) ? 2 : 1;
2958 size
= aarch64_get_qualifier_esize (opnds
[idx
- num
].qualifier
);
2959 if (!value_in_range_p (opnd
->imm
.value
, 1, 8 * size
))
2961 set_imm_out_of_range_error (mismatch_detail
, idx
, 1, 8*size
);
2966 case AARCH64_OPND_SME_ZT0_INDEX
:
2967 if (!value_in_range_p (opnd
->imm
.value
, 0, 56))
2969 set_elem_idx_out_of_range_error (mismatch_detail
, idx
, 0, 56);
2972 if (opnd
->imm
.value
% 8 != 0)
2974 set_other_error (mismatch_detail
, idx
,
2975 _("byte index must be a multiple of 8"));
2985 case AARCH64_OPND_CLASS_SYSTEM
:
2988 case AARCH64_OPND_PSTATEFIELD
:
2989 for (i
= 0; aarch64_pstatefields
[i
].name
; ++i
)
2990 if (aarch64_pstatefields
[i
].value
== opnd
->pstatefield
)
2992 assert (aarch64_pstatefields
[i
].name
);
2993 assert (idx
== 0 && opnds
[1].type
== AARCH64_OPND_UIMM4
);
2994 max_value
= F_GET_REG_MAX_VALUE (aarch64_pstatefields
[i
].flags
);
2995 if (opnds
[1].imm
.value
< 0 || opnds
[1].imm
.value
> max_value
)
2997 set_imm_out_of_range_error (mismatch_detail
, 1, 0, max_value
);
3001 case AARCH64_OPND_PRFOP
:
3002 if (opcode
->iclass
== ldst_regoff
&& opnd
->prfop
->value
>= 24)
3004 set_other_error (mismatch_detail
, idx
,
3005 _("the register-index form of PRFM does"
3006 " not accept opcodes in the range 24-31"));
3015 case AARCH64_OPND_CLASS_SIMD_ELEMENT
:
3016 /* Get the upper bound for the element index. */
3017 if (opcode
->op
== OP_FCMLA_ELEM
)
3018 /* FCMLA index range depends on the vector size of other operands
3019 and is halfed because complex numbers take two elements. */
3020 num
= aarch64_get_qualifier_nelem (opnds
[0].qualifier
)
3021 * aarch64_get_qualifier_esize (opnds
[0].qualifier
) / 2;
3024 num
= num
/ aarch64_get_qualifier_esize (qualifier
) - 1;
3025 assert (aarch64_get_qualifier_nelem (qualifier
) == 1);
3027 /* Index out-of-range. */
3028 if (!value_in_range_p (opnd
->reglane
.index
, 0, num
))
3030 set_elem_idx_out_of_range_error (mismatch_detail
, idx
, 0, num
);
3033 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
3034 <Vm> Is the vector register (V0-V31) or (V0-V15), whose
3035 number is encoded in "size:M:Rm":
3041 if (type
== AARCH64_OPND_Em16
&& qualifier
== AARCH64_OPND_QLF_S_H
3042 && !value_in_range_p (opnd
->reglane
.regno
, 0, 15))
3044 set_regno_out_of_range_error (mismatch_detail
, idx
, 0, 15);
3049 case AARCH64_OPND_CLASS_MODIFIED_REG
:
3050 assert (idx
== 1 || idx
== 2);
3053 case AARCH64_OPND_Rm_EXT
:
3054 if (!aarch64_extend_operator_p (opnd
->shifter
.kind
)
3055 && opnd
->shifter
.kind
!= AARCH64_MOD_LSL
)
3057 set_other_error (mismatch_detail
, idx
,
3058 _("extend operator expected"));
3061 /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
3062 (i.e. SP), in which case it defaults to LSL. The LSL alias is
3063 only valid when "Rd" or "Rn" is '11111', and is preferred in that
3065 if (!aarch64_stack_pointer_p (opnds
+ 0)
3066 && (idx
!= 2 || !aarch64_stack_pointer_p (opnds
+ 1)))
3068 if (!opnd
->shifter
.operator_present
)
3070 set_other_error (mismatch_detail
, idx
,
3071 _("missing extend operator"));
3074 else if (opnd
->shifter
.kind
== AARCH64_MOD_LSL
)
3076 set_other_error (mismatch_detail
, idx
,
3077 _("'LSL' operator not allowed"));
3081 assert (opnd
->shifter
.operator_present
/* Default to LSL. */
3082 || opnd
->shifter
.kind
== AARCH64_MOD_LSL
);
3083 if (!value_in_range_p (opnd
->shifter
.amount
, 0, 4))
3085 set_sft_amount_out_of_range_error (mismatch_detail
, idx
, 0, 4);
3088 /* In the 64-bit form, the final register operand is written as Wm
3089 for all but the (possibly omitted) UXTX/LSL and SXTX
3091 N.B. GAS allows X register to be used with any operator as a
3092 programming convenience. */
3093 if (qualifier
== AARCH64_OPND_QLF_X
3094 && opnd
->shifter
.kind
!= AARCH64_MOD_LSL
3095 && opnd
->shifter
.kind
!= AARCH64_MOD_UXTX
3096 && opnd
->shifter
.kind
!= AARCH64_MOD_SXTX
)
3098 set_other_error (mismatch_detail
, idx
, _("W register expected"));
3103 case AARCH64_OPND_Rm_SFT
:
3104 /* ROR is not available to the shifted register operand in
3105 arithmetic instructions. */
3106 if (!aarch64_shift_operator_p (opnd
->shifter
.kind
))
3108 set_other_error (mismatch_detail
, idx
,
3109 _("shift operator expected"));
3112 if (opnd
->shifter
.kind
== AARCH64_MOD_ROR
3113 && opcode
->iclass
!= log_shift
)
3115 set_other_error (mismatch_detail
, idx
,
3116 _("'ROR' operator not allowed"));
3119 num
= qualifier
== AARCH64_OPND_QLF_W
? 31 : 63;
3120 if (!value_in_range_p (opnd
->shifter
.amount
, 0, num
))
3122 set_sft_amount_out_of_range_error (mismatch_detail
, idx
, 0, num
);
3139 /* Main entrypoint for the operand constraint checking.
3141 Return 1 if operands of *INST meet the constraint applied by the operand
3142 codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
3143 not NULL, return the detail of the error in *MISMATCH_DETAIL. N.B. when
3144 adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
3145 with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
3146 error kind when it is notified that an instruction does not pass the check).
3148 Un-determined operand qualifiers may get established during the process. */
3151 aarch64_match_operands_constraint (aarch64_inst
*inst
,
3152 aarch64_operand_error
*mismatch_detail
)
3156 DEBUG_TRACE ("enter");
3158 i
= inst
->opcode
->tied_operand
;
3162 /* Check for tied_operands with specific opcode iclass. */
3163 switch (inst
->opcode
->iclass
)
3165 /* For SME LDR and STR instructions #imm must have the same numerical
3166 value for both operands.
3170 assert (inst
->operands
[0].type
== AARCH64_OPND_SME_ZA_array_off4
);
3171 assert (inst
->operands
[1].type
== AARCH64_OPND_SME_ADDR_RI_U4xVL
);
3172 if (inst
->operands
[0].indexed_za
.index
.imm
3173 != inst
->operands
[1].addr
.offset
.imm
)
3175 if (mismatch_detail
)
3177 mismatch_detail
->kind
= AARCH64_OPDE_UNTIED_IMMS
;
3178 mismatch_detail
->index
= i
;
3186 /* Check for cases where a source register needs to be the
3187 same as the destination register. Do this before
3188 matching qualifiers since if an instruction has both
3189 invalid tying and invalid qualifiers, the error about
3190 qualifiers would suggest several alternative instructions
3191 that also have invalid tying. */
3192 enum aarch64_operand_class op_class1
3193 = aarch64_get_operand_class (inst
->operands
[0].type
);
3194 enum aarch64_operand_class op_class2
3195 = aarch64_get_operand_class (inst
->operands
[i
].type
);
3196 assert (op_class1
== op_class2
);
3197 if (op_class1
== AARCH64_OPND_CLASS_SVE_REGLIST
3198 ? ((inst
->operands
[0].reglist
.first_regno
3199 != inst
->operands
[i
].reglist
.first_regno
)
3200 || (inst
->operands
[0].reglist
.num_regs
3201 != inst
->operands
[i
].reglist
.num_regs
)
3202 || (inst
->operands
[0].reglist
.stride
3203 != inst
->operands
[i
].reglist
.stride
))
3204 : (inst
->operands
[0].reg
.regno
3205 != inst
->operands
[i
].reg
.regno
))
3207 if (mismatch_detail
)
3209 mismatch_detail
->kind
= AARCH64_OPDE_UNTIED_OPERAND
;
3210 mismatch_detail
->index
= i
;
3211 mismatch_detail
->error
= NULL
;
3220 /* Match operands' qualifier.
3221 *INST has already had qualifier establish for some, if not all, of
3222 its operands; we need to find out whether these established
3223 qualifiers match one of the qualifier sequence in
3224 INST->OPCODE->QUALIFIERS_LIST. If yes, we will assign each operand
3225 with the corresponding qualifier in such a sequence.
3226 Only basic operand constraint checking is done here; the more thorough
3227 constraint checking will carried out by operand_general_constraint_met_p,
3228 which has be to called after this in order to get all of the operands'
3229 qualifiers established. */
3231 if (match_operands_qualifier (inst
, true /* update_p */,
3232 &invalid_count
) == 0)
3234 DEBUG_TRACE ("FAIL on operand qualifier matching");
3235 if (mismatch_detail
)
3237 /* Return an error type to indicate that it is the qualifier
3238 matching failure; we don't care about which operand as there
3239 are enough information in the opcode table to reproduce it. */
3240 mismatch_detail
->kind
= AARCH64_OPDE_INVALID_VARIANT
;
3241 mismatch_detail
->index
= -1;
3242 mismatch_detail
->error
= NULL
;
3243 mismatch_detail
->data
[0].i
= invalid_count
;
3248 /* Match operands' constraint. */
3249 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
3251 enum aarch64_opnd type
= inst
->opcode
->operands
[i
];
3252 if (type
== AARCH64_OPND_NIL
)
3254 if (inst
->operands
[i
].skip
)
3256 DEBUG_TRACE ("skip the incomplete operand %d", i
);
3259 if (operand_general_constraint_met_p (inst
->operands
, i
, type
,
3260 inst
->opcode
, mismatch_detail
) == 0)
3262 DEBUG_TRACE ("FAIL on operand %d", i
);
3267 DEBUG_TRACE ("PASS");
3272 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
3273 Also updates the TYPE of each INST->OPERANDS with the corresponding
3274 value of OPCODE->OPERANDS.
3276 Note that some operand qualifiers may need to be manually cleared by
3277 the caller before it further calls the aarch64_opcode_encode; by
3278 doing this, it helps the qualifier matching facilities work
3281 const aarch64_opcode
*
3282 aarch64_replace_opcode (aarch64_inst
*inst
, const aarch64_opcode
*opcode
)
3285 const aarch64_opcode
*old
= inst
->opcode
;
3287 inst
->opcode
= opcode
;
3289 /* Update the operand types. */
3290 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
3292 inst
->operands
[i
].type
= opcode
->operands
[i
];
3293 if (opcode
->operands
[i
] == AARCH64_OPND_NIL
)
3297 DEBUG_TRACE ("replace %s with %s", old
->name
, opcode
->name
);
3303 aarch64_operand_index (const enum aarch64_opnd
*operands
, enum aarch64_opnd operand
)
3306 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
3307 if (operands
[i
] == operand
)
3309 else if (operands
[i
] == AARCH64_OPND_NIL
)
/* R0...R30, followed by FOR31.  */
#define BANK(R, FOR31) \
  { R (0), R (1), R (2), R (3), R (4), R (5), R (6), R (7), \
    R (8), R (9), R (10), R (11), R (12), R (13), R (14), R (15), \
    R (16), R (17), R (18), R (19), R (20), R (21), R (22), R (23), \
    R (24), R (25), R (26), R (27), R (28), R (29), R (30), FOR31 }

/* Names of the general-purpose registers, indexed as:
   [0][0]  32-bit integer regs with sp   Wn
   [0][1]  64-bit integer regs with sp   Xn  sf=1
   [1][0]  32-bit integer regs with #0   Wn
   [1][1]  64-bit integer regs with #0   Xn  sf=1  */
static const char *int_reg[2][2][32] = {
#define R32(X) "w" #X
#define R64(X) "x" #X
  { BANK (R32, "wsp"), BANK (R64, "sp") },
  { BANK (R32, "wzr"), BANK (R64, "xzr") }
#undef R32
#undef R64
};
3333 /* Names of the SVE vector registers, first with .S suffixes,
3334 then with .D suffixes. */
3336 static const char *sve_reg
[2][32] = {
3337 #define ZS(X) "z" #X ".s"
3338 #define ZD(X) "z" #X ".d"
3339 BANK (ZS
, ZS (31)), BANK (ZD
, ZD (31))
3345 /* Return the integer register name.
3346 if SP_REG_P is not 0, R31 is an SP reg, other R31 is the zero reg. */
3348 static inline const char *
3349 get_int_reg_name (int regno
, aarch64_opnd_qualifier_t qualifier
, int sp_reg_p
)
3351 const int has_zr
= sp_reg_p
? 0 : 1;
3352 const int is_64
= aarch64_get_qualifier_esize (qualifier
) == 4 ? 0 : 1;
3353 return int_reg
[has_zr
][is_64
][regno
];
3356 /* Like get_int_reg_name, but IS_64 is always 1. */
3358 static inline const char *
3359 get_64bit_int_reg_name (int regno
, int sp_reg_p
)
3361 const int has_zr
= sp_reg_p
? 0 : 1;
3362 return int_reg
[has_zr
][1][regno
];
3365 /* Get the name of the integer offset register in OPND, using the shift type
3366 to decide whether it's a word or doubleword. */
3368 static inline const char *
3369 get_offset_int_reg_name (const aarch64_opnd_info
*opnd
)
3371 switch (opnd
->shifter
.kind
)
3373 case AARCH64_MOD_UXTW
:
3374 case AARCH64_MOD_SXTW
:
3375 return get_int_reg_name (opnd
->addr
.offset
.regno
, AARCH64_OPND_QLF_W
, 0);
3377 case AARCH64_MOD_LSL
:
3378 case AARCH64_MOD_SXTX
:
3379 return get_int_reg_name (opnd
->addr
.offset
.regno
, AARCH64_OPND_QLF_X
, 0);
3386 /* Get the name of the SVE vector offset register in OPND, using the operand
3387 qualifier to decide whether the suffix should be .S or .D. */
3389 static inline const char *
3390 get_addr_sve_reg_name (int regno
, aarch64_opnd_qualifier_t qualifier
)
3392 assert (qualifier
== AARCH64_OPND_QLF_S_S
3393 || qualifier
== AARCH64_OPND_QLF_S_D
);
3394 return sve_reg
[qualifier
== AARCH64_OPND_QLF_S_D
][regno
];
3397 /* Types for expanding an encoded 8-bit value to a floating-point value. */
/* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
   normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
   (depending on the type of the instruction).  IMM8 will be expanded to a
   single-precision floating-point value (SIZE == 4) or a double-precision
   floating-point value (SIZE == 8).  A half-precision floating-point value
   (SIZE == 2) is expanded to a single-precision floating-point value.  The
   expanded value is returned.  */

static uint64_t
expand_fp_imm (int size, uint32_t imm8)
{
  uint64_t imm = 0;
  uint32_t imm8_7, imm8_6_0, imm8_6, imm8_6_repl4;

  imm8_7 = (imm8 >> 7) & 0x01;	/* imm8<7>   */
  imm8_6_0 = imm8 & 0x7f;	/* imm8<6:0> */
  imm8_6 = imm8_6_0 >> 6;	/* imm8<6>   */
  imm8_6_repl4 = (imm8_6 << 3) | (imm8_6 << 2)
    | (imm8_6 << 1) | imm8_6;	/* Replicate(imm8<6>,4) */
  if (size == 8)
    {
      imm = (imm8_7 << (63-32))		/* imm8<7>  */
	| ((imm8_6 ^ 1) << (62-32))	/* NOT(imm8<6)	*/
	| (imm8_6_repl4 << (58-32)) | (imm8_6 << (57-32))
	| (imm8_6 << (56-32)) | (imm8_6 << (55-32)) /* Replicate(imm8<6>,7) */
	| (imm8_6_0 << (48-32));	/* imm8<6>:imm8<5:0>    */
      imm <<= 32;
    }
  else if (size == 4 || size == 2)
    {
      imm = (imm8_7 << 31)	/* imm8<7>              */
	| ((imm8_6 ^ 1) << 30)	/* NOT(imm8<6>)         */
	| (imm8_6_repl4 << 26)	/* Replicate(imm8<6>,4) */
	| (imm8_6_0 << 19);	/* imm8<6>:imm8<5:0>    */
    }
  else
    {
      /* An unsupported size.  */
      assert (0);
    }

  return imm;
}
3461 /* Return a string based on FMT with the register style applied. */
3464 style_reg (struct aarch64_styler
*styler
, const char *fmt
, ...)
3470 txt
= styler
->apply_style (styler
, dis_style_register
, fmt
, ap
);
3476 /* Return a string based on FMT with the immediate style applied. */
3479 style_imm (struct aarch64_styler
*styler
, const char *fmt
, ...)
3485 txt
= styler
->apply_style (styler
, dis_style_immediate
, fmt
, ap
);
3491 /* Return a string based on FMT with the sub-mnemonic style applied. */
3494 style_sub_mnem (struct aarch64_styler
*styler
, const char *fmt
, ...)
3500 txt
= styler
->apply_style (styler
, dis_style_sub_mnemonic
, fmt
, ap
);
3506 /* Return a string based on FMT with the address style applied. */
3509 style_addr (struct aarch64_styler
*styler
, const char *fmt
, ...)
3515 txt
= styler
->apply_style (styler
, dis_style_address
, fmt
, ap
);
3521 /* Produce the string representation of the register list operand *OPND
3522 in the buffer pointed by BUF of size SIZE. PREFIX is the part of
3523 the register name that comes before the register number, such as "v". */
3525 print_register_list (char *buf
, size_t size
, const aarch64_opnd_info
*opnd
,
3526 const char *prefix
, struct aarch64_styler
*styler
)
3528 const int mask
= (prefix
[0] == 'p' ? 15 : 31);
3529 const int num_regs
= opnd
->reglist
.num_regs
;
3530 const int stride
= opnd
->reglist
.stride
;
3531 const int first_reg
= opnd
->reglist
.first_regno
;
3532 const int last_reg
= (first_reg
+ (num_regs
- 1) * stride
) & mask
;
3533 const char *qlf_name
= aarch64_get_qualifier_name (opnd
->qualifier
);
3534 char tb
[16]; /* Temporary buffer. */
3536 assert (opnd
->type
!= AARCH64_OPND_LEt
|| opnd
->reglist
.has_index
);
3537 assert (num_regs
>= 1 && num_regs
<= 4);
3539 /* Prepare the index if any. */
3540 if (opnd
->reglist
.has_index
)
3541 /* PR 21096: The %100 is to silence a warning about possible truncation. */
3542 snprintf (tb
, sizeof (tb
), "[%s]",
3543 style_imm (styler
, "%" PRIi64
, (opnd
->reglist
.index
% 100)));
3547 /* The hyphenated form is preferred for disassembly if there are
3548 more than two registers in the list, and the register numbers
3549 are monotonically increasing in increments of one. */
3550 if (stride
== 1 && num_regs
> 1)
3551 snprintf (buf
, size
, "{%s-%s}%s",
3552 style_reg (styler
, "%s%d.%s", prefix
, first_reg
, qlf_name
),
3553 style_reg (styler
, "%s%d.%s", prefix
, last_reg
, qlf_name
), tb
);
3556 const int reg0
= first_reg
;
3557 const int reg1
= (first_reg
+ stride
) & mask
;
3558 const int reg2
= (first_reg
+ stride
* 2) & mask
;
3559 const int reg3
= (first_reg
+ stride
* 3) & mask
;
3564 snprintf (buf
, size
, "{%s}%s",
3565 style_reg (styler
, "%s%d.%s", prefix
, reg0
, qlf_name
),
3569 snprintf (buf
, size
, "{%s, %s}%s",
3570 style_reg (styler
, "%s%d.%s", prefix
, reg0
, qlf_name
),
3571 style_reg (styler
, "%s%d.%s", prefix
, reg1
, qlf_name
),
3575 snprintf (buf
, size
, "{%s, %s, %s}%s",
3576 style_reg (styler
, "%s%d.%s", prefix
, reg0
, qlf_name
),
3577 style_reg (styler
, "%s%d.%s", prefix
, reg1
, qlf_name
),
3578 style_reg (styler
, "%s%d.%s", prefix
, reg2
, qlf_name
),
3582 snprintf (buf
, size
, "{%s, %s, %s, %s}%s",
3583 style_reg (styler
, "%s%d.%s", prefix
, reg0
, qlf_name
),
3584 style_reg (styler
, "%s%d.%s", prefix
, reg1
, qlf_name
),
3585 style_reg (styler
, "%s%d.%s", prefix
, reg2
, qlf_name
),
3586 style_reg (styler
, "%s%d.%s", prefix
, reg3
, qlf_name
),
3593 /* Print the register+immediate address in OPND to BUF, which has SIZE
3594 characters. BASE is the name of the base register. */
3597 print_immediate_offset_address (char *buf
, size_t size
,
3598 const aarch64_opnd_info
*opnd
,
3600 struct aarch64_styler
*styler
)
3602 if (opnd
->addr
.writeback
)
3604 if (opnd
->addr
.preind
)
3606 if (opnd
->type
== AARCH64_OPND_ADDR_SIMM10
&& !opnd
->addr
.offset
.imm
)
3607 snprintf (buf
, size
, "[%s]!", style_reg (styler
, base
));
3609 snprintf (buf
, size
, "[%s, %s]!",
3610 style_reg (styler
, base
),
3611 style_imm (styler
, "#%d", opnd
->addr
.offset
.imm
));
3614 snprintf (buf
, size
, "[%s], %s",
3615 style_reg (styler
, base
),
3616 style_imm (styler
, "#%d", opnd
->addr
.offset
.imm
));
3620 if (opnd
->shifter
.operator_present
)
3622 assert (opnd
->shifter
.kind
== AARCH64_MOD_MUL_VL
);
3623 snprintf (buf
, size
, "[%s, %s, %s]",
3624 style_reg (styler
, base
),
3625 style_imm (styler
, "#%d", opnd
->addr
.offset
.imm
),
3626 style_sub_mnem (styler
, "mul vl"));
3628 else if (opnd
->addr
.offset
.imm
)
3629 snprintf (buf
, size
, "[%s, %s]",
3630 style_reg (styler
, base
),
3631 style_imm (styler
, "#%d", opnd
->addr
.offset
.imm
));
3633 snprintf (buf
, size
, "[%s]", style_reg (styler
, base
));
3637 /* Produce the string representation of the register offset address operand
3638 *OPND in the buffer pointed by BUF of size SIZE. BASE and OFFSET are
3639 the names of the base and offset registers. */
3641 print_register_offset_address (char *buf
, size_t size
,
3642 const aarch64_opnd_info
*opnd
,
3643 const char *base
, const char *offset
,
3644 struct aarch64_styler
*styler
)
3646 char tb
[32]; /* Temporary buffer. */
3647 bool print_extend_p
= true;
3648 bool print_amount_p
= true;
3649 const char *shift_name
= aarch64_operand_modifiers
[opnd
->shifter
.kind
].name
;
3651 if (!opnd
->shifter
.amount
&& (opnd
->qualifier
!= AARCH64_OPND_QLF_S_B
3652 || !opnd
->shifter
.amount_present
))
3654 /* Not print the shift/extend amount when the amount is zero and
3655 when it is not the special case of 8-bit load/store instruction. */
3656 print_amount_p
= false;
3657 /* Likewise, no need to print the shift operator LSL in such a
3659 if (opnd
->shifter
.kind
== AARCH64_MOD_LSL
)
3660 print_extend_p
= false;
3663 /* Prepare for the extend/shift. */
3667 snprintf (tb
, sizeof (tb
), ", %s %s",
3668 style_sub_mnem (styler
, shift_name
),
3669 style_imm (styler
, "#%" PRIi64
,
3670 /* PR 21096: The %100 is to silence a warning about possible truncation. */
3671 (opnd
->shifter
.amount
% 100)));
3673 snprintf (tb
, sizeof (tb
), ", %s",
3674 style_sub_mnem (styler
, shift_name
));
3679 snprintf (buf
, size
, "[%s, %s%s]", style_reg (styler
, base
),
3680 style_reg (styler
, offset
), tb
);
3683 /* Print ZA tiles from imm8 in ZERO instruction.
3685 The preferred disassembly of this instruction uses the shortest list of tile
3686 names that represent the encoded immediate mask.
3689 * An all-ones immediate is disassembled as {ZA}.
3690 * An all-zeros immediate is disassembled as an empty list { }.
/* Print ZA tiles from imm8 in ZERO instruction.

   The preferred disassembly of this instruction uses the shortest list of
   tile names that represent the encoded immediate mask.

   For example:
    * An all-ones immediate is disassembled as {ZA}.
    * An all-zeros immediate is disassembled as an empty list { }.  */
static void
print_sme_za_list (char *buf, size_t size, int mask,
		   struct aarch64_styler *styler)
{
  /* Tile names, with the entries covering the most mask bits first so
     that the greedy loop below yields the shortest list.  The final " "
     entry (mask 0x00) matches any residue and produces the empty-list
     rendering "{ }".  Made static const so the tables are built once
     rather than re-initialized on every call.  */
  static const char *const zan[] = { "za",    "za0.h", "za1.h", "za0.s",
				     "za1.s", "za2.s", "za3.s", "za0.d",
				     "za1.d", "za2.d", "za3.d", "za4.d",
				     "za5.d", "za6.d", "za7.d", " " };
  /* Bit coverage of the corresponding entry in ZAN.  */
  static const int zan_v[] = { 0xff, 0x55, 0xaa, 0x11,
			       0x22, 0x44, 0x88, 0x01,
			       0x02, 0x04, 0x08, 0x10,
			       0x20, 0x40, 0x80, 0x00 };
  int i, k;
  const int ZAN_SIZE = sizeof (zan) / sizeof (zan[0]);

  /* NOTE(review): K accumulates snprintf return values, so BUF must be
     large enough that no truncation occurs (otherwise BUF + K could pass
     the end of the buffer) — callers pass generously sized buffers.  */
  k = snprintf (buf, size, "{");
  for (i = 0; i < ZAN_SIZE; i++)
    {
      /* Emit this tile if all of its bits are present in the mask.  */
      if ((mask & zan_v[i]) == zan_v[i])
	{
	  if (k > 1)	/* Not the first element: need a separator.  */
	    k += snprintf (buf + k, size - k, ", ");

	  k += snprintf (buf + k, size - k, "%s", style_reg (styler, zan[i]));

	  /* Clear the bits this tile accounts for; stop once the whole
	     mask has been represented.  */
	  mask &= ~zan_v[i];
	  if (!mask)
	    break;
	}
    }
  snprintf (buf + k, size - k, "}");
}
3724 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
3725 in *BUF. The caller should pass in the maximum size of *BUF in SIZE.
3726 PC, PCREL_P and ADDRESS are used to pass in and return information about
3727 the PC-relative address calculation, where the PC value is passed in
3728 PC. If the operand is pc-relative related, *PCREL_P (if PCREL_P non-NULL)
3729 will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
3730 calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
3732 The function serves both the disassembler and the assembler diagnostics
3733 issuer, which is the reason why it lives in this file. */
3736 aarch64_print_operand (char *buf
, size_t size
, bfd_vma pc
,
3737 const aarch64_opcode
*opcode
,
3738 const aarch64_opnd_info
*opnds
, int idx
, int *pcrel_p
,
3739 bfd_vma
*address
, char** notes
,
3740 char *comment
, size_t comment_size
,
3741 aarch64_feature_set features
,
3742 struct aarch64_styler
*styler
)
3744 unsigned int i
, num_conds
;
3745 const char *name
= NULL
;
3746 const aarch64_opnd_info
*opnd
= opnds
+ idx
;
3747 enum aarch64_modifier_kind kind
;
3748 uint64_t addr
, enum_value
;
3750 if (comment
!= NULL
)
3752 assert (comment_size
> 0);
3756 assert (comment_size
== 0);
3764 case AARCH64_OPND_Rd
:
3765 case AARCH64_OPND_Rn
:
3766 case AARCH64_OPND_Rm
:
3767 case AARCH64_OPND_Rt
:
3768 case AARCH64_OPND_Rt2
:
3769 case AARCH64_OPND_Rs
:
3770 case AARCH64_OPND_Ra
:
3771 case AARCH64_OPND_Rt_LS64
:
3772 case AARCH64_OPND_Rt_SYS
:
3773 case AARCH64_OPND_PAIRREG
:
3774 case AARCH64_OPND_SVE_Rm
:
3775 case AARCH64_OPND_LSE128_Rt
:
3776 case AARCH64_OPND_LSE128_Rt2
:
3777 /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
3778 the <ic_op>, therefore we use opnd->present to override the
3779 generic optional-ness information. */
3780 if (opnd
->type
== AARCH64_OPND_Rt_SYS
)
3785 /* Omit the operand, e.g. RET. */
3786 else if (optional_operand_p (opcode
, idx
)
3788 == get_optional_operand_default_value (opcode
)))
3790 assert (opnd
->qualifier
== AARCH64_OPND_QLF_W
3791 || opnd
->qualifier
== AARCH64_OPND_QLF_X
);
3792 snprintf (buf
, size
, "%s",
3793 style_reg (styler
, get_int_reg_name (opnd
->reg
.regno
,
3794 opnd
->qualifier
, 0)));
3797 case AARCH64_OPND_Rd_SP
:
3798 case AARCH64_OPND_Rn_SP
:
3799 case AARCH64_OPND_Rt_SP
:
3800 case AARCH64_OPND_SVE_Rn_SP
:
3801 case AARCH64_OPND_Rm_SP
:
3802 assert (opnd
->qualifier
== AARCH64_OPND_QLF_W
3803 || opnd
->qualifier
== AARCH64_OPND_QLF_WSP
3804 || opnd
->qualifier
== AARCH64_OPND_QLF_X
3805 || opnd
->qualifier
== AARCH64_OPND_QLF_SP
);
3806 snprintf (buf
, size
, "%s",
3807 style_reg (styler
, get_int_reg_name (opnd
->reg
.regno
,
3808 opnd
->qualifier
, 1)));
3811 case AARCH64_OPND_Rm_EXT
:
3812 kind
= opnd
->shifter
.kind
;
3813 assert (idx
== 1 || idx
== 2);
3814 if ((aarch64_stack_pointer_p (opnds
)
3815 || (idx
== 2 && aarch64_stack_pointer_p (opnds
+ 1)))
3816 && ((opnd
->qualifier
== AARCH64_OPND_QLF_W
3817 && opnds
[0].qualifier
== AARCH64_OPND_QLF_W
3818 && kind
== AARCH64_MOD_UXTW
)
3819 || (opnd
->qualifier
== AARCH64_OPND_QLF_X
3820 && kind
== AARCH64_MOD_UXTX
)))
3822 /* 'LSL' is the preferred form in this case. */
3823 kind
= AARCH64_MOD_LSL
;
3824 if (opnd
->shifter
.amount
== 0)
3826 /* Shifter omitted. */
3827 snprintf (buf
, size
, "%s",
3829 get_int_reg_name (opnd
->reg
.regno
,
3830 opnd
->qualifier
, 0)));
3834 if (opnd
->shifter
.amount
)
3835 snprintf (buf
, size
, "%s, %s %s",
3836 style_reg (styler
, get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0)),
3837 style_sub_mnem (styler
, aarch64_operand_modifiers
[kind
].name
),
3838 style_imm (styler
, "#%" PRIi64
, opnd
->shifter
.amount
));
3840 snprintf (buf
, size
, "%s, %s",
3841 style_reg (styler
, get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0)),
3842 style_sub_mnem (styler
, aarch64_operand_modifiers
[kind
].name
));
3845 case AARCH64_OPND_Rm_SFT
:
3846 assert (opnd
->qualifier
== AARCH64_OPND_QLF_W
3847 || opnd
->qualifier
== AARCH64_OPND_QLF_X
);
3848 if (opnd
->shifter
.amount
== 0 && opnd
->shifter
.kind
== AARCH64_MOD_LSL
)
3849 snprintf (buf
, size
, "%s",
3850 style_reg (styler
, get_int_reg_name (opnd
->reg
.regno
,
3851 opnd
->qualifier
, 0)));
3853 snprintf (buf
, size
, "%s, %s %s",
3854 style_reg (styler
, get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0)),
3855 style_sub_mnem (styler
, aarch64_operand_modifiers
[opnd
->shifter
.kind
].name
),
3856 style_imm (styler
, "#%" PRIi64
, opnd
->shifter
.amount
));
3859 case AARCH64_OPND_Fd
:
3860 case AARCH64_OPND_Fn
:
3861 case AARCH64_OPND_Fm
:
3862 case AARCH64_OPND_Fa
:
3863 case AARCH64_OPND_Ft
:
3864 case AARCH64_OPND_Ft2
:
3865 case AARCH64_OPND_Sd
:
3866 case AARCH64_OPND_Sn
:
3867 case AARCH64_OPND_Sm
:
3868 case AARCH64_OPND_SVE_VZn
:
3869 case AARCH64_OPND_SVE_Vd
:
3870 case AARCH64_OPND_SVE_Vm
:
3871 case AARCH64_OPND_SVE_Vn
:
3872 snprintf (buf
, size
, "%s",
3873 style_reg (styler
, "%s%d",
3874 aarch64_get_qualifier_name (opnd
->qualifier
),
3878 case AARCH64_OPND_Va
:
3879 case AARCH64_OPND_Vd
:
3880 case AARCH64_OPND_Vn
:
3881 case AARCH64_OPND_Vm
:
3882 snprintf (buf
, size
, "%s",
3883 style_reg (styler
, "v%d.%s", opnd
->reg
.regno
,
3884 aarch64_get_qualifier_name (opnd
->qualifier
)));
3887 case AARCH64_OPND_Ed
:
3888 case AARCH64_OPND_En
:
3889 case AARCH64_OPND_Em
:
3890 case AARCH64_OPND_Em16
:
3891 case AARCH64_OPND_SM3_IMM2
:
3892 snprintf (buf
, size
, "%s[%s]",
3893 style_reg (styler
, "v%d.%s", opnd
->reglane
.regno
,
3894 aarch64_get_qualifier_name (opnd
->qualifier
)),
3895 style_imm (styler
, "%" PRIi64
, opnd
->reglane
.index
));
3898 case AARCH64_OPND_VdD1
:
3899 case AARCH64_OPND_VnD1
:
3900 snprintf (buf
, size
, "%s[%s]",
3901 style_reg (styler
, "v%d.d", opnd
->reg
.regno
),
3902 style_imm (styler
, "1"));
3905 case AARCH64_OPND_LVn
:
3906 case AARCH64_OPND_LVt
:
3907 case AARCH64_OPND_LVt_AL
:
3908 case AARCH64_OPND_LEt
:
3909 print_register_list (buf
, size
, opnd
, "v", styler
);
3912 case AARCH64_OPND_SVE_Pd
:
3913 case AARCH64_OPND_SVE_Pg3
:
3914 case AARCH64_OPND_SVE_Pg4_5
:
3915 case AARCH64_OPND_SVE_Pg4_10
:
3916 case AARCH64_OPND_SVE_Pg4_16
:
3917 case AARCH64_OPND_SVE_Pm
:
3918 case AARCH64_OPND_SVE_Pn
:
3919 case AARCH64_OPND_SVE_Pt
:
3920 case AARCH64_OPND_SME_Pm
:
3921 if (opnd
->qualifier
== AARCH64_OPND_QLF_NIL
)
3922 snprintf (buf
, size
, "%s",
3923 style_reg (styler
, "p%d", opnd
->reg
.regno
));
3924 else if (opnd
->qualifier
== AARCH64_OPND_QLF_P_Z
3925 || opnd
->qualifier
== AARCH64_OPND_QLF_P_M
)
3926 snprintf (buf
, size
, "%s",
3927 style_reg (styler
, "p%d/%s", opnd
->reg
.regno
,
3928 aarch64_get_qualifier_name (opnd
->qualifier
)));
3930 snprintf (buf
, size
, "%s",
3931 style_reg (styler
, "p%d.%s", opnd
->reg
.regno
,
3932 aarch64_get_qualifier_name (opnd
->qualifier
)));
3935 case AARCH64_OPND_SVE_PNd
:
3936 case AARCH64_OPND_SVE_PNg4_10
:
3937 case AARCH64_OPND_SVE_PNn
:
3938 case AARCH64_OPND_SVE_PNt
:
3939 case AARCH64_OPND_SME_PNd3
:
3940 case AARCH64_OPND_SME_PNg3
:
3941 case AARCH64_OPND_SME_PNn
:
3942 if (opnd
->qualifier
== AARCH64_OPND_QLF_NIL
)
3943 snprintf (buf
, size
, "%s",
3944 style_reg (styler
, "pn%d", opnd
->reg
.regno
));
3945 else if (opnd
->qualifier
== AARCH64_OPND_QLF_P_Z
3946 || opnd
->qualifier
== AARCH64_OPND_QLF_P_M
)
3947 snprintf (buf
, size
, "%s",
3948 style_reg (styler
, "pn%d/%s", opnd
->reg
.regno
,
3949 aarch64_get_qualifier_name (opnd
->qualifier
)));
3951 snprintf (buf
, size
, "%s",
3952 style_reg (styler
, "pn%d.%s", opnd
->reg
.regno
,
3953 aarch64_get_qualifier_name (opnd
->qualifier
)));
3956 case AARCH64_OPND_SME_Pdx2
:
3957 case AARCH64_OPND_SME_PdxN
:
3958 print_register_list (buf
, size
, opnd
, "p", styler
);
3961 case AARCH64_OPND_SME_PNn3_INDEX1
:
3962 case AARCH64_OPND_SME_PNn3_INDEX2
:
3963 snprintf (buf
, size
, "%s[%s]",
3964 style_reg (styler
, "pn%d", opnd
->reglane
.regno
),
3965 style_imm (styler
, "%" PRIi64
, opnd
->reglane
.index
));
3968 case AARCH64_OPND_SVE_Za_5
:
3969 case AARCH64_OPND_SVE_Za_16
:
3970 case AARCH64_OPND_SVE_Zd
:
3971 case AARCH64_OPND_SVE_Zm_5
:
3972 case AARCH64_OPND_SVE_Zm_16
:
3973 case AARCH64_OPND_SVE_Zn
:
3974 case AARCH64_OPND_SVE_Zt
:
3975 case AARCH64_OPND_SME_Zm
:
3976 if (opnd
->qualifier
== AARCH64_OPND_QLF_NIL
)
3977 snprintf (buf
, size
, "%s", style_reg (styler
, "z%d", opnd
->reg
.regno
));
3979 snprintf (buf
, size
, "%s",
3980 style_reg (styler
, "z%d.%s", opnd
->reg
.regno
,
3981 aarch64_get_qualifier_name (opnd
->qualifier
)));
3984 case AARCH64_OPND_SVE_ZnxN
:
3985 case AARCH64_OPND_SVE_ZtxN
:
3986 case AARCH64_OPND_SME_Zdnx2
:
3987 case AARCH64_OPND_SME_Zdnx4
:
3988 case AARCH64_OPND_SME_Zmx2
:
3989 case AARCH64_OPND_SME_Zmx4
:
3990 case AARCH64_OPND_SME_Znx2
:
3991 case AARCH64_OPND_SME_Znx4
:
3992 case AARCH64_OPND_SME_Ztx2_STRIDED
:
3993 case AARCH64_OPND_SME_Ztx4_STRIDED
:
3994 print_register_list (buf
, size
, opnd
, "z", styler
);
3997 case AARCH64_OPND_SVE_Zm3_INDEX
:
3998 case AARCH64_OPND_SVE_Zm3_22_INDEX
:
3999 case AARCH64_OPND_SVE_Zm3_19_INDEX
:
4000 case AARCH64_OPND_SVE_Zm3_11_INDEX
:
4001 case AARCH64_OPND_SVE_Zm4_11_INDEX
:
4002 case AARCH64_OPND_SVE_Zm4_INDEX
:
4003 case AARCH64_OPND_SVE_Zn_INDEX
:
4004 case AARCH64_OPND_SME_Zm_INDEX1
:
4005 case AARCH64_OPND_SME_Zm_INDEX2
:
4006 case AARCH64_OPND_SME_Zm_INDEX3_1
:
4007 case AARCH64_OPND_SME_Zm_INDEX3_2
:
4008 case AARCH64_OPND_SME_Zm_INDEX3_10
:
4009 case AARCH64_OPND_SME_Zm_INDEX4_1
:
4010 case AARCH64_OPND_SME_Zm_INDEX4_10
:
4011 case AARCH64_OPND_SME_Zn_INDEX1_16
:
4012 case AARCH64_OPND_SME_Zn_INDEX2_15
:
4013 case AARCH64_OPND_SME_Zn_INDEX2_16
:
4014 case AARCH64_OPND_SME_Zn_INDEX3_14
:
4015 case AARCH64_OPND_SME_Zn_INDEX3_15
:
4016 case AARCH64_OPND_SME_Zn_INDEX4_14
:
4017 snprintf (buf
, size
, "%s[%s]",
4018 (opnd
->qualifier
== AARCH64_OPND_QLF_NIL
4019 ? style_reg (styler
, "z%d", opnd
->reglane
.regno
)
4020 : style_reg (styler
, "z%d.%s", opnd
->reglane
.regno
,
4021 aarch64_get_qualifier_name (opnd
->qualifier
))),
4022 style_imm (styler
, "%" PRIi64
, opnd
->reglane
.index
));
4025 case AARCH64_OPND_SME_ZAda_2b
:
4026 case AARCH64_OPND_SME_ZAda_3b
:
4027 snprintf (buf
, size
, "%s",
4028 style_reg (styler
, "za%d.%s", opnd
->reg
.regno
,
4029 aarch64_get_qualifier_name (opnd
->qualifier
)));
4032 case AARCH64_OPND_SME_ZA_HV_idx_src
:
4033 case AARCH64_OPND_SME_ZA_HV_idx_srcxN
:
4034 case AARCH64_OPND_SME_ZA_HV_idx_dest
:
4035 case AARCH64_OPND_SME_ZA_HV_idx_destxN
:
4036 case AARCH64_OPND_SME_ZA_HV_idx_ldstr
:
4037 snprintf (buf
, size
, "%s%s[%s, %s%s%s%s%s]%s",
4038 opnd
->type
== AARCH64_OPND_SME_ZA_HV_idx_ldstr
? "{" : "",
4039 style_reg (styler
, "za%d%c.%s",
4040 opnd
->indexed_za
.regno
,
4041 opnd
->indexed_za
.v
== 1 ? 'v' : 'h',
4042 aarch64_get_qualifier_name (opnd
->qualifier
)),
4043 style_reg (styler
, "w%d", opnd
->indexed_za
.index
.regno
),
4044 style_imm (styler
, "%" PRIi64
, opnd
->indexed_za
.index
.imm
),
4045 opnd
->indexed_za
.index
.countm1
? ":" : "",
4046 (opnd
->indexed_za
.index
.countm1
4047 ? style_imm (styler
, "%d",
4048 opnd
->indexed_za
.index
.imm
4049 + opnd
->indexed_za
.index
.countm1
)
4051 opnd
->indexed_za
.group_size
? ", " : "",
4052 opnd
->indexed_za
.group_size
== 2
4053 ? style_sub_mnem (styler
, "vgx2")
4054 : opnd
->indexed_za
.group_size
== 4
4055 ? style_sub_mnem (styler
, "vgx4") : "",
4056 opnd
->type
== AARCH64_OPND_SME_ZA_HV_idx_ldstr
? "}" : "");
4059 case AARCH64_OPND_SME_list_of_64bit_tiles
:
4060 print_sme_za_list (buf
, size
, opnd
->reg
.regno
, styler
);
4063 case AARCH64_OPND_SME_ZA_array_off1x4
:
4064 case AARCH64_OPND_SME_ZA_array_off2x2
:
4065 case AARCH64_OPND_SME_ZA_array_off2x4
:
4066 case AARCH64_OPND_SME_ZA_array_off3_0
:
4067 case AARCH64_OPND_SME_ZA_array_off3_5
:
4068 case AARCH64_OPND_SME_ZA_array_off3x2
:
4069 case AARCH64_OPND_SME_ZA_array_off4
:
4070 snprintf (buf
, size
, "%s[%s, %s%s%s%s%s]",
4071 style_reg (styler
, "za%s%s",
4072 opnd
->qualifier
== AARCH64_OPND_QLF_NIL
? "" : ".",
4073 (opnd
->qualifier
== AARCH64_OPND_QLF_NIL
4075 : aarch64_get_qualifier_name (opnd
->qualifier
))),
4076 style_reg (styler
, "w%d", opnd
->indexed_za
.index
.regno
),
4077 style_imm (styler
, "%" PRIi64
, opnd
->indexed_za
.index
.imm
),
4078 opnd
->indexed_za
.index
.countm1
? ":" : "",
4079 (opnd
->indexed_za
.index
.countm1
4080 ? style_imm (styler
, "%d",
4081 opnd
->indexed_za
.index
.imm
4082 + opnd
->indexed_za
.index
.countm1
)
4084 opnd
->indexed_za
.group_size
? ", " : "",
4085 opnd
->indexed_za
.group_size
== 2
4086 ? style_sub_mnem (styler
, "vgx2")
4087 : opnd
->indexed_za
.group_size
== 4
4088 ? style_sub_mnem (styler
, "vgx4") : "");
4091 case AARCH64_OPND_SME_SM_ZA
:
4092 snprintf (buf
, size
, "%s",
4093 style_reg (styler
, opnd
->reg
.regno
== 's' ? "sm" : "za"));
4096 case AARCH64_OPND_SME_PnT_Wm_imm
:
4097 snprintf (buf
, size
, "%s[%s, %s]",
4098 style_reg (styler
, "p%d.%s", opnd
->indexed_za
.regno
,
4099 aarch64_get_qualifier_name (opnd
->qualifier
)),
4100 style_reg (styler
, "w%d", opnd
->indexed_za
.index
.regno
),
4101 style_imm (styler
, "%" PRIi64
, opnd
->indexed_za
.index
.imm
));
4104 case AARCH64_OPND_SME_VLxN_10
:
4105 case AARCH64_OPND_SME_VLxN_13
:
4106 enum_value
= opnd
->imm
.value
;
4107 assert (enum_value
< ARRAY_SIZE (aarch64_sme_vlxn_array
));
4108 snprintf (buf
, size
, "%s",
4109 style_sub_mnem (styler
, aarch64_sme_vlxn_array
[enum_value
]));
4112 case AARCH64_OPND_CRn
:
4113 case AARCH64_OPND_CRm
:
4114 snprintf (buf
, size
, "%s",
4115 style_reg (styler
, "C%" PRIi64
, opnd
->imm
.value
));
4118 case AARCH64_OPND_IDX
:
4119 case AARCH64_OPND_MASK
:
4120 case AARCH64_OPND_IMM
:
4121 case AARCH64_OPND_IMM_2
:
4122 case AARCH64_OPND_WIDTH
:
4123 case AARCH64_OPND_UIMM3_OP1
:
4124 case AARCH64_OPND_UIMM3_OP2
:
4125 case AARCH64_OPND_BIT_NUM
:
4126 case AARCH64_OPND_IMM_VLSL
:
4127 case AARCH64_OPND_IMM_VLSR
:
4128 case AARCH64_OPND_SHLL_IMM
:
4129 case AARCH64_OPND_IMM0
:
4130 case AARCH64_OPND_IMMR
:
4131 case AARCH64_OPND_IMMS
:
4132 case AARCH64_OPND_UNDEFINED
:
4133 case AARCH64_OPND_FBITS
:
4134 case AARCH64_OPND_TME_UIMM16
:
4135 case AARCH64_OPND_SIMM5
:
4136 case AARCH64_OPND_SME_SHRIMM4
:
4137 case AARCH64_OPND_SME_SHRIMM5
:
4138 case AARCH64_OPND_SVE_SHLIMM_PRED
:
4139 case AARCH64_OPND_SVE_SHLIMM_UNPRED
:
4140 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22
:
4141 case AARCH64_OPND_SVE_SHRIMM_PRED
:
4142 case AARCH64_OPND_SVE_SHRIMM_UNPRED
:
4143 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22
:
4144 case AARCH64_OPND_SVE_SIMM5
:
4145 case AARCH64_OPND_SVE_SIMM5B
:
4146 case AARCH64_OPND_SVE_SIMM6
:
4147 case AARCH64_OPND_SVE_SIMM8
:
4148 case AARCH64_OPND_SVE_UIMM3
:
4149 case AARCH64_OPND_SVE_UIMM7
:
4150 case AARCH64_OPND_SVE_UIMM8
:
4151 case AARCH64_OPND_SVE_UIMM8_53
:
4152 case AARCH64_OPND_IMM_ROT1
:
4153 case AARCH64_OPND_IMM_ROT2
:
4154 case AARCH64_OPND_IMM_ROT3
:
4155 case AARCH64_OPND_SVE_IMM_ROT1
:
4156 case AARCH64_OPND_SVE_IMM_ROT2
:
4157 case AARCH64_OPND_SVE_IMM_ROT3
:
4158 case AARCH64_OPND_CSSC_SIMM8
:
4159 case AARCH64_OPND_CSSC_UIMM8
:
4160 snprintf (buf
, size
, "%s",
4161 style_imm (styler
, "#%" PRIi64
, opnd
->imm
.value
));
4164 case AARCH64_OPND_SVE_I1_HALF_ONE
:
4165 case AARCH64_OPND_SVE_I1_HALF_TWO
:
4166 case AARCH64_OPND_SVE_I1_ZERO_ONE
:
4169 c
.i
= opnd
->imm
.value
;
4170 snprintf (buf
, size
, "%s", style_imm (styler
, "#%.1f", c
.f
));
4174 case AARCH64_OPND_SVE_PATTERN
:
4175 if (optional_operand_p (opcode
, idx
)
4176 && opnd
->imm
.value
== get_optional_operand_default_value (opcode
))
4178 enum_value
= opnd
->imm
.value
;
4179 assert (enum_value
< ARRAY_SIZE (aarch64_sve_pattern_array
));
4180 if (aarch64_sve_pattern_array
[enum_value
])
4181 snprintf (buf
, size
, "%s",
4182 style_reg (styler
, aarch64_sve_pattern_array
[enum_value
]));
4184 snprintf (buf
, size
, "%s",
4185 style_imm (styler
, "#%" PRIi64
, opnd
->imm
.value
));
4188 case AARCH64_OPND_SVE_PATTERN_SCALED
:
4189 if (optional_operand_p (opcode
, idx
)
4190 && !opnd
->shifter
.operator_present
4191 && opnd
->imm
.value
== get_optional_operand_default_value (opcode
))
4193 enum_value
= opnd
->imm
.value
;
4194 assert (enum_value
< ARRAY_SIZE (aarch64_sve_pattern_array
));
4195 if (aarch64_sve_pattern_array
[opnd
->imm
.value
])
4196 snprintf (buf
, size
, "%s",
4198 aarch64_sve_pattern_array
[opnd
->imm
.value
]));
4200 snprintf (buf
, size
, "%s",
4201 style_imm (styler
, "#%" PRIi64
, opnd
->imm
.value
));
4202 if (opnd
->shifter
.operator_present
)
4204 size_t len
= strlen (buf
);
4205 const char *shift_name
4206 = aarch64_operand_modifiers
[opnd
->shifter
.kind
].name
;
4207 snprintf (buf
+ len
, size
- len
, ", %s %s",
4208 style_sub_mnem (styler
, shift_name
),
4209 style_imm (styler
, "#%" PRIi64
, opnd
->shifter
.amount
));
4213 case AARCH64_OPND_SVE_PRFOP
:
4214 enum_value
= opnd
->imm
.value
;
4215 assert (enum_value
< ARRAY_SIZE (aarch64_sve_prfop_array
));
4216 if (aarch64_sve_prfop_array
[enum_value
])
4217 snprintf (buf
, size
, "%s",
4218 style_reg (styler
, aarch64_sve_prfop_array
[enum_value
]));
4220 snprintf (buf
, size
, "%s",
4221 style_imm (styler
, "#%" PRIi64
, opnd
->imm
.value
));
4224 case AARCH64_OPND_IMM_MOV
:
4225 switch (aarch64_get_qualifier_esize (opnds
[0].qualifier
))
4227 case 4: /* e.g. MOV Wd, #<imm32>. */
4229 int imm32
= opnd
->imm
.value
;
4230 snprintf (buf
, size
, "%s",
4231 style_imm (styler
, "#0x%-20x", imm32
));
4232 snprintf (comment
, comment_size
, "#%d", imm32
);
4235 case 8: /* e.g. MOV Xd, #<imm64>. */
4236 snprintf (buf
, size
, "%s", style_imm (styler
, "#0x%-20" PRIx64
,
4238 snprintf (comment
, comment_size
, "#%" PRIi64
, opnd
->imm
.value
);
4241 snprintf (buf
, size
, "<invalid>");
4246 case AARCH64_OPND_FPIMM0
:
4247 snprintf (buf
, size
, "%s", style_imm (styler
, "#0.0"));
4250 case AARCH64_OPND_LIMM
:
4251 case AARCH64_OPND_AIMM
:
4252 case AARCH64_OPND_HALF
:
4253 case AARCH64_OPND_SVE_INV_LIMM
:
4254 case AARCH64_OPND_SVE_LIMM
:
4255 case AARCH64_OPND_SVE_LIMM_MOV
:
4256 if (opnd
->shifter
.amount
)
4257 snprintf (buf
, size
, "%s, %s %s",
4258 style_imm (styler
, "#0x%" PRIx64
, opnd
->imm
.value
),
4259 style_sub_mnem (styler
, "lsl"),
4260 style_imm (styler
, "#%" PRIi64
, opnd
->shifter
.amount
));
4262 snprintf (buf
, size
, "%s",
4263 style_imm (styler
, "#0x%" PRIx64
, opnd
->imm
.value
));
4266 case AARCH64_OPND_SIMD_IMM
:
4267 case AARCH64_OPND_SIMD_IMM_SFT
:
4268 if ((! opnd
->shifter
.amount
&& opnd
->shifter
.kind
== AARCH64_MOD_LSL
)
4269 || opnd
->shifter
.kind
== AARCH64_MOD_NONE
)
4270 snprintf (buf
, size
, "%s",
4271 style_imm (styler
, "#0x%" PRIx64
, opnd
->imm
.value
));
4273 snprintf (buf
, size
, "%s, %s %s",
4274 style_imm (styler
, "#0x%" PRIx64
, opnd
->imm
.value
),
4275 style_sub_mnem (styler
, aarch64_operand_modifiers
[opnd
->shifter
.kind
].name
),
4276 style_imm (styler
, "#%" PRIi64
, opnd
->shifter
.amount
));
4279 case AARCH64_OPND_SVE_AIMM
:
4280 case AARCH64_OPND_SVE_ASIMM
:
4281 if (opnd
->shifter
.amount
)
4282 snprintf (buf
, size
, "%s, %s %s",
4283 style_imm (styler
, "#%" PRIi64
, opnd
->imm
.value
),
4284 style_sub_mnem (styler
, "lsl"),
4285 style_imm (styler
, "#%" PRIi64
, opnd
->shifter
.amount
));
4287 snprintf (buf
, size
, "%s",
4288 style_imm (styler
, "#%" PRIi64
, opnd
->imm
.value
));
4291 case AARCH64_OPND_FPIMM
:
4292 case AARCH64_OPND_SIMD_FPIMM
:
4293 case AARCH64_OPND_SVE_FPIMM8
:
4294 switch (aarch64_get_qualifier_esize (opnds
[0].qualifier
))
4296 case 2: /* e.g. FMOV <Hd>, #<imm>. */
4299 c
.i
= expand_fp_imm (2, opnd
->imm
.value
);
4300 snprintf (buf
, size
, "%s", style_imm (styler
, "#%.18e", c
.f
));
4303 case 4: /* e.g. FMOV <Vd>.4S, #<imm>. */
4306 c
.i
= expand_fp_imm (4, opnd
->imm
.value
);
4307 snprintf (buf
, size
, "%s", style_imm (styler
, "#%.18e", c
.f
));
4310 case 8: /* e.g. FMOV <Sd>, #<imm>. */
4313 c
.i
= expand_fp_imm (8, opnd
->imm
.value
);
4314 snprintf (buf
, size
, "%s", style_imm (styler
, "#%.18e", c
.d
));
4318 snprintf (buf
, size
, "<invalid>");
4323 case AARCH64_OPND_CCMP_IMM
:
4324 case AARCH64_OPND_NZCV
:
4325 case AARCH64_OPND_EXCEPTION
:
4326 case AARCH64_OPND_UIMM4
:
4327 case AARCH64_OPND_UIMM4_ADDG
:
4328 case AARCH64_OPND_UIMM7
:
4329 case AARCH64_OPND_UIMM10
:
4330 if (optional_operand_p (opcode
, idx
)
4331 && (opnd
->imm
.value
==
4332 (int64_t) get_optional_operand_default_value (opcode
)))
4333 /* Omit the operand, e.g. DCPS1. */
4335 snprintf (buf
, size
, "%s",
4336 style_imm (styler
, "#0x%x", (unsigned int) opnd
->imm
.value
));
4339 case AARCH64_OPND_COND
:
4340 case AARCH64_OPND_COND1
:
4341 snprintf (buf
, size
, "%s",
4342 style_sub_mnem (styler
, opnd
->cond
->names
[0]));
4343 num_conds
= ARRAY_SIZE (opnd
->cond
->names
);
4344 for (i
= 1; i
< num_conds
&& opnd
->cond
->names
[i
]; ++i
)
4346 size_t len
= comment
!= NULL
? strlen (comment
) : 0;
4348 snprintf (comment
+ len
, comment_size
- len
, "%s = %s",
4349 opnd
->cond
->names
[0], opnd
->cond
->names
[i
]);
4351 snprintf (comment
+ len
, comment_size
- len
, ", %s",
4352 opnd
->cond
->names
[i
]);
4356 case AARCH64_OPND_ADDR_ADRP
:
4357 addr
= ((pc
+ AARCH64_PCREL_OFFSET
) & ~(uint64_t)0xfff)
4363 /* This is not necessary during the disassembling, as print_address_func
4364 in the disassemble_info will take care of the printing. But some
4365 other callers may be still interested in getting the string in *STR,
4366 so here we do snprintf regardless. */
4367 snprintf (buf
, size
, "%s", style_addr (styler
, "#0x%" PRIx64
, addr
));
4370 case AARCH64_OPND_ADDR_PCREL14
:
4371 case AARCH64_OPND_ADDR_PCREL19
:
4372 case AARCH64_OPND_ADDR_PCREL21
:
4373 case AARCH64_OPND_ADDR_PCREL26
:
4374 addr
= pc
+ AARCH64_PCREL_OFFSET
+ opnd
->imm
.value
;
4379 /* This is not necessary during the disassembling, as print_address_func
4380 in the disassemble_info will take care of the printing. But some
4381 other callers may be still interested in getting the string in *STR,
4382 so here we do snprintf regardless. */
4383 snprintf (buf
, size
, "%s", style_addr (styler
, "#0x%" PRIx64
, addr
));
4386 case AARCH64_OPND_ADDR_SIMPLE
:
4387 case AARCH64_OPND_SIMD_ADDR_SIMPLE
:
4388 case AARCH64_OPND_SIMD_ADDR_POST
:
4389 name
= get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1);
4390 if (opnd
->type
== AARCH64_OPND_SIMD_ADDR_POST
)
4392 if (opnd
->addr
.offset
.is_reg
)
4393 snprintf (buf
, size
, "[%s], %s",
4394 style_reg (styler
, name
),
4395 style_reg (styler
, "x%d", opnd
->addr
.offset
.regno
));
4397 snprintf (buf
, size
, "[%s], %s",
4398 style_reg (styler
, name
),
4399 style_imm (styler
, "#%d", opnd
->addr
.offset
.imm
));
4402 snprintf (buf
, size
, "[%s]", style_reg (styler
, name
));
4405 case AARCH64_OPND_ADDR_REGOFF
:
4406 case AARCH64_OPND_SVE_ADDR_R
:
4407 case AARCH64_OPND_SVE_ADDR_RR
:
4408 case AARCH64_OPND_SVE_ADDR_RR_LSL1
:
4409 case AARCH64_OPND_SVE_ADDR_RR_LSL2
:
4410 case AARCH64_OPND_SVE_ADDR_RR_LSL3
:
4411 case AARCH64_OPND_SVE_ADDR_RR_LSL4
:
4412 case AARCH64_OPND_SVE_ADDR_RX
:
4413 case AARCH64_OPND_SVE_ADDR_RX_LSL1
:
4414 case AARCH64_OPND_SVE_ADDR_RX_LSL2
:
4415 case AARCH64_OPND_SVE_ADDR_RX_LSL3
:
4416 print_register_offset_address
4417 (buf
, size
, opnd
, get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1),
4418 get_offset_int_reg_name (opnd
), styler
);
4421 case AARCH64_OPND_SVE_ADDR_ZX
:
4422 print_register_offset_address
4424 get_addr_sve_reg_name (opnd
->addr
.base_regno
, opnd
->qualifier
),
4425 get_64bit_int_reg_name (opnd
->addr
.offset
.regno
, 0), styler
);
4428 case AARCH64_OPND_SVE_ADDR_RZ
:
4429 case AARCH64_OPND_SVE_ADDR_RZ_LSL1
:
4430 case AARCH64_OPND_SVE_ADDR_RZ_LSL2
:
4431 case AARCH64_OPND_SVE_ADDR_RZ_LSL3
:
4432 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14
:
4433 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22
:
4434 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14
:
4435 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22
:
4436 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14
:
4437 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22
:
4438 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14
:
4439 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22
:
4440 print_register_offset_address
4441 (buf
, size
, opnd
, get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1),
4442 get_addr_sve_reg_name (opnd
->addr
.offset
.regno
, opnd
->qualifier
),
4446 case AARCH64_OPND_ADDR_SIMM7
:
4447 case AARCH64_OPND_ADDR_SIMM9
:
4448 case AARCH64_OPND_ADDR_SIMM9_2
:
4449 case AARCH64_OPND_ADDR_SIMM10
:
4450 case AARCH64_OPND_ADDR_SIMM11
:
4451 case AARCH64_OPND_ADDR_SIMM13
:
4452 case AARCH64_OPND_ADDR_OFFSET
:
4453 case AARCH64_OPND_SME_ADDR_RI_U4xVL
:
4454 case AARCH64_OPND_SVE_ADDR_RI_S4x16
:
4455 case AARCH64_OPND_SVE_ADDR_RI_S4x32
:
4456 case AARCH64_OPND_SVE_ADDR_RI_S4xVL
:
4457 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL
:
4458 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL
:
4459 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL
:
4460 case AARCH64_OPND_SVE_ADDR_RI_S6xVL
:
4461 case AARCH64_OPND_SVE_ADDR_RI_S9xVL
:
4462 case AARCH64_OPND_SVE_ADDR_RI_U6
:
4463 case AARCH64_OPND_SVE_ADDR_RI_U6x2
:
4464 case AARCH64_OPND_SVE_ADDR_RI_U6x4
:
4465 case AARCH64_OPND_SVE_ADDR_RI_U6x8
:
4466 print_immediate_offset_address
4467 (buf
, size
, opnd
, get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1),
4471 case AARCH64_OPND_SVE_ADDR_ZI_U5
:
4472 case AARCH64_OPND_SVE_ADDR_ZI_U5x2
:
4473 case AARCH64_OPND_SVE_ADDR_ZI_U5x4
:
4474 case AARCH64_OPND_SVE_ADDR_ZI_U5x8
:
4475 print_immediate_offset_address
4477 get_addr_sve_reg_name (opnd
->addr
.base_regno
, opnd
->qualifier
),
4481 case AARCH64_OPND_SVE_ADDR_ZZ_LSL
:
4482 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW
:
4483 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW
:
4484 print_register_offset_address
4486 get_addr_sve_reg_name (opnd
->addr
.base_regno
, opnd
->qualifier
),
4487 get_addr_sve_reg_name (opnd
->addr
.offset
.regno
, opnd
->qualifier
),
4491 case AARCH64_OPND_ADDR_UIMM12
:
4492 name
= get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1);
4493 if (opnd
->addr
.offset
.imm
)
4494 snprintf (buf
, size
, "[%s, %s]",
4495 style_reg (styler
, name
),
4496 style_imm (styler
, "#%d", opnd
->addr
.offset
.imm
));
4498 snprintf (buf
, size
, "[%s]", style_reg (styler
, name
));
4501 case AARCH64_OPND_SYSREG
:
4502 for (i
= 0; aarch64_sys_regs
[i
].name
; ++i
)
4504 const aarch64_sys_reg
*sr
= aarch64_sys_regs
+ i
;
4507 = (!(sr
->flags
& (F_REG_READ
| F_REG_WRITE
))
4508 || (sr
->flags
& opnd
->sysreg
.flags
) == opnd
->sysreg
.flags
)
4509 && AARCH64_CPU_HAS_ALL_FEATURES (features
, sr
->features
);
4511 /* Try and find an exact match, But if that fails, return the first
4512 partial match that was found. */
4513 if (aarch64_sys_regs
[i
].value
== opnd
->sysreg
.value
4514 && ! aarch64_sys_reg_deprecated_p (aarch64_sys_regs
[i
].flags
)
4515 && ! aarch64_sys_reg_alias_p (aarch64_sys_regs
[i
].flags
)
4516 && (name
== NULL
|| exact_match
))
4518 name
= aarch64_sys_regs
[i
].name
;
4526 /* If we didn't match exactly, that means the presense of a flag
4527 indicates what we didn't want for this instruction. e.g. If
4528 F_REG_READ is there, that means we were looking for a write
4529 register. See aarch64_ext_sysreg. */
4530 if (aarch64_sys_regs
[i
].flags
& F_REG_WRITE
)
4531 *notes
= _("reading from a write-only register");
4532 else if (aarch64_sys_regs
[i
].flags
& F_REG_READ
)
4533 *notes
= _("writing to a read-only register");
4538 snprintf (buf
, size
, "%s", style_reg (styler
, name
));
4541 /* Implementation defined system register. */
4542 unsigned int value
= opnd
->sysreg
.value
;
4543 snprintf (buf
, size
, "%s",
4544 style_reg (styler
, "s%u_%u_c%u_c%u_%u",
4545 (value
>> 14) & 0x3, (value
>> 11) & 0x7,
4546 (value
>> 7) & 0xf, (value
>> 3) & 0xf,
4551 case AARCH64_OPND_PSTATEFIELD
:
4552 for (i
= 0; aarch64_pstatefields
[i
].name
; ++i
)
4553 if (aarch64_pstatefields
[i
].value
== opnd
->pstatefield
)
4555 /* PSTATEFIELD name is encoded partially in CRm[3:1] for SVCRSM,
4556 SVCRZA and SVCRSMZA. */
4557 uint32_t flags
= aarch64_pstatefields
[i
].flags
;
4558 if (flags
& F_REG_IN_CRM
4559 && (PSTATE_DECODE_CRM (opnd
->sysreg
.flags
)
4560 != PSTATE_DECODE_CRM (flags
)))
4564 assert (aarch64_pstatefields
[i
].name
);
4565 snprintf (buf
, size
, "%s",
4566 style_reg (styler
, aarch64_pstatefields
[i
].name
));
4569 case AARCH64_OPND_SYSREG_AT
:
4570 case AARCH64_OPND_SYSREG_DC
:
4571 case AARCH64_OPND_SYSREG_IC
:
4572 case AARCH64_OPND_SYSREG_TLBI
:
4573 case AARCH64_OPND_SYSREG_SR
:
4574 snprintf (buf
, size
, "%s", style_reg (styler
, opnd
->sysins_op
->name
));
4577 case AARCH64_OPND_BARRIER
:
4578 case AARCH64_OPND_BARRIER_DSB_NXS
:
4580 if (opnd
->barrier
->name
[0] == '#')
4581 snprintf (buf
, size
, "%s", style_imm (styler
, opnd
->barrier
->name
));
4583 snprintf (buf
, size
, "%s",
4584 style_sub_mnem (styler
, opnd
->barrier
->name
));
4588 case AARCH64_OPND_BARRIER_ISB
:
4589 /* Operand can be omitted, e.g. in DCPS1. */
4590 if (! optional_operand_p (opcode
, idx
)
4591 || (opnd
->barrier
->value
4592 != get_optional_operand_default_value (opcode
)))
4593 snprintf (buf
, size
, "%s",
4594 style_imm (styler
, "#0x%x", opnd
->barrier
->value
));
4597 case AARCH64_OPND_PRFOP
:
4598 if (opnd
->prfop
->name
!= NULL
)
4599 snprintf (buf
, size
, "%s", style_sub_mnem (styler
, opnd
->prfop
->name
));
4601 snprintf (buf
, size
, "%s", style_imm (styler
, "#0x%02x",
4602 opnd
->prfop
->value
));
4605 case AARCH64_OPND_RPRFMOP
:
4606 enum_value
= opnd
->imm
.value
;
4607 if (enum_value
< ARRAY_SIZE (aarch64_rprfmop_array
)
4608 && aarch64_rprfmop_array
[enum_value
])
4609 snprintf (buf
, size
, "%s",
4610 style_reg (styler
, aarch64_rprfmop_array
[enum_value
]));
4612 snprintf (buf
, size
, "%s",
4613 style_imm (styler
, "#%" PRIi64
, opnd
->imm
.value
));
4616 case AARCH64_OPND_BARRIER_PSB
:
4617 snprintf (buf
, size
, "%s", style_sub_mnem (styler
, "csync"));
4620 case AARCH64_OPND_X16
:
4621 snprintf (buf
, size
, "%s", style_reg (styler
, "x16"));
4624 case AARCH64_OPND_SME_ZT0
:
4625 snprintf (buf
, size
, "%s", style_reg (styler
, "zt0"));
4628 case AARCH64_OPND_SME_ZT0_INDEX
:
4629 snprintf (buf
, size
, "%s[%s]", style_reg (styler
, "zt0"),
4630 style_imm (styler
, "%d", (int) opnd
->imm
.value
));
4633 case AARCH64_OPND_SME_ZT0_LIST
:
4634 snprintf (buf
, size
, "{%s}", style_reg (styler
, "zt0"));
4637 case AARCH64_OPND_BARRIER_GCSB
:
4638 snprintf (buf
, size
, "%s", style_sub_mnem (styler
, "dsync"));
4641 case AARCH64_OPND_BTI_TARGET
:
4642 if ((HINT_FLAG (opnd
->hint_option
->value
) & HINT_OPD_F_NOPRINT
) == 0)
4643 snprintf (buf
, size
, "%s",
4644 style_sub_mnem (styler
, opnd
->hint_option
->name
));
4647 case AARCH64_OPND_MOPS_ADDR_Rd
:
4648 case AARCH64_OPND_MOPS_ADDR_Rs
:
4649 snprintf (buf
, size
, "[%s]!",
4651 get_int_reg_name (opnd
->reg
.regno
,
4652 AARCH64_OPND_QLF_X
, 0)));
4655 case AARCH64_OPND_MOPS_WB_Rn
:
4656 snprintf (buf
, size
, "%s!",
4657 style_reg (styler
, get_int_reg_name (opnd
->reg
.regno
,
4658 AARCH64_OPND_QLF_X
, 0)));
4662 snprintf (buf
, size
, "<invalid>");
/* Pack a system-register address (op0:op1:CRn:CRm:op2) into the compact
   encoding used by the register tables below.  */
#define CPENC(op0,op1,crn,crm,op2) \
  ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
/* for 3.9.3 Instructions for Accessing Special Purpose Registers */
#define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
/* for 3.9.10 System Instructions */
#define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))
/* TODO there is one more issues need to be resolved
   1. handle cpu-implementation-defined system registers.

   Note that the F_REG_{READ,WRITE} flags mean read-only and write-only
   respectively.  If neither of these are set then the register is read-write.  */
const aarch64_sys_reg aarch64_sys_regs [] =
{
  /* Expand each entry of the generated description file into a table
     initialiser; a null-named sentinel terminates the table.  */
#define SYSREG(name, encoding, flags, features) \
  { name, encoding, flags, features },
#include "aarch64-sys-regs.def"
  { 0, CPENC (0,0,0,0,0), 0, AARCH64_NO_FEATURES }
#undef SYSREG
};
4706 aarch64_sys_reg_deprecated_p (const uint32_t reg_flags
)
4708 return (reg_flags
& F_DEPRECATED
) != 0;
4712 aarch64_sys_reg_alias_p (const uint32_t reg_flags
)
4714 return (reg_flags
& F_REG_ALIAS
) != 0;
/* The CPENC below is fairly misleading, the fields
   here are not in CPENC form.  They are in op2op1 form.  The fields are encoded
   by ins_pstatefield, which just shifts the value by the width of the fields
   in a loop.  So if you CPENC them only the first value will be set, the rest
   are masked out to 0.  As an example.  op2 = 3, op1=2.  CPENC would produce a
   value of 0b110000000001000000 (0x30040) while what you want is
   0b011010 (0x1a).  */
const aarch64_sys_reg aarch64_pstatefields [] =
{
  { "spsel",    0x05, F_REG_MAX_VALUE (1), AARCH64_NO_FEATURES },
  { "daifset",  0x1e, F_REG_MAX_VALUE (15), AARCH64_NO_FEATURES },
  { "daifclr",  0x1f, F_REG_MAX_VALUE (15), AARCH64_NO_FEATURES },
  { "pan",      0x04, F_REG_MAX_VALUE (1) | F_ARCHEXT, AARCH64_FEATURE (PAN) },
  { "uao",      0x03, F_REG_MAX_VALUE (1) | F_ARCHEXT, AARCH64_FEATURE (V8_2A) },
  { "ssbs",     0x19, F_REG_MAX_VALUE (1) | F_ARCHEXT, AARCH64_FEATURE (SSBS) },
  { "dit",      0x1a, F_REG_MAX_VALUE (1) | F_ARCHEXT, AARCH64_FEATURE (V8_4A) },
  { "tco",      0x1c, F_REG_MAX_VALUE (1) | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
  /* The SVCR* fields additionally encode part of the name in CRm[3:1]
     (see PSTATE_ENCODE_CRM_AND_IMM / F_REG_IN_CRM handling above).  */
  { "svcrsm",   0x1b, PSTATE_ENCODE_CRM_AND_IMM (0x2,0x1) | F_REG_MAX_VALUE (1)
		      | F_ARCHEXT, AARCH64_FEATURE (SME) },
  { "svcrza",   0x1b, PSTATE_ENCODE_CRM_AND_IMM (0x4,0x1) | F_REG_MAX_VALUE (1)
		      | F_ARCHEXT, AARCH64_FEATURE (SME) },
  { "svcrsmza", 0x1b, PSTATE_ENCODE_CRM_AND_IMM (0x6,0x1) | F_REG_MAX_VALUE (1)
		      | F_ARCHEXT, AARCH64_FEATURE (SME) },
  { "allint",   0x08, F_REG_MAX_VALUE (1) | F_ARCHEXT, AARCH64_FEATURE (V8_8A) },
  { 0, CPENC (0,0,0,0,0), 0, AARCH64_NO_FEATURES },
};
4745 aarch64_pstatefield_supported_p (const aarch64_feature_set features
,
4746 const aarch64_sys_reg
*reg
)
4748 if (!(reg
->flags
& F_ARCHEXT
))
4751 return AARCH64_CPU_HAS_ALL_FEATURES (features
, reg
->features
);
/* Operands of the IC (instruction-cache maintenance) instruction.
   F_HASXT marks operations that take an Xt register argument.  */
const aarch64_sys_ins_reg aarch64_sys_regs_ic [] =
{
    { "ialluis", CPENS(0,C7,C1,0), 0 },
    { "iallu",   CPENS(0,C7,C5,0), 0 },
    { "ivau",    CPENS (3, C7, C5, 1), F_HASXT },
    { 0, CPENS(0,0,0,0), 0 }
};
/* Operands of the DC (data-cache maintenance) instruction.  F_HASXT marks
   operations taking an Xt argument; F_ARCHEXT marks operations gated on an
   architecture extension (checked in aarch64_sys_ins_reg_supported_p).  */
const aarch64_sys_ins_reg aarch64_sys_regs_dc [] =
{
    { "zva",	    CPENS (3, C7, C4, 1),  F_HASXT },
    { "gva",	    CPENS (3, C7, C4, 3),  F_HASXT | F_ARCHEXT },
    { "gzva",	    CPENS (3, C7, C4, 4),  F_HASXT | F_ARCHEXT },
    { "ivac",       CPENS (0, C7, C6, 1),  F_HASXT },
    { "igvac",      CPENS (0, C7, C6, 3),  F_HASXT | F_ARCHEXT },
    { "igsw",       CPENS (0, C7, C6, 4),  F_HASXT | F_ARCHEXT },
    { "isw",	    CPENS (0, C7, C6, 2),  F_HASXT },
    { "igdvac",	    CPENS (0, C7, C6, 5),  F_HASXT | F_ARCHEXT },
    { "igdsw",	    CPENS (0, C7, C6, 6),  F_HASXT | F_ARCHEXT },
    { "cvac",       CPENS (3, C7, C10, 1), F_HASXT },
    { "cgvac",      CPENS (3, C7, C10, 3), F_HASXT | F_ARCHEXT },
    { "cgdvac",     CPENS (3, C7, C10, 5), F_HASXT | F_ARCHEXT },
    { "csw",	    CPENS (0, C7, C10, 2), F_HASXT },
    { "cgsw",       CPENS (0, C7, C10, 4), F_HASXT | F_ARCHEXT },
    { "cgdsw",	    CPENS (0, C7, C10, 6), F_HASXT | F_ARCHEXT },
    { "cvau",       CPENS (3, C7, C11, 1), F_HASXT },
    { "cvap",       CPENS (3, C7, C12, 1), F_HASXT | F_ARCHEXT },
    { "cgvap",      CPENS (3, C7, C12, 3), F_HASXT | F_ARCHEXT },
    { "cgdvap",     CPENS (3, C7, C12, 5), F_HASXT | F_ARCHEXT },
    { "cvadp",      CPENS (3, C7, C13, 1), F_HASXT | F_ARCHEXT },
    { "cgvadp",     CPENS (3, C7, C13, 3), F_HASXT | F_ARCHEXT },
    { "cgdvadp",    CPENS (3, C7, C13, 5), F_HASXT | F_ARCHEXT },
    { "civac",      CPENS (3, C7, C14, 1), F_HASXT },
    { "cigvac",     CPENS (3, C7, C14, 3), F_HASXT | F_ARCHEXT },
    { "cigdvac",    CPENS (3, C7, C14, 5), F_HASXT | F_ARCHEXT },
    { "cisw",       CPENS (0, C7, C14, 2), F_HASXT },
    { "cigsw",      CPENS (0, C7, C14, 4), F_HASXT | F_ARCHEXT },
    { "cigdsw",     CPENS (0, C7, C14, 6), F_HASXT | F_ARCHEXT },
    { "cipapa",     CPENS (6, C7, C14, 1), F_HASXT },
    { "cigdpapa",   CPENS (6, C7, C14, 5), F_HASXT },
    { 0, CPENS(0,0,0,0), 0 }
};
/* Operands of the AT (address-translation) instruction.  */
const aarch64_sys_ins_reg aarch64_sys_regs_at [] =
{
    { "s1e1r",      CPENS (0, C7, C8, 0), F_HASXT },
    { "s1e1w",      CPENS (0, C7, C8, 1), F_HASXT },
    { "s1e0r",      CPENS (0, C7, C8, 2), F_HASXT },
    { "s1e0w",      CPENS (0, C7, C8, 3), F_HASXT },
    { "s12e1r",     CPENS (4, C7, C8, 4), F_HASXT },
    { "s12e1w",     CPENS (4, C7, C8, 5), F_HASXT },
    { "s12e0r",     CPENS (4, C7, C8, 6), F_HASXT },
    { "s12e0w",     CPENS (4, C7, C8, 7), F_HASXT },
    { "s1e2r",      CPENS (4, C7, C8, 0), F_HASXT },
    { "s1e2w",      CPENS (4, C7, C8, 1), F_HASXT },
    { "s1e3r",      CPENS (6, C7, C8, 0), F_HASXT },
    { "s1e3w",      CPENS (6, C7, C8, 1), F_HASXT },
    { "s1e1rp",     CPENS (0, C7, C9, 0), F_HASXT | F_ARCHEXT },
    { "s1e1wp",     CPENS (0, C7, C9, 1), F_HASXT | F_ARCHEXT },
    { "s1e1a",      CPENS (0, C7, C9, 2), F_HASXT | F_ARCHEXT },
    { "s1e2a",      CPENS (4, C7, C9, 2), F_HASXT | F_ARCHEXT },
    { "s1e3a",      CPENS (6, C7, C9, 2), F_HASXT | F_ARCHEXT },
    { 0, CPENS(0,0,0,0), 0 }
};
/* Operands of the TLBI (TLB-invalidate) instruction.  Entries below the
   base-architecture group are the *OS (outer-shareable) and R* (range)
   forms added by later extensions, then the RME PA-space forms.  */
const aarch64_sys_ins_reg aarch64_sys_regs_tlbi [] =
{
    { "vmalle1",   CPENS(0,C8,C7,0), 0 },
    { "vae1",      CPENS (0, C8, C7, 1), F_HASXT },
    { "aside1",    CPENS (0, C8, C7, 2), F_HASXT },
    { "vaae1",     CPENS (0, C8, C7, 3), F_HASXT },
    { "vmalle1is", CPENS(0,C8,C3,0), 0 },
    { "vae1is",    CPENS (0, C8, C3, 1), F_HASXT },
    { "aside1is",  CPENS (0, C8, C3, 2), F_HASXT },
    { "vaae1is",   CPENS (0, C8, C3, 3), F_HASXT },
    { "ipas2e1is", CPENS (4, C8, C0, 1), F_HASXT },
    { "ipas2le1is",CPENS (4, C8, C0, 5), F_HASXT },
    { "ipas2e1",   CPENS (4, C8, C4, 1), F_HASXT },
    { "ipas2le1",  CPENS (4, C8, C4, 5), F_HASXT },
    { "vae2",      CPENS (4, C8, C7, 1), F_HASXT },
    { "vae2is",    CPENS (4, C8, C3, 1), F_HASXT },
    { "vmalls12e1",CPENS(4,C8,C7,6), 0 },
    { "vmalls12e1is",CPENS(4,C8,C3,6), 0 },
    { "vae3",      CPENS (6, C8, C7, 1), F_HASXT },
    { "vae3is",    CPENS (6, C8, C3, 1), F_HASXT },
    { "alle2",     CPENS(4,C8,C7,0), 0 },
    { "alle2is",   CPENS(4,C8,C3,0), 0 },
    { "alle1",     CPENS(4,C8,C7,4), 0 },
    { "alle1is",   CPENS(4,C8,C3,4), 0 },
    { "alle3",     CPENS(6,C8,C7,0), 0 },
    { "alle3is",   CPENS(6,C8,C3,0), 0 },
    { "vale1is",   CPENS (0, C8, C3, 5), F_HASXT },
    { "vale2is",   CPENS (4, C8, C3, 5), F_HASXT },
    { "vale3is",   CPENS (6, C8, C3, 5), F_HASXT },
    { "vaale1is",  CPENS (0, C8, C3, 7), F_HASXT },
    { "vale1",     CPENS (0, C8, C7, 5), F_HASXT },
    { "vale2",     CPENS (4, C8, C7, 5), F_HASXT },
    { "vale3",     CPENS (6, C8, C7, 5), F_HASXT },
    { "vaale1",    CPENS (0, C8, C7, 7), F_HASXT },

    { "vmalle1os",    CPENS (0, C8, C1, 0), F_ARCHEXT },
    { "vae1os",       CPENS (0, C8, C1, 1), F_HASXT | F_ARCHEXT },
    { "aside1os",     CPENS (0, C8, C1, 2), F_HASXT | F_ARCHEXT },
    { "vaae1os",      CPENS (0, C8, C1, 3), F_HASXT | F_ARCHEXT },
    { "vale1os",      CPENS (0, C8, C1, 5), F_HASXT | F_ARCHEXT },
    { "vaale1os",     CPENS (0, C8, C1, 7), F_HASXT | F_ARCHEXT },
    { "ipas2e1os",    CPENS (4, C8, C4, 0), F_HASXT | F_ARCHEXT },
    { "ipas2le1os",   CPENS (4, C8, C4, 4), F_HASXT | F_ARCHEXT },
    { "vae2os",       CPENS (4, C8, C1, 1), F_HASXT | F_ARCHEXT },
    { "vale2os",      CPENS (4, C8, C1, 5), F_HASXT | F_ARCHEXT },
    { "vmalls12e1os", CPENS (4, C8, C1, 6), F_ARCHEXT },
    { "vae3os",       CPENS (6, C8, C1, 1), F_HASXT | F_ARCHEXT },
    { "vale3os",      CPENS (6, C8, C1, 5), F_HASXT | F_ARCHEXT },
    { "alle2os",      CPENS (4, C8, C1, 0), F_ARCHEXT },
    { "alle1os",      CPENS (4, C8, C1, 4), F_ARCHEXT },
    { "alle3os",      CPENS (6, C8, C1, 0), F_ARCHEXT },

    { "rvae1",      CPENS (0, C8, C6, 1), F_HASXT | F_ARCHEXT },
    { "rvaae1",     CPENS (0, C8, C6, 3), F_HASXT | F_ARCHEXT },
    { "rvale1",     CPENS (0, C8, C6, 5), F_HASXT | F_ARCHEXT },
    { "rvaale1",    CPENS (0, C8, C6, 7), F_HASXT | F_ARCHEXT },
    { "rvae1is",    CPENS (0, C8, C2, 1), F_HASXT | F_ARCHEXT },
    { "rvaae1is",   CPENS (0, C8, C2, 3), F_HASXT | F_ARCHEXT },
    { "rvale1is",   CPENS (0, C8, C2, 5), F_HASXT | F_ARCHEXT },
    { "rvaale1is",  CPENS (0, C8, C2, 7), F_HASXT | F_ARCHEXT },
    { "rvae1os",    CPENS (0, C8, C5, 1), F_HASXT | F_ARCHEXT },
    { "rvaae1os",   CPENS (0, C8, C5, 3), F_HASXT | F_ARCHEXT },
    { "rvale1os",   CPENS (0, C8, C5, 5), F_HASXT | F_ARCHEXT },
    { "rvaale1os",  CPENS (0, C8, C5, 7), F_HASXT | F_ARCHEXT },
    { "ripas2e1is", CPENS (4, C8, C0, 2), F_HASXT | F_ARCHEXT },
    { "ripas2le1is",CPENS (4, C8, C0, 6), F_HASXT | F_ARCHEXT },
    { "ripas2e1",   CPENS (4, C8, C4, 2), F_HASXT | F_ARCHEXT },
    { "ripas2le1",  CPENS (4, C8, C4, 6), F_HASXT | F_ARCHEXT },
    { "ripas2e1os", CPENS (4, C8, C4, 3), F_HASXT | F_ARCHEXT },
    { "ripas2le1os",CPENS (4, C8, C4, 7), F_HASXT | F_ARCHEXT },
    { "rvae2",      CPENS (4, C8, C6, 1), F_HASXT | F_ARCHEXT },
    { "rvale2",     CPENS (4, C8, C6, 5), F_HASXT | F_ARCHEXT },
    { "rvae2is",    CPENS (4, C8, C2, 1), F_HASXT | F_ARCHEXT },
    { "rvale2is",   CPENS (4, C8, C2, 5), F_HASXT | F_ARCHEXT },
    { "rvae2os",    CPENS (4, C8, C5, 1), F_HASXT | F_ARCHEXT },
    { "rvale2os",   CPENS (4, C8, C5, 5), F_HASXT | F_ARCHEXT },
    { "rvae3",      CPENS (6, C8, C6, 1), F_HASXT | F_ARCHEXT },
    { "rvale3",     CPENS (6, C8, C6, 5), F_HASXT | F_ARCHEXT },
    { "rvae3is",    CPENS (6, C8, C2, 1), F_HASXT | F_ARCHEXT },
    { "rvale3is",   CPENS (6, C8, C2, 5), F_HASXT | F_ARCHEXT },
    { "rvae3os",    CPENS (6, C8, C5, 1), F_HASXT | F_ARCHEXT },
    { "rvale3os",   CPENS (6, C8, C5, 5), F_HASXT | F_ARCHEXT },

    { "rpaos",      CPENS (6, C8, C4, 3), F_HASXT },
    { "rpalos",     CPENS (6, C8, C4, 7), F_HASXT },
    { "paallos",    CPENS (6, C8, C1, 4), 0},
    { "paall",      CPENS (6, C8, C7, 4), 0},

    { 0, CPENS(0,0,0,0), 0 }
};
/* Operands of the speculation-restriction (CFP/DVP/CPP) instructions.  */
const aarch64_sys_ins_reg aarch64_sys_regs_sr [] =
{
    /* RCTX is somewhat unique in a way that it has different values
       (op2) based on the instruction in which it is used (cfp/dvp/cpp).
       Thus op2 is masked out and instead encoded directly in the
       aarch64_opcode_table entries for the respective instructions.  */
    { "rctx",   CPENS(3,C7,C3,0), F_HASXT | F_ARCHEXT | F_REG_WRITE}, /* WO */

    { 0, CPENS(0,0,0,0), 0 }
};
4922 aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg
*sys_ins_reg
)
4924 return (sys_ins_reg
->flags
& F_HASXT
) != 0;
/* Return TRUE if the instruction-form system register named REG_NAME with
   encoding REG_VALUE and flags REG_FLAGS is usable given the enabled CPU
   features FEATURES.  REG_FEATURES, when set, lists the features the
   register itself requires; otherwise the register value is matched
   against the per-extension encoding groups below.  */
bool
aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
				 const char *reg_name,
				 aarch64_insn reg_value,
				 uint32_t reg_flags,
				 const aarch64_feature_set *reg_features)
{
  /* Armv8-R has no EL3.  */
  if (AARCH64_CPU_HAS_FEATURE (features, V8R))
    {
      const char *suffix = strrchr (reg_name, '_');
      if (suffix && !strcmp (suffix, "_el3"))
	return false;
    }

  /* Registers not gated on an architecture extension are always usable.  */
  if (!(reg_flags & F_ARCHEXT))
    return true;

  if (reg_features
      && AARCH64_CPU_HAS_ALL_FEATURES (features, *reg_features))
    return true;

  /* ARMv8.4 TLB instructions.  */
  if ((reg_value == CPENS (0, C8, C1, 0)
       || reg_value == CPENS (0, C8, C1, 1)
       || reg_value == CPENS (0, C8, C1, 2)
       || reg_value == CPENS (0, C8, C1, 3)
       || reg_value == CPENS (0, C8, C1, 5)
       || reg_value == CPENS (0, C8, C1, 7)
       || reg_value == CPENS (4, C8, C4, 0)
       || reg_value == CPENS (4, C8, C4, 4)
       || reg_value == CPENS (4, C8, C1, 1)
       || reg_value == CPENS (4, C8, C1, 5)
       || reg_value == CPENS (4, C8, C1, 6)
       || reg_value == CPENS (6, C8, C1, 1)
       || reg_value == CPENS (6, C8, C1, 5)
       || reg_value == CPENS (4, C8, C1, 0)
       || reg_value == CPENS (4, C8, C1, 4)
       || reg_value == CPENS (6, C8, C1, 0)
       || reg_value == CPENS (0, C8, C6, 1)
       || reg_value == CPENS (0, C8, C6, 3)
       || reg_value == CPENS (0, C8, C6, 5)
       || reg_value == CPENS (0, C8, C6, 7)
       || reg_value == CPENS (0, C8, C2, 1)
       || reg_value == CPENS (0, C8, C2, 3)
       || reg_value == CPENS (0, C8, C2, 5)
       || reg_value == CPENS (0, C8, C2, 7)
       || reg_value == CPENS (0, C8, C5, 1)
       || reg_value == CPENS (0, C8, C5, 3)
       || reg_value == CPENS (0, C8, C5, 5)
       || reg_value == CPENS (0, C8, C5, 7)
       || reg_value == CPENS (4, C8, C0, 2)
       || reg_value == CPENS (4, C8, C0, 6)
       || reg_value == CPENS (4, C8, C4, 2)
       || reg_value == CPENS (4, C8, C4, 6)
       || reg_value == CPENS (4, C8, C4, 3)
       || reg_value == CPENS (4, C8, C4, 7)
       || reg_value == CPENS (4, C8, C6, 1)
       || reg_value == CPENS (4, C8, C6, 5)
       || reg_value == CPENS (4, C8, C2, 1)
       || reg_value == CPENS (4, C8, C2, 5)
       || reg_value == CPENS (4, C8, C5, 1)
       || reg_value == CPENS (4, C8, C5, 5)
       || reg_value == CPENS (6, C8, C6, 1)
       || reg_value == CPENS (6, C8, C6, 5)
       || reg_value == CPENS (6, C8, C2, 1)
       || reg_value == CPENS (6, C8, C2, 5)
       || reg_value == CPENS (6, C8, C5, 1)
       || reg_value == CPENS (6, C8, C5, 5))
      && AARCH64_CPU_HAS_FEATURE (features, V8_4A))
    return true;

  /* DC CVAP.  Values are from aarch64_sys_regs_dc.  */
  if (reg_value == CPENS (3, C7, C12, 1)
      && AARCH64_CPU_HAS_FEATURE (features, V8_2A))
    return true;

  /* DC CVADP.  Values are from aarch64_sys_regs_dc.  */
  if (reg_value == CPENS (3, C7, C13, 1)
      && AARCH64_CPU_HAS_FEATURE (features, CVADP))
    return true;

  /* DC <dc_op> for ARMv8.5-A Memory Tagging Extension.  */
  if ((reg_value == CPENS (0, C7, C6, 3)
       || reg_value == CPENS (0, C7, C6, 4)
       || reg_value == CPENS (0, C7, C10, 4)
       || reg_value == CPENS (0, C7, C14, 4)
       || reg_value == CPENS (3, C7, C10, 3)
       || reg_value == CPENS (3, C7, C12, 3)
       || reg_value == CPENS (3, C7, C13, 3)
       || reg_value == CPENS (3, C7, C14, 3)
       || reg_value == CPENS (3, C7, C4, 3)
       || reg_value == CPENS (0, C7, C6, 5)
       || reg_value == CPENS (0, C7, C6, 6)
       || reg_value == CPENS (0, C7, C10, 6)
       || reg_value == CPENS (0, C7, C14, 6)
       || reg_value == CPENS (3, C7, C10, 5)
       || reg_value == CPENS (3, C7, C12, 5)
       || reg_value == CPENS (3, C7, C13, 5)
       || reg_value == CPENS (3, C7, C14, 5)
       || reg_value == CPENS (3, C7, C4, 4))
      && AARCH64_CPU_HAS_FEATURE (features, MEMTAG))
    return true;

  /* AT S1E1RP, AT S1E1WP.  Values are from aarch64_sys_regs_at.  */
  if ((reg_value == CPENS (0, C7, C9, 0)
       || reg_value == CPENS (0, C7, C9, 1))
      && AARCH64_CPU_HAS_FEATURE (features, V8_2A))
    return true;

  /* CFP/DVP/CPP RCTX : Value are from aarch64_sys_regs_sr.  */
  if (reg_value == CPENS (3, C7, C3, 0)
      && AARCH64_CPU_HAS_FEATURE (features, PREDRES))
    return true;

  if ((reg_value == CPENC (3,0,13,0,3)
       || reg_value == CPENC (3,0,13,0,6))
      && AARCH64_CPU_HAS_FEATURE (features, THE))
    return true;

  if ((reg_value == CPENS (0, C7, C9, 2)
       || reg_value == CPENS (4, C7, C9, 2)
       || reg_value == CPENS (6, C7, C9, 2))
      && AARCH64_CPU_HAS_FEATURE (features, ATS1A))
    return true;

  return false;
}
/* Extract bit BT of instruction word INSN.  */
#define BIT(INSN,BT)  (((INSN) >> (BT)) & 1)
/* Extract the inclusive bit-field [HI:LO] of instruction word INSN.  */
#define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1 << (((HI) - (LO)) + 1)) - 1))
/* Instruction-specific verifier for LDPSW: with write-back, the base
   register must not overlap either destination register (unless it is
   XZR/SP, regno 31); for a load, the two destinations must differ.
   NOTE(review): the BIT(insn, 23)/BIT(insn, 22) guards below were lost in
   extraction and have been reconstructed — confirm against upstream.  */
static enum err_type
verify_ldpsw (const struct aarch64_inst *inst ATTRIBUTE_UNUSED,
	      const aarch64_insn insn, bfd_vma pc ATTRIBUTE_UNUSED,
	      bool encoding ATTRIBUTE_UNUSED,
	      aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED,
	      aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED)
{
  int t  = BITS (insn, 4, 0);
  int n  = BITS (insn, 9, 5);
  int t2 = BITS (insn, 14, 10);

  if (BIT (insn, 23))
    {
      /* Write back enabled.  */
      if ((t == n || t2 == n) && n != 31)
	return ERR_UND;
    }

  if (BIT (insn, 22))
    {
      /* Load */
      if (t == t2)
	return ERR_UND;
    }

  return ERR_NTR;
}
/* Verifier for vector by element 3 operands functions where the
   conditions `if sz:L == 11 then UNDEFINED` holds.  */

static enum err_type
verify_elem_sd (const struct aarch64_inst *inst, const aarch64_insn insn,
		bfd_vma pc ATTRIBUTE_UNUSED, bool encoding,
		aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED,
		aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED)
{
  const aarch64_insn undef_pattern = 0x3;
  aarch64_insn value;

  assert (inst->opcode);
  assert (inst->opcode->operands[2] == AARCH64_OPND_Em);
  /* When assembling, the encoded value is in inst->value; when
     disassembling, it is the raw instruction word.  */
  value = encoding ? inst->value : insn;
  assert (value);

  if (undef_pattern == extract_fields (value, 0, 2, FLD_sz, FLD_L))
    return ERR_UND;

  return ERR_NTR;
}
/* Check an instruction that takes three register operands and that
   requires the register numbers to be distinct from one another.  */

static enum err_type
verify_three_different_regs (const struct aarch64_inst *inst,
			     const aarch64_insn insn ATTRIBUTE_UNUSED,
			     bfd_vma pc ATTRIBUTE_UNUSED,
			     bool encoding ATTRIBUTE_UNUSED,
			     aarch64_operand_error *mismatch_detail
			       ATTRIBUTE_UNUSED,
			     aarch64_instr_sequence *insn_sequence
			       ATTRIBUTE_UNUSED)
{
  int rd, rs, rn;

  rd = inst->operands[0].reg.regno;
  rs = inst->operands[1].reg.regno;
  rn = inst->operands[2].reg.regno;
  if (rd == rs || rd == rn || rs == rn)
    {
      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
      mismatch_detail->error
	= _("the three register operands must be distinct from one another");
      mismatch_detail->index = -1;
      return ERR_UND;
    }

  return ERR_OK;
}
5157 /* Add INST to the end of INSN_SEQUENCE. */
5160 add_insn_to_sequence (const struct aarch64_inst
*inst
,
5161 aarch64_instr_sequence
*insn_sequence
)
5163 insn_sequence
->instr
[insn_sequence
->num_added_insns
++] = *inst
;
/* Initialize an instruction sequence insn_sequence with the instruction INST.
   If INST is NULL the given insn_sequence is cleared and the sequence is left
   empty.  */

static void
init_insn_sequence (const struct aarch64_inst *inst,
		    aarch64_instr_sequence *insn_sequence)
{
  int num_req_entries = 0;

  /* Drop any previously tracked sequence.  */
  if (insn_sequence->instr)
    {
      XDELETE (insn_sequence->instr);
      insn_sequence->instr = NULL;
    }

  /* Handle all the cases here.  May need to think of something smarter than
     a giant if/else chain if this grows.  At that time, a lookup table may be
     best.  */
  if (inst && inst->opcode->constraints & C_SCAN_MOVPRFX)
    num_req_entries = 1;
  if (inst && (inst->opcode->constraints & C_SCAN_MOPS_PME) == C_SCAN_MOPS_P)
    num_req_entries = 2;

  insn_sequence->num_added_insns = 0;
  insn_sequence->num_allocated_insns = num_req_entries;

  if (num_req_entries != 0)
    {
      insn_sequence->instr = XCNEWVEC (aarch64_inst, num_req_entries);
      add_insn_to_sequence (inst, insn_sequence);
    }
}
/* Subroutine of verify_constraints.  Check whether the instruction
   is part of a MOPS P/M/E sequence and, if so, whether sequencing
   expectations are met.  Return true if the check passes, otherwise
   describe the problem in MISMATCH_DETAIL.

   IS_NEW_SECTION is true if INST is assumed to start a new section.
   The other arguments are as for verify_constraints.  */

static bool
verify_mops_pme_sequence (const struct aarch64_inst *inst,
			  bool is_new_section,
			  aarch64_operand_error *mismatch_detail,
			  aarch64_instr_sequence *insn_sequence)
{
  const struct aarch64_opcode *opcode;
  const struct aarch64_inst *prev_insn;
  int i;

  opcode = inst->opcode;
  if (insn_sequence->instr)
    prev_insn = insn_sequence->instr + (insn_sequence->num_added_insns - 1);
  else
    prev_insn = NULL;

  /* The P/M/E opcodes are consecutive table entries, so "opcode - 1"
     is the expected predecessor of OPCODE in a well-formed sequence.  */
  if (prev_insn
      && (prev_insn->opcode->constraints & C_SCAN_MOPS_PME)
      && prev_insn->opcode != opcode - 1)
    {
      mismatch_detail->kind = AARCH64_OPDE_EXPECTED_A_AFTER_B;
      mismatch_detail->error = NULL;
      mismatch_detail->index = -1;
      mismatch_detail->data[0].s = prev_insn->opcode[1].name;
      mismatch_detail->data[1].s = prev_insn->opcode->name;
      mismatch_detail->non_fatal = true;
      return false;
    }

  if (opcode->constraints & C_SCAN_MOPS_PME)
    {
      /* A non-initial MOPS instruction must directly follow its
	 predecessor in the same section.  */
      if (is_new_section || !prev_insn || prev_insn->opcode != opcode - 1)
	{
	  mismatch_detail->kind = AARCH64_OPDE_A_SHOULD_FOLLOW_B;
	  mismatch_detail->error = NULL;
	  mismatch_detail->index = -1;
	  mismatch_detail->data[0].s = opcode->name;
	  mismatch_detail->data[1].s = opcode[-1].name;
	  mismatch_detail->non_fatal = true;
	  return false;
	}

      for (i = 0; i < 3; ++i)
	/* There's no specific requirement for the data register to be
	   the same between consecutive SET* instructions.  */
	if ((opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rd
	     || opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rs
	     || opcode->operands[i] == AARCH64_OPND_MOPS_WB_Rn)
	    && prev_insn->operands[i].reg.regno != inst->operands[i].reg.regno)
	  {
	    mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	    if (opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rd)
	      mismatch_detail->error = _("destination register differs from "
					 "preceding instruction");
	    else if (opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rs)
	      mismatch_detail->error = _("source register differs from "
					 "preceding instruction");
	    else
	      mismatch_detail->error = _("size register differs from "
					 "preceding instruction");
	    mismatch_detail->index = i;
	    mismatch_detail->non_fatal = true;
	    return false;
	  }
    }

  return true;
}
/* This function verifies that the instruction INST adheres to its specified
   constraints.  If it does then ERR_OK is returned, if not then ERR_VFI is
   returned and MISMATCH_DETAIL contains the reason why verification failed.

   The function is called both during assembly and disassembly.  If assembling
   then ENCODING will be TRUE, else FALSE.  If dissassembling PC will be set
   and will contain the PC of the current instruction w.r.t to the section.

   If ENCODING and PC=0 then you are at a start of a section.  The constraints
   are verified against the given state insn_sequence which is updated as it
   transitions through the verification.  */

enum err_type
verify_constraints (const struct aarch64_inst *inst,
		    const aarch64_insn insn ATTRIBUTE_UNUSED,
		    bfd_vma pc,
		    bool encoding,
		    aarch64_operand_error *mismatch_detail,
		    aarch64_instr_sequence *insn_sequence)
{
  assert (inst->opcode);

  const struct aarch64_opcode *opcode = inst->opcode;
  if (!opcode->constraints && !insn_sequence->instr)
    return ERR_OK;

  assert (insn_sequence);

  enum err_type res = ERR_OK;

  /* This instruction puts a constraint on the insn_sequence.  */
  if (opcode->flags & F_SCAN)
    {
      if (insn_sequence->instr)
	{
	  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	  mismatch_detail->error = _("instruction opens new dependency "
				     "sequence without ending previous one");
	  mismatch_detail->index = -1;
	  mismatch_detail->non_fatal = true;
	  res = ERR_VFI;
	}

      init_insn_sequence (inst, insn_sequence);
      return res;
    }

  bool is_new_section = (!encoding && pc == 0);
  if (!verify_mops_pme_sequence (inst, is_new_section, mismatch_detail,
				 insn_sequence))
    {
      res = ERR_VFI;
      if ((opcode->constraints & C_SCAN_MOPS_PME) != C_SCAN_MOPS_M)
	init_insn_sequence (NULL, insn_sequence);
    }

  /* Verify constraints on an existing sequence.  */
  if (insn_sequence->instr)
    {
      const struct aarch64_opcode* inst_opcode = insn_sequence->instr->opcode;
      /* If we're decoding and we hit PC=0 with an open sequence then we haven't
	 closed a previous one that we should have.  */
      if (is_new_section && res == ERR_OK)
	{
	  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	  mismatch_detail->error = _("previous `movprfx' sequence not closed");
	  mismatch_detail->index = -1;
	  mismatch_detail->non_fatal = true;
	  res = ERR_VFI;
	  /* Reset the sequence.  */
	  init_insn_sequence (NULL, insn_sequence);
	  return res;
	}

      /* Validate C_SCAN_MOVPRFX constraints.  Move this to a lookup table.  */
      if (inst_opcode->constraints & C_SCAN_MOVPRFX)
	{
	  /* Check to see if the MOVPRFX SVE instruction is followed by an SVE
	     instruction for better error messages.  */
	  if (!opcode->avariant
	      || (!AARCH64_CPU_HAS_FEATURE (*opcode->avariant, SVE)
		  && !AARCH64_CPU_HAS_FEATURE (*opcode->avariant, SVE2)))
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("SVE instruction expected after "
					 "`movprfx'");
	      mismatch_detail->index = -1;
	      mismatch_detail->non_fatal = true;
	      res = ERR_VFI;
	      goto done;
	    }

	  /* Check to see if the MOVPRFX SVE instruction is followed by an SVE
	     instruction that is allowed to be used with a MOVPRFX.  */
	  if (!(opcode->constraints & C_SCAN_MOVPRFX))
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("SVE `movprfx' compatible instruction "
					 "expected");
	      mismatch_detail->index = -1;
	      mismatch_detail->non_fatal = true;
	      res = ERR_VFI;
	      goto done;
	    }

	  /* Next check for usage of the predicate register.  */
	  aarch64_opnd_info blk_dest = insn_sequence->instr->operands[0];
	  aarch64_opnd_info blk_pred, inst_pred;
	  memset (&blk_pred, 0, sizeof (aarch64_opnd_info));
	  memset (&inst_pred, 0, sizeof (aarch64_opnd_info));
	  bool predicated = false;
	  assert (blk_dest.type == AARCH64_OPND_SVE_Zd);

	  /* Determine if the movprfx instruction used is predicated or not.  */
	  if (insn_sequence->instr->operands[1].type == AARCH64_OPND_SVE_Pg3)
	    {
	      predicated = true;
	      blk_pred = insn_sequence->instr->operands[1];
	    }

	  unsigned char max_elem_size = 0;
	  unsigned char current_elem_size;
	  int num_op_used = 0, last_op_usage = 0;
	  int i, inst_pred_idx = -1;
	  int num_ops = aarch64_num_of_operands (opcode);
	  /* Scan INST's operands, counting uses of the movprfx destination
	     register and locating the governing predicate, if any.  */
	  for (i = 0; i < num_ops; i++)
	    {
	      aarch64_opnd_info inst_op = inst->operands[i];
	      switch (inst_op.type)
		{
		  case AARCH64_OPND_SVE_Zd:
		  case AARCH64_OPND_SVE_Zm_5:
		  case AARCH64_OPND_SVE_Zm_16:
		  case AARCH64_OPND_SVE_Zn:
		  case AARCH64_OPND_SVE_Zt:
		  case AARCH64_OPND_SVE_Vm:
		  case AARCH64_OPND_SVE_Vn:
		  case AARCH64_OPND_Va:
		  case AARCH64_OPND_Vn:
		  case AARCH64_OPND_Vm:
		  case AARCH64_OPND_Sn:
		  case AARCH64_OPND_Sm:
		    if (inst_op.reg.regno == blk_dest.reg.regno)
		      {
			num_op_used++;
			last_op_usage = i;
		      }
		    current_elem_size
		      = aarch64_get_qualifier_esize (inst_op.qualifier);
		    if (current_elem_size > max_elem_size)
		      max_elem_size = current_elem_size;
		    break;
		  case AARCH64_OPND_SVE_Pd:
		  case AARCH64_OPND_SVE_Pg3:
		  case AARCH64_OPND_SVE_Pg4_5:
		  case AARCH64_OPND_SVE_Pg4_10:
		  case AARCH64_OPND_SVE_Pg4_16:
		  case AARCH64_OPND_SVE_Pm:
		  case AARCH64_OPND_SVE_Pn:
		  case AARCH64_OPND_SVE_Pt:
		  case AARCH64_OPND_SME_Pm:
		    inst_pred = inst_op;
		    inst_pred_idx = i;
		    break;
		  default:
		    break;
		}
	    }

	  assert (max_elem_size != 0);
	  aarch64_opnd_info inst_dest = inst->operands[0];
	  /* Determine the size that should be used to compare against the
	     movprfx size.  */
	  current_elem_size
	    = opcode->constraints & C_MAX_ELEM
	      ? max_elem_size
	      : aarch64_get_qualifier_esize (inst_dest.qualifier);

	  /* If movprfx is predicated do some extra checks.  */
	  if (predicated)
	    {
	      /* The instruction must be predicated.  */
	      if (inst_pred_idx < 0)
		{
		  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
		  mismatch_detail->error = _("predicated instruction expected "
					     "after `movprfx'");
		  mismatch_detail->index = -1;
		  mismatch_detail->non_fatal = true;
		  res = ERR_VFI;
		  goto done;
		}

	      /* The instruction must have a merging predicate.  */
	      if (inst_pred.qualifier != AARCH64_OPND_QLF_P_M)
		{
		  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
		  mismatch_detail->error = _("merging predicate expected due "
					     "to preceding `movprfx'");
		  mismatch_detail->index = inst_pred_idx;
		  mismatch_detail->non_fatal = true;
		  res = ERR_VFI;
		  goto done;
		}

	      /* The same register must be used in instruction.  */
	      if (blk_pred.reg.regno != inst_pred.reg.regno)
		{
		  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
		  mismatch_detail->error = _("predicate register differs "
					     "from that in preceding "
					     "`movprfx'");
		  mismatch_detail->index = inst_pred_idx;
		  mismatch_detail->non_fatal = true;
		  res = ERR_VFI;
		  goto done;
		}
	    }

	  /* Destructive operations by definition must allow one usage of the
	     same register.  */
	  int allowed_usage
	    = aarch64_is_destructive_by_operands (opcode) ? 2 : 1;

	  /* Operand is not used at all.  */
	  if (num_op_used == 0)
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("output register of preceding "
					 "`movprfx' not used in current "
					 "instruction");
	      mismatch_detail->index = 0;
	      mismatch_detail->non_fatal = true;
	      res = ERR_VFI;
	      goto done;
	    }

	  /* We now know it's used, now determine exactly where it's used.  */
	  if (blk_dest.reg.regno != inst_dest.reg.regno)
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("output register of preceding "
					 "`movprfx' expected as output");
	      mismatch_detail->index = 0;
	      mismatch_detail->non_fatal = true;
	      res = ERR_VFI;
	      goto done;
	    }

	  /* Operand used more than allowed for the specific opcode type.  */
	  if (num_op_used > allowed_usage)
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("output register of preceding "
					 "`movprfx' used as input");
	      mismatch_detail->index = last_op_usage;
	      mismatch_detail->non_fatal = true;
	      res = ERR_VFI;
	      goto done;
	    }

	  /* Now the only thing left is the qualifiers checks.  The register
	     must have the same maximum element size.  */
	  if (inst_dest.qualifier
	      && blk_dest.qualifier
	      && current_elem_size
		 != aarch64_get_qualifier_esize (blk_dest.qualifier))
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("register size not compatible with "
					 "previous `movprfx'");
	      mismatch_detail->index = 0;
	      mismatch_detail->non_fatal = true;
	      res = ERR_VFI;
	      goto done;
	    }
	}

    done:
      if (insn_sequence->num_added_insns == insn_sequence->num_allocated_insns)
	/* We've checked the last instruction in the sequence and so
	   don't need the sequence any more.  */
	init_insn_sequence (NULL, insn_sequence);
      else
	add_insn_to_sequence (inst, insn_sequence);
    }

  return res;
}
/* Return true if VALUE cannot be moved into an SVE register using DUP
   (with any element size, not just ESIZE) and if using DUPM would
   therefore be OK.  ESIZE is the number of bytes in the immediate.  */

bool
aarch64_sve_dupm_mov_immediate_p (uint64_t uvalue, int esize)
{
  /* All bits above the ESIZE-byte element.  The shift is done in two
     halves so that esize == 8 does not shift a 64-bit value by 64
     (which would be undefined behavior).  */
  uint64_t top_mask = (uint64_t) -1 << (esize * 4) << (esize * 4);

  /* The value must be either zero-extended or sign-extended from the
     low ESIZE bytes; otherwise neither DUP nor DUPM applies.  */
  if ((uvalue & ~top_mask) != uvalue && (uvalue | top_mask) != uvalue)
    return false;

  /* Fold the value down to the narrowest element size whose replication
     reproduces it.  If it folds all the way to a single byte, DUP can
     always encode it.  */
  int64_t repl = (int64_t) uvalue;
  if (esize <= 4 || (uint32_t) uvalue == (uint32_t) (uvalue >> 32))
    {
      repl = (int32_t) uvalue;
      if (esize <= 2 || (uint16_t) uvalue == (uint16_t) (uvalue >> 16))
	{
	  repl = (int16_t) uvalue;
	  if (esize == 1 || (uint8_t) uvalue == (uint8_t) (uvalue >> 8))
	    return false;
	}
    }

  /* DUP also accepts a signed byte shifted left by eight.  */
  if ((repl & 0xff) == 0)
    repl /= 256;
  return repl < -128 || repl >= 128;
}
5596 /* Return true if a CPU with the AARCH64_FEATURE_* bits in CPU_VARIANT
5597 supports the instruction described by INST. */
5600 aarch64_cpu_supports_inst_p (aarch64_feature_set cpu_variant
,
5603 if (!inst
->opcode
->avariant
5604 || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant
, *inst
->opcode
->avariant
))
5607 if (inst
->opcode
->iclass
== sme_fp_sd
5608 && inst
->operands
[0].qualifier
== AARCH64_OPND_QLF_S_D
5609 && !AARCH64_CPU_HAS_FEATURE (cpu_variant
, SME_F64F64
))
5612 if (inst
->opcode
->iclass
== sme_int_sd
5613 && inst
->operands
[0].qualifier
== AARCH64_OPND_QLF_S_D
5614 && !AARCH64_CPU_HAS_FEATURE (cpu_variant
, SME_I16I64
))
5620 /* Include the opcode description table as well as the operand description
5622 #define VERIFIER(x) verify_##x
5623 #include "aarch64-tbl.h"